diff --git "a/825.jsonl" "b/825.jsonl" new file mode 100644--- /dev/null +++ "b/825.jsonl" @@ -0,0 +1,88 @@ +{"seq_id":"73505080860","text":"# 合并json\nimport os\n# 获取目标文件夹的路径\n# filedir ='C:/Users/Desktop/04072'\nfiledir ='D:\\周景怡\\周景怡\\WHU\\大三上\\文本理解与数据挖掘\\chinese-poetry-master\\peot22'\n# 获取当前文件夹中的文件名称列表\n# filenames=os.listdir(filedir)\nfilenames=os.listdir(filedir)\n# 打开当前目录下的result.json文件,如果没有则创建\nf = open('D:\\周景怡\\周景怡\\WHU\\大三上\\文本理解与数据挖掘\\chinese-poetry-master\\peot22\\peot2.json','w',encoding=\"utf-8\")\n# 先遍历文件名`在这里插入代码片`\nfor filename in filenames:\n filepath = filedir+'/'+filename\n # 遍历单个文件,读取行数\n for line in open(filepath, encoding=\"utf-8\"):\n f.writelines(line)\n f.write('\\n')\n# 关闭文件\nf.close()\n","repo_name":"fumuling/ancientModernProseClassifier","sub_path":"预处理/json_hebing.py","file_name":"json_hebing.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"74417094299","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.views.generic import TemplateView\n\nfrom django.contrib.auth import authenticate, login\nfrom django.shortcuts import render, redirect\n\nfrom decimal import Decimal\n\n\nclass HomePageView(TemplateView):\n template_name = \"home.html\"\n\n # Total a receber e a pagar\n # Exibir Gráfico de barras com previsão de valores a receber por mês\n # Exibir Gráfico de barras com previsão de valores a pagar por mês\n # Exibir Gráfico de barras com Vendas últimos 3 meses\n # Exibir Gráfico de barras com Pagamentos recebidos últimos 3 meses\n # Exibir Tabela com 3 últimos pagamentos\n # Exibir Tabela com 3 últimas vendas\n\n def get_context_data(self, **kwargs):\n ultimospagamentos = [\n [\"01/01/2023\", \"nome do cliente\", \"descrição\", Decimal(100)],\n [\"01/01/2023\", \"nome do cliente\", \"descrição\", Decimal(100)],\n [\"01/01/2023\", \"nome do cliente\", \"descrição\", Decimal(100)],\n ]\n\n ultimasvendas = [\n [\"01/01/2023\", \"nome do cliente\", \"nome do produto\", Decimal(100)],\n [\"01/01/2023\", \"nome do cliente\", \"nome do produto\", Decimal(100)],\n [\"01/01/2023\", \"nome do cliente\", \"nome do produto\", Decimal(100)],\n ]\n\n context = super().get_context_data(**kwargs)\n context[\"totalareceber\"] = Decimal(9999.99)\n context[\"totalapagar\"] = Decimal(9999.99)\n context[\"ultimospagamentos\"] = ultimospagamentos\n context[\"ultimasvendas\"] = ultimasvendas\n return context\n\n\nclass AboutPageView(TemplateView):\n template_name = \"about.html\"\n\n\n\"\"\"\ndef login_view(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect(\"index\")\n else:\n # exibir mensagem de erro de autenticação\n pass\n else:\n return render(request, \"login.html\")\n\"\"\"\n","repo_name":"luizaraujoneto/rlnclothes","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"69986663581","text":"import numpy as np\nfrom numba import njit, prange\nimport logging\n\nfrom arguseyes.issues import Issue, IssueDetector\nfrom arguseyes.templates import SourceType, Output\n\n\n# removed cache=True because of https://github.com/numba/numba/issues/4908 need a workaround soon\n@njit(fastmath=True, 
parallel=True)\ndef _compute_shapley_values(X_train, y_train, X_test, y_test, K=1):\n N = len(X_train)\n M = len(X_test)\n result = np.zeros(N, dtype=np.float32)\n\n for j in prange(M):\n score = np.zeros(N, dtype=np.float32)\n dist = np.zeros(N, dtype=np.float32)\n div_range = np.arange(1.0, N)\n div_min = np.minimum(div_range, K)\n for i in range(N):\n dist[i] = np.sqrt(np.sum(np.square(X_train[i] - X_test[j])))\n indices = np.argsort(dist)\n y_sorted = y_train[indices]\n eq_check = (y_sorted == y_test[j]) * 1.0\n diff = - 1 / K * (eq_check[1:] - eq_check[:-1])\n diff /= div_range\n diff *= div_min\n score[indices[:-1]] = diff\n score[indices[-1]] = eq_check[-1] / N\n score[indices] += np.sum(score[indices]) - np.cumsum(score[indices])\n result += score / M\n\n return result\n\n\nclass LabelErrors(IssueDetector):\n\n def detect(self, pipeline, params) -> Issue:\n\n if 'k' in params:\n k = params['k']\n else:\n k = 100\n\n threshold = params['max_fraction']\n\n X_train = pipeline.outputs[Output.X_TRAIN]\n y_train = pipeline.outputs[Output.Y_TRAIN]\n\n X_test = pipeline.outputs[Output.X_TEST]\n y_test = pipeline.outputs[Output.Y_TEST]\n\n # Still hacky, we need a principled way to flatten tensors for CV pipelines\n if len(X_train.shape) == 3:\n X_train = X_train.reshape(int(X_train.shape[0] / X_train.shape[1]), X_train.shape[1] * X_train.shape[1])\n X_test = X_test.reshape(int(X_test.shape[0] / X_test.shape[1]), X_test.shape[1] * X_test.shape[1])\n\n shapley_values = _compute_shapley_values(X_train,\n np.squeeze(y_train),\n X_test,\n np.squeeze(y_test),\n k)\n\n lineage_X_train = pipeline.output_lineage[Output.X_TRAIN]\n\n fact_table_index, fact_table_source = \\\n [(index, train_source) for index, train_source in enumerate(pipeline.train_sources)\n if train_source.source_type == SourceType.ENTITIES][0]\n\n shapley_values_by_row_id = {}\n\n for polynomial, shapley_value in zip(lineage_X_train, shapley_values):\n for entry in polynomial:\n if entry.operator_id == fact_table_source.operator_id:\n shapley_values_by_row_id[entry.row_id] = shapley_value\n\n data = fact_table_source.data\n fact_table_lineage = pipeline.train_source_lineage[fact_table_index]\n\n for row_index, row in data.iterrows(): \n data.at[row_index, '__shapley_value'] = \\\n self._find_shapley(fact_table_lineage[row_index], shapley_values_by_row_id)\n\n self.log_tag('arguseyes.shapley_values.operator_id', fact_table_source.operator_id)\n self.log_tag('arguseyes.shapley_values.k', k)\n self.log_tag('arguseyes.shapley_values.data_file', 'input-with-shapley-values.parquet')\n self.log_as_parquet_file(data, 'input-with-shapley-values.parquet')\n\n\n num_samples = len(shapley_values_by_row_id)\n num_negative = int(np.sum(np.array(list(shapley_values_by_row_id.values())) < 0, axis=0))\n fraction_negative = float(num_negative) / num_samples\n\n logging.info(f'Found {num_negative} out of {num_samples} samples with negative Shapley value.')\n\n\n\n has_too_many_label_errors = fraction_negative > threshold\n\n issue_details = {\n 'num_samples': num_samples,\n 'num_erroneous': num_negative,\n 'fraction': fraction_negative\n }\n\n return Issue('label_errors', has_too_many_label_errors, issue_details)\n\n\n\n def error_msg(self, issue) -> str:\n details = issue.details\n return f'Found {details[\"fraction\"]*100:.2f}% ({details[\"num_erroneous\"]}/{details[\"num_samples\"]}) ' \\\n 'of potentially mislabeled samples in the training data!'\n\n\n\n @staticmethod\n def _find_shapley(polynomial, shapley_values_by_row_id):\n for entry in 
polynomial:\n            if entry.row_id in shapley_values_by_row_id:\n                return shapley_values_by_row_id[entry.row_id]\n        return 0.0\n","repo_name":"amsterdata/arguseyes","sub_path":"arguseyes/issues/_label_errors.py","file_name":"_label_errors.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20077077774","text":"# run with python plot_result.py -b\n# -b is for batch mode\n\nimport ROOT \nimport sys\n\n# my modules\n# importing modules\nconfig_module_name = str(sys.argv[1])[:-3]\nprint (\"importing module\" + config_module_name)\ncf = __import__(config_module_name)\n\noperator_module_name = str(sys.argv[2])[:-3]\nprint (\"importing module\" + operator_module_name)\nops = __import__(operator_module_name)\n\n# open the csv file\nf = open(cf.tag + '/results.csv','r')\n\n# read one line as a string\n\nread_file = []\n\nline = f.readline()\nwhile ( line != \"\"):\n    line = line.replace('\\n','')\n    temp = line.split(',')\n    read_file.append(temp)\n    line = f.readline()\n\nf.close() \n\n# select best variables\nrecords = []\nrecord_index = 0\nrecords.append(read_file[1])\nwidth = 0.5*(float(records[0][3])-float(records[0][2]))\nprev_ele_name = records[0][0]\n\nfor element in read_file[2:]:\n    if element[0] == prev_ele_name:\n        new_width = 0.5*(float(element[3])-float(element[2]))\n        if new_width < width:\n            records[record_index] = element\n            width = new_width\n    if element[0] != prev_ele_name:\n        prev_ele_name = element[0]\n        record_index += 1 \n        records.append(element)\n        width = 0.5*(float(element[3])-float(element[2]))\n\n# sorting by 1 sigma interval width\nrecords.sort(key = lambda a : 0.5*(float(a[3]) - float(a[2])))\n\n# produce histogram\nnbins = len(records)\n# print(nbins)\n\nhisto1Sigma = ROOT.TH1F(\"histo1Sigma\",\"histo1Sigma\",nbins,0,nbins)\nhisto2Sigma = ROOT.TH1F(\"histo2Sigma\",\"histo2Sigma\",nbins,0,nbins)\n\n# plotting half CI widths\nfor i in range(nbins):\n    histo1Sigma.SetBinContent(i+1,0.5*(float(records[i][3])-float(records[i][2])))\n    histo1Sigma.GetXaxis().SetBinLabel(i+1,records[i][0] + ' (' + records[i][1] + ')')\n    histo1Sigma.GetXaxis().ChangeLabel(i+1,-45)\n    histo2Sigma.SetBinContent(i+1,0.5*(float(records[i][5])-float(records[i][4])))\n    histo2Sigma.GetXaxis().SetBinLabel(i+1,records[i][0] + ' (' + records[i][1] + ')')\n    histo2Sigma.GetXaxis().ChangeLabel(i+1,-45)\n\nc1 = ROOT.TCanvas()\n\n\nhisto2Sigma.Draw()\nhisto2Sigma.SetTitle(\"\")\nhisto2Sigma.SetFillColor(ROOT.kOrange)\nhisto1Sigma.Draw(\"same\")\nhisto1Sigma.SetTitle(\"\")\nhisto1Sigma.SetFillColor(ROOT.kRed)\n\n\nleg = ROOT.TLegend(0.1,0.7,0.4,0.9)\nleg.AddEntry(histo1Sigma,\"1 #sigma half width\",\"f\")\nleg.AddEntry(histo2Sigma,\"2 #sigma half width\",\"f\")\nleg.Draw()\n\nROOT.gStyle.SetOptStat(0)\nROOT.gPad.SetGrid()\n# printout\nc1.Print(cf.tag + \"/results_all.png\",\".png\")\nhisto2Sigma.GetYaxis().SetRangeUser(0,5)\nhisto1Sigma.GetYaxis().SetRangeUser(0,5)\nc1.Print(cf.tag + \"/results_all_zoom.png\",\".png\")\nc1.Print(cf.tag + \"/results_all_zoom.root\",\".root\")\n\nc1.SetLogy()\nc1.Print(cf.tag + \"/results_log.png\",\".png\")\n\n\n# make variable comparison prints for every operator\n\nfor op in ops.operator:\n\n    elements = [ x for x in read_file[1:] if x[0]== \"k_\"+op]\n\n    # sorting by width\n    elements.sort(key = lambda el : 0.5*(float(el[3]) - float(el[2])))\n\n    nbins = len(elements)\n    name1 = \"h_1S_Var_{}\".format(op)\n    name2 = \"h_2S_Var_{}\".format(op)\n    h_1S_Var = ROOT.TH1F(name1,name1,nbins,0,nbins)\n    
h_2S_Var = ROOT.TH1F(name2,name2,nbins,0,nbins)\n\n    for i in range(nbins):\n        h_1S_Var.SetBinContent(i+1,0.5*(float(elements[i][3])-float(elements[i][2])))\n        h_1S_Var.GetXaxis().SetBinLabel(i+1, elements[i][1] )\n        # h_1S_Var.GetXaxis().ChangeLabel(i+1,45)\n        h_2S_Var.SetBinContent(i+1,0.5*(float(elements[i][5])-float(elements[i][4])))\n        h_2S_Var.GetXaxis().SetBinLabel(i+1, elements[i][1] )\n        # h_2S_Var.GetXaxis().ChangeLabel(i+1,45)\n\n    c1 = ROOT.TCanvas()\n\n    h_2S_Var.Draw()\n    h_2S_Var.SetMinimum(0)\n    h_2S_Var.SetMaximum(h_2S_Var.GetMaximum()*1.1)\n    h_2S_Var.SetFillColor(ROOT.kCyan)\n    h_2S_Var.SetTitle(op)\n    h_1S_Var.Draw(\"same\")\n    h_1S_Var.SetFillColor(ROOT.kBlue)\n    h_1S_Var.SetTitle(op)\n\n    leg = ROOT.TLegend(0.6,0.7,0.9,0.9)\n    leg.AddEntry(h_1S_Var,\"1 #sigma half width\",\"f\")\n    leg.AddEntry(h_2S_Var,\"2 #sigma half width\",\"f\")\n    leg.Draw()\n\n    ROOT.gStyle.SetOptStat(0)\n    ROOT.gPad.SetGrid()\n    # printout\n    c1.Print(cf.tag + \"/results_{}.png\".format(op),\".png\")\n    c1.Print(cf.tag + \"/results_{}.root\".format(op),\".root\")","repo_name":"dbrambilla13/EFT_Dim6_Reco_Fit","sub_path":"plot_result.py","file_name":"plot_result.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35128711839","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'authorization'\nurlpatterns = [\n    # ex: /authorization/1\n    path('campaign/', views.CampaignDetailView.as_view(), name='campaign_detail'),\n    path('campaign/add', views.CampaignCreateView.as_view(), name='campaign_create'),\n    path('campaign/set_active/', views.set_campaign, name='set_campaign'),\n    path('character/', views.PlayerCharacterDetailView.as_view(), name='character_detail'),\n    path('character/add', views.PlayerCharacterCreateView.as_view(), name='character_create'),\n    path('character/set_active/', views.set_character, name='set_character'),\n    path('permissions/add/', views.PermissionsCreateView.as_view(), name='permissions_add'),\n    path('permissions//', views.PermissionsUpdateView.as_view(), name='permission_change'),\n]\n","repo_name":"TrevorBrennan/DnD-Agroth","sub_path":"dndsite/authorization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28691566054","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\nimport random\r\n\r\n###############Batch Gradient Descent################# \r\ndef BGD(x,y,yita,exp):\r\n    m = len(x)\r\n    x_b = np.c_[np.ones((m, 1)), x]\r\n    w = np.random.randn(2)\r\n\r\n    n=0\r\n    while True:\r\n        n = n+1\r\n        grad = x_b.T.dot(x_b.dot(w)-y)\r\n        w = w - yita*grad \r\n        error = np.dot(x_b.dot(w)-y,x_b.dot(w)-y)/m\r\n        if error 1:\n    \n        ProcessCode = sys.argv[1]\n    \n    else:\n        print(\"Please provide the 'process_code' parameter when calling LaunchProcess.py\")\n        # process_code =\"Respect_Procedure\"\n        # process_code =\"Distance_DOM\"\n        # process_code =\"Track\"\n        # process_code =\"Plot\"\n        # process_code = \"DailyReports\"\n        ProcessCode = \"Plaintes\"\n        ProcessCode = \"Departure\"\n\n    launch_update(ProcessCode)\n\n    # See how the user should perform the Check Relevant step. Remember to change the ini param variable SOURCE_FOR_OLD_STATUS_RELEVANT if we move to Firebase. Maybe create a CheckRelevant location in Firebase with Checked and To_Do subfolders. If a key is in Checked, do not change its status\n    \n    ","repo_name":"Cocor31/ProjectAvions","sub_path":"LaunchProcess.py","file_name":"LaunchProcess.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"7285689247","text":"class Solution(object):\n    def containsDuplicate(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: bool\n        \"\"\"\n        # sets cannot have duplicates\n        # if item exists in the set, cannot be added -> return True\n        # otherwise, return False\n        sets = set()\n        for i in nums:\n            if i not in sets:\n                sets.add(i)\n            else:\n                return True\n        return False\n        # another faster runtime solution\n        # return (len(nums)) != (len(set(nums)))","repo_name":"cwong6854/LeetCode","sub_path":"217-contains-duplicate/217-contains-duplicate.py","file_name":"217-contains-duplicate.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"34411700290","text":"import numpy as np\nfrom numba import jit\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom random import sample as sp\nfrom enum import Enum\nimport sys\nfrom numbasom import simplesom as SOM\n\n\nclass DeepSom:\n    def __init__(self, n_epoch):\n        self.n_epoch = n_epoch\n        self.soms = set()\n\n    def add_som(self, domain):\n        self.soms.add(domain)\n\n    def get_root(self):\n        return None\n\n    def train(self, data):\n        # Organise soms into queue so they are trained in the right order\n        training_queue = self.sort_soms()\n        final_som = None\n        counter = 0\n        # Start training\n        print(training_queue)\n        while len(training_queue) > 0:\n            som = training_queue.pop(0)\n            model = som.initialise_som(self.n_epoch)\n\n            if som.domain_type is DomainType.LEAF:\n                # Extract data with correct dimensions\n                training_data = self.extract_input_data(data, som.dimensions)\n            else:\n                # Extract weight lists from children soms and concatenate them together\n                training_data = self.extract_children_data(som).tolist()\n\n            for _ in range(self.n_epoch):\n                model.learn(sp(training_data, 1))\n\n            # Remember the last model (which would be the root SOM)\n            if len(training_queue) == 0:\n                final_som = model\n\n        # Return root som\n        return final_som\n\n    def extract_children_data(self, som):\n        weights = []\n        for child in som.children:\n            child_weights = child.get_som().dump_weight_list()\n            weights.append(child_weights)\n        concat_weights = np.concatenate(weights)\n        return concat_weights\n\n    def extract_input_data(self, data, dimensions):\n        final_data = []\n        for line in data:\n            line_data = [line[index] for index in dimensions]\n            final_data.append(line_data)\n        return final_data\n\n    # Sort SOMs by DFS\n    def sort_soms(self):\n        visited = {som : False for som in self.soms}\n        queue = []\n        for som in self.soms:\n            if not visited[som]:\n                visited, queue = self.visit_som(som, visited, queue)\n        return queue\n\n    def visit_som(self, som, visited, queue):\n        visited[som] = True\n        # Visit children first\n        for child in som.children:\n            if not visited[child]:\n                visited, queue = self.visit_som(child, visited, queue)\n        # Then add self to queue\n        queue.append(som)\n        return visited, queue\n\n\nclass DomainType(Enum):\n    LEAF = 1\n    NON_LEAF = 2\n\n\nclass Domain:\n    def __init__(self, lattice_width, lattice_height, domain_type):\n        self.lattice_width = lattice_width\n        self.lattice_height = lattice_height\n        self.domain_type = domain_type\n        self.som = None\n\n        # Dimensions is an array of indices of which dimensions this domain involves\n        
self.dimensions = set()\n\n # Children is a set of Domains that the current Domain is based on\n self.children = set()\n\n def define_dimensions(self, dimensions):\n for index in dimensions:\n self.dimensions.add(index)\n\n def add_child(self, domain):\n self.children.add(domain)\n\n def initialise_som(self, n_epoch):\n if len(self.dimensions) == 0:\n self.som = SOM(self.lattice_width, self.lattice_height, self.find_largest_dimension_in_children(), init_epoch=n_epoch)\n else:\n self.som = SOM(self.lattice_width, self.lattice_height, len(self.dimensions), init_epoch=n_epoch)\n return self.som\n\n def find_largest_dimension_in_children(self):\n largest_dimension = 0\n for child in self.children:\n if len(child.dimensions) > largest_dimension:\n largest_dimension = len(child.dimensions)\n return largest_dimension\n\n def get_som(self):\n return self.som\n\n\ndatastr = [l.strip().split(',') for l in open(sys.argv[1]).readlines()]\ndata = [[float(c) for c in e] for e in datastr]\n\ninit_epoch = 200\ndeep_som = DeepSom(init_epoch)\n\n# User defining domains for the base layer\ndomain_a = Domain(100, 100, DomainType.LEAF)\ndomain_a.define_dimensions((0, 1))\ndeep_som.add_som(domain_a)\nprint(\"a:\", domain_a)\n\ndomain_b = Domain(100, 100, DomainType.LEAF)\ndomain_b.define_dimensions((1, 2))\ndeep_som.add_som(domain_b)\nprint(\"b:\", domain_b)\n\n# User defining domains for upper layers\ndomain_c = Domain(200, 200, DomainType.NON_LEAF)\ndomain_c.add_child(domain_a)\ndomain_c.add_child(domain_b)\ndeep_som.add_som(domain_c)\nprint(\"c:\", domain_c)\n\n# domain_d = Domain(init_epoch, 100, 100, DomainType.NON_LEAF)\n# domain_d.add_child(domain_a)\n# domain_d.add_child(domain_b)\n# deep_som.add_som(domain_d)\n\n# domain_e = Domain(init_epoch, 100, 100, DomainType.NON_LEAF)\n# domain_e.add_child(domain_c)\n# domain_e.add_child(domain_d)\n# deep_som.add_som(domain_e)\n\n# Train the deep SOM\nroot_som = deep_som.train(data)\n\nweights = root_som.dump_weight_list()\n\n# Graph the resulting weights (doesn't work --> bug at line 174 where axes is only size 2)\nfig = plt.figure()\nax = fig.add_subplot(projection='3d')\n\naxes = list(zip(*weights))\naxes_o = list(zip(*data))\nprint(len(axes_o))\nax.set_box_aspect((np.ptp(axes[0]), np.ptp(axes[1]), np.ptp(axes[2])))\n \nax.scatter(*axes, marker='o', s=1)\nax.scatter(*axes_o, marker='o', s=1.4, color=\"magenta\")\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Z')\n\nplt.savefig(f\"deep_som.png\")","repo_name":"takatsuka/DeepSOM","sub_path":"playgrounds/jenny_deep_som/deep_som.py","file_name":"deep_som.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"40749020300","text":"from django.urls import path\nfrom . 
import views\n\n\napp_name = 'cart'\n\nurlpatterns = [\n path('', views.cart_home, name='cart_home'),\n path('update/', views.cart_update, name='cart_update'),\n path('checkout/', views.checkout_home, name='checkout'),\n path('orders/', views.orders, name='orders'),\n path('orders//', views.orders_details, name='orders_details'),\n path('sells_activity/', views.sells_activity, name='sells_activity'),\n]\n","repo_name":"uprm-inso-4101-2020-2021-s2/semester-project-team-5","sub_path":"cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"23732889934","text":"from pathlib import Path\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.optim import Adam\nfrom sklearn.metrics import accuracy_score\nfrom tqdm import tqdm\n\nfrom utils.logger import Logger\n\n\nclass MaskClassifier(nn.Module):\n\n def __init__(self, train_dataset: Dataset = None, val_dataset: Dataset = None,\n batch_size: int = 32, n_epochs: int = 10, device: str = \"cuda\",\n save_path: Path = Path(\"checkpoints\"), logger: Logger = None):\n super().__init__()\n self.batch_size = batch_size\n self.n_epochs = n_epochs\n self.device = \"cuda\" if device == \"cuda\" and torch.cuda.is_available() else \"cpu\"\n self.logger = logger\n self.save_p = save_path\n\n # datasets\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n\n # dataloaders\n if train_dataset:\n self.train_dataloader = DataLoader(train_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=2)\n if val_dataset:\n self.val_dataloader = DataLoader(val_dataset,\n batch_size=self.batch_size,\n num_workers=2)\n\n # NNet\n self.convlayer1 = nn.Sequential(\n nn.Conv2d(3, 32, kernel_size=(3, 3), padding=(1, 1)),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2))\n )\n self.convlayer2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=(1, 1)),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2))\n )\n self.convlayer3 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=(3, 3),\n padding=(1, 1), stride=(3, 3)),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2))\n )\n self.head = nn.Sequential(\n nn.Linear(2048, 1024),\n nn.ReLU(),\n nn.Linear(1024, 2)\n )\n\n # enforce Kaiming initialization\n for sequential in [self.convlayer1, self.convlayer2, self.convlayer3, self.head]:\n for layer in sequential.children():\n if isinstance(layer, (nn.Linear, nn.Conv2d)):\n nn.init.kaiming_uniform_(layer.weight)\n\n # class weighting to counter class imbalance\n if self.train_dataset:\n n_masked = sum(self.train_dataset.df[\"mask\"] == 1)\n n_nonmasked = sum(self.train_dataset.df[\"mask\"] == 0)\n self.class_weigthing = 1 - \\\n torch.tensor([n_masked, n_nonmasked],\n dtype=torch.float) / (n_masked + n_nonmasked)\n else:\n self.class_weigthing = torch.tensor([1, 1], dtype=torch.float)\n\n # weighted loss function\n self.loss_fn = nn.CrossEntropyLoss(weight=self.class_weigthing)\n\n # optimizer\n self.optimizer = Adam(self.parameters(),\n lr=0.0001)\n\n # move module parameters to device\n self.to(self.device)\n\n def forward(self, x):\n y = self.convlayer1(x)\n y = self.convlayer2(y)\n y = self.convlayer3(y)\n y = y.view(-1, 2048)\n y = self.head(y)\n return y\n\n def training_step(self, batch):\n images, labels = batch[\"image\"].to(\n self.device), batch[\"mask\"].to(self.device)\n self.train()\n self.optimizer.zero_grad()\n outputs = self.forward(images)\n loss_batch = 
self.loss_fn(outputs, labels)\n loss_batch.backward()\n self.optimizer.step()\n return {\"loss_batch\": loss_batch.item()}\n\n def training_epoch(self, epoch):\n train_loss = 0.\n for batch in tqdm(self.train_dataloader, desc=\"epoch {} - train\".format(epoch)):\n res = self.training_step(batch)\n train_loss += res[\"loss_batch\"]\n train_loss = train_loss / len(self.train_dataloader)\n return {\"train_loss\": train_loss}\n\n def validation_step(self, batch):\n images, labels = batch[\"image\"].to(\n self.device), batch[\"mask\"].to(self.device)\n self.eval()\n with torch.no_grad():\n outputs = self.forward(images)\n loss_batch = self.loss_fn(outputs, labels)\n _, classes = torch.max(outputs, dim=1)\n accuracy = accuracy_score(classes.cpu(), labels.flatten().cpu())\n return {\"val_loss_batch\": loss_batch.item(), \"accuracy\": accuracy}\n\n def validation_epoch(self, epoch):\n val_loss = 0.\n val_accuracy = 0.\n for batch in tqdm(self.val_dataloader, desc=\"epoch {} - val\".format(epoch)):\n res = self.validation_step(batch)\n val_loss += res[\"val_loss_batch\"]\n val_accuracy += res[\"accuracy\"]\n val_loss = val_loss / len(self.val_dataloader)\n val_accuracy = val_accuracy / len(self.val_dataloader)\n return {\"val_loss\": val_loss, \"val_accuracy\": val_accuracy}\n\n def fit(self):\n best_solution = {\"epoch\": 0, \"val_accuracy\": 0.}\n for epoch in range(self.n_epochs):\n res_train = self.training_epoch(epoch)\n res_val = self.validation_epoch(epoch)\n if self.logger is not None:\n self.logger.update(res_train, res_val)\n self.save(epoch)\n print(\"train loss: {:1.3E} | val loss: {:1.3E} - val acc: {}\".format(res_train[\"train_loss\"],\n res_val[\"val_loss\"],\n res_val[\"val_accuracy\"]))\n if res_val[\"val_accuracy\"] > best_solution[\"val_accuracy\"]:\n best_solution[\"epoch\"] = epoch\n best_solution[\"val_accuracy\"] = res_val[\"val_accuracy\"]\n return best_solution\n\n def save(self, epoch: int):\n self.save_p.mkdir(parents=True, exist_ok=True)\n model_name = \"model_epoch{:03}.ckpt\".format(epoch)\n torch.save(self.state_dict(), self.save_p / model_name)\n","repo_name":"alaflaquiere/mask-detector","sub_path":"mask_detector/mask_classifier.py","file_name":"mask_classifier.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"30388407294","text":"trial = int(input())\n\ndef score(result):\n temp = 0\n sum = 0\n for i in range(len(result)):\n if result[i] == 'O':\n temp = temp + 1\n sum = sum + temp\n else:\n temp = 0\n return sum \n\nfor i in range(0, trial):\n result = input()\n answer = score(result)\n print(answer)\n","repo_name":"praymeta/Code-Up-Algorithm","sub_path":"BOJ_8958_OXQuiz.py","file_name":"BOJ_8958_OXQuiz.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"73515505180","text":"import math\nfrom constants import * \n\ndef isco(BHmass,spin):\n\t# BHmass=BHmass*solarMass\n\n\tif spin == \"prograde\":\n\t\tfactor=1.0\n\tif spin == \"no spin\":\n\t\tfactor=6.0\n\tif spin == \"retrograde\":\n\t\tfactor=9.0\n\n\tgravitationalRadius=(G*BHmass)/(c**2)\n\tisco=factor*gravitationalRadius\n\t\n\treturn(isco)\n\ndef gravRadius(BHmass):\n\t# BHmass=BHmass*solarMass\n\tradius=(2.0*G*BHmass)/(c**2)\n\t\n\treturn(radius)\n\ndef mass(isco,spin):\n\tif spin == \"prograde\":\n\t\tfactor=1.0\n\tif spin == \"no spin\":\n\t\tfactor=6.0\n\tif spin == 
\"retrograde\":\n\t\tfactor=9.0\n\n\tBHmass=(isco*(c**2))/(factor*G)\n\n\treturn(BHmass)\n\ndef acrretionRate(BHmass,ratio,isco):\n\t# BHmass=BHmass*solarMass\n\tMdot=(ratio)*((8.0*math.pi*3.0e8*mProton*isco)/sigT) \t\t #accretion rate : kg/s\n\n\treturn(Mdot)\n\ndef info():\n\n\tprint(\"function: isco(BHmass: solar masses, spin= 'prograde' or'retrograde' or 'no spin') returns isco size in meters. \")\n\tprint(\"\")\n\tprint(\"function: gravRadius(BHmass: solar masses) returns gravitational radius. \")\n\tprint(\"\")\n\tprint(\"function: mass(isco in meters,spin='prograde' or 'retrograde' or 'no spin') returns mass given isco im meters and spin\")\n\tprint(\"\")\n\tprint(\"function: accretionRate(BHmass: solar masses, Eddington ratio,isco: meters) returns accretion rate\")\n\tprint(\"\")","repo_name":"jgphy/lensingResearch","sub_path":"quasar.py","file_name":"quasar.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"20963115231","text":"from flask_restplus import Resource\nfrom request_handlers.orchestrator import orchestrator\nfrom flask import jsonify, request, abort\nfrom flask import Flask\nfrom flask_restplus import Api\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\napi = Api(app=app)\n\n\n@api.param('path_file', 'image_path', _in='formData')\n@api.route('/ocrscanhandwriting/')\nclass Ocrhandwriting(Resource):\n def post(self):\n path_file = request.form.get('path_file')\n orc = orchestrator()\n result = orc.scan_ocr_handwriting(path_file)\n return jsonify(result=result)\n\n\n@api.param('path', 'table_path', _in='formData')\n@api.route('/ocrscantable/')\nclass Ocrtable(Resource):\n def post(self):\n path = request.form.get('path')\n orc = orchestrator()\n result = orc.scan_ocr_table(path)\n if result is None:\n abort(401)\n else:\n return jsonify(result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"MasihKarimi/final-flask-app","sub_path":"access_point.py","file_name":"access_point.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"40430962771","text":"from normalization import normalization\r\n# from plots import plot_feature\r\nimport torch\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.manifold import TSNE\r\nfrom normalization import *\r\n\r\ndef norm_expand_tensor(feature):\r\n assert len(feature.shape) == 2\r\n mean = feature.mean(dim=0, keepdim=True)\r\n var = feature.std(dim=0, keepdim=True)\r\n return (feature - mean) / (var + 1e-6)\r\n\r\ndef LDA_loss_of_a_pair(mu1, mu2, sigma1_vec, sigma2_vec): #need to double check\r\n '''\r\n mu1, mu2 are two mean vectors of dim d for class 1 and 2\r\n sigma1, sigma2 are two variance matrix of dim nd*d for class 1 and 2\r\n '''\r\n sigma_sum = sigma1_vec + sigma2_vec + 1e-6 # d * 1\r\n w = (1/sigma_sum).mul(mu1-mu2) # d * 1\r\n J = w.mul(mu1-mu2).sum()\r\n #print(\"mu=\", mu1 , mu2, \"sigma=\", sigma1_vec, sigma2_vec, \"w=\", w)\r\n return J\r\n\r\ndef LDA_loss(H, Y_onehot, nb_each_class_inv_mat, norm_or_not=True):\r\n '''\r\n H is representation matrix of dim n * d\r\n Y_onehot is a one-hot matrix of dim n * c, c is number of class\r\n nb_each_class_inv_mat is a diagonal matrix of dim c * c\r\n this loss encourage node in different class to be as linear seperable as possible\r\n '''\r\n result = 0\r\n weight_sum = 0\r\n if norm_or_not:\r\n # do expand_norm 
won't effect LDA_loss\r\n # print(norm_or_not)\r\n H = norm_expand_tensor(H)\r\n\r\n # step1: get shape\r\n nb_nodes = Y_onehot.shape[0]\r\n nb_class = Y_onehot.shape[1]\r\n\r\n # step2: get mean_mat, each column is a mean vector for a class\r\n H_T = torch.transpose(H, 0, 1) # transpose of matrix H\r\n sum_mat = torch.mm(H_T, Y_onehot) # d * c\r\n mean_mat = torch.mm(sum_mat, nb_each_class_inv_mat) # d * c\r\n\r\n # step3: get var_mat, each colums is a variance vector for a class\r\n '''\r\n var(X) = mean(X^2) - mean(X)^2 \r\n '''\r\n H2 = H.mul(H) # each item in H2 is the square of corresponding item in H\r\n H2_T = torch.transpose(H2, 0, 1) # transpose of matrix H2\r\n sum_mat2 = torch.mm(H2_T, Y_onehot) # d * c\r\n mean_mat2 = torch.mm(sum_mat2, nb_each_class_inv_mat) # d * c\r\n var_mat = mean_mat2 - mean_mat.mul(mean_mat) # d * c\r\n var_mat = torch.relu(var_mat)\r\n\r\n # step4: for each pair, get weight and score\r\n for i in range(nb_class):\r\n for j in range(i + 1, nb_class):\r\n weight = 1 / nb_each_class_inv_mat[i][i].cpu().numpy() + 1 / nb_each_class_inv_mat[j][j].cpu().numpy()\r\n score = LDA_loss_of_a_pair(mean_mat[:, i], mean_mat[:, j], var_mat[:, i], var_mat[:, j])\r\n weight_sum = weight_sum + weight\r\n result = result + weight * score\r\n\r\n result = result / weight_sum\r\n\r\n return result","repo_name":"fxsxjtu/GraTO","sub_path":"LDA_loss.py","file_name":"LDA_loss.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"} +{"seq_id":"26545564756","text":"from commands.command import Command\n\nclass GiveCommand(Command):\n def __init__(self, info, description):\n Command.__init__(self, info, description)\n \n def execute(self, game, command):\n if command.get_command_length() > 2:\n player = game.get_player()\n item = player.get_item(command.get_word(1))\n character = player.get_room().get_character(command.get_word(2))\n if character is not None and item is not None:\n player.drop_item(item.get_name())\n player.set_weight()\n character.pickup_item(item)\n character.set_weight()\n print(\"\\nYou give the %s to %s.\\n\" % (command.get_word(1), command.get_word(2)))\n elif character is None:\n print(\"\\n`%s' is not in the room.\\n\" % command.get_word(2))\n return\n elif item is None:\n print(\"\\nYou are not carrying a `%s'.\\n\" % command.get_word(1))\n return\n elif command.get_command_length() == 1:\n print(\"\\nGive %s to who?\\n\" % command[1].get_name())\n else:\n print(\"\\nGive what to who?\\n\")\n \n def usage(self):\n return \"Usage: give [item] [character]\"\n","repo_name":"war4uthor/zuul-python","sub_path":"game/player_commands/give.py","file_name":"give.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"35393058017","text":"from _json import make_encoder\nfrom builtins import property\n\nfrom django.db import models\nfrom core import thumnails\nimport os\n\n\n# Create your models here.\n\nTYPES_OF_GOODS = ((0, 'Двери'), (1, 'Акссесуары'))\n\nclass TypesOfGoods(models.Model):\n name = models.CharField('Название', max_length=150)\n\n class Meta:\n verbose_name = 'Тип товара'\n verbose_name_plural = 'Типы товара'\n\n def __str__(self):\n return str(self.name)\n\nclass Countries(models.Model):\n name = models.CharField('Название', max_length=150)\n\n class Meta:\n verbose_name = 'Страна'\n verbose_name_plural = 'Страны'\n\n def __str__(self):\n return 
str(self.name)\n\nclass Goods(models.Model):\n article = models.CharField('Артикул',max_length=50)\n name = models.CharField('Название', max_length=150)\n desc = models.TextField('Описание',null=True, blank=True)\n type = models.ForeignKey(TypesOfGoods, verbose_name='Тип товара')\n foto = models.ImageField('Фото', upload_to='goods_foto')\n\n class Meta:\n verbose_name = 'Товар'\n verbose_name_plural = 'Товары'\n\n def __str__(self):\n return '%s - %s' % (self.article, self.name)\n\n def save(self, force_insert=False, force_update=False, using=None):\n try:\n obj = Goods.objects.get(id=self.id)\n print(obj)\n if obj and (obj.foto.path != self.foto.path):\n thumnails.delete_thumbnail(obj.foto.path)\n except:\n pass\n\n super(Goods, self).save()\n thumnails.make_thumbnail(self.foto.path)\n\n @property\n def thumnail_img(self):\n img_url = self.foto.url\n thumnail_url = thumnails.getThumbnail(img_url, False)\n return thumnail_url if os.path.exists(thumnails.getThumbnail(self.foto.path)) else img_url\n\n def delete(self, using=None):\n try:\n obj = Goods.objects.get(id=self.id)\n thumnails.delete_thumbnail(obj.foto.path)\n obj.foto.delete()\n except (Goods.DoesNotExist, ValueError):\n pass\n super(Goods, self).delete()\n\n\nclass Colors(models.Model):\n name = models.CharField('Цвет', max_length=150)\n image = models.ImageField('Фото', upload_to='colors_foto')\n\n class Meta:\n verbose_name = 'Цвет'\n verbose_name_plural = 'Цвета'\n\n def __str__(self):\n return self.name\n\n def save(self, force_insert=False, force_update=False, using=None):\n try:\n obj = Colors.objects.get(id=self.id)\n print(obj)\n if obj and (obj.image.path != self.image.path):\n thumnails.delete_thumbnail(obj.image.path)\n except:\n pass\n\n super(Colors, self).save()\n thumnails.make_thumbnail(self.image.path)\n\n def delete(self, using=None):\n try:\n obj = Colors.objects.get(id=self.id)\n thumnails.delete_thumbnail(obj.image.path)\n obj.image.delete()\n except (Colors.DoesNotExist, ValueError):\n pass\n super(Colors, self).delete()\n\nclass GoodsColors(models.Model):\n good = models.ForeignKey(Goods, verbose_name='Товар', related_name='goods_colors')\n color = models.ForeignKey(Colors, verbose_name='Цвет', null = True, blank = True)\n\n class Meta:\n verbose_name = 'Товар с цветом'\n verbose_name_plural = 'Товары с цветами'\n\n def __str__(self):\n return '%s - %s' % (str(self.good), self.color.name)\n\nclass Properties(models.Model):\n name = models.CharField('Имя', max_length=150)\n\n class Meta:\n verbose_name = 'Свойство'\n verbose_name_plural = 'Свойства'\n ordering = ('name', )\n\n def __str__(self):\n return str(self.name)\n\nclass Values(models.Model):\n good_property = models.ForeignKey(Properties, verbose_name=\"Свойство\", related_name=\"property_values\")\n value = models.CharField('Значение', max_length=150)\n\n class Meta:\n verbose_name = 'Значения свойств'\n verbose_name_plural = 'Значение свойства'\n\n def __str__(self):\n return \"%s - %s\" % (str(self.good_property), self.value)\n\nclass GoodsProperties(models.Model):\n good = models.ForeignKey(Goods, verbose_name='Товар', related_name='goods_properties')\n value = models.ForeignKey(Values, verbose_name=\"Значение свойства\", related_name='good_value')\n\n class Meta:\n verbose_name = 'Свойство товара'\n verbose_name_plural = 'Свойства товаров'\n ordering = ('value',)\n\n def __str__(self):\n return \"%s - %s\" % (str(self.good), 
self.value)\n\n","repo_name":"BorisMishunin/doors","sub_path":"web/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"29781672415","text":"import itertools\nimport os\nimport pickle\nimport time\nimport math\n\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter \nfrom torch.optim.lr_scheduler import LambdaLR\nfrom transformers.optimization import get_constant_schedule_with_warmup\n\nimport cs\nfrom . import config\nfrom tools.logger import Logger\nfrom tools.utils import save_model, log_config, to_device, get_training_set_loader, StopTraining, only_main_process\nfrom .predict import bert_classifier_validation\nfrom .utils import get_model, get_training_set\n\nlogger: Logger\nstart_time = int(time.time())\nlast_log_time = start_time\nlast_save_time = start_time\nloss_sum = 0\nloss_cnt = 0\nbatch_cnt = 0\n\n\ndef generate_data(dataloader, sampler):\n global logger, batch_cnt\n for epoch in itertools.count():\n logger.info(f'epoch: {epoch} begin.')\n if sampler is not None:\n sampler.set_epoch(epoch)\n for data in dataloader:\n yield data\n batch_cnt += 1\n logger.info(f'epoch: {epoch} end.')\n\n\ndef train_one_batch(model, loss_fn, opt, data):\n model.train()\n opt.zero_grad()\n\n char_ids, mask, char_position_ids, word_position_ids, labels = [d.to(cs.device) for d in data]\n output = model(\n char_ids,\n attention_mask=mask,\n char_position_ids=char_position_ids,\n word_position_ids=word_position_ids,\n )\n logits = output[0]\n loss = loss_fn(logits, labels)\n loss.backward()\n opt.step()\n\n return loss.item()\n\n\ndef after_batch(model, opt, loss, scheduler):\n global loss_sum, loss_cnt\n step_no = scheduler.last_epoch\n batch_cnt_in_step = batch_cnt % config.batches_per_step\n\n if cs.rank == 0:\n loss_sum += loss\n loss_cnt += 1\n print('\\r', step_no, batch_cnt_in_step,\n f'{(loss_sum / loss_cnt):.5f} ', end='')\n\n if batch_cnt_in_step == config.batches_per_step - 1:\n after_step(step_no, model, opt)\n scheduler.step()\n\n\n@only_main_process\ndef after_step(step_no, model, opt):\n \n step = step_no\n global loss_sum, loss_cnt, last_log_time, last_save_time\n if time.time() - last_log_time > config.log_interval:\n logger.info(f'step_no: {step_no}, avg_loss: {(loss_sum / loss_cnt):.7f}, '\n f'current lr: {opt.param_groups[0][\"lr\"]}')\n writer = SummaryWriter('./tensorboard')\n writer.add_scalar('train/avg_loss', (loss_sum / loss_cnt), step)\n writer.add_scalar('train/lr', opt.param_groups[0][\"lr\"], step)\n val_result, val_value = bert_classifier_validation(model)\n test_result, test_value = bert_classifier_validation(model, test_set='test_set')\n logger.info(f'step_no: {step_no}, val result: {val_result}')\n logger.info(f'step_no: {step_no}, test result: {test_result}')\n\n dev_overall_acc, dev_overall_p, dev_overall_r, dev_overall_f1 = val_value[0], val_value[1], val_value[2], val_value[3]\n dev_high_acc, dev_high_p, dev_high_r, dev_high_f1 = val_value[4], val_value[5], val_value[6], val_value[7]\n dev_middle_acc, dev_middle_p, dev_middle_r, dev_middle_f1 = val_value[8], val_value[9], val_value[10], val_value[11]\n dev_few_acc, dev_few_p, dev_few_r, dev_few_f1 = val_value[12], val_value[13], val_value[14], val_value[15]\n\n writer.add_scalar('overall/dev_overall_acc', dev_overall_acc, step)\n writer.add_scalar('overall/dev_overall_p', dev_overall_p, step)\n writer.add_scalar('overall/dev_overall_r', dev_overall_r, step)\n 
writer.add_scalar('overall/dev_overall_f1', dev_overall_f1, step)\n writer.add_scalar('high/dev_high_acc', dev_high_acc, step)\n writer.add_scalar('high/dev_high_p', dev_high_p, step)\n writer.add_scalar('high/dev_high_r', dev_high_r, step)\n writer.add_scalar('high/dev_high_f1', dev_high_f1, step)\n writer.add_scalar('middle/dev_middle_acc', dev_middle_acc, step)\n writer.add_scalar('middle/dev_middle_p', dev_middle_p, step)\n writer.add_scalar('middle/dev_middle_r', dev_middle_r, step)\n writer.add_scalar('middle/dev_middle_f1', dev_middle_f1, step)\n writer.add_scalar('few/dev_few_acc', dev_few_acc, step)\n writer.add_scalar('few/dev_few_p', dev_few_p, step)\n writer.add_scalar('few/dev_few_r', dev_few_r, step)\n writer.add_scalar('few/dev_few_f1', dev_few_f1, step)\n\n test_overall_acc, test_overall_p, test_overall_r, test_overall_f1 = test_value[0], test_value[1], test_value[2], test_value[3]\n test_high_acc, test_high_p, test_high_r, test_high_f1 = test_value[4], test_value[5], test_value[6], test_value[7]\n test_middle_acc, test_middle_p, test_middle_r, test_middle_f1 = test_value[8], test_value[9], test_value[10], test_value[11]\n test_few_acc, test_few_p, test_few_r, test_few_f1 = test_value[12], test_value[13], test_value[14], test_value[15] \n\n writer.add_scalar('overall/test_overall_acc', test_overall_acc, step)\n writer.add_scalar('overall/test_overall_p', test_overall_p, step)\n writer.add_scalar('overall/test_overall_r', test_overall_r, step)\n writer.add_scalar('overall/test_overall_f1', test_overall_f1, step)\n writer.add_scalar('high/test_high_acc', test_high_acc, step)\n writer.add_scalar('high/test_high_p', test_high_p, step)\n writer.add_scalar('high/test_high_r', test_high_r, step)\n writer.add_scalar('high/test_high_f1', test_high_f1, step)\n writer.add_scalar('middle/test_middle_acc', test_middle_acc, step)\n writer.add_scalar('middle/test_middle_p', test_middle_p, step)\n writer.add_scalar('middle/test_middle_r', test_middle_r, step)\n writer.add_scalar('middle/test_middle_f1', test_middle_f1, step)\n writer.add_scalar('few/test_few_acc', test_few_acc, step)\n writer.add_scalar('few/test_few_p', test_few_p, step)\n writer.add_scalar('few/test_few_r', test_few_r, step)\n writer.add_scalar('few/test_few_f1', test_few_f1, step) \n\n loss_sum, loss_cnt = 0, 0\n last_log_time = time.time()\n if time.time() - last_save_time > config.checkpoint_interval:\n save_model(model, f'{config.task_name}_{start_time}', f'model_{step_no:05}.pt')\n save_model(opt, f'{config.task_name}_{start_time}', f'opt.pt')\n logger.info(f'step_no: {step_no}, save checkpoint!')\n last_save_time = time.time()\n\n\ndef train():\n global logger, batch_cnt\n logger = Logger(\n config.task_name,\n format_str='%(asctime)s - %(message)s',\n file_path=os.path.join(cs.LOG_DIR, f'{config.task_name}_{start_time}.log')\n )\n log_config(logger, vars(config))\n \n model = get_model()\n if config.last_training_time == 0 and cs.rank == 0:\n os.makedirs(os.path.join(cs.SAVED_MODEL_DIR, f'{config.task_name}_{start_time}'), exist_ok=True)\n with open(os.path.join(cs.SAVED_MODEL_DIR, f'{config.task_name}_{start_time}', f'model_conf.pkl'), 'wb') as f:\n pickle.dump(model.config, f)\n model = to_device(model)\n\n opt = torch.optim.AdamW(\n [{'params': model.parameters(), 'lr': config.init_lr, 'initial_lr': config.init_lr}],\n lr=config.init_lr,\n )\n if config.last_training_time != 0:\n opt.load_state_dict(torch.load(\n os.path.join(\n cs.SAVED_MODEL_DIR,\n f'{config.task_name}_{config.last_training_time}',\n 
f'opt.pt'\n ), map_location=torch.device(\"cpu\")\n ))\n\n scheduler = get_constant_schedule_with_warmup(opt, config.num_warmup_steps, config.last_step)\n\n training_set = get_training_set()\n dataloader, sampler = get_training_set_loader(\n training_set,\n batch_size=config.batch_size,\n num_workers=config.dataloader_workers\n )\n \n nor2len_train_part_dict = pickle.load(open(cs.save_pkl_root+'nor2len_train_part_dict.pkl', 'rb'))\n afid2nor = pickle.load(open(cs.save_pkl_root+\"afid2nor.pkl\", \"rb\"))\n id_to_cls = pickle.load(open(config.id_to_cls_file, 'rb'))\n cls_to_id = { v:k for k,v in id_to_cls.items() }\n weights = [(1./nor2len_train_part_dict[afid2nor[cls_to_id[i]]])**(config.reweight_exp) for i in range(len(cls_to_id))]\n weights = torch.tensor(weights).to(torch.device(\"cuda\"))\n loss_fn = torch.nn.CrossEntropyLoss(weight=weights)\n\n try:\n for data in generate_data(dataloader, sampler):\n loss = train_one_batch(\n model=model,\n loss_fn=loss_fn,\n opt=opt,\n data=data,\n )\n after_batch(model, opt, loss, scheduler)\n except (StopTraining, KeyboardInterrupt):\n logger.info('Stop training.')\n","repo_name":"LUMIA-Group/LoT-insts","sub_path":"code/bert_character_level/ann_new/src/tasks/bert_classifier/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8699,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"} +{"seq_id":"9250352324","text":"\"\"\"\ncelery_app.py\n\ncreated by dromakin as 03.05.2021\nProject app\n\"\"\"\n\n__author__ = 'dromakin'\n__maintainer__ = 'dromakin'\n__credits__ = ['dromakin', ]\n__status__ = 'Development'\n__version__ = '20210503'\n\nfrom celery import Celery\n\ncelery_app = Celery(\"worker\", broker=\"amqp://guest@queue//\")\n\ncelery_app.conf.task_routes = {\"app.worker.test_celery\": \"main-queue\"}\n","repo_name":"dromakin/db_api_protection","sub_path":"backend/app/app/core/celery_app.py","file_name":"celery_app.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"32095077792","text":"import pickle\nimport os\nimport glob\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport subprocess\nfrom joblib import Parallel, delayed\n\n\n\n# THIS ASSUMES THAT stochastic_far_unmatched.py has been executed\n\n#islets = pickle.load(open('far_unmatched_islets.p', 'rb'))\n\n\nis_sorted = lambda a: np.all(a[:-1] <= a[1:])\n\ndirr = 'stochastic'\n\n\nn_pert = 50\n\nn_perm = 50\n\n\ndef single_job(job):\n\n fprefix = job[0]\n typ = job[1]\n pert = job[2]\n\n #print(fprefix)\n\n print(typ, fprefix, pert)\n\n for max_pert in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:\n\n\n islet_dirr = dirr + '/' + fprefix\n\n ##########################################\n # ad loops\n\n if typ == 'ad':\n pert_dirr = islet_dirr + '/Perturbations_ad'\n elif typ == 'b':\n pert_dirr = islet_dirr + '/Perturbations_b'\n\n\n max_pert_dirr = pert_dirr + '/Perturbation'+str(max_pert)\n\n # Cluster pts are not perturbed\n if typ == 'ad':\n cluster_pts_file = '../VerticesEdges/' + fprefix + '_bvertices_PH.csv'\n elif typ == 'b':\n cluster_pts_file = '../VerticesEdges/' + fprefix + '_advertices_PH.csv'\n\n #print(pert, end='\\r')\n\n this_pert_dirr = max_pert_dirr + '/Perturbation' + str(pert)\n\n if typ == 'ad':\n loop_verts_file = this_pert_dirr + '/advertices_PH.csv'\n loop_edges_file = this_pert_dirr + '/adedges_PH.csv'\n elif typ == 'b':\n loop_verts_file = this_pert_dirr + '/bvertices_PH.csv'\n 
loop_edges_file = this_pert_dirr + '/bedges_PH.csv'\n\n if not os.path.isfile(loop_verts_file):\n continue\n\n if not os.path.isfile(loop_edges_file):\n continue\n\n flag_processed = 1\n perm_dirr = this_pert_dirr + '/Permutations'\n if not os.path.isdir(perm_dirr):\n os.mkdir(perm_dirr)\n\n\n\n flag_processed = 1\n for perm in range(n_perm):\n this_perm_dirr = perm_dirr + '/Permutation'+str(perm)\n if not os.path.isdir(this_perm_dirr):\n flag_processed = 0\n os.mkdir(this_perm_dirr)\n break\n\n if typ == 'ad':\n new_undead_file = this_perm_dirr + '/adundead_PH.csv'\n elif typ == 'b':\n new_undead_file = this_perm_dirr + '/bundead_PH.csv'\n\n if os.path.isfile(new_undead_file):\n continue\n else:\n flag_processed = 0\n break\n\n if flag_processed == 1:\n continue\n\n if typ == 'ad':\n # Get triangles\n pert_triangles_file = this_pert_dirr + '/adtriangles_PH.csv'\n elif typ == 'b':\n pert_triangles_file = this_pert_dirr + '/btriangles_PH.csv'\n\n ## MAKE PERMUTATIONS OF EDGES\n loop_vertices = np.loadtxt(loop_verts_file, delimiter=',')\n loop_edges = np.loadtxt(loop_edges_file, delimiter=',')\n\n loop_triangles = np.loadtxt(pert_triangles_file, delimiter=',')\n \n if not len(loop_triangles):\n continue\n\n if loop_triangles.ndim == 1:\n loop_triangles = np.reshape(loop_triangles, (1, 3))\n\n\n\n loop_dist = []\n for edge in loop_edges:\n\n e1, e2 = edge\n\n x1,y1 = loop_vertices[int(e1)]\n x2,y2 = loop_vertices[int(e2)]\n\n loop_dist.append(round(math.sqrt((x1-x2)**2 + (y1-y2)**2), 0))\n\n loop_dist = np.array(loop_dist)\n\n uni, counts = np.unique(loop_dist, return_counts=True)\n possible_n_perm = 1\n for val in counts:\n possible_n_perm = possible_n_perm * math.factorial(val)\n\n local_n_perm = min(n_perm, possible_n_perm)\n edge_indices = np.array(list(range(len(loop_edges))))\n perm_map = np.zeros(len(loop_edges), dtype=int)\n\n all_permutations = []\n\n #print('Permuting...')\n\n perm_dirr = this_pert_dirr + '/Permutations'\n if not os.path.isdir(perm_dirr):\n os.mkdir(perm_dirr)\n\n perm = 0\n while (perm < local_n_perm):\n\n #print(perm, end='\\r')\n\n new_permutation = []\n\n edge_ptr = 0.\n for idx, val in enumerate(counts):\n permuted = np.random.permutation(edge_indices[int(edge_ptr):int(edge_ptr+val)])\n new_permutation += list(permuted)\n edge_ptr += val\n\n if new_permutation in all_permutations:\n continue\n\n all_permutations.append(new_permutation)\n\n this_edges = loop_edges[all_permutations[perm]]\n\n for new_idx, old_idx in enumerate(all_permutations[perm]):\n perm_map[old_idx] = new_idx\n\n this_perm_dirr = perm_dirr + '/Permutation'+str(perm)\n if not os.path.isdir(this_perm_dirr):\n os.mkdir(this_perm_dirr)\n\n # Save permutation\n np.savetxt(this_perm_dirr+'/permutation.csv'\\\n , all_permutations[perm]\\\n , delimiter=','\\\n , fmt='%d')\n\n\n # Save permuted edges\n this_edge_file = this_perm_dirr + '/edges.csv'\n np.savetxt(this_edge_file, this_edges, delimiter=',', fmt='%d')\n\n # Modify triangle boundaries with new edge-indices\n new_triangles = []\n triangle_dia = []\n for triangle in loop_triangles:\n e1, e2, e3 = triangle\n\n new_e1 = perm_map[int(e1)]\n new_e2 = perm_map[int(e2)]\n new_e3 = perm_map[int(e3)]\n\n new_triangle = [new_e1, new_e2, new_e3]\n\n # BOUNDARY HAS TO BE SORTED!!!!!!\n new_triangles.append(sorted(new_triangle, key=int))\n\n triangle_dia.append(max(new_triangle))\n\n new_triangles = np.array(new_triangles)\n\n triangle_dia = np.array(triangle_dia, dtype=int)\n\n idxs = np.argsort(triangle_dia)\n\n new_triangles = new_triangles[idxs]\n\n 
this_triangles_file = this_perm_dirr + '/triangles.csv'\n\n np.savetxt(this_triangles_file, new_triangles, delimiter=',', fmt='%d')\n\n perm += 1\n\n # Save undead\n if typ == 'ad':\n new_undead_file = this_perm_dirr + '/adundead_PH.csv'\n elif typ == 'b':\n new_undead_file = this_perm_dirr + '/bundead_PH.csv'\n\n subprocess.run([\"../get_undead.o\"\\\n , this_edge_file\\\n , this_triangles_file\\\n , new_undead_file\\\n , cluster_pts_file\\\n , loop_verts_file\\\n ])\n\n\n#for islet in islets:\n# single_islet(islet)\n# exit()\n\n\nfar_ad_mantles_file = open('far_ad_mantles.csv', 'r')\nfar_ad_mantles = far_ad_mantles_file.readline().split(',')[:-1]\nfar_ad_mantles = frozenset(far_ad_mantles)\n\nfar_b_mantles_file = open('far_b_mantles.csv', 'r')\nfar_b_mantles = far_b_mantles_file.readline().split(',')[:-1]\nfar_b_mantles = frozenset(far_b_mantles)\n\n\njobs = []\n\nfor fprefix in far_ad_mantles:\n\n for pert in range(n_pert):\n\n jobs.append([fprefix, 'ad', pert])\n\n\nfor fprefix in far_b_mantles:\n\n for pert in range(n_pert):\n\n jobs.append([fprefix, 'b', pert])\n\n\n#for job in jobs:\n# single_job(job)\n# input('w')\n\nnum_cores = 8\nParallel(n_jobs=6, verbose = 12)\\\n (delayed(single_job)\\\n (job) for job in jobs)\n\n\n\n","repo_name":"nihcompmed/Pancreatic-Islets","sub_path":"Islet_PH_C_code_final/Stochastic/Diab/make_perms.py","file_name":"make_perms.py","file_ext":"py","file_size_in_byte":7577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"22543834545","text":"from PIL import Image\nimport io\nimport json\nimport http.client\n\n\ndef get_image_list(movie_json: str) -> list:\n movies = json.loads(movie_json, encoding=\"utf-8\")\n images = []\n for movie in movies:\n images.append({\n \"name\": movie[\"title\"],\n \"url\": movie[\"image\"]\n })\n return images\n\n\ndef download_images(image_list: list):\n conn = http.client.HTTPSConnection(\"img3.doubanio.com\")\n for image_info in image_list:\n url = image_info[\"url\"].split(\".com\")[1]\n conn.request(\"GET\", url)\n response = conn.getresponse()\n image = Image.open(io.BytesIO(response.read()))\n size = (int(image.size[0] * 200 / image.size[1]), 200)\n image.thumbnail(size)\n image.save(\"img/%s.jpg\" % image_info[\"name\"], \"JPEG\")\n print(\"Download file: %s.jpg\" % image_info[\"name\"])\n\n\nif __name__ == \"__main__\":\n movie_json = open(\"my_movie.json\", \"rt\", encoding=\"utf-8\").read()\n images = get_image_list(movie_json)\n download_images(images)","repo_name":"bluicezhen/DoubanSipder","sub_path":"image_download.py","file_name":"image_download.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"30617390799","text":"import csv\n\n# Static Input Files\nimport time\nfrom urllib.parse import urlparse\n\npeople = 'data/people/people.csv'\nblacklist = 'data/Static/Blacklist.csv'\nprocessed_emails = 'data/static/processedBA.csv'\n\nstates = 'data/Static/states.csv'\n\nstatic = {'people': people, 'states': states, 'processed_emails': processed_emails,\n 'blacklist': blacklist}\n\n# Output Files\nnot_found_companies = 'data/Output/NotFoundQA.csv'\noutput_file = 'data/Output/FinalQA.csv'\nemail_file = processed_emails # specify different file if you want a file different from processed email read file\ncompany_file = 'data/processed.csv'\n\nswap = 
[]\n\n\n####################################################################################################################\n# Initialization\n\ndef open_files(array):\n    for nm, vr in static.items():\n        array[nm] = vr\n    for k, v in array.items():\n        temporary = []\n        with open(v, 'r') as z:\n            y = z.read().split('\\n')\n            reader = csv.reader(y)\n            for row in reader:\n                try:\n                    if len(row[0].strip(\" \")) > 1:\n                        temporary.append(row)\n                except:\n                    pass\n        array[k] = temporary\n    return array\n\n\ndef Convert(lst):\n    d = {}\n    for row in lst:\n        try:\n            key = row[1]\n            value = row[0]\n            d[key] = value\n        except:\n            pass\n    return d\n\n\ndef extract_companies(people):\n    companies = []\n    for k, v in enumerate(people):\n        if v:\n            companies.append(v[0].lower())\n    return companies\n\n\ndef listify_companies(companies):\n    index = {req_word: [idx for idx, word in enumerate(companies) if word == req_word] for req_word in\n             set(companies)}\n    return index\n\n\n#######################################################################################################################\n# Check\n\ndef check_in_blacklist(company, blacklist):\n    skip = False\n    for blk, blk2 in enumerate(blacklist):\n        if blk2[0].lower() == company:\n            skip = True\n            break\n    return skip\n\n\ndef filter_sent_emails(occurence, processed_emails, people):\n    temp = []\n    email_processed = False\n    for lookup in occurence:\n        for email, email2 in enumerate(processed_emails):\n            if email2[0] == people[lookup][4]:\n                # print(\"Matching email found\")\n                email_processed = True\n                break\n        if not email_processed:\n            # print(people[lookup])\n            temp.append(people[lookup])\n    return temp\n\n\ndef check_company(company, processed_companies):\n    company_processed = False\n    for company1, company2 in enumerate(processed_companies):\n        if company2[0] == company:\n            # print(\"Matching company found\")\n            company_processed = True\n            break\n    return company_processed\n\n\n####################################################################################################################\n# Others\n\ndef get_hostname(url, uri_type='both'):\n    \"\"\"Get the host name from the url\"\"\"\n    parsed_uri = urlparse(url)\n    return '{uri.netloc}'.format(uri=parsed_uri)\n\n\ndef Initialization(file_array):\n    new_array = open_files(file_array)\n    jobs, people, states, processed_emails, blacklist, processed_companies = \\\n        file_array['jobs'], file_array['people'], file_array['states'], \\\n        file_array['processed_emails'], file_array['blacklist'], file_array['processed_companies']\n    states = Convert(states)\n    people_companies = extract_companies(people)\n    occr = listify_companies(people_companies)\n\n\n    return {'jobs': jobs, 'people': people, 'states': states, 'processed_emails': processed_emails, 'occr': occr,\n            'processed_companies': processed_companies, 'blacklist': blacklist, 'people_companies': people_companies}\n\n\ndef Check(people, company, processed_emails, processed_companies, blacklist, occurence):\n    is_blacklisted = check_in_blacklist(company, blacklist)\n    new_data = filter_sent_emails(occurence, processed_emails, people)\n    is_old_company = check_company(company, processed_companies)\n\n    return {'is_blacklisted': is_blacklisted, 'is_old_company': is_old_company, 'new_data': new_data}\n\n\ndef Process(city, trty, data2, requirement):\n    counter = 0\n    data = []\n    for i, v in enumerate(data2):\n        if v in swap:\n            pass\n            # print(\"found match\", v)\n        else:\n            # print(v, swap)\n            # time.sleep(5)\n            if counter >= requirement:\n                break\n            match = [x.lower() for x in v]\n            if trty in match and city in 
match:\n data.insert(0, v)\n counter += 1\n elif trty.lower() in match:\n data.insert(3, v)\n else:\n data.insert(10, v)\n data = data[:requirement]\n for ind, row in enumerate(data):\n swap.append(row)\n return data\n","repo_name":"RajuDhl/Multiprocessing","sub_path":"source/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"10508532517","text":"from copy import deepcopy\n\nimport pytest\nimport yaml\n\nfrom dataClasses.base import Ingredient, Recipe, IngredientCollection\nfrom gtnhClasses.overclocks import overclockRecipe\n\nimport json\ndef loadTestConfig():\n with open('config_factory_graph.yaml', 'r') as f:\n graph_config = yaml.safe_load(f)\n return graph_config\n\n\ndef test_standardOverclock():\n r_base = Recipe(\n 'centrifuge',\n 'MV',\n IngredientCollection(\n Ingredient('glass dust', 1)\n ),\n IngredientCollection(\n Ingredient('silicon dioxide', 1)\n ),\n 5,\n 80\n )\n\n r = deepcopy(r_base)\n r.user_voltage = 'MV'\n r = overclockRecipe(r)\n\n assert r.eut == 20\n assert r.dur == 40\n\n r = deepcopy(r_base)\n r.user_voltage = 'HV'\n r = overclockRecipe(r)\n\n assert r.eut == 80\n assert r.dur == 20\n\n\ndef test_perfectOverclock():\n r_base = Recipe(\n 'large chemical reactor',\n 'MV',\n IngredientCollection(\n Ingredient('glass dust', 1)\n ),\n IngredientCollection(\n Ingredient('silicon dioxide', 1)\n ),\n 5,\n 80\n )\n\n r = deepcopy(r_base)\n r.user_voltage = 'MV'\n r = overclockRecipe(r)\n\n assert r.eut == 20\n assert r.dur == 20\n\n r = deepcopy(r_base)\n r.user_voltage = 'HV'\n r = overclockRecipe(r)\n\n assert r.eut == 80\n assert r.dur == 5\n\n\ndef test_pyrolyseOverclock():\n r_base = Recipe(\n 'pyrolyse oven',\n 'MV',\n IngredientCollection(\n Ingredient('oak wood', 16),\n Ingredient('nitrogen', 1000),\n ),\n IngredientCollection(\n Ingredient('charcoal', 20),\n Ingredient('wood tar', 1500),\n ),\n 96,\n 320\n )\n\n r = deepcopy(r_base)\n r.user_voltage = 'HV'\n r.coils = 'kanthal'\n r = overclockRecipe(r)\n\n assert r.eut == 384\n assert r.dur == 160\n\n r = deepcopy(r_base)\n r.user_voltage = 'MV'\n r.coils = 'cupronickel'\n r = overclockRecipe(r)\n\n assert r.eut == 96\n assert r.dur == 640\n\n\ndef test_EBFOverclock():\n r_base = Recipe(\n 'electric blast furnace',\n 'LV',\n IngredientCollection(\n Ingredient('iron dust', 1),\n Ingredient('oxygen gas', 1000),\n ),\n IngredientCollection(\n Ingredient('steel ingot', 1),\n Ingredient('tiny pile of ashes', 1),\n ),\n 120,\n 500,\n heat=1000,\n )\n\n r = deepcopy(r_base)\n r.user_voltage = 'LV'\n r.coils = 'cupronickel'\n r = overclockRecipe(r)\n\n assert r.eut == 120\n assert r.dur == 500\n\n r = deepcopy(r_base)\n r.user_voltage = 'MV'\n r.coils = 'kanthal' # 2701K\n r = overclockRecipe(r)\n\n # excess heat = 1701K\n # should get 0.95x eut and one 2x OC\n\n assert r.eut == 120*4*.95\n assert r.dur == 500/2\n\n r = deepcopy(r_base)\n r.user_voltage = 'MV'\n r.coils = 'nichrome' # 3601K\n r = overclockRecipe(r)\n\n # excess heat = 2601K\n # should get (0.95**2)x eut and one 4x OC\n\n assert r.eut == 120*4*(.95**2)\n assert r.dur == 500/4\n\n r = deepcopy(r_base)\n r.user_voltage = 'HV'\n r.coils = 'nichrome' # 3601K\n r = overclockRecipe(r)\n\n # excess heat = 2601K\n # should get (0.95**2)x eut, one 4x OC, and one 2x OC\n\n assert r.eut == 120*(4**2)*(.95**2)\n assert r.dur == 
500/4/2","repo_name":"Eldrinn-Elantey/gtnh-flow","sub_path":"tests/test_overclocks.py","file_name":"test_overclocks.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"} +{"seq_id":"23506187550","text":"N = int(input())\narr = list(map(int, input().split(\" \")))\n\ndef solution():\n max_dp = [1] * N\n answer = -1\n for i in range(N):\n for j in range(i):\n if arr[i] > arr[j]:\n max_dp[i] = max(max_dp[i], max_dp[j] + 1)\n min_dp = [1] * N\n arr_re = arr[::-1]\n for k in range(N):\n for m in range(k):\n if arr_re[k] > arr_re[m]:\n min_dp[k] = max(min_dp[k], min_dp[m] + 1)\n\n for i in range(N):\n answer = max(answer, max_dp[i] + min_dp[::-1][i] - 1)\n print(answer)\nsolution()\n\n############################################################\n# 아래는 실패\n############################################################\nclass Node:\n def __init__(self, value):\n self.value = value\n self.high = []\n self.low = []\n self.max_length = 0\n\nclass Tree:\n def __init__(self):\n self.head = None\n\n def add(self, value):\n\n def recursive(node, count):\n if node is None:\n self.head = Node(value)\n return\n curr = node\n # if not curr.high and not curr.low:\n # return\n self.head.max_length = max(self.head.max_length, count)\n if curr.value < value:\n curr.high.append(Node(value))\n elif curr.value > value:\n curr.low.append(Node(value))\n\n for high_node in curr.high:\n count += 1\n recursive(high_node, count)\n\n for low_node in curr.low:\n count += 1\n recursive(low_node, count)\n recursive(self.head, 0)\n\nn = int(input())\narr = list(map(int, input().split(\" \")))\ntree = Tree()\nfor i in arr:\n tree.add(i)\nprint(tree.head.max_length // 2)\n\n","repo_name":"chacha912/nklcb_algorithm_study","sub_path":"week02/BOJ_11054_바이토닉부분수열/BOJ_11054_이경엽.py","file_name":"BOJ_11054_이경엽.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"7029840711","text":"# Compute the Number of Times a Pattern Appears in a Text\nimport sys\n\n\ndef count_pattern(text, pattern):\n pattern_length = len(pattern)\n count = 0\n for ix in range(len(text)-pattern_length+1):\n substring = text[ix:ix+pattern_length]\n if substring == pattern:\n count += 1\n return count\n\n\ndef main():\n text, pattern = sys.stdin.read().splitlines()\n result = count_pattern(text, pattern)\n print(result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ghostrider77/BioinformaticsProblems","sub_path":"Python/textbook_track/chapter01/ba1a.py","file_name":"ba1a.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"21612844451","text":"from cloudman.gcp.constants import DEFAULT_BOOT_CONFIG\nfrom cloudman.gcp.utils import run, await_ssh, derive_names, run_plain\nfrom cloudman.utils.logger import log\n\n\ndef create_boot_instance(name, network, config=DEFAULT_BOOT_CONFIG):\n \"\"\"Create a boot instance for setting up a boot disk\"\"\"\n cmd = \"\"\"compute instances create {0} \\\n --boot-disk-device-name={1} \\\n --subnet={2} \\\n --zone=us-west1-b \\\n --image-family={3} \\\n --boot-disk-size={4} \\\n --network-tier=PREMIUM \\\n --machine-type=n1-highcpu-8 \\\n --accelerator=\"type=nvidia-tesla-k80,count=1\" \\\n --image-project=ubuntu-os-cloud \\\n --maintenance-policy=TERMINATE \\\n --boot-disk-type=pd-ssd \\\n --no-boot-disk-auto-delete \\\n \"\"\".format(name, 
name, network, config['os'], config['disk'])\n return run(cmd)\n\n\ndef setup_boot_disk(name, boot_instance, config=DEFAULT_BOOT_CONFIG):\n \"\"\"Set up a boot disk using a given script\"\"\"\n log(\"Setting up the boot disk '\" + boot_instance +\n \"'. This may take up to an hour..\")\n # Wait for SSH access\n await_ssh(boot_instance)\n # Download and run setup script\n cmd = \"\"\"gcloud compute ssh \"{0}\" --zone=us-west1-b \\\n -- \"curl {1} > /tmp/setup.sh && bash /tmp/setup.sh\" \\\n \"\"\".format(boot_instance, config['setup-script-url'])\n return run_plain(cmd)\n","repo_name":"aakashns/cloudman","sub_path":"cloudman/gcp/boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"69"} +{"seq_id":"31924174022","text":"# SVM results\nfrom sklearn.svm import SVC\nfrom sklearn import metrics\n\nfor kernel in ['rbf', 'linear']:\n clf = SVC(kernel=kernel).fit(Xtrain, ytrain)\n ypred = clf.predict(Xtest)\n print(\"SVC: kernel = {0}\".format(kernel))\n print(metrics.f1_score(ytest, ypred))\n plt.figure()\n plt.imshow(metrics.confusion_matrix(ypred, ytest),\n interpolation='nearest', cmap=plt.cm.binary)\n plt.colorbar()\n plt.xlabel(\"true label\")\n plt.ylabel(\"predicted label\")\n plt.title(\"SVC: kernel = {0}\".format(kernel))\n \n# random forest results\nfrom sklearn.ensemble import RandomForestClassifier\n\nfor max_depth in [3, 5, 10]:\n clf = RandomForestClassifier(max_depth=max_depth).fit(Xtrain, ytrain)\n ypred = clf.predict(Xtest)\n print(\"RF: max_depth = {0}\".format(max_depth))\n print(metrics.f1_score(ytest, ypred))\n plt.figure()\n plt.imshow(metrics.confusion_matrix(ypred, ytest),\n interpolation='nearest', cmap=plt.cm.binary)\n plt.colorbar()\n plt.xlabel(\"true label\")\n plt.ylabel(\"predicted label\")\n plt.title(\"RF: max_depth = {0}\".format(max_depth))","repo_name":"jakevdp/sklearn_pycon2014","sub_path":"notebooks/solutions/04_svm_rf.py","file_name":"04_svm_rf.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":226,"dataset":"github-code","pt":"69"} +{"seq_id":"23124578651","text":"from flask import Blueprint, request\nfrom flask_login import login_required, current_user\nfrom app.models import Comment, db\nfrom app.forms import CreateCommentForm, EditCommentForm\n\ncomment_routes = Blueprint('comments', __name__)\n\ndef validation_errors_to_error_messages(validation_errors):\n \"\"\"\n Simple function that turns the WTForms validation errors into a simple list\n \"\"\"\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f'{field} : {error}')\n return errorMessages\n\n@comment_routes.route('/')\n@login_required\ndef get_all_route_comments(route_id):\n all_route_comments = Comment.query.filter(Comment.route_id == route_id).all()\n return {'comments': [comment.to_dict() for comment in all_route_comments]}\n\n@comment_routes.route('/', methods=['POST'])\n@login_required\ndef create_comment():\n form = CreateCommentForm()\n form['csrf_token'].data = request.cookies['csrf_token']\n if form.validate_on_submit():\n comment = Comment(\n user_id=form.data['user_id'],\n route_id=form.data['route_id'],\n content=form.data['content'],\n )\n db.session.add(comment)\n db.session.commit()\n return { \"comment\": comment.to_dict() }\n return {'errors': validation_errors_to_error_messages(form.errors)}, 401\n\n@comment_routes.route('//edit', 
methods=['PUT'])\n@login_required\ndef edit_comment(id):\n comment = Comment.query.get(id)\n form = EditCommentForm()\n form['csrf_token'].data = request.cookies['csrf_token']\n if form.validate_on_submit():\n comment.content=form.data['content']\n db.session.commit()\n return comment.to_dict()\n return {'errors': validation_errors_to_error_messages(form.errors)}, 401\n\n@comment_routes.route('//delete', methods=['DELETE'])\n@login_required\ndef delete_comment(id):\n one_comment = Comment.query.get(id)\n db.session.delete(one_comment)\n db.session.commit()\n return {\"message\": \"Successful deletion\"}\n","repo_name":"adeswires/MapByFun","sub_path":"app/api/comment_routes.py","file_name":"comment_routes.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"19377395271","text":"from canvas.GeneralCanvas import GeneralCanvas\n\n# subclass of GeneralCanvas\n# displays a Baby in simulation canvas, in the Habitat\n# it can handle the zoom function with change_size method\n\n\nclass SimCanvas(GeneralCanvas):\n\n def __init__(self, root, *args):\n super().__init__(root, *args)\n self.modifier = 1\n\n def fill_with_baby_cells(self, size_mod):\n self.update()\n if self.pre_size_mod != size_mod:\n self.change_size(size_mod)\n self.pre_size_mod = size_mod\n x_mod = self.x_size\n y_mod = self.y_size\n x_dim_is_bigger = True if self.baby.dimension[0] >= self.baby.dimension[1] else False\n canvas_ratio = x_mod / self.baby.dimension[0] / 10 if x_dim_is_bigger else y_mod / self.baby.dimension[0] / 10\n dim_mod = int(x_mod / (self.baby.dimension[0] * canvas_ratio * self.modifier)) if x_dim_is_bigger else int(\n y_mod / (self.baby.dimension[1] * canvas_ratio * self.modifier))\n center_mod_y = int(self.winfo_height() / 2) - self.baby.dimension[1] * dim_mod\n center_mod_x = int(self.winfo_width() / 2) - self.baby.dimension[0] * dim_mod\n for cell in self.baby.cells:\n cell.dimension = {'x_dim': dim_mod,\n 'y_dim': dim_mod}\n self.delete('all')\n for cell in self.baby.cells:\n x = cell.position[0] * cell.dimension['x_dim'] + center_mod_x if x_dim_is_bigger else cell.position[0] * \\\n cell.dimension['x_dim'] + center_mod_y\n y = cell.position[1] * cell.dimension['y_dim'] + center_mod_y if x_dim_is_bigger else cell.position[1] * \\\n cell.dimension['y_dim'] + center_mod_x\n wn_x = int(cell.dimension['x_dim'] + cell.dimension['x_dim'] + x)\n wn_y = int(cell.dimension['y_dim'] + cell.dimension['y_dim'] + y)\n es_x = int(cell.dimension['x_dim'] * 2 + cell.dimension['x_dim'] + x)\n es_y = int(cell.dimension['y_dim'] * 2 + cell.dimension['y_dim'] + y)\n self.create_rectangle(wn_x, wn_y, es_x, es_y, fill='#000000', outline='#D3D3D3')\n self.update()\n\n def change_size(self, size_value):\n if size_value < self.pre_size_mod:\n self.x_size *= 1.3\n self.y_size *= 1.3\n if self.modifier < 8:\n self.modifier *= 1.3\n else:\n self.x_size /= 1.3\n self.y_size /= 1.3\n if self.modifier > 0.15:\n self.modifier /= 1.3\n","repo_name":"tmsBodnar/GOF","sub_path":"canvas/SimCanvas.py","file_name":"SimCanvas.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"37076521960","text":"from os import path\nfrom setuptools import setup\n\n\nDISTNAME = \"earthpy\"\nDESCRIPTION = (\n \"A set of helper functions to make working with spatial data \"\n \"in open source tools easier. 
This package is maintained by \"\n    \"Earth Lab and was originally designed to support the earth \"\n    \"analytics education program.\"\n)\nMAINTAINER = \"Leah Wasser\"\nMAINTAINER_EMAIL = \"leah.wasser@colorado.edu\"\n\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n    LONG_DESCRIPTION = f.read()\n\n\nif __name__ == \"__main__\":\n    setup(\n        name=DISTNAME,\n        maintainer=MAINTAINER,\n        maintainer_email=MAINTAINER_EMAIL,\n        description=DESCRIPTION,\n        long_description=LONG_DESCRIPTION,\n        long_description_content_type=\"text/markdown\",\n        version=\"0.9.4\",\n        packages=[\"earthpy\"],\n        install_requires=[\n            \"geopandas\",\n            \"matplotlib>=2.0.0\",\n            \"numpy>=1.14.0\",\n            \"rasterio\",\n            \"scikit-image\",\n            \"requests\",\n        ],\n        zip_safe=False,  # the package can run out of an .egg file\n        classifiers=[\n            \"Intended Audience :: Developers\",\n            \"License :: OSI Approved :: BSD License\",\n            \"Programming Language :: Python\",\n            \"Topic :: Software Development\",\n            \"Operating System :: Microsoft :: Windows\",\n            \"Operating System :: POSIX\",\n            \"Operating System :: Unix\",\n            \"Operating System :: MacOS\",\n        ],\n        package_data={\n            DISTNAME: [\n                \"example-data/*.json\",\n                \"example-data/*.tif\",\n                \"example-data/*.geojson\",\n                \"example-data/*.shp\",\n                \"example-data/*.shx\",\n                \"example-data/*.prj\",\n                \"example-data/*.dbf\",\n            ]\n        },\n        url=\"https://github.com/earthlab/earthpy\",\n    )\n","repo_name":"earthlab/earthpy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":467,"dataset":"github-code","pt":"69"}
{"seq_id":"74453086621","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/env python\nimport torch\nimport torch.nn as nn\n\n\n\"\"\"\nTrain a model to learn:\n\"hello\" --> \"ohlol\"\n\"\"\"\n\n\nbatch_size = 1\ninput_size = 4\nhidden_size = 4\nnum_layers = 1\nseq_len = 5\n\nidx2char = [\"e\", \"h\", \"l\", \"o\"]\nx_data = [1, 0, 2, 2, 3]\ny_data = [3, 1, 2, 3, 2]\none_hot_lookup = [[1, 0, 0, 0],\n                  [0, 1, 0, 0],\n                  [0, 0, 1, 0],\n                  [0, 0, 0, 1]]\nx_one_hot = [one_hot_lookup[i] for i in x_data]\ninputs = torch.Tensor(x_one_hot).view(seq_len, batch_size, input_size)\nlabels = torch.LongTensor(y_data)\n\n\n# Rnncell\nclass ModelRnnCell(nn.Module):\n    def __init__(self, input_size, hidden_size, batch_size):\n        super(ModelRnnCell, self).__init__()\n        self.batch_size = batch_size\n        self.input_size = input_size\n        self.hidden_size = hidden_size\n        self.runcell = nn.RNNCell(input_size=input_size, hidden_size=hidden_size)\n\n    def forward(self, input, hidden):\n        hidden = self.runcell(input, hidden)\n        return hidden\n\n    def init_hidden(self):\n        return torch.zeros(self.batch_size, self.hidden_size)\n\n\nnet = ModelRnnCell(input_size, hidden_size, batch_size)\n\n\ncriterion = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(net.parameters(), lr=0.05)\n\n# for epoch in range(15):\n#     loss = 0\n#     optimizer.zero_grad()\n#     hidden = net.init_hidden()\n#     print(\"Predicted string:\", end=\"\")\n#     for input, label in zip(inputs, labels):\n#         hidden = net(input, hidden)\n#         loss += criterion(hidden, label)\n#         _, idx = hidden.max(dim=1)\n#     # print(idx2char[idx.item()], end=\"\")\n#     loss.backward()\n#     optimizer.step()\n#     print(\", Epoch [%d/15] loss=%.4f\" % (epoch + 1, loss.item()))\n\n\n#Rnn\nclass ModelRnn(nn.Module):\n    def __init__(self, input_size, hidden_size, batch_size, num_layers=1):\n        super(ModelRnn, self).__init__()\n        self.num_layers = num_layers\n        self.batch_size = batch_size\n        self.input_size = input_size\n        self.hidden_size = hidden_size\n        self.rnn = nn.RNN(input_size=self.input_size, hidden_size=self.hidden_size, num_layers=self.num_layers)\n\n    def forward(self, input):\n        hidden = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)\n        out, _ = self.rnn(input, hidden)\n        return out.view(-1, self.hidden_size)\n\n\nnet_rnn = ModelRnn(input_size, hidden_size, batch_size, num_layers)\n\ncriterion = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(net_rnn.parameters(), lr=0.15)\n\nfor epoch in range(200):\n    optimizer.zero_grad()\n    outputs = net_rnn(inputs)\n    loss = criterion(outputs, labels)\n    loss.backward()\n    optimizer.step()\n\n    _, idx = outputs.max(dim=1)\n    idx = idx.data.numpy()\n    print(\"Predicted:\", \"\".join([idx2char[x] for x in idx]), end=\"\")\n    print(\", Epoch [%d/200] loss=%.4f\" % (epoch + 1, loss.item()))\n","repo_name":"Wakingupdream/pytorch_learn","sub_path":"neural network/RNN_Example.py","file_name":"RNN_Example.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"3910129350","text":"from tkinter import *\nfrom tkinter.ttk import *\n\nwindow = Tk()\nwindow.title(\"Hm . . . ?\")\nwindow.geometry(\"1280x720\")\n\nchk_state = IntVar()\nchk_state.set(1)\n\nchackb = Checkbutton(window, text=\"Choose partfinder\", variable=chk_state)\nchackb.grid(column=0,row=0)\n\nwindow.mainloop()","repo_name":"yuma-sakuma/pspdata","sub_path":"Hm2.py","file_name":"Hm2.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"29148658869","text":"\ndef extract_summary(file):\n    summary = []\n    flag = False\n\n    with open(file, 'r') as f:\n        for j in f:\n            if 'Sommaire' in j or flag is True:\n                summary.append(j)\n                flag = True\n\n            if '___' in j:\n                break\n\n    for i in summary:\n        print(i)\n\n\n# extract_summary(\".\\\\Pathologie\\\\Extraction_la_marche_a_suivre.md\")\n","repo_name":"amanikr/Dentopedia","sub_path":"extract_summary.py","file_name":"extract_summary.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"13445375046","text":"import torch.nn as nn\nimport torch\nimport os\nimport argparse\nfrom sklearn.metrics import confusion_matrix, precision_recall_curve, mean_absolute_error\nfrom torch.utils.data import DataLoader\nimport numpy as np\nfrom torch import optim\n\nfrom dataset.dsloader import Data\nfrom model.patch_attention import PatchSelector\n\ndef adjust_learning_rate(optimizer, decay_rate=.1):\n    update_lr_group = optimizer.param_groups\n    for param_group in update_lr_group:\n        print('before lr: ', param_group['lr'])\n        param_group['lr'] = param_group['lr'] * decay_rate\n        print('after lr: ', param_group['lr'])\n    return optimizer\n\ndef main(args):\n    print(args)\n\n    # load data\n    print('============================loading data============================')\n    root = os.path.join('../datasets', args.data) # dataset path\n    dataset_tr = Data(root, args, 'train')\n    dataset_te = Data(root, args, 'val')\n    train_loader = DataLoader(dataset_tr, args.batchsz, num_workers=4, shuffle=True)\n    test_loader = DataLoader(dataset_te, args.batchsz, num_workers=4, shuffle=True)\n\n    # check cuda\n    device = torch.device(args.dev if torch.cuda.is_available() else 'cpu')\n    
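    # A minimal standalone sketch (illustration only, not part of the original
    # script) of the pyramid-label trick the training loop below relies on:
    # max-pooling a binary pixel mask gives one label per patch, positive
    # whenever any pixel inside the patch is positive. The 64x64 mask and the
    # 16-pixel patch size are assumed values for the example, not this repo's:
    #
    #     import torch
    #     from torch import nn
    #     mask = torch.zeros(1, 1, 64, 64)
    #     mask[0, 0, 10, 10] = 1.0                        # one positive pixel
    #     pool = nn.MaxPool2d(kernel_size=16, stride=16)  # 64/16 -> 4x4 grid
    #     patch_labels = pool(mask)                       # shape (1, 1, 4, 4)
    #     assert patch_labels[0, 0, 0, 0] == 1.0          # that patch is positive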
print('training device:', device)\n\n # build model\n num_ch = 3\n num_cls = 1 if args.loss == 'bce' else 2\n model = PatchSelector(args, num_cls)\n model = model.to(device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\n criterion = nn.BCEWithLogitsLoss() if args.loss == 'bce' else nn.CrossEntropyLoss(torch.Tensor([1, args.beta]).to(device=device)) # weighted cross entropy\n\n # train and validate\n print('============================Training============================')\n train_loss, test_loss = 0.0, 0.0\n tn, fp, fn, tp = 0.0, 0.0, 0.0, 0.0\n cm = np.zeros((2, 2))\n weights = [1, 1, 1]\n for epoch in range(args.epoch):\n # adjust learning rate\n if (epoch+1)%(args.epoch//3) == 0:\n optimizer = adjust_learning_rate(optimizer, decay_rate=0.1)\n\n # train model\n for xtr, ytr in train_loader:\n # generate pyramid labels\n a3tr = nn.MaxPool2d(kernel_size=(args.imgsz//args.num_patch), stride=(args.imgsz//args.num_patch))(ytr)\n a2tr = nn.MaxPool2d(kernel_size=(args.imgsz//(args.num_patch*2)), stride=(args.imgsz//(args.num_patch*2)))(ytr)\n a1tr = nn.MaxPool2d(kernel_size=(args.imgsz//(args.num_patch*4)), stride=(args.imgsz//(args.num_patch*4)))(ytr)\n\n xtr, ytr = xtr.to(device), ytr.to(device)\n a3tr, a2tr, a1tr = a3tr.to(device), a2tr.to(device), a1tr.to(device)\n optimizer.zero_grad()\n p3tr, p2tr, p1tr = model(xtr)\n\n ltr1 = criterion(p3tr, torch.squeeze(a3tr, dim=1).long() if args.loss == 'ce' else a3tr)\n ltr2 = criterion(p2tr, torch.squeeze(a2tr, dim=1).long() if args.loss == 'ce' else a2tr)\n ltr3 = criterion(p1tr, torch.squeeze(a1tr, dim=1).long() if args.loss == 'ce' else a1tr)\n ltr = weights[0]*ltr1+weights[1]*ltr2+weights[2]*ltr3\n\n ltr.backward()\n optimizer.step()\n\n train_loss += ltr.item()\n \n # evaluate model\n pred = np.zeros((0,1,args.num_patch,args.num_patch))\n gt = np.zeros((0,1,args.num_patch,args.num_patch))\n with torch.no_grad():\n for xte, yte, name in test_loader:\n ate = nn.MaxPool2d(kernel_size=(args.imgsz//args.num_patch), stride=(args.imgsz//args.num_patch))(yte)\n xte, yte, ate = xte.to(device), yte.to(device), ate.to(device)\n \n pte, p64te, p32te = model(xte)\n\n # aggregate pyramid predictions\n p64te = nn.MaxPool2d(kernel_size=2, stride=2)(p64te)\n p32te = nn.MaxPool2d(kernel_size=4, stride=4)(p32te)\n pte = torch.maximum(pte, p64te)\n pte = torch.maximum(pte, p32te)\n\n lte = criterion(pte, torch.squeeze(ate, dim=1).long() if args.loss == 'ce' else ate)\n test_loss += lte.item()\n\n if args.loss != 'bce':\n pte = torch.unsqueeze(torch.argmax(pte, 1), 1)\n pte = pte.cpu().numpy()\n ate = ate.cpu().numpy()\n\n ate[ate>=0.5] = 1\n ate[ate<0.5] = 0\n\n pred = np.append(pred, pte, axis=0)\n gt = np.append(gt, ate, axis=0)\n pte[pte>=args.th] = 1 # adjustable threshold\n pte[pte None:\n self.value = value # при инициализации отрабативает сеттер\n \n @staticmethod\n def __valid_value(value) -> None:\n if type(value) != str:\n raise TypeError('received data must be STR') \n \n @property\n def value(self) :\n return self._value\n \n @value.setter\n def value(self, value: str) -> None:\n self.__valid_value(value)\n self._value = value\n\n def __str__(self) -> str:\n return f'{self.value}'\n \n\nclass Name(Field):\n \"\"\"\n Class representing the name field in a record of the address book.\n \"\"\"\n def __init__(self, value: str) -> None:\n self.value = value\n\n # наследуем геттер и сеттер ради тренировки \n # в данном случае можно било обойтись супер в инит\n @property\n def value(self) -> str:\n 
return super(Name,Name).value.fget(self)\n\n @value.setter\n def value(self, value: str) -> None:\n super(Name,Name).value.fset(self, value) \n \n\n\nclass Phone(Field):\n \"\"\"\n Class representing the phone field in a record of the address book.\n \"\"\"\n def __init__(self, value: str) -> None:\n self.value = value # при инициализации отрабативает сеттер\n \n @staticmethod\n def __valid_phone(value) -> None:\n phone = ''.join(filter(str.isdigit, value))\n if 9 >= len(phone) <= 15 : #псевдо проверка номера\n raise ValueError(\"Phone number isn't correct\") \n\n @Field.value.setter # переопределяем сеттер родительского класса\n def value(self, value: str) -> None:\n super(Phone, Phone).value.__set__(self, value) # родительский сеттер проверка на стр\n self.__valid_phone(value)\n self._value = value\n \nclass FormatDateError(Exception):\n \"\"\"\n Exception, If the input date string is not in a valid date format.\n \"\"\"\n pass\n\nclass Birthday(Field):\n \"\"\"\n Class representing the birthday field in a record of the address book.\n The date is stored in ISO 8601 format.\n \"\"\"\n def __init__(self, value: str) -> None:\n self.value = value # при инициализации отрабативает сеттер\n \n @staticmethod\n def __valid_date(value: str) -> str:\n \"\"\"\n Validate and convert the input date string to a valid ISO-formatted date.\n Args:\n value (str): The input date string.\n Raises:\n FormatDateError: If the input date string is not in a valid date format.\n Returns:\n str: The valid ISO-formatted date string. \n \"\"\"\n try:\n return date.isoformat(dt_parser(str(value), settings={'STRICT_PARSING': True}).date())\n except Exception: \n raise FormatDateError('not correct date!!!')\n \n @Field.value.setter\n def value(self, value: str) -> None:\n self._value = self.__valid_date(value)\n \n \nclass RecordNotBirthdayError(Exception): \n \"\"\"\n Custom exception class to indicate that the record does not have a birthday.\n \"\"\"\n pass\n\nclass Record:\n \"\"\"\n Class representing a record in an address book.\n\n Attributes:\n name (Name): The name of the contact.\n phones (list): A list of phone numbers associated with the contact.\n birthday (Birthday): The birthday of the contact.\n \"\"\"\n \n def __init__(self, name: Name, phone: Phone, birthday: Birthday=None ) -> None:\n \n name = self.try_valid_type_name(name)\n phone = self.try_valid_type_phone(phone)\n birthday = self.try_valid_type_birthday(birthday)\n\n self.name = name\n self.phones = [phone] if phone else []\n self.birthday = birthday if birthday is not None else None\n\n @staticmethod\n def try_valid_type_name(name: str) -> Name: \n if type(name) != Name:\n try:\n return Name(name) # тут перезаписуємо змінну name в обькт классу\n except Exception: \n raise ValueError(f\"name: '{name}' must be type(Name) or a valid string representation of a Name object\")\n return name \n\n @staticmethod\n def try_valid_type_phone(phone: str) -> Phone:\n if type(phone) != Phone:\n try:\n return Phone(phone)\n except Exception:\n raise ValueError(f\"phone:{phone} must be type(Phone) or a valid string representation of a Phone object\")\n return phone \n\n @staticmethod\n def try_valid_type_birthday(birthday: str) -> str: \n if type(birthday) != Birthday and birthday != None:\n try:\n return Birthday(str(birthday))\n except Exception: \n raise ValueError(f\"birthday:{birthday} must be type(Birthday) or a valid string representation of a Birthday object\")\n return birthday \n \n \n def __str__(self) -> str:# для принта рекорда..не знаю как 
принято..сделал как чувствую\n birthday_str = \"birthday: \"+str(self.birthday) if self.birthday != None else \"\"\n phones_str = \" \".join([ph.value for ph in self.phones]) \n return f' name: {self.name} -->> phone(s): {phones_str} {birthday_str}'\n\n def add_phone(self, phone: Phone) -> None:\n \"\"\"\n Add a new phone number to the list of phone numbers for the contact.\n Args:\n phone (Phone) or try valid Str: The phone number to be added to the contact.\n Returns:\n None: This method does not return any value.\n \"\"\"\n phone = self.try_valid_type_phone(phone)\n self.phones.append(phone)\n\n def remove_phone(self, phone: Phone) -> None:\n \"\"\"\n Remove a phone number from the list of phone numbers for the contact.\n\n Args:\n phone (Phone) or try valid Str: The phone number to be removed from the contact.\n Raises:\n KeyError: If the phone number is not found in the contact's list of phone numbers.\n Returns: \n None: This method does not return any value.\n \"\"\"\n phone = self.try_valid_type_phone(phone)\n if phone not in self.phones:\n raise KeyError(f\"The phone '{phone}' is not in the record.\")\n self.phones.remove(phone)\n \n def change_phone(self, old_phone: Phone, new_phone : Phone) -> None:\n \"\"\"\n Change a phone number in the list of phone numbers for the contact.\n\n Args:\n old_phone (Phone): The existing phone number to be replaced.\n new_phone (Phone): The new phone number to replace the existing one.\n or try valid Str : --//-- .\n Raises:\n ValueError: If the old phone number is not found in the contact's list of phone numbers.\n \"\"\"\n old_phone = self.try_valid_type_phone(old_phone)\n new_phone = self.try_valid_type_phone(new_phone)\n if old_phone in self.phones: # если номер входит получаем индекс \n index = self.phones.index(old_phone)\n self.phones[index] = new_phone\n else:\n raise ValueError(f\"The phone '{old_phone.value}' is not in this record '{self.name}'.\")\n\n def days_to_birthday(self) -> int :\n \"\"\"\n Calculate the number of days remaining until the contact's next birthday.\n\n Returns:\n int: The number of days remaining until the contact's next birthday.\n Raises:\n RecordNotBirthdayError: If the contact does not have a birthday set.\n \"\"\"\n if self.birthday == None:\n raise RecordNotBirthdayError(\"No birthday set for the contact.\")\n today = date.today()\n bday = date.fromisoformat(self.birthday.value).replace(year=today.year) # дата др в этом году \n if today > bday : # если др уже прошло берем дату следующего(в следующем году)\n bday= bday.replace(year=today.year+1)\n return (bday - today).days\n \nclass AddressBook(UserDict):\n \"\"\"\n A class representing an address book, which is a dictionary \n with record names as keys and record objects as values.\n TODO Singelton?\n \"\"\"\n \n def add_record(self, record: Record) -> None:\n \"\"\"\n Add a record to the address book.\n\n Args:\n record (Record): The record object to be added.\n Raises:\n TypeError: If the given object is not an instance of the Record class.\n \"\"\"\n if type(record) != Record:\n raise TypeError(\"Record must be an instance of the Record class.\")\n self.data[record.name.value] = record \n \n def iterator(self, item_number: int) -> str:\n \"\"\"\n Iterate through the records in the address book and yield groups of records.\n\n Args:\n item_number (int) > 0: The number of records to be yielded at a time.\n Yields:\n str: A string containing the representation of a group of records.\n Notes:\n If the given item_number is greater than the total number of 
records in the address book,\n            all records will be yielded in one group.\n        Raises:\n            ValueError: If item_number is less than or equal to 0. \n        TODO pretty (tabular .format) printing\n        \"\"\"\n        if item_number <= 0:\n            raise ValueError(\"Item number must be greater than 0.\")\n        elif item_number > len(self.data): # if the number to show per batch exceeds the number of records\n            item_number = len(self.data) # show everything at once\n        counter = 0\n        result = \"\"\n        for id_, record in self.data.items(): # since we inherit from UserDict we can use it like a dict\n            result += f\"{str(record)}\\n\"\n            counter += 1\n            \n            if not counter % item_number: # yield once item_number records have accumulated\n                yield result\n                result = \"\"\n            elif counter == len(self.data) - len(self.data) % item_number + 1: # condition for the trailing remainder\n                yield result\n        \n        \nif __name__ == '__main__':\n\n    test = Birthday(\"26*02*1994\")\n    print(test)\n    name_1 = Name('Bill')\n    phone_1 = Phone('1234567890')\n    b_day_1 = Birthday('1994-02-26')\n\n    name_2 = Name('serg')\n    phone_2 = Phone('1234567890')\n    b_day_2 = Birthday('1994-02-26')\n    \n    name_3 = Name('Oleg')\n    phone_3 = Phone('1234567890')\n    b_day_3 = Birthday('1994-02-26')\n\n    name_4 = Name('яяЯнаа')\n    phone_4 = Phone('1234567890')\n    b_day_4 = Birthday('1994-02-26')\n\n    rec_1 = Record(\"Лена\", \"1234554545\", test)\n    print(rec_1.days_to_birthday())\n\n\n    rec_2 = Record(\"Охрана\", phone_2, b_day_2)\n    rec_3 = Record(\"а я не Лена\", phone_3, b_day_3)\n    rec_4 = Record(name_4, phone_4, b_day_4)\n    ab = AddressBook()\n    ab.add_record(rec_1)\n    ab.add_record(rec_2)\n    ab.add_record(rec_3)\n    ab.add_record(rec_4)\n    # for i in ab.iterator(2):\n    #     print(i)\n    \n    # name = Name('Bill')\n    # phone = Phone('1234567890')\n    # b_day = Birthday('1994-02-26')\n    # rec = Record(name, phone, b_day)\n    # print(rec.phones[0].value)\n    # print(rec.birthday.value)\n    # print(rec.days_to_birthday())\n    # name = Name('Bill')\n    # phone = Phone('1234567890')\n    # b_day = Birthday('1994-02-26')\n    # rec = Record(name, phone, \"1994-02-26\")\n    # ab = AddressBook()\n    # ab.add_record(rec)\n    # assert isinstance(ab['Bill'], Record)\n    # assert isinstance(ab['Bill'].name, Name)\n    # assert isinstance(ab['Bill'].phones, list)\n    # assert isinstance(ab['Bill'].phones[0], Phone)\n    # assert isinstance(ab['Bill'].birthday, Birthday)\n    # assert ab['Bill'].phones[0].value == '1234567890'\n    # print('All Ok)') \n    # # a = Field(\"name\")\n    # num = Phone('12345678974')\n    # print(a.value)\n    # print(num.value)\n    # num.value = \"987654321123\"\n    # print(num.value)\n    # day = Birthday('1994-02-26')\n    # day.value = '1994-26-02'\n    # print(day.value)\n\n    \n\n\n\n\n\n","repo_name":"SergiyBagmet/goit_homework_11","sub_path":"address_book.py","file_name":"address_book.py","file_ext":"py","file_size_in_byte":12729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
{"seq_id":"717260755","text":"import pygame\n\nclass Player(pygame.sprite.Sprite):\n    gif=[]\n    gifDelay=0\n    gifCounter=0\n    left_right=1\n    def __init__(self,X,Y):\n        pygame.sprite.Sprite.__init__(self)\n        self.imgSize=(20,20)\n        self.image=pygame.image.load(\"images/player/player1.png\")\n        self.gif.append(pygame.image.load(\"images/player/player1.png\"))\n        self.gif.append(pygame.image.load(\"images/player/player2.png\"))\n        self.gif.append(pygame.image.load(\"images/player/player3.png\"))\n        self.gif.append(pygame.image.load(\"images/player/player4.png\"))\n        self.gif.append(pygame.image.load(\"images/player/player5.png\"))\n        
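        # Illustrative alternative (not from the original file): the repeated
        # appends above and below could be collapsed into one loop, assuming
        # the same numbered layout images/player/player1.png ... player6.png:
        #
        #     self.gif = [pygame.image.load(f'images/player/player{i}.png')
        #                 for i in range(1, 7)]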
self.gif.append(pygame.image.load(\"images/player/player6.png\"))\n for i in range(6):\n self.gif[i].set_colorkey((0,0,0))\n self.rect=self.image.get_rect()\n self.rect.x=X\n self.rect.y=Y\n self.left_right=1\n \n def update(self):\n self.gifDelay+=1\n if(self.gifDelay%4==0):\n self.image=self.gif[self.gifCounter]\n self.gifCounter+=1\n self.gifDelay=0\n if self.gifCounter==6:\n self.gifCounter=0\n if self.left_right==1:\n if self.rect.x>=580:\n self.rect.x=580\n self.left_right=0\n # print(\"Set true\")\n else:\n self.rect.x+=20\n if self.rect.x>=580:\n self.left_right=0\n # print(\"Set true\")\n else:\n if self.rect.x<=0:\n self.rect.x=0\n self.left_right=1\n else:\n self.rect.x-=20\n if self.rect.x<=0:\n self.left_right=1\n \n def get_left_right(self):\n return self.left_right\n def set_left_right(self, counter):\n self.left_right=counter\n","repo_name":"gowtham-sathyan/Centipede-AI","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"70898086940","text":"def square_matrix_simple(matrix=[]):\n # Create a new matrix of the same size as the input matrix\n new_matrix = []\n for row in matrix:\n new_row = []\n for element in row:\n # Square the element and append it to the new row\n new_row.append(element ** 2)\n new_matrix.append(new_row)\n\n return new_matrix\n\n# Test the function\nif __name__ == \"__main__\":\n matrix = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n ]\n\n new_matrix = square_matrix_simple(matrix)\n print(new_matrix)\n print(matrix)\n\n","repo_name":"dmoore9131/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/0-square_matrix_simple.py","file_name":"0-square_matrix_simple.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"31926328660","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.arange(0, 1, 0.01)\n\nplt.figure(figsize=(8, 4))\n\nline, = plt.plot(x, x * x)\nline.set_antialiased(False)\n\nlines = plt.plot(x, np.sin(x), x, np.cos(x))\n# 设置属性\nplt.setp(lines, color=\"r\", linewidth=1, antialiased=True)\n# 获取属性\nprint(plt.getp(lines[0]))\n\n# 获取当前的绘图对象\nf = plt.gcf()\nprint(plt.getp(f))\n\nplt.show()\n","repo_name":"alisure-ml/python-study","sub_path":"temp/study_plt/3-figure.py","file_name":"3-figure.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"10149952831","text":"# What we are going to do ->\n# reading stream from camera -> Loading Yolo net\n# -> Read frame in loop -> Getting blob of the frame\n# -> Implementing forward pass -> getting bounding boxes\n# -> Non maximum suppression -> Drawing bounded boxes with labels\n# -> Showing processed frames\n\nimport cv2\nimport numpy as np\nimport time\n\n\n# Step 1: Reading stream\ncamera = cv2.VideoCapture(1)\n\nh, w = None, None #Predeclare for height and width of frame\n\n# Step 2: Loading yolov3 Net\n\nwith open('yolo-coco-data/coco.names') as f:\n labels = [line.strip() for line in f]\n# print(labels)\nyolo = cv2.dnn.readNetFromDarknet('yolo-coco-data/yolov3.cfg',\n 'yolo-coco-data/yolov3.weights')\n\nall_layers = yolo.getLayerNames()\n\noutput_layers = \\\n [all_layers[i-1] for i in yolo.getUnconnectedOutLayers()]\n# print(output_layers)\n\nprobability_minimum = 0.5\nthreshold = 0.3\n\ncolours = np.random.randint(0,255, size = 
(len(labels),3), dtype='uint8')\n\n# Step 3: Reading frames from the loop\nwhile True:\n ret, frame = camera.read()\n\n if w is None or h is None:\n h, w = frame.shape[:2]\n\n# Step 4: Implementing blob on image\n blob = cv2.dnn.blobFromImage(frame, 1/255.0, (416,416)\n , swapRB = True, crop = False)\n\n# Step 5: Forward Pass\n yolo.setInput(blob)\n start = time.time()\n output = yolo.forward(output_layers)\n end = time.time()\n print(end-start)\n\n# Step 6: Getting bounded boxes\n bounding_boxes = []\n confidences = []\n class_numbers = []\n\n for result in output:\n #first we are iterating through all the output layers\n for detected_objects in result:\n # Second we get numpy array(detected_objects)\n # that contain coordinates at first four indexs\n # then scores of all the labels\n scores = detected_objects[5:]\n class_current = np.argmax(scores)\n confidence_current = scores[class_current]\n\n if (confidence_current > probability_minimum):\n box_current = detected_objects[0:4] * np.array(\n [w,h,w,h])\n x_center,y_center,box_width,box_height = box_current\n x_min = int(x_center - (box_width/2))\n y_min = int(y_center - (box_height/2))\n\n bounding_boxes.append([x_min,y_min,int(box_width),int(box_height)])\n\n confidences.append(float(confidence_current))\n class_numbers.append(class_current)\n\n# Step 7: Non-Maximum suppression\n results = cv2.dnn.NMSBoxes(bounding_boxes,confidences,probability_minimum,\n threshold)\n\n# Step 8: Adding bounding boxes on image\n if len(results)>0:\n for i in results.flatten():\n x_min, y_min = bounding_boxes[i][0], bounding_boxes[i][1]\n box_width, box_height = bounding_boxes[i][2], bounding_boxes[i][3]\n\n colour_box_current = colours[class_numbers[i]].tolist()\n cv2.rectangle(frame,(x_min,y_min),\n (x_min+box_width,y_min+box_height),colour_box_current,2)\n\n text_box_current = '{}: {:.4f}'.format(labels[int(class_numbers[i])],\n confidences[i])\n cv2.putText(frame, text_box_current, (x_min, y_min - 5),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, colour_box_current, 2)\n\n# Step 9: Showing processed frames\n\n cv2.namedWindow('YOLO v3 real time detections', cv2.WINDOW_NORMAL)\n cv2.imshow('YOLO v3 real time detections',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\n","repo_name":"Popeye-TheSailorMan/RealTimeObjectDetectionUsingYOLOv3","sub_path":"YoloV3Camera.py","file_name":"YoloV3Camera.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"19023929265","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 16 13:19:18 2019\n\n@author: Mr k\n\"\"\"\n\nimport os\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport numpy as np\nfrom keras.models import Sequential\n\n\nimdb_dir='D:/deep learning/aclImdb'\ntrain_dir=os.path.join(imdb_dir,'train')\n\nlabels=[]\ntexts=[]\n\nfor label_type in ['neg','pos']:\n dir_name=os.path.join(train_dir,label_type)\n for fname in os.listdir(dir_name):\n if fname[-4:]=='.txt':\n f=open(os.path.join(dir_name,fname))\n texts.append(f.read())\n f.close()\n if label_type=='neg':\n labels.append(0)\n else:\n labels.append(1)\n \nmaxlen=100\ntraining_samples=200\nvalidation_samples=1000\nmax_words=1000\ntokenizer=Tokenizer(num_words=max_words)\ntokenizer.fit_on_texts(texts)\nsequences=tokenizer.texts_to_sequences(texts)\nword_index=tokenizer.word_index\nprint('Found %s unique token.'% 
len(word_index))\ndata=pad_sequences(sequences,maxlen=maxlen)\nlabels=np.asarray(labels)\nprint('Shape of data tensor:',data.shape)\nprint('Shape of labels tensor:',labels.shape)\nindices=np.arange(data.shape[0])\nnp.random.shuffle(indices)\ndata=data[indices]\nlabels=labels[indices]\nx_train=data[:training_samples]\ny_train=labels[:training_samples]\nx_val=data[training_samples:training_samples+validation_samples]\ny_val=labels[training_samples:training_samples+validation_samples]\n\n\n\nglove_dir='D:/deep.learning'\n\nembeddings_index={}\nf=open(os.path.join(glove_dir,'glove.6B.100d.txt'),'r',encoding='UTF-8')\nfor line in f:\n values=line.split()\n # print(values)\n word=values[0]\n coefs=np.asarray(values[1:],dtype='float32')\n embeddings_index[word]=coefs\nf.close()\nprint('Found %s word vectors.'% len(embeddings_index))\n\n\nembedding_dim=100\nembedding_matrix=np.zeros((max_words,embedding_dim))\nfor word,i in word_index.items():\n if i 0:\n x['offset'] = x['offset'] + subject_delta + length_delta\n\n # Create a certificate definition structure\n csr_def = atcacert_def_t(**ATCACERT_DEF_CSR)\n\n # Attach the generated template with the updated subject name\n csr_def.cert_template_size = len(atcacert_def_csr_template)\n csr_def.cert_template = POINTER(c_uint8)(create_string_buffer(bytes(atcacert_def_csr_template), csr_def.cert_template_size))\n\n # Create a CSR based on the definition provided\n csr = bytearray(len(atcacert_def_csr_template)+8)\n csr_size = AtcaReference(len(csr))\n assert 0 == atcacert_create_csr(csr_def, csr, csr_size)\n\n # Encode the CSR in the expect format (PEM)\n csr_pem = base64.b64encode(csr).decode('ascii')\n csr_pem = ''.join(csr_pem[i:i+64] + '\\n' for i in range(0,len(csr_pem),64))\n csr_pem = '-----BEGIN CERTIFICATE REQUEST-----\\n' + csr_pem + '-----END CERTIFICATE REQUEST-----\\n'\n\n print(csr_pem)\n\n # Free the library\n atcab_release()\n\n\nif __name__ == '__main__':\n parser = setup_example_runner(__file__)\n args = parser.parse_args()\n\n info(args.iface, args.device, **parse_interface_params(args.params))\n print('\\nDone')\n\n\n\n\n\n\n\n\n\n\n","repo_name":"MicrochipTech/cryptoauthtools","sub_path":"python/examples/create_csr.py","file_name":"create_csr.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"69"} +{"seq_id":"40693089766","text":"# -*- coding: UTF-8 -*-\nimport torch\nimport random\nimport pandas as pd\nfrom copy import deepcopy\nfrom torch.utils.data import DataLoader, Dataset\nfrom ast import literal_eval\nrandom.seed(0)\nimport numpy as np\n\n\ndef dealpadding(lis_str, max):\n list1 = np.zeros(max)\n eval_lis = literal_eval(lis_str)\n lis_len = len(eval_lis)\n for i in range(0, lis_len):\n list1[i] = int(eval_lis[i] + 1)\n return list1\n\n\nclass UserItemRatingDataset(Dataset):\n \"\"\"Wrapper, convert Tensor into Pytorch Dataset\"\"\"\n def __init__(self, user_tensor, item_tensor, target_tensor,\n ASnode1_info_type_tensor, ASnode1_AS_tier_tensor,\n ASnode1_info_traffic_tensor, ASnode1_info_ratio_tensor,\n ASnode1_info_scope_tensor, ASnode1_policy_general_tensor,\n ASnode1_policy_locations_tensor, ASnode1_policy_ratio_tensor,\n ASnode1_policy_contracts_tensor,ASnode1_appearIXP_tensor,\n ASnode1_appearFac_tensor,\n ASnode2_info_type_tensor, ASnode2_AS_tier_tensor,\n ASnode2_info_traffic_tensor,ASnode2_info_ratio_tensor,\n ASnode2_info_scope_tensor,ASnode2_policy_general_tensor,\n ASnode2_policy_locations_tensor, ASnode2_policy_ratio_tensor,\n 
ASnode2_policy_contracts_tensor,ASnode2_appearIXP_tensor,\n ASnode2_appearFac_tensor):\n \"\"\"\n args:\n\n target_tensor: torch.Tensor, the corresponding rating for pair\n \"\"\"\n self.user_tensor = user_tensor\n self.item_tensor = item_tensor\n self.ASnode1_info_type_tensor = ASnode1_info_type_tensor\n self.ASnode1_AS_tier_tensor = ASnode1_AS_tier_tensor\n self.ASnode1_info_traffic_tensor = ASnode1_info_traffic_tensor\n self.ASnode1_info_ratio_tensor = ASnode1_info_ratio_tensor\n self.ASnode1_info_scope_tensor = ASnode1_info_scope_tensor\n self.ASnode1_policy_general_tensor = ASnode1_policy_general_tensor\n self.ASnode1_policy_locations_tensor = ASnode1_policy_locations_tensor\n self.ASnode1_policy_ratio_tensor = ASnode1_policy_ratio_tensor\n self.ASnode1_policy_contracts_tensor = ASnode1_policy_contracts_tensor\n self.ASnode1_appearIXP_tensor = ASnode1_appearIXP_tensor\n self.ASnode1_appearFac_tensor = ASnode1_appearFac_tensor\n self.ASnode2_info_type_tensor = ASnode2_info_type_tensor\n self.ASnode2_AS_tier_tensor = ASnode2_AS_tier_tensor\n self.ASnode2_info_traffic_tensor = ASnode2_info_traffic_tensor\n self.ASnode2_info_ratio_tensor = ASnode2_info_ratio_tensor\n self.ASnode2_info_scope_tensor = ASnode2_info_scope_tensor\n self.ASnode2_policy_general_tensor = ASnode2_policy_general_tensor\n self.ASnode2_policy_locations_tensor = ASnode2_policy_locations_tensor\n self.ASnode2_policy_ratio_tensor = ASnode2_policy_ratio_tensor\n self.ASnode2_policy_contracts_tensor = ASnode2_policy_contracts_tensor\n self.ASnode2_appearIXP_tensor = ASnode2_appearIXP_tensor\n self.ASnode2_appearFac_tensor = ASnode2_appearFac_tensor\n self.target_tensor = target_tensor\n\n def __getitem__(self, index):\n return self.user_tensor[index], self.item_tensor[index], \\\n self.ASnode1_info_type_tensor[index], self.ASnode1_AS_tier_tensor[index],\\\n self.ASnode1_info_traffic_tensor[index],self.ASnode1_info_ratio_tensor[index],\\\n self.ASnode1_info_scope_tensor[index],self.ASnode1_policy_general_tensor[index],\\\n self.ASnode1_policy_locations_tensor[index],self.ASnode1_policy_ratio_tensor[index],\\\n self.ASnode1_policy_contracts_tensor[index],self.ASnode1_appearIXP_tensor[index] ,\\\n self.ASnode1_appearFac_tensor[index],\\\n self.ASnode2_info_type_tensor[index],\\\n self.ASnode2_AS_tier_tensor[index], self.ASnode2_info_traffic_tensor[index],\\\n self.ASnode2_info_ratio_tensor[index], self.ASnode2_info_scope_tensor[index],\\\n self.ASnode2_policy_general_tensor[index],self.ASnode2_policy_locations_tensor[index],\\\n self.ASnode2_policy_ratio_tensor[index], self.ASnode2_policy_contracts_tensor[index], \\\n self.ASnode2_appearIXP_tensor[index], self.ASnode2_appearFac_tensor[index],self.target_tensor[index]\n\n def __len__(self):\n return self.user_tensor.size(0)\n\n\nclass SampleGenerator(object):\n \"\"\"Construct dataset for NCF\"\"\"\n def __init__(self, train_ratings, valid_ratings):\n \"\"\"\n args:\n ratings: pd.DataFrame, which contains 4 columns = ['userId', 'itemId', 'rating', 'timestamp']\n \"\"\"\n\n self.train_ratings = train_ratings\n self.valid_ratings = valid_ratings\n\n\n def instance_a_train_loader(self, batch_size):\n \"\"\"instance train loader for one training epoch\"\"\"\n users, items, ratings,\\\n ASnode1_info_type, ASnode1_AS_tier,ASnode1_info_traffic,ASnode1_info_ratio,ASnode1_info_scope,\\\n ASnode1_policy_general,ASnode1_policy_locations,ASnode1_policy_ratio,ASnode1_policy_contracts, \\\n ASnode1_appearIXP, 
ASnode1_appearFac,ASnode2_info_type,ASnode2_AS_tier,ASnode2_info_traffic,ASnode2_info_ratio,ASnode2_info_scope,\\\n ASnode2_policy_general,ASnode2_policy_locations,ASnode2_policy_ratio,ASnode2_policy_contracts,ASnode2_appearIXP,ASnode2_appearFac= (\n [], [], [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[])\n\n\n\n for row in self.train_ratings.itertuples():\n users.append(int(row.userId))\n items.append(int(row.itemId))\n ratings.append(float(row.rating))\n ASnode1_info_type.append(int(row.ASnode1_info_type))\n ASnode1_AS_tier.append(int(row.ASnode1_info_type))\n ASnode1_info_traffic.append(int(row.ASnode1_info_traffic))\n ASnode1_info_ratio.append(int(row.ASnode1_info_ratio))\n ASnode1_info_scope.append(int(row.ASnode1_info_scope))\n ASnode1_policy_general.append(int(row.ASnode1_policy_general))\n ASnode1_policy_locations.append(int(row.ASnode1_policy_locations))\n ASnode1_policy_ratio.append(int(row.ASnode1_policy_ratio))\n ASnode1_policy_contracts.append(int(row.ASnode1_policy_contracts))\n ASnode1_appearIXP.append(\n dealpadding(row.ASnode1_appearIXP, 879))\n ASnode1_appearFac.append(\n dealpadding(row.ASnode1_appearFac, 4111))\n\n\n\n ASnode2_info_type.append(int(row.ASnode2_info_type))\n ASnode2_AS_tier.append(int(row.ASnode2_info_type))\n ASnode2_info_traffic.append(int(row.ASnode2_info_traffic))\n ASnode2_info_ratio.append(int(row.ASnode2_info_ratio))\n ASnode2_info_scope.append(int(row.ASnode2_info_scope))\n ASnode2_policy_general.append(int(row.ASnode2_policy_general))\n ASnode2_policy_locations.append(int(row.ASnode2_policy_locations))\n ASnode2_policy_ratio.append(int(row.ASnode2_policy_ratio))\n ASnode2_policy_contracts.append(int(row.ASnode2_policy_contracts))\n ASnode2_appearIXP.append(\n dealpadding(row.ASnode2_appearIXP, 879))\n ASnode2_appearFac.append(\n dealpadding(row.ASnode2_appearFac, 4111))\n\n # construct data for model\n dataset = UserItemRatingDataset(\n user_tensor=torch.ShortTensor(users),\n item_tensor=torch.ShortTensor(items),\n target_tensor=torch.FloatTensor(ratings),\n ASnode1_info_type_tensor=torch.ShortTensor(ASnode1_info_type),\n ASnode1_AS_tier_tensor=torch.ShortTensor(ASnode1_AS_tier),\n ASnode1_info_traffic_tensor=torch.ShortTensor(ASnode1_info_traffic),\n ASnode1_info_ratio_tensor=torch.ShortTensor(ASnode1_info_ratio),\n ASnode1_info_scope_tensor=torch.ShortTensor(ASnode1_info_scope),\n ASnode1_policy_general_tensor=torch.ShortTensor(\n ASnode1_policy_general),\n ASnode1_policy_locations_tensor=torch.ShortTensor(\n ASnode1_policy_locations),\n ASnode1_policy_ratio_tensor=torch.ShortTensor(ASnode1_policy_ratio),\n ASnode1_policy_contracts_tensor=torch.ShortTensor(\n ASnode1_policy_contracts),\n ASnode1_appearIXP_tensor=torch.ShortTensor(ASnode1_appearIXP),\n ASnode1_appearFac_tensor=torch.ShortTensor(ASnode1_appearFac),\n ASnode2_info_type_tensor=torch.ShortTensor(ASnode2_info_type),\n ASnode2_AS_tier_tensor=torch.ShortTensor(ASnode2_AS_tier),\n ASnode2_info_traffic_tensor=torch.ShortTensor(ASnode2_info_traffic),\n ASnode2_info_ratio_tensor=torch.ShortTensor(ASnode2_info_ratio),\n ASnode2_info_scope_tensor=torch.ShortTensor(ASnode2_info_scope),\n ASnode2_policy_general_tensor=torch.ShortTensor(\n ASnode2_policy_general),\n ASnode2_policy_locations_tensor=torch.ShortTensor(\n ASnode2_policy_locations),\n ASnode2_policy_ratio_tensor=torch.ShortTensor(ASnode2_policy_ratio),\n ASnode2_policy_contracts_tensor=torch.ShortTensor(\n ASnode2_policy_contracts),\n ASnode2_appearIXP_tensor = torch.ShortTensor(ASnode2_appearIXP),\n 
ASnode2_appearFac_tensor = torch.ShortTensor(ASnode2_appearFac))\n return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n\n ##得到验证集,现在的验证集要变成回归。\n @property\n def evaluate_data(self):\n \"\"\"create evaluate data\"\"\"\n test_users, test_items, test_ratings, ASnode1_info_type, \\\n ASnode1_AS_tier, \\\n ASnode1_info_traffic, ASnode1_info_ratio, ASnode1_info_scope, \\\n ASnode1_policy_general, ASnode1_policy_locations, \\\n ASnode1_policy_ratio, ASnode1_policy_contracts, ASnode1_appearIXP, ASnode1_appearFac,\\\n ASnode2_info_type, ASnode2_AS_tier, \\\n ASnode2_info_traffic, ASnode2_info_ratio, ASnode2_info_scope, \\\n ASnode2_policy_general, ASnode2_policy_locations, \\\n ASnode2_policy_ratio, ASnode2_policy_contracts,ASnode2_appearIXP, ASnode2_appearFac = (\n [],[],[],[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []\n )\n\n\n\n for row in self.valid_ratings.itertuples():\n test_users.append(int(row.userId))\n test_items.append(int(row.itemId))\n test_ratings.append(float(row.rating))\n ASnode1_info_type.append(int(row.ASnode1_info_type))\n ASnode1_AS_tier.append(int(row.ASnode1_info_type))\n ASnode1_info_traffic.append(int(row.ASnode1_info_traffic))\n ASnode1_info_ratio.append(int(row.ASnode1_info_ratio))\n ASnode1_info_scope.append(int(row.ASnode1_info_scope))\n ASnode1_policy_general.append(int(row.ASnode1_policy_general))\n ASnode1_policy_locations.append(int(row.ASnode1_policy_locations))\n ASnode1_policy_ratio.append(int(row.ASnode1_policy_ratio))\n ASnode1_policy_contracts.append(int(row.ASnode1_policy_contracts))\n ASnode1_appearIXP.append(\n dealpadding(row.ASnode1_appearIXP, 879))\n ASnode1_appearFac.append(\n dealpadding(row.ASnode1_appearFac, 4111))\n\n\n ASnode2_info_type.append(int(row.ASnode2_info_type))\n ASnode2_AS_tier.append(int(row.ASnode2_info_type))\n ASnode2_info_traffic.append(int(row.ASnode2_info_traffic))\n ASnode2_info_ratio.append(int(row.ASnode2_info_ratio))\n ASnode2_info_scope.append(int(row.ASnode2_info_scope))\n ASnode2_policy_general.append(int(row.ASnode2_policy_general))\n ASnode2_policy_locations.append(int(row.ASnode2_policy_locations))\n ASnode2_policy_ratio.append(int(row.ASnode2_policy_ratio))\n ASnode2_policy_contracts.append(int(row.ASnode2_policy_contracts))\n ASnode2_appearIXP.append(\n dealpadding(row.ASnode2_appearIXP, 879))\n ASnode2_appearFac.append(\n dealpadding(row.ASnode2_appearFac, 4111))\n\n # print(torch.eye(len(ASnode2_info_prefixes4)))\n return [\n torch.ShortTensor(test_users),\n torch.ShortTensor(test_items),\n torch.ShortTensor(ASnode1_info_type),\n torch.ShortTensor(ASnode1_AS_tier),\n torch.ShortTensor(ASnode1_info_traffic),\n torch.ShortTensor(ASnode1_info_ratio),\n torch.ShortTensor(ASnode1_info_scope),\n torch.ShortTensor(ASnode1_policy_general),\n torch.ShortTensor(ASnode1_policy_locations),\n torch.ShortTensor(ASnode1_policy_ratio),\n torch.ShortTensor(ASnode1_policy_contracts),\n torch.ShortTensor(ASnode1_appearIXP),\n torch.ShortTensor(ASnode1_appearFac),\n torch.ShortTensor(ASnode2_info_type),\n torch.ShortTensor(ASnode2_AS_tier),\n torch.ShortTensor(ASnode2_info_traffic),\n torch.ShortTensor(ASnode2_info_ratio),\n torch.ShortTensor(ASnode2_info_scope),\n torch.ShortTensor(ASnode2_policy_general),\n torch.ShortTensor(ASnode2_policy_locations),\n torch.ShortTensor(ASnode2_policy_ratio),\n torch.ShortTensor(ASnode2_policy_contracts),\n torch.ShortTensor(ASnode2_appearIXP),\n torch.ShortTensor(ASnode2_appearFac),\n torch.FloatTensor(test_ratings)\n 
]\n","repo_name":"zhuangshuying18/Unseen_Link_Prediction","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":13627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"71727635741","text":"'''\nvContact2 needs as input the proteins and a table describing which genome they\nbelong to, like this:\n\nprotein_id,contig_id,keywords\nref|NP_039777.1|,Sulfolobus spindle-shaped virus 1,ORF B-251\nref|NP_039778.1|,Sulfolobus spindle-shaped virus 1,ORF D-335\nref|NP_039779.1|,Sulfolobus spindle-shaped virus 1,ORF E-54\n\nThis script takes as input a fasta with all the proteins that were selected as\nbest-coding (prodigal-11, TAG or TGA) and generates such table.\n\nNotice I can do this because the genome_id information is in the header of the\nprotein, which is like this:\n\n>crAssphage|prodigal-11|191..346|52|+|1_1\n\ninput fasta file was generated as follows:\n\n'''\n\nimport argparse\nfrom pathlib import Path\nimport os\nfrom Bio import SeqIO\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='')\n\n requiredArgs = parser.add_argument_group(\"Required Arguments\")\n\n requiredArgs.add_argument('-i', '--input_proteins_file',\n dest='in_faa',\n type=lambda p: Path(p).resolve(strict=True),\n required=True,\n help=''\n )\n requiredArgs.add_argument('-o', '--output_table',\n dest='out_table',\n type=lambda p: Path(p).resolve(),\n required=True,\n help='Extension must be csv or vContact2 will fail'\n )\n\n return parser.parse_args()\n\ndef main():\n args = parse_args()\n\n with open(args.out_table, \"w\") as fout:\n fout.write(\"protein_id,contig_id,keywords\\n\")\n\n records = SeqIO.parse(args.in_faa, \"fasta\")\n for record in records:\n tow = list()\n genome = record.description.split(\"|\")[0]\n tow = f'{record.description},{genome},not_provided\\n'\n fout.write(tow)\n\n\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dcarrillouu/PhageAnnotation","sub_path":"scripts/best-coding_to_vcontact2-input-table.py","file_name":"best-coding_to_vcontact2-input-table.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"26498817806","text":"import copy\nfrom FL.average import average_weights\nfrom FL.models.initialize_model import initialize_model\nimport torch\n\nclass Cloud():\n def __init__(self, args, edges, test_loader, shared_layers=0):\n self.device = args.device\n self.receiver_buffer = {}\n self.edges = edges\n self.args = args\n self.num_edges = args.num_edges\n self.test_loader = test_loader\n self.model = initialize_model(args, self.device)\n self.shared_state_dict = copy.deepcopy(self.model.shared_layers.state_dict())\n \n self.location = (0, 0)\n self.testing_acc = 0\n if not self.args.is_layered:\n self.clients = []\n \n def aggregate(self):\n received_dict = []\n sample_num = []\n if self.args.algorithm == 'W_avg':\n if self.args.is_layered:\n edges_aggregate_num = 0\n for edge in self.edges:\n if edge.all_data_num:\n edges_aggregate_num += 1\n received_dict.append(self.receiver_buffer[edge.id])\n sample_num.append(edge.all_data_num)\n\n if edges_aggregate_num == 0:\n return\n self.shared_state_dict = average_weights(w=received_dict,\n s_num=sample_num)\n self.model.update_model(copy.deepcopy(self.shared_state_dict))\n # edges_aggregate_num = 1\n # for edge in self.edges:\n # if edge.all_weight_num:\n # edges_aggregate_num += 1\n # 
received_dict.append(self.receiver_buffer[edge.id])\n # sample_num.append(edge.all_weight_num)\n #\n # if edges_aggregate_num == 0:\n # return\n # self.shared_state_dict = average_weights(w = received_dict,\n # s_num= sample_num)\n # self.model.update_model(copy.deepcopy(self.shared_state_dict))\n # else:\n # self.all_weight_num = 0\n # for client in self.clients:\n # if client.weight:\n # self.all_weight_num += client.weight\n # received_dict.append(self.receiver_buffer[client.id])\n # sample_num.append(client.weight)\n # if self.all_weight_num == 0:\n # return\n # self.shared_state_dict = average_weights(w = received_dict,\n # s_num= sample_num)\n # self.model.update_model(copy.deepcopy(self.shared_state_dict))\n else:\n clients_aggregate_num = 0\n\n for client in self.clients:\n if client.data_num:\n clients_aggregate_num += 1\n received_dict.append(self.receiver_buffer[client.id])\n sample_num.append(client.data_num)\n\n if clients_aggregate_num == 0:\n return\n self.shared_state_dict = average_weights(w=received_dict,\n s_num=sample_num)\n self.model.update_model(copy.deepcopy(self.shared_state_dict))\n elif self.args.algorithm == 'FD_avg':\n if self.args.is_layered: \n edges_aggregate_num = 0\n \n for edge in self.edges:\n if edge.all_data_num:\n edges_aggregate_num += 1\n received_dict.append(self.receiver_buffer[edge.id])\n sample_num.append(edge.all_data_num)\n \n if edges_aggregate_num == 0:\n return\n self.shared_state_dict = average_weights(w = received_dict,\n s_num= sample_num)\n self.model.update_model(copy.deepcopy(self.shared_state_dict))\n else:\n clients_aggregate_num = 0\n \n for client in self.clients:\n if client.data_num:\n clients_aggregate_num += 1\n received_dict.append(self.receiver_buffer[client.id])\n sample_num.append(client.data_num)\n \n if clients_aggregate_num == 0:\n return\n self.shared_state_dict = average_weights(w = received_dict,\n s_num= sample_num)\n self.model.update_model(copy.deepcopy(self.shared_state_dict)) \n elif self.args.algorithm == 'Fair':\n if not self.args.is_layered: \n for client in self.clients:\n client.test_model()\n top_clients = sorted(self.clients, key = lambda x: x.testing_acc - self.testing_acc, reverse=True)\n top_clients = top_clients[:int(len(self.clients) / 1.5)]\n clients_aggregate_num = 0\n for client in top_clients:\n # for client in self.clients:\n if client.data_num:\n clients_aggregate_num += 1\n received_dict.append(self.receiver_buffer[client.id])\n sample_num.append((client.testing_acc - self.testing_acc) * client.data_num)\n # sample_num.append(client.data_num)\n if sum(sample_num) == 0:\n return\n if clients_aggregate_num == 0:\n return\n self.shared_state_dict = average_weights(w = received_dict,\n s_num= sample_num)\n self.model.update_model(copy.deepcopy(self.shared_state_dict)) \n # delta_loss = [0] * self.clients\n # for i, client in enumerate(self.clients):\n # delta_loss[i] = (client.testing_acc - self.testing_acc, i)\n \n \n \n # # 找topK个最大的 返回下标 也就是client.id\n # topk = sorted(delta_loss, key = lambda x: x[0], reverse=True)\n # topk_data_num = [0] * self.clients\n # topk_index = []\n # for t in topk:\n # _, i = t\n # topk_index.append(i)\n # topk_data_num[i] = self.clients[i].data_num\n # topk_sum_data_num = sum(topk_data_num)\n # clients_aggregate_num = 0\n # for index in topk_index:\n # if client.data_num:\n # clients_aggregate_num += 1\n # received_dict.append(self.receiver_buffer[client.id])\n # sample_num.append(delta_loss[index] * client.data_num / topk_sum_data_num)\n # if sum(sample_num) == 0:\n 
# return\n # # for client in self.clients:\n # # if client.data_num:\n # # clients_aggregate_num += 1\n # # received_dict.append(self.receiver_buffer[client.id])\n # # sample_num.append(client.data_num)\n \n # if clients_aggregate_num == 0:\n # return\n \n \n else:\n pass \n else:\n pass\n def send_to_client(self, client):\n client.receiver_buffer = copy.deepcopy(self.shared_state_dict)\n client.model.update_model(client.receiver_buffer)\n\n # 添加 将全局模型发送给edge作为下一轮的初始模型 的方法\n def send_global_to_edge(self, edge):\n edge.self_receiver_buffer = copy.deepcopy(self.shared_state_dict)\n edge.model.update_model(edge.self_receiver_buffer)\n\n def test_model(self):\n correct = 0.0\n total = 0.0\n for data in self.test_loader:\n inputs, labels = data\n break\n size = labels.size(0)\n with torch.no_grad():\n for data in self.test_loader:\n inputs, labels = data\n outputs = self.model.test_model(input_batch= inputs)\n _, predict = torch.max(outputs, 1)\n total += size\n correct += (predict == labels).sum()\n self.testing_acc = correct.item() / total\n return correct.item() / total\n \n def reset_model(self):\n self.receiver_buffer = {}\n self.model = initialize_model(self.args, self.device)\n self.shared_state_dict = copy.deepcopy(self.model.shared_layers.state_dict())\n","repo_name":"wrb-18/FL_RL_main","sub_path":"FL/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":8893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"69821375260","text":"class Solution:\n def maxWidthOfVerticalArea(self, points: List[List[int]]) -> int:\n arry = []\n for i in range(len(points)):\n arry.append(points[i][0])\n arry.sort()\n max_diff = 0\n for i in range(1, len(arry)):\n if arry[i] - arry[i-1] > max_diff:\n max_diff = arry[i] - arry[i-1]\n return max_diff\n\n\n# intuition, for each pillar, find the largest width","repo_name":"TomWu370/LeetCode-Python","sub_path":"1637-widest-vertical-area-between-two-points-containing-no-points/1637-widest-vertical-area-between-two-points-containing-no-points.py","file_name":"1637-widest-vertical-area-between-two-points-containing-no-points.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"4401573467","text":"from pymongo import MongoClient\nimport re\nfrom utility import request_site_page\nfrom bs4 import BeautifulSoup as bs\nfrom init import logger_init, config_init\nfrom urllib.parse import urljoin\n\nlogger = logger_init('注会协会-数据抓取')\nconfig = config_init()\nif config['mongodb']['dev_mongo'] == '1':\n db = MongoClient(config['mongodb']['ali_mongodb_url'], username=config['mongodb']['ali_mongodb_username'],\n password=config['mongodb']['ali_mongodb_password'],\n port=int(config['mongodb']['ali_mongodb_port']))[config['mongodb']['ali_mongodb_name']]\nelse:\n db = MongoClient(\n host=config['mongodb']['mongodb_host'],\n port=int(config['mongodb']['mongodb_port']),\n username=None if config['mongodb']['mongodb_username'] == '' else config['mongodb']['mongodb_username'],\n password=None if config['mongodb']['mongodb_password'] == '' else config['mongodb']['mongodb_password'])[\n config['mongodb']['mongodb_db_name']]\n\n# 抓取数据存入cicpa_data这个collection\ndb.cicpa_data.create_index([('url', 1)])\n\n\ndef cicpa_crawler():\n result_list = [] # 用来保存最后存入数据库的数据\n prefix_url = [{'url': 'http://www.cicpa.org.cn/Industry_regulation/Monitoring_info/index', 'origin': '注册会计师协会'}]\n for each_url_info in prefix_url:\n 
each_url = each_url_info['url']\n        stop_flag = False\n        logger.info('注册会计师协会 抓取URL:' + each_url + '.html')\n        # get page count\n        response = request_site_page(each_url + '.html')\n        if response is None:\n            logger.error('网页请求错误 %s' % (each_url + '.html'))\n            continue\n        soup = bs(response.content, 'lxml') if response else bs('', 'lxml')\n        page_count = int(re.search(r'var countPage = (\\d+)', soup.text).group(1).strip())\n        logger.info('注册会计师协会' + ' 一共有%d页' % page_count)\n\n        if db.crawler.find({'url': each_url + '.html'}).count() > 0:\n            last_updated_url = db.crawler.find_one({'url': each_url + '.html'})['last_updated']\n        else:\n            last_updated_url = ''\n\n        # get data\n        for page_num in range(page_count):\n            logger.info('注册会计师协会 -- 第%d页' % (page_num + 1))\n            if page_num == 0:\n                page_url = each_url + '.html'\n            else:\n                page_url = each_url + '_' + str(page_num) + '.html'\n            try:\n                page_response = request_site_page(page_url)\n                if page_response is None:\n                    logger.error('网页请求错误 %s' % page_url)\n                    continue\n                page_soup = bs(page_response.content, 'lxml')\n                all_result = page_soup.find(class_='news-next-list').find_all('li')\n\n                for index, each_result in enumerate(all_result):\n                    href = re.search(r'<a href=\"(.*?)\"', str(each_result)).group(1).strip()\n                    true_url = urljoin(page_url, href)\n                    if true_url == last_updated_url:\n                        stop_flag = True\n                        break\n                    if index == 0 and page_num == 0:\n                        if db.crawler.find({'url': each_url + '.html'}).count() > 0:\n                            if db.crawler.find_one({'url': each_url + '.html'})['last_updated'] != true_url:\n                                db.crawler.update_one({'url': each_url + '.html'}, {'$set': {'last_updated': true_url}})\n                        else:\n                            db.crawler.insert_one(\n                                {'url': each_url + '.html', 'last_updated': true_url, 'origin': each_url_info['origin']})\n\n                    title = re.search('target=\"_blank\">(.*?)</a>', str(each_result)).group(1).strip()\n                    if re.search(r'对.*[做作]出惩戒', title) or \\\n                            (re.search(r'约谈', title) and not re.search(r'约谈工作|提示.*风险', title)):\n                        publish_date = re.search(r'\\((\\d{4}-\\d{1,2}-\\d{1,2})\\)', each_result.text.strip()).group(1).strip()\n                        if db.cicpa_data.find({'url': true_url}).count() == 0:\n                            logger.info('注册会计师协会新公告:' + true_url + ' title:' + title)\n                            post = {\n                                'title': title,\n                                'publishDate': publish_date,\n                                'url': true_url,\n                                'type': '',\n                                'origin': '注册会计师协会',\n                                'status': 'not parsed'\n                            }\n                            if post not in result_list:\n                                result_list.append(post)\n                    else:\n                        if config['crawler_update_type']['update_type'] == '0':\n                            break\n                if stop_flag:\n                    logger.info('到达上次爬取的链接')\n                    break\n            except Exception as e:\n                logger.error(e)\n                continue\n\n    if len(result_list) > 0:\n        logger.info('注册会计师协会一共有%d条新公告,导入数据库中......' 
% len(result_list))\n r = db.cicpa_data.insert_many(result_list)\n if len(r.inserted_ids) == len(result_list):\n logger.info('注册会计师协会公告导入完成!')\n else:\n logger.error('注册会计师协会公告导入出现问题!')\n else:\n logger.info('注册会计师协会没有新公告!')\n\n\nif __name__ == \"__main__\":\n cicpa_crawler()\n","repo_name":"nightkid101/jgcf","sub_path":"crawler_code/cicpa.py","file_name":"cicpa.py","file_ext":"py","file_size_in_byte":5912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"686021785","text":"from __future__ import absolute_import\n\nfrom builtins import str\nimport json\nimport logging\nimport re\nimport sys\nimport traceback\n\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom bq_data_access.v1.feature_search.util import SearchableFieldHelper\nfrom bq_data_access.v2.feature_search.util import SearchableFieldHelper as SearchableFieldHelper_v2\nfrom bq_data_access.v2.feature_search.clinical_schema_utils import ClinicalColumnFeatureSupport\nfrom .models import VariableFavorite\nfrom workbooks.models import Workbook, Worksheet\nfrom projects.models import Program, DataSource, DataVersion, DataNode\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.contrib.auth.models import User as Django_User\nfrom django.http import HttpResponse, JsonResponse\n\nfrom cohorts.metadata_helpers import fetch_program_attr\nfrom isb_cgc.templatetags.custom_tags import get_readable_name\n\ndebug = settings.DEBUG\n\nBLACKLIST_RE = settings.BLACKLIST_RE\n\nlogger = logging.getLogger('main_logger')\n\n\n@login_required\ndef variable_fav_list_for_new_workbook(request):\n return variable_fav_list(request=request, new_workbook=True)\n\n\n@login_required\ndef variable_fav_list(request, workbook_id=0, worksheet_id=0, new_workbook=0):\n template = 'variables/variable_list.html'\n context = {}\n\n variable_list = VariableFavorite.get_list(request.user)\n if not variable_list.count():\n variable_list = None\n context['variable_list']=variable_list\n\n if workbook_id != 0:\n try:\n workbook_model = Workbook.objects.get(id=workbook_id)\n context['workbook'] = workbook_model\n worksheet_model = Worksheet.objects.get(id=worksheet_id)\n context['worksheet'] = worksheet_model\n context['base_url'] = settings.BASE_URL\n\n if variable_list:\n template = 'variables/variables_select.html'\n else:\n return initialize_variable_selection_page(request, workbook_id=workbook_id, worksheet_id=worksheet_id)\n\n except ObjectDoesNotExist:\n messages.error(request, 'The workbook and worksheet you were referencing does not exist.')\n return redirect('variables')\n elif new_workbook:\n context['new_workbook'] = True\n if variable_list:\n template = 'variables/variables_select.html'\n else:\n return initialize_variable_selection_page(request, new_workbook=True)\n\n return render(request, template, context)\n\n\n@login_required\ndef variable_fav_detail_for_new_workbook(request, variable_fav_id):\n return variable_fav_detail(request=request, variable_fav_id=variable_fav_id, new_workbook=True)\n\n\n@login_required\ndef variable_fav_detail(request, variable_fav_id, workbook_id=0, worksheet_id=0, new_workbook=0):\n template = 'variables/variable_detail.html'\n context = {}\n if new_workbook:\n context['new_workbook'] = True\n\n if workbook_id:\n try:\n workbook_model = Workbook.objects.get(id=workbook_id)\n context['workbook'] = 
workbook_model\n worksheet_model = Worksheet.objects.get(id=worksheet_id)\n context['worksheet'] = worksheet_model\n except ObjectDoesNotExist:\n messages.error(request, 'The workbook you were referencing does not exist.')\n return redirect('variables')\n try:\n variable_fav = VariableFavorite.get_deep(id=variable_fav_id, user=request.user)\n context['variables'] = variable_fav\n variable_fav.mark_viewed(request)\n except ObjectDoesNotExist:\n messages.error(request, 'The variable favorite you were looking for does not exist.')\n return redirect('variables')\n\n return render(request, template, context)\n\n\n@login_required\ndef variable_fav_edit_for_new_workbook(request):\n return initialize_variable_selection_page(request, new_workbook=True)\n\n\n@login_required\ndef variable_fav_edit_for_existing_workbook(request, workbook_id=0, worksheet_id=0, variable_fav_id=0):\n return initialize_variable_selection_page(request, workbook_id=workbook_id, worksheet_id=worksheet_id)\n\n\n@login_required\ndef variable_fav_edit(request, variable_fav_id=0):\n return initialize_variable_selection_page(request, variable_list_id=variable_fav_id)\n\n\n@login_required\ndef get_user_vars(request):\n\n try:\n # User programs\n ownedPrograms = request.user.program_set.filter(active=True)\n sharedPrograms = Program.objects.filter(shared__matched_user=request.user, shared__active=True, active=True)\n programs = ownedPrograms | sharedPrograms\n # user_programs is not being used. Can a shared program actually show up twice?\n #user_programs = programs.distinct()\n\n\n # This detailed construction of user data variables WAS happening in the template. Now it is here:\n\n user_vars = {}\n if programs:\n for program in programs:\n for project in program.project_set.all():\n if project.active:\n per_proj = {\n 'progName' : program.name,\n 'projName' : project.name,\n 'progID' : program.id,\n 'projID' : project.id\n }\n user_vars[project.id] = per_proj\n per_proj_vars = []\n per_proj['vars'] = per_proj_vars\n\n for variable in project.user_feature_definitions_set.all():\n if variable.shared_map_id:\n value = variable.shared_map_id\n else:\n value = 'v2:USER:{0}:{1}'.format(str(project.id), str(variable.id))\n per_proj_var = {\n 'var_type' : 'N' if variable.is_numeric else 'C',\n 'value' : value,\n 'data_code' : value,\n 'data_text_label' : '{0}: {1}'.format(project.name, get_readable_name(variable.feature_name)),\n 'data_feature_id' : variable.id,\n 'data_feature_name' : get_readable_name(variable.feature_name)\n }\n per_proj_vars.append(per_proj_var)\n except Exception as e:\n logger.error(\"[ERROR] While trying to load user variables for variable selection page:\")\n logger.exception(e)\n messages(request,\"There was an error while trying to load your user variables - please contact the administrator.\")\n return redirect(reverse('variables'))\n\n return render(request, 'variables/variable_edit_user_data.html', {'user_vars': user_vars})\n\n\n@login_required\ndef initialize_variable_selection_page(request,\n variable_list_id=0,\n workbook_id=0,\n worksheet_id=0,\n new_workbook=False):\n template = 'variables/variable_edit.html'\n context = {'variables' : [] }\n workbook_model = None\n worksheet_model = None\n existing_variable_list = None\n\n try:\n\n if workbook_id != 0:\n try:\n workbook_model = Workbook.objects.get(id=workbook_id)\n context['workbook'] = workbook_model\n worksheet_model = Worksheet.objects.get(id=worksheet_id)\n context['worksheet'] = worksheet_model\n except ObjectDoesNotExist:\n 
messages.error(request, 'The workbook you were referencing does not exist.')\n return redirect(reverse('variables'))\n\n if variable_list_id != 0:\n try:\n existing_variable_list = request.user.variablefavorite_set.get(id=variable_list_id)\n if existing_variable_list.version != 'v2':\n messages.warning(request, 'Version 1 Variable lists cannot be edited due to changes in available variables.')\n return redirect(reverse('variables'))\n except ObjectDoesNotExist:\n messages.error(request, 'The variable favorite you were looking for does not exist.')\n return redirect(reverse('variables'))\n\n data_attr = [\n 'DNA_sequencing',\n 'RNA_sequencing',\n 'miRNA_sequencing',\n 'Protein',\n 'SNP_CN',\n 'DNA_methylation'\n ]\n\n # This is a list of specific data classifications which require additional filtering in order to\n # Gather categorical or numercial variables for use in the plot\n # Filter Options\n datatype_labels = {'CLIN' : 'Clinical',\n 'GEXP' : 'Gene Expression',\n 'MIRN' : 'miRNA',\n 'METH' : 'Methylation',\n 'CNVR' : 'Copy Number',\n 'RPPA' : 'Protein',\n 'GNAB' : 'Mutation'}\n\n datatype_list = SearchableFieldHelper.get_fields_for_all_datatypes()\n for type in datatype_list:\n type['label'] = datatype_labels[type['datatype']]\n\n #remove gene in fields\n for index, field in enumerate(type['fields']):\n if field['label'] == \"Gene\":\n del type['fields'][index]\n\n\n # Public programs\n isb_user = Django_User.objects.filter(username='isb').first()\n public_programs = Program.objects.filter(active=True, is_public=True, owner=isb_user)\n\n # User favorites\n favorite_list = VariableFavorite.get_list(user=request.user, version='v2')\n for fav in favorite_list:\n fav.variables = fav.get_variables()\n\n full_fave_count = VariableFavorite.get_list(user=request.user).count()\n\n program_attrs = {}\n\n for prog in public_programs:\n program_attrs[prog.id] = fetch_program_attr(prog.id, source_type=DataSource.BIGQUERY, data_type_list=[DataVersion.BIOSPECIMEN_DATA, DataVersion.CLINICAL_DATA],for_faceting=False)\n attr_codes = ClinicalColumnFeatureSupport.get_features_ids_for_column_names(list(program_attrs[prog.id].keys()))\n if 'not_found_columns' in attr_codes:\n new_keys = [x for x in list(program_attrs[prog.id].keys()) if x not in attr_codes['not_found_columns']]\n attr_codes = ClinicalColumnFeatureSupport.get_features_ids_for_column_names(new_keys)\n for attr in program_attrs[prog.id]:\n if attr in attr_codes['clinical_feature_ids']:\n program_attrs[prog.id][attr]['data_code'] = attr_codes['clinical_feature_ids'][attr]\n else:\n program_attrs[prog.id][attr]['data_code'] = 'v2:CLIN:'+attr\n\n # users can select from their saved variable favorites\n variable_favorites = VariableFavorite.get_list(request.user)\n\n has_user_data = (request.user.program_set.filter(active=True).count() > 0)\n\n all_nodes, all_programs = DataNode.get_node_programs(request.user.is_authenticated)\n\n context = {\n 'favorite_list' : favorite_list,\n 'full_favorite_list_count': full_fave_count,\n 'datatype_list' : datatype_list,\n 'data_attr' : data_attr,\n 'public_programs' : public_programs,\n 'base_url' : settings.BASE_URL,\n 'base_api_url' : settings.BASE_API_URL,\n 'variable_favorites' : variable_favorites,\n 'workbook' : workbook_model,\n 'worksheet' : worksheet_model,\n 'existing_variable_list' : existing_variable_list,\n 'new_workbook' : new_workbook,\n 'program_attrs' : program_attrs,\n 'has_user_data' : has_user_data,\n 'all_nodes': all_nodes,\n 'all_programs': all_programs\n }\n except Exception as e:\n 
logger.error(\"[ERROR] While attempting to initialize variable selection:\")\n logger.exception(e)\n return JsonResponse({'msg': \"There was an error while attempting to load the variable selection page - please contact the administrator.\"}, status=500)\n\n return render(request, template, context)\n\n\n@login_required\ndef variable_fav_delete(request, variable_fav_id):\n redirect_url = reverse('variables')\n if variable_fav_id:\n try:\n variable_fav_model = VariableFavorite.objects.get(id=variable_fav_id)\n if variable_fav_model.user == request.user:\n name = variable_fav_model.name\n variable_fav_model.destroy()\n messages.info(request, 'The variable favorite \\\"'+name+'\\\" has been deleted.')\n else:\n messages.error(request, 'You do not have permission to update this variable favorite list.')\n except ObjectDoesNotExist:\n messages.error(request, 'The variable list you want does not exist.')\n\n return redirect(redirect_url)\n\n\n@login_required\ndef variable_fav_copy(request, variable_fav_id):\n redirect_url = reverse('variables')\n if variable_fav_id:\n try:\n variable_fav_model = VariableFavorite.objects.get(id=variable_fav_id)\n if variable_fav_model.user == request.user:\n new_model = variable_fav_model.copy()\n messages.info(request, 'The variable favorite \\\"'+new_model.name+'\\\" has been copied from \\\"'+variable_fav_model.name+'\\\".')\n else:\n messages.error(request, 'You do not have permission to copy this variable favorite list.')\n except ObjectDoesNotExist:\n messages.error(request, 'The variable list you requested does not exist.')\n\n return redirect(redirect_url)\n\n\n@login_required\ndef variable_fav_save(request, variable_fav_id=0):\n try:\n body_unicode = request.body\n if type(body_unicode) is bytes:\n body_unicode = body_unicode.decode('utf-8')\n data = json.loads(body_unicode)\n result = {}\n\n name = data['name']\n blacklist = re.compile(BLACKLIST_RE, re.UNICODE)\n match = blacklist.search(str(name))\n if match:\n # XSS risk, log and fail this cohort save\n match = blacklist.findall(str(name))\n logger.error(\n '[ERROR] While saving a variable list, saw a malformed name: ' + name + ', characters: ' + match.__str__())\n messages.error(request, \"Your variable list's name contains invalid characters; please choose another name.\")\n result['error'] = \"Your variable list's name contains invalid characters; please choose another name.\"\n return HttpResponse(json.dumps(result), status=200)\n\n if variable_fav_id:\n try:\n variable_model = VariableFavorite.objects.get(id=variable_fav_id)\n if variable_model.user == request.user:\n variable_model.update(name = data['name'], variables = data['variables'])\n result['model'] = { 'id' : variable_model.id, 'name' : variable_model.name }\n else:\n result['error'] = 'You do not have permission to update this variable favorite list'\n messages.error(request, 'You do not have permission to update this variable favorite list')\n except ObjectDoesNotExist:\n messages.error(request, 'The variable list you want does not exist.')\n result['error'] = 'You do not have permission to update this variable favorite list'\n else:\n variable_model = VariableFavorite.create(name = data['name'],\n variables = data['variables'],\n user = request.user)\n result['model'] = { 'id' : variable_model['id'], 'name' : variable_model['name'] }\n\n return HttpResponse(json.dumps(result), status=200)\n except Exception as e:\n logger.error('[ERROR] Exception while saving variable favorite:')\n logger.exception(e)\n result['error'] = \"There was 
an error saving your variable favorite; it may not have been saved correctly.\"\n return HttpResponse(json.dumps(result), status=500)","repo_name":"isb-cgc/ISB-CGC-Webapp","sub_path":"variables/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16684,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"69"} +{"seq_id":"10679290961","text":"from haigha.writer import Writer\nfrom haigha.frames.frame import Frame\n\n\nclass ContentFrame(Frame):\n\n '''\n Frame for reading in content.\n '''\n\n @classmethod\n def type(cls):\n return 3\n\n @property\n def payload(self):\n return self._payload\n\n @classmethod\n def parse(self, channel_id, payload):\n return ContentFrame(channel_id, payload)\n\n @classmethod\n def create_frames(self, channel_id, buf, frame_max):\n '''\n A generator which will create frames from a buffer given a max\n frame size.\n '''\n size = frame_max - 8 # 8 bytes overhead for frame header and footer\n offset = 0\n while True:\n payload = buf[offset:(offset + size)]\n if len(payload) == 0:\n break\n offset += size\n\n yield ContentFrame(channel_id, payload)\n if offset >= len(buf):\n break\n\n def __init__(self, channel_id, payload):\n Frame.__init__(self, channel_id)\n self._payload = payload\n\n def __str__(self):\n if isinstance(self._payload, str):\n payload = ''.join(['\\\\x%s' % (c.encode('hex'))\n for c in self._payload])\n else:\n payload = str(self._payload)\n\n return \"%s[channel: %d, payload: %s]\" % (\n self.__class__.__name__, self.channel_id, payload)\n\n def write_frame(self, buf):\n '''\n Write the frame into an existing buffer.\n '''\n writer = Writer(buf)\n\n writer.write_octet(self.type()).\\\n write_short(self.channel_id).\\\n write_long(len(self._payload)).\\\n write(self._payload).\\\n write_octet(0xce)\n\n\nContentFrame.register()\n","repo_name":"agoragames/haigha","sub_path":"haigha/frames/content_frame.py","file_name":"content_frame.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":163,"dataset":"github-code","pt":"69"} +{"seq_id":"37855747703","text":"\"\"\"Canonicalize Large Args (and Results)\n\nThe goal of this step is to simplify code generation.\nWe want to avoid having to deal with values that do not fit into registers.\nThose values are primarily rec and sum-types. 
Slices would also fall into this\ncategory but we convert them to rec in the prior step.\n\nThe only place where we allow values that do not fit into register\nare let statements.\n\nEffects of this step:\n* Rewrite arguments and results which cannot be passed in registers\n as pointers.\n* Large results will become an extra pointer parameter appended to the end\n of the parameter list.\n\n\"\"\"\n\nfrom typing import Dict, Optional, Any, List\n\nfrom FrontEnd import identifier\nfrom FrontEnd import cwast\nfrom FrontEnd import type_corpus\nfrom FrontEnd import typify\n\n############################################################\n# Convert large parameter into pointer to object allocated\n# in the caller\n############################################################\n\n\ndef FindFunSigsWithLargeArgs(tc: type_corpus.TypeCorpus) -> Dict[Any, Any]:\n out = {}\n for fun_sig in list(tc.corpus.values()):\n if not fun_sig.is_fun():\n continue\n change = False\n params: List[type_corpus.TypeCorpus] = fun_sig.parameter_types()\n for n, p in enumerate(params):\n if not p.fits_in_register():\n params[n] = tc.insert_ptr_type(False, p)\n change = True\n result = fun_sig.result_type()\n if not result.is_void() and not result.fits_in_register():\n change = True\n params.append(tc.insert_ptr_type(True, result))\n result = tc.get_void_canon_type()\n if change:\n out[fun_sig] = tc.insert_fun_type(params, result)\n return out\n\n\ndef _FixupFunctionPrototypeForLargArgs(fun: cwast.DefFun, new_sig: cwast.CanonType,\n tc: type_corpus.TypeCorpus, id_gen: identifier.IdGen):\n old_sig: cwast.CanonType = fun.x_type\n typify.UpdateNodeType(fun, new_sig)\n result_changes = old_sig.result_type() != new_sig.result_type()\n if result_changes:\n assert new_sig.result_type().is_void()\n assert len(new_sig.parameter_types()) == 1 + \\\n len(old_sig.parameter_types())\n result_type = cwast.TypePtr(\n fun.result, mut=True, x_srcloc=fun.x_srcloc, x_type=new_sig.parameter_types()[-1])\n result_param = cwast.FunParam(id_gen.NewName(\n \"result\"), result_type, x_srcloc=fun.x_srcloc, res_ref=True)\n fun.params.append(result_param)\n fun.result = cwast.TypeBase(cwast.BASE_TYPE_KIND.VOID, x_srcloc=fun.x_srcloc,\n x_type=tc.get_void_canon_type())\n changing_params = {}\n\n # note: new_sig may contain an extra param at the end\n for p, old, new in zip(fun.params, old_sig.parameter_types(), new_sig.parameter_types()):\n if old != new:\n changing_params[p] = new\n p.type = cwast.TypePtr(p.type, x_srcloc=p.x_srcloc, x_type=new)\n p.arg_ref = True\n assert result_changes or changing_params\n return changing_params, result_changes\n\n\ndef RewriteLargeArgsCalleeSide(fun: cwast.DefFun, new_sig: cwast.CanonType,\n tc: type_corpus.TypeCorpus, id_gen: identifier.IdGen):\n changing_params, result_changes = _FixupFunctionPrototypeForLargArgs(\n fun, new_sig, tc, id_gen)\n\n # print([k.name for k, v in changing_params.items()], result_changes)\n\n def replacer(node, _) -> Optional[Any]:\n\n if isinstance(node, cwast.Id) and node.x_symbol in changing_params:\n new_node = cwast.ExprDeref(\n node, x_srcloc=node.x_srcloc, x_type=node.x_type)\n typify.UpdateNodeType(node, changing_params[node.x_symbol])\n return new_node\n\n if isinstance(node, cwast.StmtReturn) and node.x_target == fun and result_changes:\n result_param: cwast.FunParam = fun.params[-1]\n result_type: cwast.CanonType = result_param.type.x_type\n assert result_type.is_pointer()\n lhs = cwast.ExprDeref(\n cwast.Id(result_param.name, x_srcloc=node.x_srcloc,\n x_type=result_type, 
x_symbol=result_param),\n x_srcloc=node.x_srcloc, x_type=result_type.underlying_pointer_type())\n assign = cwast.StmtAssignment(\n lhs, node.expr_ret, x_srcloc=node.x_srcloc)\n node.expr_ret = cwast.ValVoid(x_srcloc=node.x_srcloc,\n x_type=tc.get_void_canon_type())\n return cwast.EphemeralList([assign, node], x_srcloc=node.x_srcloc)\n return None\n\n cwast.MaybeReplaceAstRecursivelyPost(fun, replacer)\n cwast.EliminateEphemeralsRecursively(fun)\n\n\ndef RewriteLargeArgsCallerSide(fun: cwast.DefFun, fun_sigs_with_large_args,\n tc: type_corpus.TypeCorpus, id_gen: identifier.IdGen):\n\n def replacer(call, _) -> Optional[Any]:\n if isinstance(call, cwast.ExprCall) and call.callee.x_type in fun_sigs_with_large_args:\n old_sig: cwast.CanonType = call.callee.x_type\n new_sig: cwast.CanonType = fun_sigs_with_large_args[old_sig]\n typify.UpdateNodeType(call.callee, new_sig)\n expr_body = []\n expr = cwast.ExprStmt(\n expr_body, x_srcloc=call.x_srcloc, x_type=call.x_type)\n # note: new_sig might be longer if the result type was changed\n for n, (old, new) in enumerate(zip(old_sig.parameter_types(),\n new_sig.parameter_types())):\n if old != new:\n new_def = cwast.DefVar(id_gen.NewName(f\"arg{n}\"),\n cwast.TypeAuto(\n x_srcloc=call.x_srcloc, x_type=old),\n call.args[n], ref=True,\n x_srcloc=call.x_srcloc)\n expr_body.append(new_def)\n name = cwast.Id(new_def.name,\n x_srcloc=call.x_srcloc, x_type=old, x_symbol=new_def)\n call.args[n] = cwast.ExprAddrOf(\n name, x_srcloc=call.x_srcloc, x_type=new)\n if len(old_sig.parameter_types()) != len(new_sig.parameter_types()):\n # the result is not a argument\n new_def = cwast.DefVar(id_gen.NewName(\"result\"),\n cwast.TypeAuto(x_srcloc=call.x_srcloc,\n x_type=old_sig.result_type()),\n cwast.ValUndef(x_srcloc=call.x_srcloc),\n mut=True, ref=True,\n x_srcloc=call.x_srcloc)\n name = cwast.Id(new_def.name, x_srcloc=call.x_srcloc,\n x_type=old_sig.result_type(), x_symbol=new_def)\n call.args.append(cwast.ExprAddrOf(\n name, mut=True, x_srcloc=call.x_srcloc, x_type=new_sig.parameter_types()[-1]))\n typify.UpdateNodeType(call, tc.get_void_canon_type())\n expr_body.append(new_def)\n expr_body.append(cwast.StmtExpr(call, x_srcloc=call.x_srcloc))\n expr_body.append(cwast.StmtReturn(\n expr_ret=name, x_srcloc=call.x_srcloc, x_target=expr))\n else:\n expr_body.append(cwast.StmtReturn(\n expr_ret=call, x_srcloc=call.x_srcloc, x_target=expr))\n return expr\n return None\n cwast.MaybeReplaceAstRecursivelyPost(fun, replacer)\n","repo_name":"robertmuth/Cwerg","sub_path":"FrontEnd/canonicalize_large_args.py","file_name":"canonicalize_large_args.py","file_ext":"py","file_size_in_byte":7733,"program_lang":"python","lang":"en","doc_type":"code","stars":286,"dataset":"github-code","pt":"69"} +{"seq_id":"37980093015","text":"from pyparsing import *\n\n\nvarname = Word(alphas+\"_\", alphanums+\"_\").setName('variableName')\n\n\ndef parse_call_string(s, loc, toks):\n return toks\n\n\nobject_syntax = Forward()\n\n\ncall_function = Forward().setName('func_call')\ncall_function <<= varname\ncall_function << nestedExpr(content=delimitedList(object_syntax))\n\n\ncall_constructor = Forward().setName('class_call')\ncall_constructor <<= varname\ncall_constructor << nestedExpr('{', '}', content=delimitedList(object_syntax))\n\n\ncall = Or([\n call_function,\n call_constructor,\n])\n\n\nadvanced_string_syntax = QuotedString('`', multiline=True, convertWhitespaceEscapes=False).setName('`string`')\nstring_syntax = (quotedString ^ advanced_string_syntax).setName('string')\n\n\nobject_syntax <<= 
Or([\n string_syntax,\n call,\n])\n\n\nnew_parser = [\n varname,\n Literal('=').suppress().setName('='),\n object_syntax,\n]\nnew_parser = And(new_parser)","repo_name":"Gaming32/Zone","sub_path":"zone/old_pyparsing_parsers/specific_parsers.py","file_name":"specific_parsers.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"39947074688","text":"# -*- coding:utf-8 -*-\n# @Source :\n# @Author : lewen.cai\n# @Date : 2022/11/15\n# @Description :\nimport logging\nimport shutil\nfrom prettytable import prettytable\n\nfrom pystdf.IO import Parser\nfrom pystdf.Writers import XmlWriter\nimport xml.sax\nimport sys\nimport os\n\n_logger = logging.getLogger(__name__)\n\nif getattr(sys, \"frozen\", False):\n WORKING_FOLDER = os.path.dirname(os.path.dirname(sys.executable))\nelse:\n WORKING_FOLDER = os.path.dirname((os.path.dirname(os.path.realpath(__file__))))\nXML_EXPECT_FILE = os.path.join(\n WORKING_FOLDER, \"ParseAndValidation\", \"goldenFiles\", \"dc_golden.xml\"\n)\nXML_EXPECT_DC_FILE = os.path.join(\n WORKING_FOLDER, \"ParseAndValidation\", \"goldenFiles\", \"dc_golden.xml\"\n)\nXML_EXPECT_K7_FILE = os.path.join(\n WORKING_FOLDER, \"ParseAndValidation\", \"goldenFiles\", \"k7_golden.xml\"\n)\n\n\n# SITE_NUM=\"1\" PART_FLG=\"1\"\nclass StdfHander(xml.sax.handler.ContentHandler):\n def __init__(self, expect_fail_ptr):\n super().__init__()\n self.expect_fail_ptr = expect_fail_ptr\n self.CurrentData = \"\"\n self.ptr_fail_details = dict()\n self.data_details = dict()\n self.site_num = \"0\"\n self.sites_time = dict()\n self.__loops_time = list()\n\n def startElement(self, name, attrs):\n self.CurrentData = name\n if self.CurrentData == \"Ptr\":\n result = attrs[\"RESULT\"]\n lo_limit = attrs[\"LO_LIMIT\"]\n hi_limit = attrs[\"HI_LIMIT\"]\n head_num = attrs[\"HEAD_NUM\"]\n site_num = attrs[\"SITE_NUM\"]\n key = (\"Ptr\", head_num, site_num)\n test_txt = attrs[\"TEST_TXT\"]\n if not self.data_details.get(key):\n self.data_details[key] = set()\n self.data_details[key].add(test_txt)\n if result and float(lo_limit) <= float(result) <= float(hi_limit):\n if self.expect_fail_ptr.get((\"Ptr\", head_num, site_num, test_txt)):\n self.ptr_fail_details[\n (\"Ptr\", head_num, site_num, test_txt)\n ] = \",\".join([lo_limit, result, hi_limit])\n else:\n if not self.expect_fail_ptr.get((\"Ptr\", head_num, site_num, test_txt)):\n self.ptr_fail_details[\n (\"Ptr\", head_num, site_num, test_txt)\n ] = \",\".join([lo_limit, result, hi_limit])\n\n if self.CurrentData == \"Prr\":\n head_num = attrs[\"HEAD_NUM\"]\n site_num = attrs[\"SITE_NUM\"]\n if site_num <= self.site_num:\n self.__loops_time.append(self.sites_time)\n self.sites_time = dict()\n self.sites_time[site_num] = int(attrs[\"TEST_T\"])\n self.site_num = site_num\n part_flg = attrs[\"PART_FLG\"]\n hard_bin = attrs[\"HARD_BIN\"]\n soft_bin = attrs[\"SOFT_BIN\"]\n key = (\"Prr\", head_num, site_num)\n if not self.data_details.get(key):\n self.data_details[key] = set()\n self.data_details[key].add(\"_\".join([part_flg, hard_bin, soft_bin]))\n\n if self.CurrentData == \"Ftr\":\n test_txt = attrs[\"TEST_TXT\"]\n test_flg = attrs[\"TEST_FLG\"]\n rslt_txt = attrs[\"RSLT_TXT\"]\n head_num = attrs[\"HEAD_NUM\"]\n site_num = attrs[\"SITE_NUM\"]\n key = (\"Ftr\", head_num, site_num, test_txt)\n if not self.data_details.get(key):\n self.data_details[key] = set()\n self.data_details[key].add(\"_\".join([test_flg, rslt_txt]))\n\n if self.CurrentData == \"Hbr\":\n hbin_num 
= attrs[\"HBIN_NUM\"]\n head_num = attrs[\"HEAD_NUM\"]\n site_num = attrs[\"SITE_NUM\"]\n key = (\"Hbr\", head_num, site_num)\n if not self.data_details.get(key):\n self.data_details[key] = set()\n self.data_details[key].add(hbin_num)\n\n if self.CurrentData == \"Sbr\":\n sbin_num = attrs[\"SBIN_NUM\"]\n head_num = attrs[\"HEAD_NUM\"]\n site_num = attrs[\"SITE_NUM\"]\n key = (\"Sbr\", head_num, site_num)\n if not self.data_details.get(key):\n self.data_details[key] = set()\n self.data_details[key].add(sbin_num)\n\n def endDocument(self):\n if self.sites_time:\n self.__loops_time.append(self.sites_time)\n\n def get_results(self):\n return self.data_details, self.ptr_fail_details, self.__loops_time\n\n def get_loops_time(self):\n return self.__loops_time\n\n\nclass __Stdf2xml_parser(object):\n def __init__(self, path, verify_mode=None):\n self.__loops_time = list()\n self.std_fail_ptr = dict()\n self.stdf2xml = self.__process_file(path)\n if verify_mode == \"dc\":\n xml_expect = XML_EXPECT_DC_FILE\n elif verify_mode == \"k7\":\n xml_expect = XML_EXPECT_K7_FILE\n else:\n xml_expect = XML_EXPECT_FILE\n shutil.copyfile(xml_expect, os.path.join(os.path.dirname(path), \"selfCal.xml\"))\n self.__expect_details, self.expect_fail_ptr, _ = self.__parse_results(\n xml_expect, self.std_fail_ptr\n )\n _logger.info(\"以下是标准文件里面不在limit范围内的测试项:\")\n self.expect_fail_ptr and self.pretty_print_ptr(self.expect_fail_ptr)\n self.std_fail_ptr = self.expect_fail_ptr\n (\n self.__actual_details,\n self.__ptr_nonlimited_failures,\n self.__loops_time,\n ) = self.__parse_results(self.stdf2xml, self.std_fail_ptr)\n\n self.__lost_items_failures = dict()\n self.__neq_failures = dict()\n self.process_results()\n\n def __process_file(self, stdf_file):\n with open(stdf_file, \"rb\") as f:\n p = Parser(inp=f)\n out_file = stdf_file[: stdf_file.rfind(\".\")] + \".xml\"\n with open(out_file, \"w\") as fout:\n p.addSink(XmlWriter(stream=fout))\n p.parse()\n return out_file\n\n def __parse_results(self, xml_path, expect_fail_ptr):\n parser = xml.sax.make_parser()\n parser.setFeature(xml.sax.handler.feature_namespaces, 0)\n stdfHander = StdfHander(expect_fail_ptr)\n parser.setContentHandler(stdfHander)\n parser.parse(xml_path)\n return stdfHander.get_results()\n\n def is_passed(self):\n if (\n self.__lost_items_failures\n or self.__neq_failures\n or self.__ptr_nonlimited_failures\n ):\n _logger.info(\"tsdata has_exception->1\")\n return False\n return True\n\n def process_results(self):\n for key in self.__expect_details:\n if not self.__actual_details.get(key):\n self.__lost_items_failures[key] = self.__expect_details[key]\n elif self.__actual_details[key] != self.__expect_details[key]:\n lost_items = [\n i\n for i in self.__expect_details[key]\n if i not in self.__actual_details[key]\n ]\n if lost_items:\n self.__lost_items_failures[key] = lost_items\n neq_items = [\n i\n for i in self.__actual_details[key]\n if i not in self.__expect_details[key]\n ]\n if neq_items:\n self.__neq_failures[key] = neq_items\n\n def get_fail_results(self):\n return (\n self.__lost_items_failures,\n self.__ptr_nonlimited_failures,\n self.__neq_failures,\n )\n\n def get_loops_time(self):\n return self.__loops_time\n\n def pretty_print_ptr(self, ptr_dict):\n table = prettytable.PrettyTable(\n [\n \"test_type\",\n \"head_num\",\n \"site_num\",\n \"test_item\",\n \"l_limit\",\n \"value\",\n \"h_limit\",\n ]\n )\n for f_key, f_value in ptr_dict.items():\n print_list = list(f_key)\n print_list.extend(f_value.split(\",\"))\n table.add_row(print_list)\n 
_logger.info(table)\n\n\ndef parse(path, verify_mode=None):\n    return __Stdf2xml_parser(path, verify_mode=verify_mode)\n\n\nif __name__ == \"__main__\":\n    fin = r\"\\\\172.18.32.10\\Public_folder\\temp\\zxy\\SW_1.1.2\\data\\K7\\FlowLog_2023_05_26_16_49_33.799.stdf\"\n    stdf = parse(fin, verify_mode=\"k7\")\n    lost_items_failures, ptr_nonlimited_failures, neq_failures = stdf.get_fail_results()\n    print(lost_items_failures)\n    print(ptr_nonlimited_failures)\n    print(neq_failures)\n    print(stdf.is_passed())\n","repo_name":"Tristonlong/react-electron","sub_path":"ParseAndValidation/Stdf_parser.py","file_name":"Stdf_parser.py","file_ext":"py","file_size_in_byte":8647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"5100287576","text":"# Frame-Stewart recurrence for the n-disc, k-peg Tower of Hanoi\ndp = []\ndef F(n, k):\n    if k==3: return 2**n - 1\n    if n==0: return 0\n    if k<=2: return float(\"inf\")\n    if n==1: return 1\n    if dp[n][k]==-1:\n        dp[n][k] = 2*F(n-1, k)+1\n        for m in range(1, n-1):\n            # move m discs aside, the rest with k-1 pegs, then the m discs back\n            dp[n][k] = min(dp[n][k], 2*F(m, k)+F(n-m, k-1))\n    return dp[n][k]\nn = 10\ndp = [[-1 for j in range(2*n)] for i in range(2*n)]\n\nfor i in range(0, n):\n    # print(i)\n    for j in range(3, n):\n        print(F(i, j), end = ' ')\n    print()\n\n# print(F(99, 100))","repo_name":"br0der/VsCode","sub_path":"4-hanoi.py","file_name":"4-hanoi.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"11391016666","text":"import os\nimport readchar\n\nnumero = 0\nos.system('cls' if os.name == 'nt' else 'clear')\nprint(numero)\n\n\ndef imprimir(n):\n    os.system('cls' if os.name == 'nt' else 'clear')\n    print(n)\n\n\nwhile True:\n    k = readchar.readkey()\n    if k == \"n\" or k == \"N\":\n        numero += 1\n        imprimir(numero)\n        if numero == 50:\n            break\n    ","repo_name":"BJPulgarin/proyecto_integrador_ADA","sub_path":"parte_3.py","file_name":"parte_3.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"28188971832","text":"from django.urls import path\nfrom app import views\nfrom .api import PersonViewSet\nfrom rest_framework import routers\n\n\nrouter = routers.DefaultRouter()\nrouter.register('api/person', PersonViewSet, 'person')\n\nurlpatterns = [\n    path('', views.home, name=\"home\"),\n    path('about/', views.about_us, name=\"about-us\"),\n    path('person/delete/', views.delete_person, name=\"person-delete\"),\n    path('person/create', views.create_person, name=\"person-create\"),\n    path('person/update/', views.update_person, name=\"person-update\"),\n    path('employee/delete/', views.delete_employee, name='employee-delete'),\n    path('employee/create/', views.create_employee, name=\"employee-create\"),\n] + router.urls\n","repo_name":"maxwellnewage/it-formacion-django-gestion-empleados","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"26814389782","text":"import numpy as np\nfrom numpy import pi\nimport pandas as pd\nimport click\n\n\n@click.group()\ndef cmd_group():\n    \"\"\"Commands that print tables of trigonometric values.\"\"\"\n    pass\n\n\n@cmd_group.command()\n@click.option(\n    \"-n\",\n    \"--number\",\n    default=10,\n)\n# @click.argument(\"number\")\ndef sin(number):\n    \"\"\"gives a list of autogenerated sin results\n\n    Args:\n        number (int): give the amount of pieces you want to cut your range of 0-2 
pi into\n    \"\"\"\n    x = np.linspace(0, 2 * pi, number)\n    df = pd.DataFrame({\"x\": x, \"sin (x)\": np.sin(x)})\n    print(df)\n    return\n\n\n@cmd_group.command()\n@click.option(\n    \"-n\",\n    \"--number\",\n    default=10,\n)\n# @click.argument(\"number\")\ndef tan(number):\n    \"\"\"gives a list of autogenerated tan results\n\n    Args:\n        number (int): give the amount of pieces you want to cut your range of 0-2 pi into\n    \"\"\"\n    x = np.linspace(0, 2 * pi, number)\n    df = pd.DataFrame({\"x\": x, \"tan (x)\": np.tan(x)})\n    print(df)\n    return\n\n\nif __name__ == \"__main__\":\n    cmd_group()\n","repo_name":"Isayuzzz/smallangle","sub_path":"src/smallangle.py","file_name":"smallangle.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"36522632130","text":"from django.db import models\nfrom django.conf import settings\n\n\n\nclass goal(models.Model):\n    sequence_choices = [\n        ('Right now', 'Right now'),\n        ('1', 'First'),\n        ('2', 'Second'),\n        ('3', 'Third'),\n        ('4', 'Fourth'),\n        ('5', 'Fifth'),\n        ('6', 'Sixth'),\n        ('7', 'Seventh'),\n        ('8', 'Eighth'),\n        ('9', 'Ninth'),\n        ('10', 'Tenth'),\n    ]\n\n    executor = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n    title = models.CharField(max_length=60, blank=True, null=False, help_text=\"Text your goal here...\")\n    priority = models.CharField(choices=sequence_choices, default='Right now', max_length=10)\n    status = models.CharField(default=\"In progress\", blank=False, max_length=20)\n\n    def get_absolute_url(self):\n        return '/my-goals/'","repo_name":"CleRIeQ/summer-todo-site","sub_path":"summertodosite/user/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"38416012770","text":"import os\nimport shutil\n\n\n# Checks if the folders are identical. If so we are good. 
If not:\n#   If we are in a dev workspace (as evidenced by a file existing), then we raise an exception.\n#   If we are not in a dev workspace, then we remove any existing extensions/ folder and replace it with a new one.\ndef sync_extensions_code():\n    def check_identical_file_content(path1: str, path2: str):\n        with open(path1, 'r') as f:\n            content1 = f.read()\n        with open(path2, 'r') as f:\n            content2 = f.read()\n        return (content1 == content2)\n\n    def check_identical_dir_content(d1: str, d2: str):\n        if not os.path.isdir(d1):\n            print(f'Directories not equal: Not a directory: {d1}')\n            return False\n        if not os.path.isdir(d2):\n            print(f'Directories not equal: Not a directory: {d2}')\n            return False\n\n        excludes = {'__pycache__'}\n        f1 = [a for a in os.listdir(d1) if a not in excludes]\n        f2 = [a for a in os.listdir(d2) if a not in excludes]\n        if len(f1) != len(f2):\n            print(f'Directories not equal: in {d1} and {d2}')\n            return False\n        for a in f1:\n            if a not in f2:\n                return False\n            path1 = d1 + '/' + a\n            path2 = d2 + '/' + a\n            if os.path.isfile(path1):\n                if not os.path.isfile(path2):\n                    print(f'Directories not equal: Missing file: {path2}')\n                    return False\n                if not check_identical_file_content(path1, path2):\n                    print(f'Directories not equal: files are not the same: {path1} {path2}')\n                    return False\n            elif os.path.isdir(path1):\n                if not os.path.isdir(path2):\n                    print(f'Directories not equal: not a directory: {path2}')\n                    return False\n                if not check_identical_dir_content(path1, path2):\n                    return False\n        return True\n\n    thisdir = os.path.dirname(os.path.realpath(__file__))\n    development_workspace = os.path.exists('../codesync.txt')\n    E1 = thisdir + '/../../src/extensions'\n    E2 = thisdir + '/src/extensions'\n    if check_identical_dir_content(E1, E2):\n        print('Code for extensions/ already in sync')\n    else:\n        if development_workspace:\n            print(f'The following directories are not in sync: {E1} {E2}')\n            print(f'This appears to be a development workspace because the ../codesync.txt file exists.')\n            print(f'To resolve this issue, synchronize the two directories, or remove ../codesync.txt')\n            raise Exception('The extensions directories are not in sync. 
See warnings above.')\n else:\n print(f'Copying extensions/ code from {E1} to {E2}')\n if os.path.exists(E2):\n shutil.rmtree(E2)\n shutil.copytree(E1, E2)\n\nif __name__ == \"__main__\":\n sync_extensions_code()\n","repo_name":"magland/labbox-ephys","sub_path":"jupyterlab/labbox_ephys_widgets_jp/sync_extensions_code.py","file_name":"sync_extensions_code.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"} +{"seq_id":"20501745327","text":"'''\npoint = 10000일때까지 좌표를 찾아보면서\narr범위를 찾음\n'''\n\nimport sys\nsys.stdin = open('input.txt', 'r')\n\ndef new_method1(a, b):\n x = y = point = 1\n while True:\n if x == a and y == b:\n return point\n if y == 1:\n y += x\n x = 1\n else:\n y -= 1\n x += 1\n point+= 1\ndef new_method2(num):\n x = y = point = 1\n while True:\n if point == num:\n return x, y\n if y == 1:\n y += x\n x = 1\n else:\n y -= 1\n x += 1\n point += 1\n\n\nT = int(input())\nfor tc in range(1,T+1):\n p, q = map(int, input().split())\n p = new_method2(p)\n q = new_method2(q)\n r = p[0]+q[0], p[1]+q[1]\n print(f'#{tc} {new_method1(r[0], r[1])}')","repo_name":"Juaaang/pyc_algorithm","sub_path":"1493.py","file_name":"1493.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"21876472446","text":"from os import path\nimport os\nimport json\nfrom from_root import from_root\n\ndef appendCommandInConfig(command='command'):\n if(not path.exists(from_root('config/commands.json'))):\n with open(from_root('config/commands.json'), 'w') as f:\n json.dump({'commands' : {}}, f, indent=2)\n try:\n f = open(from_root('config/commands.json'), 'r')\n try:\n data = json.load(f)\n\n if ('/' in command):\n filePath = os.path.basename(command)\n dirPath = os.path.dirname(command)\n\n if(dirPath not in data['commands']):\n data[\"commands\"].update({dirPath: {\"authorization\": [], \"arguments\": {}}})\n\n with open(from_root('config/commands.json'), 'w') as f:\n json.dump(data, f, indent=2)\n\n if('commands' not in data['commands'][dirPath]):\n data['commands'][dirPath]['commands'] = {}\n\n if(filePath not in data['commands'][dirPath]['commands']):\n\n data['commands'][dirPath]['commands'][filePath] = {}\n data['commands'][dirPath]['commands'][filePath] = {\"authorization\":[], \"arguments\":{}}\n\n\n if ('arguments' in data['commands'][dirPath]):\n del data['commands'][dirPath]['arguments']\n # data['commands'][dirPath]['commands'][filePath].append({\"authorization\":[], \"arguments\":[]})\n #\n with open(from_root('config/commands.json'), 'w') as f:\n json.dump(data, f, indent=2)\n return 'Command configuration dumped.'\n\n else:\n if (command not in data['commands']):\n append = {\n command: {\n \"authorization\": [],\n \"arguments\": {},\n }\n }\n data[\"commands\"].update(append)\n\n with open(from_root('config/commands.json'), 'w') as f:\n json.dump(data,f,indent=2)\n\n return 'Command configuration dumped.'\n\n except Exception as e:\n print(str(e))\n except Exception as e:\n print(str(e))\n\n\ndef generateCommand(command='my-command'):\n if ('/' in command):\n filePath = os.path.basename(command)\n dirPath = os.path.dirname(command)\n if(not path.exists('commands/' + dirPath + '/' + filePath + '.py')):\n\n if not os.path.exists('commands/'+ dirPath):\n os.makedirs('commands/' + dirPath)\n else:\n pass\n\n\n content = r\"\"\"import discord\nimport asyncio\n\nclass \"\"\"+filePath+\"\"\":\n def __init__(self, bot, ctx, args, 
authorization, inputArguments):\n        self.bot = bot\n        self.ctx = ctx\n        self.authorization = authorization\n        self.args = args\n        self.inputArguments = inputArguments\n\n\n    async def main(self):\n        await self.ctx.channel.send(\"```This is the \"\"\"+filePath+\"\"\" command output within commands folder.```\")\n    \"\"\"\n\n            openFile = open('commands/' + dirPath + '/' + filePath + '.py', \"w\")\n            openFile.write(content)\n            openFile.close()\n\n        if(not path.exists('commands/' + dirPath + '.py')):\n\n            fileContents = r\"\"\"import discord\nimport asyncio\n\nclass \"\"\" + dirPath + \"\"\":\n    def __init__(self, bot, ctx, args, authorization, inputArguments):\n        self.bot = bot\n        self.ctx = ctx\n        self.authorization = authorization\n        self.args = args\n        self.inputArguments = inputArguments\n\n\n    async def main(self):\n        await self.ctx.channel.send(\"```This is the \"\"\" + dirPath + \"\"\" command output within commands folder.```\")\n    \"\"\"\n\n\n            f = open('commands/' + dirPath + '.py', \"w\")\n            f.write(fileContents)\n            f.close()\n\n        return \"Command file \" + filePath + \".py created in commands/\" + dirPath\n    else:\n        if (path.exists('commands/' + command + '.py')):\n            return 'Could not generate command file. The file [commands/'+command+'.py] already exists. Remove it and try again.'\n        fileContents = r\"\"\"import discord\nimport asyncio\n\nclass \"\"\" + command.replace('-','') + \"\"\":\n    def __init__(self, bot, ctx, args, authorization, inputArguments):\n        self.bot = bot\n        self.ctx = ctx\n        self.authorization = authorization\n        self.args = args\n        self.inputArguments = inputArguments\n\n\n    async def main(self):\n        await self.ctx.channel.send(\"```This is the \"\"\" + command + \"\"\" command output within commands folder.```\")\n    \"\"\"\n\n        f = open('commands/' + command + '.py', \"w\")\n        f.write(fileContents)\n        f.close()\n\n        return \"Command file \" + command + \".py created in commands/\"","repo_name":"alexanderthegreat96/dth-discord-py","sub_path":"bin/commands/generateCommand.py","file_name":"generateCommand.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"19109783235","text":"from django.urls import path\nfrom .views import TaskCategoryListAPIView, TaskCategoryRetrieveAPIView, TaskCategoryCreateAPIView, TaskListAPIView, \\\n    TaskRetrieveAPIView, TaskCreateAPIView, TaskCategoryUpdateAPIView, TaskUpdateAPIView, TaskDeleteAPIView, \\\n    TaskCategoryDeleteAPIView\n\nurlpatterns = [\n    path('task/category/list/all/', TaskCategoryListAPIView.as_view(), name='task_category_list'),\n    path('task/category/detail/<int:pk>/', TaskCategoryRetrieveAPIView.as_view(), name='task_category_detail'),\n    path('task/category/create/', TaskCategoryCreateAPIView.as_view(), name='task_category_create'),\n    path('task/category/update/<int:pk>/', TaskCategoryUpdateAPIView.as_view(), name='task_category_update'),\n    path('task/category/delete/<int:pk>/', TaskCategoryDeleteAPIView.as_view(), name='task_category_delete'),\n\n    path('task/list/all/', TaskListAPIView.as_view(), name='task_list'),\n    path('task/detail/<int:pk>/', TaskRetrieveAPIView.as_view(), name='task_detail'),\n    path('task/create/', TaskCreateAPIView.as_view(), name='task_create'),\n    path('task/update/<int:pk>/', TaskUpdateAPIView.as_view(), name='task_update'),\n    path('task/delete/<int:pk>/', TaskDeleteAPIView.as_view(), 
name='task_delete'),\n\n]","repo_name":"NazmulMilon/oms","sub_path":"online_medicine_shop/tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"24078336975","text":"import requests\nfrom datetime import date\n\nAPI_URL_POLAND_DATA = 'https://coronavirus-19-api.herokuapp.com/countries/Poland'\nAPI_URL_WORLD_DATA = 'https://coronavirus-19-api.herokuapp.com/countries'\n\nresponse = requests.get(API_URL_POLAND_DATA)\nresponse_world = requests.get(API_URL_WORLD_DATA)\n\ndef get_daily_stats(response):\n    try:\n        all_data_list = response.json()\n        formatted_data = f'''\n        COVID Poland DAILY STATS:\n        As of {date.today()}\n        ******************************\n        Total Confirmed Cases : {all_data_list['cases']}\n        Total Recovered Cases : {all_data_list['recovered']}\n        Total Deaths Reported: {all_data_list['deaths']}\n        Confirmed Cases Yesterday: {all_data_list['todayCases']}\n        Deaths Reported Yesterday: {all_data_list['todayDeaths']}\n        ******************************\n        ''' \n        print(formatted_data)\n    except:\n        print('An error occurred while processing data')\n\ndef get_top5_countries_with_active_cases(response):\n    try:\n        all_countries_data = response.json()\n        for country in all_countries_data:\n            if country['active'] is None:\n                country['active'] = 0\n        all_countries_data.sort(key=lambda x: x['todayCases'], reverse=True)\n        top5_countries = all_countries_data[1:6]\n        print('Top 5 countries with most cases today in the world:')\n        for index, state in enumerate(top5_countries):\n            formatted_data = f'''\n            ********{index + 1}*************\n            State: {state['country']}\n            Today: {state['todayCases']}\n            Active: {state['active']}\n            Total Confirmed : {state['cases']} \n            ***************************\n            '''\n            print(formatted_data)\n    except Exception as error:\n        print(f'An error occurred while processing data, {error}')\n\nif __name__ == \"__main__\":\n    #get_daily_stats(response)\n    get_top5_countries_with_active_cases(response_world)","repo_name":"zixk/pyBasics","sub_path":"modules/requests/covid_tracker_poland.py","file_name":"covid_tracker_poland.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"43646446144","text":"from flask import Flask, render_template, request, flash, redirect, url_for\nfrom io import BytesIO\n\nimport logging\nimport os\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nimport base64\n\n\napp = Flask(__name__)\napp.secret_key = os.environ.get(\"APP_SECRET_KEY\")\n\n# Configure logger\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)\nlogger = logging.getLogger(\"Benford's Law Flask App\")\n\n\n@app.route(\"/\")\ndef home():\n    \"\"\"Render the home page.\"\"\"\n    logger.info(\"Home page requested\")\n    return render_template(\"index.html\")\n\n\n# Allowed file extensions\nALLOWED_EXTENSIONS = {\"csv\"}\n\n\ndef allowed_file(filename):\n    \"\"\"Check if the file extension is allowed.\"\"\"\n\n    return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.route(\"/upload\", methods=[\"GET\", \"POST\"])\ndef upload():\n \"\"\"Upload the CSV file and return the list of columns rendered in the column.html template.\"\"\"\n logger.info(\"File upload requested\")\n\n file = request.files[\"csvfile\"]\n selected_delimiter = request.form.get('delimiter')\n\n # Check if the file is selected\n if file.filename == \"\":\n flash(\"Please select a file.\", \"error\")\n logger.error(\"No file selected\")\n return redirect(url_for(\"home\"))\n\n # Check if the delimiter is selected\n if selected_delimiter == \"Select...\":\n flash(\"Please select a delimiter.\", \"error\")\n logger.error(\"No delimiter selected\")\n return redirect(url_for(\"home\"))\n\n # Check if the file is allowed\n if file and allowed_file(file.filename):\n file.save(\"uploaded.csv\")\n logger.info(\"File uploaded successfully\")\n\n df = pd.read_csv(\"uploaded.csv\", delimiter=selected_delimiter, engine=\"python\")\n app.config[\"dataframe\"] = df # Store the dataframe in the app config\n columns = df.columns\n logger.info(\"Columns: %s\", columns)\n\n logger.info(\"Column page requested\")\n return render_template(\"column.html\", columns=columns)\n\n flash(\"Invalid file format. Please select a CSV file.\", \"error\")\n logger.error(\"Invalid file format\")\n return redirect(url_for(\"home\"))\n\n\ndef get_first_digit(number):\n \"\"\"Return the first digit of the number.\"\"\"\n return int(str(number)[0])\n\n\n@app.route(\"/analyze\", methods=[\"POST\"])\ndef analyze():\n \"\"\"Analyze the selected column and return the result rendered in the result.html template.\"\"\"\n logger.info(\"Analysis requested\")\n\n selected_column = request.form.getlist(\"selected_column\")[0]\n df = app.config.get(\"dataframe\")[selected_column]\n\n counts = [0] * 9 # List to store the count of each first digit (from 1 to 9)\n total = 0 # Total count of numbers\n\n for num in df:\n first_digit = get_first_digit(num) # TODO: Handle errors here\n counts[first_digit - 1] += 1\n total += 1\n\n # List of observed frequencies\n observed_frequencies = [count / total for count in counts]\n\n # Expected frequencies based on Benford's Law\n expected_frequencies = [0.301, 0.176, 0.125, 0.097, 0.079, 0.067, 0.058, 0.051, 0.046]\n\n # Calculate the d-statistic\n d_statistic = math.sqrt(\n sum((observed - expected) ** 2 for observed, expected in zip(observed_frequencies, expected_frequencies)) / sum(\n expected_frequencies) ** 2)\n\n # Interpretation thresholds\n small_threshold = 0.01\n moderate_threshold = 0.2\n\n # Logging analysis details\n logger.info(\"Selected column: %s\", selected_column)\n logger.info(\"Total numbers: %d\", total)\n logger.info(\"Observed frequencies: %s\", observed_frequencies)\n logger.info(\"D-statistic: %f\", d_statistic)\n\n # Determine the interpretation\n if d_statistic < small_threshold:\n interpretation = \"The observed frequencies closely match the expected frequencies based on Benford's Law. \" \\\n \"This indicates a high level of conformity to Benford's Law.\"\n elif d_statistic < moderate_threshold:\n interpretation = \"The observed frequencies deviate slightly from Benford's Law but still within an \" \\\n \"acceptable range. It suggests a reasonable level of conformity to Benford's Law \" \\\n \"with minor discrepancies.\"\n else:\n interpretation = \"The observed frequencies deviate significantly from Benford's Law. 
It indicates a \" \\\n \"notable deviation and divergence from the expected frequencies.\"\n\n # Calculate the range based on the d-statistic threshold\n range_low = [expected * (1 - moderate_threshold) for expected in expected_frequencies]\n range_high = [expected * (1 + moderate_threshold) for expected in expected_frequencies]\n\n # Plot the observed and expected frequencies\n digits = list(range(1, 10))\n plt.figure()\n plt.bar(digits, observed_frequencies, color=\"skyblue\", label='Observed')\n plt.plot(digits, expected_frequencies, 'bo-', label='Benford')\n plt.fill_between(digits, range_low, range_high, color='lightcoral', alpha=0.3, label='Acceptable Range')\n plt.xlabel(\"First Digit\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Expected vs Observed Frequencies\")\n plt.legend()\n plt.xlim(0.5, 9.5) # Remove the 0 value and adjust x-axis limits\n plt.margins(0.05) # Adjust plot margins\n\n # Logging interpretation and plot generation\n logger.info(\"Interpretation: %s\", interpretation)\n logger.info(\"Plot generated.\")\n\n # Convert the plot to bytes\n buf = BytesIO()\n plt.savefig(buf, format=\"png\")\n buf.seek(0)\n plot = base64.b64encode(buf.getvalue()).decode(\"utf-8\")\n plt.close() # Close the figure so repeated requests do not accumulate open figures\n\n logger.info(\"Result page requested\")\n return render_template(\"result.html\", plot=plot, d_statistic=d_statistic, interpretation=interpretation)\n\n\n@app.route(\"/restart\", methods=[\"GET\"])\ndef restart():\n \"\"\"Restart the application by redirecting to the home page.\"\"\"\n logger.info(\"Restarting the application.\")\n return redirect(url_for(\"home\"))\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(debug=False, host=\"0.0.0.0\", port=port)\n","repo_name":"DevGlitch/benfords-law-flask-app","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"30584954782","text":"# -*- coding: utf-8 -*-\n\nfrom pygame.locals import K_c, K_x, K_z, K_SPACE, K_LEFT, K_RIGHT, K_UP, K_DOWN, K_LSHIFT, K_RSHIFT, K_LCTRL, K_RCTRL, K_ESCAPE, K_RETURN, K_F1, K_F2\nfrom math import ceil\n\nfrom src.constants.activities import LOCK_DELAY, FIXED_GOAL, LEVEL_CAP, COUNTDOWN_FRAMES\nfrom src.constants.gui import REFRESH_RATE\nfrom src.activities.activity import Activity\n\nfrom src.grid import Grid\nfrom src.random_bag import RandomBag\nfrom src.tetriminos.playable_tetrimino import PlayableTetrimino\nfrom src.score import Score\n\n\nclass Game(Activity):\n \"\"\"Class Game. 
Implements screen updates, reactions to key presses\n and releases, and the game engine while the program is showing the game.\"\"\"\n \n def __init__(self, window, sound):\n \"\"\"Constructor for the class Game.\"\"\"\n Activity.__init__(self, window, sound)\n\n self._window.init_game()\n self._sound.use_game_music()\n \n self._grid = Grid()\n self._random = RandomBag()\n self._score_keeper = Score()\n \n self._state = \"countdown\"\n self._countdown_timer = COUNTDOWN_FRAMES\n self._swap_allowed = True\n self._show_fps_counter = False\n self._keys_down = {}\n \n self._held_tetrimino = None\n self._next_queue = None\n self._current_tetrimino = None\n self._level = 1\n self._goal = FIXED_GOAL\n self._lines_cleared = 0\n self._spawn_tetrimino() \n \n \n \n def __del__(self):\n \"\"\"Destructor for the class Game.\"\"\"\n self._sound.use_menu_music()\n self._window.end_game()\n \n \n def _spawn_tetrimino(self):\n \"\"\"Gets the next tetrimino to be spawned from the random generator, and\n attempts spawning it. If collisions do not allow it to spawn there (block out), \n changes the game state to gameover and plays a sound effect.\"\"\"\n current_piece, self._next_queue = self._random.next_pieces()\n self._current_tetrimino = PlayableTetrimino(current_piece, self._grid) \n if self._current_tetrimino.blocked_out:\n self._state = \"gameover\"\n self._sound.play_sound_effect('game_gameover')\n\n\n def _fall_tetrimino(self):\n \"\"\"Method called once per screen update when the game state is running.\n This method makes the current tetrimino fall by the right amount if possible\n (accounting for level, soft drops, hard drops, ...), and calls method to \n lock it into place if it stayed on the ground long enough. Also plays sound \n effects on hard drops, soft drops and landing.\"\"\"\n if self._keys_down.get(K_SPACE, False):\n n_lines_dropped, landed = self._current_tetrimino.move_down(self._grid, self._level, \"hard\")\n self._score_keeper.add_to_score(\"hard_drop\", {\"n_lines\": n_lines_dropped})\n \n if n_lines_dropped:\n self._sound.play_sound_effect('game_hard_drop')\n \n # Remove the landing sound effect as the hard drop one covers it.\n landed = False\n \n elif self._keys_down.get(K_DOWN, False):\n n_lines_dropped, landed = self._current_tetrimino.move_down(self._grid, self._level, \"soft\")\n self._score_keeper.add_to_score(\"soft_drop\", {\"n_lines\": n_lines_dropped})\n \n if n_lines_dropped:\n self._sound.play_sound_effect('game_soft_drop')\n \n else:\n _, landed = self._current_tetrimino.move_down(self._grid, self._level, \"normal\") \n \n if landed:\n self._sound.play_sound_effect('game_landing')\n \n if self._current_tetrimino.lock_counter >= LOCK_DELAY:\n self._lock_down()\n\n\n def _lock_down(self):\n \"\"\"Method called when the current tetrimino stayed on the ground longer than\n its lock delay. Locks it into the current grid, and updates score, goal, level\n and lines cleared. Also spawns a new tetrimino and plays a sound effect. 
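The `(lines_cleared, reward_tspin)` dispatch table that `_lock_down` uses below is easy to exercise on its own. A minimal sketch follows; the action names mirror the game code, but the point values are assumptions for demonstration only (the real values live in the `Score` class, which is not shown here):

```python
# Dispatch-table sketch mirroring _lock_down's action lookup. The action names
# match the record above; the point values are illustrative assumptions.
ACTIONS = {(1, False): "single", (2, False): "double",
           (3, False): "triple", (4, False): "tetris",
           (1, True): "tspin_single", (2, True): "tspin_double",
           (3, True): "tspin_triple", (4, True): "tspin_tetris"}

DEMO_POINTS = {"single": 100, "double": 300, "triple": 500, "tetris": 800,
               "tspin_single": 800, "tspin_double": 1200,
               "tspin_triple": 1600, "tspin_tetris": 2600}

def demo_score(lines_cleared, reward_tspin, level):
    """Look up the scoring action for a line clear and scale it by level."""
    return DEMO_POINTS[ACTIONS[(lines_cleared, reward_tspin)]] * level

print(demo_score(4, False, 2))  # a level-2 tetris -> 1600 with these demo values
```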
If\n lock out occurred, updates game state to gameover and plays corresponding sound\n effect.\"\"\"\n lines_cleared, lock_out, all_empty, reward_tspin = self._grid.lock_down(self._current_tetrimino)\n \n sound_effect = 'game_lock'\n \n if lines_cleared:\n score_lines_cleared_actions = {(1, False): \"single\", (2, False): \"double\", \n (3, False): \"triple\", (4, False): \"tetris\",\n (1, True): \"tspin_single\", (2, True): \"tspin_double\", \n (3, True): \"tspin_triple\", (4, True): \"tspin_tetris\"}\n \n action = score_lines_cleared_actions[lines_cleared, reward_tspin] \n self._score_keeper.add_to_score(action, {\"level\": self._level})\n \n sound_effects_cleared_actions = {1: \"game_single\", 2: \"game_double\", 3: \"game_triple\", 4: \"game_tetris\"}\n sound_effect = sound_effects_cleared_actions[lines_cleared]\n \n if all_empty:\n self._score_keeper.add_perfect_bonus_to_score(lines_cleared, self._level)\n sound_effect = 'game_perfect'\n \n elif reward_tspin:\n self._score_keeper.add_to_score(\"tspin_no_lines\", {\"level\": self._level})\n \n \n self._lines_cleared += lines_cleared\n self._goal -= lines_cleared\n if self._goal <= 0:\n if self._level < LEVEL_CAP:\n self._level += 1\n self._goal = FIXED_GOAL\n else:\n self._goal = 0\n \n if lock_out:\n self._state = \"gameover\"\n sound_effect = 'game_gameover'\n else:\n self._spawn_tetrimino()\n self._swap_allowed = True\n \n self._sound.play_sound_effect(sound_effect)\n\n\n def _tick_countdown(self):\n \"\"\"Method called once per frame when counting down from paused state to\n running state. Decreases the remaining countdown timer, and if the \n timer elapsed, it changes the game state to running.\"\"\"\n self._countdown_timer -= 1\n \n if self._countdown_timer <= 0:\n self._state = \"running\"\n\n\n def _swap_held_tetrimino(self):\n \"\"\"Performs the swap between the current tetrimino and the held tetrimino,\n if it is currently allowed.\"\"\" \n if self._swap_allowed:\n old_held = self._held_tetrimino\n self._held_tetrimino = self._current_tetrimino.letter\n\n if old_held is None:\n self._spawn_tetrimino()\n else:\n self._current_tetrimino = PlayableTetrimino(old_held, self._grid)\n \n self._swap_allowed = False\n \n self._sound.play_sound_effect('game_hold')\n \n \n def event_update_screen(self, fps):\n \"\"\"Override of method from Activity class, drawing the game\n on the screen. This method updates the game state, and then the graphics\n on the screen.\"\"\"\n game_state_text = \"\"\n \n if self._state == \"running\":\n self._fall_tetrimino()\n elif self._state == \"countdown\":\n game_state_text = \"Resuming in {}\".format(ceil(self._countdown_timer / REFRESH_RATE))\n self._tick_countdown()\n elif self._state == \"paused\":\n game_state_text = \"Paused\"\n \n self._window.update_game(current_grid = self._grid,\n current_tetrimino = self._current_tetrimino,\n queue = self._next_queue, held = self._held_tetrimino,\n score = self._score_keeper.score, level = self._level,\n goal = self._goal, lines = self._lines_cleared,\n fps = fps, show_fps = self._show_fps_counter,\n game_state_text = game_state_text)\n \n \n def event_key_pressed(self, key):\n \"\"\"Override of method from Activity class, reacting to key being pressed.\n Keeps track of which keys are being held (as key down events are repeated\n with a set rate when in game mode) and ignores key down events of keys that\n are being held except for right and left arrows. 
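The held-key bookkeeping described here can be isolated into a standalone sketch. The key names below are placeholders, not pygame constants:

```python
# Repeated key-down events for a held key are ignored unless the key is in the
# allow-list, mirroring the left/right-arrow exception described above.
REPEAT_ALLOWED = {"left", "right"}
keys_down = {}

def on_key_down(key):
    """Return True if this key-down event should trigger an action."""
    was_held = keys_down.get(key, False)
    keys_down[key] = True
    return (not was_held) or key in REPEAT_ALLOWED

def on_key_up(key):
    keys_down[key] = False

print(on_key_down("x"), on_key_down("x"))        # True False: repeat suppressed
print(on_key_down("left"), on_key_down("left"))  # True True: repeat allowed
```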
All other keys will only\n trigger one change in the game until they are released.\"\"\"\n key_held = self._keys_down.get(key, False)\n self._keys_down[key] = True\n \n if self._state == \"running\":\n rotation_success = False\n move_success = False\n hit_wall = False\n \n if key == K_LEFT:\n move_success, hit_wall = self._current_tetrimino.move_sideways(\"left\", self._grid)\n elif key == K_RIGHT:\n move_success, hit_wall = self._current_tetrimino.move_sideways(\"right\", self._grid)\n elif not key_held:\n if key in (K_c, K_LSHIFT, K_RSHIFT):\n self._swap_held_tetrimino()\n elif key in (K_x, K_UP):\n rotation_success = self._current_tetrimino.rotate(\"clockwise\", self._grid)\n elif key in (K_z, K_LCTRL, K_RCTRL):\n rotation_success = self._current_tetrimino.rotate(\"anticlockwise\", self._grid)\n \n if rotation_success:\n self._sound.play_sound_effect('game_rotate')\n \n if move_success:\n self._sound.play_sound_effect('game_move')\n \n if hit_wall:\n self._sound.play_sound_effect('game_alert')\n \n \n if key == K_ESCAPE or key == K_F1: \n # If game is running or counting down to resume, we allow pausing it.\n # If the game is paused, we start counting down to resume it.\n # If game is finished, we don't change the state.\n if self._state == \"running\":\n self._state = \"paused\"\n self._sound.play_sound_effect('game_pause')\n elif self._state == \"paused\":\n self._state = \"countdown\"\n self._sound.play_sound_effect('game_pause')\n self._countdown_timer = COUNTDOWN_FRAMES\n \n elif key == K_F2:\n self._show_fps_counter = not self._show_fps_counter\n \n change_activity = self._state == \"gameover\" and key in [K_RETURN, K_ESCAPE]\n \n if change_activity:\n self._sound.play_sound_effect('menu_back')\n \n return change_activity\n \n \n def event_key_released(self, key):\n \"\"\"Override of method from Activity class, reacting to key release.\"\"\"\n self._keys_down[key] = False\n\n\n","repo_name":"Andrea-Oliveri/Tetris","sub_path":"src/activities/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":10980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"17720086264","text":"import os\nimport pygame\nfrom pygame.locals import *\nimport Widget\nfrom Image import ScaledImage\nclass RadioButtonGroup():\n def __init__(self, **kwargs):\n self.selected = None\n self.callback = kwargs.get(\"callback\", None)\n def select(self, button):\n if self.selected:\n self.selected.selected = False\n self.selected = button\n self.selected.selected = True\n if self.callback:\n self.callback(self.selected)\nclass RadioButton(Widget.Widget):\n def __init__(self, parent, **kwargs):\n self.parent = parent\n self.screen = self.parent.screen\n\n #self.label = kwargs.get(\"text\", None)\n #self.font = pygame.font.Font(os.path.join(\"..\", \"..\", \"font\", \"Lumidify_Casual.ttf\"), 20)\n self.normal_image = ScaledImage(self.screen, os.path.join(\"..\", \"..\", \"images\", \"radiobutton_normal.png\"))\n self.selected_image = ScaledImage(self.screen, os.path.join(\"..\", \"..\", \"images\", \"radiobutton_selected.png\"))\n self.boundwidth = kwargs.get(\"width\", self.normal_image.width)\n self.boundheight = kwargs.get(\"height\", self.normal_image.height)\n self.rect = Rect(0, 0, self.boundwidth, self.boundheight)\n self.bounding_rect = Rect(0, 0, self.boundwidth, self.boundheight)\n self.pressed = False\n self.selected = kwargs.get(\"selected\", False)\n self.group = kwargs.get(\"group\", None)\n if self.selected and self.group:\n 
self.group.select(self)\n self.resize_images()\n def update_screen(self, screen):\n self.screen = screen\n for image in [self.normal_image, self.selected_image]:\n image.screen = screen\n def resize_images(self):\n for image in [self.normal_image, self.selected_image]:\n image.resize(width=self.rect.width, height=self.rect.height)\n def resize(self, **kwargs):\n temp_width, temp_height = self.rect.width, self.rect.height\n super().resize(**kwargs)\n if temp_width != self.rect.width or temp_height != self.rect.height:\n self.resize_images()\n def calculate_pos(self):\n super().calculate_pos()\n for image in [self.normal_image, self.selected_image]:\n image.x = self.rect.x\n image.y = self.rect.y\n def update(self, event):\n mouse_pos = pygame.mouse.get_pos()\n collide_button = self.rect.collidepoint(mouse_pos)\n if event.type == MOUSEBUTTONDOWN:\n if collide_button:\n self.pressed = True\n else:\n self.pressed = False\n elif event.type == MOUSEBUTTONUP:\n if self.pressed:\n if collide_button:\n self.selected = True\n if self.group:\n self.group.select(self)\n self.pressed = False\n def draw(self):\n if self.selected:\n image = self.selected_image\n else:\n image = self.normal_image\n image.draw()\n\n","repo_name":"lumidify/BobGUI","sub_path":"RadioButton.py","file_name":"RadioButton.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"74779209818","text":"from __future__ import annotations\nfrom mercury.logger import logger as logging, logging_overhead\nfrom mercury.msg.network import ChannelShare\nfrom mercury.msg.packet import NetworkPacket\nfrom mercury.msg.packet.app_packet.srv_packet import SrvRelatedRequest\nfrom mercury.msg.packet.app_packet.acc_packet import *\nfrom mercury.utils.amf import AccessManagementFunction\nfrom xdevs.models import Port\nfrom ..common import ExtendedAtomic\n\n\nclass AccessManager(ExtendedAtomic):\n\n CLIENT_LOGGING_OVERHEAD = ''\n GATEWAY_LOGGING_OVERHEAD = ' '\n\n def __init__(self, gateway_id: str, wired: bool, default_server: str, amf: AccessManagementFunction):\n \"\"\"\n Gateway access manager. It implements all the logic corresponding to the gateway.\n :param gateway_id: ID of the gateway.\n :param wired: if True, gateway deals with wired clients (i.e., mobility, HO and shares are not necessary).\n :param default_server: ID of the default server that will handle unknown requests.\n :param amf: Reference to the Access Management Function. 
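The `AccessManagementFunction` itself is imported from `mercury.utils.amf` and its implementation is not shown in this file. A hypothetical minimal sketch of the contract this manager relies on, with each operation returning True on success as the calls below expect:

```python
# Hypothetical stand-in for mercury's AccessManagementFunction: a registry of
# client -> gateway assignments with the three operations the manager calls.
class SketchAMF:
    def __init__(self):
        self.clients = {}  # client_id -> gateway_id

    def connect_client(self, client_id: str, gateway_id: str) -> bool:
        if client_id in self.clients:
            return False  # already connected somewhere
        self.clients[client_id] = gateway_id
        return True

    def disconnect_client(self, client_id: str, gateway_id: str) -> bool:
        if self.clients.get(client_id) != gateway_id:
            return False  # not connected to this gateway
        del self.clients[client_id]
        return True

    def handover_client(self, client_id: str, gw_from: str, gw_to: str) -> bool:
        if self.clients.get(client_id) != gw_from:
            return False  # a hand-over must start from the serving gateway
        self.clients[client_id] = gw_to
        return True
```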
It is used to locate clients in the network.\n \"\"\"\n self.gateway_id: str = gateway_id\n self.wired: bool = wired\n self.default_server: str = default_server\n self.amf: AccessManagementFunction = amf\n self.clients: list[str] = list()\n super().__init__(f'gateway_{gateway_id}_manager')\n\n self.input_app: Port[AppPacket] = Port(AppPacket, 'input_app')\n self.input_net: Port[NetworkPacket] = Port(NetworkPacket, 'input_net')\n self.output_access_acc: Port[AppPacket] = Port(AppPacket, 'output_access_acc')\n self.output_access_srv: Port[AppPacket] = Port(AppPacket, 'output_access_srv')\n self.output_access_net: Port[NetworkPacket] = Port(NetworkPacket, 'output_access_net')\n self.output_xh_app: Port[AppPacket] = Port(AppPacket, 'output_xh_app')\n self.output_xh_net: Port[NetworkPacket] = Port(NetworkPacket, 'output_xh_net')\n self.add_in_port(self.input_app)\n self.add_in_port(self.input_net)\n for port in self.output_access_acc, self.output_access_srv, \\\n self.output_access_net, self.output_xh_app, self.output_xh_net:\n self.add_out_port(port)\n if not self.wired:\n self.clients_ho: dict[str, str] = dict()\n self.input_send_pss: Port[str] = Port(str, 'input_send_pss')\n self.output_channel_share = Port(ChannelShare, 'output_channel_share')\n self.add_in_port(self.input_send_pss)\n self.add_out_port(self.output_channel_share)\n\n def deltint_extension(self):\n self.passivate()\n\n def deltext_extension(self, e):\n access_overhead: str = logging_overhead(self._clock, self.CLIENT_LOGGING_OVERHEAD)\n xh_overhead: str = logging_overhead(self._clock, self.GATEWAY_LOGGING_OVERHEAD)\n change: bool = self.process_app(access_overhead, xh_overhead)\n self.process_net(access_overhead, xh_overhead)\n if not self.wired:\n for client_id in self.input_send_pss.values:\n msg = PSSMessage(self.gateway_id, client_id, self._clock)\n msg.send(self._clock)\n self.add_msg_to_queue(self.output_access_acc, msg)\n if change:\n share = ChannelShare(self.gateway_id, [client_id for client_id in self.clients])\n self.add_msg_to_queue(self.output_channel_share, share)\n self.passivate() if self.msg_queue_empty() else self.activate()\n\n def lambdaf_extension(self):\n pass\n\n def initialize(self):\n self.passivate()\n\n def exit(self):\n pass\n\n def process_app(self, access_overhead: str, xh_overhead: str) -> bool:\n change: bool = False\n for msg in self.input_app.values:\n if isinstance(msg, AccessPacket):\n change |= self.process_app_access(msg, access_overhead, xh_overhead)\n elif isinstance(msg, SrvRelatedRequest):\n self.process_app_srv(msg, access_overhead)\n return change\n\n def process_net(self, access_overhead: str, xh_overhead: str):\n for msg in self.input_net.values:\n if msg.node_from in self.clients:\n logging.debug(f'{access_overhead}{msg.node_from}--->{self.gateway_id}: network message')\n self.add_msg_to_queue(self.output_xh_net, msg)\n elif msg.node_to in self.clients:\n logging.debug(f'{xh_overhead}{self.gateway_id}<---{msg.node_from}: network message')\n self.add_msg_to_queue(self.output_access_net, msg)\n else:\n logging.warning(f'{xh_overhead}{self.gateway_id}: network message from/to unknown node')\n\n def process_app_access(self, msg: AccessPacket, access_overhead: str, xh_overhead: str) -> bool:\n change: bool = False\n if isinstance(msg, ConnectRequest):\n change |= self.connect_client(msg, access_overhead)\n elif isinstance(msg, DisconnectRequest):\n change |= self.disconnect_client(msg, access_overhead)\n elif not self.wired:\n if isinstance(msg, RRCMessage) and msg.client_id in 
self.clients:\n self.client_rrc(msg, access_overhead)\n elif isinstance(msg, HandOverRequest):\n change |= self.start_client_ho(msg, access_overhead)\n elif isinstance(msg, HandOverFinished) and msg.client_id in self.clients_ho:\n change |= self.finish_client_ho(msg, access_overhead)\n return change\n\n def process_app_srv(self, msg: SrvRelatedRequest, access_overhead: str):\n log_msg = f'{access_overhead}{msg.node_from}--->{self.gateway_id}: service-related message'\n if msg.node_from not in self.clients:\n logging.warning(f'{log_msg}, but client is not connected to gateway. Dropping message')\n elif msg.server_id is not None:\n logging.warning(f'{log_msg}, but message was redirected to {msg.server_id}. Ignoring message')\n else:\n logging.info(f'{log_msg}. Redirecting to default server {self.default_server}')\n msg.set_node_to(self.default_server)\n self.add_msg_to_queue(self.output_xh_app, msg)\n\n def connect_client(self, req: ConnectRequest, access_overhead: str) -> bool:\n change: bool = False\n response = req.client_id in self.clients\n log_function = logging.info\n log_msg = f'{access_overhead}{req.client_id}--->{self.gateway_id}: connect request'\n if response:\n log_function = logging.warning\n log_msg = f'{log_msg} (already connected)'\n else:\n response = self.amf.connect_client(req.client_id, self.gateway_id)\n if response:\n change = True\n self.clients.append(req.client_id)\n else:\n log_function = logging.warning\n log_msg = f'{log_msg} (request failed)'\n log_function(log_msg)\n response = ConnectResponse(req, response, self._clock)\n response.send(self._clock)\n self.add_msg_to_queue(self.output_access_acc, response)\n return change\n\n def disconnect_client(self, req: DisconnectRequest, access_overhead: str) -> bool:\n change: bool = False\n response = req.client_id not in self.clients\n log_function = logging.info\n log_msg = f'{access_overhead}{req.client_id}--->{self.gateway_id}: disconnect request'\n if response:\n log_function = logging.warning\n log_msg = f'{log_msg} (already disconnected)'\n else:\n response = self.amf.disconnect_client(req.client_id, self.gateway_id)\n if response:\n change = True\n self.clients.remove(req.client_id)\n else:\n log_function = logging.warning\n log_msg = f'{log_msg} (request failed)'\n log_function(log_msg)\n response = DisconnectResponse(req, response, self._clock)\n response.send(self._clock)\n self.add_msg_to_queue(self.output_access_acc, response)\n return change\n\n def start_client_ho(self, req: HandOverRequest, access_overhead: str) -> bool:\n log_msg = f'{access_overhead}{req.client_id}--->{self.gateway_id}: HO from {req.gateway_from} request'\n change = self.amf.handover_client(req.client_id, req.gateway_from, self.gateway_id)\n if change:\n logging.info(log_msg)\n self.clients.append(req.client_id)\n else:\n logging.warning(f'{log_msg} (HO failed)')\n response = HandOverResponse(req, change, self._clock)\n response.send(self._clock)\n self.add_msg_to_queue(self.output_access_acc, response)\n return change\n\n def finish_client_ho(self, req: HandOverFinished, access_overhead: str) -> bool:\n logging.info(f'{access_overhead}{req.client_id}--->{self.gateway_id}: HO finished. 
Result: {req.response}')\n self.clients_ho.pop(req.client_id)\n if req.response:\n self.clients.remove(req.client_id)\n return req.response\n\n def client_rrc(self, rrc: RRCMessage, access_overhead: str):\n log_msg = f'{access_overhead}{rrc.client_id}--->{self.gateway_id}: RRC message'\n best_gateway = max(rrc.perceived_snr, key=rrc.perceived_snr.get)\n if best_gateway != self.gateway_id:\n log_msg = f'{log_msg} (new best gateway {best_gateway}). Starting HO process'\n self.clients_ho[rrc.client_id] = best_gateway\n ho_data = HandOverData(rrc.client_id, self.gateway_id, best_gateway)\n start_ho = StartHandOver(ho_data, self._clock)\n start_ho.send(self._clock)\n self.add_msg_to_queue(self.output_access_acc, start_ho)\n logging.info(log_msg)\n","repo_name":"greenlsi/mercury_mso_framework","sub_path":"mercury/model/gateways/acc_manager.py","file_name":"acc_manager.py","file_ext":"py","file_size_in_byte":9821,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"27171549153","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom jobs_portal.jobs.models import JobModel\n\nUserModel = get_user_model()\n\n\nclass JobModelTest(TestCase):\n def setUp(self) -> None:\n self.user = UserModel.objects.create_user(email='test@abv.bg', password='123456')\n self.job_offer = JobModel.objects.create(\n title='test',\n description='Test description',\n salary=50,\n image='path/image/test.jpg',\n user_id=self.user.id,\n )\n\n def test_initialJobSettings(self):\n self.assertEqual('test', self.job_offer.title)\n self.assertEqual('Test description', self.job_offer.description)\n self.assertEqual(50, self.job_offer.salary)\n self.assertEqual('path/image/test.jpg', self.job_offer.image)\n self.assertEqual('София', self.job_offer.city)\n self.assertEqual('на час', self.job_offer.salary_type)\n self.assertEqual('IT', self.job_offer.work_category)\n self.assertEqual(0, self.job_offer.total_likes())\n","repo_name":"borisgarkov/SoftUni","sub_path":"Python Web/jobs_portal/tests/jobs/models/test_job_model.py","file_name":"test_job_model.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"16290776215","text":"from collections import deque\r\nimport sys\r\n\r\nn = int(sys.stdin.readline())\r\nq = deque()\r\nfor _ in range(n):\r\n word = sys.stdin.readline().split()\r\n a = word[0]\r\n if a == 'push_back':\r\n q.append(word[1])\r\n elif a == 'push_front':\r\n q.appendleft(word[1])\r\n elif a == 'pop_front':\r\n if len(q) == 0:\r\n print(-1)\r\n else:\r\n print(q.popleft())\r\n elif a == 'pop_back':\r\n if len(q) == 0:\r\n print(-1)\r\n else:\r\n print(q.pop())\r\n elif a == 'size':\r\n print(len(q))\r\n elif a == 'empty':\r\n if len(q) == 0:\r\n print(1)\r\n else:\r\n print(0)\r\n elif a == 'front':\r\n if len(q) == 0:\r\n print(-1)\r\n else:\r\n print(q[0])\r\n elif a == 'back':\r\n if len(q) == 0:\r\n print(-1)\r\n else:\r\n print(q[len(q) - 1])\r\n","repo_name":"jungeun97/Algorithm","sub_path":"백준/Silver/10866. 
덱/덱.py","file_name":"덱.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"12275276102","text":"import sys\n\ndef sol(ali: list, a : int):\n global ans\n if len(ali) == w-1:\n ali.append(li[a][0])\n ans = min(ans, sum(ali))\n ali.pop()\n return\n\n for x in range(a, w):\n for y in range(1, w):\n if li[x][y] == 0:\n continue\n if not visited[y]: \n visited[y] = True\n ali.append(li[x][y])\n sol(ali, y)\n visited[y] = False\n ali.pop()\n\nw = int(sys.stdin.readline())\nli = [list(map(int, sys.stdin.readline().split())) for _ in range(w)]\nvisited = [False]*w\nans = 99999999999999\n\nsol([], 0)\nprint(ans)\n\n#------------------------------------------------------------------------------------------------------------\n\nn = int(input())\nl = [list(map(int, input().split())) for _ in range(n)]\nvisit = [0] * n\nm = 1e9\n\ndef dfs(depth, start, cost):\n global m\n if depth == n-1 and l[start][0] != 0:\n m = min(m, cost+l[start][0])\n return\n for i in range(n):\n if not visit[i] and l[start][i] != 0:\n visit[i] = 1\n dfs(depth+1, i, cost+l[start][i])\n visit[i] = 0\nvisit[0] = 1\ndfs(0, 0, 0)\nprint(m)\n","repo_name":"choihs0457/Krafton_Jungle","sub_path":"Week1/외판원 순회 2.py","file_name":"외판원 순회 2.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"27299488241","text":"import os\nimport requests\nfrom datetime import datetime\n\n#ID_number must be an int which is the sat id number.\n#25544 for ISS, for example.\n#acceptable_age tells how old a tle can be before we choose to re-fetch\ndef get_tle(ID_number, acceptable_age = 3):\n ID_number = str(ID_number)\n if not os.path.exists(ID_number + \".tle\"):\n return web_retrieve_tle(ID_number)\n else:\n with open(ID_number + \".tle\") as f:\n loaded_tle = f.readlines()\n loaded_tle = [line[:-1] for line in loaded_tle]\n current_year = datetime.utcnow().timetuple().tm_year\n current_day_of_year = datetime.utcnow().timetuple().tm_yday\n current_epoch_day = str(current_year) + str(current_day_of_year)\n loaded_tle_epoch = \"20\" + loaded_tle[0][18:23]\n print(loaded_tle)\n tle_age = float(current_epoch_day) - float(loaded_tle_epoch)\n if tle_age > acceptable_age:\n print(\"Re-fetching old TLE\")\n return web_retrieve_tle(ID_number)\n return loaded_tle\ndef web_retrieve_tle(ID_number):\n ID_number = str(ID_number)\n session = requests.session()\n url = \"https://www.celestrak.com/NORAD/elements/gp.php?CATNR=\" + ID_number\n page = session.get(url)\n sat_tle = page.text[:-2].split(\"\\r\\n\")[1:]\n with open(ID_number + \".tle\",\"w\") as f:\n f.write(\"\\n\".join(sat_tle))\n return sat_tle\n\nif __name__ == \"__main__\":\n print(get_tle(25544))","repo_name":"bismurphy/moon_transit_finder","sub_path":"load_tle.py","file_name":"load_tle.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"22681909624","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom . import utils as zutils\nfrom ..params import GameParams, ModelParams\nfrom .. 
import utils\n\n\n@zutils.register_model\nclass GenericModel(torch.jit.ScriptModule):\n __constants__ = [\n \"c_prime\",\n \"h_prime\",\n \"w_prime\",\n \"net1\",\n \"net2\",\n \"net3\",\n \"net4\",\n \"v1\",\n \"v2\",\n \"pi1\",\n \"pi2\",\n ]\n\n DEFAULT_FCSIZE = 1024\n DEFAULT_NNSIZE = 2\n DEFAULT_NNKS = 3\n DEFAULT_STRIDE = 1\n DEFAULT_DILATION = 1\n DEFAULT_BN = False\n # DEFAULT_BN_AFFINE = False\n\n default_game_name = \"Connect4\"\n\n def __init__(self, game_params: GameParams, model_params: ModelParams):\n torch.jit.ScriptModule.__init__(self)\n if game_params.game_name is None:\n game_params.game_name = self.__class__.default_game_name\n self.game_name = game_params.game_name\n self.game_params = game_params\n info = zutils.get_game_info(game_params)\n c, h, w = self.c, self.h, self.w = info[\"feature_size\"][:3]\n c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[\n \"action_size\"\n ][:3]\n\n # fc size\n if model_params.fcsize is None:\n model_params.fcsize = self.DEFAULT_FCSIZE\n fcsize = model_params.fcsize\n # nn size\n if model_params.nnsize is None:\n model_params.nnsize = self.DEFAULT_NNSIZE\n nnsize = model_params.nnsize\n # kernel size\n if model_params.nnks is None:\n model_params.nnks = self.DEFAULT_NNKS\n nnks = model_params.nnks\n # stride\n stride = self.DEFAULT_STRIDE\n # dilation\n dilation = self.DEFAULT_DILATION\n # padding\n padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation)\n # batch norm\n if model_params.bn is None:\n model_params.bn = self.DEFAULT_BN\n bn = model_params.bn\n # # batch norm affine\n # if model_params.bn_affine is None:\n # model_params.bn_affine = self.DEFAULT_BN_AFFINE\n # bn_affine = model_params.bn_affine\n bn_affine = bn\n self.model_params = model_params\n\n net1 = [\n nn.Conv2d(\n c,\n int(nnsize * c),\n nnks,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=not bn_affine,\n )\n ]\n net2 = [\n nn.Conv2d(\n int(nnsize * c),\n int(nnsize * c),\n nnks,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=not bn_affine,\n )\n ]\n net3 = [\n nn.Conv2d(\n int(nnsize * c),\n int(nnsize * c),\n nnks,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=not bn_affine,\n )\n ]\n net4 = [\n nn.Conv2d(\n int(nnsize * c),\n int(nnsize * c),\n nnks,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=not bn_affine,\n )\n ]\n v1 = [nn.Linear(int(nnsize * c) * h * w, fcsize)]\n v2 = [nn.Linear(fcsize, fcsize)]\n pi1 = [nn.Linear(int(nnsize * c) * h * w, fcsize)]\n pi2 = [nn.Linear(fcsize, fcsize)]\n if bn or bn_affine:\n net1.append(\n nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n )\n net2.append(\n nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n )\n net3.append(\n nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n )\n net4.append(\n nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine)\n )\n v1.append(\n nn.BatchNorm1d(fcsize, track_running_stats=True, affine=bn_affine)\n )\n v2.append(\n nn.BatchNorm1d(fcsize, track_running_stats=True, affine=bn_affine)\n )\n pi1.append(\n nn.BatchNorm1d(fcsize, track_running_stats=True, affine=bn_affine)\n )\n pi2.append(\n nn.BatchNorm1d(fcsize, track_running_stats=True, affine=bn_affine)\n )\n self.net1 = nn.Sequential(*net1)\n self.net2 = nn.Sequential(*net2)\n self.net3 = nn.Sequential(*net3)\n self.net4 = nn.Sequential(*net4)\n self.v1 = nn.Sequential(*v1)\n self.v2 = nn.Sequential(*v2)\n self.pi1 
= nn.Sequential(*pi1)\n self.pi2 = nn.Sequential(*pi2)\n self.v3 = nn.Linear(fcsize, 1)\n self.pi3 = nn.Linear(fcsize, c_prime * h_prime * w_prime)\n\n @torch.jit.script_method\n def _forward(self, x: torch.Tensor, return_logit: bool):\n h1 = F.relu(self.net1(x))\n h2 = F.relu(self.net2(h1)) + h1\n h3 = F.relu(self.net3(h2)) + h2\n h4 = F.relu(self.net4(h3)) + h3\n v1 = F.relu(self.v1(h4.flatten(1)))\n v2 = F.relu(self.v2(v1))\n v = torch.tanh(self.v3(v2))\n pi_logit1 = F.relu(self.pi1(h4.flatten(1)))\n pi_logit2 = F.relu(self.pi2(pi_logit1))\n pi_logit = self.pi3(pi_logit2)\n if return_logit:\n return v, pi_logit\n s = pi_logit.shape\n pi = F.softmax(pi_logit.flatten(1), 1).reshape(s)\n return v, pi\n\n @torch.jit.script_method\n def forward(self, x: torch.Tensor):\n v, pi_logit = self._forward(x, True)\n pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime)\n reply = {\"v\": v, \"pi_logit\": pi_logit}\n return reply\n","repo_name":"facebookarchive/Polygames","sub_path":"pypolygames/model_zoo/generic_model.py","file_name":"generic_model.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"69"} +{"seq_id":"11887644462","text":"import argparse\nimport base64\nimport datetime\nimport httplib\nimport json\nimport socket\nimport sys\nimport time\nimport urllib2\nimport urlparse\nimport xml.etree.ElementTree as ET\n\nclass Echo360CaptureDevice(object):\n # This class is a wrapper for the Echo360 Capture device API.\n def __init__(self, server, username, password, debuglevel=None, timeout=10):\n self.server = server\n self.username = username\n self.password = password\n self.debug = debuglevel\n self.timeout = int(timeout)\n self.utc_offset = None\n self.connection_test = self.status_system()\n if self.connection_test.success():\n self.utc_offset = self.connection_test.utc_offset\n\n def request(self, method, path, headers=None, body=None, timeout=None):\n # Perform the request and all exception handling.\n # Returns:\n # status - HTTP status or an exception code\n # reason - A human readable error message, HTTP reason or exception related message\n # headers - A dict that may contain HTTP response headers\n # data - None or response data.\n # allow override in a subclass to support other http libraries (such as Diesel.io)\n url = urlparse.urlparse(urlparse.urljoin(self.server, path))\n if len(url.netloc) == 0:\n return('Invalid URL', 'Missing IP address or domain name.', {}, None)\n try:\n if url.scheme == 'https':\n conn = httplib.HTTPSConnection(url.hostname, url.port, timeout=self.timeout)\n elif url.scheme == 'http':\n conn = httplib.HTTPConnection(url.hostname, url.port, timeout=self.timeout)\n else:\n return('Invalid URL', 'The URL scheme must be http or https.', {}, None)\n except Exception as e:\n return('unknown', 'Unknown error: {0}'.format(repr(e)), {}, None)\n if self.debug is not None:\n conn.set_debuglevel(self.debug)\n try:\n conn.request(method, url.path, body, headers)\n resp = conn.getresponse()\n return (resp.status, resp.reason, dict(resp.getheaders()), resp.read())\n except socket.timeout as e:\n # This exception is raised when a timeout occurs on a socket which has had\n # timeouts enabled via a prior call to settimeout().\n if timeout is None:\n return('timeout', 'Network connection timed out.', {}, None)\n else:\n return('timeout', 'Network connection timed out (after {0} seconds).'.format(timeout), {}, None)\n except socket.error as e:\n # This exception is raised 
for socket-related errors.\n if e.errno == 8:\n # socket.gaierror: [Errno 8] nodename nor servname provided, or not known\n return('socket-8', 'Unknown host: {0}'.format(args.server), {}, None)\n elif e.errno == 61:\n # socket.error: [Errno 61] Connection refused\n return('socket-61', 'Server connection refused: {0}'.format(args.server), {}, None)\n elif e.errno is not None:\n return('socket', 'Network error ({0}): {1}'.format(e.errno, e.strerror), {}, None)\n else:\n return('unknown', 'Network error: {0}'.format(repr(e)), {}, None)\n\n def call_api(self, command, method=None, post_data=None, title=None, dump_xml=None):\n if method is None:\n if post_data is None:\n method = 'GET'\n else:\n method = 'POST'\n if self.username is not None and self.password is not None: \n req_headers = { 'Authorization' : 'Basic ' + base64.b64encode(self.username + ':' + self.password) }\n else:\n req_headers = {}\n (status, reason, headers, data) = self.request(method, command, req_headers, post_data, self.timeout)\n if 'Content-Type' in headers and headers['Content-Type'] == 'text/xml':\n xml_data = ET.fromstring(data)\n # some libraries convert to lower-case\n elif 'content-type' in headers and headers['content-type'] == 'text/xml':\n xml_data = ET.fromstring(data)\n else:\n xml_data = None\n if status == 200:\n return Echo360CaptureDeviceResponse(command, 'success', 'Ok', data=data, xml_data=xml_data, \n device=self, utc_offset=self.utc_offset, title=title, dump_xml=dump_xml)\n else:\n # 409 Conflict is used as an error response for capture/stop, capture/confidence_monitor and possibly others\n # 501 Is used as an error for capture/new-capture\n if self.debug > 0:\n print('Debug command {0}\\nStatus: {1} reason:{2}'.format(command, status, reason))\n if self.debug > 4:\n print('Response data:\\n{0}'.format(data))\n return Echo360CaptureDeviceResponse(command, status, reason, data=data, xml_data=xml_data,\n device=self, utc_offset=self.utc_offset, title=title, dump_xml=dump_xml)\n\n def fetch_file(self, command):\n pass\n\n def capture_status_str(self, sleep=None):\n # Fetch the capture status\n if sleep is not None:\n time.sleep(sleep)\n response = self.status_monitoring()\n if response.success():\n text = 'State={0}'.format(response.state)\n if response.check_attribute('duration'):\n text += '; duration={0}'.format(response.duration)\n if response.check_attribute('start_time_local'):\n text += '; start time (local)={0}'.format(response.start_time_local)\n if response.check_attribute('confidence_monitoring'):\n if response.confidence_monitoring != 'false':\n text += '; confidence monitoring={0}'.format(response.confidence_monitoring)\n return text\n else:\n if response._result_code == 401:\n return 'User {0} is not authorised to perform status/monitoring, '.format(self.username) + \\\n 'or username or password are not correct.'\n return 'Unknown device error ({0}): {1}'.format(\n response._result_code, response._result_message)\n\n # (3) Device API Calls\n # The method names match the API names.\n\n # (3.1) Device and Capture Status API Calls\n # The Status API calls are used to return status and capture information for the device. 
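For orientation, a usage sketch of the wrapper class with the status call defined just below. The host and credentials are the illustrative ones that appear in the API docstrings, not real values:

```python
# Constructing the device probes it once via status_system(); response attribute
# names come from the xpaths with '/' and '-' mapped to '_'
# (e.g. 'content/state' -> content_state, 'up-since' -> up_since).
device = Echo360CaptureDevice('https://192.168.61.10:8443', 'admin', 'password')
status = device.status_system()
if status.success():
    print('serial: ' + str(status.serial_number))
    print('state: ' + str(status.content_state))
    print('up since (local): ' + str(status.up_since_local))
else:
    print(status)  # __str__ summarises the failure
```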
The Status calls \n # in this section are GET only, and are used specifically to retrieve information.\n\n def status_system(self, dump_xml=None):\n \"\"\"\n (3.1.1) Get System Status returns the current status of the device.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/status/system\"\n \"\"\"\n response = self.call_api('status/system', title='Get System Status', dump_xml=dump_xml)\n if response.success():\n response.add_timestamp('wall-clock-time')\n response.add_value('content/state')\n response.add_value('utc-offset')\n response.add_value('serial-number')\n response.add_value('system-version')\n response.add_timestamp('up-since')\n response.add_timestamp('last-sync')\n return response\n\n def status_captures(self, dump_xml=None):\n \"\"\"\n (3.1.2) Get Capture Status returns information on the status of both the next and the current capture.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/status/captures\"\n \"\"\"\n response = self.call_api('status/captures', title='Get Capture Status', dump_xml=dump_xml)\n if response.success():\n response.add_timestamp('wall-clock-time')\n self._current_capture(response)\n response.state = response.current_state\n self._next_capture(response)\n return response\n\n def _next_capture(self, response):\n response.add_value('next/schedule/type')\n response.add_timestamp('next/schedule/start-time')\n response.add_value('next/schedule/duration')\n response.add_value('next/schedule/parameters/title')\n response.add_value('next/schedule/parameters/section')\n response.add_value('next/schedule/parameters/capture-profile/name')\n response.add_value('next/state')\n response.add_timestamp('next/start-time')\n response.add_value('next/duration')\n\n def status_next_capture(self, dump_xml=None):\n \"\"\"\n (3.1.3) Get Next Capture Status returns information on the status of only the next capture.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/status/next_capture\"\n \"\"\"\n response = self.call_api('status/next_capture', title='Get Next Capture Status', dump_xml=dump_xml)\n if response.success(): \n response.add_timestamp('wall-clock-time')\n self._next_capture(response)\n return response\n\n def _current_capture(self, response):\n response.add_value('current/schedule/type')\n response.add_timestamp('current/schedule/start-time')\n response.add_value('current/schedule/duration')\n response.add_value('current/schedule/parameters/title')\n response.add_value('current/schedule/parameters/section')\n response.add_value('current/schedule/parameters/capture-profile/name')\n response.add_value('current/state')\n response.add_timestamp('current/start-time')\n response.add_value('current/duration')\n\n def status_current_capture(self, dump_xml=None):\n \"\"\"\n (3.1.4) Get Current Capture Status returns information on the status of only the current capture.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/status/current_capture\"\n \"\"\"\n response = self.call_api('status/current_capture', title='Get Current Capture Status', dump_xml=dump_xml)\n if response.success(): \n response.add_timestamp('wall-clock-time')\n self._current_capture(response)\n response.state = response.current_state\n return response\n\n def status_monitoring(self, dump_xml=None):\n \"\"\"\n (3.1.5) Get Capture Status with Monitoring Information returns real-time monitoring information on the \n current capture. 
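The `capture_status_str` helper defined earlier wraps this monitoring call and flattens the response into a single line. A usage sketch, again with illustrative host and credentials:

```python
device = Echo360CaptureDevice('https://192.168.61.10:8443', 'admin', 'password')
# Prints something like: State=active; duration=95; start time (local)=...
print(device.capture_status_str(sleep=2))  # optional settle time in seconds
```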
This call is useful for returning the filename for a thumbnail (display or \n video) to use in the Show Current Video or Display View API call described in \n 'monitoring_snapshot()' below.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/status/monitoring\"\n \"\"\"\n response = self.call_api('status/monitoring', title='Get Capture Status with Monitoring Information', dump_xml=dump_xml)\n if response.success(): \n response.add_value('state')\n response.add_timestamp('start-time')\n response.add_value('duration')\n response.add_value('confidence-monitoring')\n return response\n\n def monitoring_snapshot(self, url, dump_xml=None):\n \"\"\"\n (3.1.6) Show Current Video or Display View returns a snapshot image of the video or display input \n for the current capture. This is an image of what the Video input or Display input for the current \n capture is at the moment the call is made.\n Use the filename information returned from the Get Capture Status call described in \n status_monitoring() above.\n\n Video: curl --user admin:password --insecure --data 'duration=900&capture_profile_name=\n Display/Video (Podcast/Vodcast/EchoPlayer). Optimized for quality/full motion video&\n description=test-description' \n --url https://192.168.61.10:8443/monitoring/video_ntsc_graphics-channel2-stream0.jpg\n\n Display: curl --user admin:password --insecure --data 'duration=900&capture_profile_name=\n Display/Video (Podcast/Vodcast/EchoPlayer). Optimized for quality/full motion video&\n description=test-description' \n --url https://192.168.61.10:8443/monitoring/vga_display_graphics-channel1-stream0.jpg\n \"\"\"\n raise \"unimplemented\"\n\n def status_get_user_sections(self, dump_xml=None):\n \"\"\"\n (3.1.7) Get User Sections returns a list of the sections assigned to the user whose credentials \n (username and password) are sent with the API call. Response includes both Section Name and \n GUID along with the capture profile configured for each section.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/status/get_user_sections\"\n \"\"\"\n return self.call_api('status/get_user_sections', title='Get User Sections', dump_xml=dump_xml)\n\n def status_get_user_ref(self, dump_xml=None):\n \"\"\"\n (3.1.8) Get Authenticated User Reference ID returns the user reference ID (GUID) of the user whose \n credentials (username and password) are sent with the API call.\n\n curl --user admin:password --insecure --url https://192.168.61.10:8443/status/get_user_ref\n \"\"\"\n response = self.call_api('status/get_user_ref', title='Get Authenticated User Reference ID', dump_xml=dump_xml)\n if response.success():\n response.add_value('', name='authenticated-user-ref')\n return response \n\n # (3.2) Diagnostics API Calls\n # The API calls identified below retrieve and perform diagnostic and maintenance duties for the capture \n # device identified in the call. This section includes log retrieval calls.\n # The API calls in this section can only be performed by an Administrator.\n\n def diagnostics_clear_cache(self):\n \"\"\"\n (3.2.1) Clear User Cache\n Clears the user cache on the device.\n Generally speaking, most Capture API calls can be performed by either \"local users\", such as an \n admin or instructor, or ESS users, such as capture devices. When a user accesses any of the capture \n API calls, the user is authenticated against the ESS. The API sends the credentials to the ESS and \n the ESS responds to the API indicating authentication (or failure) for the user. 
This process can \n take some time, so it is not done for every call. Instead, whenever successful ESS user \n authentication occurs, the API caches the user credentials and validates against that, speeding up \n response time. However, if an ESS administrator changes a user's password, deletes an account or a \n device, or other similar action, the Capture API has no way of knowing. In this instance, the admin \n can either, reset/power cycle the capture device, or use this API call to force clear the cache.\n\n curl --silent --user $adminlogincreds --insecure -d --url $apiurl\"/diagnostics/clear_cache\"\n \"\"\"\n return self.call_api('diagnostics/clear_cache', method='POST', \n title='Clear User Cache')\n\n def diagnostics_ping(self, url):\n \"\"\"\n (3.2.2) Ping Host Connectivity\n Test the connectivity of a host or an IP using the ping utility.\n\n curl --silent --user $adminlogincreds --insecure -d --url $apiurl\"/diagnostics/ping/www.google.com\"\n \"\"\"\n return self.call_api('diagnostics/ping/' + url, method='POST', title='Ping Host Connectivity')\n\n def diagnostics_traceroute(self, url):\n \"\"\"\n (3.2.3) Trace Route Path and Time\n Returns the route path and transit time of a host on an IP.\n\n curl --silent --user $adminlogincreds --insecure -d --url $apiurl\"/diagnostics/traceroute/www.google.com\"\n \"\"\"\n return self.call_api('diagnostics/traceroute/' + url, method='POST', title='Trace Route Path and Time')\n\n def diagnostics_restart_all(self):\n \"\"\"\n (3.2.4) Restart Device Executables\n Restarts all of the Device executables.\n\n curl --silent -d --user $adminlogincreds --insecure --url $apiurl\"/diagnostics/restart_all\"\n \"\"\"\n return self.call_api('diagnostics/restart_all', method='POST', title='Restart Device Executables')\n\n def diagnostics_reboot(self):\n \"\"\"\n (3.2.5) Reboot Device\n Performs a soft reboot of the Device.\n\n curl --silent -d --user $adminlogincreds --insecure --url $apiurl\"/diagnostics/reboot\"\n \"\"\"\n return self.call_api('diagnostics/reboot', method='POST', title='Reboot Device')\n\n def diagnostics_system_info_ifconfig(self):\n \"\"\"\n (3.2.6) Get Device Network Configuration\n Returns the network configuration for the Device.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/diagnostics/system-info/ifconfig\"\n \"\"\"\n return self.call_api('diagnostics/system-info/ifconfig', title='Get Device Network Configuration')\n\n def diagnostics_system_info_tasks(self):\n \"\"\"\n (3.2.7) Get Device Tasks\n Returns the current tasks file for the Device. 
The task file is basically a list of the \n currently scheduled captures (tasks) for the device.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/diagnostics/system-info/tasks\"\n \"\"\"\n return self.call_api('diagnostics/system-info/tasks', title='Get Device Tasks')\n\n def diagnostics_system_info_device(self):\n \"\"\"\n (3.2.8) Get Device Configuration File\n Returns the contents of the device XML file for the Device.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/diagnostics/system-info/device\"\n \"\"\"\n return self.call_api('diagnostics/system-info/device', title='Get Device Configuration File')\n\n def diagnostics_system_info_top(self):\n \"\"\"\n (3.2.9) Get Device Processes\n Returns a list of the processes currently running on the Device.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/diagnostics/system-info/top\"\n \"\"\"\n return self.call_api('diagnostics/system-info/top', title='Get Device Processes')\n\n def diagnostics_system_info_dmesg(self):\n \"\"\"\n (3.2.10) Get Device Message Buffer\n Returns the message buffer of the Device kernel.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/diagnostics/system-info/dmesg\"\n \"\"\"\n return self.call_api('diagnostics/system-info/dmesg', title='Get Device Message Buffer')\n\n def diagnostics_recovery_saved_content(self):\n \"\"\"\n (3.2.11) Get Saved Content on the Device\n Returns a list of all saved content on the device. Can be used to determine if recovery of a capture \n is necessary, and if so, to obtain the capture ID of the capture to be re-uploaded.\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/diagnostics/recovery/saved-content\"\n \"\"\"\n # TODO: Can be many captures. Will cuurently fail if more than one.\n response = self.call_api('diagnostics/recovery/saved-content', title='Get Saved Content on the Device')\n if response.success():\n response.add_value('capture/title')\n response.add_timestamp('capture/start-time')\n response.add_value('capture/duration')\n response.add_value('capture/section')\n return response \n\n def diagnostics_capture_id_upload(self, id):\n \"\"\"\n (3.2.12) Re-Upload Content from the Device to the ESS\n Reuploads saved content from the device to the ESS. Use the capture ID returned from the Get Saved Content \n on the Device call identified in section 3.2.11 above to identify the capture to upload and obtain the \n capture ID.\n\n curl --silent --user $adminlogincreds --insecure -d --url $apiurl\"/diagnostics/recovery/4d951a96-9702-4321-abe6-a0f232ae1e36/upload\"\n \"\"\"\n raise \"unimplemented\"\n\n def log_list_last_count(self, count, dump_xml=None):\n \"\"\"\n (3.2.13) Retrieve the Last X Number of Log Messages\n Returns the last x number of log messages specified in the call. 
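A sketch of consuming the parsed result: the method below turns each log element's `key: value` lines into a dict and collects them on `response.entries` (host and credentials are illustrative):

```python
device = Echo360CaptureDevice('https://192.168.61.10:8443', 'admin', 'password')
logs = device.log_list_last_count(3)
# entries is only set when the device returned XML, hence the getattr guard.
for entry in getattr(logs, 'entries', []):
    print('; '.join('{0}={1}'.format(k, v) for k, v in sorted(entry.items())))
```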
\n\n curl --silent --user $adminlogincreds --insecure --url $apiurl\"/log-list-last-count/3\"\n \"\"\"\n response = self.call_api('log-list-last-count/' + str(count), title='Retrieve the Last X Number of Log Messages', dump_xml=dump_xml)\n if dump_xml:\n return response\n if response.xml() is not None:\n xml = response.xml()\n entries = []\n for child in xml:\n entry = {}\n for line in child.text.split('\\n'):\n if len(line) > 0:\n #print line\n part = line.split(':', 1)\n entry[part[0]] = part[1].replace('\"', '').strip()\n entries.append(entry)\n response.entries = entries # List of Dict's\n return response \n\n # (3.3) CaptureControlAPICalls\n # The API calls described below are used to create and manipulate captures performed by the capture device \n # identified in the call.\n\n def capture_new_capture(self, duration, profile, description):\n \"\"\"\n (3.3.1) Create New Capture\n Creates and starts a new ad-hoc capture using the parameters described in the table below. \n All parameters must be defined.\n\n 'duration' is in seconds.\n\n curl --user $adminlogincreds --insecure --data \"duration=300&capture_profile_name=display-audio&description=test-description\" -d --url $apiurl\"/capture/new_capture\"\n # \n \"\"\"\n response = self.call_api('capture/new_capture', \n post_data='duration={0}&capture_profile_name={1}&description={2}'.format(duration, profile, description),\n title='Create New Capture')\n response.check_for_error()\n return response \n\n def capture_confidence_monitor(self, duration, profile, description):\n \"\"\"\n (3.3.2) Create \"Confidence Monitor\" Capture\n Creates and starts a new ad-hoc \"confidence monitor\" capture, providing monitoring of the capture. \n All parameters, described in the below table, must be defined.\n A confidence monitor is a dummy capture that does not get archived, sent to the ESS, or saved in any way. \n In all other regards. this call functions the same as a \"new_capture\" call described immediately above.\n If you want to confirm a real capture will work, use the Show Current Video or Display View call described \n in section 3.1.6 above.\n\n 'duration' is in seconds.\n\n curl --user admin:password --insecure --data 'duration=900&capture_profile_name=Display/Video (Podcast/Vodcast/EchoPlayer).\n Optimized for quality/full motion video&description=test-description' \n --url https://192.168.61.10:8443/capture/confidence_monitor\n # \n \"\"\"\n response = self.call_api('capture/confidence_monitor', \n post_data='duration={0}&capture_profile_name={1}&description={2}'.format(duration, profile, description),\n title='Create Confidence Monitor Capture')\n response.check_for_error()\n return response \n\n def capture_extend(self, duration):\n \"\"\"\n (3.3.3) Extend a Capture\n Sends a command to extend the current capture by the amount of time, in seconds, provided in the duration \n parameter. 
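Putting the capture-control calls of this section together, a lifecycle sketch. The host and credentials are illustrative, and `display-audio` is simply the profile name used in the curl example above:

```python
device = Echo360CaptureDevice('https://192.168.61.10:8443', 'admin', 'password')
resp = device.capture_new_capture(300, 'display-audio', 'test-description')
if resp.success():
    device.capture_pause()        # pause the running recording
    device.capture_record()       # resume it
    device.capture_extend(60)     # ask for one more minute
    print(device.capture_stop())  # stop; processing and upload start immediately
```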
Captures cannot be extended past the start time of the next scheduled capture.\n If the capture cannot be extended for the duration identified, the capture will be extended as far as \n possible within the given schedule constraints.\n\n 'duration' is in seconds.\n\n curl --user $adminlogincreds --insecure --data \"duration=300&extend=Submit+Query\" --url $apiurl\"/capture/extend\"\n # \n \"\"\"\n response = self.call_api('capture/extend', \n post_data='duration={0}&extend=Submit+Query'.format(duration),\n title='Extend a Capture')\n response.check_for_error()\n return response\n\n def capture_pause(self):\n \"\"\"\n (3.3.4) Pause a Capture\n Sends a command to pause the current recording. There must be a running capture in the recording state for this \n command to have any effect.\n\n curl --user $logincreds --insecure --data \"\" -d --url $apiurl\"/capture/pause\"\n # \n \"\"\"\n response = self.call_api('capture/pause', method='POST', title='Pause a Capture')\n response.check_for_error()\n return response \n\n def capture_record(self):\n \"\"\"\n (3.3.5) Start or Resume a Capture\n Sends a command to start recording. This command only works under the following two conditions:\n There is a running capture that is currently paused. This command resumes the paused capture.\n There is a scheduled capture in the \"waiting\" or pre-roll state. This command allows you to start the scheduled \n recording early/immediately.\n\n curl --user $logincreds --insecure --data \"\" -d --url $apiurl\"/capture/record\"\n # \n \"\"\"\n response = self.call_api('capture/record', method='POST', title='Start or Resume a Capture')\n response.check_for_error()\n return response \n\n def capture_stop(self):\n \"\"\"\n (3.3.6) Stop a Capture\n Sends the command to stop recording. There must be a currently recording capture for this command to have any \n effect. NOTE that captures are processed and uploaded immediately upon stopping the capture.\n\n curl --user $logincreds --insecure --data \"\" -d --url $apiurl\"/capture/stop\"\n # \n \"\"\"\n response = self.call_api('capture/stop', method='POST', title='Stop a Capture')\n response.check_for_error()\n return response \n\n def __str__(self):\n return 'Capture device {0}, user {1}'.format(self.server, self.username)\n \n\nclass Echo360CaptureDeviceResponse(object):\n \"\"\"\n Other results are added as attributes of the object.\n \"\"\"\n\n def __init__(self, command=None, result_code=None, result_message=None, data=None, xml_data=None, \n device=None, utc_offset=None, title=None, dump_xml=None):\n self._command = command.replace('/', '_').replace('-', '_')\n self._result_code = result_code\n self._result_message = result_message\n self._data = data\n self._xml = xml_data # might be None\n self._title = None # ensure the attribute exists before title() reads it\n self.title(title)\n self._device = device\n self._utc_offset = utc_offset\n self._dump_xml = dump_xml\n\n def title(self, new_title):\n if new_title is None:\n if self._title is None:\n return self._command\n else:\n return self._title\n else:\n self._title = new_title\n return self._title\n\n def add_value(self, xpath, name=None):\n # Adds a value to the response using attribute name 'name'.\n # Return 'name' (e.g. 'start_time'). Method will fail if no XML data.\n # The attribute is set to the desired value, if found in the XML data, or None.\n if name is None:\n name = xpath.replace('/', '_').replace('-', '_')\n text = self._xml.find('./' + xpath)\n if text is None:\n self.__dict__[name] = None\n else:\n self.__dict__[name] = text.text\n return name\n\n def add_timestamp(self, xpath, name=None):\n # Adds a value to the response using attribute name 'name', and also name_local with the\n # timestamp in local time, using self._utc_offset.\n # Returns the attribute 'name' or 'name'_local. (e.g. 'start_time' or 'start_time_local').\n # Method will fail if no XML data.\n # Capture device timestamps are always UTC (e.g. '2014-06-05T00:27:37.000Z').\n # The attribute is set to the local time, or None if the timestamp is None or if self._utc_offset is None.\n name = self.add_value(xpath, name)\n node_value = self.__dict__[name]\n if node_value is None:\n return name\n local_name = name + '_local'\n if self._utc_offset is None:\n self.__dict__[local_name] = None\n else:\n ts_struct_time = time.strptime(node_value.split('.',1)[0], \"%Y-%m-%dT%H:%M:%S\")\n ts_datetime = datetime.datetime.fromtimestamp(time.mktime(ts_struct_time)) + \\\n datetime.timedelta(minutes = int(self._utc_offset))\n self.__dict__[local_name] = ts_datetime.strftime('%Y-%m-%dT%H:%M:%S')\n return local_name\n\n def check_attribute(self, attr):\n if attr in self.__dict__:\n if self.__dict__[attr] is not None:\n return True\n return False\n\n def check_for_error(self):\n # check for:\n # \n # \n if self._xml is not None:\n if self._xml.tag == 'ok':\n self._result_code = 'success'\n self._result_message = self._xml.attrib['text']\n elif self._xml.tag == 'error':\n self._result_code = 'error'\n self._result_message = self._xml.attrib['text']\n\n def xml(self):\n return self._xml\n\n def __str__(self):\n # Useful for testing and in CLI situations.\n if self.success():\n string = '{0}: {1} {2}'.format(self._command, self._result_code, self._result_message)\n data = []\n for key in sorted(self.__dict__):\n if not key.startswith('_'):\n data.append('{0}: {1}'.format(key, self.__dict__[key]))\n if len(data) > 0:\n string += '\\nData: ' + '\\n '.join(data)\n if self._dump_xml:\n string = string + '\\nRaw XML:\\n' + self._data\n return string\n elif self._result_code == 401:\n return 'You are not authorised to use the command {0}'.format(self._command)\n else:\n return 'Command failed {0}: {1} {2}'.format(self._command, self._result_code, self._result_message)\n\n def success(self):\n return self._result_code == 'success'\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Echo360 Capture Device CLI',\n )\n parser.add_argument('-s', '--server', help='capture device ', required=True)\n parser.add_argument('-u', '--user', help='username', default='admin')\n parser.add_argument('-p', '--password', help='password', default=None)\n parser.add_argument('-d', '--debug', help='debug level', default=0, type=int)\n parser.add_argument('-t', '--timeout', help='HTTP timeout', default=4, type=int)\n parser.add_argument('-l', '--sleep', help='sleep before status (seconds)', default=2, type=int)\n parser.add_argument('-c', '--command', help='command (e.g. 
status)', default='status',\n choices=['system-status', 'status', \n 'new-capture', 'confidence-monitor', 'pause', 'resume', 'extend', 'stop',\n 'status-get-user-sections', 'status-get-user-ref', 'diagnostics-clear-cache',\n 'ping', 'traceroute', 'restart-all', 'reboot', 'log', 'system-info',\n 'status-captures', 'status-current-capture', 'status-next-capture',\n 'test-system', 'test-status', 'test-capture', 'test-confidence'])\n parser.add_argument('--duration', help='duration (seconds)', default=3600+1800, type=int)\n parser.add_argument('--profile', help='profile name', default=None)\n parser.add_argument('--description', help='description', default='capture-device.py')\n parser.add_argument('--count', help='execute command multiple times', default=1, type=int)\n parser.add_argument('--url', help='URL for ping and traceroute', default=None)\n parser.add_argument('--xml', help='Print the raw XML', action='store_true')\n args = parser.parse_args()\n\n try: # catch ctrl-c\n device = Echo360CaptureDevice(args.server, args.user, args.password, \n debuglevel=args.debug, timeout=args.timeout)\n\n # test access\n if not device.connection_test.success():\n if device.connection_test._result_code == 401:\n print('Connection Test Error (401): Incorrect capture device username or password.')\n sys.exit(1)\n elif device.connection_test._result_code == 404:\n print('Connection Test Error (404): Capture Device API error: command not found.')\n sys.exit(2)\n else:\n print('Connection Test Error ({0}): {1} to {2}'.format(\n device.connection_test._result_code, device.connection_test._result_message, args.server))\n sys.exit(3)\n\n if args.command == 'system-status':\n print(str(device.status_system(dump_xml=args.xml)))\n elif args.command == 'status':\n print(device.capture_status_str())\n if args.count > 1:\n for i in range(1, args.count):\n print(device.capture_status_str(sleep=args.sleep))\n # TODO: monitoring_snapshot(self, url)\n elif args.command == 'new-capture':\n print(str(device.capture_new_capture(args.duration, args.profile , args.description)))\n print(device.capture_status_str(sleep=args.sleep))\n elif args.command == 'confidence-monitor':\n print(str(device.capture_confidence_monitor(args.duration, args.profile , args.description)))\n print(device.capture_status_str(sleep=args.sleep))\n elif args.command == 'pause':\n print(str(device.capture_pause()))\n print(device.capture_status_str(sleep=args.sleep))\n elif args.command == 'resume':\n print(str(device.capture_record()))\n print(device.capture_status_str(sleep=args.sleep))\n elif args.command == 'extend':\n print(str(device.capture_extend(args.duration)))\n print(device.capture_status_str(sleep=args.sleep))\n elif args.command == 'stop':\n print(str(device.capture_stop()))\n print(device.capture_status_str(sleep=args.sleep))\n elif args.command == 'status-get-user-sections':\n # TODO: print(str(device.status_get_user_sections()))\n print('Not implemented yet.')\n elif args.command == 'status-get-user-ref':\n print(str(device.status_get_user_ref(dump_xml=args.xml)))\n elif args.command == 'diagnostics-clear-cache':\n print(str(device.diagnostics_clear_cache()))\n elif args.command == 'ping':\n if args.url is None:\n print(\"No ping URL specified. Use '--url'\")\n else:\n response = device.diagnostics_ping(args.url)\n if response.success():\n print('{0}\\n{1}'.format(str(response), response._data))\n else:\n print(str(response))\n elif args.command == 'traceroute':\n if args.url is None:\n print(\"No traceroute URL specified. 
Use '--url'\")\n else:\n response = device.diagnostics_traceroute(args.url)\n if response.success():\n t = response._data.replace('<br>', '\\n')\n print('{0}\\n{1}'.format(str(response), t))\n else:\n print(str(response))\n elif args.command == 'restart-all':\n print(str(device.diagnostics_restart_all()))\n elif args.command == 'reboot':\n print(str(device.diagnostics_reboot()))\n elif args.command == 'log':\n print(json.dumps(device.log_list_last_count(args.count, dump_xml=args.xml).entries, indent=4, sort_keys=True))\n elif args.command == 'system-info':\n response = device.diagnostics_system_info_ifconfig()\n if response.success():\n t = response._data.replace('<br>', '\\n').replace('<br />', '\\n')\n print('{0}\\n{1}'.format(str(response), t))\n else:\n print(str(response))\n # TODO: XML result\n # response = device.diagnostics_system_info_device()\n # print('{0}\\n{1}'.format(str(response), response._data))\n response = device.diagnostics_system_info_top()\n if response.success():\n t = response._data.replace('<pre>', '').replace('<br>', '\\n').replace('<br />', '\\n')\n print('{0}\\n{1}'.format(str(response), t))\n else:\n print(str(response))\n response = device.diagnostics_system_info_dmesg()\n if response.success():\n t = response._data.replace('<br>', '\\n').replace('
', '\\n')\n print('{0}\\n{1}'.format(str(response), t))\n else:\n print(str(response))\n elif args.command == 'status-captures':\n print(str(device.status_captures(dump_xml=args.xml)))\n elif args.command == 'status-current-capture':\n print(str(device.status_current_capture(dump_xml=args.xml)))\n elif args.command == 'status-next-capture':\n print(str(device.status_next_capture(dump_xml=args.xml)))\n elif args.command == 'test-system':\n print('\\nDevice status_system')\n print(str(device.status_system(dump_xml=args.xml)))\n elif args.command == 'test-status':\n print('\\nDevice status_system')\n print(str(device.status_system(dump_xml=args.xml)))\n print('\\nDevice status_monitoring')\n print(str(device.status_monitoring(dump_xml=args.xml)))\n print('\\nDevice status_captures')\n print(str(device.status_captures(dump_xml=args.xml)))\n print('\\nDevice status_current_capture')\n print(str(device.status_current_capture(dump_xml=args.xml)))\n print('\\nDevice status_next_capture')\n print(str(device.status_next_capture(dump_xml=args.xml)))\n elif args.command == 'test-capture':\n sleep = args.sleep\n print('\\nstop; new_capture; pause; record; extend; pause; stop')\n print(str(device.capture_stop()))\n print(device.capture_status_str(sleep=sleep))\n print(str(device.capture_new_capture(3500, 'Trinity Standard Lecture', 'test from python')))\n print(device.capture_status_str(sleep=sleep))\n print(str(device.capture_pause()))\n print(device.capture_status_str(sleep=sleep))\n print(str(device.capture_record()))\n print(device.capture_status_str(sleep=sleep))\n print(str(device.capture_extend(400)))\n print(device.capture_status_str(sleep=sleep))\n print(str(device.capture_pause()))\n print(device.capture_status_str(sleep=sleep))\n print(str(device.capture_stop()))\n print(device.capture_status_str(sleep=sleep))\n elif args.command == 'test-confidence':\n sleep = args.sleep\n print('\\nconfidence_monitor; stop')\n print(str(device.capture_confidence_monitor(360, 'Trinity Standard Lecture', 'test from python')))\n print(device.capture_status_str(sleep=sleep))\n print(str(device.capture_stop()))\n print(device.capture_status_str(sleep=sleep))\n\n except KeyboardInterrupt:\n print('\\nCtrl-C User requested exit.')\n\n\n","repo_name":"tonyallan/echo360","sub_path":"capture_device.py","file_name":"capture_device.py","file_ext":"py","file_size_in_byte":39814,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"} +{"seq_id":"74536751578","text":"# 53ms, 14.1MB\nimport collections\n\n\nclass Solution:\n def numJewelsInStones(self, jewels: str, stones: str) -> int:\n\n num = 0\n jewel_dic = collections.defaultdict(int)\n\n for char in jewels:\n jewel_dic[char] = 1\n\n for char in stones:\n if jewel_dic[char] == 1:\n num += 1\n\n return num\n","repo_name":"hoduulmu/ps","sub_path":"파이썬_알고리즘_인터뷰/11장 해시 테이블/771_jewels_and_stones/my_code2.py","file_name":"my_code2.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"1190316030","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\n@AUTHOR:Joselyn Zhao\n@CONTACT:zhaojing17@foxmail.com\n@HOME_PAGE:joselynzhao.top\n@SOFTWERE:PyCharm\n@FILE:shaixuan.py\n@TIME:2020/4/17 22:04\n@DES:\n'''\n\nmylist = [1,4,-5,10,-7,2,3,-1]\n# zheng\nzheng = [n for n in mylist if n>0]\nprint(zheng)\nfu = [n for n in mylist if 
n<0]\nprint(fu)","repo_name":"joselynzhao/Python-data-structure-and-algorithm","sub_path":"data_structure/list/shaixuan.py","file_name":"shaixuan.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"5937221837","text":"\n# Keywords arguments\ndef report_product(name, quantity, country = \"unknow\"):\n print(f'Product: {name}')\n print(f'Quantity: {quantity}')\n print(f'Country: {country}')\n\n\n# *Args\ndef calculate_average(*args):\n num_values = len(args)\n\n if num_values == 0:\n return None\n \n total_sum = sum(args)\n average = total_sum / num_values\n \n return average\n\n\ndef main():\n average_products = calculate_average(121, 250, 96, 15, 5)\n print(average_products)\n print(\"*\" * 40)\n report_product(quantity=200, name=\"GadgetX\", country=\"Perú\")\n print(\"*\" * 40)\n report_product(quantity=400, name=\"BookX\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"AlvaroVidal21/CursoPython","sub_path":"Dalto/1_Seccion_Intermedia/Codigo/004_Args.py","file_name":"004_Args.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"4539761394","text":"from sqlalchemy import and_, or_\n\nfrom common.utils import QUESTIONS_PAGE_LIMIT\n\n\ndef all_query_results_as_list_of_dicts(query):\n def to_dict(tup):\n human_readable_names = [c[\"name\"] for c in query.column_descriptions]\n return {k: v for (k, v) in zip(human_readable_names, tup, strict=True)}\n\n return [to_dict(tup) for tup in query.all()]\n\n\ndef construct_remaining_questions_query(db, YNQuestion, YNAnswer):\n def query_remaining_questions(user_id):\n answered_questions_for_user_q = YNAnswer.query \\\n .filter_by(user_id=user_id) \\\n .subquery()\n remaining_questions_for_user_q = db.session.query(YNQuestion) \\\n .outerjoin(answered_questions_for_user_q) \\\n .filter_by(answer=None)\n questions = remaining_questions_for_user_q.limit(QUESTIONS_PAGE_LIMIT).all()\n return questions\n return query_remaining_questions\n\n\ndef construct_answers_statistics_query(db, YNQuestion, YNAnswer):\n def query_answers_statistics(user_id):\n answered_questions_for_user_q = db.session.query(YNAnswer) \\\n .filter(and_(YNAnswer.user_id == user_id, or_(YNAnswer.answer == 0, YNAnswer.answer == 1))) \\\n .subquery()\n comparisons_for_user_q = db.session.query(\n (YNQuestion.answer == answered_questions_for_user_q.c.answer).label(\"is_correct\"),\n answered_questions_for_user_q.c.probability.label(\"probability\")\n ).join(answered_questions_for_user_q)\n return all_query_results_as_list_of_dicts(comparisons_for_user_q)\n return query_answers_statistics\n\n\ndef construct_predictions_statistics_query(db, Prediction):\n def query_answers_statistics(user_id):\n resolved_predictions_for_user_q = db.session.query(\n Prediction.result.label(\"is_correct\"),\n Prediction.probability.label(\"probability\"))\\\n .filter(and_(Prediction.user_id == user_id,\n or_(Prediction.result == Prediction.PredictionResult.RESOLVED_FALSE.value,\n Prediction.result == Prediction.PredictionResult.RESOLVED_TRUE.value)))\n return all_query_results_as_list_of_dicts(resolved_predictions_for_user_q)\n return query_answers_statistics\n\n\ndef construct_answers_history_query(db, YNQuestion, YNAnswer):\n def query_answers_statistics(user_id):\n answered_questions_for_user_q = db.session.query(YNAnswer) \\\n .filter_by(user_id=user_id) \\\n .subquery()\n answers_for_user_q = 
db.session.query(\n YNQuestion.answer.label(\"real_answer\"),\n answered_questions_for_user_q.c.answer.label(\"user_answer\"),\n answered_questions_for_user_q.c.probability.label(\"probability\"),\n YNQuestion.question.label(\"question\"),\n YNQuestion.comment.label(\"comment\")) \\\n .join(answered_questions_for_user_q)\n return all_query_results_as_list_of_dicts(answers_for_user_q)\n return query_answers_statistics\n","repo_name":"Siarshai/PredictionCalibrationApp","sub_path":"calibration_backend/db_ops/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"28135575211","text":"import math\r\n\r\n\r\ndef IsPointInArea(x, y):\r\n st01 = math.sqrt((x - 1) ** 2 + (y - 1) ** 2) <= 2\r\n st02 = math.sqrt((x - 1) ** 2 + (y - 1) ** 2) >= 2\r\n st1 = y >= -x and y >= 2*x + 2 and st01\r\n st2 = y <= -x and y <= 2*x + 2 and st02\r\n return st1 or st2\r\n\r\n\r\nx = float(input())\r\ny = float(input())\r\n\r\nif IsPointInArea(x, y):\r\n print('YES')\r\nelse:\r\n print('NO')\r\n","repo_name":"DobriyD/educational-programm","sub_path":"week4/123.py","file_name":"123.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"31184055433","text":"import numpy as np\nimport random\n\n\nclass OptStruct:\n \"\"\"\n 数据结构,维护所有需要操作的值\n :parameter :\n data_mat_in - 数据矩阵\n class_labels - 数据标签\n C - 松弛变量\n tolerate - 容错率\n k_tup -- 包含核函数信息的元组,第一个参数存放核函数类别,第二个参数存放必要的核函数需要用到的参数\n \"\"\"\n\n def __init__(self, data_mat_in, class_labels, C, tolerate, k_tup):\n # 数据矩阵X\n self.X = data_mat_in\n # 标签数据\n self.label_mat = class_labels\n # 松弛变量\n self.C = C\n # 容错率\n self.tolerate = tolerate\n # 矩阵行数\n self.m = np.shape(data_mat_in)[0]\n # 初始化alpha、b参数\n self.alphas = np.mat(np.zeros((self.m, 1)))\n self.b = 0\n # 根据矩阵行数初始化误差缓存,第一列为是否有效的标志位,第二位为实际误差E的值\n self.eCache = np.mat(np.zeros((self.m, 2)))\n # 初始化核K\n self.K = np.mat(np.zeros((self.m, self.m)))\n # 计算所有数据的核K, K[x1, x2]低维运算得到先映射到高维再点积的高维运算结果\n for i in range(self.m):\n self.K[:, i] = kernel_trans(self.X, self.X[i, :], k_tup)\n\n\ndef kernel_trans(X, A, k_tup):\n \"\"\"\n 通过核函数将数据转换至更高维的空间\n :param X: -- 数据矩阵\n :param A: -- 单个数据的向量\n :param k_tup: -- 包含核函数信息的元组\n :return: K - 计算的核K\n \"\"\"\n m, n = np.shape(X)\n K = np.mat(np.zeros((m, 1)))\n # 线性函数只进行内积\n if k_tup[0] == 'lin':\n K = X * A.T\n # 高斯核函数根据高斯核函数公式进行计算\n elif k_tup[0] == 'rbf':\n # 对于矩阵中的每一个元素计算高斯函数的值\n for j in range(m):\n # 计算核函数的分子(x - y)^2\n delta_row = X[j, :] - A\n K[j] = delta_row * delta_row.T\n # 计算高斯核K\n K = np.exp(K / (-1 * k_tup[1] ** 2))\n else:\n # 抛出错误,可以通过raise显示引发异常。一旦执行了raise语句,raise后面的语句将不能执行\n raise NameError('核函数无法识别')\n return K\n\n\ndef cal_Ek(oS, k):\n \"\"\"\n 计算误差\n :param oS: 数据结构\n :param k: 标号为k的数据\n :return: Ek - 标号为k的数据误差\n \"\"\"\n fx_k = float(np.multiply(oS.alphas, oS.label_mat).T * oS.K[:, k] + oS.b)\n Ek = fx_k - float(oS.label_mat[k])\n return Ek\n\n\ndef select_j_rand(i, m):\n # 随机选择一个不等于i的j\n j = i\n while j == i:\n # random.uniform(x, y)方法将随机生成一个实数,它在 [x,y) 范围内\n # 不能用radiant()!!!生成[x, y]范围的整数\n j = int(random.uniform(0, m))\n return j\n\n\ndef select_j(oS, i, Ei):\n \"\"\"\n 内循环j的选取——启发方式 + 随机选择\n :param oS: 数据结构\n :param i: 标号为i的数据的索引值\n :param Ei: 标号为i的数据误差\n :return:\n j, max_k - 标号为j或max_k的数据索引值\n Ej - 标号为j的数据误差\n \"\"\"\n # 初始化\n max_k = -1\n max_delta_e = 0\n Ej = 0\n # 根据Ei更新误差缓存\n oS.eCache[i] = [1, Ei]\n # 返回误差不为零的数据的索引值\n # 
矩阵操作.A表示把矩阵转换为数组array\n # 存储有效误差的行索引——eCache的第一列的不为零的行下标\n valid_eCache_list = np.nonzero(oS.eCache[:, 0].A)[0]\n # 若有不为零的误差,则遍历找到最大的Ek\n if len(valid_eCache_list) > 1:\n for k in valid_eCache_list:\n # 不计算i\n if k == i:\n continue\n Ek = cal_Ek(oS, k)\n delta_e = abs(Ei - Ek)\n # 找到使|Ei - Ek|最大的Ek\n if delta_e > max_delta_e:\n max_k = k\n max_delta_e = delta_e\n Ej = Ek\n return max_k, Ej\n # 没有不为零的误差,则随机选择alpha_j的索引\n else:\n j = select_j_rand(i, oS.m)\n Ej = cal_Ek(oS, j)\n return j, Ej\n\n\ndef update_Ek(oS, k):\n \"\"\"\n 计算Ek,并更新误差缓存\n :param oS: 数据结构\n :param k: 标号为k的数据的索引值\n :return: 无\n \"\"\"\n Ek = cal_Ek(oS, k)\n oS.eCache[k] = [1, Ek]\n\n\ndef clip_alpha(aj, H, L):\n # 修剪alpha\n if aj > H:\n aj = H\n if aj < L:\n aj = L\n return aj\n\n\ndef inner_L(i, oS):\n \"\"\"\n 优化的SMO算法\n :param i: 标号为i的数据的索引值\n :param oS: 数据结构\n :return:\n 1 -- 有任意一对alpha值发生变化\n 0 -- 没有任意一对alpha值发生变化或变化太小\n \"\"\"\n # 计算误差Ei\n Ei = cal_Ek(oS, i)\n # 优化alpha,设定一定的容错率\n if ((oS.label_mat[i] * Ei < -oS.tolerate) and (oS.alphas[i] < oS.C)) \\\n or ((oS.label_mat[i] * Ei > oS.tolerate) and (oS.alphas[i] > 0)):\n # 使用内循环启发方式选择j并计算Ej\n j, Ej = select_j(oS, i, Ei)\n # 保存更新前的alpha值\n alpha_i_old = oS.alphas[i].copy()\n alpha_j_old = oS.alphas[j].copy()\n # 计算上下界L和H\n if oS.label_mat[i] != oS.label_mat[j]:\n L = max(0, oS.alphas[j] - oS.alphas[i])\n H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])\n else:\n L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)\n H = min(oS.C, oS.alphas[j] + oS.alphas[i])\n if L == H:\n # print(\"L == H\")\n return 0\n # 计算eta,直接利用核函数进行低维运算\n eta = 2.0 * oS.K[i, j] - oS.K[j, j] - oS.K[i, i]\n if eta >= 0:\n # print(\"eta >= 0\")\n return 0\n # 更新alpha_j\n oS.alphas[j] -= oS.label_mat[j] * (Ei - Ej) / eta\n # 修剪alpha_j\n oS.alphas[j] = clip_alpha(oS.alphas[j], H, L)\n # 更新Ej至缓存误差\n update_Ek(oS, j)\n if abs(oS.alphas[j] - alpha_j_old) < 0.00001:\n # print(\"alpha_j变化太小\")\n return 0\n # 更新alpha_i\n oS.alphas[i] += oS.label_mat[j] * oS.label_mat[i] * (alpha_j_old - oS.alphas[j])\n # 更新Ei至缓存误差\n update_Ek(oS, i)\n # 更新b_1, b_2,利用核函数\n b1 = oS.b - Ei - oS.label_mat[i] * (oS.alphas[i] - alpha_i_old) * oS.K[i, i] \\\n - oS.label_mat[j] * (oS.alphas[j] - alpha_j_old) * oS.K[i, j]\n b2 = oS.b - Ej - oS.label_mat[i] * (oS.alphas[i] - alpha_i_old) * oS.K[i, j] \\\n - oS.label_mat[j] * (oS.alphas[j] - alpha_j_old) * oS.K[j, j]\n # 根据b_1, b_2更新b\n if 0 < oS.alphas[i] < oS.C:\n oS.b = b1\n elif 0 < oS.alphas[j] < oS.C:\n oS.b = b2\n else:\n oS.b = (b1 + b2) / 2.0\n # 成功更新一对alpha返回1\n return 1\n else:\n return 0\n\n\ndef smo_P(data_mat_in, class_labels, C, tolerate, max_iter, k_tup=('lin', 0)):\n \"\"\"\n 完整的线性SMO算法\n :param data_mat_in: 数据矩阵\n :param class_labels: 数据标签\n :param C: 松弛变量\n :param tolerate: 容错率\n :param max_iter: 最大迭代次数\n :param k_tup: 包含核函数信息的元组\n :return:\n oS.b -- SMO算法中计算的b\n oS.alphas -- SMO算法计算中的alphas\n \"\"\"\n # 初始化\n oS = OptStruct(np.mat(data_mat_in), np.mat(class_labels).transpose(), C, tolerate, k_tup)\n iter_num = 0\n entire_set = True\n alpha_pair_changed = 0\n # 外循环条件——达到最大循环次数、内循环返回1,即有一对alpha成功被优化更新\n while (iter_num < max_iter) and ((alpha_pair_changed > 0) or entire_set):\n alpha_pair_changed = 0\n # 若还需优化,则遍历整个数据集进行优化\n if entire_set:\n for i in range(oS.m):\n alpha_pair_changed += inner_L(i, oS)\n # print(f'全样本遍历: 第{iter_num}次迭代,样本: {i}, alpha优化次数: {alpha_pair_changed}')\n iter_num += 1\n # entire_set = False\n else:\n # 存储非边界alpha的行索引信息\n non_bound_is = np.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]\n for i in non_bound_is:\n alpha_pair_changed += 
inner_L(i, oS)\n # print(f'非边界遍历:第{iter_num}次迭代 样本:{i}, alpha优化次数: {alpha_pair_changed}')\n iter_num += 1\n # 实现交替遍历全体数据集和边界alpha进行更新\n # 若遍历完整个数据集,则使entire_set = False,继续更新非边界alpha\n # 若遍历完整个数据集后无优化alpha,则说明已收敛,使entire_set = False 后可提前结束循环\n if entire_set:\n entire_set = False\n # 若更新非边界alpha且无优化,则使entire_set = True,继续更新整个数据集\n elif alpha_pair_changed == 0:\n entire_set = True\n # print(f'迭代次数:{iter_num}')\n return oS.b, oS.alphas\n\n\n\n\n\n\n\n\n","repo_name":"Upupupdown/The-first-project","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":9185,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"13581429742","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Float64\nimport numpy as np\nfrom Robot.UR.URRobot import URRobot\nhost = \"192.168.178.4\"\nrobot = URRobot(host)\ntcp=robot.get_tcp_position()\n\ndef Joints():\n pub1 = rospy.Publisher('X', Float64, queue_size=10)\n pub2 = rospy.Publisher('Y', Float64, queue_size=10)\n pub3 = rospy.Publisher('Z', Float64, queue_size=10)\n rospy.init_node('UR10', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n while not rospy.is_shutdown():\n X = tcp[0]\n Y = tcp[1]\n Z = tcp[2]\n rospy.loginfo(\"X: %s, Y: %s, Z: %s\", X , Y, Z)\n pub1.publish(X)\n pub2.publish(Y)\n pub3.publish(Z)\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n Joints()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"MobMonRob/ur10_datasampling","sub_path":"scripts/tcp_cb2.py","file_name":"tcp_cb2.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"37445538843","text":"\"\"\"\n Unified Calendar\n ================\n\n Module to convert Gregorian calendar dates to Unified calendar dates as featured in the science-fiction novel\n\n A P A R T\n O F T H E\n W O R L D\n\n by R.M. Beristain\n\n\n To run on python interpreter:\n ```\n >>> from unidate import UnifiedDate as UD\n\n >>> ud = UD() # creates instance and initializes to today's date in Unified format.\n >>> ud.unify('YYYY-MM-DD') # converts Gregorian ISO date to Unified date.\n >>> ud.reverse_unidate('YYYY-QM-DD') # converts Unified ISO date to Gregorian date.\n >>> print(ud) # displays Gregorian date and various Unified date formats.\n >>> ud.print_calendar() # prints this year's Unified calendar\n ```\n For more details see `help(unidate.UnifiedDate)`\n\n Requires Python 3.6+\n\n License\n -------\n This work is licensed under the Creative Commons Attribution-ShareAlike 3.0 Australia License.\n To view a copy of this license, visit http: //creativecommons.org/licenses/by-sa/3.0/au/\n or send a letter to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\"\"\"\nfrom __future__ import annotations\n\n__author__ = \"R.M. Beristain\"\n__version__ = \"1.1.1\"\n\nfrom datetime import datetime, timedelta\nfrom math import trunc\n\nfrom calendar_data.definitions import UniDay, UniWeek, UniMonth, UQ, UnifiedDateType\nfrom calendar_data.names import FestiveDate, RegularDate\nfrom exceptions import InvalidUnifiedDateValue\nfrom presentation.styling import Style, Variant\n\n\nclass UnifiedDate:\n \"\"\"\n Transform Gregorian dates to Unified.\n\n By default we Unify the current system date if instantiated without parameters.\n To convert an arbitrary date, create an instance with an ISO 8601-formatted date string. 
E.g:\n\n ```\n udate=UnifiedDate('2015-04-20')\n ```\n\n You can also pass an ISO 8601-formatted date to an existing instance by calling the `unify` method.\n\n `UnifiedDate` can be printed to show a date's Unified format along with Sout-Western Territory and Austral\n formats.\n\n Unified Calendar Year Zero starts at Gregorian 5600 BC. This program doesn't convert prehistoric dates.\n\n Parameters\n ----------\n - user_date: ISO 8601-format date string (e.g. '2019-01-01')\n\n Properties\n ----------\n - gregorian_date : ISO 8601-format date string (e.g. '2019-01-01')\n - unified_date: Unified Date named tuple containing the following fields:\n - UnifiedWeek tuple\n - UnifiedDay tuple\n - UnifiedMonth tuple\n - year: unified year\n \"\"\"\n\n _regular_date = RegularDate()\n\n WEEKDAY = _regular_date.WEEKDAY\n _UNIFIED_MONTH_NAME_SHORT = _regular_date.UNIFIED_MONTH_NAME_SHORT\n _UNIFIED_MONTH_NAME_LONG = _regular_date.UNIFIED_MONTH_NAME_LONG\n _TERRITORIAN_MONTH_NAME_BASE = _regular_date.TERRITORIAN_MONTH_NAME_BASE\n _AUSTRAL_MONTH_NAME_BASE = _regular_date.AUSTRAL_MONTH_NAME_BASE\n _TERRITORIAN_MONTH_NAME_LONG = _regular_date.TERRITORIAN_MONTH_NAME_LONG\n _AUSTRAL_MONTH_NAME_LONG = _regular_date.AUSTRAL_MONTH_NAME_LONG\n\n del _regular_date\n\n _year_start: datetime | None = None # datetime object containing first day of date's year\n unified_date: UnifiedDateType | None = None\n swt_date: UnifiedDateType | None = None\n austral_date: UnifiedDateType | None = None\n gregorian_date: datetime | None = None\n\n def __init__(self, user_date: str = None, style: str = \"Long\") -> None:\n \"\"\"\n Initialises default values\n\n Parameters\n ----------\n - user_date: Gregorian date in ISO 8601 format.\n - style - month representation style. Can be one of 'Long' or 'Short'\n \"\"\"\n # This will validate it is (more or less) correct.\n user_date = (\n datetime.strptime(user_date, \"%Y-%m-%d\").date().isoformat()\n if user_date\n else datetime.now().date().isoformat()\n )\n self.gregorian_date = user_date\n self.unified_date = self.unify(user_date, style)\n\n def __str__(self) -> str:\n \"returns unified date in a nice format\"\n return (\n f\"{'Gregorian:':<15}{self.gregorian_date:>10} - \"\n f\"{datetime.strptime(self.gregorian_date, '%Y-%m-%d').strftime('%A %d of %B, %Y')}\\n\"\n f\"{'Unified ISO:':<15}{self.format_date(variant=Variant.UNI, style='ISO'):>10}\\n\"\n f\"{'Unified Short:':<15}{self.format_date(Variant.UNI, style='Short')}\\n\"\n f\"{'Unified Long:':<15}{self.format_date(Variant.UNI, 'Long')}\\n\"\n f\"{'Territorian:':<15}{self.format_date(Variant.SWT, 'Long')}\\n\"\n f\"{'Austral:':<15}{self.format_date(Variant.AUS, 'Long')}\\n\"\n f\"\\t{self.unified_date}\\n\"\n )\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def __check_variant(self, variant: Variant | str) -> Variant | str:\n \"\"\"Check if value given is a valid Unified Calendar `Variant`.\n\n If the value is a `Variant` instance or a known `Variant` value, return the `Variant`. If not, return the\n original parameter unchanged.\n\n Parameters\n ----------\n variant : any\n Unified calendar Variant. 
Should be a valid `Variant` Enum, but can also be one of the Enum's\n values (e.g Variant.UNI or 'Unified' are both accepted)\n\n Returns\n -------\n Union[Variant, str]\n A `Variant`, or the original parameter.\n \"\"\"\n if isinstance(variant, Variant):\n return variant\n else:\n for this in Variant:\n if this.value.upper() == variant.upper().strip():\n return this\n\n return variant\n\n def __check_style(self, style: Style | str) -> Style | str:\n \"\"\"Check if value given is a valid Unified `Style` of date representaion.\n\n If the value is a known `Style` instance, return the `Style`. If not, return the original parameter unchanged.\n\n Parameters\n ----------\n style : any\n Representation style for a Unified Calendar variant. Should be a valid `Style` Enum, but can also be one of\n the Enum's values (e.g. Style.LONG or 'Long' are both accepted)\n\n Returns\n -------\n Union[Style, str]\n A `Style`, or the original parameter.\n \"\"\"\n if isinstance(style, Style):\n return style\n else:\n for this in Style:\n if this.value.upper() == style.upper().strip():\n return this\n\n return style\n\n def format_date(self, variant: Variant = Variant.UNI, style: Style = Style.LONG) -> str:\n \"\"\"\n Set and return Unified Date, formatted according to a regional variant (e.g. South-Western Territories).\n\n NOTE: Non-unified variants don't have a short-format name; they use the same name as the Unified variant.\n\n If ``style=\"Short\"`` is specified for \"SWT\" or \"Austral\" variants, `format_date` will return a short-format\n **string**, but the actual tuple stored will retain the Long name of the month. Only the day name is\n shortened.\n For example:\n\n ```\n >>> ud.format_date(variant=Variant.SWT, style='Short')\n 'D4 4, Q2B 7620' # returned day name is shortened for SWT and Austral variants.\n\n >>> ud.swt_date.month.name\n 'Spring wane' # stored month name is always kept in long-format for all variants.\n ```\n\n Parameters\n ----------\n - variant: Regional month name variant. Variants are defined in `Variant` Enum.\n - style: Calendar representation style. Styles are defined in `Style` Enum.\n\n Returns\n -------\n - Unified date in specified format.\n \"\"\"\n variant = self.__check_variant(variant)\n style = self.__check_style(style)\n\n if variant == Variant.UNI:\n date = self.unified_date\n elif variant == Variant.SWT:\n date = self.swt_date\n elif variant == Variant.AUS:\n date = self.austral_date\n else:\n raise ValueError(f\"Unknown variant: {variant}. 
Expected {Variant}\")\n\n if not date:\n raise InvalidUnifiedDateValue(date)\n\n if style == Style.ISO: # ISO 8601U \"Unified ISO format\"\n return f\"{date.year}-{date.month.numeric.quarter}{date.month.numeric.month}-{date.day.number:02}\"\n\n if date.weekday.regular:\n date = UnifiedDateType( # replace existing date tuple with requested format\n weekday=date.weekday,\n day=self.get_uniday(\n weekday=date.weekday,\n # invalid or unknown formats are also displayed as 'Long'\n style=Style.SHORT if style == Style.SHORT else Style.LONG,\n ),\n month=self.get_unimonth(weekday=date.weekday, variant=variant, style=style),\n year=date.year,\n )\n\n if style == Style.SHORT:\n _day_number = f\"{date.day.number}\"\n else:\n _day_number = f\"{date.day.number:02}\" # For Style.LONG and .ISO\n\n return f\"{date.day.name} {_day_number}, {date.month.name} {date.year}\"\n\n # festive\n return \"{month} {year}\".format(month=date.month.name, year=date.year)\n\n def get_uniweek(self, day: int) -> UniWeek:\n \"\"\"\n Parameters\n ==========\n - day: day number (1..366) of the year.\n\n Returns:\n ========\n - UnifiedWeek namedtuple contaning:\n - regular - flag to indicate if date is regular or festive: 0=festive, 1=regular\n - number - numeric value for day of the week [1-6]\n - yearday - numeric value for day of the year [1-366]\n \"\"\"\n if day in FestiveDate.DAY:\n return UniWeek(0, FestiveDate.DAY.index(day), day)\n\n if 1 < day <= 91:\n day -= 1\n elif 92 < day <= 182:\n day -= 2\n elif 183 < day <= 273:\n day -= 3\n elif 274 < day <= 364:\n day -= 4\n else:\n raise InvalidUnifiedDateValue(f\"Day out of range: {day!r}\")\n\n return UniWeek(1, (((day % 90) % 18) % 6) or 6, day)\n\n def get_uniday(self, weekday: UniWeek, style: Style = Style.LONG) -> UniDay:\n \"\"\"\n Takes a UniWeek and returns UniDayTuple with (name of the week day, date)\n\n If `style` is specified, return day name in that format. Applies only to regular week days; festive\n day names don't change.\n\n All weeks always start in a Firstday and end in a Sixtday.\n\n Parameters\n ----------\n - weekday: UniWeek\n - style: Calendar representation style. Styles are defined in `Style` Enum.\n \"\"\"\n if weekday.regular == 0:\n return UniDay(FestiveDate.SHORT_NAME[weekday.number], 0)\n\n month_day = ((weekday.yearday % 90) % 18) or 18\n if month_day < 1 or month_day > 18:\n raise InvalidUnifiedDateValue(f\"Invalid week tuple: {weekday!r}\")\n\n if self.__check_style(style) == Style.LONG:\n return UniDay(\"\".join(k for k, v in self.WEEKDAY.items() if weekday.number in v), month_day)\n\n return UniDay(f\"D{month_day}\", month_day)\n\n def get_unimonth(self, weekday: UniWeek, variant: Variant = Variant.UNI, style: Style = Style.LONG) -> UniMonth:\n \"\"\"\n Take a unified weekday, return unified month.\n\n Parameters\n ----------\n - weekday:\n Unified weekday namedtuple\n - variant:\n Regional month name variant. Variants are defined in `Variant` Enum.\n - style:\n Calendar representation style. Styles are defined in `Style` Enum.\n \"\"\"\n if weekday.regular:\n # date is a regular day\n month_number = int(trunc((weekday.yearday - 1) / 18.0)) + 1\n if month_number > 20:\n month_number = 20\n else:\n # date is a festivity. 
These months don't have number, only name.\n month_number = FestiveDate.SHORT_NAME[weekday.number] # use week day number as index\n\n if self.__check_style(style) == Style.SHORT:\n # Return short style only if explicitely requested, else Long.\n return self._UNIFIED_MONTH_NAME_SHORT[month_number]\n\n variant = self.__check_variant(variant)\n\n if variant == Variant.AUS:\n return self._AUSTRAL_MONTH_NAME_LONG[month_number]\n if variant == Variant.SWT:\n return self._TERRITORIAN_MONTH_NAME_LONG[month_number]\n # invalid or unknown variants are returned as \"Unified\"\n return self._UNIFIED_MONTH_NAME_LONG[month_number]\n\n def unify(self, user_date: str = None, style: Style = Style.LONG) -> UnifiedDateType:\n \"\"\"\n Convert user-provided Gregorian date to Unified and Territorian dates.\n\n Takes a gregorian date string and returns a UnifiedDateType tuple containing:\n unified weekday tuple, unified day name tuple, unified month.\n\n Also populates SWT and Austral dates, but those aren't returned.\n\n NOTE: Only dates between 1AD and 9999AD in _Gregorian_ Calendar can be converted (but Julian dates\n aren't supported)\n\n Parameters\n ----------\n - user_date:\n ISO 8601-formatted Gregorian date (e.g. '2020-12-31'). Optionally accepts the sting 'Today' for\n current sytem date. If no value (None) is provided, defaults to 'Today'\n - style:\n Calendar representation style. Styles are defined in `Style` Enum.\n\n Returns\n -------\n - Unified Date as UnifiedDateType {'weekday': UnifiedWeek, 'day': UnifiedDay, 'month': UnifiedMonth, 'year': year}\n \"\"\"\n if not user_date:\n user_date = datetime.now().date().isoformat()\n self.gregorian_date = user_date\n\n try:\n udate = datetime.strptime(user_date, \"%Y-%m-%d\")\n except ValueError:\n msg = f\"Date {user_date!r} must be in ISO-8601 format (YYYY-MM-DD)\"\n print(f\"Sorry, {msg}\")\n raise ValueError(msg)\n\n try:\n self._year_start = datetime.strptime(f\"{udate.year:04}-01-01\", \"%Y-%m-%d\")\n except ValueError as err:\n msg = f\"Unable to process date {udate!r}: {err}\"\n print(msg)\n raise ValueError(msg)\n\n days = udate.timetuple().tm_yday\n year = udate.year + 5600\n uni_weekday = self.get_uniweek(days)\n uni_day = self.get_uniday(uni_weekday, style=style)\n\n try:\n self.unified_date = UnifiedDateType(\n uni_weekday,\n uni_day,\n self.get_unimonth(weekday=uni_weekday, variant=Variant.UNI, style=style),\n year,\n )\n self.swt_date = UnifiedDateType(\n uni_weekday, uni_day, self.get_unimonth(weekday=uni_weekday, variant=Variant.SWT), year\n )\n self.austral_date = UnifiedDateType(\n uni_weekday, uni_day, self.get_unimonth(weekday=uni_weekday, variant=Variant.AUS), year\n )\n except Exception as e:\n print(f\"Error {type(e)}:{e}. 
Values: weekday={uni_weekday}, day={uni_day}\")\n raise\n else:\n return self.unified_date\n\n def print_calendar(self) -> None: # pragma: no cover\n \"Print entire year calendar for current Gregorian date\"\n\n print(f\"{'Gregorian':12} {'Unified':12} {'Long':36} {'Territorian':20} Austral\")\n _save_date = self.gregorian_date\n year_start = self._year_start\n prev = None\n\n for d in range(0, 366):\n self.gregorian_date = (year_start + timedelta(days=d)).date().isoformat()\n self.unify(self.gregorian_date)\n iso = self.format_date(\"Unified\", \"ISO\")\n uni = self.format_date(\"Unified\")\n swt = self.swt_date.month.name\n aus = self.austral_date.month.name\n\n date = f\"{self.gregorian_date:12} {iso:12} {uni:36} {swt:20} {aus}\"\n\n if self.unified_date.weekday.regular == 0:\n print(f\"\\n{'=' * 104}\")\n elif self.unified_date.month.name != prev:\n print(f\"{'-' * 104}\")\n\n if d < 365:\n print(date)\n elif self.unified_date.month.numeric.quarter == 6:\n print(date)\n\n prev = self.unified_date.month.name\n\n self.unify(_save_date) # restore last used date.\n\n def print_festive(self) -> None: # pragma: no cover\n \"\"\"\n Print gregorian dates corresponding to unified festive dates\n in the year of current Gregorian date.\n \"\"\"\n from copy import deepcopy\n\n year_start = deepcopy(self._year_start)\n _save_date = self.gregorian_date\n\n for d in self.FESTIVE_DAYS:\n self.gregorian_date = (year_start + timedelta(days=d - 1)).date().isoformat()\n self.unify(self.gregorian_date)\n print(f\"{'_' * 50}\\n{self}\")\n\n self.unify(_save_date)\n\n def print_month(self) -> None: # pragma: no cover\n \"Print unified dates for the whole month corresponding to current Gregorian date.\"\n from calendar import monthrange\n\n print(f\"\\nMonth for Gregorian date {self.gregorian_date}\\n{'^' * 40}\\n\")\n\n _save_date = self.gregorian_date\n gd = datetime.strptime(self.gregorian_date, \"%Y-%m-%d\")\n last_day = monthrange(gd.year, gd.month)[1]\n\n for d in range(1, last_day + 1):\n self.gregorian_date = datetime.strptime(f\"{gd.year}-{gd.month:02}-{d:02}\", \"%Y-%m-%d\").date().isoformat()\n self.unify(self.gregorian_date)\n if self.unified_date.month.numeric.month == 0:\n print(f'{self.gregorian_date}\\t{self.format_date(\"Unified\", \"Long\")}\\n{\"-\" * 40}')\n elif self.unified_date.day.number == 1 and self.unified_date.month.numeric.month > 1:\n print(f'\\n{self.gregorian_date}\\t{self.format_date(\"Unified\", \"Long\")}')\n else:\n print(f'{self.gregorian_date}\\t{self.format_date(\"Unified\", \"Long\")}')\n\n self.unify(_save_date)\n\n def reverse_year(self, unified_year: int) -> int:\n \"\"\"\n Parameter\n ---------\n - unified_year: numeric Unified year\n\n Returns\n -------\n - numeric Gregorian year\n \"\"\"\n if unified_year is None:\n raise InvalidUnifiedDateValue(\"Invalid year value: None\")\n\n try:\n unified_year = int(unified_year)\n if unified_year >= 0:\n return unified_year - 5600\n raise InvalidUnifiedDateValue(\"Cannot convert Unified prehistoric dates.\")\n except ValueError:\n raise InvalidUnifiedDateValue(f\"{unified_year!r} - Not a valid year.\")\n return None\n\n def reverse_unidate(self, u_date: str) -> datetime:\n \"\"\"\n Convert Unified date string to Gregorian date.\n\n Parameters\n ==========\n - u_date: ISO 8601U-formatted Unified Date string.\n\n Returns:\n ========\n - datetime object.\n \"\"\"\n user_date = u_date\n try:\n _year, _quarter_month, _day = user_date.split(\"-\")\n _year = int(_year)\n _quarter = int(_quarter_month[0])\n _month = 
int(_quarter_month[1])\n _day = int(_day)\n except AttributeError as err:\n print(f\"Expected a string: {err}\")\n raise\n except IndexError as err:\n print(f\"Quarter or Month seem to be out of range: {user_date}\")\n raise\n except ValueError as err:\n print(f\"Expected an ISO-8601U (YYYY-QM-DD) date: {user_date}: {err}\")\n raise\n\n _gyear = self.reverse_year(_year) # Gregorian year\n\n if _month == 0:\n DAYNUMS = [None, 1, 92, 183, 274, 365, 366]\n if _quarter < 1 or _quarter > 6:\n raise InvalidUnifiedDateValue(f\"Not an ISO-8601U date: {user_date!r}\")\n _gday = datetime.strptime(f\"{_gyear}-{DAYNUMS[_quarter]:03}\", \"%Y-%j\")\n else:\n _julian = (90 * (_quarter - 1)) + (18 * (_month - 1)) + _day\n _gday = datetime.strptime(f\"{_gyear}-{_julian:003}\", \"%Y-%j\") + timedelta(days=_quarter)\n\n self.unify(_gday.date().isoformat())\n return _gday\n\n @classmethod\n def today(cls, style=\"Long\"):\n \"Create a UnifiedDate instance from today's date\"\n return cls(datetime.now().date().isoformat(), style)\n\n\ndef startup():\n \"Display today's date in Unidate standard\"\n today = UnifiedDate.today(style=\"Short\")\n print(f\"\"\"{__doc__}\\n\\nG'day! Today is: {today.format_date(style=\"Short\")}\\n\\n{today}\"\"\")\n\n\nif __name__ == \"__main__\":\n startup()\nelse:\n startup()\n","repo_name":"RMBeristain/unidate","sub_path":"unidate.py","file_name":"unidate.py","file_ext":"py","file_size_in_byte":20955,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"4485151823","text":"import RPi.GPIO as gpio\nfrom flask import Flask,render_template\nimport datetime\napp=Flask(__name__)\n\nsw=8\nled1=10\nled2=12\nmtr1=18\nmtr2=22\nmtren=16\n\ngpio.setmode(gpio.BOARD)\ngpio.setwarnings(False)\ngpio.setup(mtr1,gpio.OUT)\ngpio.setup(mtr2,gpio.OUT)\ngpio.setup(mtren,gpio.OUT)\n\ngpio.setup(sw,gpio.IN)\ngpio.setup(led1,gpio.OUT)\ngpio.setup(led2,gpio.OUT)\ngpio.output(mtren,gpio.LOW)\ngpio.output(mtr1,gpio.LOW)\ngpio.output(mtr2,gpio.LOW)\n\nswsts=0\nmtrsts=\"OFF \"\nled1sts=0\nled2sts=0\n\ndef fwd_motor():\n gpio.output(mtr1,gpio.HIGH)\n gpio.output(mtr2,gpio.LOW)\n gpio.output(mtren,gpio.HIGH)\n\ndef rev_motor():\n gpio.output(mtr1,gpio.LOW)\n gpio.output(mtr2,gpio.HIGH)\n gpio.output(mtren,gpio.HIGH)\n \ndef stop_motor():\n gpio.output(mtren,gpio.LOW)\n gpio.output(mtr1,gpio.LOW)\n gpio.output(mtr2,gpio.LOW)\n\n\n@app.route(\"/\")\ndef index():\n mtrsts='off'\n swsts=gpio.input(sw)\n led1sts=gpio.input(led1)\n led2sts=gpio.input(led2)\n now = datetime.datetime.now()\n timeString=now.strftime('%Y-%m-%d %H:%M:%S')\n templateData={\n 'mtrsts':mtrsts,\n 'swsts':swsts,\n 'led1':led1sts,\n 'led2':led2sts,\n 'time':timeString,\n 'title':'RPIFUN',\n }\n return render_template(\"home.html\",**templateData)\n\n@app.route(\"//\")\ndef action(device,action):\n mtrsts='off'\n now=datetime.datetime.now()\n timeString=now.strftime('%Y-%m-%d %H:%M:%S')\n\n if device=='led1':\n actuator=led1\n if device=='led2':\n actuator=led2\n\n if action=='on':\n gpio.output(actuator,gpio.HIGH)\n if action=='off':\n gpio.output(actuator,gpio.LOW)\n\n if device=='mtr':\n if(action=='forward'):\n mtrsts='forward'\n fwd_motor()\n if(action=='reverse'):\n mtrsts='REVERSE'\n rev_motor()\n if(action=='stop'):\n mtrsts=='OFF'\n stop_motor()\n \n swsts=gpio.input(sw)\n led1sts=gpio.input(led1)\n led2sts=gpio.input(led2)\n\n templateData={\n 'mtrsts':mtrsts,\n 'swsts':swsts,\n 'led1':led1sts,\n 'led2':led2sts,\n 'time':timeString,\n }\n return 
render_template(\"home.html\",**templateData)\n\n\nif __name__==\"__main__\":\n app.run()\n\n\n\n\n\n\n\n","repo_name":"raghu1199/SURVEILLANCE-ROBOT_IOT","sub_path":"Basic_Practice/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"41044261126","text":"import os\nimport os.path\nfrom datetime import date, datetime\nimport re\nimport math\nimport pandas as pd\nimport numpy as np\n\nfrom demand_manager.models import Release\n\n\nclass ReleaseLic:\n def __init__(self, lic_type, lic_file):\n self.lic_type = lic_type\n self.lic_file = lic_file\n\n self.dict_server = {}\n self.vendor = \"\"\n\n self.extract_flag = False\n self.feature = \"\"\n\n def add_feature2db(self):\n with open(self.lic_file, mode='r', encoding='utf-8' ) as f:\n for line in f:\n line = line.strip()\n if not self.extract_flag:\n if re.search(r'^SERVER\\s', line):\n list_server = line.split()\n if len(list_server) > 3:\n host = list_server[1]\n hostid = list_server[2]\n port = list_server[3]\n else:\n host = list_server[1]\n hostid = list_server[2]\n port = \"\"\n if host not in self.dict_server.keys():\n self.dict_server[host] = {\"HOST_ID\": hostid, \"PORT\": port}\n else: # TODO:\n pass\n elif re.search(r'^VENDOR\\s', line):\n self.vendor = re.sub(r'^VENDOR\\s+(\\S+).*', '\\\\1', line)\n elif re.search(r'^(FEATURE|INCREMENT)\\s', line):\n self.feature = Feature(self.lic_type, self.extract_flag)\n self.feature.extract(line)\n self.extract_flag = self.feature.extract_flag\n # if not re.search(r'\\\\$', line):\n # self.add_db()\n # else:\n # self.extract_flag = True\n else:\n self.feature.extract(line)\n self.extract_flag = self.feature.extract_flag\n # if not re.search(r'\\\\$', line):\n # print(\"Add DB...\")\n # self.add_db()\n # self.extract_flag = False\n\n\nclass Feature:\n def __init__(self, lic_type, extract_flag):\n self.lic_type = lic_type\n self.extract_flag = extract_flag\n\n self.feature = \"\"\n self.vendor = \"\"\n self.feat_version = \"\"\n self.exp_date = \"\"\n self.num_lic = 0\n self.sign = \"\"\n self.issued_date = \"\"\n self.start_date = \"\"\n self.list_param = []\n\n def extract(self, line):\n self.list_param.extend(line.split())\n if not re.search(r'[\\\\|¥]$', line):\n self.feature = self.list_param[1]\n self.vendor = self.list_param[2]\n self.feat_version = self.list_param[3]\n self.exp_date = self.list_param[4]\n self.exp_date = datetime.strptime(self.exp_date, '%d-%b-%Y').date()\n self.num_lic = self.list_param[5]\n print(self.list_param)\n print(\"FEATURE: {0}\\n\"\n \"VENDOR: {1}\\n\"\n \"FEATURE_VERSION: {2}\\n\"\n \"EXP_DATE: {3}\\n\"\n \"NUM_LIC: {4}\\n\"\n .format(self.feature, self.vendor, self.feat_version, self.exp_date, self.num_lic))\n for item in self.list_param:\n if re.search(r'=', item):\n if re.search(r'ISSUED=', item):\n self.issued_date = re.sub(r'\\S+=(\\S+)', '\\\\1', item)\n elif re.search(r'START=', item):\n self.start_date = re.sub(r'\\S+=(\\S+)', '\\\\1', item)\n self.start_date = datetime.strptime(self.start_date, '%d-%b-%Y').date()\n print(\"ISSUED_DATE: {0}\\n\"\n \"START_DATE: {1}\\n\".format(self.issued_date, self.start_date))\n\n print(\"Add DB...\")\n self.add_db()\n\n self.extract_flag = False\n else:\n self.list_param.pop()\n self.extract_flag = True\n\n def add_db(self):\n release_lic, created = Release.objects.get_or_create(\n lic_type=self.lic_type,\n lic_feature=self.feature,\n end_date = self.exp_date,\n start_date = 
self.start_date,\n num_lic_feature = self.num_lic,\n )\n release_lic.save()\n\n\n","repo_name":"hir0o428/my_trial_django_app","sub_path":"demand_manager/utils/import_lic.py","file_name":"import_lic.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"13951304739","text":"#python\n\n#This file stores the original code to run the demo.\n\n\ndef demo_run():\n #Just running the cello_demo eventually replace cello_demo with cello_kb, and demo with test.\n cello_demo = os.path.join(cello_dir, 'demo')\n os.chdir(cello_demo)\n op = os.system('mvn -e -f /cello/pom.xml -DskipTests=true -PCelloMain -Dexec.args=\"-verilog demo_verilog.v -input_promoters demo_inputs.txt -output_genes demo_outputs.txt\"')\n logging.debug(op)\n dir_list = os.listdir(cello_demo)\n logging.debug(dir_list)\n output_dirpath = 'placeholder'\n for f in dir_list:\n if f not in ['0xFE_verilog.v', 'demo_inputs.txt', 'demo_outputs.txt', 'demo_verilog.v', 'exports']:\n output_dirpath = os.path.join(cello_demo, f)\n dir_name = f\n logging.debug(output_dirpath)\n break\n if output_dirpath == 'placeholder':\n raise Exception(\"did not get output from Cello\")\n else:\n if (os.path.isfile(output_dirpath)):\n raise Exception(\"Expected directory as output from Cello, got a file: \" + output_dirpath)\n elif (os.path.isdir(output_dirpath)):\n logging.info(\"Succesfully produced directory: \" + output_dirpath)\n if dir_name[:3] == 'job':\n logging.info(\"Directory name begins with job\")\n shutil.move(output_dirpath, kb_output_folder)\n else:\n logging.critical(\"Unknown destination\")\n\n","repo_name":"OGalOz/cello","sub_path":"lib/cello_util/Trash/demo_run.py","file_name":"demo_run.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"18527669934","text":"from airflow import DAG\nfrom airflow.utils.dates import days_ago\nfrom airflow.providers.postgres.operators.postgres import PostgresOperator\nfrom airflow.utils.task_group import TaskGroup\n\nSTART_DATE = \"2021-01-01\"\nEND_DATE = \"2022-09-06\"\n\ndefault_args = {\n \"owner\": \"bebeto\",\n \"start_date\": days_ago(1),\n \"depends_on_past\": False,\n \"retries\": 1,\n}\n\ndef db_setup_dag(dag):\n with dag:\n with TaskGroup('Table_Tasks', dag=dag) as table_group:\n create_orders_table_task = PostgresOperator(\n task_id=\"create_orders_table_task\",\n postgres_conn_id=\"main_db\",\n sql=\"sql/create_orders.sql\",\n )\n\n create_reviews_table_task = PostgresOperator(\n task_id=\"create_reviews_table_task\",\n postgres_conn_id=\"main_db\",\n sql=\"sql/create_reviews.sql\",\n )\n\n create_shipments_table_task = PostgresOperator(\n task_id=\"create_shipments_table_task\",\n postgres_conn_id=\"main_db\",\n sql=\"sql/create_shipments.sql\",\n )\n\n create_agg_public_hol_task = PostgresOperator(\n task_id=\"create_agg_public_hol_task\",\n postgres_conn_id=\"main_db\",\n sql=\"sql/create_agg_pub_hol.sql\",\n )\n\n create_agg_shipments_task = PostgresOperator(\n task_id=\"create_agg_shipments_task\",\n postgres_conn_id=\"main_db\",\n sql=\"sql/create_agg_shipments.sql\",\n )\n\n create_best_product_task = PostgresOperator(\n task_id=\"create_best_product_task\",\n postgres_conn_id=\"main_db\",\n sql=\"sql/create_best_product.sql\",\n )\n\n table_group\n\n\ninit_db_setup = DAG(\n dag_id=\"init_db_setup_v1\",\n schedule_interval=None,\n start_date=days_ago(1),\n default_args=default_args,\n 
catchup=True,\n max_active_runs=1,\n tags=['data2jobs'],\n)\n\ndb_setup_dag(init_db_setup)","repo_name":"realonbebeto/ecom-data-eng","sub_path":"airflow/dags/dag_init_procedures.py","file_name":"dag_init_procedures.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"32094814172","text":"import numpy \nimport glob\n\ndata_dirr = 'DevData/'\n\ndev_stages = [\\\n 'Gestation/'\\\n ,'1-35weeks/'\\\n ,'12-24months/'\n ,'28MandAfter/'\\\n ]\n\ndev_labels = [\\\n '0'\\\n ,'1'\\\n ,'2'\\\n ,'3'\\\n ]\n\nin_dirr = 'all/'\n\nall_islets_info_file = 'all_islets_info.csv'\nmm = open(all_islets_info_file, 'w')\nmm.write('Stage,Subject,Islet,fileprefix,Area,n_ad,n_b\\n')\n\n#count = 0\n\nprev_area = 0\n\nfor idx, dev in enumerate(dev_stages):\n\n this_dirr = data_dirr + dev + in_dirr\n\n all_subjects = glob.glob(this_dirr+'*.tsv')\n\n for subject in all_subjects:\n\n line = subject.split('/')\n subject_name = line[-1].split('.')[0]\n\n this_stage = dev_labels[idx]\n\n islet_prefix = 'Stage'+ this_stage + '_' + subject_name \n\n ff = open(subject, 'r')\n\n isletnum = -1\n \n write_lines = []\n n_ad = 0\n n_b = 0\n\n #print(count, end='\\r')\n\n for line in ff:\n this_line = line.split('\\t')\n this_islet_num = int(this_line[0])\n ty = int(this_line[-2])\n\n this_area = float(this_line[-1])\n\n if ty == 2:\n n_b += 1\n else:\n n_ad += 1\n\n if isletnum != this_islet_num:\n\n if isletnum != -1:\n if (n_ad > 5) and (n_b > 5):\n\n isletfile = islet_prefix + '_Islet' + str(isletnum)\n\n #('Stage,Subject,Islet,fileprefix,Area,n_ad,n_b\\n')\n mm.write(this_stage\\\n + ',' + subject_name\\\n + ',' + str(isletnum)\\\n + ',' + isletfile\\\n + ',' + str(prev_area)\\\n + ',' + str(n_ad)\\\n + ',' + str(n_b)\\\n + '\\n'\n )\n\n #gg = open(data_dirr + '/all_islets/' + isletfile + '.tsv', 'w')\n\n #for lline in write_lines:\n # gg.write(lline)\n\n #gg.close()\n\n n_b = 0\n n_ad = 0\n write_lines = []\n\n isletnum = this_islet_num\n prev_area = this_area\n\n write_lines.append(line)\n\n ff.close()\n\n\n\nmm.close()\n\n\n\n","repo_name":"nihcompmed/Pancreatic-Islets","sub_path":"Analysis_code/parse_data_to_single_islet.py","file_name":"parse_data_to_single_islet.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"1188209685","text":"from tensorflow import keras\n\n# from tut02 import word_index\n\ndata = keras.datasets.imdb\nword_index = data.get_word_index()\nword_index = {k: (v+3) for k, v in word_index.items()}\nword_index[\"\"] = 0\nword_index[\"\"] = 1\nword_index[\"\"] = 2\nword_index[\"\"] = 3\n\n\nmodel = keras.models.load_model(\"model.h5\")\n\ndef review_encode(s):\n encoded = [1]\n for word in s:\n encoded.append(word_index.get(word, 2))\n return encoded\n\nwith open(\"data/text.txt\", encoding=\"utf-8\") as f:\n for line in f:\n nline = line.lower().replace(\",\", \"\")\\\n .replace(\".\", \"\")\\\n .replace(\"(\", \"\")\\\n .replace(\")\", \"\")\\\n .replace(\":\", \"\")\\\n .replace(\"\\\"\", \"\")\\\n .strip()\\\n .split(\" \")\n encode = review_encode(nline)\n encode = keras.preprocessing.sequence.pad_sequences(\n [encode], value=word_index[\"\"],\n padding=\"post\", maxlen=250)\n print(type(encode))\n print(encode)\n predict = model.predict(encode)\n print(line)\n 
print(predict[0])\n\n\n\n","repo_name":"binchen15/tensorflow-demo1","sub_path":"test_tut02.py","file_name":"test_tut02.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"42485539041","text":"from datetime import datetime\nfrom email.mime.text import MIMEText\nfrom email.utils import formataddr\n\nimport requests\nimport smtplib\nfrom time import sleep\nfrom lxml import etree\n\nfrom .redis_pool import get_redis\n\nsender = 'ling1ciel@163.com'\nuser = 'ling1ciel@163.com'\npassword = 'hyc121019'\n\n\ndef send_mail(to, title, url):\n ret = True\n try:\n mail_msg = \"\"\"\n

<p><a href=\"{0}\">点击查看最新消息</a></p>\n <p>{1}</p>\n \"\"\".format(url, title)\n message = MIMEText(mail_msg, 'html', 'utf-8')\n message['From'] = formataddr([\"夏尔的实验室\", sender])\n message['Subject'] = '燕大研招办最新消息|夏尔的实验室'\n\n server = smtplib.SMTP_SSL(\"smtp.163.com\", 465) # 发件人邮箱中的SMTP服务器,端口是25\n server.login(user=user, password=password) # 括号中对应的是发件人邮箱账号、邮箱密码\n server.sendmail(sender, to, message.as_string()) # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件\n server.quit() # 关闭连接\n except smtplib.SMTPSenderRefused as e: # 如果 try 中的语句没有执行,则会执行下面的 ret=False\n print(e)\n ret = False\n except smtplib.SMTPRecipientsRefused as e: # 如果 try 中的语句没有执行,则会执行下面的 ret=False\n print(e)\n ret = False\n except smtplib.SMTPDataError as e: # 如果 try 中的语句没有执行,则会执行下面的 ret=False\n print(e)\n ret = False\n return ret\n\n\ndef yzb_zxxx():\n with get_redis() as redis:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36\"\n }\n while True:\n print(datetime.now(), ':关闭睡眠,获取数据')\n response = requests.get(\"https://zsjyc.ysu.edu.cn/ssszsxxw/zxxx.htm\", headers=headers)\n html = etree.HTML(response.content.decode('utf-8'))\n trs = html.xpath(\"//tr[@class='list1']\")\n for tr in trs:\n title = str(tr.xpath(\"td/a/text()\")[0])\n url = str(tr.xpath(\"td/a/@href\")[0])\n if not redis.sismember('yzb:zxxx_url', url):\n # 保存数据到redis中\n redis.sadd('yzb:zxxx_title', title)\n redis.sadd('yzb:zxxx_url', url)\n\n # 发送邮件\n try:\n url = \"https://zsjyc.ysu.edu.cn\" + url.split(\"..\")[-1]\n receivers = [to.decode('utf-8') for to in redis.smembers('yzb:zxxx_to')]\n if send_mail(to=receivers, title=title, url=url):\n print(\"发送成功\")\n else:\n print(\"发送失败\")\n except smtplib.SMTPException:\n print(\"Error: 无法发送邮件\")\n print(datetime.now(), ':开启睡眠')\n sleep(5 * 60)\n\n\nif __name__ == '__main__':\n yzb_zxxx()\n","repo_name":"ciel002/django-blog","sub_path":"utils/yzb_zxxx.py","file_name":"yzb_zxxx.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}